Pyspark DataFrameWriter.save() Error

java.lang.UnsupportedOperationException: The DefaultMongoPartitioner requires MongoDB >= 3.2

Google Groups | Weng Shao Fong | 5 months ago

    Root Cause Analysis

java.lang.UnsupportedOperationException: The DefaultMongoPartitioner requires MongoDB >= 3.2
    at com.mongodb.spark.rdd.partitioner.DefaultMongoPartitioner.partitions(DefaultMongoPartitioner.scala:58)
    at com.mongodb.spark.rdd.MongoRDD.getPartitions(MongoRDD.scala:137)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:326)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:39)
    at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2183)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
    at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2532)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$execute$1(Dataset.scala:2182)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collect(Dataset.scala:2189)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:1925)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:1924)
    at org.apache.spark.sql.Dataset.withTypedCallback(Dataset.scala:2562)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:1924)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:2139)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:239)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:237)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:280)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:128)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:211)
    at java.lang.Thread.run(Thread.java:745)