java.lang.UnsupportedOperationException: The DefaultMongoPartitioner requires MongoDB >= 3.2

Google Groups | Weng Shao Fong | 5 months ago

Pyspark DataFrameWriter.save() Error
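
The thread title points at DataFrameWriter.save(), but the frames under Root Cause Analysis below end in SparkPlan.executeTake and CollectLimitExec, which is the plan Spark executes for a limited collect such as df.show(). A minimal PySpark sketch of that read path, assuming the 2.x mongo-spark-connector against a MongoDB server older than 3.2; the URI, database, collection, and app name are placeholders, not details from the original report:

    # Hypothetical reproduction sketch; every name and URI here is a placeholder.
    from pyspark.sql import SparkSession

    spark = (SparkSession.builder
             .appName("mongo-repro")
             .config("spark.mongodb.input.uri", "mongodb://localhost/test.coll")
             .config("spark.mongodb.output.uri", "mongodb://localhost/test.coll")
             .getOrCreate())

    df = spark.read.format("com.mongodb.spark.sql.DefaultSource").load()
    df.show()  # computing MongoRDD partitions fails against a pre-3.2 server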

Root Cause Analysis

java.lang.UnsupportedOperationException: The DefaultMongoPartitioner requires MongoDB >= 3.2
    at com.mongodb.spark.rdd.partitioner.DefaultMongoPartitioner.partitions(DefaultMongoPartitioner.scala:58)
    at com.mongodb.spark.rdd.MongoRDD.getPartitions(MongoRDD.scala:137)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:326)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:39)
    at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2183)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
    at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2532)
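
Reading the trace: the repeated RDD.partitions -> MapPartitionsRDD.getPartitions -> Option.getOrElse rounds are just Spark walking back up the RDD lineage, one round per intermediate RDD; the actual failure is the first frame, where DefaultMongoPartitioner refuses to split the collection. The default partitioner samples the collection via the $sample aggregation stage, which MongoDB only added in 3.2, so short of upgrading the server the usual fix is to pin a partitioner that works on older deployments. A minimal sketch continuing the example above, again with a placeholder URI:

    # Workaround sketch: pick a partitioner that does not need $sample.
    # MongoSplitVectorPartitioner suits standalone/replica-set deployments
    # (requires splitVector privileges); MongoShardedPartitioner suits
    # sharded clusters; MongoPaginateByCountPartitioner runs anywhere but
    # can be slow on large collections.
    df = (spark.read
          .format("com.mongodb.spark.sql.DefaultSource")
          .option("uri", "mongodb://localhost/test.coll")        # placeholder
          .option("partitioner", "MongoSplitVectorPartitioner")
          .load())

The same choice can be set globally via the spark.mongodb.input.partitioner configuration property; upgrading the MongoDB server to 3.2 or later removes the need for any override.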