java.lang.NullPointerException

JIRA | Eric Eckstrand | 2 years ago
This exception is not yet in the Samebug knowledge base; the closest matches found on the web are listed below.
  1.

    In H2OSchemaRDDTest, the JUnit test converting DataFrame[T_ENUM] to SchemaRDD[StringType] fails on schemaRDD.count with the trace below (a self-contained sketch of this failure shape follows this report):

    java.lang.NullPointerException
        at org.apache.spark.rdd.H2OSchemaRDD$$anon$1.next(H2OSchemaRDD.scala:73)
        at org.apache.spark.rdd.H2OSchemaRDD$$anon$1.next(H2OSchemaRDD.scala:44)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$6.apply(Aggregate.scala:132)
        at org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$6.apply(Aggregate.scala:128)
        at org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:601)
        at org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:601)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
        at org.apache.spark.scheduler.Task.run(Task.scala:56)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:196)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

    JIRA | 2 years ago | Eric Eckstrand
    java.lang.NullPointerException
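
    To make the failure shape concrete, here is a minimal Scala sketch. It is not the Sparkling Water source and every name in it (EnumDomainSketch, domain, enumIndices, asStrings) is hypothetical; it only assumes that the row iterator maps categorical (enum) indices to their String levels and that the level array can unexpectedly be null.

        // Hypothetical sketch of the reported failure shape; not Sparkling Water code.
        object EnumDomainSketch {
          def main(args: Array[String]): Unit = {
            val domain: Array[String] = null        // enum levels unexpectedly missing for the column
            val enumIndices = Iterator(0, 1, 2)

            // The NullPointerException is thrown inside next(), mirroring the frame
            // at H2OSchemaRDD.scala:73 in the trace above.
            val asStrings: Iterator[String] = new Iterator[String] {
              def hasNext: Boolean = enumIndices.hasNext
              def next(): String  = domain(enumIndices.next())
            }

            // Nothing fails until something forces the iterator, just as the test
            // only fails once schemaRDD.count runs.
            try println(asStrings.size)
            catch { case e: NullPointerException => println(s"forcing the iterator failed: $e") }
          }
        }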

    Root Cause Analysis

    1. java.lang.NullPointerException

      No message provided

      at org.apache.spark.rdd.H2OSchemaRDD$$anon$1.next()
    2. Sparkling Water (H2OSchemaRDD, packaged under org.apache.spark.rdd)
      H2OSchemaRDD$$anon$1.next
      1. org.apache.spark.rdd.H2OSchemaRDD$$anon$1.next(H2OSchemaRDD.scala:73)
      2. org.apache.spark.rdd.H2OSchemaRDD$$anon$1.next(H2OSchemaRDD.scala:44)
      2 frames
    3. Scala
      Iterator$$anon$11.next
      1. scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
      1 frame
    4. Spark Project SQL
      Aggregate$$anonfun$execute$1$$anonfun$6.apply
      1. org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$6.apply(Aggregate.scala:132)
      2. org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$6.apply(Aggregate.scala:128)
      2 frames
    5. Spark
      Executor$TaskRunner.run
      1. org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:601)
      2. org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:601)
      3. org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
      4. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
      5. org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
      6. org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
      7. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
      8. org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
      9. org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
      10. org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
      11. org.apache.spark.scheduler.Task.run(Task.scala:56)
      12. org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:196)
      12 frames
    6. Java RT
      Thread.run
      1. java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
      2. java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
      3. java.lang.Thread.run(Thread.java:745)
      3 frames
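
    The frame groups above show the NullPointerException is raised inside the H2OSchemaRDD row iterator (group 2) and only propagates outward when Spark SQL's Aggregate forces that iterator inside a ShuffleMapTask (groups 4-5). The sketch below uses plain Spark with hypothetical names (LazyFailureSketch, broken) and nothing from the Sparkling Water code; it illustrates why the error surfaces at count() rather than where the conversion is defined: RDD transformations are lazy, so a bad value is only hit when an action runs the partition iterators.

        import org.apache.spark.{SparkConf, SparkContext}

        // Hypothetical illustration of lazy evaluation; not related to any actual fix.
        object LazyFailureSketch {
          def main(args: Array[String]): Unit = {
            val sc = new SparkContext(new SparkConf().setAppName("lazy-failure-sketch").setMaster("local[2]"))
            try {
              // Defining the transformation succeeds: nothing is evaluated yet.
              val broken = sc.parallelize(Seq("a", "b", null: String)).map(_.length)

              // The NullPointerException is only thrown when count() forces each
              // partition's iterator inside a task, which is why the reported trace
              // is rooted at the count action even though the null originates in
              // the row iterator's next().
              broken.count()
            } catch {
              case e: Exception => println(s"count() failed lazily: ${e.getMessage}")
            } finally {
              sc.stop()
            }
          }
        }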