Did you search Google with only the first line of a Java stack trace?

We can recommend more relevant solutions and speed up debugging when you paste your entire stack trace with the exception message. Try a sample exception.

Recommended solutions based on your search

Solutions on the web

via GitHub by ash211, 1 year ago
org.apache.parquet.column.values.dictionary.PlainValuesDictionary$PlainBinaryDictionary
java.lang.UnsupportedOperationException: org.apache.parquet.column.values.dictionary.PlainValuesDictionary$PlainBinaryDictionary
    at org.apache.parquet.column.Dictionary.decodeToLong(Dictionary.java:52)[parquet-column-1.7.0.jar:1.7.0]
    at org.apache.spark.sql.execution.vectorized.OnHeapColumnVector.getLong(OnHeapColumnVector.java:274)[spark-sql_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.sql.execution.vectorized.ColumnVector.getDecimal(ColumnVector.java:588)[spark-sql_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)[na:na]
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)[spark-sql_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)[spark-sql_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)[spark-sql_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)[spark-sql_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:803)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:803)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.scheduler.Task.run(Task.scala:86)[spark-core_2.11-2.0.1.jar:2.0.1]
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)[spark-core_2.11-2.0.1.jar:2.0.1]