java.lang.AbstractMethodError: org.apache.spark.sql.CarbonContext$$anon$1.org$apache$spark$sql$catalyst$analysis$OverrideCatalog$_setter_$org$apache$spark$sql$catalyst$analysis$OverrideCatalog$$overrides_$eq(Ljava/util/concurrent/ConcurrentHashMap;)V

GitHub | ravipesala | 4 months ago
  1. 0

    GitHub comment 873#235230431

    GitHub | 4 months ago | ravipesala
    java.lang.AbstractMethodError: org.apache.spark.sql.CarbonContext$$anon$1.org$apache$spark$sql$catalyst$analysis$OverrideCatalog$_setter_$org$apache$spark$sql$catalyst$analysis$OverrideCatalog$$overrides_$eq(Ljava/util/concurrent/ConcurrentHashMap;)V
  2. 0

    cc.sql 执行创建表的时候报如下错误 (cc.sql reports the following error when executing a CREATE TABLE statement)

    GitHub | 4 months ago | shijiyu
    java.lang.AbstractMethodError: org.apache.spark.sql.CarbonContext$$anon$1.org$apache$spark$sql$catalyst$analysis$OverrideCatalog$_setter_$org$apache$spark$sql$catalyst$analysis$OverrideCatalog$$overrides_$eq(Ljava/util/concurrent/ConcurrentHashMap;)V
  3. 0

    {code} CREATE TABLE test1( customer_id int , uri text , browser text, epoch bigint , PRIMARY KEY (customer_id , epoch,uri) ) {code} Start spark shell: {code} ~/Github/spark-1.4.0-bin-cdh4$ SPARK_CLASSPATH=spark-cassandra-connector_2.10-1.4.0-M1.jar bin/spark-shell --conf spark.cassandra.connection.host=127.0.0.1 {code} {code} scala>import org.apache.spark.sql.cassandra._ scala> val cass=new CassandraSQLContext(sc) cass: org.apache.spark.sql.cassandra.CassandraSQLContext = org.apache.spark.sql.cassandra.CassandraSQLContext@3a665b22 scala> cass.sql("select * from yana_test.test1") java.lang.AbstractMethodError: org.apache.spark.sql.cassandra.CassandraSQLContext$$anon$2.conf()Lorg/apache/spark/sql/catalyst/CatalystConf; at org.apache.spark.sql.catalyst.analysis.Catalog$class.processTableIdentifier(Catalog.scala:62) at org.apache.spark.sql.cassandra.CassandraCatalog.processTableIdentifier(CassandraCatalog.scala:11) at org.apache.spark.sql.catalyst.analysis.OverrideCatalog$class.lookupRelation(Catalog.scala:156) at org.apache.spark.sql.cassandra.CassandraSQLContext$$anon$2.lookupRelation(CassandraSQLContext.scala:218) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:222) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:233) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:229) at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:222) at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:222) at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:51) at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:221) at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:242) at scala.collection.Iterator$$anon$11.next(Iterator.scala:328) at 
scala.collection.Iterator$class.foreach(Iterator.scala:727) at scala.collection.AbstractIterator.foreach(Iterator.scala:1157) at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47) at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273) at scala.collection.AbstractIterator.to(Iterator.scala:1157) at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265) at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157) at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252) at scala.collection.AbstractIterator.toArray(Iterator.scala:1157) at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildrenDown(TreeNode.scala:272) at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:227) at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:212) {code}

    DataStax JIRA | 1 year ago | Yana Kadiyska
    java.lang.AbstractMethodError: org.apache.spark.sql.cassandra.CassandraSQLContext$$anon$2.conf()Lorg/apache/spark/sql/catalyst/CatalystConf;
  4. Speed up your debug routine!

    Automated exception search integrated into your IDE

  5. 0

    {code} CREATE TABLE test1( customer_id int , uri text , browser text, epoch bigint , PRIMARY KEY (customer_id , epoch,uri) ) {code} Start spark shell: {code} ~/Github/spark-1.4.0-bin-cdh4$ SPARK_CLASSPATH=spark-cassandra-connector_2.10-1.4.0-M1.jar bin/spark-shell --conf spark.cassandra.connection.host=127.0.0.1 {code} {code} scala>import org.apache.spark.sql.cassandra._ scala> val cass=new CassandraSQLContext(sc) cass: org.apache.spark.sql.cassandra.CassandraSQLContext = org.apache.spark.sql.cassandra.CassandraSQLContext@3a665b22 scala> cass.sql("select * from yana_test.test1") java.lang.AbstractMethodError: org.apache.spark.sql.cassandra.CassandraSQLContext$$anon$2.conf()Lorg/apache/spark/sql/catalyst/CatalystConf; at org.apache.spark.sql.catalyst.analysis.Catalog$class.processTableIdentifier(Catalog.scala:62) at org.apache.spark.sql.cassandra.CassandraCatalog.processTableIdentifier(CassandraCatalog.scala:11) at org.apache.spark.sql.catalyst.analysis.OverrideCatalog$class.lookupRelation(Catalog.scala:156) at org.apache.spark.sql.cassandra.CassandraSQLContext$$anon$2.lookupRelation(CassandraSQLContext.scala:218) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:222) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:233) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:229) at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:222) at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:222) at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:51) at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:221) at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:242) at scala.collection.Iterator$$anon$11.next(Iterator.scala:328) at 
scala.collection.Iterator$class.foreach(Iterator.scala:727) at scala.collection.AbstractIterator.foreach(Iterator.scala:1157) at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47) at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273) at scala.collection.AbstractIterator.to(Iterator.scala:1157) at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265) at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157) at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252) at scala.collection.AbstractIterator.toArray(Iterator.scala:1157) at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildrenDown(TreeNode.scala:272) at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:227) at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:212) {code}

    DataStax JIRA | 1 year ago | Yana Kadiyska
    java.lang.AbstractMethodError: org.apache.spark.sql.cassandra.CassandraSQLContext$$anon$2.conf()Lorg/apache/spark/sql/catalyst/CatalystConf;
  6. 0

    spark-shell example not working

    GitHub | 10 months ago | INRIX-Trang-Nguyen
    java.lang.AbstractMethodError: org.apache.spark.sql.catalyst.expressions.Expression.genCode(Lorg/apache/spark/sql/catalyst/expressions/codegen/CodeGenContext;Lorg/apache/spark/sql/catalyst/expressions/codegen/GeneratedExpressionCode;)Ljava/lang/String;

    Not finding the right solution?
    Take a tour to get the most out of Samebug.

    Tired of useless tips?

    Automated exception search integrated into your IDE

    Root Cause Analysis

    1. java.lang.AbstractMethodError

      org.apache.spark.sql.CarbonContext$$anon$1.org$apache$spark$sql$catalyst$analysis$OverrideCatalog$_setter_$org$apache$spark$sql$catalyst$analysis$OverrideCatalog$$overrides_$eq(Ljava/util/concurrent/ConcurrentHashMap;)V

      at org.apache.spark.sql.catalyst.analysis.OverrideCatalog$class.$init$()
    2. Spark Project Catalyst
      OverrideCatalog$class.$init$
      1. org.apache.spark.sql.catalyst.analysis.OverrideCatalog$class.$init$(Catalog.scala:132)
      1 frame
    3. Spark Project SQL
      DataFrame.<init>
      1. org.apache.spark.sql.CarbonContext$$anon$1.<init>(CarbonContext.scala:41)
      2. org.apache.spark.sql.CarbonContext.catalog$lzycompute(CarbonContext.scala:41)
      3. org.apache.spark.sql.CarbonContext.catalog(CarbonContext.scala:39)
      4. org.apache.spark.sql.CarbonContext.analyzer$lzycompute(CarbonContext.scala:45)
      5. org.apache.spark.sql.CarbonContext.analyzer(CarbonContext.scala:45)
      6. org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:34)
      7. org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:133)
      7 frames
    4. org.carbondata.integration
      CarbonDataFrameRDD.<init>
      1. org.carbondata.integration.spark.rdd.CarbonDataFrameRDD.<init>(CarbonDataFrameRDD.scala:23)
      1 frame
    5. Spark Project SQL
      CarbonContext.sql
      1. org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:62)
      1 frame