java.lang.NoSuchMethodError: com.google.protobuf.LazyStringList.getUnmodifiableView()Lcom/google/protobuf/LazyStringList;

    Source: fluxcapacitor/pipeline@b6c4fd6 (GitHub): merge of pull request #14 from fluxcapacitor/nlp

    Root Cause Analysis

    1. java.lang.NoSuchMethodError

      com.google.protobuf.LazyStringList.getUnmodifiableView()Lcom/google/protobuf/LazyStringList;

      at edu.stanford.nlp.pipeline.CoreNLPProtos$Token$Builder.buildPartial()

      LazyStringList.getUnmodifiableView() was added in protobuf-java 2.5.0, so the generated CoreNLPProtos classes were built against 2.5.0 or newer while an older protobuf-java (2.4.x) wins on the runtime classpath. See the diagnosis and fix sketches after the stack trace.
    2. Stanford CoreNLP
      ProtobufAnnotationSerializer.toProto
      1. edu.stanford.nlp.pipeline.CoreNLPProtos$Token$Builder.buildPartial(CoreNLPProtos.java:12038)
      2. edu.stanford.nlp.pipeline.CoreNLPProtos$Token$Builder.build(CoreNLPProtos.java:11940)
      3. edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer.toProto(ProtobufAnnotationSerializer.java:237)
      4. edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer.toProtoBuilder(ProtobufAnnotationSerializer.java:383)
      5. edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer.toProto(ProtobufAnnotationSerializer.java:344)
      6. edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer.toProtoBuilder(ProtobufAnnotationSerializer.java:493)
      7. edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer.toProto(ProtobufAnnotationSerializer.java:455)
      7 frames
    3. com.databricks.spark
      CoreNLP$$anonfun$1.apply
      1. com.databricks.spark.corenlp.CoreNLP$$anonfun$1.apply(CoreNLP.scala:77)
      2. com.databricks.spark.corenlp.CoreNLP$$anonfun$1.apply(CoreNLP.scala:73)
      2 frames
    4. Spark Project Catalyst
      InterpretedProjection.apply
      1. org.apache.spark.sql.catalyst.expressions.ScalaUDF$$anonfun$2.apply(ScalaUDF.scala:75)
      2. org.apache.spark.sql.catalyst.expressions.ScalaUDF$$anonfun$2.apply(ScalaUDF.scala:74)
      3. org.apache.spark.sql.catalyst.expressions.ScalaUDF.eval(ScalaUDF.scala:964)
      4. org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:247)
      5. org.apache.spark.sql.catalyst.expressions.Alias.eval(namedExpressions.scala:121)
      6. org.apache.spark.sql.catalyst.expressions.InterpretedProjection.apply(Projection.scala:46)
      7. org.apache.spark.sql.catalyst.expressions.InterpretedProjection.apply(Projection.scala:30)
      7 frames
    5. Scala
      AbstractTraversable.map
      1. scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
      2. scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
      3. scala.collection.immutable.List.foreach(List.scala:318)
      4. scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
      5. scala.collection.AbstractTraversable.map(Traversable.scala:105)
      5 frames
    6. Spark Project Catalyst
      TreeNode$$anonfun$4.apply
      1. org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation$$anonfun$apply$23.applyOrElse(Optimizer.scala:834)
      2. org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation$$anonfun$apply$23.applyOrElse(Optimizer.scala:831)
      3. org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:227)
      4. org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:227)
      5. org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:51)
      6. org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:226)
      7. org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$1.apply(TreeNode.scala:232)
      8. org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$1.apply(TreeNode.scala:232)
      9. org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:249)
      9 frames
    7. Scala
      AbstractIterator.toArray
      1. scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
      2. scala.collection.Iterator$class.foreach(Iterator.scala:727)
      3. scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
      4. scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
      5. scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
      6. scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
      7. scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
      8. scala.collection.AbstractIterator.to(Iterator.scala:1157)
      9. scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
      10. scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
      11. scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
      12. scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
      12 frames
    8. Spark Project Catalyst
      RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply
      1. org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:279)
      2. org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:232)
      3. org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:217)
      4. org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation$.apply(Optimizer.scala:831)
      5. org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation$.apply(Optimizer.scala:830)
      6. org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:83)
      7. org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:80)
      7 frames
    9. Scala
      WrappedArray.foldLeft
      1. scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:51)
      2. scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:60)
      3. scala.collection.mutable.WrappedArray.foldLeft(WrappedArray.scala:34)
      3 frames
    10. Spark Project Catalyst
      RuleExecutor$$anonfun$execute$1.apply
      1. org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:80)
      2. org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:72)
      2 frames
    11. Scala
      List.foreach
      1. scala.collection.immutable.List.foreach(List.scala:318)
      1 frame
    12. Spark Project Catalyst
      RuleExecutor.execute
      1. org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:72)
      1 frame
    13. Spark Project SQL
      DataFrame.first
      1. org.apache.spark.sql.SQLContext$QueryExecution.optimizedPlan$lzycompute(SQLContext.scala:921)
      2. org.apache.spark.sql.SQLContext$QueryExecution.optimizedPlan(SQLContext.scala:921)
      3. org.apache.spark.sql.SQLContext$QueryExecution.sparkPlan$lzycompute(SQLContext.scala:926)
      4. org.apache.spark.sql.SQLContext$QueryExecution.sparkPlan(SQLContext.scala:924)
      5. org.apache.spark.sql.SQLContext$QueryExecution.executedPlan$lzycompute(SQLContext.scala:930)
      6. org.apache.spark.sql.SQLContext$QueryExecution.executedPlan(SQLContext.scala:930)
      7. org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:53)
      8. org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:1903)
      9. org.apache.spark.sql.DataFrame.collect(DataFrame.scala:1384)
      10. org.apache.spark.sql.DataFrame.head(DataFrame.scala:1314)
      11. org.apache.spark.sql.DataFrame.head(DataFrame.scala:1321)
      12. org.apache.spark.sql.DataFrame.first(DataFrame.scala:1328)
      12 frames
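
    Diagnosis and Fix

    As the root-cause entry notes, this error is the classic symptom of a protobuf version conflict. A quick way to confirm it is to ask the classloader which jar supplies LazyStringList and whether the 2.5.0+ method is visible. A minimal diagnostic sketch, plain JDK reflection in Scala (running it inside spark-shell is an assumption about the environment):

        // Which jar provides LazyStringList, and does it expose getUnmodifiableView()?
        val cls = Class.forName("com.google.protobuf.LazyStringList")
        println(Option(cls.getProtectionDomain.getCodeSource).map(_.getLocation))
        println(cls.getMethods.exists(_.getName == "getUnmodifiableView"))

    If the second println prints false, an old protobuf is shadowing the one CoreNLP was generated against.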
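
    If the check points at a protobuf-java 2.4.x jar pulled in by your own dependency tree, pin a newer version in the build. A build.sbt sketch, assuming the project builds with sbt (the 2.6.1 version is illustrative; any 2.5.0+ release compatible with your other dependencies should work):

        // Force a protobuf-java that declares LazyStringList.getUnmodifiableView()
        dependencyOverrides += "com.google.protobuf" % "protobuf-java" % "2.6.1"

    If instead the old protobuf ships inside the Spark/Hadoop assembly, a build override will not help: consider setting spark.driver.userClassPathFirst / spark.executor.userClassPathFirst, or shade protobuf inside your application jar.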
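
    To verify a fix without running a full Spark job, the failing serializer path from the trace can be exercised directly. A minimal verification sketch, assuming stanford-corenlp 3.x is on the classpath:

        import java.util.Properties
        import edu.stanford.nlp.pipeline.{Annotation, ProtobufAnnotationSerializer, StanfordCoreNLP}

        // tokenize + ssplit need no extra model jars
        val props = new Properties()
        props.setProperty("annotators", "tokenize, ssplit")
        val pipeline = new StanfordCoreNLP(props)
        val doc = new Annotation("Hello world.")
        pipeline.annotate(doc)
        // Throws the NoSuchMethodError above when protobuf-java 2.4.x wins on the classpath
        val proto = new ProtobufAnnotationSerializer().toProto(doc)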