java.lang.NoSuchMethodError: com.google.common.base.Splitter.splitToList(Ljava/lang/CharSequence;)Ljava/util/List;

GitHub | samelamin | 10 months ago
tip
Your exception is missing from the Samebug knowledge base.
Here are the best solutions we found on the Internet.
Click on the checkmark to mark the helpful solution and get rewards for your help.
  1. 0

    GitHub comment 10#238589343

    GitHub | 10 months ago | samelamin
    java.lang.NoSuchMethodError: com.google.common.base.Splitter.splitToList(Ljava/lang/CharSequence;)Ljava/util/List;
  2. 0

    GitHub comment 12#258612097

    GitHub | 7 months ago | samelamin
    java.lang.NoSuchMethodError: com.google.common.base.Splitter.splitToList(Ljava/lang/CharSequence;)Ljava/util/List; and the stack trace ```
  3. 0

    Reading file from Google bucket in Spark

    Stack Overflow | 5 months ago | Abhis
    java.lang.NoSuchMethodError: com.google.common.base.Splitter.splitToList(Ljava/lang/CharSequence;)Ljava/util/List;
  4. Speed up your debug routine!

    Automated exception search integrated into your IDE

    Root Cause Analysis

    1. java.lang.NoSuchMethodError

      com.google.common.base.Splitter.splitToList(Ljava/lang/CharSequence;)Ljava/util/List;

      at com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase$ParentTimestampUpdateIncludePredicate.create()
    2. com.google.cloud
      GoogleHadoopFileSystemBase.initialize
      1. com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase$ParentTimestampUpdateIncludePredicate.create(GoogleHadoopFileSystemBase.java:572)
      2. com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.createOptionsBuilderFromConfig(GoogleHadoopFileSystemBase.java:1890)
      3. com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.configure(GoogleHadoopFileSystemBase.java:1587)
      4. com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.initialize(GoogleHadoopFileSystemBase.java:793)
      5. com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.initialize(GoogleHadoopFileSystemBase.java:756)
      5 frames
    3. Hadoop
      Path.getFileSystem
      1. org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)
      2. org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
      3. org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
      4. org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
      5. org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
      6. org.apache.hadoop.fs.Path.getFileSystem(Path.java:287)
      6 frames
    4. com.google.cloud
      AbstractBigQueryInputFormat.getSplits
      1. com.google.cloud.hadoop.io.bigquery.AbstractBigQueryInputFormat.extractExportPathRoot(AbstractBigQueryInputFormat.java:247)
      2. com.google.cloud.hadoop.io.bigquery.AbstractBigQueryInputFormat.getSplits(AbstractBigQueryInputFormat.java:107)
      2 frames
    5. Spark
      RDD$$anonfun$partitions$2.apply
      1. org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:113)
      2. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
      3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
      3 frames
    6. Scala
      Option.getOrElse
      1. scala.Option.getOrElse(Option.scala:120)
      1 frame
    7. Spark
      RDD$$anonfun$partitions$2.apply
      1. org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
      2. org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
      3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
      4. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
      4 frames
    8. Scala
      Option.getOrElse
      1. scala.Option.getOrElse(Option.scala:120)
      1 frame
    9. Spark
      RDD$$anonfun$partitions$2.apply
      1. org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
      2. org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
      3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
      4. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
      4 frames
    10. Scala
      Option.getOrElse
      1. scala.Option.getOrElse(Option.scala:120)
      1 frame
    11. Spark
      RDD.first
      1. org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
      2. org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1293)
      3. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
      4. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
      5. org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
      6. org.apache.spark.rdd.RDD.take(RDD.scala:1288)
      7. org.apache.spark.rdd.RDD$$anonfun$first$1.apply(RDD.scala:1328)
      8. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
      9. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
      10. org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
      11. org.apache.spark.rdd.RDD.first(RDD.scala:1327)
      11 frames
    12. com.spotify.spark
      package$BigQuerySQLContext.bigQuerySelect
      1. com.spotify.spark.bigquery.package$BigQuerySQLContext.bigQueryTable(package.scala:112)
      2. com.spotify.spark.bigquery.package$BigQuerySQLContext.bigQuerySelect(package.scala:93)
      2 frames
    13. Unknown
      $iwC.<init>
      1. $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:28)
      2. $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:33)
      3. $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:35)
      4. $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:37)
      5. $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:39)
      6. $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:41)
      7. $iwC$$iwC$$iwC$$iwC.<init>(<console>:43)
      8. $iwC$$iwC$$iwC.<init>(<console>:45)
      9. $iwC$$iwC.<init>(<console>:47)
      10. $iwC.<init>(<console>:49)
      10 frames