java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation

Apache's JIRA Issue Tracker | Peter Haumer | 1 year ago
  1.

    [SPARK-8385] java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation - ASF JIRA

    apache.org | 1 year ago
    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation
  2.

    I used to be able to debug my Spark apps in Eclipse. With Spark 1.3.1 I created a launch configuration and just set the VM argument "-Dspark.master=local[4]". With 1.4 this stopped working when reading files from the OS filesystem. Running the same apps with spark-submit works fine. Losing the ability to debug that way has a major impact on the usability of Spark. The following exception is thrown:

    Exception in thread "main" java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation
        at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:213)
        at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2401)
        at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2411)
        at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2428)
        at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
        at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
        at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:166)
        at org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:653)
        at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:389)
        at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:362)
        at org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762)
        at org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762)
        at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172)
        at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172)
        at scala.Option.map(Option.scala:145)
        at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:172)
        at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:196)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1535)
        at org.apache.spark.rdd.RDD.reduce(RDD.scala:900)
        at org.apache.spark.api.java.JavaRDDLike$class.reduce(JavaRDDLike.scala:357)
        at org.apache.spark.api.java.AbstractJavaRDDLike.reduce(JavaRDDLike.scala:46)
        at com.databricks.apps.logs.LogAnalyzer.main(LogAnalyzer.java:60)

    (A minimal driver sketch that mirrors this call path follows this list.)

    Apache's JIRA Issue Tracker | 1 year ago | Peter Haumer
    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation
  3.

    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation

    Stack Overflow | 1 year ago | Govardhana Rao Ganji
    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation
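
The reports above share one call path: a driver launched with a local master reads a file from the OS filesystem, calls reduce(), and Hadoop's FileSystem service loader fails while registering implementations, before the input path is ever resolved. Below is a minimal sketch of a driver that exercises that path. It is illustrative only: the class name, input path, and computation are stand-ins, not taken from the original LogAnalyzer app.

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

// Minimal driver mirroring the failing call path reported above:
// a local master (equivalent to launching from Eclipse with the VM
// argument -Dspark.master=local[4]), a file read from the OS
// filesystem, and a reduce() action that forces partition computation.
public class TfsReproSketch {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("tfs-repro")
                .setMaster("local[4]");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // textFile() goes through hadoop.mapred.FileInputFormat, as in
        // the stack trace. The input path is a placeholder.
        JavaRDD<String> lines = sc.textFile("/tmp/access.log");

        // reduce() is the first action; computing partitions triggers
        // FileSystem.loadFileSystems(), the frame that throws on an
        // affected Spark 1.4 classpath.
        int totalChars = lines
                .map(s -> s.length())
                .reduce((a, b) -> a + b);

        System.out.println("total characters: " + totalChars);
        sc.stop();
    }
}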


Root Cause Analysis

  1. java.lang.UnsupportedOperationException

    Not implemented by the TFS FileSystem implementation

    at org.apache.hadoop.fs.FileSystem.getScheme()
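    (Why this top frame throws, and where the literal "TFS" in the message comes from, is sketched after this frame breakdown.)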
  2. Hadoop
    FileSystem.get
    1. org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:213)
    2. org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2401)
    3. org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2411)
    4. org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2428)
    5. org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
    6. org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
    7. org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
    8. org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
    9. org.apache.hadoop.fs.FileSystem.get(FileSystem.java:166)
    9 frames
  3. Hadoop
    FileInputFormat.setInputPaths
    1. org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:653)
    2. org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:389)
    3. org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:362)
    3 frames
  4. Spark
    HadoopRDD$$anonfun$getJobConf$6.apply
    1. org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762)
    2. org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762)
    3. org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172)
    4. org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172)
    4 frames
  5. Scala
    Option.map
    1. scala.Option.map(Option.scala:145)
    1 frame
  6. Spark
    RDD$$anonfun$partitions$2.apply
    1. org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:172)
    2. org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:196)
    3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    4. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    4 frames
  7. Scala
    Option.getOrElse
    1. scala.Option.getOrElse(Option.scala:120)
    1 frame
  8. Spark
    RDD$$anonfun$partitions$2.apply
    1. org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    2. org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    4. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    4 frames
  9. Scala
    Option.getOrElse
    1. scala.Option.getOrElse(Option.scala:120)
    1 frame
  10. Spark
    RDD$$anonfun$partitions$2.apply
    1. org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    2. org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    4. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    4 frames
  11. Scala
    Option.getOrElse
    1. scala.Option.getOrElse(Option.scala:120)
    1 frame
  12. Spark
    RDD$$anonfun$partitions$2.apply
    1. org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    2. org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    3. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    4. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    4 frames
  13. Scala
    Option.getOrElse
    1. scala.Option.getOrElse(Option.scala:120)
    1 frame
  14. Spark
    AbstractJavaRDDLike.reduce
    1. org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    2. org.apache.spark.SparkContext.runJob(SparkContext.scala:1535)
    3. org.apache.spark.rdd.RDD.reduce(RDD.scala:900)
    4. org.apache.spark.api.java.JavaRDDLike$class.reduce(JavaRDDLike.scala:357)
    5. org.apache.spark.api.java.AbstractJavaRDDLike.reduce(JavaRDDLike.scala:46)
    5 frames
  15. com.databricks.apps
    LogAnalyzer.main
    1. com.databricks.apps.logs.LogAnalyzer.main(LogAnalyzer.java:60)
    1 frame
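
Reading the breakdown against the Hadoop source explains the top frame. In Hadoop 2.x, the abstract FileSystem base class provides a default getScheme() that throws UnsupportedOperationException with a message built from getClass().getSimpleName(), which is exactly where the literal "TFS" comes from, and loadFileSystems() calls getScheme() on every implementation registered through META-INF/services while building its scheme-to-class map. Tachyon's TFS class, which Spark 1.4 pulled onto the classpath, did not override getScheme(). The sketch below imitates that shape with stand-in classes; it is not Hadoop's actual code.

import java.util.Arrays;
import java.util.List;

// Stand-alone sketch of the failure mechanism. The classes are
// stand-ins for org.apache.hadoop.fs.FileSystem and Tachyon's TFS;
// the message format matches the real base-class behavior.
public class GetSchemeSketch {

    static abstract class FileSystem {
        // Hadoop 2.x default: subclasses are expected to override this.
        public String getScheme() {
            throw new UnsupportedOperationException(
                    "Not implemented by the " + getClass().getSimpleName()
                    + " FileSystem implementation");
        }
    }

    // A well-behaved implementation overrides getScheme().
    static class LocalFileSystem extends FileSystem {
        @Override
        public String getScheme() {
            return "file";
        }
    }

    // Stand-in for Tachyon's TFS: registered as a service provider but
    // never overriding getScheme(). getSimpleName() here is "TFS",
    // which reproduces the exact message from the reports.
    static class TFS extends FileSystem {
    }

    public static void main(String[] args) {
        // Stand-in for ServiceLoader.load(FileSystem.class): in the
        // real code the TFS entry comes from the tachyon-client jar's
        // META-INF/services/org.apache.hadoop.fs.FileSystem file.
        List<FileSystem> registered =
                Arrays.asList(new LocalFileSystem(), new TFS());

        for (FileSystem fs : registered) {
            try {
                System.out.println("registered scheme: " + fs.getScheme());
            } catch (UnsupportedOperationException e) {
                // Hadoop's loadFileSystems() loop does not catch this,
                // so in the real stack the exception aborts filesystem
                // registration entirely, even though the application
                // never asked for a tachyon:// path.
                System.out.println("registration failed: " + e.getMessage());
            }
        }
    }
}

This reading also explains why the failure tracks the launch classpath rather than the application code: any launch whose classpath registers a FileSystem that lacks getScheme() fails during registration. Accordingly, the fixes discussed around SPARK-8385 are classpath-side, keeping the offending tachyon-client jar off the application classpath or moving to versions where TFS implements getScheme(); treat those specifics as assumptions to verify against your own dependency tree.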