java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation 2015-07-27 16:28:13,341 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:214) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2365) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2375) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2392) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)

Apache's JIRA Issue Tracker | Peter Haumer | 1 year ago
  1. 0

    [SPARK-8385] java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation - ASF JIRA

    apache.org | 1 year ago
    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation 2015-07-27 16:28:13,341 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:214) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2365) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2375) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2392) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)
  2. 0

    I used to be able to debug my Spark apps in Eclipse. With Spark 1.3.1 I created a launch and just set the vm var "-Dspark.master=local[4]". With 1.4 this stopped working when reading files from the OS filesystem. Running the same apps with spark-submit works fine. Losing the ability to debug that way has a major impact on the usability of Spark. The following exception is thrown: Exception in thread "main" java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:213) at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2401) at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2411) at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2428) at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88) at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467) at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449) at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367) at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:166) at org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:653) at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:389) at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:362) at org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762) at org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762) at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172) at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172) at scala.Option.map(Option.scala:145) at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:172) at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:196) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) 
at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.SparkContext.runJob(SparkContext.scala:1535) at org.apache.spark.rdd.RDD.reduce(RDD.scala:900) at org.apache.spark.api.java.JavaRDDLike$class.reduce(JavaRDDLike.scala:357) at org.apache.spark.api.java.AbstractJavaRDDLike.reduce(JavaRDDLike.scala:46) at com.databricks.apps.logs.LogAnalyzer.main(LogAnalyzer.java:60)

    Apache's JIRA Issue Tracker | 1 year ago | Peter Haumer
    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation 2015-07-27 16:28:13,341 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:214) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2365) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2375) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2392) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)
  3. 0

    I used to be able to debug my Spark apps in Eclipse. With Spark 1.3.1 I created a launch and just set the vm var "-Dspark.master=local[4]". With 1.4 this stopped working when reading files from the OS filesystem. Running the same apps with spark-submit works fine. Losing the ability to debug that way has a major impact on the usability of Spark. The following exception is thrown: Exception in thread "main" java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:213) at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2401) at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2411) at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2428) at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88) at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467) at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449) at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367) at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:166) at org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:653) at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:389) at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:362) at org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762) at org.apache.spark.SparkContext$$anonfun$28.apply(SparkContext.scala:762) at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172) at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:172) at scala.Option.map(Option.scala:145) at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:172) at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:196) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) 
at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219) at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217) at scala.Option.getOrElse(Option.scala:120) at org.apache.spark.rdd.RDD.partitions(RDD.scala:217) at org.apache.spark.SparkContext.runJob(SparkContext.scala:1535) at org.apache.spark.rdd.RDD.reduce(RDD.scala:900) at org.apache.spark.api.java.JavaRDDLike$class.reduce(JavaRDDLike.scala:357) at org.apache.spark.api.java.AbstractJavaRDDLike.reduce(JavaRDDLike.scala:46) at com.databricks.apps.logs.LogAnalyzer.main(LogAnalyzer.java:60)

    Apache's JIRA Issue Tracker | 1 year ago | Peter Haumer
    java.lang.UnsupportedOperationException: Not implemented by the TFS FileSystem implementation 2015-07-27 16:28:13,341 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:214) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2365) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2375) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2392) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)
  4. Speed up your debug routine!

    Automated exception search integrated into your IDE

  5. 0

    How I have to configure hdfs and pig

    Stack Overflow | 2 years ago | user3712581
    java.lang.UnsupportedOperationException: Not implemented by the DistributedFileSystem FileSystem implementation
  6. 0

    How to integrate apache-nutch-1.9 and Hadoop 2.3.0-cdh5.1.0?

    Stack Overflow | 2 years ago | Sandeep
    java.lang.UnsupportedOperationException: Not implemented by the DistributedFileSystem FileSystem implementation

  1. Anup Ash 1 time, last 3 months ago
10 unregistered visitors
Not finding the right solution?
Take a tour to get the most out of Samebug.

Tired of useless tips?

Automated exception search integrated into your IDE

Root Cause Analysis

  1. java.lang.UnsupportedOperationException

    Not implemented by the TFS FileSystem implementation 2015-07-27 16:28:13,341 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getScheme(FileSystem.java:214) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:2365) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2375) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2392) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413) 2015-07-27 16:28:13,342 [stderr] INFO util.Utils (Logging.scala:logInfo(59)) - at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)

    at org.apache.hadoop.fs.FileSystem.get()
  2. Hadoop
    FileSystem.get
    1. org.apache.hadoop.fs.FileSystem.get(FileSystem.java:167)
    1 frame
  3. Spark Project YARN Stable API
    Client.main
    1. org.apache.spark.deploy.yarn.Client.prepareLocalResources(Client.scala:216)
    2. org.apache.spark.deploy.yarn.Client.createContainerLaunchContext(Client.scala:384)
    3. org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:102)
    4. org.apache.spark.deploy.yarn.Client.run(Client.scala:619)
    5. org.apache.spark.deploy.yarn.Client$.main(Client.scala:647)
    6. org.apache.spark.deploy.yarn.Client.main(Client.scala)
    6 frames
  4. Java RT
    NativeMethodAccessorImpl.invoke0
    1. sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    1 frame