Searched Google with the first line of a Java stack trace?

Paste your entire stack trace, including the exception message, and we can recommend more relevant solutions and speed up your debugging. Try a sample exception.

Recommended solutions based on your search

java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:707)
	at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:776)
	at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:837)
	at java.io.DataInputStream.read(DataInputStream.java:83)
	at org.apache.hadoop.util.LineReader.fillBuffer(LineReader.java:180)
	at org.apache.hadoop.util.LineReader.readDefaultLine(LineReader.java:216)
	at org.apache.hadoop.util.LineReader.readLine(LineReader.java:174)
	at org.apache.hadoop.mapred.LineRecordReader.next(LineRecordReader.java:209)
	at org.apache.hadoop.mapred.LineRecordReader.next(LineRecordReader.java:47)
	at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:201)
	at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:184)
	at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:71)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$$anon$14.hasNext(Iterator.scala:388)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$class.foreach(Iterator.scala:727)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
	at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
	at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:107)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:227)
	at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:158)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.Task.run(Task.scala:51)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:183)
	at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
	at java.lang.Thread.run(Thread.java:662)
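A trace like this one (a Spark task reading an HDFS-backed RDD) is frequently traced back to Hadoop's JVM-wide FileSystem cache: FileSystem.get() hands every caller in the process the same cached instance, so when any code path (or a shutdown hook) closes it, every later read fails in DFSClient.checkOpen with "Filesystem closed". Below is a minimal sketch of two commonly suggested mitigations; fs.hdfs.impl.disable.cache and FileSystem.newInstance() are standard Hadoop, but whether either fits depends on your job's setup, and the class name FsCacheWorkaround is ours, not part of any API.

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.fs.FileSystem;
	import org.apache.hadoop.fs.Path;

	public class FsCacheWorkaround {
	    public static void main(String[] args) throws Exception {
	        Configuration conf = new Configuration();

	        // Mitigation 1: disable the cache for hdfs:// URIs, so each
	        // FileSystem.get() call returns a fresh instance instead of a
	        // shared one that another caller can close underneath you.
	        conf.setBoolean("fs.hdfs.impl.disable.cache", true);

	        // Mitigation 2: request a private, uncached instance explicitly.
	        // Closing it affects only this instance, not other readers in
	        // the same JVM.
	        try (FileSystem fs = FileSystem.newInstance(conf)) {
	            fs.exists(new Path("/tmp"));
	        }
	    }
	}

Either way, the underlying rule is the same: avoid closing a FileSystem obtained from FileSystem.get() inside long-running executors, because other tasks in the JVM may still be reading through it.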