java.nio.channels.ClosedChannelException

  • scala on stackoverflow | Scala Class
    by Unknown author
  • Noticed this in testing 1.7.0 on my laptop tonight. Started two tservers and one
    continuous-ingest client, and occasionally killed/restarted one of the tservers.
    {noformat}
    Failed to close map file
    java.io.IOException: Filesystem closed
        at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
        at org.apache.hadoop.hdfs.DFSInputStream.close(DFSInputStream.java:629)
        at java.io.FilterInputStream.close(FilterInputStream.java:181)
        at org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile$Reader.close(CachableBlockFile.java:409)
        at org.apache.accumulo.core.file.rfile.RFile$Reader.close(RFile.java:921)
        at org.apache.accumulo.tserver.tablet.Compactor.compactLocalityGroup(Compactor.java:391)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:214)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    null
    java.nio.channels.ClosedChannelException
        at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
        at java.io.DataOutputStream.write(DataOutputStream.java:107)
        at org.apache.accumulo.core.file.rfile.bcfile.SimpleBufferedOutputStream.flushBuffer(SimpleBufferedOutputStream.java:39)
        at org.apache.accumulo.core.file.rfile.bcfile.SimpleBufferedOutputStream.flush(SimpleBufferedOutputStream.java:68)
        at org.apache.hadoop.io.compress.CompressionOutputStream.flush(CompressionOutputStream.java:69)
        at org.apache.accumulo.core.file.rfile.bcfile.Compression$FinishOnFlushCompressionStream.flush(Compression.java:66)
        at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:141)
        at org.apache.accumulo.core.file.rfile.bcfile.BCFile$Writer$WBlockState.finish(BCFile.java:233)
        at org.apache.accumulo.core.file.rfile.bcfile.BCFile$Writer$BlockAppender.close(BCFile.java:320)
        at org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile$BlockWrite.close(CachableBlockFile.java:121)
        at org.apache.accumulo.core.file.rfile.RFile$Writer.closeBlock(RFile.java:398)
        at org.apache.accumulo.core.file.rfile.RFile$Writer.append(RFile.java:382)
        at org.apache.accumulo.tserver.tablet.Compactor.compactLocalityGroup(Compactor.java:356)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:214)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    Filesystem closed
    java.io.IOException: Filesystem closed
        at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
        at org.apache.hadoop.hdfs.DFSClient.delete(DFSClient.java:1927)
        at org.apache.hadoop.hdfs.DistributedFileSystem$12.doCall(DistributedFileSystem.java:638)
        at org.apache.hadoop.hdfs.DistributedFileSystem$12.doCall(DistributedFileSystem.java:634)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.delete(DistributedFileSystem.java:634)
        at org.apache.accumulo.server.fs.VolumeManagerImpl.deleteRecursively(VolumeManagerImpl.java:193)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:255)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    MinC failed (null) to create hdfs://localhost:8020/accumulo17/tables/2/t-0000011/F00000yd.rf_tmp retrying ...
    MajC Failed, extent = 2;5;45
    java.io.IOException: Filesystem closed
        at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
        at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    MajC Failed, extent = 2;45;4
    java.io.IOException: Filesystem closed
        at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
        at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    MajC Failed, extent = 2;35;3
    java.io.IOException: Filesystem closed
        at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
        at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    Filesystem closed
    java.io.IOException: Filesystem closed
        at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
        at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
        at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
        at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
        at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
        at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
        at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
        at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
        at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
        at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
        at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
        at java.lang.Thread.run(Thread.java:745)
    {noformat}
    This reminded me of something that [~bills] ran into and tried to get to the bottom of. My hunch was that something registered a shutdown hook which closed the filesystem object. I also remembered that FileSystem instances are cached. Bill pointed out that he thought this might still be a VFS-related issue (despite having nothing to do with VFS). After noticing a shutdown hook in our VFS usage, I think it's unsafe to share FileSystem instances with VFS code when Accumulo also uses those instances for important things like writing files. We should get a unique FileSystem instance to provide to any VFS code. We can do this by:
    # Setting {{fs.hdfs.impl.disable.cache}} in the Hadoop Configuration we provide to {{FileSystem.get(Configuration)}}
    # Calling {{FileSystem.newInstance(URI, Configuration)}}
    Either seems to do what we want; the latter possibly has more reliable semantics than just a configuration value. (Both options are sketched in the example below.)
    by Josh Elser
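
    The two remedies named in the report above can be sketched as follows. This is a
    minimal example against the public Hadoop FileSystem API, not code from the ticket:
    the class name is made up for illustration, and the NameNode URI is a placeholder
    taken from the log output above.

    {code:java}
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // Hypothetical helper showing two ways to obtain a FileSystem instance that
    // is not shared through Hadoop's internal FileSystem cache.
    public class UncachedFileSystems {
        public static void main(String[] args) throws Exception {
            URI uri = URI.create("hdfs://localhost:8020"); // placeholder NameNode address
            Configuration conf = new Configuration();

            // Option 1: disable caching for the "hdfs" scheme, so FileSystem.get()
            // hands back a fresh instance instead of the process-wide shared one.
            conf.setBoolean("fs.hdfs.impl.disable.cache", true);
            FileSystem viaConfig = FileSystem.get(uri, conf);

            // Option 2: bypass the cache explicitly; newInstance() always creates
            // a new FileSystem regardless of any cache-related configuration.
            FileSystem viaNewInstance = FileSystem.newInstance(uri, conf);

            // Closing either instance (e.g. from a VFS shutdown hook) cannot close
            // a cached instance that other code in the process depends on.
            viaConfig.close();
            viaNewInstance.close();
        }
    }
    {code}

    The practical difference: the configuration flag changes the behavior of every
    lookup made with that Configuration, while {{FileSystem.newInstance(URI, Configuration)}}
    bypasses the cache for a single call, which is presumably why the report considers
    it the more reliable of the two.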
  • oozie java action issue
    by Immanuel Fredrick
    • java.nio.channels.ClosedChannelException
          at io.confluent.connect.hdfs.TopicPartitionWriter.getWriter(TopicPartitionWriter.java:424)
          at io.confluent.connect.hdfs.TopicPartitionWriter.writeRecord(TopicPartitionWriter.java:488)
          at io.confluent.connect.hdfs.TopicPartitionWriter.write(TopicPartitionWriter.java:264)
          at io.confluent.connect.hdfs.DataWriter.write(DataWriter.java:234)
          at io.confluent.connect.hdfs.HdfsSinkTask.put(HdfsSinkTask.java:91)
          at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:280)
          at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:176)
          at org.apache.kafka.connect.runtime.WorkerSinkTaskThread.iteration(WorkerSinkTaskThread.java:90)
          at org.apache.kafka.connect.runtime.WorkerSinkTaskThread.execute(WorkerSinkTaskThread.java:58)
          at org.apache.kafka.connect.util.ShutdownableThread.run(ShutdownableThread.java:82)
      Caused by: java.nio.channels.ClosedChannelException
          at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
          at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
          at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
          at java.io.DataOutputStream.write(DataOutputStream.java:107)
          at org.apache.avro.file.DataFileWriter$BufferedFileOutputStream$PositionFilter.write(DataFileWriter.java:446)
          at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:82)
          at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:140)
          at org.apache.avro.io.BufferedBinaryEncoder$OutputStreamSink.innerFlush(BufferedBinaryEncoder.java:220)
          at org.apache.avro.io.BufferedBinaryEncoder.flush(BufferedBinaryEncoder.java:85)
          at org.apache.avro.file.DataFileWriter.create(DataFileWriter.java:154)
          at io.confluent.connect.hdfs.avro.AvroRecordWriterProvider.getRecordWriter(AvroRecordWriterProvider.java:99)
          at io.confluent.connect.hdfs.TopicPartitionWriter.getWriter(TopicPartitionWriter.java:416)
          ... 9 more
