java.nio.channels.ClosedChannelException

Stack Overflow | nilesh1212 | 7 months ago
Your exception is missing from the Samebug knowledge base.
Here are the best solutions we found on the Internet.
  1.

    Noticed this in testing 1.7.0 on my laptop tonight. Started two tservers and one continuous ingest client, and would occasionally kill/restart one of the tservers.
    {noformat}
    Failed to close map file
    java.io.IOException: Filesystem closed
            at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
            at org.apache.hadoop.hdfs.DFSInputStream.close(DFSInputStream.java:629)
            at java.io.FilterInputStream.close(FilterInputStream.java:181)
            at org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile$Reader.close(CachableBlockFile.java:409)
            at org.apache.accumulo.core.file.rfile.RFile$Reader.close(RFile.java:921)
            at org.apache.accumulo.tserver.tablet.Compactor.compactLocalityGroup(Compactor.java:391)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:214)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    null
    java.nio.channels.ClosedChannelException
            at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
            at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
            at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
            at java.io.DataOutputStream.write(DataOutputStream.java:107)
            at org.apache.accumulo.core.file.rfile.bcfile.SimpleBufferedOutputStream.flushBuffer(SimpleBufferedOutputStream.java:39)
            at org.apache.accumulo.core.file.rfile.bcfile.SimpleBufferedOutputStream.flush(SimpleBufferedOutputStream.java:68)
            at org.apache.hadoop.io.compress.CompressionOutputStream.flush(CompressionOutputStream.java:69)
            at org.apache.accumulo.core.file.rfile.bcfile.Compression$FinishOnFlushCompressionStream.flush(Compression.java:66)
            at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:141)
            at org.apache.accumulo.core.file.rfile.bcfile.BCFile$Writer$WBlockState.finish(BCFile.java:233)
            at org.apache.accumulo.core.file.rfile.bcfile.BCFile$Writer$BlockAppender.close(BCFile.java:320)
            at org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile$BlockWrite.close(CachableBlockFile.java:121)
            at org.apache.accumulo.core.file.rfile.RFile$Writer.closeBlock(RFile.java:398)
            at org.apache.accumulo.core.file.rfile.RFile$Writer.append(RFile.java:382)
            at org.apache.accumulo.tserver.tablet.Compactor.compactLocalityGroup(Compactor.java:356)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:214)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    Filesystem closed
    java.io.IOException: Filesystem closed
            at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
            at org.apache.hadoop.hdfs.DFSClient.delete(DFSClient.java:1927)
            at org.apache.hadoop.hdfs.DistributedFileSystem$12.doCall(DistributedFileSystem.java:638)
            at org.apache.hadoop.hdfs.DistributedFileSystem$12.doCall(DistributedFileSystem.java:634)
            at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
            at org.apache.hadoop.hdfs.DistributedFileSystem.delete(DistributedFileSystem.java:634)
            at org.apache.accumulo.server.fs.VolumeManagerImpl.deleteRecursively(VolumeManagerImpl.java:193)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:255)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    MinC failed (null) to create hdfs://localhost:8020/accumulo17/tables/2/t-0000011/F00000yd.rf_tmp retrying ...
    MajC Failed, extent = 2;5;45
    java.io.IOException: Filesystem closed
            at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
            at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
            at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    MajC Failed, extent = 2;45;4
    java.io.IOException: Filesystem closed
            at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
            at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
            at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    MajC Failed, extent = 2;35;3
    java.io.IOException: Filesystem closed
            at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
            at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
            at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    Filesystem closed
    java.io.IOException: Filesystem closed
            at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:795)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1651)
            at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1593)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:397)
            at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:393)
            at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:393)
            at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:337)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
            at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:126)
            at org.apache.accumulo.core.file.rfile.RFileOperations.openWriter(RFileOperations.java:106)
            at org.apache.accumulo.core.file.DispatchingFileFactory.openWriter(DispatchingFileFactory.java:78)
            at org.apache.accumulo.tserver.tablet.Compactor.call(Compactor.java:192)
            at org.apache.accumulo.tserver.tablet.Tablet._majorCompact(Tablet.java:1981)
            at org.apache.accumulo.tserver.tablet.Tablet.majorCompact(Tablet.java:2098)
            at org.apache.accumulo.tserver.tablet.CompactionRunner.run(CompactionRunner.java:44)
            at org.apache.htrace.wrappers.TraceRunnable.run(TraceRunnable.java:57)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
            at org.apache.accumulo.fate.util.LoggingRunnable.run(LoggingRunnable.java:35)
            at java.lang.Thread.run(Thread.java:745)
    {noformat}
    This reminded me of something that [~bills] ran into and tried to get to the bottom of. My hunch was that something registered a shutdown hook which closed the filesystem object. I also remembered that FileSystem instances are cached. Bill pointed out that he thought this might still be a VFS-related issue (despite having nothing to do with VFS). After noticing a shutdown hook in our VFS usage, I think it is unsafe to share FileSystem instances with VFS code when Accumulo might also be using them for important things like writing files. We should get a unique FileSystem instance to provide to any VFS code. We can do this by either:

    1. setting {{fs.hdfs.impl.disable.cache}} in the Hadoop Configuration we provide to {{FileSystem.get(Configuration)}}, or
    2. calling {{FileSystem.newInstance(URI, Configuration)}}.

    Either seems to do what we want -- the latter possibly has more reliable semantics than relying on a configuration value alone. (Both approaches are sketched in code after this list of solutions.)

    Apache's JIRA Issue Tracker | 2 years ago | Josh Elser
    java.nio.channels.ClosedChannelException
  2.

    Encountering exception when shutting down the HDFS Sink connector

    Google Groups | 7 months ago | Mangesh B
    java.nio.channels.ClosedChannelException
  3.

    datacenter - HDFS performances on apache spark - Server Fault

    serverfault.com | 1 year ago
    java.nio.channels.ClosedChannelException
  4.

    oozie java action issue

    hadoop-common-user | 12 months ago | Immanuel Fredrick
    java.nio.channels.ClosedChannelException
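
    The remedy proposed in the JIRA discussion of solution 1 boils down to two Hadoop client APIs. A minimal sketch of both, not taken from any of the pages above: the class name and the hdfs://localhost:8020 namenode address are placeholders, and in a real deployment the address would normally come from core-site.xml.

        import java.net.URI;

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.FileSystem;

        public class PrivateFileSystemSketch {
            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder namenode address

                // Option 1: disable the FileSystem cache for hdfs:// URIs, so that
                // FileSystem.get() returns a fresh instance instead of the shared,
                // process-wide cached one.
                conf.setBoolean("fs.hdfs.impl.disable.cache", true);
                FileSystem uncached = FileSystem.get(conf);

                // Option 2: bypass the cache explicitly. newInstance() always
                // constructs a new FileSystem object, regardless of the cache
                // setting in the Configuration.
                FileSystem fresh = FileSystem.newInstance(URI.create("hdfs://localhost:8020"), conf);

                // Closing either instance (e.g. from a shutdown hook in VFS code)
                // no longer closes the FileSystem the rest of the process writes through.
                fresh.close();
                uncached.close();
            }
        }

    Of the two, newInstance() is the more surgical choice: disabling the cache via the configuration key affects every FileSystem.get() call that sees that Configuration, not just the one caller that needs a private instance.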

    Root Cause Analysis

    1. java.nio.channels.ClosedChannelException

      No message provided

      at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed()
    2. Apache Hadoop HDFS
      DFSOutputStream.checkClosed
      1. org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
      1 frame
    3. Hadoop
      FSDataOutputStream$PositionCache.write
      1. org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
      2. org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
      2 frames
    4. Java RT
      DataOutputStream.write
      1. java.io.DataOutputStream.write(DataOutputStream.java:107)
      1 frame
    5. Hive Query Language
      OrcOutputFormat$OrcRecordWriter.close
      1. org.apache.hadoop.hive.ql.io.orc.WriterImpl$DirectStream.output(WriterImpl.java:464)
      2. org.apache.hadoop.hive.ql.io.orc.OutStream.flush(OutStream.java:242)
      3. org.apache.hadoop.hive.ql.io.orc.WriterImpl.writeMetadata(WriterImpl.java:2328)
      4. org.apache.hadoop.hive.ql.io.orc.WriterImpl.close(WriterImpl.java:2426)
      5. org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:106)
      6. org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:91)
      6 frames
    6. org.apache.spark
      InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply
      1. org.apache.spark.sql.hive.orc.OrcOutputWriter.close(OrcRelation.scala:144)
      2. org.apache.spark.sql.execution.datasources.DefaultWriterContainer.abortTask$1(WriterContainer.scala:272)
      3. org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:249)
      4. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply(InsertIntoHadoopFsRelation.scala:150)
      5. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply(InsertIntoHadoopFsRelation.scala:150)
      5 frames
    7. Spark
      Executor$TaskRunner.run
      1. org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
      2. org.apache.spark.scheduler.Task.run(Task.scala:88)
      3. org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
      3 frames
    8. Java RT
      Thread.run
      1. java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
      2. java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
      3. java.lang.Thread.run(Thread.java:745)
      3 frames
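
    The first frame in the breakdown above is the key one: DFSOutputStream.checkClosed throws ClosedChannelException when a write or flush reaches a stream whose backing DFS client has already been shut down, which usually means some other code path closed the shared, cached FileSystem instance (the same failure mode analyzed in solution 1). A minimal repro sketch, not from any of the pages above, assuming a reachable HDFS at a placeholder address and a hypothetical /tmp/repro.txt path:

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.FSDataOutputStream;
        import org.apache.hadoop.fs.FileSystem;
        import org.apache.hadoop.fs.Path;

        public class ClosedChannelRepro {
            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder namenode address

                // FileSystem.get() caches instances by scheme, authority and user,
                // so both variables refer to the SAME object.
                FileSystem fs1 = FileSystem.get(conf);
                FileSystem fs2 = FileSystem.get(conf);
                System.out.println(fs1 == fs2); // prints true

                FSDataOutputStream out = fs1.create(new Path("/tmp/repro.txt")); // hypothetical path
                out.write("first batch".getBytes());

                // Some unrelated component (a shutdown hook, a connector shutting
                // down, VFS code, ...) closes "its" FileSystem, which is really
                // the shared cached instance that out still writes through.
                fs2.close();

                // The open writer is now broken: on HDFS this typically fails with
                // java.nio.channels.ClosedChannelException raised by
                // DFSOutputStream.checkClosed, the first frame in the trace above.
                out.write("second batch".getBytes());
                out.hflush();
            }
        }

    This is consistent with the Spark/ORC frames above: the task's ORC record writer was still being flushed and closed while the FileSystem it wrote through had already been shut down, so the close path surfaced the ClosedChannelException.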