java.io.IOException: FileAlreadyExistException(message:Block file is being written! userId(##) blockId(####))

JIRA | Calvin Jia | 2 years ago
  1. 0

    {code:java} java.io.IOException: FileAlreadyExistException(message:Block file is being written! userId(##) blockId(####)) at tachyon.worker.WorkerClient.requestBlockLocation(WorkerClient.java:378) at tachyon.client.TachyonFS.getLocalBlockTemporaryPath(TachyonFS.java:633) at tachyon.client.BlockOutStream.<init>(BlockOutStream.java:96) at tachyon.client.BlockOutStream.<init>(BlockOutStream.java:65) at tachyon.client.RemoteBlockInStream.<init>(RemoteBlockInStream.java:128) at tachyon.client.BlockInStream.get(BlockInStream.java:62) at tachyon.client.FileInStream.seek(FileInStream.java:157) at tachyon.hadoop.HdfsFileInputStream.seek(HdfsFileInputStream.java:244) at org.apache.hadoop.fs.FSDataInputStream.seek(FSDataInputStream.java:48) at org.apache.hadoop.mapred.LineRecordReader.<init>(LineRecordReader.java:103) at org.apache.hadoop.mapred.TextInputFormat.getRecordReader(TextInputFormat.java:54) at org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:236) at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:212) at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:101) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at 
org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68) at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41) at org.apache.spark.scheduler.Task.run(Task.scala:64) at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:745) {code}

    JIRA | 2 years ago | Calvin Jia
    java.io.IOException: FileAlreadyExistException(message:Block file is being written! userId(##) blockId(####))
  2. 0

    {code:java} java.io.IOException: FileAlreadyExistException(message:Block file is being written! userId(##) blockId(####)) at tachyon.worker.WorkerClient.requestBlockLocation(WorkerClient.java:378) at tachyon.client.TachyonFS.getLocalBlockTemporaryPath(TachyonFS.java:633) at tachyon.client.BlockOutStream.<init>(BlockOutStream.java:96) at tachyon.client.BlockOutStream.<init>(BlockOutStream.java:65) at tachyon.client.RemoteBlockInStream.<init>(RemoteBlockInStream.java:128) at tachyon.client.BlockInStream.get(BlockInStream.java:62) at tachyon.client.FileInStream.seek(FileInStream.java:157) at tachyon.hadoop.HdfsFileInputStream.seek(HdfsFileInputStream.java:244) at org.apache.hadoop.fs.FSDataInputStream.seek(FSDataInputStream.java:48) at org.apache.hadoop.mapred.LineRecordReader.<init>(LineRecordReader.java:103) at org.apache.hadoop.mapred.TextInputFormat.getRecordReader(TextInputFormat.java:54) at org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:236) at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:212) at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:101) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277) at 
org.apache.spark.rdd.RDD.iterator(RDD.scala:244) at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68) at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41) at org.apache.spark.scheduler.Task.run(Task.scala:64) at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:745) {code}

    JIRA | 2 years ago | Calvin Jia
    java.io.IOException: FileAlreadyExistException(message:Block file is being written! userId(##) blockId(####))
  3. 0

    Parquet error when saving from Spark

    Stack Overflow | 2 years ago
    java.io.IOException: The file being written is in an invalid state. Probably caused by an error thrown previously. Current state: COLUMN
  4. Speed up your debug routine!

    Automated exception search integrated into your IDE

  5. 0

    Tachyon worker is throwing an IOException occasionally

    Google Groups | 8 months ago | Antonio Si
    java.io.IOException: java.io.IOException: error writing blockId: 62724985512263680, userId: 385120561, address: denj51bda04.us.oracle.com/192.168.131.236:29999, message: Failed to write block.
  6. 0

    Tachyon + Spark cached RDD throws an Exception while reading the cached RDD; it will then re-write it again...

    Google Groups | 1 year ago | Andy Su
    java.io.IOException: java.io.IOException: error writing blockId: 35433480192, userId: 11, address: XX.XX.XX.XX:29901, message: Failed to write block.

    Not finding the right solution?
    Take a tour to get the most out of Samebug.

    Tired of useless tips?

    Automated exception search integrated into your IDE

    Root Cause Analysis

    1. java.io.IOException

      FileAlreadyExistException(message:Block file is being written! userId(##) blockId(####))

      at tachyon.worker.WorkerClient.requestBlockLocation()
    2. Tachyon Project Core
      HdfsFileInputStream.seek
      1. tachyon.worker.WorkerClient.requestBlockLocation(WorkerClient.java:378)
      2. tachyon.client.TachyonFS.getLocalBlockTemporaryPath(TachyonFS.java:633)
      3. tachyon.client.BlockOutStream.<init>(BlockOutStream.java:96)
      4. tachyon.client.BlockOutStream.<init>(BlockOutStream.java:65)
      5. tachyon.client.RemoteBlockInStream.<init>(RemoteBlockInStream.java:128)
      6. tachyon.client.BlockInStream.get(BlockInStream.java:62)
      7. tachyon.client.FileInStream.seek(FileInStream.java:157)
      8. tachyon.hadoop.HdfsFileInputStream.seek(HdfsFileInputStream.java:244)
      8 frames
    3. Hadoop
      FSDataInputStream.seek
      1. org.apache.hadoop.fs.FSDataInputStream.seek(FSDataInputStream.java:48)
      1 frame
    4. Hadoop
      TextInputFormat.getRecordReader
      1. org.apache.hadoop.mapred.LineRecordReader.<init>(LineRecordReader.java:103)
      2. org.apache.hadoop.mapred.TextInputFormat.getRecordReader(TextInputFormat.java:54)
      2 frames
    5. Spark
      Executor$TaskRunner.run
      1. org.apache.spark.rdd.HadoopRDD$$anon$1.<init>(HadoopRDD.scala:236)
      2. org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:212)
      3. org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:101)
      4. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
      5. org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
      6. org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
      7. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
      8. org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
      9. org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
      10. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
      11. org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
      12. org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
      13. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
      14. org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
      15. org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
      16. org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
      17. org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
      18. org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
      19. org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
      20. org.apache.spark.scheduler.Task.run(Task.scala:64)
      21. org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
      21 frames
    6. Java RT
      Thread.run
      1. java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
      2. java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
      3. java.lang.Thread.run(Thread.java:745)
      3 frames