Solutions on the web

via GitHub by krisskross, 1 year ago
Error creating writer for log file hdfs://hadoop-master03/tmp/kafka-connect/logs/actions_order/89/log

via GitHub by vikeshkhanna, 1 year ago
Error creating writer for log file hdfs://bikeshed//kafka_connect/logs/goscribe.mp-hash_events/48/log

via GitHub by Perdjesk, 7 months ago
Error creating writer for log file hdfs://hadoop//srv/prod/blu/connect-data/logs/topicname/6/log

via GitHub by skyahead, 1 year ago
Error creating writer for log file hdfs://allinone2:54310/tianjil/logs/xxxx/3/log

via GitHub by a2mehta, 1 year ago
Error creating writer for log file s3a://s3 bucket/streamx/logs/streamx/0/log

via Google Groups by Basti, 11 months ago
Error creating writer for log file hdfs://10.42.0.86:9000/logs/testApp/0/log
java.io.FileNotFoundException: failed to append to non-existent file /tmp/kafka-connect/logs/actions_order/89/log for client 10.100.1.52
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.appendFileInternal(FSNamesystem.java:2672)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.appendFileInt(FSNamesystem.java:2991)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.appendFile(FSNamesystem.java:2960)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.append(NameNodeRpcServer.java:719)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.append(ClientNamenodeProtocolServerSideTranslatorPB.java:421)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2151)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2147)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2145)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
	at org.apache.hadoop.hdfs.DFSClient.callAppend(DFSClient.java:1769)
	at org.apache.hadoop.hdfs.DFSClient.append(DFSClient.java:1803)
	at org.apache.hadoop.hdfs.DFSClient.append(DFSClient.java:1796)
	at org.apache.hadoop.hdfs.DistributedFileSystem$4.doCall(DistributedFileSystem.java:323)
	at org.apache.hadoop.hdfs.DistributedFileSystem$4.doCall(DistributedFileSystem.java:319)
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
	at org.apache.hadoop.hdfs.DistributedFileSystem.append(DistributedFileSystem.java:319)
	at org.apache.hadoop.fs.FileSystem.append(FileSystem.java:1173)
	at io.confluent.connect.hdfs.wal.WALFile$Writer.<init>(WALFile.java:221)
	at io.confluent.connect.hdfs.wal.WALFile.createWriter(WALFile.java:67)
	at io.confluent.connect.hdfs.wal.FSWAL.acquireLease(FSWAL.java:73)
	at io.confluent.connect.hdfs.wal.FSWAL.apply(FSWAL.java:105)
	at io.confluent.connect.hdfs.TopicPartitionWriter.applyWAL(TopicPartitionWriter.java:441)
	at io.confluent.connect.hdfs.TopicPartitionWriter.recover(TopicPartitionWriter.java:197)
	at io.confluent.connect.hdfs.DataWriter.recover(DataWriter.java:239)
	at io.confluent.connect.hdfs.DataWriter.open(DataWriter.java:281)
	at io.confluent.connect.hdfs.HdfsSinkTask.open(HdfsSinkTask.java:104)
	at org.apache.kafka.connect.runtime.WorkerSinkTask.openPartitions(WorkerSinkTask.java:417)
	at org.apache.kafka.connect.runtime.WorkerSinkTask.access$1000(WorkerSinkTask.java:54)
	at org.apache.kafka.connect.runtime.WorkerSinkTask$HandleRebalance.onPartitionsAssigned(WorkerSinkTask.java:453)
	at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.onJoinComplete(ConsumerCoordinator.java:222)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$1.onSuccess(AbstractCoordinator.java:232)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$1.onSuccess(AbstractCoordinator.java:227)
	at org.apache.kafka.clients.consumer.internals.RequestFuture.fireSuccess(RequestFuture.java:133)
	at org.apache.kafka.clients.consumer.internals.RequestFuture.complete(RequestFuture.java:107)
	at org.apache.kafka.clients.consumer.internals.RequestFuture$2.onSuccess(RequestFuture.java:182)
	at org.apache.kafka.clients.consumer.internals.RequestFuture.fireSuccess(RequestFuture.java:133)
	at org.apache.kafka.clients.consumer.internals.RequestFuture.complete(RequestFuture.java:107)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$SyncGroupResponseHandler.handle(AbstractCoordinator.java:436)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$SyncGroupResponseHandler.handle(AbstractCoordinator.java:422)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$CoordinatorResponseHandler.onSuccess(AbstractCoordinator.java:679)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$CoordinatorResponseHandler.onSuccess(AbstractCoordinator.java:658)
	at org.apache.kafka.clients.consumer.internals.RequestFuture$1.onSuccess(RequestFuture.java:167)
	at org.apache.kafka.clients.consumer.internals.RequestFuture.fireSuccess(RequestFuture.java:133)
	at org.apache.kafka.clients.consumer.internals.RequestFuture.complete(RequestFuture.java:107)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient$RequestFutureCompletionHandler.onComplete(ConsumerNetworkClient.java:426)
	at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:278)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.clientPoll(ConsumerNetworkClient.java:360)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:224)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:192)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:163)
	at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.ensureActiveGroup(AbstractCoordinator.java:243)
	at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.ensurePartitionAssignment(ConsumerCoordinator.java:345)
	at org.apache.kafka.clients.consumer.KafkaConsumer.pollOnce(KafkaConsumer.java:977)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:937)
	at org.apache.kafka.connect.runtime.WorkerSinkTask.pollConsumer(WorkerSinkTask.java:305)
	at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:222)
	at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:170)
	at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:142)
	at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:140)
	at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:175)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	at java.lang.Thread.run(Thread.java:745)
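The trace shows the HDFS sink's WAL recovery path (FSWAL.acquireLease -> WALFile.createWriter) calling FileSystem.append on a WAL log file that no longer exists on the target file system, which the NameNode rejects with "failed to append to non-existent file". Below is a minimal Java sketch of that failure mode only, not the connector's own code; the cluster URI, class name, and path are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.FileNotFoundException;

// Sketch: reproduces the FileNotFoundException seen in the trace by appending
// to a path that does not exist, then recovers by creating the file instead.
public class WalAppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode URI; in the reports above this corresponds to
        // the URI prefix of the failing log path (hdfs://... or s3a://...).
        conf.set("fs.defaultFS", "hdfs://namenode:8020");

        FileSystem fs = FileSystem.get(conf);
        // Hypothetical WAL log path, mirroring the logs/<topic>/<partition>/log layout above.
        Path walLog = new Path("/tmp/kafka-connect/logs/some_topic/0/log");

        FSDataOutputStream out;
        try {
            // Roughly the call that fails in the trace (WALFile$Writer -> FileSystem.append):
            // appending to a non-existent file makes the NameNode throw
            // FileNotFoundException: "failed to append to non-existent file ...".
            out = fs.append(walLog);
        } catch (FileNotFoundException e) {
            // In this sketch we simply create the missing file and continue;
            // the connector's recovery instead surfaces the error as
            // "Error creating writer for log file ...".
            out = fs.create(walLog, true);
        }
        out.close();
        fs.close();
    }
}

Running the sketch against a test cluster where the path is absent takes the catch branch; if the file already exists, the append succeeds and the catch branch is never reached.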