Solutions on the web

via spark-user by Ted Yu, 1 year ago
via Stack Overflow by javadba, 2 years ago
This exception has no message.
via GitHub by dusj, 1 year ago
This exception has no message.
via GitHub by ypopkov, 2 years ago
This exception has no message.
via GitHub by astrude, 2 years ago
This exception has no message.
java.lang.InterruptedException
    at java.lang.Object.wait(Native Method)
    at java.lang.Object.wait(Object.java:502)
    at org.apache.spark.scheduler.JobWaiter.awaitResult(JobWaiter.scala:73)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:612)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1922)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply$mcV$sp(InsertIntoHadoopFsRelation.scala:150)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation.run(InsertIntoHadoopFsRelation.scala:108)
    at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
    at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
    at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:125)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:125)
    at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
    at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
    at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:242)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:148)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:139)
    at com.hsbc.rsl.spark.streaming.receiver.functions.PersistLevel3WithDataframes.call(PersistLevel3WithDataframes.java:84)
    at com.hsbc.rsl.spark.streaming.receiver.functions.PersistLevel3WithDataframes.call(PersistLevel3WithDataframes.java:27)
    at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$3.apply(JavaDStreamLike.scala:335)
    at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$3.apply(JavaDStreamLike.scala:335)
    at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:656)
    at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:656)
    at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:50)
    at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
    at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
    at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:424)
    at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:49)
    at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:49)
    at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:49)
    at scala.util.Try$.apply(Try.scala:161)
    at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
    at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:224)
    at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:224)
    at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:224)
    at scala.util.DynamicVariable.withValue(DynamicVariable.scala:57)
    at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:223)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
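What the trace shows: the Spark Streaming JobHandler thread was interrupted on the driver while blocked in JobWaiter.awaitResult, waiting for a DataFrameWriter.save(...) issued from a foreachRDD(...) body (PersistLevel3WithDataframes.call) to finish. An InterruptedException at this point usually means something stopped the StreamingContext or the underlying SparkContext, or cancelled the job, while a batch was still writing, so the save was interrupted mid-flight rather than failing on its own.

A common mitigation is to shut the streaming application down gracefully, letting queued and in-flight batches complete before the contexts are torn down. The following is a minimal sketch of that pattern for a Spark 1.x Java Streaming driver like the one in the trace; the class name, app name, and batch interval are illustrative assumptions, not taken from the original code.

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class GracefulStopSketch {
    public static void main(String[] args) throws Exception {
        SparkConf conf = new SparkConf().setAppName("graceful-stop-sketch");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(10));

        // ... build DStreams here; the failing pattern in the trace is a
        // foreachRDD(...) whose body calls DataFrameWriter.save(...) ...

        jssc.start();

        // On JVM shutdown, stop gracefully: let queued and in-flight batches
        // (including any pending DataFrame write) finish before tearing down
        // the streaming and Spark contexts, instead of interrupting the
        // JobHandler thread mid-save, which is what surfaces as the
        // InterruptedException in the trace above.
        Runtime.getRuntime().addShutdownHook(new Thread(() ->
                jssc.stop(true /* stopSparkContext */, true /* stopGracefully */)));

        jssc.awaitTermination();
    }
}

Spark's 1.x line also accepts the configuration flag spark.streaming.stopGracefullyOnShutdown=true, which installs an equivalent graceful-stop hook without custom code. Either way, if the exception persists, it is worth checking for external signals (YARN preemption, container kills, explicit SparkContext.stop() calls elsewhere in the application) that could be interrupting the driver thread.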