java.lang.InterruptedException

spark-user | Ted Yu | 11 months ago
  1. 0

    Cannot start Jenkins on port 80 on debian 7

    Stack Overflow | 3 years ago | u123
    java.lang.InterruptedException
  2. 0

    Exception connecting to HBase from Hive

    Stack Overflow | 3 years ago | user3474696
    java.lang.InterruptedException
  3. Speed up your debug routine!

    Automated exception search integrated into your IDE

  4. 0

    SSH connections to Gerrit randomly hang

    Google Groups | 5 years ago | Alan
    org.apache.sshd.common.SshException
  5. 0

    java.lang.InterruptedException

    Google Groups | 6 years ago | cvh
    org.apache.commons.dbcp.SQLNestedException: Cannot get a connection, general error

  1. tyson925 1 time, last 7 months ago
  2. treefolk 1 time, last 2 weeks ago
  3. danleyb2Interintel 1 time, last 3 weeks ago
  4. filpgame 1 time, last 2 months ago
  5. Nikolay Rybak 4 times, last 4 months ago
6 more registered users
18 unregistered visitors
Not finding the right solution?
Take a tour to get the most out of Samebug.

Tired of useless tips?

Automated exception search integrated into your IDE

Root Cause Analysis

  1. java.lang.InterruptedException

    No message provided

    at java.lang.Object.wait()
  2. Java RT
    Object.wait
    1. java.lang.Object.wait(Native Method)
    2. java.lang.Object.wait(Object.java:502)
    2 frames
  3. Spark
    SparkContext.runJob
    1. org.apache.spark.scheduler.JobWaiter.awaitResult(JobWaiter.scala:73)
    2. org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:612)
    3. org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
    4. org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
    5. org.apache.spark.SparkContext.runJob(SparkContext.scala:1922)
    5 frames
  4. org.apache.spark
    InsertIntoHadoopFsRelation$$anonfun$run$1.apply
    1. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply$mcV$sp(InsertIntoHadoopFsRelation.scala:150)
    2. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
    3. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
    3 frames
  5. Spark Project SQL
    SQLExecution$.withNewExecutionId
    1. org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    1 frame
  6. org.apache.spark
    InsertIntoHadoopFsRelation.run
    1. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation.run(InsertIntoHadoopFsRelation.scala:108)
    1 frame
  7. Spark Project SQL
    SparkPlan$$anonfun$execute$5.apply
    1. org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
    2. org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
    3. org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
    4. org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:127)
    5. org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:125)
    5 frames
  8. Spark
    RDDOperationScope$.withScope
    1. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    1 frame
  9. Spark Project SQL
    QueryExecution.toRdd
    1. org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:125)
    2. org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
    3. org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
    3 frames
  10. org.apache.spark
    ResolvedDataSource$.apply
    1. org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:242)
    1 frame
  11. Spark Project SQL
    DataFrameWriter.save
    1. org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:148)
    2. org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:139)
    2 frames
  12. com.hsbc.rsl
    PersistLevel3WithDataframes.call
    1. com.hsbc.rsl.spark.streaming.receiver.functions.PersistLevel3WithDataframes.call(PersistLevel3WithDataframes.java:84)
    2. com.hsbc.rsl.spark.streaming.receiver.functions.PersistLevel3WithDataframes.call(PersistLevel3WithDataframes.java:27)
    2 frames
  13. Spark Project Streaming
    ForEachDStream$$anonfun$1.apply
    1. org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$3.apply(JavaDStreamLike.scala:335)
    2. org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$3.apply(JavaDStreamLike.scala:335)
    3. org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:656)
    4. org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:656)
    5. org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:50)
    6. org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
    7. org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
    8. org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:424)
    9. org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:49)
    10. org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:49)
    11. org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:49)
    11 frames
  14. Scala
    Try$.apply
    1. scala.util.Try$.apply(Try.scala:161)
    1 frame
  15. Spark Project Streaming
    JobScheduler$JobHandler$$anonfun$run$1.apply
    1. org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
    2. org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:224)
    3. org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:224)
    4. org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:224)
    4 frames
  16. Scala
    DynamicVariable.withValue
    1. scala.util.DynamicVariable.withValue(DynamicVariable.scala:57)
    1 frame
  17. Spark Project Streaming
    JobScheduler$JobHandler.run
    1. org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:223)
    1 frame
  18. Java RT
    Thread.run
    1. java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    2. java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    3. java.lang.Thread.run(Thread.java:745)
    3 frames