org.apache.spark.SparkException: Job cancelled because SparkContext was shut down
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$cleanUpAfterSchedulerStop$1.apply(DAGScheduler.scala:703)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$cleanUpAfterSchedulerStop$1.apply(DAGScheduler.scala:702)
    at scala.collection.mutable.HashSet.foreach(HashSet.scala:79)
    at org.apache.spark.scheduler.DAGScheduler.cleanUpAfterSchedulerStop(DAGScheduler.scala:702)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onStop(DAGScheduler.scala:1525)
    at org.apache.spark.util.EventLoop.stop(EventLoop.scala:84)
    at org.apache.spark.scheduler.DAGScheduler.stop(DAGScheduler.scala:1449)
    at org.apache.spark.SparkContext$$anonfun$stop$7.apply$mcV$sp(SparkContext.scala:1724)
    at org.apache.spark.util.Utils$.tryLogNonFatalError(Utils.scala:1184)
    at org.apache.spark.SparkContext.stop(SparkContext.scala:1723)
    at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend$MonitorThread.run(YarnClientSchedulerBackend.scala:146)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:567)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1824)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1837)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1914)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply$mcV$sp(InsertIntoHadoopFsRelation.scala:150)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation.run(InsertIntoHadoopFsRelation.scala:108)
    at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:57)
    at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:57)
    at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:69)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:140)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:138)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:138)
    at org.apache.spark.sql.SQLContext$QueryExecution.toRdd$lzycompute(SQLContext.scala:933)
    at org.apache.spark.sql.SQLContext$QueryExecution.toRdd(SQLContext.scala:933)
    at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:197)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:146)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:137)
    at org.apache.spark.sql.DataFrame.save(DataFrame.scala:1808)
    at com.acnielsen.madras.utils.ndx_scala_util$.newHiveTableData(ndx_scala_util.scala:1264)
    at com.acnielsen.madras.utils.ndx_scala_util$.UPDATE(ndx_scala_util.scala:238)
    at com.acnielsen.madras.pkgews_panel_extract$$anonfun$p_signed_rank_yago$1.apply(pkgews_panel_extract.scala:658)
    at com.acnielsen.madras.pkgews_panel_extract$$anonfun$p_signed_rank_yago$1.apply(pkgews_panel_extract.scala:652)

cloudera.com | 3 months ago
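    Reading the trace from the bottom up: a DataFrame.save launched by the com.acnielsen.madras code was still in flight when YarnClientSchedulerBackend$MonitorThread observed the YARN application terminating and called SparkContext.stop(), which cancels every running job with this message. A minimal sketch of the interrupted write pattern, assuming hypothetical paths and app name and the Spark 1.5-era API matching the line numbers above:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object SaveSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("save-sketch")) // hypothetical app name
        val sqlContext = new SQLContext(sc)
        val df = sqlContext.read.parquet("/tmp/input")     // hypothetical input path

        // In yarn-client mode, MonitorThread polls the YARN application state;
        // when YARN reports the application finished or killed (e.g. a container
        // exceeding its memory limits), it calls sc.stop(), and any job still
        // running, like this save, fails with the message above.
        df.write.mode("overwrite").parquet("/tmp/output")  // hypothetical output path
        sc.stop()
      }
    }

    Because the cancellation originates outside the job, the explanation is usually in the YARN application logs (why was the application stopped?) rather than in the write itself.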
  1. ERROR ActorSystemImpl - Running my spark job on ya... - Cloudera Community

    cloudera.com | 3 months ago
    org.apache.spark.SparkException: Job cancelled because SparkContext was shut down (stack trace identical to the one above)
  2. NullPointerException when Training Imagenet

    GitHub | 6 months ago | GalaxyStyle
    org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 2.0 failed 4 times, most recent failure: Lost task 1.3 in stage 2.0 (TID 10, pc-2): java.lang.NullPointerException
  3. Re: Spark Streaming graceful shutdown in Spark 1.4 (see the graceful-shutdown sketch after this list)

    apache.org | 1 year ago
    org.apache.spark.SparkException: Job cancelled because SparkContext was shut down
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$cleanUpAfterSchedulerStop$1.apply(DAGScheduler.scala:736)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$cleanUpAfterSchedulerStop$1.apply(DAGScheduler.scala:735)
    INFO : org.apache.spark.scheduler.DAGScheduler - ResultStage 2 (start at Consumer.java:122) failed in 10.383 s
        at org.apache.spark.scheduler.DAGScheduler.cleanUpAfterSchedulerStop(DAGScheduler.scala:735)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onStop(DAGScheduler.scala:1468)
  4. Apache Spark User List - Cannot submit a Spark Application to a remote cluster (Spark 1.0; see the remote-master sketch after this list)

    nabble.com | 1 year ago
    org.apache.spark.SparkException: Job aborted due to stage failure: Master removed our application: FAILED
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1033)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1017)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1015)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1015)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
  5. Apache Spark User List - Job aborted due to stage failure: TID x failed for unknown reasons

    nabble.com | 7 months ago
    org.apache.spark.SparkException: Job aborted due to stage failure: Task 0.0:13 failed 4 times, most recent failure: Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1044)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1028)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1026)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1026)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:634)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:634)
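
    Entry 3 shows the same message arising in Spark Streaming when the SparkContext is stopped while batches are still running. Spark 1.4 added a graceful-shutdown option for exactly this; a minimal sketch, assuming a hypothetical socket source, app name, and 10-second batches:

    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.{Seconds, StreamingContext}

    object GracefulShutdownSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
          .setAppName("graceful-shutdown-sketch") // hypothetical app name
          // Available since Spark 1.4: on JVM shutdown, finish the in-flight
          // batches before stopping the SparkContext instead of cancelling them.
          .set("spark.streaming.stopGracefullyOnShutdown", "true")

        val ssc = new StreamingContext(conf, Seconds(10))   // hypothetical batch interval
        val lines = ssc.socketTextStream("localhost", 9999) // hypothetical input source
        lines.count().print()

        ssc.start()
        ssc.awaitTermination()
        // Explicit equivalent when you control the stop yourself:
        // ssc.stop(stopSparkContext = true, stopGracefully = true)
      }
    }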
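
    Entry 4's variant, "Master removed our application: FAILED", comes from the standalone master giving up on an application whose executors keep failing; in practice this is commonly a Spark version mismatch between driver and cluster, or workers that cannot connect back to the driver. A minimal connectivity check, with hypothetical host names:

    import org.apache.spark.{SparkConf, SparkContext}

    object RemoteSubmitSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
          .setAppName("remote-submit-sketch")     // hypothetical app name
          .setMaster("spark://master-host:7077")  // hypothetical master URL; must match the cluster UI exactly
          .set("spark.driver.host", "driver-host") // hypothetical; workers must be able to reach the driver
        val sc = new SparkContext(conf)
        try {
          // Trivial job: if this fails with "Master removed our application",
          // check driver/cluster version parity and network reachability.
          println(sc.parallelize(1 to 10).sum())
        } finally {
          sc.stop()
        }
      }
    }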


    Root Cause Analysis

    1. org.apache.spark.SparkException

      Job cancelled because SparkContext was shut down (full stack trace shown at the top of this page)

      at scala.collection.IndexedSeqOptimized$class.foreach()
    2. Scala
      ArrayOps$ofRef.foreach
      1. scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
      2. scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:108)
      2 frames
    3. com.acnielsen.madras
      pkgews_panel_extract.main
      1. com.acnielsen.madras.pkgews_panel_extract$.p_signed_rank_yago(pkgews_panel_extract.scala:652)
      2. com.acnielsen.madras.pkgews_panel_extract$.p_main(pkgews_panel_extract.scala:4844)
      3. com.acnielsen.madras.pkgews_panel_extract$.main(pkgews_panel_extract.scala:4655)
      4. com.acnielsen.madras.pkgews_panel_extract.main(pkgews_panel_extract.scala)
      4 frames
    4. Java RT
      Method.invoke
      1. sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
      2. sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
      3. sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      4. java.lang.reflect.Method.invoke(Method.java:606)
      4 frames