java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext
    at org.apache.spark.SparkContext.org$apache$spark$SparkContext$$assertNotStopped(SparkContext.scala:104)
    at org.apache.spark.SparkContext.defaultParallelism(SparkContext.scala:2063)
    at org.apache.spark.SparkContext.defaultMinPartitions(SparkContext.scala:2076)
    at org.apache.spark.sql.hive.HadoopTableReader.<init>(TableReader.scala:70)
    at org.apache.spark.sql.hive.execution.HiveTableScan.<init>(HiveTableScan.scala:77)
    at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$$anonfun$3.apply(HiveStrategies.scala:77)
    at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$$anonfun$3.apply(HiveStrategies.scala:77)
    at org.apache.spark.sql.SQLContext$SparkPlanner.pruneFilterProject(SQLContext.scala:853)
    at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$.apply(HiveStrategies.scala:73)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
    at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
    at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.makeBroadcastHashJoin(SparkStrategies.scala:92)
    at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.apply(SparkStrategies.scala:101)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
    at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
    at org.apache.spark.sql.execution.SparkStrategies$BasicOperators$.apply(SparkStrategies.scala:346)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
    at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
    at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
    at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.makeBroadcastHashJoin(SparkStrategies.scala:92)

cloudera.com | 3 months ago
  1. 0

    ERROR ActorSystemImpl - Running my spark job on ya... - Cloudera Community

    cloudera.com | 3 months ago
    java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext
        at org.apache.spark.SparkContext.org$apache$spark$SparkContext$$assertNotStopped(SparkContext.scala:104)
        at org.apache.spark.SparkContext.defaultParallelism(SparkContext.scala:2063)
        at org.apache.spark.SparkContext.defaultMinPartitions(SparkContext.scala:2076)
        at org.apache.spark.sql.hive.HadoopTableReader.<init>(TableReader.scala:70)
        at org.apache.spark.sql.hive.execution.HiveTableScan.<init>(HiveTableScan.scala:77)
        at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$$anonfun$3.apply(HiveStrategies.scala:77)
        at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$$anonfun$3.apply(HiveStrategies.scala:77)
        at org.apache.spark.sql.SQLContext$SparkPlanner.pruneFilterProject(SQLContext.scala:853)
        at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$.apply(HiveStrategies.scala:73)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
        at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.makeBroadcastHashJoin(SparkStrategies.scala:92)
        at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.apply(SparkStrategies.scala:101)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
        at org.apache.spark.sql.execution.SparkStrategies$BasicOperators$.apply(SparkStrategies.scala:346)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
        at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.makeBroadcastHashJoin(SparkStrategies.scala:92)
  2. 0

    GitHub comment 195#172252690

    GitHub | 11 months ago | jramos
    java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext. This stopped SparkContext was created at: org.apache.spark.SparkContext.<init>(SparkContext.scala:147) io.prediction.workflow.SharedSparkContext$class.beforeAll(BaseTest.scala:65) io.prediction.controller.EngineSuite.beforeAll(EngineTest.scala:18) org.scalatest.BeforeAndAfterAll$class.beforeAll(BeforeAndAfterAll.scala:187) io.prediction.controller.EngineSuite.beforeAll(EngineTest.scala:18) org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:253) io.prediction.controller.EngineSuite.run(EngineTest.scala:18) org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:444) org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:651) sbt.TestRunner.runTest$1(TestFramework.scala:76) sbt.TestRunner.run(TestFramework.scala:85) sbt.TestFramework$$anon$2$$anonfun$$init$$1$$anonfun$apply$8.apply(TestFramework.scala:202) sbt.TestFramework$$anon$2$$anonfun$$init$$1$$anonfun$apply$8.apply(TestFramework.scala:202) sbt.TestFramework$.sbt$TestFramework$$withContextLoader(TestFramework.scala:185) sbt.TestFramework$$anon$2$$anonfun$$init$$1.apply(TestFramework.scala:202) sbt.TestFramework$$anon$2$$anonfun$$init$$1.apply(TestFramework.scala:202) sbt.TestFunction.apply(TestFramework.scala:207) sbt.Tests$.sbt$Tests$$processRunnable$1(Tests.scala:239) sbt.Tests$$anonfun$makeSerial$1.apply(Tests.scala:245) sbt.Tests$$anonfun$makeSerial$1.apply(Tests.scala:245) The currently active SparkContext was created at: (No active SparkContext.)
  3. 0

    PySpark in Pycharm- unable to connect to remote server

    gmane.org | 8 months ago
    java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext at org.apache.spark.SparkContext.org$apache$spark$SparkContext$$assertNotStopped(SparkContext.scala:103)
  4. Speed up your debug routine!

    Automated exception search integrated into your IDE

  5. 0

    Re: Spark Effects of Driver Memory, Executor Memory, Driver Memory Overhead and Executor Memory Overhead on success of job runs

    spark-user | 1 year ago | Timothy Sum Hon Mun
    java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext org.apache.spark.SparkContext.org$apache$spark$SparkContext$$assertNotStopped(SparkContext.scala:103) org.apache.spark.SparkContext.broadcast(SparkContext.scala:1282) org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$submitMissingTasks(DAGScheduler.scala:874) org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskCompletion$14$$anonfun$apply$1.apply$mcVI$sp(DAGScheduler.scala:1088) org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskCompletion$14$$anonfun$apply$1.apply(DAGScheduler.scala:1084) org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskCompletion$14$$anonfun$apply$1.apply(DAGScheduler.scala:1084) scala.Option.foreach(Option.scala:236) org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskCompletion$14.apply(DAGScheduler.scala:1084) org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskCompletion$14.apply(DAGScheduler.scala:1083) scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47) org.apache.spark.scheduler.DAGScheduler.handleTaskCompletion(DAGScheduler.scala:1083) org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1447) org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1411) org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
  6. 0

    Spark cluster computing framework

    gmane.org | 11 months ago
    java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext at org.apache.spark.SparkContext.org$apache$spark$SparkContext$$assertNotStopped(SparkContext.scala:103)

    Not finding the right solution?
    Take a tour to get the most out of Samebug.

    Tired of useless tips?

    Automated exception search integrated into your IDE

    Root Cause Analysis

    1. java.lang.IllegalStateException

      Cannot call methods on a stopped SparkContext
        at org.apache.spark.SparkContext.org$apache$spark$SparkContext$$assertNotStopped(SparkContext.scala:104)
        at org.apache.spark.SparkContext.defaultParallelism(SparkContext.scala:2063)
        at org.apache.spark.SparkContext.defaultMinPartitions(SparkContext.scala:2076)
        at org.apache.spark.sql.hive.HadoopTableReader.<init>(TableReader.scala:70)
        at org.apache.spark.sql.hive.execution.HiveTableScan.<init>(HiveTableScan.scala:77)
        at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$$anonfun$3.apply(HiveStrategies.scala:77)
        at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$$anonfun$3.apply(HiveStrategies.scala:77)
        at org.apache.spark.sql.SQLContext$SparkPlanner.pruneFilterProject(SQLContext.scala:853)
        at org.apache.spark.sql.hive.HiveStrategies$HiveTableScans$.apply(HiveStrategies.scala:73)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
        at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.makeBroadcastHashJoin(SparkStrategies.scala:92)
        at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.apply(SparkStrategies.scala:101)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
        at org.apache.spark.sql.execution.SparkStrategies$BasicOperators$.apply(SparkStrategies.scala:346)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
        at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
        at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.makeBroadcastHashJoin(SparkStrategies.scala:92)

      at org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.apply()
    2. Spark Project SQL
      SparkStrategies$EquiJoinSelection$.apply
      1. org.apache.spark.sql.execution.SparkStrategies$EquiJoinSelection$.apply(SparkStrategies.scala:101)
      1 frame
    3. Spark Project Catalyst
      QueryPlanner$$anonfun$1.apply
      1. org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
      2. org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
      2 frames
    4. Scala
      Iterator$$anon$13.hasNext
      1. scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
      1 frame
    5. Spark Project Catalyst
      QueryPlanner.planLater
      1. org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
      2. org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
      2 frames
    6. Spark Project SQL
      SparkStrategies$BasicOperators$.apply
      1. org.apache.spark.sql.execution.SparkStrategies$BasicOperators$.apply(SparkStrategies.scala:346)
      1 frame
    7. Spark Project Catalyst
      QueryPlanner$$anonfun$1.apply
      1. org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
      2. org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
      2 frames
    8. Scala
      Iterator$$anon$13.hasNext
      1. scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
      1 frame
    9. Spark Project Catalyst
      QueryPlanner.planLater
      1. org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
      2. org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:54)
      2 frames
    10. Spark Project SQL
      SparkStrategies$Aggregation$.apply
      1. org.apache.spark.sql.execution.SparkStrategies$Aggregation$.apply(SparkStrategies.scala:235)
      1 frame
    11. Spark Project Catalyst
      QueryPlanner$$anonfun$1.apply
      1. org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
      2. org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:58)
      2 frames
    12. Scala
      Iterator$$anon$13.hasNext
      1. scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
      1 frame
    13. Spark Project Catalyst
      QueryPlanner.plan
      1. org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:59)
      1 frame
    14. Spark Project SQL
      DataFrame.collect
      1. org.apache.spark.sql.SQLContext$QueryExecution.sparkPlan$lzycompute(SQLContext.scala:926)
      2. org.apache.spark.sql.SQLContext$QueryExecution.sparkPlan(SQLContext.scala:924)
      3. org.apache.spark.sql.SQLContext$QueryExecution.executedPlan$lzycompute(SQLContext.scala:930)
      4. org.apache.spark.sql.SQLContext$QueryExecution.executedPlan(SQLContext.scala:930)
      5. org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:53)
      6. org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:1904)
      7. org.apache.spark.sql.DataFrame.collect(DataFrame.scala:1385)
      7 frames
    15. com.acnielsen.madras
      pkgews_panel_extract.main
      1. com.acnielsen.madras.pkgews_panel_extract$.p_signed_rank_yago(pkgews_panel_extract.scala:685)
      2. com.acnielsen.madras.pkgews_panel_extract$.p_main(pkgews_panel_extract.scala:4844)
      3. com.acnielsen.madras.pkgews_panel_extract$.main(pkgews_panel_extract.scala:4655)
      4. com.acnielsen.madras.pkgews_panel_extract.main(pkgews_panel_extract.scala)
      4 frames
    16. Java RT
      Method.invoke
      1. sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
      2. sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
      3. sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
      4. java.lang.reflect.Method.invoke(Method.java:606)
      4 frames