java.lang.OutOfMemoryError: GC overhead limit exceeded

DataStax JIRA | Rahul Shukla | 2 years ago
  1.

    I am trying to run the code below with Spark 1.4.0 and it is not working. (A self-contained sketch of this snippet appears after the results list.)

{code:scala}
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import com.datastax.spark.connector._

object Hello {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf(true)
      .set("spark.cassandra.connection.host", "127.0.0.1")
      .setMaster("local")
      .setAppName("POS Producer")
    val sc = new SparkContext(conf)
    val rdd = sc.cassandraTable[POS]("store", "sales_fact")
    println("count => " + rdd.count())
    rdd.collect.foreach(println)
  }
}
{code}

    Build file:

{code:java}
name := "hello-scala"

version := "1.0"

scalaVersion := "2.11.0"

libraryDependencies += "org.scalatest" % "scalatest_2.11" % "2.1.3" % "test"
libraryDependencies += "org.apache.spark" % "spark-core_2.11" % "1.4.0"
libraryDependencies += "com.datastax.spark" % "spark-cassandra-connector_2.11" % "1.4.0-M1"
{code}

    Console log:

{code:java}
Connected to the target VM, address: '127.0.0.1:50502', transport: 'socket'
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
15/07/09 16:23:18 INFO SparkContext: Running Spark version 1.4.0
15/07/09 16:23:18 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
15/07/09 16:23:19 WARN Utils: Your hostname, ULTP-438 resolves to a loopback address: 127.0.1.1; using 172.25.30.61 instead (on interface eth0)
15/07/09 16:23:19 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address
15/07/09 16:23:19 INFO SecurityManager: Changing view acls to: synerzip
15/07/09 16:23:19 INFO SecurityManager: Changing modify acls to: synerzip
15/07/09 16:23:19 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(synerzip); users with modify permissions: Set(synerzip)
15/07/09 16:23:20 INFO Slf4jLogger: Slf4jLogger started
15/07/09 16:23:20 INFO Remoting: Starting remoting
15/07/09 16:23:20 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriver@172.25.30.61:38696]
15/07/09 16:23:20 INFO Utils: Successfully started service 'sparkDriver' on port 38696.
15/07/09 16:23:20 INFO SparkEnv: Registering MapOutputTracker
15/07/09 16:23:20 INFO SparkEnv: Registering BlockManagerMaster
15/07/09 16:23:20 INFO DiskBlockManager: Created local directory at /tmp/spark-6e06440e-2351-4063-a6fa-3e809f3d50db/blockmgr-0417ff5d-3866-41dd-b63f-da5f37dd0ca6
15/07/09 16:23:20 INFO MemoryStore: MemoryStore started with capacity 1915.4 MB
15/07/09 16:23:20 INFO HttpFileServer: HTTP File server directory is /tmp/spark-6e06440e-2351-4063-a6fa-3e809f3d50db/httpd-d0366739-785f-4a11-8c8c-ad6414a497e4
15/07/09 16:23:20 INFO HttpServer: Starting HTTP Server
15/07/09 16:23:20 INFO Utils: Successfully started service 'HTTP file server' on port 41201.
15/07/09 16:23:20 INFO SparkEnv: Registering OutputCommitCoordinator
15/07/09 16:23:21 INFO Utils: Successfully started service 'SparkUI' on port 4040.
15/07/09 16:23:21 INFO SparkUI: Started SparkUI at http://172.25.30.61:4040
15/07/09 16:23:21 INFO Executor: Starting executor ID driver on host localhost
15/07/09 16:23:21 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 57362.
15/07/09 16:23:21 INFO NettyBlockTransferService: Server created on 57362
15/07/09 16:23:21 INFO BlockManagerMaster: Trying to register BlockManager
15/07/09 16:23:21 INFO BlockManagerMasterEndpoint: Registering block manager localhost:57362 with 1915.4 MB RAM, BlockManagerId(driver, localhost, 57362)
15/07/09 16:23:21 INFO BlockManagerMaster: Registered BlockManager
15/07/09 16:23:24 INFO Cluster: New Cassandra host /127.0.0.1:9042 added
15/07/09 16:23:24 INFO CassandraConnector: Connected to Cassandra cluster: Test Cluster
15/07/09 16:23:25 INFO CassandraConnector: Disconnected from Cassandra cluster: Test Cluster
Exception in thread "main" java.lang.OutOfMemoryError: GC overhead limit exceeded
    at scala.collection.immutable.VectorBuilder.<init>(Vector.scala:706)
    at scala.collection.immutable.Vector$.newBuilder(Vector.scala:22)
    at scala.collection.generic.GenericTraversableTemplate$class.genericBuilder(GenericTraversableTemplate.scala:70)
    at scala.collection.AbstractTraversable.genericBuilder(Traversable.scala:104)
    at scala.collection.generic.GenTraversableFactory$GenericCanBuildFrom.apply(GenTraversableFactory.scala:57)
    at scala.collection.generic.GenTraversableFactory$GenericCanBuildFrom.apply(GenTraversableFactory.scala:52)
    at scala.collection.TraversableLike$class.builder$1(TraversableLike.scala:240)
    at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
    at scala.collection.AbstractTraversable.map(Traversable.scala:104)
    at com.datastax.spark.connector.rdd.partitioner.CassandraRDDPartitioner$$anonfun$partitions$2.apply(CassandraRDDPartitioner.scala:137)
    at com.datastax.spark.connector.rdd.partitioner.CassandraRDDPartitioner$$anonfun$partitions$2.apply(CassandraRDDPartitioner.scala:135)
    at scala.collection.TraversableLike$WithFilter$$anonfun$map$2.apply(TraversableLike.scala:728)
    at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
    at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
    at scala.collection.TraversableLike$WithFilter.map(TraversableLike.scala:727)
    at com.datastax.spark.connector.rdd.partitioner.CassandraRDDPartitioner.partitions(CassandraRDDPartitioner.scala:135)
    at com.datastax.spark.connector.rdd.CassandraTableScanRDD.getPartitions(CassandraTableScanRDD.scala:120)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1802)
    at org.apache.spark.rdd.RDD$$anonfun$reduce$1.apply(RDD.scala:979)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:148)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:109)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:286)
    at org.apache.spark.rdd.RDD.reduce(RDD.scala:961)
    at com.datastax.spark.connector.rdd.CassandraTableScanRDD.count(CassandraTableScanRDD.scala:247)
    at Hello$.main(Hello.scala:13)
    at Hello.main(Hello.scala)
15/07/09 16:29:57 INFO SparkContext: Invoking stop() from shutdown hook
15/07/09 16:29:57 INFO SparkUI: Stopped Spark web UI at http://172.25.30.61:4040
15/07/09 16:29:57 INFO DAGScheduler: Stopping DAGScheduler
15/07/09 16:29:57 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
15/07/09 16:29:57 INFO Utils: path = /tmp/spark-6e06440e-2351-4063-a6fa-3e809f3d50db/blockmgr-0417ff5d-3866-41dd-b63f-da5f37dd0ca6, already present as root for deletion.
15/07/09 16:29:57 INFO MemoryStore: MemoryStore cleared
15/07/09 16:29:57 INFO BlockManager: BlockManager stopped
15/07/09 16:29:57 INFO BlockManagerMaster: BlockManagerMaster stopped
15/07/09 16:29:57 INFO SparkContext: Successfully stopped SparkContext
15/07/09 16:29:57 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
15/07/09 16:29:57 INFO Utils: Shutdown hook called
Disconnected from the target VM, address: '127.0.0.1:50502', transport: 'socket'
15/07/09 16:29:57 INFO Utils: Deleting directory /tmp/spark-6e06440e-2351-4063-a6fa-3e809f3d50db
Process finished with exit code 1
{code}

    The same code works with Spark 1.3.1.

    DataStax JIRA | 2 years ago | Rahul Shukla
    java.lang.OutOfMemoryError: GC overhead limit exceeded
  2.

    0.13.7-M3 - memory leak, or large increase in memory usage?

    GitHub | 2 years ago | paddymahoney
    java.lang.OutOfMemoryError: GC overhead limit exceeded
  3.

    GC overhead error in scala while computing strongly connected components of graph

    Stack Overflow | 2 years ago | user2460990
    java.lang.OutOfMemoryError: GC overhead limit exceeded
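
    The snippet in the first result above will not compile as it stands because the POS row class it maps to is never shown. Below is a minimal, self-contained sketch of the same program under stated assumptions: the POS field names and types are placeholders that must be adapted to the actual columns of store.sales_fact, and local[*] replaces local only to use all local cores.

{code:scala}
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import com.datastax.spark.connector._

// Hypothetical row class: the original post never defines POS, so these field
// names and types are placeholders; they have to match the columns of
// store.sales_fact for the typed mapping to work.
case class POS(storeId: Int, productId: Int, quantity: Int, amount: Double)

object Hello {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf(true)
      .set("spark.cassandra.connection.host", "127.0.0.1")
      .setMaster("local[*]") // assumption: run with all local cores
      .setAppName("POS Producer")
    val sc = new SparkContext(conf)

    // A typed scan of the table; count() alone already triggers the driver-side
    // partition computation that fails in the report above.
    val rdd = sc.cassandraTable[POS]("store", "sales_fact")
    println("count => " + rdd.count())

    sc.stop()
  }
}
{code}

    Collecting the whole table to the driver (rdd.collect) is left out on purpose: on a table of any size it would exhaust the driver heap even if the partitioning step succeeded.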

    Root Cause Analysis

    1. java.lang.OutOfMemoryError

      GC overhead limit exceeded

      at scala.collection.immutable.VectorBuilder.<init>()
    2. Scala
      AbstractTraversable.map
      1. scala.collection.immutable.VectorBuilder.<init>(Vector.scala:706)
      2. scala.collection.immutable.Vector$.newBuilder(Vector.scala:22)
      3. scala.collection.generic.GenericTraversableTemplate$class.genericBuilder(GenericTraversableTemplate.scala:70)
      4. scala.collection.AbstractTraversable.genericBuilder(Traversable.scala:104)
      5. scala.collection.generic.GenTraversableFactory$GenericCanBuildFrom.apply(GenTraversableFactory.scala:57)
      6. scala.collection.generic.GenTraversableFactory$GenericCanBuildFrom.apply(GenTraversableFactory.scala:52)
      7. scala.collection.TraversableLike$class.builder$1(TraversableLike.scala:240)
      8. scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
      9. scala.collection.AbstractTraversable.map(Traversable.scala:104)
      9 frames
    3. spark-cassandra-connector
      CassandraRDDPartitioner$$anonfun$partitions$2.apply
      1. com.datastax.spark.connector.rdd.partitioner.CassandraRDDPartitioner$$anonfun$partitions$2.apply(CassandraRDDPartitioner.scala:137)
      2. com.datastax.spark.connector.rdd.partitioner.CassandraRDDPartitioner$$anonfun$partitions$2.apply(CassandraRDDPartitioner.scala:135)
      2 frames
    4. Scala
      TraversableLike$WithFilter.map
      1. scala.collection.TraversableLike$WithFilter$$anonfun$map$2.apply(TraversableLike.scala:728)
      2. scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
      3. scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
      4. scala.collection.TraversableLike$WithFilter.map(TraversableLike.scala:727)
      4 frames
    5. spark-cassandra-connector
      CassandraTableScanRDD.getPartitions
      1. com.datastax.spark.connector.rdd.partitioner.CassandraRDDPartitioner.partitions(CassandraRDDPartitioner.scala:135)
      2. com.datastax.spark.connector.rdd.CassandraTableScanRDD.getPartitions(CassandraTableScanRDD.scala:120)
      2 frames
    6. Spark
      RDD$$anonfun$partitions$2.apply
      1. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
      2. org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
      2 frames
    7. Scala
      Option.getOrElse
      1. scala.Option.getOrElse(Option.scala:121)
      1 frame
    8. Spark
      RDD.reduce
      1. org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
      2. org.apache.spark.SparkContext.runJob(SparkContext.scala:1802)
      3. org.apache.spark.rdd.RDD$$anonfun$reduce$1.apply(RDD.scala:979)
      4. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:148)
      5. org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:109)
      6. org.apache.spark.rdd.RDD.withScope(RDD.scala:286)
      7. org.apache.spark.rdd.RDD.reduce(RDD.scala:961)
      7 frames
    9. spark-cassandra-connector
      CassandraTableScanRDD.count
      1. com.datastax.spark.connector.rdd.CassandraTableScanRDD.count(CassandraTableScanRDD.scala:247)
      1 frame
    10. Unknown
      Hello.main
      1. Hello$.main(Hello.scala:13)
      2. Hello.main(Hello.scala)
      2 frames
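
    Reading the trace bottom to top: Hello.main calls count(), which asks the RDD for its partitions, and the failing allocation happens on the driver inside CassandraRDDPartitioner.partitions, i.e. while the connector is still computing the partition layout for the table scan, before any executor task has run. Since the reporter states that the same code works with Spark 1.3.1, the regression most likely lies in the 1.4.0-M1 connector's partitioning code rather than in the table itself. Until that is resolved, the generic workaround for a driver-side "GC overhead limit exceeded" is to start the driver JVM with a larger heap. The sketch below only illustrates how to verify that setting; DriverHeapCheck is a hypothetical helper, not part of the reported code.

{code:scala}
// In local mode the driver runs inside the JVM that launches the program, so the
// heap must be raised where that JVM is started, e.g. -Xmx4g in the IDE run
// configuration or --driver-memory 4g with spark-submit; setting
// spark.driver.memory programmatically after the JVM is already up has no effect.
// Disabling the check with -XX:-UseGCOverheadLimit only turns this error into a
// plain OutOfMemoryError and is not a fix.
object DriverHeapCheck {
  def main(args: Array[String]): Unit = {
    // Confirms whether the -Xmx / --driver-memory setting actually took effect.
    val maxHeapMb = Runtime.getRuntime.maxMemory() / (1024L * 1024L)
    println(s"Driver max heap: $maxHeapMb MB")
  }
}
{code}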