Solutions on the web

via Stack Overflow by Ronaldinho, 1 year ago
GC overhead limit exceeded
java.lang.OutOfMemoryError: GC overhead limit exceeded
	at org.apache.hadoop.fs.Path.initialize(Path.java:203)
	at org.apache.hadoop.fs.Path.<init>(Path.java:172)
	at org.apache.spark.sql.sources.HadoopFsRelation$$anonfun$21.apply(interfaces.scala:908)
	at org.apache.spark.sql.sources.HadoopFsRelation$$anonfun$21.apply(interfaces.scala:906)
	at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
	at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
	at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:108)
	at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:108)
	at org.apache.spark.sql.sources.HadoopFsRelation$.listLeafFilesInParallel(interfaces.scala:906)
	at org.apache.spark.sql.sources.HadoopFsRelation$FileStatusCache.listLeafFiles(interfaces.scala:445)
	at org.apache.spark.sql.sources.HadoopFsRelation$FileStatusCache.refresh(interfaces.scala:477)
	at org.apache.spark.sql.sources.HadoopFsRelation.org$apache$spark$sql$sources$HadoopFsRelation$$fileStatusCache$lzycompute(interfaces.scala:489)
	at org.apache.spark.sql.sources.HadoopFsRelation.org$apache$spark$sql$sources$HadoopFsRelation$$fileStatusCache(interfaces.scala:487)
	at org.apache.spark.sql.sources.HadoopFsRelation.cachedLeafStatuses(interfaces.scala:494)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation$MetadataCache.refresh(ParquetRelation.scala:398)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation.org$apache$spark$sql$execution$datasources$parquet$ParquetRelation$$metadataCache$lzycompute(ParquetRelation.scala:145)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation.org$apache$spark$sql$execution$datasources$parquet$ParquetRelation$$metadataCache(ParquetRelation.scala:143)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation$$anonfun$6.apply(ParquetRelation.scala:202)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation$$anonfun$6.apply(ParquetRelation.scala:202)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation.dataSchema(ParquetRelation.scala:202)
	at org.apache.spark.sql.sources.HadoopFsRelation.schema$lzycompute(interfaces.scala:636)
	at org.apache.spark.sql.sources.HadoopFsRelation.schema(interfaces.scala:635)
	at org.apache.spark.sql.execution.datasources.LogicalRelation.<init>(LogicalRelation.scala:37)
	at org.apache.spark.sql.hive.HiveMetastoreCatalog$$anonfun$12.apply(HiveMetastoreCatalog.scala:481)
	at org.apache.spark.sql.hive.HiveMetastoreCatalog$$anonfun$12.apply(HiveMetastoreCatalog.scala:480)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.sql.hive.HiveMetastoreCatalog.org$apache$spark$sql$hive$HiveMetastoreCatalog$$convertToParquetRelation(HiveMetastoreCatalog.scala:480)
	at org.apache.spark.sql.hive.HiveMetastoreCatalog$ParquetConversions$$anonfun$apply$1.applyOrElse(HiveMetastoreCatalog.scala:542)
	at org.apache.spark.sql.hive.HiveMetastoreCatalog$ParquetConversions$$anonfun$apply$1.applyOrElse(HiveMetastoreCatalog.scala:522)
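
The trace shows the error being thrown on the driver while it lists the leaf files of a Parquet table (HadoopFsRelation.listLeafFilesInParallel), i.e. the driver heap fills up with file-status metadata before any job runs. Below is a minimal Scala sketch of the usual mitigation, assuming Spark 1.x and launch via spark-submit; the application name, the 8g figure, and the table path are illustrative assumptions, not taken from the original question.

    // Driver heap must be raised at launch time, since the driver JVM is
    // already running by the time SparkConf values are read. For example
    // (illustrative sizes):
    //   spark-submit --driver-memory 8g \
    //     --conf spark.driver.extraJavaOptions=-XX:-UseGCOverheadLimit \
    //     --class ParquetListingExample app.jar
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object ParquetListingExample {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("ParquetListingExample")
        val sc = new SparkContext(conf)
        val sqlContext = new SQLContext(sc)
        // Reading a (hypothetical) heavily partitioned Parquet table triggers
        // the driver-side leaf-file listing seen in the trace above.
        val df = sqlContext.read.parquet("/warehouse/events_parquet")
        println(df.schema)
        sc.stop()
      }
    }

Note that -XX:-UseGCOverheadLimit only disables the "GC overhead limit exceeded" check rather than fixing the memory pressure; if the table has a very large number of partitions or small files, giving the driver more heap, or consolidating the files, is the more durable fix.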