Searched on Google with the first line of a Java stack trace?

Paste your entire stack trace with the exception message, and we can recommend more relevant solutions and speed up your debugging. Try a sample exception.

Recommended solutions based on your search

Solutions on the web

via Stack Overflow by saeed talaee, 10 months ago
via GitHub by jkommeren, 10 months ago
Task failed while writing rows
via Stack Overflow by vijay, 9 months ago
via GitHub by dong929311, 1 year ago
via GitHub by manisha803, 1 year ago
java.lang.OutOfMemoryError: Unable to acquire 100 bytes of memory, got 0
    at org.apache.spark.memory.MemoryConsumer.allocatePage(MemoryConsumer.java:129)
    at org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter.acquireNewPageIfNecessary(UnsafeExternalSorter.java:374)
    at org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter.insertRecord(UnsafeExternalSorter.java:396)
    at org.apache.spark.sql.execution.UnsafeExternalRowSorter.insertRow(UnsafeExternalRowSorter.java:94)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.sort_addToSorter$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
    at org.apache.spark.sql.execution.WindowExec$$anonfun$15$$anon$1.fetchNextRow(WindowExec.scala:300)
    at org.apache.spark.sql.execution.WindowExec$$anonfun$15$$anon$1.<init>(WindowExec.scala:309)
    at org.apache.spark.sql.execution.WindowExec$$anonfun$15.apply(WindowExec.scala:289)
    at org.apache.spark.sql.execution.WindowExec$$anonfun$15.apply(WindowExec.scala:288)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:766)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:766)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
    at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
    at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
    at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
    at org.apache.spark.rdd.CoalescedRDD$$anonfun$compute$1.apply(CoalescedRDD.scala:96)
    at org.apache.spark.rdd.CoalescedRDD$$anonfun$compute$1.apply(CoalescedRDD.scala:95)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at org.apache.spark.sql.execution.datasources.DefaultWriterContainer$$anonfun$writeRows$1.apply$mcV$sp(WriterContainer.scala:253)
    at org.apache.spark.sql.execution.datasources.DefaultWriterContainer$$anonfun$writeRows$1.apply(WriterContainer.scala:252)
    at org.apache.spark.sql.execution.datasources.DefaultWriterContainer$$anonfun$writeRows$1.apply(WriterContainer.scala:252)
    at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1325)
    at org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:258)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
    at org.apache.spark.scheduler.Task.run(Task.scala:85)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
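Read bottom to top, the trace runs from the file write (DefaultWriterContainer.writeRows) through a CoalescedRDD into a window sort (WindowExec) whose UnsafeExternalSorter cannot acquire even a single memory page. Several of the threads linked above attribute this pattern to coalescing to very few partitions before writing, and suggest using repartition (which inserts a shuffle) instead of coalesce, or giving executors more memory. Below is a minimal Scala sketch of that workaround; the input path, column names, and window definition are illustrative assumptions, not taken from any of the linked answers.

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.expressions.Window
    import org.apache.spark.sql.functions.row_number

    object RepartitionBeforeWrite {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .appName("repartition-before-write-sketch")
          .getOrCreate()

        // Hypothetical input; the path and column names are placeholders.
        val df = spark.read.parquet("/data/input")

        // A window sort like the one in the trace (WindowExec feeding a sorter).
        val w = Window.partitionBy("key").orderBy("ts")
        val ranked = df.withColumn("rn", row_number().over(w))

        ranked
          // repartition(1) adds a shuffle boundary, so the window sort still
          // runs in parallel upstream; coalesce(1) would collapse the whole
          // plan into one task, which can starve the sorter of memory pages
          // as in the trace above.
          .repartition(1)
          .write
          .parquet("/data/output")

        spark.stop()
      }
    }

Like coalesce(1), repartition(1) still yields a single output file; the difference is the shuffle boundary, which keeps the expensive sort out of the lone writing task.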