java.lang.IllegalArgumentException: bound must be positive
    at java.util.Random.nextInt(Random.java:388)
    at org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.confChanged(LocalDirAllocator.java:305)
    at org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.getLocalPathForWrite(LocalDirAllocator.java:344)
    at org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.createTmpFileForWrite(LocalDirAllocator.java:416)
    at org.apache.hadoop.fs.LocalDirAllocator.createTmpFileForWrite(LocalDirAllocator.java:198)
    at org.apache.hadoop.fs.s3a.S3AOutputStream.<init>(S3AOutputStream.java:87)
    at org.apache.hadoop.fs.s3a.S3AFileSystem.create(S3AFileSystem.java:421)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:913)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:894)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:791)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:780)
    at org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.commitJob(FileOutputCommitter.java:336)
    at org.apache.parquet.hadoop.ParquetOutputCommitter.commitJob(ParquetOutputCommitter.java:46)
    at org.apache.spark.sql.execution.datasources.BaseWriterContainer.commitJob(WriterContainer.scala:222)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1.apply$mcV$sp(InsertIntoHadoopFsRelationCommand.scala:144)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1.apply(InsertIntoHadoopFsRelationCommand.scala:115)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1.apply(InsertIntoHadoopFsRelationCommand.scala:115)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:115)
    at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:60)
    at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:58)
    at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:115)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:115)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:136)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:133)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:114)
    at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:86)
    at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:86)
    at org.apache.spark.sql.execution.datasources.DataSource.write(DataSource.scala:487)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:211)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:194)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:237)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:280)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:128)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:211)
    at java.lang.Thread.run(Thread.java:745)
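
Reading the top frames: the failure originates in Hadoop's LocalDirAllocator, which picks a local buffer directory at random before the S3A output stream stages data to disk. java.util.Random.nextInt(bound) requires a strictly positive bound, so if the allocator ends up with zero usable local directories (typically because fs.s3a.buffer.dir, or the hadoop.tmp.dir it defaults under, points at a path that does not exist or is not writable by the Spark user), the nextInt call receives 0 and throws exactly this exception. Below is a minimal sketch of that contract violation, assuming the usable-directory count collapsed to zero; the variable names are illustrative, not Hadoop's own fields.

    import java.util.Random;

    public class BoundMustBePositiveDemo {
        public static void main(String[] args) {
            Random dirIndexRandomizer = new Random();

            // Hypothetical scenario: every configured buffer directory was
            // missing or unwritable, so none survived validation.
            int usableDirCount = 0;

            // Random.nextInt(bound) demands bound > 0; with 0 it throws
            // java.lang.IllegalArgumentException: bound must be positive,
            // the first line of the stack trace above.
            int chosenDir = dirIndexRandomizer.nextInt(usableDirCount);
            System.out.println("chose directory " + chosenDir); // never reached
        }
    }

If this diagnosis fits, a plausible fix is to point fs.s3a.buffer.dir (or hadoop.tmp.dir) at an existing directory that is writable on every node that runs the job, and to verify the path as the same user account that runs Spark.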