Did you search Google with just the first line of a Java stack trace?

We can recommend more relevant solutions and speed up debugging when you paste your entire stack trace with the exception message. Try a sample exception.

Recommended solutions based on your search

java.lang.IllegalStateException: state should be: open	at com.mongodb.assertions.Assertions.isTrue(Assertions.java:70)	at com.mongodb.connection.DefaultServer.getDescription(DefaultServer.java:97)	at com.mongodb.binding.ClusterBinding$ClusterBindingConnectionSource.getServerDescription(ClusterBinding.java:81)	at com.mongodb.operation.QueryBatchCursor.<init>(QueryBatchCursor.java:53)	at com.mongodb.operation.FindOperation$1.call(FindOperation.java:409)	at com.mongodb.operation.FindOperation$1.call(FindOperation.java:394)	at com.mongodb.operation.OperationHelper.withConnectionSource(OperationHelper.java:195)	at com.mongodb.operation.OperationHelper.withConnection(OperationHelper.java:168)	at com.mongodb.operation.FindOperation.execute(FindOperation.java:394)	at com.mongodb.operation.FindOperation.execute(FindOperation.java:57)	at com.mongodb.Mongo.execute(Mongo.java:738)	at com.mongodb.Mongo$2.execute(Mongo.java:725)	at com.mongodb.DBCursor.initializeCursor(DBCursor.java:815)	at com.mongodb.DBCursor.hasNext(DBCursor.java:149)	at com.mongodb.hadoop.input.MongoRecordReader.nextKeyValue(MongoRecordReader.java:75)	at org.apache.spark.rdd.NewHadoopRDD$$anon$1.hasNext(NewHadoopRDD.scala:143)	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)	at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:413)	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)	at org.apache.spark.util.random.SamplingUtils$.reservoirSampleAndCount(SamplingUtils.scala:41)	at org.apache.spark.RangePartitioner$$anonfun$8.apply(Partitioner.scala:259)	at org.apache.spark.RangePartitioner$$anonfun$8.apply(Partitioner.scala:257)	at org.apache.spark.rdd.RDD$$anonfun$15.apply(RDD.scala:647)	at org.apache.spark.rdd.RDD$$anonfun$15.apply(RDD.scala:647)	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)	at 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)	at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)	at org.apache.spark.scheduler.Task.run(Task.scala:64)	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)	at java.lang.Thread.run(Thread.java:745)