Searched Google with just the first line of a Java stack trace?

We can recommend more relevant solutions and speed up your debugging when you paste the entire stack trace, including the exception message. Try a sample exception.

Recommended solutions based on your search

java.net.UnknownHostException: namenode1.hdfs.mesos
    at org.apache.hadoop.security.SecurityUtil.buildTokenService(SecurityUtil.java:377)
    at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:240)
    at org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.getProxy(ConfiguredFailoverProxyProvider.java:124)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.<init>(RetryInvocationHandler.java:74)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.<init>(RetryInvocationHandler.java:65)
    at org.apache.hadoop.io.retry.RetryProxy.create(RetryProxy.java:58)
    at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:152)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:579)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:524)
    at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:146)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2397)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:167)
    at org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:653)
    at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:427)
    at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:400)
    at org.apache.spark.SparkContext$$anonfun$hadoopFile$1$$anonfun$33.apply(SparkContext.scala:1015)
    at org.apache.spark.SparkContext$$anonfun$hadoopFile$1$$anonfun$33.apply(SparkContext.scala:1015)
    at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:176)
    at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:176)
    at scala.Option.map(Option.scala:145)
    at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:176)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:195)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:91)
    at org.apache.spark.sql.execution.Exchange.prepareShuffleDependency(Exchange.scala:220)
    at org.apache.spark.sql.execution.Exchange$$anonfun$doExecute$1.apply(Exchange.scala:254)
    at org.apache.spark.sql.execution.Exchange$$anonfun$doExecute$1.apply(Exchange.scala:248)
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:48)
    at org.apache.spark.sql.execution.Exchange.doExecute(Exchange.scala:247)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
    at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.apply(TungstenAggregate.scala:86)
    at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.apply(TungstenAggregate.scala:80)
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:48)
    at org.apache.spark.sql.execution.aggregate.TungstenAggregate.doExecute(TungstenAggregate.scala:80)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
    at org.apache.spark.sql.execution.ConvertToSafe.doExecute(rowFormatConverters.scala:56)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
    at org.apache.spark.sql.execution.TakeOrderedAndProject.collectData(basicOperators.scala:213)
    at org.apache.spark.sql.execution.TakeOrderedAndProject.executeCollect(basicOperators.scala:218)
    at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
    at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
    at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2086)
    at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
    at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1505)
    at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
    at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1374)
    at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2099)
    at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1374)
    at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1456)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.zeppelin.spark.ZeppelinContext.showDF(ZeppelinContext.java:199)
    at org.apache.zeppelin.spark.SparkSqlInterpreter.interpret(SparkSqlInterpreter.java:151)
    at org.apache.zeppelin.interpreter.ClassloaderInterpreter.interpret(ClassloaderInterpreter.java:57)
    at org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:93)
    at org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:345)
    at org.apache.zeppelin.scheduler.Job.run(Job.java:176)
    at org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:139)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
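
The first two frames carry the actual cause: org.apache.hadoop.security.SecurityUtil.buildTokenService throws java.net.UnknownHostException because the driver cannot resolve the NameNode host namenode1.hdfs.mesos while the DFSClient is being constructed; every Spark, DataFrame, and Zeppelin frame below it is just the query that triggered the HDFS call. A quick way to confirm that this is a name-resolution problem, independent of Spark and Hadoop, is the minimal Java sketch below. It is an illustration only: the class name is made up, the hostname is simply copied from the trace, and it assumes you run it on the machine that hosts the Zeppelin/Spark driver.

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    // Minimal diagnostic sketch (hypothetical class, hostname taken from the trace).
    // It only checks whether the NameNode hostname resolves; it does not touch HDFS.
    public class ResolveNameNodeHost {
        public static void main(String[] args) {
            String host = args.length > 0 ? args[0] : "namenode1.hdfs.mesos";
            try {
                InetAddress address = InetAddress.getByName(host);
                System.out.println(host + " resolves to " + address.getHostAddress());
            } catch (UnknownHostException e) {
                // Same exception class as in the stack trace above: the JVM's resolver
                // (DNS or /etc/hosts) cannot map the name to an IP address.
                System.err.println("Cannot resolve " + host + ": " + e.getMessage());
            }
        }
    }

If the name fails to resolve here as well, the fix is typically at the DNS or /etc/hosts level on the driver host (for example, making the Mesos DNS domain resolvable from that machine) rather than in the Spark or Zeppelin code itself.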