Did you search Google with just the first line of a Java stack trace?

We can recommend more relevant solutions and speed up your debugging when you paste your entire stack trace, including the exception message. Try a sample exception.

Recommended solutions based on your search

Solutions on the web

via cloudera.com by Unknown author, 1 year ago
via Stack Overflow by wazza, 2 years ago
java.net.UnknownHostException: hadoopcluster
java.lang.IllegalArgumentException: java.net.UnknownHostException: quickstart.cloudera
    at org.apache.hadoop.security.SecurityUtil.buildTokenService(SecurityUtil.java:377)
    at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:240)
    at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:144)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:579)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:524)
    at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:146)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2397)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2431)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2413)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)
    at org.apache.hadoop.fs.Path.getFileSystem(Path.java:296)
    at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:256)
    at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:228)
    at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:304)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:207)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:193)
    at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:207)
    at org.apache.spark.sql.DataFrame$$anonfun$collect$1.apply(DataFrame.scala:1386)
    at org.apache.spark.sql.DataFrame$$anonfun$collect$1.apply(DataFrame.scala:1386)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:1904)
    at org.apache.spark.sql.DataFrame.collect(DataFrame.scala:1385)
    at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1315)
    at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1378)
    at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:178)
    at org.apache.spark.sql.DataFrame.show(DataFrame.scala:402)
    at org.apache.spark.sql.DataFrame.show(DataFrame.scala:363)
    at org.apache.spark.sql.DataFrame.show(DataFrame.scala:371)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:20)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:25)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:27)
    at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:29)
    at $iwC$$iwC$$iwC$$iwC.<init>(<console>:31)
    at $iwC$$iwC$$iwC.<init>(<console>:33)
    at $iwC$$iwC.<init>(<console>:35)
    at $iwC.<init>(<console>:37)