org.apache.spark.SparkException: Task failed while writing rows.

DataStax JIRA | Dmytro Popovych | 2 years ago
  1.

    It would be nice to add [the same fix|https://github.com/datastax/spark-cassandra-connector/commit/6601ce67f6ea3aff5f6a8132c89bdba2bf1d1d20#diff-3692b29c789a7d0d0e9238c662c693d6R49] for values in complex types (set/list/map). Sorry, no PR is attached; I fixed the problem locally, but I don't feel my Scala skills are good enough to contribute to the project :)
    {code}
    5/09/19 20:33:38 WARN TaskSetManager: Lost task 2.0 in stage 0.0 (TID 13, 10.0.2.218): org.apache.spark.SparkException: Task failed while writing rows.
        at org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:251)
        at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply(InsertIntoHadoopFsRelation.scala:150)
        at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply(InsertIntoHadoopFsRelation.scala:150)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
        at org.apache.spark.scheduler.Task.run(Task.scala:88)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
    Caused by: java.lang.ClassCastException: java.util.Date cannot be cast to java.sql.Timestamp
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$TimestampConverter$.toCatalystImpl(CatalystTypeConverters.scala:308)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter$$anonfun$toCatalystImpl$4.apply(CatalystTypeConverters.scala:205)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter$$anonfun$toCatalystImpl$4.apply(CatalystTypeConverters.scala:203)
        at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772)
        at scala.collection.immutable.Map$Map2.foreach(Map.scala:130)
        at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter.toCatalystImpl(CatalystTypeConverters.scala:203)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter.toCatalystImpl(CatalystTypeConverters.scala:188)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
        at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:396)
        at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:63)
        at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:60)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:242)
        ... 8 more
    {code}

    DataStax JIRA | 2 years ago | Dmytro Popovych
    org.apache.spark.SparkException: Task failed while writing rows.
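
    The comment above asks for the connector's existing java.util.Date to java.sql.Timestamp conversion to also cover values nested inside collections. The linked commit contains the actual fix for top-level values; the snippet below is only a rough, hypothetical sketch of how the nested case could be handled and is not the connector's code (the object NestedTimestampFix and its convert helper are invented names).

    {code}
    import java.sql.Timestamp
    import java.util.Date

    // Hypothetical helper, not part of spark-cassandra-connector: recursively replaces
    // java.util.Date values with java.sql.Timestamp inside maps, sets, and sequences,
    // mirroring what the linked fix does for top-level column values.
    object NestedTimestampFix {
      def convert(value: Any): Any = value match {
        case t: Timestamp => t                             // already what Catalyst's TimestampConverter expects
        case d: Date      => new Timestamp(d.getTime)      // plain java.util.Date -> java.sql.Timestamp
        case m: Map[_, _] => m.map { case (k, v) => convert(k) -> convert(v) }
        case s: Set[_]    => s.map(convert)
        case xs: Seq[_]   => xs.map(convert)
        case other        => other                         // leave all other values untouched
      }
    }
    {code}

    Applied to each column value before the row is handed to Spark SQL, such a conversion would let the TimestampConverter in the trace above receive java.sql.Timestamp even when the value sits inside a set, list, or map.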
  2.

    Help:basic RMI question

    Google Groups | 2 decades ago | Paolo De Lutiis
    java.lang.ClassCastException: serverPackage.ServerClass_Stub
        at clientPackage.myApplet.init(myApplet.java:line#)
        at sun.applet.AppletPanel.run(AppletPanel.java:273)
        at java.lang.Thread.
  3.

    Shared hosting Bungeecord problems.

    GitHub | 3 years ago | Arksenu
    java.lang.ClassCastException: bka cannot be cast to fs

    All my servers are set to onlinemode: false. Here is my config.yml:

    groups:
      arksenu:
      - admin
    disabled_commands:
    - find
    player_limit: -1
    stats: 347d1d62-6fb6-4869-bd43-7d0745de8e3c
    permissions:
      default:
      - bungeecord.command.server
      - bungeecord.command.list
      admin:
      - bungeecord.command.ip
      - bungeecord.command.alert
      - bungeecord.command.end
      - bungeecord.command.reload
    listeners:
    - max_players: 18
      fallback_server: hub
      host: 0.0.0.0:35289
      bind_local_address: true
      ping_passthrough: false
      tab_list: GLOBAL_PING
      default_server: hub
      forced_hosts:
        pvp.md-5.net: hub
      tab_size: 60
      force_default_server: true
      motd: 'Network'
      query_enabled: false
      query_port: 25565
    timeout: 30000
    connection_throttle: 4000
    servers:
      hub:
        address: 108.170.8.146:35289
        restricted: false
        motd: test
      UvGames:
        address: 66.85.165.170:26200
        restricted: false
        motd: test
      UvPrison:
        address: 66.85.128.90:25928
        restricted: false
        motd: test
    ip_forward: false
    online_mode: true

    And here is my console message:

    -> UpstreamBridge has disconnected
    disconnected with: Exception Connecting:RuntimeException : Server is online mode! @ net.md_5.bungee.ServerConnector:188


    Root Cause Analysis

    1. java.lang.ClassCastException

      java.util.Date cannot be cast to java.sql.Timestamp

      at org.apache.spark.sql.catalyst.CatalystTypeConverters$TimestampConverter$.toCatalystImpl()
    2. Spark Project Catalyst
      CatalystTypeConverters$MapConverter$$anonfun$toCatalystImpl$4.apply
      1. org.apache.spark.sql.catalyst.CatalystTypeConverters$TimestampConverter$.toCatalystImpl(CatalystTypeConverters.scala:308)
      2. org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
      3. org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter$$anonfun$toCatalystImpl$4.apply(CatalystTypeConverters.scala:205)
      4. org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter$$anonfun$toCatalystImpl$4.apply(CatalystTypeConverters.scala:203)
      4 frames
    3. Scala
      TraversableLike$WithFilter.foreach
      1. scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772)
      2. scala.collection.immutable.Map$Map2.foreach(Map.scala:130)
      3. scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771)
      3 frames
    4. Spark Project Catalyst
      CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply
      1. org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter.toCatalystImpl(CatalystTypeConverters.scala:203)
      2. org.apache.spark.sql.catalyst.CatalystTypeConverters$MapConverter.toCatalystImpl(CatalystTypeConverters.scala:188)
      3. org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
      4. org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:396)
      4 frames
    5. Spark Project SQL
      RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply
      1. org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:63)
      2. org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:60)
      2 frames
    6. Scala
      Iterator$$anon$11.next
      1. scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
      1 frame
    7. org.apache.spark
      InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply
      1. org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:242)
      2. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply(InsertIntoHadoopFsRelation.scala:150)
      3. org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1$$anonfun$apply$mcV$sp$3.apply(InsertIntoHadoopFsRelation.scala:150)
      3 frames
    8. Spark
      Executor$TaskRunner.run
      1. org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
      2. org.apache.spark.scheduler.Task.run(Task.scala:88)
      3. org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
      3 frames
    9. Java RT
      Thread.run
      1. java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
      2. java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
      3. java.lang.Thread.run(Thread.java:745)
      3 frames
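
    The frames above show the failing path for this issue: an RDD of objects whose map fields hold java.util.Date values is converted to a DataFrame, and Catalyst's MapConverter hands those values to TimestampConverter, which expects java.sql.Timestamp. Until a connector/Spark-side fix covers complex types, one user-side workaround is to convert the dates before the DataFrame is created. The sketch below is a minimal illustration against Spark 1.5-era APIs; the case classes, app name, and output path are invented for the example.

    {code}
    import java.sql.Timestamp
    import java.util.Date

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    // Hypothetical row shapes, for illustration only.
    case class EventRaw(id: String, tags: Map[String, Date])        // java.util.Date map values -> ClassCastException on write
    case class EventFixed(id: String, tags: Map[String, Timestamp]) // java.sql.Timestamp map values -> accepted by Catalyst

    object DateMapWorkaround {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("date-map-workaround").setMaster("local[*]"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._

        val raw = sc.parallelize(Seq(EventRaw("a", Map("created" -> new Date()))))

        // Convert java.util.Date map values to java.sql.Timestamp before Catalyst's
        // MapConverter/TimestampConverter (the Catalyst frames above) ever see them.
        val fixed = raw.map { e =>
          EventFixed(e.id, e.tags.mapValues(d => new Timestamp(d.getTime)).toMap)
        }

        fixed.toDF().write.parquet("/tmp/events") // hypothetical output path
        sc.stop()
      }
    }
    {code}

    Writing EventFixed rather than EventRaw avoids the ClassCastException listed at the top of this analysis.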