java.lang.IllegalArgumentException: Unsupported type: StructType(StructField(id,StringType,true))

Stack Overflow | Nandan Rao | 5 months ago
  1. 0

    spark-cassandra-connector - Creating Table from Dataframe - StructType?

    Stack Overflow | 5 months ago | Nandan Rao
    java.lang.IllegalArgumentException: Unsupported type: StructType(StructField(id,StringType,true))
  2. 0

    GitHub comment 231#231815897

    GitHub | 5 months ago | samelamin
    java.lang.IllegalArgumentException: Don't know how to save StructField(blah,StructType(StructField(blah,StructType(StructField(ApplicationNaStructField(foo,BooleanType,true)),true)),true) to JDBC
  3. 0

    Steps to reproduce: Follow [new spark ml api guide|http://spark.apache.org/docs/latest/ml-guide.html]: {code} val training = sparkContext.parallelize(Seq( LabeledDocument(0L, "a b c d e spark", 1.0), LabeledDocument(1L, "b d", 0.0), LabeledDocument(2L, "spark f g h", 1.0), LabeledDocument(3L, "hadoop mapreduce", 0.0))) // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. val tokenizer = new Tokenizer() .setInputCol("text") .setOutputCol("words") val hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol) .setOutputCol("features") val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF)) val model = pipeline.fit(training) val transformed = model.transform(training) scala> transformed.schema res7: org.apache.spark.sql.StructType = StructType(ArrayBuffer(StructField(id,LongType,false), StructField(text,StringType,true), StructField(label,DoubleType,false), StructField(words,ArrayType(StringType,false),true), StructField(features,org.apache.spark.mllib.linalg.VectorUDT@5172cce4,true))) scala> toDataFrame(transformed) java.lang.IllegalArgumentException: Unsupported type ArrayType(StringType,false) at org.apache.spark.h2o.H2OContextUtils$.dataTypeToClass(H2OContextUtils.scala:175) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47) at scala.collection.TraversableLike$class.map(TraversableLike.scala:244) val transformed2 = transformed.select('features) scala> transformed2.schema res4: org.apache.spark.sql.StructType = 
StructType(ArrayBuffer(StructField(features,org.apache.spark.mllib.linalg.VectorUDT@5172cce4,true))) scala> toDataFrame(transformed2) java.lang.IllegalArgumentException: Unsupported type org.apache.spark.mllib.linalg.VectorUDT@5172cce4 at org.apache.spark.h2o.H2OContextUtils$.dataTypeToClass(H2OContextUtils.scala:175) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47) {code}

    JIRA | 2 years ago | Peter Rudenko
    java.lang.IllegalArgumentException: Unsupported type ArrayType(StringType,false)
  4. Speed up your debug routine!

    Automated exception search integrated into your IDE

  5. 0

    Steps to reproduce: Follow [new spark ml api guide|http://spark.apache.org/docs/latest/ml-guide.html]: {code} val training = sparkContext.parallelize(Seq( LabeledDocument(0L, "a b c d e spark", 1.0), LabeledDocument(1L, "b d", 0.0), LabeledDocument(2L, "spark f g h", 1.0), LabeledDocument(3L, "hadoop mapreduce", 0.0))) // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. val tokenizer = new Tokenizer() .setInputCol("text") .setOutputCol("words") val hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol) .setOutputCol("features") val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF)) val model = pipeline.fit(training) val transformed = model.transform(training) scala> transformed.schema res7: org.apache.spark.sql.StructType = StructType(ArrayBuffer(StructField(id,LongType,false), StructField(text,StringType,true), StructField(label,DoubleType,false), StructField(words,ArrayType(StringType,false),true), StructField(features,org.apache.spark.mllib.linalg.VectorUDT@5172cce4,true))) scala> toDataFrame(transformed) java.lang.IllegalArgumentException: Unsupported type ArrayType(StringType,false) at org.apache.spark.h2o.H2OContextUtils$.dataTypeToClass(H2OContextUtils.scala:175) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47) at scala.collection.TraversableLike$class.map(TraversableLike.scala:244) val transformed2 = transformed.select('features) scala> transformed2.schema res4: org.apache.spark.sql.StructType = 
StructType(ArrayBuffer(StructField(features,org.apache.spark.mllib.linalg.VectorUDT@5172cce4,true))) scala> toDataFrame(transformed2) java.lang.IllegalArgumentException: Unsupported type org.apache.spark.mllib.linalg.VectorUDT@5172cce4 at org.apache.spark.h2o.H2OContextUtils$.dataTypeToClass(H2OContextUtils.scala:175) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at org.apache.spark.h2o.H2OContext$$anonfun$4.apply(H2OContext.scala:282) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47) {code}

    JIRA | 2 years ago | Peter Rudenko
    java.lang.IllegalArgumentException: Unsupported type ArrayType(StringType,false)
  6. 0

    Write Spark dataframe to Redshift: save StructField(user_agent,ArrayType(StringType,true),true)

    Stack Overflow | 6 months ago | jduff1075
    java.lang.IllegalArgumentException: Don't know how to save StructField(user_agent,ArrayType(StringType,true),true) to JDBC

    Not finding the right solution?
    Take a tour to get the most out of Samebug.

    Tired of useless tips?

    Automated exception search integrated into your IDE

    Root Cause Analysis

    1. java.lang.IllegalArgumentException

      Unsupported type: StructType(StructField(id,StringType,true))

      at com.datastax.spark.connector.types.ColumnType$.unsupportedType$1()
    2. spark-cassandra-connector
      DataFrameColumnMapper$$anonfun$1.apply
      1. com.datastax.spark.connector.types.ColumnType$.unsupportedType$1(ColumnType.scala:132)
      2. com.datastax.spark.connector.types.ColumnType$.fromSparkSqlType(ColumnType.scala:155)
      3. com.datastax.spark.connector.mapper.DataFrameColumnMapper$$anonfun$1.apply(DataFrameColumnMapper.scala:18)
      4. com.datastax.spark.connector.mapper.DataFrameColumnMapper$$anonfun$1.apply(DataFrameColumnMapper.scala:16)
      4 frames
    3. Scala
      AbstractTraversable.map
      1. scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
      2. scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
      3. scala.collection.immutable.List.foreach(List.scala:318)
      4. scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
      5. scala.collection.AbstractTraversable.map(Traversable.scala:105)
      5 frames
    4. spark-cassandra-connector
      DataFrameFunctions.createCassandraTable
      1. com.datastax.spark.connector.mapper.DataFrameColumnMapper.newTable(DataFrameColumnMapper.scala:16)
      2. com.datastax.spark.connector.cql.TableDef$.fromDataFrame(Schema.scala:215)
      3. com.datastax.spark.connector.DataFrameFunctions.createCassandraTable(DataFrameFunctions.scala:26)
      3 frames