diff --git a/README.md b/README.md
index d15c0019..e1eb0436 100644
--- a/README.md
+++ b/README.md
@@ -20,13 +20,13 @@ You can link against this library in your program at the following coordinates:
 ```
 groupId: com.databricks
 artifactId: spark-xml_2.10
-version: 0.3.1
+version: 0.3.2
 ```
 ### Scala 2.11
 ```
 groupId: com.databricks
 artifactId: spark-xml_2.11
-version: 0.3.1
+version: 0.3.2
 ```
 
 ## Using with Spark shell
@@ -34,12 +34,12 @@ This package can be added to Spark using the `--packages` command line option.
 
 ### Spark compiled with Scala 2.10
 ```
-$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.10:0.3.1
+$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.10:0.3.2
 ```
 
 ### Spark compiled with Scala 2.11
 ```
-$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.11:0.3.1
+$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.11:0.3.2
 ```
 
 ## Features
@@ -436,7 +436,7 @@ Automatically infer schema (data types)
 
 ```R
 library(SparkR)
-Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-xml_2.10:0.3.1" "sparkr-shell"')
+Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-xml_2.10:0.3.2" "sparkr-shell"')
 sqlContext <- sparkRSQL.init(sc)
 
 df <- read.df(sqlContext, "books.xml", source = "com.databricks.spark.xml", rowTag = "book")
@@ -449,7 +449,7 @@ You can manually specify schema:
 
 ```R
 library(SparkR)
-Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-csv_2.10:0.3.1" "sparkr-shell"')
+Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-xml_2.10:0.3.2" "sparkr-shell"')
 sqlContext <- sparkRSQL.init(sc)
 customSchema <- structType(
   structField("@id", "string"),
diff --git a/build.sbt b/build.sbt
index c82868e2..8033d6a2 100755
--- a/build.sbt
+++ b/build.sbt
@@ -1,6 +1,6 @@
 name := "spark-xml"
 
-version := "0.3.2-SNAPSHOT"
+version := "0.3.2"
 
 organization := "com.databricks"
 