Skip to content

Commit

Permalink
remove scala-side api
Browse files Browse the repository at this point in the history
  • Loading branch information
goldmedal committed Sep 26, 2017
1 parent 7525b48 commit 350a93d
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 50 deletions.
34 changes: 0 additions & 34 deletions sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
Original file line number Diff line number Diff line change
Expand Up @@ -455,40 +455,6 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
csv(Seq(path): _*)
}

/**
 * Loads a `JavaRDD[String]` storing CSV rows and returns the result as a `DataFrame`.
 *
 * If the schema is not specified using `schema` function and `inferSchema` option is enabled,
 * this function goes through the input once to determine the input schema.
 *
 * If the schema is not specified using `schema` function and `inferSchema` option is disabled,
 * it determines the columns as string types and it reads only the first line to determine the
 * names and the number of fields.
 *
 * @param csvRDD input RDD with one CSV row per record
 * @since 2.2.0
 */
@deprecated("Use csv(Dataset[String]) instead.", "2.2.0")
def csv(csvRDD: JavaRDD[String]): DataFrame = csv(csvRDD.rdd)

/**
 * Loads a `RDD[String]` storing CSV rows and returns the result as a `DataFrame`.
 *
 * If the schema is not specified using `schema` function and `inferSchema` option is enabled,
 * this function goes through the input once to determine the input schema.
 *
 * If the schema is not specified using `schema` function and `inferSchema` option is disabled,
 * it determines the columns as string types and it reads only the first line to determine the
 * names and the number of fields.
 *
 * @param csvRDD input RDD with one CSV row per record
 * @since 2.2.0
 */
@deprecated("Use csv(Dataset[String]) instead.", "2.2.0")
def csv(csvRDD: RDD[String]): DataFrame = {
  // Wrap the raw string RDD in a Dataset[String] so it can reuse the
  // Dataset-based CSV parsing path (the non-deprecated overload).
  csv(sparkSession.createDataset(csvRDD)(Encoders.STRING))
}

/**
* Loads a `Dataset[String]` storing CSV rows and returns the result as a `DataFrame`.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,22 +146,6 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
verifyCars(carsWithoutHeader, withHeader = false, checkTypes = false)
}

test("simple csv test with string RDD") {
  // Read the cars fixture as a plain RDD of raw CSV lines.
  val rawLines = spark.sparkContext.textFile(testFile(carsFile))

  // First pass: treat the first row as a header and infer column types.
  val withHeaderReader = spark.read
    .option("header", "true")
    .option("inferSchema", "true")
  val carsInferred = withHeaderReader.csv(rawLines)
  verifyCars(carsInferred, withHeader = true, checkTypes = true)

  // Second pass: no header, so every row (including the first) is data
  // and all columns default to string type.
  val headerlessReader = spark.read.option("header", "false")
  val carsRaw = headerlessReader.csv(rawLines)
  verifyCars(carsRaw, withHeader = false, checkTypes = false)
}

test("test inferring booleans") {
val result = spark.read
.format("csv")
Expand Down

0 comments on commit 350a93d

Please sign in to comment.