[SPARK-20410][SQL] Make sparkConf a def in SharedSQLContext
## What changes were proposed in this pull request?
It is kind of annoying that `SharedSQLContext.sparkConf` is a `val`: test suites that override it cannot call `super` on it to build on the base configuration. This PR makes it a `def`.
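
As a rough sketch of the difference (illustrative only; `ConfBase` and `MyQuerySuite` are made-up names standing in for `SharedSQLContext` and a concrete test suite):

```scala
import org.apache.spark.SparkConf

// When the parent member is a def, a subclass can refer to super.sparkConf
// and layer its own settings on top of the base configuration.
trait ConfBase {
  protected def sparkConf: SparkConf = new SparkConf()
}

class MyQuerySuite extends ConfBase {
  // If sparkConf were a val, `super.sparkConf` would not compile, so the
  // subclass could only shadow the parent's conf or mutate it from beforeAll.
  override protected def sparkConf: SparkConf = super.sparkConf
    .set("spark.sql.codegen.fallback", "false")
}
```

The overriding suites in the diff below all follow this `super.sparkConf.set(...)` pattern.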

## How was this patch tested?
Existing tests.

Author: Herman van Hovell <hvanhovell@databricks.com>

Closes #17705 from hvanhovell/SPARK-20410.
hvanhovell committed Apr 20, 2017
1 parent d95e4d9 commit 0332063
Showing 7 changed files with 32 additions and 43 deletions.

Aggregate hash map suites:

@@ -19,13 +19,12 @@ package org.apache.spark.sql
 
 import org.scalatest.BeforeAndAfter
 
-class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
+import org.apache.spark.SparkConf
 
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
-    super.beforeAll()
-  }
+class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
 
   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
@@ -38,12 +37,9 @@ class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
 }
 
 class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
-
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
 
   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
@@ -55,15 +51,14 @@ class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
   }
 }
 
-class TwoLevelAggregateHashMapWithVectorizedMapSuite extends DataFrameAggregateSuite with
-  BeforeAndAfter {
+class TwoLevelAggregateHashMapWithVectorizedMapSuite
+  extends DataFrameAggregateSuite
+  with BeforeAndAfter {
 
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
-    sparkConf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
 
   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code

DatasetSerializerRegistratorSuite:

@@ -20,22 +20,20 @@ package org.apache.spark.sql
 import com.esotericsoftware.kryo.{Kryo, Serializer}
 import com.esotericsoftware.kryo.io.{Input, Output}
 
+import org.apache.spark.SparkConf
 import org.apache.spark.serializer.KryoRegistrator
 import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.sql.test.TestSparkSession
 
 /**
  * Test suite to test Kryo custom registrators.
  */
 class DatasetSerializerRegistratorSuite extends QueryTest with SharedSQLContext {
   import testImplicits._
 
-  /**
-   * Initialize the [[TestSparkSession]] with a [[KryoRegistrator]].
-   */
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.kryo.registrator", TestRegistrator().getClass.getCanonicalName)
-    super.beforeAll()
+
+  override protected def sparkConf: SparkConf = {
+    // Make sure we use the KryoRegistrator
+    super.sparkConf.set("spark.kryo.registrator", TestRegistrator().getClass.getCanonicalName)
   }
 
   test("Kryo registrator") {

DataSourceScanExecRedactionSuite:

@@ -18,22 +18,17 @@ package org.apache.spark.sql.execution
 
 import org.apache.hadoop.fs.Path
 
+import org.apache.spark.SparkConf
 import org.apache.spark.sql.QueryTest
 import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.util.Utils
 
 /**
  * Suite that tests the redaction of DataSourceScanExec
 */
 class DataSourceScanExecRedactionSuite extends QueryTest with SharedSQLContext {
 
-  import Utils._
-
-  override def beforeAll(): Unit = {
-    sparkConf.set("spark.redaction.string.regex",
-      "file:/[\\w_]+")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.redaction.string.regex", "file:/[\\w_]+")
 
   test("treeString is redacted") {
     withTempDir { dir =>

FileSourceStrategySuite:

@@ -42,7 +42,7 @@ import org.apache.spark.util.Utils
 class FileSourceStrategySuite extends QueryTest with SharedSQLContext with PredicateHelper {
   import testImplicits._
 
-  protected override val sparkConf = new SparkConf().set("spark.default.parallelism", "1")
+  protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")
 
   test("unpartitioned table, single partition") {
     val table =

CompactibleFileStreamLogSuite:

@@ -28,8 +28,8 @@ import org.apache.spark.sql.test.SharedSQLContext
 class CompactibleFileStreamLogSuite extends SparkFunSuite with SharedSQLContext {
 
   /** To avoid caching of FS objects */
-  override protected val sparkConf =
-    new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+  override protected def sparkConf =
+    super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
 
   import CompactibleFileStreamLog._
 

HDFSMetadataLogSuite:

@@ -38,8 +38,8 @@ import org.apache.spark.util.UninterruptibleThread
 class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext {
 
   /** To avoid caching of FS objects */
-  override protected val sparkConf =
-    new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+  override protected def sparkConf =
+    super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
 
   private implicit def toOption[A](a: A): Option[A] = Option(a)
 

SharedSQLContext:

@@ -30,7 +30,9 @@ import org.apache.spark.sql.{SparkSession, SQLContext}
  */
 trait SharedSQLContext extends SQLTestUtils with BeforeAndAfterEach with Eventually {
 
-  protected val sparkConf = new SparkConf()
+  protected def sparkConf = {
+    new SparkConf().set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName)
+  }
 
   /**
    * The [[TestSparkSession]] to use for all tests in this suite.
@@ -51,8 +53,7 @@
   protected implicit def sqlContext: SQLContext = _spark.sqlContext
 
   protected def createSparkSession: TestSparkSession = {
-    new TestSparkSession(
-      sparkConf.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName))
+    new TestSparkSession(sparkConf)
   }
 
   /**
