From 16696759e9a292378cbfdf695a63d6d0cff0d79a Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Mon, 11 May 2015 22:06:56 -0700
Subject: [PATCH] [SQL] Rename Dialect -> ParserDialect.

Author: Reynold Xin

Closes #6071 from rxin/parserdialect and squashes the following commits:

ca2eb31 [Reynold Xin] Rename Dialect -> ParserDialect.
---
 .../catalyst/{Dialect.scala => ParserDialect.scala}  |  2 +-
 .../main/scala/org/apache/spark/sql/SQLContext.scala | 12 ++++++------
 .../scala/org/apache/spark/sql/SQLQuerySuite.scala   |  4 ++--
 .../org/apache/spark/sql/hive/HiveContext.scala      |  4 ++--
 .../spark/sql/hive/execution/SQLQuerySuite.scala     |  6 +++---
 5 files changed, 14 insertions(+), 14 deletions(-)
 rename sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/{Dialect.scala => ParserDialect.scala} (97%)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/Dialect.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
similarity index 97%
rename from sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/Dialect.scala
rename to sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
index 977003493d471..05a92b06f9fd9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/Dialect.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
  *
  */
 @DeveloperApi
-abstract class Dialect {
+abstract class ParserDialect {
   // this is the main function that will be implemented by sql parser.
   def parse(sqlText: String): LogicalPlan
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 28fc9d04436f7..648021806f3ee 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.catalyst.errors.DialectException
 import org.apache.spark.sql.catalyst.optimizer.{DefaultOptimizer, Optimizer}
 import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect
 import org.apache.spark.sql.catalyst.{CatalystTypeConverters, ScalaReflection, expressions}
 import org.apache.spark.sql.execution.{Filter, _}
 import org.apache.spark.sql.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
@@ -49,7 +49,7 @@ import org.apache.spark.{Partition, SparkContext}
 
 /**
  * Currently we support the default dialect named "sql", associated with the class
- * [[DefaultDialect]]
+ * [[DefaultParserDialect]]
  *
  * And we can also provide custom SQL Dialect, for example in Spark SQL CLI:
  * {{{
@@ -74,7 +74,7 @@ import org.apache.spark.{Partition, SparkContext}
 *-- "hiveql" (for HiveContext)
 * }}}
 */
-private[spark] class DefaultDialect extends Dialect {
+private[spark] class DefaultParserDialect extends ParserDialect {
   @transient
   protected val sqlParser = new catalyst.SqlParser
 
@@ -176,10 +176,10 @@ class SQLContext(@transient val sparkContext: SparkContext)
   @transient
   protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect().parse(_))
 
-  protected[sql] def getSQLDialect(): Dialect = {
+  protected[sql] def getSQLDialect(): ParserDialect = {
     try {
       val clazz = Utils.classForName(dialectClassName)
-      clazz.newInstance().asInstanceOf[Dialect]
+      clazz.newInstance().asInstanceOf[ParserDialect]
     } catch {
       case NonFatal(e) =>
         // Since we didn't find the available SQL Dialect, it will fail even for SET command:
@@ -209,7 +209,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
   protected[sql] val defaultSession = createSession()
 
   protected[sql] def dialectClassName = if (conf.dialect == "sql") {
-    classOf[DefaultDialect].getCanonicalName
+    classOf[DefaultParserDialect].getCanonicalName
   } else {
     conf.dialect
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index b44eb223c80c5..ec0e76cde6f7c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.test.TestSQLContext.{udf => _, _}
 import org.apache.spark.sql.types._
 
 /** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect
 
 class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
   // Make sure the tables are loaded.
@@ -94,7 +94,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
       newContext.sql("SELECT 1")
     }
     // test if the dialect set back to DefaultSQLDialect
-    assert(newContext.getSQLDialect().getClass === classOf[DefaultDialect])
+    assert(newContext.getSQLDialect().getClass === classOf[DefaultParserDialect])
   }
 
   test("SPARK-4625 support SORT BY in SimpleSQLParser & DSL") {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 3bab648e31d1f..61e8c154e8c3c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -22,7 +22,7 @@ import java.sql.Timestamp
 import java.util.{ArrayList => JArrayList}
 
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect
 
 import scala.collection.JavaConversions._
 import scala.language.implicitConversions
@@ -54,7 +54,7 @@ import org.apache.spark.util.Utils
 /**
  * This is the HiveQL Dialect, this dialect is strongly bind with HiveContext
  */
-private[hive] class HiveQLDialect extends Dialect {
+private[hive] class HiveQLDialect extends ParserDialect {
   override def parse(sqlText: String): LogicalPlan = {
     HiveQl.parseSql(sqlText)
   }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 0d739dead4c73..a5744ccc68a47 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.execution
 
 import org.apache.spark.sql.catalyst.analysis.EliminateSubQueries
 import org.apache.spark.sql.catalyst.errors.DialectException
-import org.apache.spark.sql.DefaultDialect
+import org.apache.spark.sql.DefaultParserDialect
 import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SQLConf}
 import org.apache.spark.sql.hive.MetastoreRelation
 import org.apache.spark.sql.hive.test.TestHive
@@ -53,7 +53,7 @@ case class WindowData(
     area: String,
     product: Int)
 /** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect
 
 /**
  * A collection of hive query tests where we generate the answers ourselves instead of depending on
@@ -247,7 +247,7 @@ class SQLQuerySuite extends QueryTest {
 
     // set the dialect back to the DefaultSQLDialect
     sql("SET spark.sql.dialect=sql")
-    assert(getSQLDialect().getClass === classOf[DefaultDialect])
+    assert(getSQLDialect().getClass === classOf[DefaultParserDialect])
     sql("SET spark.sql.dialect=hiveql")
     assert(getSQLDialect().getClass === classOf[HiveQLDialect])
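
The rename is mechanical, but the patch also shows how a dialect plugs in: SQLContext.getSQLDialect() loads the class named by spark.sql.dialect reflectively and calls newInstance(), so a custom dialect needs a zero-argument constructor, and a load failure surfaces through the DialectException imported above. A minimal sketch follows, mirroring the MyDialect helpers in the two test suites; the package and class name are hypothetical and not part of the patch. Note that DefaultParserDialect is private[spark], so a subclass must live under org.apache.spark, while code outside that hierarchy would extend the public ParserDialect and implement parse(sqlText: String): LogicalPlan itself.

    package org.apache.spark.sql.example  // hypothetical package

    import org.apache.spark.sql.DefaultParserDialect

    // Reuses the default "sql" parser wholesale; a real dialect would
    // override parse() instead. The zero-argument constructor is what
    // getSQLDialect()'s newInstance() call requires.
    class MyParserDialect extends DefaultParserDialect

Selecting the dialect at runtime then works exactly as in the tests, given a SQLContext instance sqlContext:

    sqlContext.sql("SET spark.sql.dialect=org.apache.spark.sql.example.MyParserDialect")
    sqlContext.sql("SELECT 1")  // parsed by MyParserDialect from here on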