From 4a2c375be2bcd98cc7e00bea920fd6a0f68a4e14 Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Tue, 16 Aug 2016 21:35:39 -0700 Subject: [PATCH 001/270] [SPARK-17084][SQL] Rename ParserUtils.assert to validate ## What changes were proposed in this pull request? This PR renames `ParserUtils.assert` to `ParserUtils.validate`. This is done because this method is used to check requirements, and not to check if the program is in an invalid state. ## How was this patch tested? Simple rename. Compilation should do. Author: Herman van Hovell Closes #14665 from hvanhovell/SPARK-17084. --- .../spark/sql/catalyst/parser/AstBuilder.scala | 14 +++++++------- .../spark/sql/catalyst/parser/ParserUtils.scala | 4 ++-- .../spark/sql/execution/SparkSqlParser.scala | 5 ++--- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 25c8445b4d33f..09b650ce18790 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -132,7 +132,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { // Build the insert clauses. val inserts = ctx.multiInsertQueryBody.asScala.map { body => - assert(body.querySpecification.fromClause == null, + validate(body.querySpecification.fromClause == null, "Multi-Insert queries cannot have a FROM clause in their individual SELECT statements", body) @@ -596,7 +596,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { // function takes X PERCENT as the input and the range of X is [0, 100], we need to // adjust the fraction. val eps = RandomSampler.roundingEpsilon - assert(fraction >= 0.0 - eps && fraction <= 1.0 + eps, + validate(fraction >= 0.0 - eps && fraction <= 1.0 + eps, s"Sampling fraction ($fraction) must be on interval [0, 1]", ctx) Sample(0.0, fraction, withReplacement = false, (math.random * 1000).toInt, query)(true) @@ -664,7 +664,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { // Get the backing expressions. val expressions = ctx.expression.asScala.map { eCtx => val e = expression(eCtx) - assert(e.foldable, "All expressions in an inline table must be constants.", eCtx) + validate(e.foldable, "All expressions in an inline table must be constants.", eCtx) e } @@ -686,7 +686,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { val baseAttributes = structType.toAttributes.map(_.withNullability(true)) val attributes = if (ctx.identifierList != null) { val aliases = visitIdentifierList(ctx.identifierList) - assert(aliases.size == baseAttributes.size, + validate(aliases.size == baseAttributes.size, "Number of aliases must match the number of fields in an inline table.", ctx) baseAttributes.zip(aliases).map(p => p._1.withName(p._2)) } else { @@ -1094,7 +1094,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { // We currently only allow foldable integers. 
def value: Int = { val e = expression(ctx.expression) - assert(e.resolved && e.foldable && e.dataType == IntegerType, + validate(e.resolved && e.foldable && e.dataType == IntegerType, "Frame bound value must be a constant integer.", ctx) e.eval().asInstanceOf[Int] @@ -1347,7 +1347,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { */ override def visitInterval(ctx: IntervalContext): Literal = withOrigin(ctx) { val intervals = ctx.intervalField.asScala.map(visitIntervalField) - assert(intervals.nonEmpty, "at least one time unit should be given for interval literal", ctx) + validate(intervals.nonEmpty, "at least one time unit should be given for interval literal", ctx) Literal(intervals.reduce(_.add(_))) } @@ -1374,7 +1374,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { case (from, Some(t)) => throw new ParseException(s"Intervals FROM $from TO $t are not supported.", ctx) } - assert(interval != null, "No interval can be constructed", ctx) + validate(interval != null, "No interval can be constructed", ctx) interval } catch { // Handle Exceptions thrown by CalendarInterval diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala index b04ce58e233aa..bc35ae2f55409 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala @@ -77,8 +77,8 @@ object ParserUtils { Origin(Option(token.getLine), Option(token.getCharPositionInLine)) } - /** Assert if a condition holds. If it doesn't throw a parse exception. */ - def assert(f: => Boolean, message: String, ctx: ParserRuleContext): Unit = { + /** Validate the condition. If it doesn't throw a parse exception. */ + def validate(f: => Boolean, message: String, ctx: ParserRuleContext): Unit = { if (!f) { throw new ParseException(message, ctx) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala index 9da2b5a254e28..71c3bd31e02e4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala @@ -18,7 +18,6 @@ package org.apache.spark.sql.execution import scala.collection.JavaConverters._ -import scala.util.Try import org.antlr.v4.runtime.{ParserRuleContext, Token} import org.antlr.v4.runtime.tree.TerminalNode @@ -799,7 +798,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { } /** - * Create an [[AlterTableDiscoverPartitionsCommand]] command + * Create an [[AlterTableRecoverPartitionsCommand]] command * * For example: * {{{ @@ -1182,7 +1181,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { entry("mapkey.delim", ctx.keysTerminatedBy) ++ Option(ctx.linesSeparatedBy).toSeq.map { token => val value = string(token) - assert( + validate( value == "\n", s"LINES TERMINATED BY only supports newline '\\n' right now: $value", ctx) From f7c9ff57c17a950cccdc26aadf8768c899a4d572 Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Tue, 16 Aug 2016 23:09:53 -0700 Subject: [PATCH 002/270] [SPARK-17068][SQL] Make view-usage visible during analysis ## What changes were proposed in this pull request? 
This PR adds a field to subquery alias in order to make the usage of views in a resolved `LogicalPlan` more visible (and more understandable). For example, the following view and query: ```sql create view constants as select 1 as id union all select 1 union all select 42 select * from constants; ``` ...now yields the following analyzed plan: ``` Project [id#39] +- SubqueryAlias c, `default`.`constants` +- Project [gen_attr_0#36 AS id#39] +- SubqueryAlias gen_subquery_0 +- Union :- Union : :- Project [1 AS gen_attr_0#36] : : +- OneRowRelation$ : +- Project [1 AS gen_attr_1#37] : +- OneRowRelation$ +- Project [42 AS gen_attr_2#38] +- OneRowRelation$ ``` ## How was this patch tested? Added tests for the two code paths in `SessionCatalogSuite` (sql/core) and `HiveMetastoreCatalogSuite` (sql/hive) Author: Herman van Hovell Closes #14657 from hvanhovell/SPARK-17068. --- .../sql/catalyst/analysis/Analyzer.scala | 4 +-- .../sql/catalyst/analysis/CheckAnalysis.scala | 4 +-- .../sql/catalyst/catalog/SessionCatalog.scala | 30 ++++++++++--------- .../spark/sql/catalyst/dsl/package.scala | 4 +-- .../sql/catalyst/expressions/subquery.scala | 8 ++--- .../sql/catalyst/optimizer/Optimizer.scala | 8 ++--- .../sql/catalyst/parser/AstBuilder.scala | 4 +-- .../plans/logical/basicLogicalOperators.scala | 7 ++++- .../sql/catalyst/analysis/AnalysisSuite.scala | 4 +-- .../catalog/SessionCatalogSuite.scala | 19 ++++++++---- .../optimizer/ColumnPruningSuite.scala | 8 ++--- .../EliminateSubqueryAliasesSuite.scala | 6 ++-- .../optimizer/JoinOptimizationSuite.scala | 8 ++--- .../sql/catalyst/parser/PlanParserSuite.scala | 2 +- .../scala/org/apache/spark/sql/Dataset.scala | 2 +- .../spark/sql/catalyst/SQLBuilder.scala | 6 ++-- .../sql/execution/datasources/rules.scala | 2 +- .../spark/sql/hive/HiveMetastoreCatalog.scala | 21 ++++++------- .../spark/sql/hive/HiveSessionCatalog.scala | 4 +-- .../sql/hive/HiveMetastoreCatalogSuite.scala | 14 ++++++++- 20 files changed, 94 insertions(+), 71 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index a2a022c2476fb..bd4c19181f647 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -138,7 +138,7 @@ class Analyzer( case u : UnresolvedRelation => val substituted = cteRelations.find(x => resolver(x._1, u.tableIdentifier.table)) .map(_._2).map { relation => - val withAlias = u.alias.map(SubqueryAlias(_, relation)) + val withAlias = u.alias.map(SubqueryAlias(_, relation, None)) withAlias.getOrElse(relation) } substituted.getOrElse(u) @@ -2057,7 +2057,7 @@ class Analyzer( */ object EliminateSubqueryAliases extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { - case SubqueryAlias(_, child) => child + case SubqueryAlias(_, child, _) => child } } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala index 41b7e62d8ccea..e07e9194bee9f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala @@ -141,8 +141,8 @@ trait CheckAnalysis extends PredicateHelper { // Skip projects and subquery aliases added by the Analyzer 
and the SQLBuilder. def cleanQuery(p: LogicalPlan): LogicalPlan = p match { - case SubqueryAlias(_, child) => cleanQuery(child) - case Project(_, child) => cleanQuery(child) + case s: SubqueryAlias => cleanQuery(s.child) + case p: Project => cleanQuery(p.child) case child => child } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala index 00c3db0aac1ac..62d0da076b5a2 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala @@ -411,27 +411,29 @@ class SessionCatalog( } /** - * Return a [[LogicalPlan]] that represents the given table. + * Return a [[LogicalPlan]] that represents the given table or view. * - * If a database is specified in `name`, this will return the table from that database. - * If no database is specified, this will first attempt to return a temporary table with - * the same name, then, if that does not exist, return the table from the current database. + * If a database is specified in `name`, this will return the table/view from that database. + * If no database is specified, this will first attempt to return a temporary table/view with + * the same name, then, if that does not exist, return the table/view from the current database. + * + * If the relation is a view, the relation will be wrapped in a [[SubqueryAlias]] which will + * track the name of the view. */ def lookupRelation(name: TableIdentifier, alias: Option[String] = None): LogicalPlan = { synchronized { val db = formatDatabaseName(name.database.getOrElse(currentDb)) val table = formatTableName(name.table) - val relation = - if (name.database.isDefined || !tempTables.contains(table)) { - val metadata = externalCatalog.getTable(db, table) - SimpleCatalogRelation(db, metadata) - } else { - tempTables(table) + val relationAlias = alias.getOrElse(table) + if (name.database.isDefined || !tempTables.contains(table)) { + val metadata = externalCatalog.getTable(db, table) + val view = Option(metadata.tableType).collect { + case CatalogTableType.VIEW => name } - val qualifiedTable = SubqueryAlias(table, relation) - // If an alias was specified by the lookup, wrap the plan in a subquery so that - // attributes are properly qualified with this alias. 
- alias.map(a => SubqueryAlias(a, qualifiedTable)).getOrElse(qualifiedTable) + SubqueryAlias(relationAlias, SimpleCatalogRelation(db, metadata), view) + } else { + SubqueryAlias(relationAlias, tempTables(table), Option(name)) + } } } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala index 5181dcc786a3d..9f54d709a022d 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala @@ -343,7 +343,7 @@ package object dsl { orderSpec: Seq[SortOrder]): LogicalPlan = Window(windowExpressions, partitionSpec, orderSpec, logicalPlan) - def subquery(alias: Symbol): LogicalPlan = SubqueryAlias(alias.name, logicalPlan) + def subquery(alias: Symbol): LogicalPlan = SubqueryAlias(alias.name, logicalPlan, None) def except(otherPlan: LogicalPlan): LogicalPlan = Except(logicalPlan, otherPlan) @@ -367,7 +367,7 @@ package object dsl { def as(alias: String): LogicalPlan = logicalPlan match { case UnresolvedRelation(tbl, _) => UnresolvedRelation(tbl, Option(alias)) - case plan => SubqueryAlias(alias, plan) + case plan => SubqueryAlias(alias, plan, None) } def repartition(num: Integer): LogicalPlan = diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala index ac44f08897cbd..ddbe937cba9bd 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala @@ -72,7 +72,7 @@ case class ScalarSubquery( override def dataType: DataType = query.schema.fields.head.dataType override def foldable: Boolean = false override def nullable: Boolean = true - override def plan: LogicalPlan = SubqueryAlias(toString, query) + override def plan: LogicalPlan = SubqueryAlias(toString, query, None) override def withNewPlan(plan: LogicalPlan): ScalarSubquery = copy(query = plan) override def toString: String = s"scalar-subquery#${exprId.id} $conditionString" } @@ -100,7 +100,7 @@ case class PredicateSubquery( override lazy val resolved = childrenResolved && query.resolved override lazy val references: AttributeSet = super.references -- query.outputSet override def nullable: Boolean = nullAware - override def plan: LogicalPlan = SubqueryAlias(toString, query) + override def plan: LogicalPlan = SubqueryAlias(toString, query, None) override def withNewPlan(plan: LogicalPlan): PredicateSubquery = copy(query = plan) override def semanticEquals(o: Expression): Boolean = o match { case p: PredicateSubquery => @@ -153,7 +153,7 @@ case class ListQuery(query: LogicalPlan, exprId: ExprId = NamedExpression.newExp override def dataType: DataType = ArrayType(NullType) override def nullable: Boolean = false override def withNewPlan(plan: LogicalPlan): ListQuery = copy(query = plan) - override def plan: LogicalPlan = SubqueryAlias(toString, query) + override def plan: LogicalPlan = SubqueryAlias(toString, query, None) override def toString: String = s"list#${exprId.id}" } @@ -174,6 +174,6 @@ case class Exists(query: LogicalPlan, exprId: ExprId = NamedExpression.newExprId override def children: Seq[Expression] = Seq.empty override def nullable: Boolean = false override def withNewPlan(plan: LogicalPlan): Exists = copy(query = plan) - override def plan: LogicalPlan = 
SubqueryAlias(toString, query) + override def plan: LogicalPlan = SubqueryAlias(toString, query, None) override def toString: String = s"exists#${exprId.id}" } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index e34a478818e98..f97a78b411597 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -1862,7 +1862,7 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { // and Project operators, followed by an optional Filter, followed by an // Aggregate. Traverse the operators recursively. def evalPlan(lp : LogicalPlan) : Map[ExprId, Option[Any]] = lp match { - case SubqueryAlias(_, child) => evalPlan(child) + case SubqueryAlias(_, child, _) => evalPlan(child) case Filter(condition, child) => val bindings = evalPlan(child) if (bindings.isEmpty) bindings @@ -1920,7 +1920,7 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { topPart += p bottomPart = child - case s @ SubqueryAlias(_, child) => + case s @ SubqueryAlias(_, child, _) => topPart += s bottomPart = child @@ -1991,8 +1991,8 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { topPart.reverse.foreach { case Project(projList, _) => subqueryRoot = Project(projList ++ havingInputs, subqueryRoot) - case s @ SubqueryAlias(alias, _) => - subqueryRoot = SubqueryAlias(alias, subqueryRoot) + case s @ SubqueryAlias(alias, _, None) => + subqueryRoot = SubqueryAlias(alias, subqueryRoot, None) case op => sys.error(s"Unexpected operator $op in corelated subquery") } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 09b650ce18790..adf78396d7fc0 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -107,7 +107,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { * This is only used for Common Table Expressions. */ override def visitNamedQuery(ctx: NamedQueryContext): SubqueryAlias = withOrigin(ctx) { - SubqueryAlias(ctx.name.getText, plan(ctx.queryNoWith)) + SubqueryAlias(ctx.name.getText, plan(ctx.queryNoWith), None) } /** @@ -723,7 +723,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { * Create an alias (SubqueryAlias) for a LogicalPlan. 
*/ private def aliasPlan(alias: ParserRuleContext, plan: LogicalPlan): LogicalPlan = { - SubqueryAlias(alias.getText, plan) + SubqueryAlias(alias.getText, plan, None) } /** diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala index 2917d8d2a97aa..af1736e60799b 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala @@ -19,6 +19,7 @@ package org.apache.spark.sql.catalyst.plans.logical import scala.collection.mutable.ArrayBuffer +import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression @@ -693,7 +694,11 @@ case class LocalLimit(limitExpr: Expression, child: LogicalPlan) extends UnaryNo } } -case class SubqueryAlias(alias: String, child: LogicalPlan) extends UnaryNode { +case class SubqueryAlias( + alias: String, + child: LogicalPlan, + view: Option[TableIdentifier]) + extends UnaryNode { override def output: Seq[Attribute] = child.output.map(_.withQualifier(Some(alias))) } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala index 22e1c9be0573d..8971edc7d3b9a 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala @@ -339,8 +339,8 @@ class AnalysisSuite extends AnalysisTest { val query = Project(Seq($"x.key", $"y.key"), Join( - Project(Seq($"x.key"), SubqueryAlias("x", input)), - Project(Seq($"y.key"), SubqueryAlias("y", input)), + Project(Seq($"x.key"), SubqueryAlias("x", input, None)), + Project(Seq($"y.key"), SubqueryAlias("y", input, None)), Inner, None)) assertAnalysisSuccess(query) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala index b31b4406ae600..c9d4fef8056ca 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala @@ -395,31 +395,38 @@ class SessionCatalogSuite extends SparkFunSuite { sessionCatalog.setCurrentDatabase("db2") // If we explicitly specify the database, we'll look up the relation in that database assert(sessionCatalog.lookupRelation(TableIdentifier("tbl1", Some("db2"))) - == SubqueryAlias("tbl1", SimpleCatalogRelation("db2", metastoreTable1))) + == SubqueryAlias("tbl1", SimpleCatalogRelation("db2", metastoreTable1), None)) // Otherwise, we'll first look up a temporary table with the same name assert(sessionCatalog.lookupRelation(TableIdentifier("tbl1")) - == SubqueryAlias("tbl1", tempTable1)) + == SubqueryAlias("tbl1", tempTable1, Some(TableIdentifier("tbl1")))) // Then, if that does not exist, look up the relation in the current database sessionCatalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false) assert(sessionCatalog.lookupRelation(TableIdentifier("tbl1")) - == 
SubqueryAlias("tbl1", SimpleCatalogRelation("db2", metastoreTable1))) + == SubqueryAlias("tbl1", SimpleCatalogRelation("db2", metastoreTable1), None)) } test("lookup table relation with alias") { val catalog = new SessionCatalog(newBasicCatalog()) val alias = "monster" val tableMetadata = catalog.getTableMetadata(TableIdentifier("tbl1", Some("db2"))) - val relation = SubqueryAlias("tbl1", SimpleCatalogRelation("db2", tableMetadata)) + val relation = SubqueryAlias("tbl1", SimpleCatalogRelation("db2", tableMetadata), None) val relationWithAlias = SubqueryAlias(alias, - SubqueryAlias("tbl1", - SimpleCatalogRelation("db2", tableMetadata))) + SimpleCatalogRelation("db2", tableMetadata), None) assert(catalog.lookupRelation( TableIdentifier("tbl1", Some("db2")), alias = None) == relation) assert(catalog.lookupRelation( TableIdentifier("tbl1", Some("db2")), alias = Some(alias)) == relationWithAlias) } + test("lookup view with view name in alias") { + val catalog = new SessionCatalog(newBasicCatalog()) + val tmpView = Range(1, 10, 2, 10) + catalog.createTempView("vw1", tmpView, overrideIfExists = false) + val plan = catalog.lookupRelation(TableIdentifier("vw1"), Option("range")) + assert(plan == SubqueryAlias("range", tmpView, Option(TableIdentifier("vw1")))) + } + test("table exists") { val catalog = new SessionCatalog(newBasicCatalog()) assert(catalog.tableExists(TableIdentifier("tbl1", Some("db2")))) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala index 589607e3ad5cb..5bd1bc80c3b8a 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala @@ -320,16 +320,16 @@ class ColumnPruningSuite extends PlanTest { val query = Project(Seq($"x.key", $"y.key"), Join( - SubqueryAlias("x", input), - BroadcastHint(SubqueryAlias("y", input)), Inner, None)).analyze + SubqueryAlias("x", input, None), + BroadcastHint(SubqueryAlias("y", input, None)), Inner, None)).analyze val optimized = Optimize.execute(query) val expected = Join( - Project(Seq($"x.key"), SubqueryAlias("x", input)), + Project(Seq($"x.key"), SubqueryAlias("x", input, None)), BroadcastHint( - Project(Seq($"y.key"), SubqueryAlias("y", input))), + Project(Seq($"y.key"), SubqueryAlias("y", input, None))), Inner, None).analyze comparePlans(optimized, expected) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala index 9b6d68aee803a..a8aeedbd62759 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala @@ -46,13 +46,13 @@ class EliminateSubqueryAliasesSuite extends PlanTest with PredicateHelper { test("eliminate top level subquery") { val input = LocalRelation('a.int, 'b.int) - val query = SubqueryAlias("a", input) + val query = SubqueryAlias("a", input, None) comparePlans(afterOptimization(query), input) } test("eliminate mid-tree subquery") { val input = LocalRelation('a.int, 'b.int) - val query = Filter(TrueLiteral, SubqueryAlias("a", input)) + val query = Filter(TrueLiteral, SubqueryAlias("a", input, None)) comparePlans( 
afterOptimization(query), Filter(TrueLiteral, LocalRelation('a.int, 'b.int))) @@ -61,7 +61,7 @@ class EliminateSubqueryAliasesSuite extends PlanTest with PredicateHelper { test("eliminate multiple subqueries") { val input = LocalRelation('a.int, 'b.int) val query = Filter(TrueLiteral, - SubqueryAlias("c", SubqueryAlias("b", SubqueryAlias("a", input)))) + SubqueryAlias("c", SubqueryAlias("b", SubqueryAlias("a", input, None), None), None)) comparePlans( afterOptimization(query), Filter(TrueLiteral, LocalRelation('a.int, 'b.int))) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala index c1ebf8b09e08d..dbb3e6a5272ec 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala @@ -97,15 +97,15 @@ class JoinOptimizationSuite extends PlanTest { val query = Project(Seq($"x.key", $"y.key"), Join( - SubqueryAlias("x", input), - BroadcastHint(SubqueryAlias("y", input)), Inner, None)).analyze + SubqueryAlias("x", input, None), + BroadcastHint(SubqueryAlias("y", input, None)), Inner, None)).analyze val optimized = Optimize.execute(query) val expected = Join( - Project(Seq($"x.key"), SubqueryAlias("x", input)), - BroadcastHint(Project(Seq($"y.key"), SubqueryAlias("y", input))), + Project(Seq($"x.key"), SubqueryAlias("x", input, None)), + BroadcastHint(Project(Seq($"y.key"), SubqueryAlias("y", input, None))), Inner, None).analyze comparePlans(optimized, expected) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala index 34d52c75e0af2..7af333b34f723 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala @@ -80,7 +80,7 @@ class PlanParserSuite extends PlanTest { def cte(plan: LogicalPlan, namedPlans: (String, LogicalPlan)*): With = { val ctes = namedPlans.map { case (name, cte) => - name -> SubqueryAlias(name, cte) + name -> SubqueryAlias(name, cte, None) } With(plan, ctes) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala index c119df83b3d71..6da99ce0dd683 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala @@ -967,7 +967,7 @@ class Dataset[T] private[sql]( * @since 1.6.0 */ def as(alias: String): Dataset[T] = withTypedPlan { - SubqueryAlias(alias, logicalPlan) + SubqueryAlias(alias, logicalPlan, None) } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala index 5d93419f357ef..ff8e0f2642055 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala @@ -75,7 +75,7 @@ class SQLBuilder private ( val aliasedOutput = canonicalizedPlan.output.zip(outputNames).map { case (attr, name) => Alias(attr.withQualifier(None), name)() } - val finalPlan = Project(aliasedOutput, SubqueryAlias(finalName, canonicalizedPlan)) + val finalPlan = Project(aliasedOutput, 
SubqueryAlias(finalName, canonicalizedPlan, None)) try { val replaced = finalPlan.transformAllExpressions { @@ -440,7 +440,7 @@ class SQLBuilder private ( object RemoveSubqueriesAboveSQLTable extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { - case SubqueryAlias(_, t @ ExtractSQLTable(_)) => t + case SubqueryAlias(_, t @ ExtractSQLTable(_), _) => t } } @@ -557,7 +557,7 @@ class SQLBuilder private ( } private def addSubquery(plan: LogicalPlan): SubqueryAlias = { - SubqueryAlias(newSubqueryName(), plan) + SubqueryAlias(newSubqueryName(), plan, None) } private def addSubqueryIfNeeded(plan: LogicalPlan): LogicalPlan = plan match { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala index fc8d8c3667901..5eb2f0a9ff034 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala @@ -55,7 +55,7 @@ class ResolveDataSource(sparkSession: SparkSession) extends Rule[LogicalPlan] { s"${u.tableIdentifier.database.get}") } val plan = LogicalRelation(dataSource.resolveRelation()) - u.alias.map(a => SubqueryAlias(u.alias.get, plan)).getOrElse(plan) + u.alias.map(a => SubqueryAlias(u.alias.get, plan, None)).getOrElse(plan) } catch { case e: ClassNotFoundException => u case e: Exception => diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index c7c1acda25db2..7118edabb83cf 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -162,24 +162,21 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log if (table.properties.get(DATASOURCE_PROVIDER).isDefined) { val dataSourceTable = cachedDataSourceTables(qualifiedTableName) - val qualifiedTable = SubqueryAlias(qualifiedTableName.name, dataSourceTable) + val qualifiedTable = SubqueryAlias(qualifiedTableName.name, dataSourceTable, None) // Then, if alias is specified, wrap the table with a Subquery using the alias. // Otherwise, wrap the table with a Subquery using the table name. 
- alias.map(a => SubqueryAlias(a, qualifiedTable)).getOrElse(qualifiedTable) + alias.map(a => SubqueryAlias(a, qualifiedTable, None)).getOrElse(qualifiedTable) } else if (table.tableType == CatalogTableType.VIEW) { val viewText = table.viewText.getOrElse(sys.error("Invalid view without text.")) - alias match { - case None => - SubqueryAlias(table.identifier.table, - sparkSession.sessionState.sqlParser.parsePlan(viewText)) - case Some(aliasText) => - SubqueryAlias(aliasText, sessionState.sqlParser.parsePlan(viewText)) - } + SubqueryAlias( + alias.getOrElse(table.identifier.table), + sparkSession.sessionState.sqlParser.parsePlan(viewText), + Option(table.identifier)) } else { val qualifiedTable = MetastoreRelation( qualifiedTableName.database, qualifiedTableName.name)(table, client, sparkSession) - alias.map(a => SubqueryAlias(a, qualifiedTable)).getOrElse(qualifiedTable) + alias.map(a => SubqueryAlias(a, qualifiedTable, None)).getOrElse(qualifiedTable) } } @@ -383,7 +380,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log // Read path case relation: MetastoreRelation if shouldConvertMetastoreParquet(relation) => val parquetRelation = convertToParquetRelation(relation) - SubqueryAlias(relation.tableName, parquetRelation) + SubqueryAlias(relation.tableName, parquetRelation, None) } } } @@ -421,7 +418,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log // Read path case relation: MetastoreRelation if shouldConvertMetastoreOrc(relation) => val orcRelation = convertToOrcRelation(relation) - SubqueryAlias(relation.tableName, orcRelation) + SubqueryAlias(relation.tableName, orcRelation, None) } } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala index c59ac3dcafea4..ebed9eb6e7dca 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala @@ -68,10 +68,10 @@ private[sql] class HiveSessionCatalog( metastoreCatalog.lookupRelation(newName, alias) } else { val relation = tempTables(table) - val tableWithQualifiers = SubqueryAlias(table, relation) + val tableWithQualifiers = SubqueryAlias(table, relation, None) // If an alias was specified by the lookup, wrap the plan in a subquery so that // attributes are properly qualified with this alias. 
- alias.map(a => SubqueryAlias(a, tableWithQualifiers)).getOrElse(tableWithQualifiers) + alias.map(a => SubqueryAlias(a, tableWithQualifiers, None)).getOrElse(tableWithQualifiers) } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala index 9d72367f437bf..0477ea4d4c380 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala @@ -23,12 +23,13 @@ import org.apache.spark.sql.{QueryTest, Row, SaveMode} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.CatalogTableType import org.apache.spark.sql.catalyst.parser.CatalystSqlParser +import org.apache.spark.sql.catalyst.plans.logical.SubqueryAlias import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.{ExamplePointUDT, SQLTestUtils} import org.apache.spark.sql.types.{DecimalType, IntegerType, StringType, StructField, StructType} -class HiveMetastoreCatalogSuite extends TestHiveSingleton { +class HiveMetastoreCatalogSuite extends TestHiveSingleton with SQLTestUtils { import spark.implicits._ test("struct field should accept underscore in sub-column name") { @@ -57,6 +58,17 @@ class HiveMetastoreCatalogSuite extends TestHiveSingleton { val dataType = StructType((1 to 100).map(field)) assert(CatalystSqlParser.parseDataType(dataType.catalogString) == dataType) } + + test("view relation") { + withView("vw1") { + spark.sql("create view vw1 as select 1 as id") + val plan = spark.sql("select id from vw1").queryExecution.analyzed + val aliases = plan.collect { + case x @ SubqueryAlias("vw1", _, Some(TableIdentifier("vw1", Some("default")))) => x + } + assert(aliases.size == 1) + } + } } class DataSourceWithHiveMetastoreCatalogSuite From 0f6aa8afaacdf0ceca9c2c1650ca26a5c167ae69 Mon Sep 17 00:00:00 2001 From: mvervuurt Date: Tue, 16 Aug 2016 23:12:59 -0700 Subject: [PATCH 003/270] [MINOR][DOC] Fix the descriptions for `properties` argument in the documenation for jdbc APIs ## What changes were proposed in this pull request? This should be credited to mvervuurt. The main purpose of this PR is - simply to include the change for the same instance in `DataFrameReader` just to match up. - just avoid duplicately verifying the PR (as I already did). The documentation for both should be the same because both assume the `properties` should be the same `dict` for the same option. ## How was this patch tested? Manually building Python documentation. This will produce the output as below: - `DataFrameReader` ![2016-08-17 11 12 00](https://cloud.githubusercontent.com/assets/6477701/17722764/b3f6568e-646f-11e6-8b75-4fb672f3f366.png) - `DataFrameWriter` ![2016-08-17 11 12 10](https://cloud.githubusercontent.com/assets/6477701/17722765/b58cb308-646f-11e6-841a-32f19800d139.png) Closes #14624 Author: hyukjinkwon Author: mvervuurt Closes #14677 from HyukjinKwon/typo-python. 
--- python/pyspark/sql/readwriter.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py index 4020bb3fa45b0..64de33e8ec0a8 100644 --- a/python/pyspark/sql/readwriter.py +++ b/python/pyspark/sql/readwriter.py @@ -401,8 +401,9 @@ def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPar :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` - :param properties: a dictionary of JDBC database connection arguments; normally, - at least a "user" and "password" property should be included + :param properties: a dictionary of JDBC database connection arguments. Normally at + least properties "user" and "password" with their corresponding values. + For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame """ if properties is None: @@ -716,9 +717,9 @@ def jdbc(self, url, table, mode=None, properties=None): * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` (default case): Throw an exception if data already exists. - :param properties: JDBC database connection arguments, a list of - arbitrary string tag/value. Normally at least a - "user" and "password" property should be included. + :param properties: a dictionary of JDBC database connection arguments. Normally at + least properties "user" and "password" with their corresponding values. + For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } """ if properties is None: properties = dict() From 4d0cc84afca9efd4541a2e8d583e3e0f2df37c0d Mon Sep 17 00:00:00 2001 From: jiangxingbo Date: Wed, 17 Aug 2016 14:22:36 +0200 Subject: [PATCH 004/270] [SPARK-17032][SQL] Add test cases for methods in ParserUtils. ## What changes were proposed in this pull request? Currently methods in `ParserUtils` are tested indirectly, we should add test cases in `ParserUtilsSuite` to verify their integrity directly. ## How was this patch tested? New test cases in `ParserUtilsSuite` Author: jiangxingbo Closes #14620 from jiangxb1987/parserUtils. --- .../sql/catalyst/parser/ParserUtils.scala | 9 +- .../catalyst/parser/ParserUtilsSuite.scala | 126 +++++++++++++++++- 2 files changed, 128 insertions(+), 7 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala index bc35ae2f55409..cb89a9679a8cf 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala @@ -31,11 +31,7 @@ import org.apache.spark.sql.catalyst.trees.{CurrentOrigin, Origin} object ParserUtils { /** Get the command which created the token. */ def command(ctx: ParserRuleContext): String = { - command(ctx.getStart.getInputStream) - } - - /** Get the command which created the token. */ - def command(stream: CharStream): String = { + val stream = ctx.getStart.getInputStream stream.getText(Interval.of(0, stream.size())) } @@ -74,7 +70,8 @@ object ParserUtils { /** Get the origin (line and position) of the token. 
*/ def position(token: Token): Origin = { - Origin(Option(token.getLine), Option(token.getCharPositionInLine)) + val opt = Option(token) + Origin(opt.map(_.getLine), opt.map(_.getCharPositionInLine)) } /** Validate the condition. If it doesn't throw a parse exception. */ diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala index d090daf7b41eb..d5748a4ff18f8 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ParserUtilsSuite.scala @@ -16,12 +16,53 @@ */ package org.apache.spark.sql.catalyst.parser +import org.antlr.v4.runtime.{CommonTokenStream, ParserRuleContext} + import org.apache.spark.SparkFunSuite +import org.apache.spark.sql.catalyst.parser.SqlBaseParser._ +import org.apache.spark.sql.catalyst.trees.{CurrentOrigin, Origin} class ParserUtilsSuite extends SparkFunSuite { import ParserUtils._ + val setConfContext = buildContext("set example.setting.name=setting.value") { parser => + parser.statement().asInstanceOf[SetConfigurationContext] + } + + val showFuncContext = buildContext("show functions foo.bar") { parser => + parser.statement().asInstanceOf[ShowFunctionsContext] + } + + val descFuncContext = buildContext("describe function extended bar") { parser => + parser.statement().asInstanceOf[DescribeFunctionContext] + } + + val showDbsContext = buildContext("show databases like 'identifier_with_wildcards'") { parser => + parser.statement().asInstanceOf[ShowDatabasesContext] + } + + val createDbContext = buildContext( + """ + |CREATE DATABASE IF NOT EXISTS database_name + |COMMENT 'database_comment' LOCATION '/home/user/db' + |WITH DBPROPERTIES ('a'='a', 'b'='b', 'c'='c') + """.stripMargin + ) { parser => + parser.statement().asInstanceOf[CreateDatabaseContext] + } + + val emptyContext = buildContext("") { parser => + parser.statement + } + + private def buildContext[T](command: String)(toResult: SqlBaseParser => T): T = { + val lexer = new SqlBaseLexer(new ANTLRNoCaseStringStream(command)) + val tokenStream = new CommonTokenStream(lexer) + val parser = new SqlBaseParser(tokenStream) + toResult(parser) + } + test("unescapeSQLString") { // scalastyle:off nonascii @@ -61,5 +102,88 @@ class ParserUtilsSuite extends SparkFunSuite { // scalastyle:on nonascii } - // TODO: Add test cases for other methods in ParserUtils + test("command") { + assert(command(setConfContext) == "set example.setting.name=setting.value") + assert(command(showFuncContext) == "show functions foo.bar") + assert(command(descFuncContext) == "describe function extended bar") + assert(command(showDbsContext) == "show databases like 'identifier_with_wildcards'") + } + + test("operationNotAllowed") { + val errorMessage = "parse.fail.operation.not.allowed.error.message" + val e = intercept[ParseException] { + operationNotAllowed(errorMessage, showFuncContext) + }.getMessage + assert(e.contains("Operation not allowed")) + assert(e.contains(errorMessage)) + } + + test("checkDuplicateKeys") { + val properties = Seq(("a", "a"), ("b", "b"), ("c", "c")) + checkDuplicateKeys[String](properties, createDbContext) + + val properties2 = Seq(("a", "a"), ("b", "b"), ("a", "c")) + val e = intercept[ParseException] { + checkDuplicateKeys(properties2, createDbContext) + }.getMessage + assert(e.contains("Found duplicate keys")) + } + + test("source") { + assert(source(setConfContext) == 
"set example.setting.name=setting.value") + assert(source(showFuncContext) == "show functions foo.bar") + assert(source(descFuncContext) == "describe function extended bar") + assert(source(showDbsContext) == "show databases like 'identifier_with_wildcards'") + } + + test("remainder") { + assert(remainder(setConfContext) == "") + assert(remainder(showFuncContext) == "") + assert(remainder(descFuncContext) == "") + assert(remainder(showDbsContext) == "") + + assert(remainder(setConfContext.SET.getSymbol) == " example.setting.name=setting.value") + assert(remainder(showFuncContext.FUNCTIONS.getSymbol) == " foo.bar") + assert(remainder(descFuncContext.EXTENDED.getSymbol) == " bar") + assert(remainder(showDbsContext.LIKE.getSymbol) == " 'identifier_with_wildcards'") + } + + test("string") { + assert(string(showDbsContext.pattern) == "identifier_with_wildcards") + assert(string(createDbContext.comment) == "database_comment") + + assert(string(createDbContext.locationSpec.STRING) == "/home/user/db") + } + + test("position") { + assert(position(setConfContext.start) == Origin(Some(1), Some(0))) + assert(position(showFuncContext.stop) == Origin(Some(1), Some(19))) + assert(position(descFuncContext.describeFuncName.start) == Origin(Some(1), Some(27))) + assert(position(createDbContext.locationSpec.start) == Origin(Some(3), Some(27))) + assert(position(emptyContext.stop) == Origin(None, None)) + } + + test("validate") { + val f1 = { ctx: ParserRuleContext => + ctx.children != null && !ctx.children.isEmpty + } + val message = "ParserRuleContext should not be empty." + validate(f1(showFuncContext), message, showFuncContext) + + val e = intercept[ParseException] { + validate(f1(emptyContext), message, emptyContext) + }.getMessage + assert(e.contains(message)) + } + + test("withOrigin") { + val ctx = createDbContext.locationSpec + val current = CurrentOrigin.get + val (location, origin) = withOrigin(ctx) { + (string(ctx.STRING), CurrentOrigin.get) + } + assert(location == "/home/user/db") + assert(origin == Origin(Some(3), Some(27))) + assert(CurrentOrigin.get == current) + } } From 363793f2bf57205f1d753d4705583aaf441849b5 Mon Sep 17 00:00:00 2001 From: "wm624@hotmail.com" Date: Wed, 17 Aug 2016 06:15:04 -0700 Subject: [PATCH 005/270] [SPARK-16444][SPARKR] Isotonic Regression wrapper in SparkR ## What changes were proposed in this pull request? (Please fill in changes proposed in this fix) Add Isotonic Regression wrapper in SparkR Wrappers in R and Scala are added. Unit tests Documentation ## How was this patch tested? Manually tested with sudo ./R/run-tests.sh (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) Author: wm624@hotmail.com Closes #14182 from wangmiao1981/isoR. 
--- R/pkg/NAMESPACE | 3 +- R/pkg/R/generics.R | 4 + R/pkg/R/mllib.R | 118 +++++++++++++++++ R/pkg/inst/tests/testthat/test_mllib.R | 32 +++++ .../ml/r/IsotonicRegressionWrapper.scala | 119 ++++++++++++++++++ .../org/apache/spark/ml/r/RWrappers.scala | 2 + 6 files changed, 277 insertions(+), 1 deletion(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index aaab92f5cfc7b..1e23b233c1116 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -24,7 +24,8 @@ exportMethods("glm", "spark.kmeans", "fitted", "spark.naiveBayes", - "spark.survreg") + "spark.survreg", + "spark.isoreg") # Job group lifecycle management methods export("setJobGroup", diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 52ab730e215c2..ebacc11741812 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -1304,6 +1304,10 @@ setGeneric("spark.naiveBayes", function(data, formula, ...) { standardGeneric("s #' @export setGeneric("spark.survreg", function(data, formula, ...) { standardGeneric("spark.survreg") }) +#' @rdname spark.isoreg +#' @export +setGeneric("spark.isoreg", function(data, formula, ...) { standardGeneric("spark.isoreg") }) + #' @rdname write.ml #' @export setGeneric("write.ml", function(object, path, ...) { standardGeneric("write.ml") }) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index 6f6e2fc255c3f..0dcc54d7af09b 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -53,6 +53,13 @@ setClass("AFTSurvivalRegressionModel", representation(jobj = "jobj")) #' @note KMeansModel since 2.0.0 setClass("KMeansModel", representation(jobj = "jobj")) +#' S4 class that represents an IsotonicRegressionModel +#' +#' @param jobj a Java object reference to the backing Scala IsotonicRegressionModel +#' @export +#' @note IsotonicRegressionModel since 2.1.0 +setClass("IsotonicRegressionModel", representation(jobj = "jobj")) + #' Saves the MLlib model to the input path #' #' Saves the MLlib model to the input path. For more information, see the specific @@ -62,6 +69,7 @@ setClass("KMeansModel", representation(jobj = "jobj")) #' @export #' @seealso \link{spark.glm}, \link{glm} #' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} +#' @seealso \link{spark.isoreg} #' @seealso \link{read.ml} NULL @@ -74,6 +82,7 @@ NULL #' @export #' @seealso \link{spark.glm}, \link{glm} #' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} +#' @seealso \link{spark.isoreg} NULL #' Generalized Linear Models @@ -299,6 +308,94 @@ setMethod("summary", signature(object = "NaiveBayesModel"), return(list(apriori = apriori, tables = tables)) }) +#' Isotonic Regression Model +#' +#' Fits an Isotonic Regression model against a Spark DataFrame, similarly to R's isoreg(). +#' Users can print, make predictions on the produced model and save the model to the input path. +#' +#' @param data SparkDataFrame for training +#' @param formula A symbolic description of the model to be fitted. Currently only a few formula +#' operators are supported, including '~', '.', ':', '+', and '-'. +#' @param isotonic Whether the output sequence should be isotonic/increasing (TRUE) or +#' antitonic/decreasing (FALSE) +#' @param featureIndex The index of the feature if \code{featuresCol} is a vector column (default: `0`), +#' no effect otherwise +#' @param weightCol The weight column name. 
+#' @return \code{spark.isoreg} returns a fitted Isotonic Regression model +#' @rdname spark.isoreg +#' @aliases spark.isoreg,SparkDataFrame,formula-method +#' @name spark.isoreg +#' @export +#' @examples +#' \dontrun{ +#' sparkR.session() +#' data <- list(list(7.0, 0.0), list(5.0, 1.0), list(3.0, 2.0), +#' list(5.0, 3.0), list(1.0, 4.0)) +#' df <- createDataFrame(data, c("label", "feature")) +#' model <- spark.isoreg(df, label ~ feature, isotonic = FALSE) +#' # return model boundaries and prediction as lists +#' result <- summary(model, df) +#' # prediction based on fitted model +#' predict_data <- list(list(-2.0), list(-1.0), list(0.5), +#' list(0.75), list(1.0), list(2.0), list(9.0)) +#' predict_df <- createDataFrame(predict_data, c("feature")) +#' # get prediction column +#' predict_result <- collect(select(predict(model, predict_df), "prediction")) +#' +#' # save fitted model to input path +#' path <- "path/to/model" +#' write.ml(model, path) +#' +#' # can also read back the saved model and print +#' savedModel <- read.ml(path) +#' summary(savedModel) +#' } +#' @note spark.isoreg since 2.1.0 +setMethod("spark.isoreg", signature(data = "SparkDataFrame", formula = "formula"), + function(data, formula, isotonic = TRUE, featureIndex = 0, weightCol = NULL) { + formula <- paste0(deparse(formula), collapse = "") + + if (is.null(weightCol)) { + weightCol <- "" + } + + jobj <- callJStatic("org.apache.spark.ml.r.IsotonicRegressionWrapper", "fit", + data@sdf, formula, as.logical(isotonic), as.integer(featureIndex), + as.character(weightCol)) + return(new("IsotonicRegressionModel", jobj = jobj)) + }) + +# Predicted values based on an isotonicRegression model + +#' @param object a fitted IsotonicRegressionModel +#' @param newData SparkDataFrame for testing +#' @return \code{predict} returns a SparkDataFrame containing predicted values +#' @rdname spark.isoreg +#' @aliases predict,IsotonicRegressionModel,SparkDataFrame-method +#' @export +#' @note predict(IsotonicRegressionModel) since 2.1.0 +setMethod("predict", signature(object = "IsotonicRegressionModel"), + function(object, newData) { + return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + }) + +# Get the summary of an IsotonicRegressionModel model + +#' @param object a fitted IsotonicRegressionModel +#' @param ... Other optional arguments to summary of an IsotonicRegressionModel +#' @return \code{summary} returns the model's boundaries and prediction as lists +#' @rdname spark.isoreg +#' @aliases summary,IsotonicRegressionModel-method +#' @export +#' @note summary(IsotonicRegressionModel) since 2.1.0 +setMethod("summary", signature(object = "IsotonicRegressionModel"), + function(object, ...) { + jobj <- object@jobj + boundaries <- callJMethod(jobj, "boundaries") + predictions <- callJMethod(jobj, "predictions") + return(list(boundaries = boundaries, predictions = predictions)) + }) + #' K-Means Clustering Model #' #' Fits a k-means clustering model against a Spark DataFrame, similarly to R's kmeans(). @@ -533,6 +630,25 @@ setMethod("write.ml", signature(object = "KMeansModel", path = "character"), invisible(callJMethod(writer, "save", path)) }) +# Save fitted IsotonicRegressionModel to the input path + +#' @param path The directory where the model is saved +#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE +#' which means throw exception if the output path exists. 
+#' +#' @rdname spark.isoreg +#' @aliases write.ml,IsotonicRegressionModel,character-method +#' @export +#' @note write.ml(IsotonicRegression, character) since 2.1.0 +setMethod("write.ml", signature(object = "IsotonicRegressionModel", path = "character"), + function(object, path, overwrite = FALSE) { + writer <- callJMethod(object@jobj, "write") + if (overwrite) { + writer <- callJMethod(writer, "overwrite") + } + invisible(callJMethod(writer, "save", path)) + }) + #' Load a fitted MLlib model from the input path. #' #' @param path Path of the model to read. @@ -558,6 +674,8 @@ read.ml <- function(path) { return(new("GeneralizedLinearRegressionModel", jobj = jobj)) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.KMeansWrapper")) { return(new("KMeansModel", jobj = jobj)) + } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.IsotonicRegressionWrapper")) { + return(new("IsotonicRegressionModel", jobj = jobj)) } else { stop(paste("Unsupported model: ", jobj)) } diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index bc18224680586..b759b28927365 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -476,4 +476,36 @@ test_that("spark.survreg", { } }) +test_that("spark.isotonicRegression", { + label <- c(7.0, 5.0, 3.0, 5.0, 1.0) + feature <- c(0.0, 1.0, 2.0, 3.0, 4.0) + weight <- c(1.0, 1.0, 1.0, 1.0, 1.0) + data <- as.data.frame(cbind(label, feature, weight)) + df <- suppressWarnings(createDataFrame(data)) + + model <- spark.isoreg(df, label ~ feature, isotonic = FALSE, + weightCol = "weight") + # only allow one variable on the right hand side of the formula + expect_error(model2 <- spark.isoreg(df, ~., isotonic = FALSE)) + result <- summary(model, df) + expect_equal(result$predictions, list(7, 5, 4, 4, 1)) + + # Test model prediction + predict_data <- list(list(-2.0), list(-1.0), list(0.5), + list(0.75), list(1.0), list(2.0), list(9.0)) + predict_df <- createDataFrame(predict_data, c("feature")) + predict_result <- collect(select(predict(model, predict_df), "prediction")) + expect_equal(predict_result$prediction, c(7.0, 7.0, 6.0, 5.5, 5.0, 4.0, 1.0)) + + # Test model save/load + modelPath <- tempfile(pattern = "spark-isotonicRegression", fileext = ".tmp") + write.ml(model, modelPath) + expect_error(write.ml(model, modelPath)) + write.ml(model, modelPath, overwrite = TRUE) + model2 <- read.ml(modelPath) + expect_equal(result, summary(model2, df)) + + unlink(modelPath) +}) + sparkR.session.stop() diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala new file mode 100644 index 0000000000000..1ea80cb46ab7b --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.r + +import org.apache.hadoop.fs.Path +import org.json4s._ +import org.json4s.JsonDSL._ +import org.json4s.jackson.JsonMethods._ + +import org.apache.spark.ml.{Pipeline, PipelineModel} +import org.apache.spark.ml.attribute.{AttributeGroup} +import org.apache.spark.ml.feature.RFormula +import org.apache.spark.ml.regression.{IsotonicRegression, IsotonicRegressionModel} +import org.apache.spark.ml.util._ +import org.apache.spark.sql.{DataFrame, Dataset} + +private[r] class IsotonicRegressionWrapper private ( + val pipeline: PipelineModel, + val features: Array[String]) extends MLWritable { + + private val isotonicRegressionModel: IsotonicRegressionModel = + pipeline.stages(1).asInstanceOf[IsotonicRegressionModel] + + lazy val boundaries: Array[Double] = isotonicRegressionModel.boundaries.toArray + + lazy val predictions: Array[Double] = isotonicRegressionModel.predictions.toArray + + def transform(dataset: Dataset[_]): DataFrame = { + pipeline.transform(dataset).drop(isotonicRegressionModel.getFeaturesCol) + } + + override def write: MLWriter = new IsotonicRegressionWrapper.IsotonicRegressionWrapperWriter(this) +} + +private[r] object IsotonicRegressionWrapper + extends MLReadable[IsotonicRegressionWrapper] { + + def fit( + data: DataFrame, + formula: String, + isotonic: Boolean, + featureIndex: Int, + weightCol: String): IsotonicRegressionWrapper = { + + val rFormulaModel = new RFormula() + .setFormula(formula) + .setFeaturesCol("features") + .fit(data) + + // get feature names from output schema + val schema = rFormulaModel.transform(data).schema + val featureAttrs = AttributeGroup.fromStructField(schema(rFormulaModel.getFeaturesCol)) + .attributes.get + val features = featureAttrs.map(_.name.get) + require(features.size == 1) + + // assemble and fit the pipeline + val isotonicRegression = new IsotonicRegression() + .setIsotonic(isotonic) + .setFeatureIndex(featureIndex) + .setWeightCol(weightCol) + + val pipeline = new Pipeline() + .setStages(Array(rFormulaModel, isotonicRegression)) + .fit(data) + + new IsotonicRegressionWrapper(pipeline, features) + } + + override def read: MLReader[IsotonicRegressionWrapper] = new IsotonicRegressionWrapperReader + + override def load(path: String): IsotonicRegressionWrapper = super.load(path) + + class IsotonicRegressionWrapperWriter(instance: IsotonicRegressionWrapper) extends MLWriter { + + override protected def saveImpl(path: String): Unit = { + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + + val rMetadata = ("class" -> instance.getClass.getName) ~ + ("features" -> instance.features.toSeq) + val rMetadataJson: String = compact(render(rMetadata)) + sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath) + + instance.pipeline.save(pipelinePath) + } + } + + class IsotonicRegressionWrapperReader extends MLReader[IsotonicRegressionWrapper] { + + override def load(path: String): IsotonicRegressionWrapper = { + implicit val format = DefaultFormats + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = 
new Path(path, "pipeline").toString + + val rMetadataStr = sc.textFile(rMetadataPath, 1).first() + val rMetadata = parse(rMetadataStr) + val features = (rMetadata \ "features").extract[Array[String]] + + val pipeline = PipelineModel.load(pipelinePath) + new IsotonicRegressionWrapper(pipeline, features) + } + } +} diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala index 568c160ee50d7..f9a44d60e691a 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala @@ -44,6 +44,8 @@ private[r] object RWrappers extends MLReader[Object] { GeneralizedLinearRegressionWrapper.load(path) case "org.apache.spark.ml.r.KMeansWrapper" => KMeansWrapper.load(path) + case "org.apache.spark.ml.r.IsotonicRegressionWrapper" => + IsotonicRegressionWrapper.load(path) case _ => throw new SparkException(s"SparkR read.ml does not support load $className") } From 56d86742d2600b8426d75bd87ab3c73332dca1d2 Mon Sep 17 00:00:00 2001 From: Kazuaki Ishizaki Date: Wed, 17 Aug 2016 21:34:57 +0800 Subject: [PATCH 006/270] [SPARK-15285][SQL] Generated SpecificSafeProjection.apply method grows beyond 64 KB ## What changes were proposed in this pull request? This PR splits the generated code for ```SafeProjection.apply``` by using ```ctx.splitExpressions()```. This is because the large code body for ```NewInstance``` may grow beyond 64KB bytecode size for ```apply()``` method. Here is [the original PR](https://github.com/apache/spark/pull/13243) for SPARK-15285. However, it breaks a build with Scala 2.10 since Scala 2.10 does not a case class with large number of members. Thus, it was reverted by [this commit](https://github.com/apache/spark/commit/fa244e5a90690d6a31be50f2aa203ae1a2e9a1cf). ## How was this patch tested? Added new tests by using `DefinedByConstructorParams` instead of case class for scala-2.10 Author: Kazuaki Ishizaki Closes #14670 from kiszk/SPARK-15285-2. 
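To make the splitting idea concrete: the JVM rejects any single method whose bytecode exceeds 64 KB, so instead of concatenating every generated assignment into one monolithic `apply()` body, the generated statements are grouped into small helper methods that `apply()` calls in sequence. The sketch below is only an illustration of that strategy, not Spark's `CodegenContext` API; the names `genStatements`, `splitIntoHelpers`, and `maxPerMethod` are invented for the example.

```scala
// Minimal sketch of method splitting: pack generated statements into helper
// methods so that no single generated method grows past the 64 KB bytecode limit.
object SplitCodegenSketch {

  // Hypothetical generator: one assignment statement per constructor argument.
  def genStatements(n: Int): Seq[String] =
    (0 until n).map(i => s"argValues[$i] = evalArg$i(i);")

  // Group the statements into chunks, wrap each chunk in its own helper method,
  // and emit a small apply() that simply delegates to the helpers in order.
  def splitIntoHelpers(statements: Seq[String], maxPerMethod: Int): String = {
    val helpers = statements.grouped(maxPerMethod).zipWithIndex.map { case (chunk, idx) =>
      s"""private void apply_$idx(InternalRow i) {
         |  ${chunk.mkString("\n  ")}
         |}""".stripMargin
    }.toSeq
    val calls = helpers.indices.map(idx => s"apply_$idx(i);").mkString("\n  ")
    s"""public Object apply(InternalRow i) {
       |  $calls
       |  return buildInstance();
       |}
       |
       |${helpers.mkString("\n\n")}""".stripMargin
  }

  def main(args: Array[String]): Unit = {
    // 500 generated assignments end up spread across 5 helpers of 100 statements
    // each, instead of one monolithic apply() body.
    println(splitIntoHelpers(genStatements(500), maxPerMethod = 100))
  }
}
```

The `ctx.splitExpressions()` call in the hunks below applies the same trick to the per-argument evaluation code of `NewInstance`.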
--- .../expressions/objects/objects.scala | 32 ++++++++++++--- .../spark/sql/DataFrameComplexTypeSuite.scala | 40 +++++++++++++++++++ 2 files changed, 66 insertions(+), 6 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 7cb94a7942885..31ed485317487 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -245,27 +245,47 @@ case class NewInstance( override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val javaType = ctx.javaType(dataType) - val argGen = arguments.map(_.genCode(ctx)) - val argString = argGen.map(_.value).mkString(", ") + val argIsNulls = ctx.freshName("argIsNulls") + ctx.addMutableState("boolean[]", argIsNulls, + s"$argIsNulls = new boolean[${arguments.size}];") + val argValues = arguments.zipWithIndex.map { case (e, i) => + val argValue = ctx.freshName("argValue") + ctx.addMutableState(ctx.javaType(e.dataType), argValue, "") + argValue + } + + val argCodes = arguments.zipWithIndex.map { case (e, i) => + val expr = e.genCode(ctx) + expr.code + s""" + $argIsNulls[$i] = ${expr.isNull}; + ${argValues(i)} = ${expr.value}; + """ + } + val argCode = ctx.splitExpressions(ctx.INPUT_ROW, argCodes) val outer = outerPointer.map(func => Literal.fromObject(func()).genCode(ctx)) var isNull = ev.isNull val setIsNull = if (propagateNull && arguments.nonEmpty) { - s"final boolean $isNull = ${argGen.map(_.isNull).mkString(" || ")};" + s""" + boolean $isNull = false; + for (int idx = 0; idx < ${arguments.length}; idx++) { + if ($argIsNulls[idx]) { $isNull = true; break; } + } + """ } else { isNull = "false" "" } val constructorCall = outer.map { gen => - s"""${gen.value}.new ${cls.getSimpleName}($argString)""" + s"""${gen.value}.new ${cls.getSimpleName}(${argValues.mkString(", ")})""" }.getOrElse { - s"new $className($argString)" + s"new $className(${argValues.mkString(", ")})" } val code = s""" - ${argGen.map(_.code).mkString("\n")} + $argCode ${outer.map(_.code).getOrElse("")} $setIsNull final $javaType ${ev.value} = $isNull ? 
${ctx.defaultValue(javaType)} : $constructorCall; diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameComplexTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameComplexTypeSuite.scala index 72f676e6225ee..1230b921aa279 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameComplexTypeSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameComplexTypeSuite.scala @@ -17,6 +17,7 @@ package org.apache.spark.sql +import org.apache.spark.sql.catalyst.DefinedByConstructorParams import org.apache.spark.sql.functions._ import org.apache.spark.sql.test.SharedSQLContext @@ -58,4 +59,43 @@ class DataFrameComplexTypeSuite extends QueryTest with SharedSQLContext { val nullIntRow = df.selectExpr("i[1]").collect()(0) assert(nullIntRow == org.apache.spark.sql.Row(null)) } + + test("SPARK-15285 Generated SpecificSafeProjection.apply method grows beyond 64KB") { + val ds100_5 = Seq(S100_5()).toDS() + ds100_5.rdd.count + } } + +class S100( + val s1: String = "1", val s2: String = "2", val s3: String = "3", val s4: String = "4", + val s5: String = "5", val s6: String = "6", val s7: String = "7", val s8: String = "8", + val s9: String = "9", val s10: String = "10", val s11: String = "11", val s12: String = "12", + val s13: String = "13", val s14: String = "14", val s15: String = "15", val s16: String = "16", + val s17: String = "17", val s18: String = "18", val s19: String = "19", val s20: String = "20", + val s21: String = "21", val s22: String = "22", val s23: String = "23", val s24: String = "24", + val s25: String = "25", val s26: String = "26", val s27: String = "27", val s28: String = "28", + val s29: String = "29", val s30: String = "30", val s31: String = "31", val s32: String = "32", + val s33: String = "33", val s34: String = "34", val s35: String = "35", val s36: String = "36", + val s37: String = "37", val s38: String = "38", val s39: String = "39", val s40: String = "40", + val s41: String = "41", val s42: String = "42", val s43: String = "43", val s44: String = "44", + val s45: String = "45", val s46: String = "46", val s47: String = "47", val s48: String = "48", + val s49: String = "49", val s50: String = "50", val s51: String = "51", val s52: String = "52", + val s53: String = "53", val s54: String = "54", val s55: String = "55", val s56: String = "56", + val s57: String = "57", val s58: String = "58", val s59: String = "59", val s60: String = "60", + val s61: String = "61", val s62: String = "62", val s63: String = "63", val s64: String = "64", + val s65: String = "65", val s66: String = "66", val s67: String = "67", val s68: String = "68", + val s69: String = "69", val s70: String = "70", val s71: String = "71", val s72: String = "72", + val s73: String = "73", val s74: String = "74", val s75: String = "75", val s76: String = "76", + val s77: String = "77", val s78: String = "78", val s79: String = "79", val s80: String = "80", + val s81: String = "81", val s82: String = "82", val s83: String = "83", val s84: String = "84", + val s85: String = "85", val s86: String = "86", val s87: String = "87", val s88: String = "88", + val s89: String = "89", val s90: String = "90", val s91: String = "91", val s92: String = "92", + val s93: String = "93", val s94: String = "94", val s95: String = "95", val s96: String = "96", + val s97: String = "97", val s98: String = "98", val s99: String = "99", val s100: String = "100") +extends DefinedByConstructorParams + +case class S100_5( + s1: S100 = new S100(), s2: S100 = new S100(), s3: S100 = new 
S100(), + s4: S100 = new S100(), s5: S100 = new S100()) + + From 0b0c8b95e3594db36d87ef0e59a30eefe8508ac1 Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Wed, 17 Aug 2016 07:03:24 -0700 Subject: [PATCH 007/270] [SPARK-17106] [SQL] Simplify the SubqueryExpression interface ## What changes were proposed in this pull request? The current subquery expression interface contains a little bit of technical debt in the form of a few different access paths to get and set the query contained by the expression. This is confusing to anyone who goes over this code. This PR unifies these access paths. ## How was this patch tested? (Existing tests) Author: Herman van Hovell Closes #14685 from hvanhovell/SPARK-17106. --- .../sql/catalyst/analysis/Analyzer.scala | 4 +- .../sql/catalyst/expressions/subquery.scala | 60 +++++++++---------- .../sql/catalyst/optimizer/Optimizer.scala | 6 +- .../spark/sql/catalyst/plans/QueryPlan.scala | 4 +- .../spark/sql/catalyst/SQLBuilder.scala | 2 +- .../apache/spark/sql/execution/subquery.scala | 49 ++++++--------- .../org/apache/spark/sql/QueryTest.scala | 4 +- .../benchmark/TPCDSQueryBenchmark.scala | 1 - 8 files changed, 56 insertions(+), 74 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index bd4c19181f647..f540816366ca8 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -146,7 +146,7 @@ class Analyzer( // This cannot be done in ResolveSubquery because ResolveSubquery does not know the CTE. other transformExpressions { case e: SubqueryExpression => - e.withNewPlan(substituteCTE(e.query, cteRelations)) + e.withNewPlan(substituteCTE(e.plan, cteRelations)) } } } @@ -1091,7 +1091,7 @@ class Analyzer( f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = { // Step 1: Resolve the outer expressions. var previous: LogicalPlan = null - var current = e.query + var current = e.plan do { // Try to resolve the subquery plan using the regular analyzer. previous = current diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala index ddbe937cba9bd..e2e7d98e33459 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/subquery.scala @@ -17,33 +17,33 @@ package org.apache.spark.sql.catalyst.expressions -import org.apache.spark.sql.catalyst.analysis.TypeCheckResult import org.apache.spark.sql.catalyst.plans.QueryPlan -import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias} +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.types._ /** - * An interface for subquery that is used in expressions. + * An interface for expressions that contain a [[QueryPlan]]. */ -abstract class SubqueryExpression extends Expression { +abstract class PlanExpression[T <: QueryPlan[_]] extends Expression { /** The id of the subquery expression. */ def exprId: ExprId - /** The logical plan of the query. */ - def query: LogicalPlan + /** The plan being wrapped in the query. */ + def plan: T - /** - * Either a logical plan or a physical plan. 
The generated tree string (explain output) uses this - * field to explain the subquery. - */ - def plan: QueryPlan[_] - - /** Updates the query with new logical plan. */ - def withNewPlan(plan: LogicalPlan): SubqueryExpression + /** Updates the expression with a new plan. */ + def withNewPlan(plan: T): PlanExpression[T] protected def conditionString: String = children.mkString("[", " && ", "]") } +/** + * A base interface for expressions that contain a [[LogicalPlan]]. + */ +abstract class SubqueryExpression extends PlanExpression[LogicalPlan] { + override def withNewPlan(plan: LogicalPlan): SubqueryExpression +} + object SubqueryExpression { def hasCorrelatedSubquery(e: Expression): Boolean = { e.find { @@ -60,20 +60,19 @@ object SubqueryExpression { * Note: `exprId` is used to have a unique name in explain string output. */ case class ScalarSubquery( - query: LogicalPlan, + plan: LogicalPlan, children: Seq[Expression] = Seq.empty, exprId: ExprId = NamedExpression.newExprId) extends SubqueryExpression with Unevaluable { - override lazy val resolved: Boolean = childrenResolved && query.resolved + override lazy val resolved: Boolean = childrenResolved && plan.resolved override lazy val references: AttributeSet = { - if (query.resolved) super.references -- query.outputSet + if (plan.resolved) super.references -- plan.outputSet else super.references } - override def dataType: DataType = query.schema.fields.head.dataType + override def dataType: DataType = plan.schema.fields.head.dataType override def foldable: Boolean = false override def nullable: Boolean = true - override def plan: LogicalPlan = SubqueryAlias(toString, query, None) - override def withNewPlan(plan: LogicalPlan): ScalarSubquery = copy(query = plan) + override def withNewPlan(plan: LogicalPlan): ScalarSubquery = copy(plan = plan) override def toString: String = s"scalar-subquery#${exprId.id} $conditionString" } @@ -92,19 +91,18 @@ object ScalarSubquery { * be rewritten into a left semi/anti join during analysis. 
*/ case class PredicateSubquery( - query: LogicalPlan, + plan: LogicalPlan, children: Seq[Expression] = Seq.empty, nullAware: Boolean = false, exprId: ExprId = NamedExpression.newExprId) extends SubqueryExpression with Predicate with Unevaluable { - override lazy val resolved = childrenResolved && query.resolved - override lazy val references: AttributeSet = super.references -- query.outputSet + override lazy val resolved = childrenResolved && plan.resolved + override lazy val references: AttributeSet = super.references -- plan.outputSet override def nullable: Boolean = nullAware - override def plan: LogicalPlan = SubqueryAlias(toString, query, None) - override def withNewPlan(plan: LogicalPlan): PredicateSubquery = copy(query = plan) + override def withNewPlan(plan: LogicalPlan): PredicateSubquery = copy(plan = plan) override def semanticEquals(o: Expression): Boolean = o match { case p: PredicateSubquery => - query.sameResult(p.query) && nullAware == p.nullAware && + plan.sameResult(p.plan) && nullAware == p.nullAware && children.length == p.children.length && children.zip(p.children).forall(p => p._1.semanticEquals(p._2)) case _ => false @@ -146,14 +144,13 @@ object PredicateSubquery { * FROM b) * }}} */ -case class ListQuery(query: LogicalPlan, exprId: ExprId = NamedExpression.newExprId) +case class ListQuery(plan: LogicalPlan, exprId: ExprId = NamedExpression.newExprId) extends SubqueryExpression with Unevaluable { override lazy val resolved = false override def children: Seq[Expression] = Seq.empty override def dataType: DataType = ArrayType(NullType) override def nullable: Boolean = false - override def withNewPlan(plan: LogicalPlan): ListQuery = copy(query = plan) - override def plan: LogicalPlan = SubqueryAlias(toString, query, None) + override def withNewPlan(plan: LogicalPlan): ListQuery = copy(plan = plan) override def toString: String = s"list#${exprId.id}" } @@ -168,12 +165,11 @@ case class ListQuery(query: LogicalPlan, exprId: ExprId = NamedExpression.newExp * WHERE b.id = a.id) * }}} */ -case class Exists(query: LogicalPlan, exprId: ExprId = NamedExpression.newExprId) +case class Exists(plan: LogicalPlan, exprId: ExprId = NamedExpression.newExprId) extends SubqueryExpression with Predicate with Unevaluable { override lazy val resolved = false override def children: Seq[Expression] = Seq.empty override def nullable: Boolean = false - override def withNewPlan(plan: LogicalPlan): Exists = copy(query = plan) - override def plan: LogicalPlan = SubqueryAlias(toString, query, None) + override def withNewPlan(plan: LogicalPlan): Exists = copy(plan = plan) override def toString: String = s"exists#${exprId.id}" } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index f97a78b411597..aa15f4a82383c 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -127,7 +127,7 @@ abstract class Optimizer(sessionCatalog: SessionCatalog, conf: CatalystConf) object OptimizeSubqueries extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { case s: SubqueryExpression => - s.withNewPlan(Optimizer.this.execute(s.query)) + s.withNewPlan(Optimizer.this.execute(s.plan)) } } } @@ -1814,7 +1814,7 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { val newExpression = expression 
transform { case s: ScalarSubquery if s.children.nonEmpty => subqueries += s - s.query.output.head + s.plan.output.head } newExpression.asInstanceOf[E] } @@ -2029,7 +2029,7 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { // grouping expressions. As a result we need to replace all the scalar subqueries in the // grouping expressions by their result. val newGrouping = grouping.map { e => - subqueries.find(_.semanticEquals(e)).map(_.query.output.head).getOrElse(e) + subqueries.find(_.semanticEquals(e)).map(_.plan.output.head).getOrElse(e) } Aggregate(newGrouping, newExpressions, constructLeftJoins(child, subqueries)) } else { diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala index becf6945a2f2b..8ee31f42ad88e 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala @@ -263,7 +263,9 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanT * All the subqueries of current plan. */ def subqueries: Seq[PlanType] = { - expressions.flatMap(_.collect {case e: SubqueryExpression => e.plan.asInstanceOf[PlanType]}) + expressions.flatMap(_.collect { + case e: PlanExpression[_] => e.plan.asInstanceOf[PlanType] + }) } override protected def innerChildren: Seq[QueryPlan[_]] = subqueries diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala index ff8e0f2642055..0f51aa58d63ba 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala @@ -80,7 +80,7 @@ class SQLBuilder private ( try { val replaced = finalPlan.transformAllExpressions { case s: SubqueryExpression => - val query = new SQLBuilder(s.query, nextSubqueryId, nextGenAttrId, exprIdMap).toSQL + val query = new SQLBuilder(s.plan, nextSubqueryId, nextGenAttrId, exprIdMap).toSQL val sql = s match { case _: ListQuery => query case _: Exists => s"EXISTS($query)" diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala index c730bee6ae050..730ca27f82bac 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala @@ -22,9 +22,8 @@ import scala.collection.mutable.ArrayBuffer import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.{expressions, InternalRow} -import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.expressions.{Expression, ExprId, InSet, Literal, PlanExpression} import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode} -import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{BooleanType, DataType, StructType} @@ -32,18 +31,7 @@ import org.apache.spark.sql.types.{BooleanType, DataType, StructType} /** * The base class for subquery that is used in SparkPlan. 
*/ -trait ExecSubqueryExpression extends SubqueryExpression { - - val executedPlan: SubqueryExec - def withExecutedPlan(plan: SubqueryExec): ExecSubqueryExpression - - // does not have logical plan - override def query: LogicalPlan = throw new UnsupportedOperationException - override def withNewPlan(plan: LogicalPlan): SubqueryExpression = - throw new UnsupportedOperationException - - override def plan: SparkPlan = executedPlan - +abstract class ExecSubqueryExpression extends PlanExpression[SubqueryExec] { /** * Fill the expression with collected result from executed plan. */ @@ -56,30 +44,29 @@ trait ExecSubqueryExpression extends SubqueryExpression { * This is the physical copy of ScalarSubquery to be used inside SparkPlan. */ case class ScalarSubquery( - executedPlan: SubqueryExec, + plan: SubqueryExec, exprId: ExprId) extends ExecSubqueryExpression { - override def dataType: DataType = executedPlan.schema.fields.head.dataType + override def dataType: DataType = plan.schema.fields.head.dataType override def children: Seq[Expression] = Nil override def nullable: Boolean = true - override def toString: String = executedPlan.simpleString - - def withExecutedPlan(plan: SubqueryExec): ExecSubqueryExpression = copy(executedPlan = plan) + override def toString: String = plan.simpleString + override def withNewPlan(query: SubqueryExec): ScalarSubquery = copy(plan = query) override def semanticEquals(other: Expression): Boolean = other match { - case s: ScalarSubquery => executedPlan.sameResult(executedPlan) + case s: ScalarSubquery => plan.sameResult(s.plan) case _ => false } // the first column in first row from `query`. - @volatile private var result: Any = null + @volatile private var result: Any = _ @volatile private var updated: Boolean = false def updateResult(): Unit = { val rows = plan.executeCollect() if (rows.length > 1) { - sys.error(s"more than one row returned by a subquery used as an expression:\n${plan}") + sys.error(s"more than one row returned by a subquery used as an expression:\n$plan") } if (rows.length == 1) { assert(rows(0).numFields == 1, @@ -108,7 +95,7 @@ case class ScalarSubquery( */ case class InSubquery( child: Expression, - executedPlan: SubqueryExec, + plan: SubqueryExec, exprId: ExprId, private var result: Array[Any] = null, private var updated: Boolean = false) extends ExecSubqueryExpression { @@ -116,13 +103,11 @@ case class InSubquery( override def dataType: DataType = BooleanType override def children: Seq[Expression] = child :: Nil override def nullable: Boolean = child.nullable - override def toString: String = s"$child IN ${executedPlan.name}" - - def withExecutedPlan(plan: SubqueryExec): ExecSubqueryExpression = copy(executedPlan = plan) + override def toString: String = s"$child IN ${plan.name}" + override def withNewPlan(plan: SubqueryExec): InSubquery = copy(plan = plan) override def semanticEquals(other: Expression): Boolean = other match { - case in: InSubquery => child.semanticEquals(in.child) && - executedPlan.sameResult(in.executedPlan) + case in: InSubquery => child.semanticEquals(in.child) && plan.sameResult(in.plan) case _ => false } @@ -159,8 +144,8 @@ case class PlanSubqueries(sparkSession: SparkSession) extends Rule[SparkPlan] { ScalarSubquery( SubqueryExec(s"subquery${subquery.exprId.id}", executedPlan), subquery.exprId) - case expressions.PredicateSubquery(plan, Seq(e: Expression), _, exprId) => - val executedPlan = new QueryExecution(sparkSession, plan).executedPlan + case expressions.PredicateSubquery(query, Seq(e: Expression), _, exprId) 
=> + val executedPlan = new QueryExecution(sparkSession, query).executedPlan InSubquery(e, SubqueryExec(s"subquery${exprId.id}", executedPlan), exprId) } } @@ -184,9 +169,9 @@ case class ReuseSubquery(conf: SQLConf) extends Rule[SparkPlan] { val sameSchema = subqueries.getOrElseUpdate(sub.plan.schema, ArrayBuffer[SubqueryExec]()) val sameResult = sameSchema.find(_.sameResult(sub.plan)) if (sameResult.isDefined) { - sub.withExecutedPlan(sameResult.get) + sub.withNewPlan(sameResult.get) } else { - sameSchema += sub.executedPlan + sameSchema += sub.plan sub } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala index 304881d4a4bdd..cff9d22d089c3 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala @@ -292,7 +292,7 @@ abstract class QueryTest extends PlanTest { p.expressions.foreach { _.foreach { case s: SubqueryExpression => - s.query.foreach(collectData) + s.plan.foreach(collectData) case _ => } } @@ -334,7 +334,7 @@ abstract class QueryTest extends PlanTest { case p => p.transformExpressions { case s: SubqueryExpression => - s.withNewPlan(s.query.transformDown(renormalize)) + s.withNewPlan(s.plan.transformDown(renormalize)) } } val normalized2 = jsonBackPlan.transformDown(renormalize) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala index 957a1d6426e87..3988d9750b585 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala @@ -26,7 +26,6 @@ import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation import org.apache.spark.sql.catalyst.expressions.SubqueryExpression import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.util._ -import org.apache.spark.sql.internal.SQLConf import org.apache.spark.util.Benchmark /** From 928ca1c6d12b23d84f9b6205e22d2e756311f072 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Wed, 17 Aug 2016 09:31:22 -0700 Subject: [PATCH 008/270] [SPARK-17102][SQL] bypass UserDefinedGenerator for json format check ## What changes were proposed in this pull request? We use reflection to convert `TreeNode` to json string, and currently don't support arbitrary object. `UserDefinedGenerator` takes a function object, so we should skip json format test for it, or the tests can be flacky, e.g. `DataFrameSuite.simple explode`, this test always fail with scala 2.10(branch 1.6 builds with scala 2.10 by default), but pass with scala 2.11(master branch builds with scala 2.11 by default). ## How was this patch tested? N/A Author: Wenchen Fan Closes #14679 from cloud-fan/json. 
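To see why the bypass is needed, consider a toy expression tree in which one node type captures an arbitrary Scala function: a reflection-based JSON serializer has no way to reconstruct that function from JSON, so a round-trip check has to skip any plan that contains such a node. This is only a hedged sketch with made-up types (`Expr`, `UserDefinedGen`); it is not Spark's `TreeNode` or `QueryTest` code.

```scala
// Toy sketch: a node holding a Scala function value cannot be rebuilt from JSON
// by reflection, so a round-trip check simply bails out when it sees one.
object JsonCheckSketch {
  sealed trait Expr { def children: Seq[Expr] }
  case class Literal(value: Int) extends Expr { val children: Seq[Expr] = Nil }
  case class Add(left: Expr, right: Expr) extends Expr { val children: Seq[Expr] = Seq(left, right) }
  // Analogous to UserDefinedGenerator: carries an opaque function object.
  case class UserDefinedGen(f: Int => Seq[Int], child: Expr) extends Expr {
    val children: Seq[Expr] = Seq(child)
  }

  // Returns true only if every node in the tree is safe to serialize to JSON.
  def jsonCheckable(e: Expr): Boolean = e match {
    case _: UserDefinedGen => false          // bypass, mirroring the patch below
    case other => other.children.forall(jsonCheckable)
  }

  def main(args: Array[String]): Unit = {
    assert(jsonCheckable(Add(Literal(1), Literal(2))))
    assert(!jsonCheckable(UserDefinedGen(x => Seq(x, x + 1), Literal(3))))
    println("JSON round-trip check skipped for function-carrying nodes")
  }
}
```

The one-line change below does exactly this: it returns from the JSON format check as soon as a `UserDefinedGenerator` is encountered.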
--- sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala index cff9d22d089c3..484e4380331f8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala @@ -249,9 +249,10 @@ abstract class QueryTest extends PlanTest { } p }.transformAllExpressions { - case a: ImperativeAggregate => return + case _: ImperativeAggregate => return case _: TypedAggregateExpression => return case Literal(_, _: ObjectType) => return + case _: UserDefinedGenerator => return } // bypass hive tests before we fix all corner cases in hive module. From e3fec51fa1ed161789ab7aa32ed36efe357b5d31 Mon Sep 17 00:00:00 2001 From: Marcelo Vanzin Date: Wed, 17 Aug 2016 11:12:21 -0700 Subject: [PATCH 009/270] [SPARK-16930][YARN] Fix a couple of races in cluster app initialization. There are two narrow races that could cause the ApplicationMaster to miss when the user application instantiates the SparkContext, which could cause app failures when nothing was wrong with the app. It was also possible for a failing application to get stuck in the loop that waits for the context for a long time, instead of failing quickly. The change uses a promise to track the SparkContext instance, which gets rid of the races and allows for some simplification of the code. Tested with existing unit tests, and a new one being added to test the timeout code. Author: Marcelo Vanzin Closes #14542 from vanzin/SPARK-16930. --- .../spark/deploy/yarn/ApplicationMaster.scala | 98 +++++++++---------- .../cluster/YarnClusterScheduler.scala | 5 - .../spark/deploy/yarn/YarnClusterSuite.scala | 22 +++++ 3 files changed, 66 insertions(+), 59 deletions(-) diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala index 614278c8b2d22..a4b575c85d5fb 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala @@ -20,9 +20,11 @@ package org.apache.spark.deploy.yarn import java.io.{File, IOException} import java.lang.reflect.InvocationTargetException import java.net.{Socket, URI, URL} -import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{TimeoutException, TimeUnit} import scala.collection.mutable.HashMap +import scala.concurrent.Promise +import scala.concurrent.duration.Duration import scala.util.control.NonFatal import org.apache.hadoop.fs.{FileSystem, Path} @@ -106,12 +108,11 @@ private[spark] class ApplicationMaster( // Next wait interval before allocator poll. private var nextAllocationInterval = initialAllocationInterval - // Fields used in client mode. private var rpcEnv: RpcEnv = null private var amEndpoint: RpcEndpointRef = _ - // Fields used in cluster mode. - private val sparkContextRef = new AtomicReference[SparkContext](null) + // In cluster mode, used to tell the AM when the user's SparkContext has been initialized. 
+ private val sparkContextPromise = Promise[SparkContext]() private var credentialRenewer: AMCredentialRenewer = _ @@ -316,23 +317,15 @@ private[spark] class ApplicationMaster( } private def sparkContextInitialized(sc: SparkContext) = { - sparkContextRef.synchronized { - sparkContextRef.compareAndSet(null, sc) - sparkContextRef.notifyAll() - } - } - - private def sparkContextStopped(sc: SparkContext) = { - sparkContextRef.compareAndSet(sc, null) + sparkContextPromise.success(sc) } private def registerAM( + _sparkConf: SparkConf, _rpcEnv: RpcEnv, driverRef: RpcEndpointRef, uiAddress: String, securityMgr: SecurityManager) = { - val sc = sparkContextRef.get() - val appId = client.getAttemptId().getApplicationId().toString() val attemptId = client.getAttemptId().getAttemptId().toString() val historyAddress = @@ -341,7 +334,6 @@ private[spark] class ApplicationMaster( .map { address => s"${address}${HistoryServer.UI_PATH_PREFIX}/${appId}/${attemptId}" } .getOrElse("") - val _sparkConf = if (sc != null) sc.getConf else sparkConf val driverUrl = RpcEndpointAddress( _sparkConf.get("spark.driver.host"), _sparkConf.get("spark.driver.port").toInt, @@ -385,21 +377,35 @@ private[spark] class ApplicationMaster( // This a bit hacky, but we need to wait until the spark.driver.port property has // been set by the Thread executing the user class. - val sc = waitForSparkContextInitialized() - - // If there is no SparkContext at this point, just fail the app. - if (sc == null) { - finish(FinalApplicationStatus.FAILED, - ApplicationMaster.EXIT_SC_NOT_INITED, - "Timed out waiting for SparkContext.") - } else { - rpcEnv = sc.env.rpcEnv - val driverRef = runAMEndpoint( - sc.getConf.get("spark.driver.host"), - sc.getConf.get("spark.driver.port"), - isClusterMode = true) - registerAM(rpcEnv, driverRef, sc.ui.map(_.appUIAddress).getOrElse(""), securityMgr) + logInfo("Waiting for spark context initialization...") + val totalWaitTime = sparkConf.get(AM_MAX_WAIT_TIME) + try { + val sc = ThreadUtils.awaitResult(sparkContextPromise.future, + Duration(totalWaitTime, TimeUnit.MILLISECONDS)) + if (sc != null) { + rpcEnv = sc.env.rpcEnv + val driverRef = runAMEndpoint( + sc.getConf.get("spark.driver.host"), + sc.getConf.get("spark.driver.port"), + isClusterMode = true) + registerAM(sc.getConf, rpcEnv, driverRef, sc.ui.map(_.appUIAddress).getOrElse(""), + securityMgr) + } else { + // Sanity check; should never happen in normal operation, since sc should only be null + // if the user app did not create a SparkContext. + if (!finished) { + throw new IllegalStateException("SparkContext is null but app is still running!") + } + } userClassThread.join() + } catch { + case e: SparkException if e.getCause().isInstanceOf[TimeoutException] => + logError( + s"SparkContext did not initialize after waiting for $totalWaitTime ms. " + + "Please check earlier log output for errors. Failing the application.") + finish(FinalApplicationStatus.FAILED, + ApplicationMaster.EXIT_SC_NOT_INITED, + "Timed out waiting for SparkContext.") } } @@ -409,7 +415,8 @@ private[spark] class ApplicationMaster( clientMode = true) val driverRef = waitForSparkDriver() addAmIpFilter() - registerAM(rpcEnv, driverRef, sparkConf.get("spark.driver.appUIAddress", ""), securityMgr) + registerAM(sparkConf, rpcEnv, driverRef, sparkConf.get("spark.driver.appUIAddress", ""), + securityMgr) // In client mode the actor will stop the reporter thread. 
reporterThread.join() @@ -525,26 +532,6 @@ private[spark] class ApplicationMaster( } } - private def waitForSparkContextInitialized(): SparkContext = { - logInfo("Waiting for spark context initialization") - sparkContextRef.synchronized { - val totalWaitTime = sparkConf.get(AM_MAX_WAIT_TIME) - val deadline = System.currentTimeMillis() + totalWaitTime - - while (sparkContextRef.get() == null && System.currentTimeMillis < deadline && !finished) { - logInfo("Waiting for spark context initialization ... ") - sparkContextRef.wait(10000L) - } - - val sparkContext = sparkContextRef.get() - if (sparkContext == null) { - logError(("SparkContext did not initialize after waiting for %d ms. Please check earlier" - + " log output for errors. Failing the application.").format(totalWaitTime)) - } - sparkContext - } - } - private def waitForSparkDriver(): RpcEndpointRef = { logInfo("Waiting for Spark driver to be reachable.") var driverUp = false @@ -647,6 +634,13 @@ private[spark] class ApplicationMaster( ApplicationMaster.EXIT_EXCEPTION_USER_CLASS, "User class threw exception: " + cause) } + sparkContextPromise.tryFailure(e.getCause()) + } finally { + // Notify the thread waiting for the SparkContext, in case the application did not + // instantiate one. This will do nothing when the user code instantiates a SparkContext + // (with the correct master), or when the user code throws an exception (due to the + // tryFailure above). + sparkContextPromise.trySuccess(null) } } } @@ -759,10 +753,6 @@ object ApplicationMaster extends Logging { master.sparkContextInitialized(sc) } - private[spark] def sparkContextStopped(sc: SparkContext): Boolean = { - master.sparkContextStopped(sc) - } - private[spark] def getAttemptId(): ApplicationAttemptId = { master.getAttemptId } diff --git a/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala index 72ec4d6b34af6..96c9151fc351d 100644 --- a/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala +++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala @@ -34,9 +34,4 @@ private[spark] class YarnClusterScheduler(sc: SparkContext) extends YarnSchedule logInfo("YarnClusterScheduler.postStartHook done") } - override def stop() { - super.stop() - ApplicationMaster.sparkContextStopped(sc) - } - } diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala index 8ab7b21c22139..fb7926f6a1e28 100644 --- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala +++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala @@ -33,6 +33,7 @@ import org.scalatest.concurrent.Eventually._ import org.apache.spark._ import org.apache.spark.deploy.SparkHadoopUtil +import org.apache.spark.deploy.yarn.config._ import org.apache.spark.internal.Logging import org.apache.spark.launcher._ import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationStart, @@ -192,6 +193,14 @@ class YarnClusterSuite extends BaseYarnClusterSuite { } } + test("timeout to get SparkContext in cluster mode triggers failure") { + val timeout = 2000 + val finalState = runSpark(false, mainClassName(SparkContextTimeoutApp.getClass), + appArgs = Seq((timeout * 4).toString), + extraConf = Map(AM_MAX_WAIT_TIME.key -> timeout.toString)) + finalState should be (SparkAppHandle.State.FAILED) + } + private def 
testBasicYarnApp(clientMode: Boolean, conf: Map[String, String] = Map()): Unit = { val result = File.createTempFile("result", null, tempDir) val finalState = runSpark(clientMode, mainClassName(YarnClusterDriver.getClass), @@ -469,3 +478,16 @@ private object YarnLauncherTestApp { } } + +/** + * Used to test code in the AM that detects the SparkContext instance. Expects a single argument + * with the duration to sleep for, in ms. + */ +private object SparkContextTimeoutApp { + + def main(args: Array[String]): Unit = { + val Array(sleepTime) = args + Thread.sleep(java.lang.Long.parseLong(sleepTime)) + } + +} From 4d92af310ad29ade039e4130f91f2a3d9180deef Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Wed, 17 Aug 2016 11:18:33 -0700 Subject: [PATCH 010/270] [SPARK-16446][SPARKR][ML] Gaussian Mixture Model wrapper in SparkR ## What changes were proposed in this pull request? Gaussian Mixture Model wrapper in SparkR, similarly to R's ```mvnormalmixEM```. ## How was this patch tested? Unit test. Author: Yanbo Liang Closes #14392 from yanboliang/spark-16446. --- R/pkg/NAMESPACE | 3 +- R/pkg/R/generics.R | 7 + R/pkg/R/mllib.R | 139 +++++++++++++++++- R/pkg/inst/tests/testthat/test_mllib.R | 62 ++++++++ .../spark/ml/r/GaussianMixtureWrapper.scala | 128 ++++++++++++++++ .../org/apache/spark/ml/r/RWrappers.scala | 2 + 6 files changed, 338 insertions(+), 3 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index 1e23b233c1116..c71eec5ce0437 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -25,7 +25,8 @@ exportMethods("glm", "fitted", "spark.naiveBayes", "spark.survreg", - "spark.isoreg") + "spark.isoreg", + "spark.gaussianMixture") # Job group lifecycle management methods export("setJobGroup", diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index ebacc11741812..06bb25d62d34d 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -1308,6 +1308,13 @@ setGeneric("spark.survreg", function(data, formula, ...) { standardGeneric("spar #' @export setGeneric("spark.isoreg", function(data, formula, ...) { standardGeneric("spark.isoreg") }) +#' @rdname spark.gaussianMixture +#' @export +setGeneric("spark.gaussianMixture", + function(data, formula, ...) { + standardGeneric("spark.gaussianMixture") + }) + #' @rdname write.ml #' @export setGeneric("write.ml", function(object, path, ...) { standardGeneric("write.ml") }) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index 0dcc54d7af09b..db74046056a99 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -60,6 +60,13 @@ setClass("KMeansModel", representation(jobj = "jobj")) #' @note IsotonicRegressionModel since 2.1.0 setClass("IsotonicRegressionModel", representation(jobj = "jobj")) +#' S4 class that represents a GaussianMixtureModel +#' +#' @param jobj a Java object reference to the backing Scala GaussianMixtureModel +#' @export +#' @note GaussianMixtureModel since 2.1.0 +setClass("GaussianMixtureModel", representation(jobj = "jobj")) + #' Saves the MLlib model to the input path #' #' Saves the MLlib model to the input path. 
For more information, see the specific @@ -67,7 +74,7 @@ setClass("IsotonicRegressionModel", representation(jobj = "jobj")) #' @rdname write.ml #' @name write.ml #' @export -#' @seealso \link{spark.glm}, \link{glm} +#' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} #' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} #' @seealso \link{spark.isoreg} #' @seealso \link{read.ml} @@ -80,7 +87,7 @@ NULL #' @rdname predict #' @name predict #' @export -#' @seealso \link{spark.glm}, \link{glm} +#' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} #' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} #' @seealso \link{spark.isoreg} NULL @@ -649,6 +656,25 @@ setMethod("write.ml", signature(object = "IsotonicRegressionModel", path = "char invisible(callJMethod(writer, "save", path)) }) +# Save fitted MLlib model to the input path + +#' @param path the directory where the model is saved. +#' @param overwrite overwrites or not if the output path already exists. Default is FALSE +#' which means throw exception if the output path exists. +#' +#' @aliases write.ml,GaussianMixtureModel,character-method +#' @rdname spark.gaussianMixture +#' @export +#' @note write.ml(GaussianMixtureModel, character) since 2.1.0 +setMethod("write.ml", signature(object = "GaussianMixtureModel", path = "character"), + function(object, path, overwrite = FALSE) { + writer <- callJMethod(object@jobj, "write") + if (overwrite) { + writer <- callJMethod(writer, "overwrite") + } + invisible(callJMethod(writer, "save", path)) + }) + #' Load a fitted MLlib model from the input path. #' #' @param path Path of the model to read. @@ -676,6 +702,8 @@ read.ml <- function(path) { return(new("KMeansModel", jobj = jobj)) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.IsotonicRegressionWrapper")) { return(new("IsotonicRegressionModel", jobj = jobj)) + } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.GaussianMixtureWrapper")) { + return(new("GaussianMixtureModel", jobj = jobj)) } else { stop(paste("Unsupported model: ", jobj)) } @@ -757,3 +785,110 @@ setMethod("predict", signature(object = "AFTSurvivalRegressionModel"), function(object, newData) { return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) }) + +#' Multivariate Gaussian Mixture Model (GMM) +#' +#' Fits multivariate gaussian mixture model against a Spark DataFrame, similarly to R's +#' mvnormalmixEM(). Users can call \code{summary} to print a summary of the fitted model, +#' \code{predict} to make predictions on new data, and \code{write.ml}/\code{read.ml} +#' to save/load fitted models. +#' +#' @param data a SparkDataFrame for training. +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula +#' operators are supported, including '~', '.', ':', '+', and '-'. +#' Note that the response variable of formula is empty in spark.gaussianMixture. +#' @param k number of independent Gaussians in the mixture model. +#' @param maxIter maximum iteration number. +#' @param tol the convergence tolerance. +#' @aliases spark.gaussianMixture,SparkDataFrame,formula-method +#' @return \code{spark.gaussianMixture} returns a fitted multivariate gaussian mixture model. 
+#' @rdname spark.gaussianMixture +#' @name spark.gaussianMixture +#' @seealso mixtools: \url{https://cran.r-project.org/web/packages/mixtools/} +#' @export +#' @examples +#' \dontrun{ +#' sparkR.session() +#' library(mvtnorm) +#' set.seed(100) +#' a <- rmvnorm(4, c(0, 0)) +#' b <- rmvnorm(6, c(3, 4)) +#' data <- rbind(a, b) +#' df <- createDataFrame(as.data.frame(data)) +#' model <- spark.gaussianMixture(df, ~ V1 + V2, k = 2) +#' summary(model) +#' +#' # fitted values on training data +#' fitted <- predict(model, df) +#' head(select(fitted, "V1", "prediction")) +#' +#' # save fitted model to input path +#' path <- "path/to/model" +#' write.ml(model, path) +#' +#' # can also read back the saved model and print +#' savedModel <- read.ml(path) +#' summary(savedModel) +#' } +#' @note spark.gaussianMixture since 2.1.0 +#' @seealso \link{predict}, \link{read.ml}, \link{write.ml} +setMethod("spark.gaussianMixture", signature(data = "SparkDataFrame", formula = "formula"), + function(data, formula, k = 2, maxIter = 100, tol = 0.01) { + formula <- paste(deparse(formula), collapse = "") + jobj <- callJStatic("org.apache.spark.ml.r.GaussianMixtureWrapper", "fit", data@sdf, + formula, as.integer(k), as.integer(maxIter), as.numeric(tol)) + return(new("GaussianMixtureModel", jobj = jobj)) + }) + +# Get the summary of a multivariate gaussian mixture model + +#' @param object a fitted gaussian mixture model. +#' @param ... currently not used argument(s) passed to the method. +#' @return \code{summary} returns the model's lambda, mu, sigma and posterior. +#' @aliases spark.gaussianMixture,SparkDataFrame,formula-method +#' @rdname spark.gaussianMixture +#' @export +#' @note summary(GaussianMixtureModel) since 2.1.0 +setMethod("summary", signature(object = "GaussianMixtureModel"), + function(object, ...) { + jobj <- object@jobj + is.loaded <- callJMethod(jobj, "isLoaded") + lambda <- unlist(callJMethod(jobj, "lambda")) + muList <- callJMethod(jobj, "mu") + sigmaList <- callJMethod(jobj, "sigma") + k <- callJMethod(jobj, "k") + dim <- callJMethod(jobj, "dim") + mu <- c() + for (i in 1 : k) { + start <- (i - 1) * dim + 1 + end <- i * dim + mu[[i]] <- unlist(muList[start : end]) + } + sigma <- c() + for (i in 1 : k) { + start <- (i - 1) * dim * dim + 1 + end <- i * dim * dim + sigma[[i]] <- t(matrix(sigmaList[start : end], ncol = dim)) + } + posterior <- if (is.loaded) { + NULL + } else { + dataFrame(callJMethod(jobj, "posterior")) + } + return(list(lambda = lambda, mu = mu, sigma = sigma, + posterior = posterior, is.loaded = is.loaded)) + }) + +# Predicted values based on a gaussian mixture model + +#' @param newData a SparkDataFrame for testing. +#' @return \code{predict} returns a SparkDataFrame containing predicted labels in a column named +#' "prediction". +#' @aliases predict,GaussianMixtureModel,SparkDataFrame-method +#' @rdname spark.gaussianMixture +#' @export +#' @note predict(GaussianMixtureModel) since 2.1.0 +setMethod("predict", signature(object = "GaussianMixtureModel"), + function(object, newData) { + return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + }) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index b759b28927365..96179864a88bf 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -508,4 +508,66 @@ test_that("spark.isotonicRegression", { unlink(modelPath) }) +test_that("spark.gaussianMixture", { + # R code to reproduce the result. 
+ # nolint start + #' library(mvtnorm) + #' set.seed(100) + #' a <- rmvnorm(4, c(0, 0)) + #' b <- rmvnorm(6, c(3, 4)) + #' data <- rbind(a, b) + #' model <- mvnormalmixEM(data, k = 2) + #' model$lambda + # + # [1] 0.4 0.6 + # + #' model$mu + # + # [1] -0.2614822 0.5128697 + # [1] 2.647284 4.544682 + # + #' model$sigma + # + # [[1]] + # [,1] [,2] + # [1,] 0.08427399 0.00548772 + # [2,] 0.00548772 0.09090715 + # + # [[2]] + # [,1] [,2] + # [1,] 0.1641373 -0.1673806 + # [2,] -0.1673806 0.7508951 + # nolint end + data <- list(list(-0.50219235, 0.1315312), list(-0.07891709, 0.8867848), + list(0.11697127, 0.3186301), list(-0.58179068, 0.7145327), + list(2.17474057, 3.6401379), list(3.08988614, 4.0962745), + list(2.79836605, 4.7398405), list(3.12337950, 3.9706833), + list(2.61114575, 4.5108563), list(2.08618581, 6.3102968)) + df <- createDataFrame(data, c("x1", "x2")) + model <- spark.gaussianMixture(df, ~ x1 + x2, k = 2) + stats <- summary(model) + rLambda <- c(0.4, 0.6) + rMu <- c(-0.2614822, 0.5128697, 2.647284, 4.544682) + rSigma <- c(0.08427399, 0.00548772, 0.00548772, 0.09090715, + 0.1641373, -0.1673806, -0.1673806, 0.7508951) + expect_equal(stats$lambda, rLambda) + expect_equal(unlist(stats$mu), rMu, tolerance = 1e-3) + expect_equal(unlist(stats$sigma), rSigma, tolerance = 1e-3) + p <- collect(select(predict(model, df), "prediction")) + expect_equal(p$prediction, c(0, 0, 0, 0, 1, 1, 1, 1, 1, 1)) + + # Test model save/load + modelPath <- tempfile(pattern = "spark-gaussianMixture", fileext = ".tmp") + write.ml(model, modelPath) + expect_error(write.ml(model, modelPath)) + write.ml(model, modelPath, overwrite = TRUE) + model2 <- read.ml(modelPath) + stats2 <- summary(model2) + expect_equal(stats$lambda, stats2$lambda) + expect_equal(unlist(stats$mu), unlist(stats2$mu)) + expect_equal(unlist(stats$sigma), unlist(stats2$sigma)) + + unlink(modelPath) +}) + sparkR.session.stop() diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala new file mode 100644 index 0000000000000..1e8b3bbab6655 --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.ml.r + +import org.apache.hadoop.fs.Path +import org.json4s._ +import org.json4s.JsonDSL._ +import org.json4s.jackson.JsonMethods._ + +import org.apache.spark.ml.{Pipeline, PipelineModel} +import org.apache.spark.ml.attribute.AttributeGroup +import org.apache.spark.ml.clustering.{GaussianMixture, GaussianMixtureModel} +import org.apache.spark.ml.feature.RFormula +import org.apache.spark.ml.linalg.Vector +import org.apache.spark.ml.util.{MLReadable, MLReader, MLWritable, MLWriter} +import org.apache.spark.sql.{DataFrame, Dataset} +import org.apache.spark.sql.functions._ + +private[r] class GaussianMixtureWrapper private ( + val pipeline: PipelineModel, + val dim: Int, + val isLoaded: Boolean = false) extends MLWritable { + + private val gmm: GaussianMixtureModel = pipeline.stages(1).asInstanceOf[GaussianMixtureModel] + + lazy val k: Int = gmm.getK + + lazy val lambda: Array[Double] = gmm.weights + + lazy val mu: Array[Double] = gmm.gaussians.flatMap(_.mean.toArray) + + lazy val sigma: Array[Double] = gmm.gaussians.flatMap(_.cov.toArray) + + lazy val vectorToArray = udf { probability: Vector => probability.toArray } + lazy val posterior: DataFrame = gmm.summary.probability + .withColumn("posterior", vectorToArray(col(gmm.summary.probabilityCol))) + .drop(gmm.summary.probabilityCol) + + def transform(dataset: Dataset[_]): DataFrame = { + pipeline.transform(dataset).drop(gmm.getFeaturesCol) + } + + override def write: MLWriter = new GaussianMixtureWrapper.GaussianMixtureWrapperWriter(this) + +} + +private[r] object GaussianMixtureWrapper extends MLReadable[GaussianMixtureWrapper] { + + def fit( + data: DataFrame, + formula: String, + k: Int, + maxIter: Int, + tol: Double): GaussianMixtureWrapper = { + + val rFormulaModel = new RFormula() + .setFormula(formula) + .setFeaturesCol("features") + .fit(data) + + // get feature names from output schema + val schema = rFormulaModel.transform(data).schema + val featureAttrs = AttributeGroup.fromStructField(schema(rFormulaModel.getFeaturesCol)) + .attributes.get + val features = featureAttrs.map(_.name.get) + val dim = features.length + + val gm = new GaussianMixture() + .setK(k) + .setMaxIter(maxIter) + .setTol(tol) + + val pipeline = new Pipeline() + .setStages(Array(rFormulaModel, gm)) + .fit(data) + + new GaussianMixtureWrapper(pipeline, dim) + } + + override def read: MLReader[GaussianMixtureWrapper] = new GaussianMixtureWrapperReader + + override def load(path: String): GaussianMixtureWrapper = super.load(path) + + class GaussianMixtureWrapperWriter(instance: GaussianMixtureWrapper) extends MLWriter { + + override protected def saveImpl(path: String): Unit = { + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + + val rMetadata = ("class" -> instance.getClass.getName) ~ + ("dim" -> instance.dim) + val rMetadataJson: String = compact(render(rMetadata)) + + sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath) + instance.pipeline.save(pipelinePath) + } + } + + class GaussianMixtureWrapperReader extends MLReader[GaussianMixtureWrapper] { + + override def load(path: String): GaussianMixtureWrapper = { + implicit val format = DefaultFormats + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + val pipeline = PipelineModel.load(pipelinePath) + + val rMetadataStr = sc.textFile(rMetadataPath, 1).first() + val rMetadata = parse(rMetadataStr) + val dim = (rMetadata \ 
"dim").extract[Int] + new GaussianMixtureWrapper(pipeline, dim, isLoaded = true) + } + } +} diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala index f9a44d60e691a..88ac26bc5e351 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala @@ -46,6 +46,8 @@ private[r] object RWrappers extends MLReader[Object] { KMeansWrapper.load(path) case "org.apache.spark.ml.r.IsotonicRegressionWrapper" => IsotonicRegressionWrapper.load(path) + case "org.apache.spark.ml.r.GaussianMixtureWrapper" => + GaussianMixtureWrapper.load(path) case _ => throw new SparkException(s"SparkR read.ml does not support load $className") } From cc97ea188e1d5b8e851d1a8438b8af092783ec04 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 17 Aug 2016 11:42:57 -0700 Subject: [PATCH 011/270] [SPARK-16736][CORE][SQL] purge superfluous fs calls A review of the code, working back from Hadoop's `FileSystem.exists()` and `FileSystem.isDirectory()` code, then removing uses of the calls when superfluous. 1. delete is harmless if called on a nonexistent path, so don't do any checks before deletes 1. any `FileSystem.exists()` check before `getFileStatus()` or `open()` is superfluous as the operation itself does the check. Instead the `FileNotFoundException` is caught and triggers the downgraded path. When a `FileNotFoundException` was thrown before, the code still creates a new FNFE with the error messages. Though now the inner exceptions are nested, for easier diagnostics. Initially, relying on Jenkins test runs. One troublespot here is that some of the codepaths are clearly error situations; it's not clear that they have coverage anyway. Trying to create the failure conditions in tests would be ideal, but it will also be hard. Author: Steve Loughran Closes #14371 from steveloughran/cloud/SPARK-16736-superfluous-fs-calls. 
--- .../scala/org/apache/spark/SparkContext.scala | 3 -- .../deploy/history/FsHistoryProvider.scala | 27 +++++++--------- .../spark/rdd/ReliableCheckpointRDD.scala | 31 ++++++++---------- .../spark/rdd/ReliableRDDCheckpointData.scala | 7 +--- .../scheduler/EventLoggingListener.scala | 13 ++------ .../spark/repl/ExecutorClassLoader.scala | 9 +++--- .../state/HDFSBackedStateStoreProvider.scala | 32 ++++++++++--------- .../hive/JavaMetastoreDataSourcesSuite.java | 4 +-- .../sql/hive/MetastoreDataSourcesSuite.scala | 2 +- .../apache/spark/streaming/Checkpoint.scala | 17 ++++------ .../util/FileBasedWriteAheadLog.scala | 27 ++++++++++++---- .../spark/streaming/util/HdfsUtils.scala | 24 +++++++------- .../org/apache/spark/deploy/yarn/Client.scala | 5 ++- 13 files changed, 92 insertions(+), 109 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index a6853fe3989a8..60f042f1e07c5 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -1410,9 +1410,6 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli val scheme = new URI(schemeCorrectedPath).getScheme if (!Array("http", "https", "ftp").contains(scheme)) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) - if (!fs.exists(hadoopPath)) { - throw new FileNotFoundException(s"Added file $hadoopPath does not exist.") - } val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw new SparkException(s"addFile does not support local directories when not running " + diff --git a/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala b/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala index bc09935f93f80..6874aa5f938ac 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala @@ -193,16 +193,18 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) private def startPolling(): Unit = { // Validate the log directory. val path = new Path(logDir) - if (!fs.exists(path)) { - var msg = s"Log directory specified does not exist: $logDir" - if (logDir == DEFAULT_LOG_DIR) { - msg += " Did you configure the correct one through spark.history.fs.logDirectory?" + try { + if (!fs.getFileStatus(path).isDirectory) { + throw new IllegalArgumentException( + "Logging directory specified is not a directory: %s".format(logDir)) } - throw new IllegalArgumentException(msg) - } - if (!fs.getFileStatus(path).isDirectory) { - throw new IllegalArgumentException( - "Logging directory specified is not a directory: %s".format(logDir)) + } catch { + case f: FileNotFoundException => + var msg = s"Log directory specified does not exist: $logDir" + if (logDir == DEFAULT_LOG_DIR) { + msg += " Did you configure the correct one through spark.history.fs.logDirectory?" + } + throw new FileNotFoundException(msg).initCause(f) } // Disable the background thread during tests. 
@@ -495,12 +497,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) val leftToClean = new mutable.ListBuffer[FsApplicationAttemptInfo] attemptsToClean.foreach { attempt => try { - val path = new Path(logDir, attempt.logPath) - if (fs.exists(path)) { - if (!fs.delete(path, true)) { - logWarning(s"Error deleting ${path}") - } - } + fs.delete(new Path(logDir, attempt.logPath), true) } catch { case e: AccessControlException => logInfo(s"No permission to delete ${attempt.logPath}, ignoring.") diff --git a/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala index fddb9353018a8..ab6554fd8a7e7 100644 --- a/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala @@ -17,7 +17,7 @@ package org.apache.spark.rdd -import java.io.IOException +import java.io.{FileNotFoundException, IOException} import scala.reflect.ClassTag import scala.util.control.NonFatal @@ -166,9 +166,6 @@ private[spark] object ReliableCheckpointRDD extends Logging { val tempOutputPath = new Path(outputDir, s".$finalOutputName-attempt-${ctx.attemptNumber()}") - if (fs.exists(tempOutputPath)) { - throw new IOException(s"Checkpoint failed: temporary path $tempOutputPath already exists") - } val bufferSize = env.conf.getInt("spark.buffer.size", 65536) val fileOutputStream = if (blockSize < 0) { @@ -240,22 +237,20 @@ private[spark] object ReliableCheckpointRDD extends Logging { val bufferSize = sc.conf.getInt("spark.buffer.size", 65536) val partitionerFilePath = new Path(checkpointDirPath, checkpointPartitionerFileName) val fs = partitionerFilePath.getFileSystem(sc.hadoopConfiguration) - if (fs.exists(partitionerFilePath)) { - val fileInputStream = fs.open(partitionerFilePath, bufferSize) - val serializer = SparkEnv.get.serializer.newInstance() - val deserializeStream = serializer.deserializeStream(fileInputStream) - val partitioner = Utils.tryWithSafeFinally[Partitioner] { - deserializeStream.readObject[Partitioner] - } { - deserializeStream.close() - } - logDebug(s"Read partitioner from $partitionerFilePath") - Some(partitioner) - } else { - logDebug("No partitioner file") - None + val fileInputStream = fs.open(partitionerFilePath, bufferSize) + val serializer = SparkEnv.get.serializer.newInstance() + val deserializeStream = serializer.deserializeStream(fileInputStream) + val partitioner = Utils.tryWithSafeFinally[Partitioner] { + deserializeStream.readObject[Partitioner] + } { + deserializeStream.close() } + logDebug(s"Read partitioner from $partitionerFilePath") + Some(partitioner) } catch { + case e: FileNotFoundException => + logDebug("No partitioner file", e) + None case NonFatal(e) => logWarning(s"Error reading partitioner from $checkpointDirPath, " + s"partitioner will not be recovered which may lead to performance loss", e) diff --git a/core/src/main/scala/org/apache/spark/rdd/ReliableRDDCheckpointData.scala b/core/src/main/scala/org/apache/spark/rdd/ReliableRDDCheckpointData.scala index 74f187642af21..b6d723c682796 100644 --- a/core/src/main/scala/org/apache/spark/rdd/ReliableRDDCheckpointData.scala +++ b/core/src/main/scala/org/apache/spark/rdd/ReliableRDDCheckpointData.scala @@ -80,12 +80,7 @@ private[spark] object ReliableRDDCheckpointData extends Logging { /** Clean up the files associated with the checkpoint data for this RDD. 
*/ def cleanCheckpoint(sc: SparkContext, rddId: Int): Unit = { checkpointPath(sc, rddId).foreach { path => - val fs = path.getFileSystem(sc.hadoopConfiguration) - if (fs.exists(path)) { - if (!fs.delete(path, true)) { - logWarning(s"Error deleting ${path.toString()}") - } - } + path.getFileSystem(sc.hadoopConfiguration).delete(path, true) } } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala index a7d06391176d2..ce7877469f03f 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala @@ -91,7 +91,7 @@ private[spark] class EventLoggingListener( */ def start() { if (!fileSystem.getFileStatus(new Path(logBaseDir)).isDirectory) { - throw new IllegalArgumentException(s"Log directory $logBaseDir does not exist.") + throw new IllegalArgumentException(s"Log directory $logBaseDir is not a directory.") } val workingPath = logPath + IN_PROGRESS @@ -100,11 +100,8 @@ private[spark] class EventLoggingListener( val defaultFs = FileSystem.getDefaultUri(hadoopConf).getScheme val isDefaultLocal = defaultFs == null || defaultFs == "file" - if (shouldOverwrite && fileSystem.exists(path)) { + if (shouldOverwrite && fileSystem.delete(path, true)) { logWarning(s"Event log $path already exists. Overwriting...") - if (!fileSystem.delete(path, true)) { - logWarning(s"Error deleting $path") - } } /* The Hadoop LocalFileSystem (r1.0.4) has known issues with syncing (HADOOP-7844). @@ -301,12 +298,6 @@ private[spark] object EventLoggingListener extends Logging { * @return input stream that holds one JSON record per line. */ def openEventLog(log: Path, fs: FileSystem): InputStream = { - // It's not clear whether FileSystem.open() throws FileNotFoundException or just plain - // IOException when a file does not exist, so try our best to throw a proper exception. - if (!fs.exists(log)) { - throw new FileNotFoundException(s"File $log does not exist.") - } - val in = new BufferedInputStream(fs.open(log)) // Compression codec is encoded as an extension, e.g. 
app_123.lzf diff --git a/repl/src/main/scala/org/apache/spark/repl/ExecutorClassLoader.scala b/repl/src/main/scala/org/apache/spark/repl/ExecutorClassLoader.scala index 2f07395edf8d1..df13b32451af2 100644 --- a/repl/src/main/scala/org/apache/spark/repl/ExecutorClassLoader.scala +++ b/repl/src/main/scala/org/apache/spark/repl/ExecutorClassLoader.scala @@ -17,7 +17,7 @@ package org.apache.spark.repl -import java.io.{ByteArrayOutputStream, FilterInputStream, InputStream, IOException} +import java.io.{ByteArrayOutputStream, FileNotFoundException, FilterInputStream, InputStream, IOException} import java.net.{HttpURLConnection, URI, URL, URLEncoder} import java.nio.channels.Channels @@ -147,10 +147,11 @@ class ExecutorClassLoader( private def getClassFileInputStreamFromFileSystem(fileSystem: FileSystem)( pathInDirectory: String): InputStream = { val path = new Path(directory, pathInDirectory) - if (fileSystem.exists(path)) { + try { fileSystem.open(path) - } else { - throw new ClassNotFoundException(s"Class file not found at path $path") + } catch { + case _: FileNotFoundException => + throw new ClassNotFoundException(s"Class file not found at path $path") } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala index 3335755fd3b67..bec966b15ed0f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.execution.streaming.state -import java.io.{DataInputStream, DataOutputStream, IOException} +import java.io.{DataInputStream, DataOutputStream, FileNotFoundException, IOException} import scala.collection.JavaConverters._ import scala.collection.mutable @@ -171,7 +171,7 @@ private[state] class HDFSBackedStateStoreProvider( if (tempDeltaFileStream != null) { tempDeltaFileStream.close() } - if (tempDeltaFile != null && fs.exists(tempDeltaFile)) { + if (tempDeltaFile != null) { fs.delete(tempDeltaFile, true) } logInfo("Aborted") @@ -278,14 +278,12 @@ private[state] class HDFSBackedStateStoreProvider( /** Initialize the store provider */ private def initialize(): Unit = { - if (!fs.exists(baseDir)) { + try { fs.mkdirs(baseDir) - } else { - if (!fs.isDirectory(baseDir)) { + } catch { + case e: IOException => throw new IllegalStateException( - s"Cannot use ${id.checkpointLocation} for storing state data for $this as " + - s"$baseDir already exists and is not a directory") - } + s"Cannot use ${id.checkpointLocation} for storing state data for $this: $e ", e) } } @@ -340,13 +338,16 @@ private[state] class HDFSBackedStateStoreProvider( private def updateFromDeltaFile(version: Long, map: MapType): Unit = { val fileToRead = deltaFile(version) - if (!fs.exists(fileToRead)) { - throw new IllegalStateException( - s"Error reading delta file $fileToRead of $this: $fileToRead does not exist") - } var input: DataInputStream = null + val sourceStream = try { + fs.open(fileToRead) + } catch { + case f: FileNotFoundException => + throw new IllegalStateException( + s"Error reading delta file $fileToRead of $this: $fileToRead does not exist", f) + } try { - input = decompressStream(fs.open(fileToRead)) + input = decompressStream(sourceStream) var eof = false while(!eof) { @@ -405,8 +406,6 @@ private[state] class 
HDFSBackedStateStoreProvider( private def readSnapshotFile(version: Long): Option[MapType] = { val fileToRead = snapshotFile(version) - if (!fs.exists(fileToRead)) return None - val map = new MapType() var input: DataInputStream = null @@ -443,6 +442,9 @@ private[state] class HDFSBackedStateStoreProvider( } logInfo(s"Read snapshot file for version $version of $this from $fileToRead") Some(map) + } catch { + case _: FileNotFoundException => + None } finally { if (input != null) input.close() } diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java index e73117c8144ce..061c7431a6362 100644 --- a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java +++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java @@ -75,9 +75,7 @@ public void setUp() throws IOException { hiveManagedPath = new Path( catalog.hiveDefaultTableFilePath(new TableIdentifier("javaSavedTable"))); fs = hiveManagedPath.getFileSystem(sc.hadoopConfiguration()); - if (fs.exists(hiveManagedPath)){ - fs.delete(hiveManagedPath, true); - } + fs.delete(hiveManagedPath, true); List jsonObjects = new ArrayList<>(10); for (int i = 0; i < 10; i++) { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala index c36b0275f4161..3892fe87e2a80 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala @@ -375,7 +375,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv sessionState.catalog.hiveDefaultTableFilePath(TableIdentifier("ctasJsonTable")) val filesystemPath = new Path(expectedPath) val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf()) - if (fs.exists(filesystemPath)) fs.delete(filesystemPath, true) + fs.delete(filesystemPath, true) // It is a managed table when we do not specify the location. 
sql( diff --git a/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala index 398fa6500f093..5cbad8bf3ce6e 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala @@ -117,7 +117,7 @@ object Checkpoint extends Logging { val path = new Path(checkpointDir) val fs = fsOption.getOrElse(path.getFileSystem(SparkHadoopUtil.get.conf)) - if (fs.exists(path)) { + try { val statuses = fs.listStatus(path) if (statuses != null) { val paths = statuses.map(_.getPath) @@ -127,9 +127,10 @@ object Checkpoint extends Logging { logWarning(s"Listing $path returned null") Seq.empty } - } else { - logWarning(s"Checkpoint directory $path does not exist") - Seq.empty + } catch { + case _: FileNotFoundException => + logWarning(s"Checkpoint directory $path does not exist") + Seq.empty } } @@ -229,9 +230,7 @@ class CheckpointWriter( logInfo(s"Saving checkpoint for time $checkpointTime to file '$checkpointFile'") // Write checkpoint to temp file - if (fs.exists(tempFile)) { - fs.delete(tempFile, true) // just in case it exists - } + fs.delete(tempFile, true) // just in case it exists val fos = fs.create(tempFile) Utils.tryWithSafeFinally { fos.write(bytes) @@ -242,9 +241,7 @@ class CheckpointWriter( // If the checkpoint file exists, back it up // If the backup exists as well, just delete it, otherwise rename will fail if (fs.exists(checkpointFile)) { - if (fs.exists(backupFile)) { - fs.delete(backupFile, true) // just in case it exists - } + fs.delete(backupFile, true) // just in case it exists if (!fs.rename(checkpointFile, backupFile)) { logWarning(s"Could not rename $checkpointFile to $backupFile") } diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala index 9b689f01b8d39..845f554308c43 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala @@ -16,6 +16,7 @@ */ package org.apache.spark.streaming.util +import java.io.FileNotFoundException import java.nio.ByteBuffer import java.util.{Iterator => JIterator} import java.util.concurrent.RejectedExecutionException @@ -231,13 +232,25 @@ private[streaming] class FileBasedWriteAheadLog( val logDirectoryPath = new Path(logDirectory) val fileSystem = HdfsUtils.getFileSystemForPath(logDirectoryPath, hadoopConf) - if (fileSystem.exists(logDirectoryPath) && - fileSystem.getFileStatus(logDirectoryPath).isDirectory) { - val logFileInfo = logFilesTologInfo(fileSystem.listStatus(logDirectoryPath).map { _.getPath }) - pastLogs.clear() - pastLogs ++= logFileInfo - logInfo(s"Recovered ${logFileInfo.size} write ahead log files from $logDirectory") - logDebug(s"Recovered files are:\n${logFileInfo.map(_.path).mkString("\n")}") + try { + // If you call listStatus(file) it returns a stat of the file in the array, + // rather than an array listing all the children. + // This makes it hard to differentiate listStatus(file) and + // listStatus(dir-with-one-child) except by examining the name of the returned status, + // and once you've got symlinks in the mix that differentiation isn't easy. + // Checking for the path being a directory is one more call to the filesystem, but + // leads to much clearer code. 
+ if (fileSystem.getFileStatus(logDirectoryPath).isDirectory) { + val logFileInfo = logFilesTologInfo( + fileSystem.listStatus(logDirectoryPath).map { _.getPath }) + pastLogs.clear() + pastLogs ++= logFileInfo + logInfo(s"Recovered ${logFileInfo.size} write ahead log files from $logDirectory") + logDebug(s"Recovered files are:\n${logFileInfo.map(_.path).mkString("\n")}") + } + } catch { + case _: FileNotFoundException => + // there is no log directory, hence nothing to recover } } diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala index 13a765d035ee8..6a3b3200dccdb 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala @@ -16,7 +16,7 @@ */ package org.apache.spark.streaming.util -import java.io.IOException +import java.io.{FileNotFoundException, IOException} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs._ @@ -44,18 +44,16 @@ private[streaming] object HdfsUtils { def getInputStream(path: String, conf: Configuration): FSDataInputStream = { val dfsPath = new Path(path) val dfs = getFileSystemForPath(dfsPath, conf) - if (dfs.isFile(dfsPath)) { - try { - dfs.open(dfsPath) - } catch { - case e: IOException => - // If we are really unlucky, the file may be deleted as we're opening the stream. - // This can happen as clean up is performed by daemon threads that may be left over from - // previous runs. - if (!dfs.isFile(dfsPath)) null else throw e - } - } else { - null + try { + dfs.open(dfsPath) + } catch { + case _: FileNotFoundException => + null + case e: IOException => + // If we are really unlucky, the file may be deleted as we're opening the stream. + // This can happen as clean up is performed by daemon threads that may be left over from + // previous runs. + if (!dfs.isFile(dfsPath)) null else throw e } } diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala index e3572d781b0db..93684005f1cc0 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala @@ -189,9 +189,8 @@ private[spark] class Client( try { val preserveFiles = sparkConf.get(PRESERVE_STAGING_FILES) val fs = stagingDirPath.getFileSystem(hadoopConf) - if (!preserveFiles && fs.exists(stagingDirPath)) { - logInfo("Deleting staging directory " + stagingDirPath) - fs.delete(stagingDirPath, true) + if (!preserveFiles && fs.delete(stagingDirPath, true)) { + logInfo(s"Deleted staging directory $stagingDirPath") } } catch { case ioe: IOException => From d60af8f6aa53373de1333cc642cf2a9d7b39d912 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 17 Aug 2016 13:31:34 -0700 Subject: [PATCH 012/270] [SPARK-17096][SQL][STREAMING] Improve exception string reported through the StreamingQueryListener ## What changes were proposed in this pull request? Currently, the stackTrace (as `Array[StackTraceElements]`) reported through StreamingQueryListener.onQueryTerminated is useless as it has the stack trace of where StreamingQueryException is defined, not the stack trace of underlying exception. 
For example, if a streaming query fails because of a / by zero exception in a task, the `QueryTerminated.stackTrace` will have
```
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runBatches(StreamExecution.scala:211)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:124)
```
This is basically useless, as it is the location where the StreamingQueryException was defined, not where the underlying failure occurred. What we want is the underlying failure. Here is the right way to reason about what should be posted through StreamingQueryListener.onQueryTerminated:
- The actual exception could either be a SparkException, or an arbitrary exception.
- SparkException reports the relevant executor stack trace of a failed task as a string in the exception message. The `Array[StackTraceElement]` returned by `SparkException.stackTrace()` is mostly irrelevant.
- For any arbitrary exception, the `Array[StackTraceElement]` returned by `exception.stackTrace()` may be relevant.
- When there is an error in a streaming query, it's hard to reason whether the `Array[StackTraceElement]` is useful or not. In fact, it is not clear whether it is even useful to report the stack trace as this array of Java objects. It may be sufficient to report the stack trace as a string, along with the message. This is how Spark reports executor stack traces in SparkException messages.
- Hence, this PR simplifies the API by removing the array `stackTrace` from `QueryTerminated`. Instead, `exception` returns a string containing the message and the stack trace of the actual underlying exception that failed the streaming query (i.e. not that of the StreamingQueryException). Anyone interested in the actual stack trace as an array can always access it through `streamingQuery.exception`, which returns the exception object.
With this change, if a streaming query fails because of a / by zero exception in a task, the `QueryTerminated.exception` will be ``` org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 0.0 failed 1 times, most recent failure: Lost task 1.0 in stage 0.0 (TID 1, localhost): java.lang.ArithmeticException: / by zero at org.apache.spark.sql.streaming.StreamingQueryListenerSuite$$anonfun$5$$anonfun$apply$mcV$sp$4$$anonfun$apply$mcV$sp$5.apply$mcII$sp(StreamingQueryListenerSuite.scala:153) at org.apache.spark.sql.streaming.StreamingQueryListenerSuite$$anonfun$5$$anonfun$apply$mcV$sp$4$$anonfun$apply$mcV$sp$5.apply(StreamingQueryListenerSuite.scala:153) at org.apache.spark.sql.streaming.StreamingQueryListenerSuite$$anonfun$5$$anonfun$apply$mcV$sp$4$$anonfun$apply$mcV$sp$5.apply(StreamingQueryListenerSuite.scala:153) at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source) at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43) at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370) at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:232) at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:226) at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:803) at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:803) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319) at org.apache.spark.rdd.RDD.iterator(RDD.scala:283) at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70) at org.apache.spark.scheduler.Task.run(Task.scala:86) at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:744) Driver stacktrace: at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1429) at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1417) at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1416) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48) at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1416) at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802) at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802) ... ``` It contains the relevant executor stack trace. In a case non-SparkException, if the streaming source MemoryStream throws an exception, exception message will have the relevant stack trace. 
``` java.lang.RuntimeException: this is the exception message at org.apache.spark.sql.execution.streaming.MemoryStream.getBatch(memory.scala:103) at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$5.apply(StreamExecution.scala:316) at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$5.apply(StreamExecution.scala:313) at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241) at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241) at scala.collection.Iterator$class.foreach(Iterator.scala:893) at scala.collection.AbstractIterator.foreach(Iterator.scala:1336) at scala.collection.IterableLike$class.foreach(IterableLike.scala:72) at org.apache.spark.sql.execution.streaming.StreamProgress.foreach(StreamProgress.scala:25) at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241) at org.apache.spark.sql.execution.streaming.StreamProgress.flatMap(StreamProgress.scala:25) at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runBatch(StreamExecution.scala:313) at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$org$apache$spark$sql$execution$streaming$StreamExecution$$runBatches$1.apply$mcZ$sp(StreamExecution.scala:197) at org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:43) at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runBatches(StreamExecution.scala:187) at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:124) ``` Note that this change in the public `QueryTerminated` class is okay as the APIs are still experimental. ## How was this patch tested? Unit tests that test whether the right information is present in the exception message reported through QueryTerminated object. Author: Tathagata Das Closes #14675 from tdas/SPARK-17096. 
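A minimal listener sketch that consumes the new string-valued `exception`; the event and method names assume the 2.0-era `QueryStarted`/`QueryProgress`/`QueryTerminated` API shown in this patch, and the registration call at the end is illustrative:

```
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.sql.streaming.StreamingQueryListener._

// Logs terminations; `exception` is now Option[String] and already embeds the
// message plus the stack trace of the underlying failure (not the wrapper's).
class TerminationLogger extends StreamingQueryListener {
  override def onQueryStarted(queryStarted: QueryStarted): Unit = ()
  override def onQueryProgress(queryProgress: QueryProgress): Unit = ()
  override def onQueryTerminated(queryTerminated: QueryTerminated): Unit = {
    queryTerminated.exception.foreach { e =>
      println(s"Query ${queryTerminated.queryInfo.id} failed:\n$e")
    }
  }
}

// Illustrative registration on an existing SparkSession:
// spark.streams.addListener(new TerminationLogger)
```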
--- .../sql/execution/streaming/StreamExecution.scala | 5 +---- .../sql/streaming/StreamingQueryException.scala | 3 ++- .../sql/streaming/StreamingQueryListener.scala | 3 +-- .../sql/streaming/StreamingQueryListenerSuite.scala | 13 ++++++------- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala index 66fb5a4bdeb7f..4d05af0b60358 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala @@ -217,10 +217,7 @@ class StreamExecution( } finally { state = TERMINATED sparkSession.streams.notifyQueryTermination(StreamExecution.this) - postEvent(new QueryTerminated( - this.toInfo, - exception.map(_.getMessage), - exception.map(_.getStackTrace.toSeq).getOrElse(Nil))) + postEvent(new QueryTerminated(this.toInfo, exception.map(_.cause).map(Utils.exceptionString))) terminationLatch.countDown() } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryException.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryException.scala index 90f95ca9d4229..bd3e5a5618ec4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryException.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryException.scala @@ -22,7 +22,8 @@ import org.apache.spark.sql.execution.streaming.{Offset, StreamExecution} /** * :: Experimental :: - * Exception that stopped a [[StreamingQuery]]. + * Exception that stopped a [[StreamingQuery]]. Use `cause` get the actual exception + * that caused the failure. 
* @param query Query that caused the exception * @param message Message of this exception * @param cause Internal cause of this exception diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryListener.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryListener.scala index 3b3cead3a66de..db606abb8ce43 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryListener.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryListener.scala @@ -108,6 +108,5 @@ object StreamingQueryListener { @Experimental class QueryTerminated private[sql]( val queryInfo: StreamingQueryInfo, - val exception: Option[String], - val stackTrace: Seq[StackTraceElement]) extends Event + val exception: Option[String]) extends Event } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala index 7f4d28cf0598f..77602e8167fa3 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala @@ -94,7 +94,6 @@ class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { assert(status.id === query.id) assert(status.sourceStatuses(0).offsetDesc === Some(LongOffset(0).toString)) assert(status.sinkStatus.offsetDesc === CompositeOffset.fill(LongOffset(0)).toString) - assert(listener.terminationStackTrace.isEmpty) assert(listener.terminationException === None) } listener.checkAsyncErrors() @@ -147,7 +146,7 @@ class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { } } - test("exception should be reported in QueryTerminated") { + testQuietly("exception should be reported in QueryTerminated") { val listener = new QueryStatusCollector withListenerAdded(listener) { val input = MemoryStream[Int] @@ -159,8 +158,11 @@ class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { spark.sparkContext.listenerBus.waitUntilEmpty(10000) assert(listener.terminationStatus !== null) assert(listener.terminationException.isDefined) + // Make sure that the exception message reported through listener + // contains the actual exception and relevant stack trace + assert(!listener.terminationException.get.contains("StreamingQueryException")) assert(listener.terminationException.get.contains("java.lang.ArithmeticException")) - assert(listener.terminationStackTrace.nonEmpty) + assert(listener.terminationException.get.contains("StreamingQueryListenerSuite")) } ) } @@ -205,8 +207,7 @@ class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { val exception = new RuntimeException("exception") val queryQueryTerminated = new StreamingQueryListener.QueryTerminated( queryTerminatedInfo, - Some(exception.getMessage), - exception.getStackTrace) + Some(exception.getMessage)) val json = JsonProtocol.sparkEventToJson(queryQueryTerminated) val newQueryTerminated = JsonProtocol.sparkEventFromJson(json) @@ -262,7 +263,6 @@ class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { @volatile var startStatus: StreamingQueryInfo = null @volatile var terminationStatus: StreamingQueryInfo = null @volatile var terminationException: Option[String] = null - @volatile var terminationStackTrace: Seq[StackTraceElement] = null val progressStatuses = new ConcurrentLinkedQueue[StreamingQueryInfo] @@ -296,7 +296,6 @@ class 
StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { assert(startStatus != null, "onQueryTerminated called before onQueryStarted") terminationStatus = queryTerminated.queryInfo terminationException = queryTerminated.exception - terminationStackTrace = queryTerminated.stackTrace } asyncTestWaiter.dismiss() } From e6bef7d52f0e19ec771fb0f3e96c7ddbd1a6a19b Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Wed, 17 Aug 2016 16:31:42 -0700 Subject: [PATCH 013/270] [SPARK-17038][STREAMING] fix metrics retrieval source of 'lastReceivedBatch' https://issues.apache.org/jira/browse/SPARK-17038 ## What changes were proposed in this pull request? StreamingSource's lastReceivedBatch_submissionTime, lastReceivedBatch_processingTimeStart, and lastReceivedBatch_processingTimeEnd all use data from lastCompletedBatch instead of lastReceivedBatch. In particular, this makes it impossible to match lastReceivedBatch_records with a batchID/submission time. This is apparent when looking at StreamingSource.scala, lines 89-94. ## How was this patch tested? Manually running unit tests on local laptop Author: Xin Ren Closes #14681 from keypointt/SPARK-17038. --- .../scala/org/apache/spark/streaming/StreamingSource.scala | 6 +++--- .../streaming/ui/StreamingJobProgressListenerSuite.scala | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StreamingSource.scala b/streaming/src/main/scala/org/apache/spark/streaming/StreamingSource.scala index 9697437dd2fe5..0b306a28d1a59 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/StreamingSource.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/StreamingSource.scala @@ -87,11 +87,11 @@ private[streaming] class StreamingSource(ssc: StreamingContext) extends Source { // Gauge for last received batch, useful for monitoring the streaming job's running status, // displayed data -1 for any abnormal condition. registerGaugeWithOption("lastReceivedBatch_submissionTime", - _.lastCompletedBatch.map(_.submissionTime), -1L) + _.lastReceivedBatch.map(_.submissionTime), -1L) registerGaugeWithOption("lastReceivedBatch_processingStartTime", - _.lastCompletedBatch.flatMap(_.processingStartTime), -1L) + _.lastReceivedBatch.flatMap(_.processingStartTime), -1L) registerGaugeWithOption("lastReceivedBatch_processingEndTime", - _.lastCompletedBatch.flatMap(_.processingEndTime), -1L) + _.lastReceivedBatch.flatMap(_.processingEndTime), -1L) // Gauge for last received batch records. 
registerGauge("lastReceivedBatch_records", _.lastReceivedBatchRecords.values.sum, 0L) diff --git a/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala index 26b757cc2d535..46ab3ac8de3d4 100644 --- a/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala +++ b/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala @@ -68,6 +68,7 @@ class StreamingJobProgressListenerSuite extends TestSuiteBase with Matchers { listener.waitingBatches should be (List(BatchUIData(batchInfoSubmitted))) listener.runningBatches should be (Nil) listener.retainedCompletedBatches should be (Nil) + listener.lastReceivedBatch should be (Some(BatchUIData(batchInfoSubmitted))) listener.lastCompletedBatch should be (None) listener.numUnprocessedBatches should be (1) listener.numTotalCompletedBatches should be (0) @@ -81,6 +82,7 @@ class StreamingJobProgressListenerSuite extends TestSuiteBase with Matchers { listener.waitingBatches should be (Nil) listener.runningBatches should be (List(BatchUIData(batchInfoStarted))) listener.retainedCompletedBatches should be (Nil) + listener.lastReceivedBatch should be (Some(BatchUIData(batchInfoStarted))) listener.lastCompletedBatch should be (None) listener.numUnprocessedBatches should be (1) listener.numTotalCompletedBatches should be (0) @@ -123,6 +125,7 @@ class StreamingJobProgressListenerSuite extends TestSuiteBase with Matchers { listener.waitingBatches should be (Nil) listener.runningBatches should be (Nil) listener.retainedCompletedBatches should be (List(BatchUIData(batchInfoCompleted))) + listener.lastReceivedBatch should be (Some(BatchUIData(batchInfoCompleted))) listener.lastCompletedBatch should be (Some(BatchUIData(batchInfoCompleted))) listener.numUnprocessedBatches should be (0) listener.numTotalCompletedBatches should be (1) From 10204b9d29cd69895f5a606e75510dc64cf2e009 Mon Sep 17 00:00:00 2001 From: Liang-Chi Hsieh Date: Thu, 18 Aug 2016 13:24:12 +0800 Subject: [PATCH 014/270] [SPARK-16995][SQL] TreeNodeException when flat mapping RelationalGroupedDataset created from DataFrame containing a column created with lit/expr ## What changes were proposed in this pull request? A TreeNodeException is thrown when executing the following minimal example in Spark 2.0. import spark.implicits._ case class test (x: Int, q: Int) val d = Seq(1).toDF("x") d.withColumn("q", lit(0)).as[test].groupByKey(_.x).flatMapGroups{case (x, iter) => List[Int]()}.show d.withColumn("q", expr("0")).as[test].groupByKey(_.x).flatMapGroups{case (x, iter) => List[Int]()}.show The problem is at `FoldablePropagation`. The rule will do `transformExpressions` on `LogicalPlan`. The query above contains a `MapGroups` which has a parameter `dataAttributes:Seq[Attribute]`. One attributes in `dataAttributes` will be transformed to an `Alias(literal(0), _)` in `FoldablePropagation`. `Alias` is not an `Attribute` and causes the error. We can't easily detect such type inconsistency during transforming expressions. A direct approach to this problem is to skip doing `FoldablePropagation` on object operators as they should not contain such expressions. ## How was this patch tested? Jenkins tests. Author: Liang-Chi Hsieh Closes #14648 from viirya/flat-mapping. 
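For readability, the minimal reproduction above as a self-contained sketch; the session setup is assumed, while the case class, data frame, and groupByKey/flatMapGroups calls are the ones from the report:

```
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{expr, lit}

// Mirrors the reported case class; defined at top level so an encoder exists.
case class test(x: Int, q: Int)

object Spark16995Repro {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]").appName("SPARK-16995").getOrCreate()
    import spark.implicits._

    val d = Seq(1).toDF("x")
    // Both of these threw TreeNodeException before this patch.
    d.withColumn("q", lit(0)).as[test].groupByKey(_.x)
      .flatMapGroups { case (x, iter) => List[Int]() }.show()
    d.withColumn("q", expr("0")).as[test].groupByKey(_.x)
      .flatMapGroups { case (x, iter) => List[Int]() }.show()

    spark.stop()
  }
}
```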
--- .../spark/sql/catalyst/optimizer/Optimizer.scala | 13 +++++++++++++ .../scala/org/apache/spark/sql/DatasetSuite.scala | 13 +++++++++++++ 2 files changed, 26 insertions(+) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index aa15f4a82383c..b53c0b5beccf2 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -727,6 +727,19 @@ object FoldablePropagation extends Rule[LogicalPlan] { case j @ Join(_, _, LeftOuter | RightOuter | FullOuter, _) => stop = true j + + // These 3 operators take attributes as constructor parameters, and these attributes + // can't be replaced by alias. + case m: MapGroups => + stop = true + m + case f: FlatMapGroupsInR => + stop = true + f + case c: CoGroup => + stop = true + c + case p: LogicalPlan if !stop => p.transformExpressions { case a: AttributeReference if foldableMap.contains(a) => foldableMap(a) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala index 88fb1472b668b..8ce6ea66b6bbf 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala @@ -878,6 +878,19 @@ class DatasetSuite extends QueryTest with SharedSQLContext { val ds = spark.createDataset(data)(enc) checkDataset(ds, (("a", "b"), "c"), (null, "d")) } + + test("SPARK-16995: flat mapping on Dataset containing a column created with lit/expr") { + val df = Seq("1").toDF("a") + + import df.sparkSession.implicits._ + + checkDataset( + df.withColumn("b", lit(0)).as[ClassData] + .groupByKey(_.a).flatMapGroups { case (x, iter) => List[Int]() }) + checkDataset( + df.withColumn("b", expr("0")).as[ClassData] + .groupByKey(_.a).flatMapGroups { case (x, iter) => List[Int]() }) + } } case class Generic[T](id: T, value: Double) From 3e6ef2e8a435a91b6a76876e9833917e5aa0945e Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Thu, 18 Aug 2016 16:17:01 +0800 Subject: [PATCH 015/270] [SPARK-17034][SQL] Minor code cleanup for UnresolvedOrdinal ## What changes were proposed in this pull request? I was looking at the code for UnresolvedOrdinal and made a few small changes to make it slightly more clear: 1. Rename the rule to SubstituteUnresolvedOrdinals which is more consistent with other rules that start with verbs. Note that this is still inconsistent with CTESubstitution and WindowsSubstitution. 2. Broke the test suite down from a single test case to three test cases. ## How was this patch tested? This is a minor cleanup. Author: petermaxlee Closes #14672 from petermaxlee/SPARK-17034. 
--- .../sql/catalyst/analysis/Analyzer.scala | 2 +- ...ala => SubstituteUnresolvedOrdinals.scala} | 26 ++++++++++--------- .../sql/catalyst/planning/patterns.scala | 13 ---------- ...> SubstituteUnresolvedOrdinalsSuite.scala} | 24 +++++++++-------- 4 files changed, 28 insertions(+), 37 deletions(-) rename sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/{UnresolvedOrdinalSubstitution.scala => SubstituteUnresolvedOrdinals.scala} (69%) rename sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/{UnresolvedOrdinalSubstitutionSuite.scala => SubstituteUnresolvedOrdinalsSuite.scala} (76%) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index f540816366ca8..cfab6ae7bd02b 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -84,7 +84,7 @@ class Analyzer( CTESubstitution, WindowsSubstitution, EliminateUnions, - new UnresolvedOrdinalSubstitution(conf)), + new SubstituteUnresolvedOrdinals(conf)), Batch("Resolution", fixedPoint, ResolveRelations :: ResolveReferences :: diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnresolvedOrdinalSubstitution.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinals.scala similarity index 69% rename from sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnresolvedOrdinalSubstitution.scala rename to sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinals.scala index e21cd08af8b0d..6d8dc8628229a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnresolvedOrdinalSubstitution.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinals.scala @@ -18,32 +18,34 @@ package org.apache.spark.sql.catalyst.analysis import org.apache.spark.sql.catalyst.CatalystConf -import org.apache.spark.sql.catalyst.expressions.{Expression, SortOrder} -import org.apache.spark.sql.catalyst.planning.IntegerIndex +import org.apache.spark.sql.catalyst.expressions.{Expression, Literal, SortOrder} import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Sort} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.catalyst.trees.CurrentOrigin.withOrigin +import org.apache.spark.sql.types.IntegerType /** * Replaces ordinal in 'order by' or 'group by' with UnresolvedOrdinal expression. 
*/ -class UnresolvedOrdinalSubstitution(conf: CatalystConf) extends Rule[LogicalPlan] { - private def isIntegerLiteral(sorter: Expression) = IntegerIndex.unapply(sorter).nonEmpty +class SubstituteUnresolvedOrdinals(conf: CatalystConf) extends Rule[LogicalPlan] { + private def isIntLiteral(e: Expression) = e match { + case Literal(_, IntegerType) => true + case _ => false + } def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case s @ Sort(orders, global, child) if conf.orderByOrdinal && - orders.exists(o => isIntegerLiteral(o.child)) => - val newOrders = orders.map { - case order @ SortOrder(ordinal @ IntegerIndex(index: Int), _) => + case s: Sort if conf.orderByOrdinal && s.order.exists(o => isIntLiteral(o.child)) => + val newOrders = s.order.map { + case order @ SortOrder(ordinal @ Literal(index: Int, IntegerType), _) => val newOrdinal = withOrigin(ordinal.origin)(UnresolvedOrdinal(index)) withOrigin(order.origin)(order.copy(child = newOrdinal)) case other => other } withOrigin(s.origin)(s.copy(order = newOrders)) - case a @ Aggregate(groups, aggs, child) if conf.groupByOrdinal && - groups.exists(isIntegerLiteral(_)) => - val newGroups = groups.map { - case ordinal @ IntegerIndex(index) => + + case a: Aggregate if conf.groupByOrdinal && a.groupingExpressions.exists(isIntLiteral) => + val newGroups = a.groupingExpressions.map { + case ordinal @ Literal(index: Int, IntegerType) => withOrigin(ordinal.origin)(UnresolvedOrdinal(index)) case other => other } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala index f42e67ca6ec20..476c66af76b29 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala @@ -208,19 +208,6 @@ object Unions { } } -/** - * Extractor for retrieving Int value. - */ -object IntegerIndex { - def unapply(a: Any): Option[Int] = a match { - case Literal(a: Int, IntegerType) => Some(a) - // When resolving ordinal in Sort and Group By, negative values are extracted - // for issuing error messages. - case UnaryMinus(IntegerLiteral(v)) => Some(-v) - case _ => None - } -} - /** * An extractor used when planning the physical execution of an aggregation. 
Compared with a logical * aggregation, the following transformations are performed: diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnresolvedOrdinalSubstitutionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinalsSuite.scala similarity index 76% rename from sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnresolvedOrdinalSubstitutionSuite.scala rename to sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinalsSuite.scala index 23995e96e1d2b..3c429ebce1a8d 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnresolvedOrdinalSubstitutionSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinalsSuite.scala @@ -23,20 +23,21 @@ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions.Literal import org.apache.spark.sql.catalyst.SimpleCatalystConf -class UnresolvedOrdinalSubstitutionSuite extends AnalysisTest { - - test("test rule UnresolvedOrdinalSubstitution, replaces ordinal in order by or group by") { - val a = testRelation2.output(0) - val b = testRelation2.output(1) - val conf = new SimpleCatalystConf(caseSensitiveAnalysis = true) +class SubstituteUnresolvedOrdinalsSuite extends AnalysisTest { + private lazy val conf = SimpleCatalystConf(caseSensitiveAnalysis = true) + private lazy val a = testRelation2.output(0) + private lazy val b = testRelation2.output(1) + test("unresolved ordinal should not be unresolved") { // Expression OrderByOrdinal is unresolved. assert(!UnresolvedOrdinal(0).resolved) + } + test("order by ordinal") { // Tests order by ordinal, apply single rule. val plan = testRelation2.orderBy(Literal(1).asc, Literal(2).asc) comparePlans( - new UnresolvedOrdinalSubstitution(conf).apply(plan), + new SubstituteUnresolvedOrdinals(conf).apply(plan), testRelation2.orderBy(UnresolvedOrdinal(1).asc, UnresolvedOrdinal(2).asc)) // Tests order by ordinal, do full analysis @@ -44,14 +45,15 @@ class UnresolvedOrdinalSubstitutionSuite extends AnalysisTest { // order by ordinal can be turned off by config comparePlans( - new UnresolvedOrdinalSubstitution(conf.copy(orderByOrdinal = false)).apply(plan), + new SubstituteUnresolvedOrdinals(conf.copy(orderByOrdinal = false)).apply(plan), testRelation2.orderBy(Literal(1).asc, Literal(2).asc)) + } - + test("group by ordinal") { // Tests group by ordinal, apply single rule. val plan2 = testRelation2.groupBy(Literal(1), Literal(2))('a, 'b) comparePlans( - new UnresolvedOrdinalSubstitution(conf).apply(plan2), + new SubstituteUnresolvedOrdinals(conf).apply(plan2), testRelation2.groupBy(UnresolvedOrdinal(1), UnresolvedOrdinal(2))('a, 'b)) // Tests group by ordinal, do full analysis @@ -59,7 +61,7 @@ class UnresolvedOrdinalSubstitutionSuite extends AnalysisTest { // group by ordinal can be turned off by config comparePlans( - new UnresolvedOrdinalSubstitution(conf.copy(groupByOrdinal = false)).apply(plan2), + new SubstituteUnresolvedOrdinals(conf.copy(groupByOrdinal = false)).apply(plan2), testRelation2.groupBy(Literal(1), Literal(2))('a, 'b)) } } From 1748f824101870b845dbbd118763c6885744f98a Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Thu, 18 Aug 2016 16:37:25 +0800 Subject: [PATCH 016/270] [SPARK-16391][SQL] Support partial aggregation for reduceGroups ## What changes were proposed in this pull request? 
This patch introduces a new private ReduceAggregator interface that is a subclass of Aggregator. ReduceAggregator only requires a single associative and commutative reduce function. ReduceAggregator is also used to implement KeyValueGroupedDataset.reduceGroups in order to support partial aggregation. Note that the pull request was initially done by viirya. ## How was this patch tested? Covered by original tests for reduceGroups, as well as a new test suite for ReduceAggregator. Author: Reynold Xin Author: Liang-Chi Hsieh Closes #14576 from rxin/reduceAggregator. --- .../spark/sql/KeyValueGroupedDataset.scala | 10 +-- .../sql/expressions/ReduceAggregator.scala | 68 +++++++++++++++++ .../expressions/ReduceAggregatorSuite.scala | 73 +++++++++++++++++++ 3 files changed, 146 insertions(+), 5 deletions(-) create mode 100644 sql/core/src/main/scala/org/apache/spark/sql/expressions/ReduceAggregator.scala create mode 100644 sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala diff --git a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala index 65a725f3d4a81..61a3e6e0bc4f8 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala @@ -21,10 +21,11 @@ import scala.collection.JavaConverters._ import org.apache.spark.annotation.Experimental import org.apache.spark.api.java.function._ -import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder, OuterScopes} +import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder} import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, CreateStruct} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.execution.QueryExecution +import org.apache.spark.sql.expressions.ReduceAggregator /** * :: Experimental :: @@ -177,10 +178,9 @@ class KeyValueGroupedDataset[K, V] private[sql]( * @since 1.6.0 */ def reduceGroups(f: (V, V) => V): Dataset[(K, V)] = { - val func = (key: K, it: Iterator[V]) => Iterator((key, it.reduce(f))) - - implicit val resultEncoder = ExpressionEncoder.tuple(kExprEnc, vExprEnc) - flatMapGroups(func) + val vEncoder = encoderFor[V] + val aggregator: TypedColumn[V, V] = new ReduceAggregator[V](f)(vEncoder).toColumn + agg(aggregator) } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/ReduceAggregator.scala b/sql/core/src/main/scala/org/apache/spark/sql/expressions/ReduceAggregator.scala new file mode 100644 index 0000000000000..174378304d4a5 --- /dev/null +++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/ReduceAggregator.scala @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.expressions + +import org.apache.spark.sql.Encoder +import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder + +/** + * An aggregator that uses a single associative and commutative reduce function. This reduce + * function can be used to go through all input values and reduces them to a single value. + * If there is no input, a null value is returned. + * + * This class currently assumes there is at least one input row. + */ +private[sql] class ReduceAggregator[T: Encoder](func: (T, T) => T) + extends Aggregator[T, (Boolean, T), T] { + + private val encoder = implicitly[Encoder[T]] + + override def zero: (Boolean, T) = (false, null.asInstanceOf[T]) + + override def bufferEncoder: Encoder[(Boolean, T)] = + ExpressionEncoder.tuple( + ExpressionEncoder[Boolean](), + encoder.asInstanceOf[ExpressionEncoder[T]]) + + override def outputEncoder: Encoder[T] = encoder + + override def reduce(b: (Boolean, T), a: T): (Boolean, T) = { + if (b._1) { + (true, func(b._2, a)) + } else { + (true, a) + } + } + + override def merge(b1: (Boolean, T), b2: (Boolean, T)): (Boolean, T) = { + if (!b1._1) { + b2 + } else if (!b2._1) { + b1 + } else { + (true, func(b1._2, b2._2)) + } + } + + override def finish(reduction: (Boolean, T)): T = { + if (!reduction._1) { + throw new IllegalStateException("ReduceAggregator requires at least one input row") + } + reduction._2 + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala new file mode 100644 index 0000000000000..d826d3f54d922 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ReduceAggregatorSuite.scala @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.expressions + +import org.apache.spark.SparkFunSuite +import org.apache.spark.sql.Encoders +import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder + +class ReduceAggregatorSuite extends SparkFunSuite { + + test("zero value") { + val encoder: ExpressionEncoder[Int] = ExpressionEncoder() + val func = (v1: Int, v2: Int) => v1 + v2 + val aggregator: ReduceAggregator[Int] = new ReduceAggregator(func)(Encoders.scalaInt) + assert(aggregator.zero == (false, null)) + } + + test("reduce, merge and finish") { + val encoder: ExpressionEncoder[Int] = ExpressionEncoder() + val func = (v1: Int, v2: Int) => v1 + v2 + val aggregator: ReduceAggregator[Int] = new ReduceAggregator(func)(Encoders.scalaInt) + + val firstReduce = aggregator.reduce(aggregator.zero, 1) + assert(firstReduce == (true, 1)) + + val secondReduce = aggregator.reduce(firstReduce, 2) + assert(secondReduce == (true, 3)) + + val thirdReduce = aggregator.reduce(secondReduce, 3) + assert(thirdReduce == (true, 6)) + + val mergeWithZero1 = aggregator.merge(aggregator.zero, firstReduce) + assert(mergeWithZero1 == (true, 1)) + + val mergeWithZero2 = aggregator.merge(secondReduce, aggregator.zero) + assert(mergeWithZero2 == (true, 3)) + + val mergeTwoReduced = aggregator.merge(firstReduce, secondReduce) + assert(mergeTwoReduced == (true, 4)) + + assert(aggregator.finish(firstReduce)== 1) + assert(aggregator.finish(secondReduce) == 3) + assert(aggregator.finish(thirdReduce) == 6) + assert(aggregator.finish(mergeWithZero1) == 1) + assert(aggregator.finish(mergeWithZero2) == 3) + assert(aggregator.finish(mergeTwoReduced) == 4) + } + + test("requires at least one input row") { + val encoder: ExpressionEncoder[Int] = ExpressionEncoder() + val func = (v1: Int, v2: Int) => v1 + v2 + val aggregator: ReduceAggregator[Int] = new ReduceAggregator(func)(Encoders.scalaInt) + + intercept[IllegalStateException] { + aggregator.finish(aggregator.zero) + } + } +} From e82dbe600e0d36d76cd5607a77c3243a26777b77 Mon Sep 17 00:00:00 2001 From: Liang-Chi Hsieh Date: Thu, 18 Aug 2016 12:45:56 +0200 Subject: [PATCH 017/270] [SPARK-17107][SQL] Remove redundant pushdown rule for Union ## What changes were proposed in this pull request? The `Optimizer` rules `PushThroughSetOperations` and `PushDownPredicate` have a redundant rule to push down `Filter` through `Union`. We should remove it. ## How was this patch tested? Jenkins tests. Author: Liang-Chi Hsieh Closes #14687 from viirya/remove-extra-pushdown. --- .../sql/catalyst/optimizer/Optimizer.scala | 21 +++++-------------- .../optimizer/SetOperationSuite.scala | 3 ++- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index b53c0b5beccf2..f7aa6da0a5bdc 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -75,7 +75,7 @@ abstract class Optimizer(sessionCatalog: SessionCatalog, conf: CatalystConf) RemoveRepetitionFromGroupExpressions) :: Batch("Operator Optimizations", fixedPoint, // Operator push down - PushThroughSetOperations, + PushProjectionThroughUnion, ReorderJoin, EliminateOuterJoin, PushPredicateThroughJoin, @@ -302,14 +302,14 @@ object LimitPushDown extends Rule[LogicalPlan] { } /** - * Pushes certain operations to both sides of a Union operator. 
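As a hypothetical check of the behaviour both rules are expected to preserve (assuming a local session and throwaway temp views), a filter written above a UNION ALL should still appear below the Union, once per child, in the optimized plan, now produced by `PushDownPredicate` alone:

```
import org.apache.spark.sql.SparkSession

object UnionPushdownCheck {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("union-pushdown").getOrCreate()
    import spark.implicits._

    Seq(1, 5, 10).toDF("a").createOrReplaceTempView("t1")
    Seq(2, 6, 12).toDF("a").createOrReplaceTempView("t2")

    val q = spark.sql(
      "SELECT a FROM (SELECT a FROM t1 UNION ALL SELECT a FROM t2) u WHERE a > 5")
    // The optimized plan should show the `a > 5` predicate pushed into each side of the Union.
    q.explain(true)
    spark.stop()
  }
}
```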
+ * Pushes Project operator to both sides of a Union operator. * Operations that are safe to pushdown are listed as follows. * Union: * Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is - * safe to pushdown Filters and Projections through it. Once we add UNION DISTINCT, - * we will not be able to pushdown Projections. + * safe to pushdown Filters and Projections through it. Filter pushdown is handled by another + * rule PushDownPredicate. Once we add UNION DISTINCT, we will not be able to pushdown Projections. */ -object PushThroughSetOperations extends Rule[LogicalPlan] with PredicateHelper { +object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper { /** * Maps Attributes from the left side to the corresponding Attribute on the right side. @@ -364,17 +364,6 @@ object PushThroughSetOperations extends Rule[LogicalPlan] with PredicateHelper { } else { p } - - // Push down filter into union - case Filter(condition, Union(children)) => - assert(children.nonEmpty) - val (deterministic, nondeterministic) = partitionByDeterministic(condition) - val newFirstChild = Filter(deterministic, children.head) - val newOtherChildren = children.tail.map { child => - val rewrites = buildRewrites(children.head, child) - Filter(pushToRight(deterministic, rewrites), child) - } - Filter(nondeterministic, Union(newFirstChild +: newOtherChildren)) } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala index dab45a6b166be..7227706ab2b36 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala @@ -31,7 +31,8 @@ class SetOperationSuite extends PlanTest { EliminateSubqueryAliases) :: Batch("Union Pushdown", Once, CombineUnions, - PushThroughSetOperations, + PushProjectionThroughUnion, + PushDownPredicate, PruneFilters) :: Nil } From b81421afb04959bb22b53653be0a09c1f1c5845f Mon Sep 17 00:00:00 2001 From: Stavros Kontopoulos Date: Thu, 18 Aug 2016 12:19:19 +0100 Subject: [PATCH 018/270] [SPARK-17087][MESOS] Documentation for Making Spark on Mesos honor port restrictions ## What changes were proposed in this pull request? - adds documentation for https://issues.apache.org/jira/browse/SPARK-11714 ## How was this patch tested? Doc no test needed. Author: Stavros Kontopoulos Closes #14667 from skonto/add_doc. --- docs/running-on-mesos.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md index a6ce34c761c82..173961deaadcb 100644 --- a/docs/running-on-mesos.md +++ b/docs/running-on-mesos.md @@ -207,6 +207,16 @@ The scheduler will start executors round-robin on the offers Mesos gives it, but there are no spread guarantees, as Mesos does not provide such guarantees on the offer stream. +In this mode spark executors will honor port allocation if such is +provided from the user. Specifically if the user defines +`spark.executor.port` or `spark.blockManager.port` in Spark configuration, +the mesos scheduler will check the available offers for a valid port +range containing the port numbers. If no such range is available it will +not launch any task. If no restriction is imposed on port numbers by the +user, ephemeral ports are used as usual. 
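As a hedged illustration of the configuration described above (the Mesos master URL and port numbers are placeholders, not values from this patch), an application might pin its ports like this:

```
import org.apache.spark.SparkConf

// With these set, the coarse-grained Mesos scheduler only launches an executor on a host
// whose offer contains port ranges covering both numbers; otherwise the offer is declined.
val conf = new SparkConf()
  .setMaster("mesos://zk://zk1:2181/mesos")   // placeholder Mesos master
  .setAppName("fixed-ports")
  .set("spark.executor.port", "5555")
  .set("spark.blockManager.port", "6666")
```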
This port honouring implementation +implies one task per host if the user defines a port. In the future network +isolation shall be supported. + The benefit of coarse-grained mode is much lower startup overhead, but at the cost of reserving Mesos resources for the complete duration of the application. To configure your job to dynamically adjust to its From 412dba63b511474a6db3c43c8618d803e604bc6b Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 18 Aug 2016 13:33:55 +0200 Subject: [PATCH 019/270] [SPARK-17069] Expose spark.range() as table-valued function in SQL ## What changes were proposed in this pull request? This adds analyzer rules for resolving table-valued functions, and adds one builtin implementation for range(). The arguments for range() are the same as those of `spark.range()`. ## How was this patch tested? Unit tests. cc hvanhovell Author: Eric Liang Closes #14656 from ericl/sc-4309. --- .../spark/sql/catalyst/parser/SqlBase.g4 | 1 + .../sql/catalyst/analysis/Analyzer.scala | 1 + .../ResolveTableValuedFunctions.scala | 132 ++++++++++++++++++ .../sql/catalyst/analysis/unresolved.scala | 11 ++ .../sql/catalyst/parser/AstBuilder.scala | 8 ++ .../sql/catalyst/parser/PlanParserSuite.scala | 8 +- .../inputs/table-valued-functions.sql | 20 +++ .../results/table-valued-functions.sql.out | 87 ++++++++++++ 8 files changed, 267 insertions(+), 1 deletion(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala create mode 100644 sql/core/src/test/resources/sql-tests/inputs/table-valued-functions.sql create mode 100644 sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 index 6122bcdef8f07..cab7c3ff5a8f7 100644 --- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 +++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 @@ -433,6 +433,7 @@ relationPrimary | '(' queryNoWith ')' sample? (AS? strictIdentifier)? #aliasedQuery | '(' relation ')' sample? (AS? strictIdentifier)? #aliasedRelation | inlineTable #inlineTableDefault2 + | identifier '(' (expression (',' expression)*)? ')' #tableValuedFunction ; inlineTable diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index cfab6ae7bd02b..333dd4d9a4f2a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -86,6 +86,7 @@ class Analyzer( EliminateUnions, new SubstituteUnresolvedOrdinals(conf)), Batch("Resolution", fixedPoint, + ResolveTableValuedFunctions :: ResolveRelations :: ResolveReferences :: ResolveDeserializer :: diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala new file mode 100644 index 0000000000000..7fdf7fa0c06a3 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.analysis + +import org.apache.spark.{SparkConf, SparkContext} +import org.apache.spark.sql.catalyst.expressions.Expression +import org.apache.spark.sql.catalyst.plans._ +import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Range} +import org.apache.spark.sql.catalyst.rules._ +import org.apache.spark.sql.types.{DataType, IntegerType, LongType} + +/** + * Rule that resolves table-valued function references. + */ +object ResolveTableValuedFunctions extends Rule[LogicalPlan] { + private lazy val defaultParallelism = + SparkContext.getOrCreate(new SparkConf(false)).defaultParallelism + + /** + * List of argument names and their types, used to declare a function. + */ + private case class ArgumentList(args: (String, DataType)*) { + /** + * Try to cast the expressions to satisfy the expected types of this argument list. If there + * are any types that cannot be casted, then None is returned. + */ + def implicitCast(values: Seq[Expression]): Option[Seq[Expression]] = { + if (args.length == values.length) { + val casted = values.zip(args).map { case (value, (_, expectedType)) => + TypeCoercion.ImplicitTypeCasts.implicitCast(value, expectedType) + } + if (casted.forall(_.isDefined)) { + return Some(casted.map(_.get)) + } + } + None + } + + override def toString: String = { + args.map { a => + s"${a._1}: ${a._2.typeName}" + }.mkString(", ") + } + } + + /** + * A TVF maps argument lists to resolver functions that accept those arguments. Using a map + * here allows for function overloading. + */ + private type TVF = Map[ArgumentList, Seq[Any] => LogicalPlan] + + /** + * TVF builder. + */ + private def tvf(args: (String, DataType)*)(pf: PartialFunction[Seq[Any], LogicalPlan]) + : (ArgumentList, Seq[Any] => LogicalPlan) = { + (ArgumentList(args: _*), + pf orElse { + case args => + throw new IllegalArgumentException( + "Invalid arguments for resolved function: " + args.mkString(", ")) + }) + } + + /** + * Internal registry of table-valued functions. 
+ */ + private val builtinFunctions: Map[String, TVF] = Map( + "range" -> Map( + /* range(end) */ + tvf("end" -> LongType) { case Seq(end: Long) => + Range(0, end, 1, defaultParallelism) + }, + + /* range(start, end) */ + tvf("start" -> LongType, "end" -> LongType) { case Seq(start: Long, end: Long) => + Range(start, end, 1, defaultParallelism) + }, + + /* range(start, end, step) */ + tvf("start" -> LongType, "end" -> LongType, "step" -> LongType) { + case Seq(start: Long, end: Long, step: Long) => + Range(start, end, step, defaultParallelism) + }, + + /* range(start, end, step, numPartitions) */ + tvf("start" -> LongType, "end" -> LongType, "step" -> LongType, + "numPartitions" -> IntegerType) { + case Seq(start: Long, end: Long, step: Long, numPartitions: Int) => + Range(start, end, step, numPartitions) + }) + ) + + override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators { + case u: UnresolvedTableValuedFunction if u.functionArgs.forall(_.resolved) => + builtinFunctions.get(u.functionName) match { + case Some(tvf) => + val resolved = tvf.flatMap { case (argList, resolver) => + argList.implicitCast(u.functionArgs) match { + case Some(casted) => + Some(resolver(casted.map(_.eval()))) + case _ => + None + } + } + resolved.headOption.getOrElse { + val argTypes = u.functionArgs.map(_.dataType.typeName).mkString(", ") + u.failAnalysis( + s"""error: table-valued function ${u.functionName} with alternatives: + |${tvf.keys.map(_.toString).toSeq.sorted.map(x => s" ($x)").mkString("\n")} + |cannot be applied to: (${argTypes})""".stripMargin) + } + case _ => + u.failAnalysis(s"could not resolve `${u.functionName}` to a table-valued function") + } + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala index 42e7aae0b6b05..3735a1501cbfa 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala @@ -49,6 +49,17 @@ case class UnresolvedRelation( override lazy val resolved = false } +/** + * Holds a table-valued function call that has yet to be resolved. + */ +case class UnresolvedTableValuedFunction( + functionName: String, functionArgs: Seq[Expression]) extends LeafNode { + + override def output: Seq[Attribute] = Nil + + override lazy val resolved = false +} + /** * Holds the name of an attribute that has yet to be resolved. */ diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index adf78396d7fc0..01322ae327e4a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -657,6 +657,14 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { table.optionalMap(ctx.sample)(withSample) } + /** + * Create a table-valued function call with arguments, e.g. range(1000) + */ + override def visitTableValuedFunction(ctx: TableValuedFunctionContext) + : LogicalPlan = withOrigin(ctx) { + UnresolvedTableValuedFunction(ctx.identifier.getText, ctx.expression.asScala.map(expression)) + } + /** * Create an inline table (a virtual table in Hive parlance). 
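To make the new syntax concrete, a short, hypothetical driver (assuming a local session) exercising the `range` table-valued function registered above; argument literals are coerced through the usual implicit casts:

```
import org.apache.spark.sql.SparkSession

object RangeTvfExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("range-tvf").getOrCreate()
    spark.sql("SELECT * FROM range(5)").show()           // 0, 1, 2, 3, 4
    spark.sql("SELECT * FROM range(2, 10, 3)").show()    // 2, 5, 8
    spark.sql("SELECT * FROM range(0, 10, 2, 4)").show() // 0, 2, 4, 6, 8 across 4 partitions
    spark.stop()
  }
}
```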
*/ diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala index 7af333b34f723..cbe4a022e730d 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql.catalyst.parser import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.FunctionIdentifier -import org.apache.spark.sql.catalyst.analysis.UnresolvedGenerator +import org.apache.spark.sql.catalyst.analysis.{UnresolvedGenerator, UnresolvedTableValuedFunction} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ @@ -426,6 +426,12 @@ class PlanParserSuite extends PlanTest { assertEqual("table d.t", table("d", "t")) } + test("table valued function") { + assertEqual( + "select * from range(2)", + UnresolvedTableValuedFunction("range", Literal(2) :: Nil).select(star())) + } + test("inline table") { assertEqual("values 1, 2, 3, 4", LocalRelation.fromExternalRows( Seq('col1.int), diff --git a/sql/core/src/test/resources/sql-tests/inputs/table-valued-functions.sql b/sql/core/src/test/resources/sql-tests/inputs/table-valued-functions.sql new file mode 100644 index 0000000000000..2e6dcd538b7ac --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/table-valued-functions.sql @@ -0,0 +1,20 @@ +-- unresolved function +select * from dummy(3); + +-- range call with end +select * from range(6 + cos(3)); + +-- range call with start and end +select * from range(5, 10); + +-- range call with step +select * from range(0, 10, 2); + +-- range call with numPartitions +select * from range(0, 10, 1, 200); + +-- range call error +select * from range(1, 1, 1, 1, 1); + +-- range call with null +select * from range(1, null); diff --git a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out new file mode 100644 index 0000000000000..d769bcef0aca7 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out @@ -0,0 +1,87 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 7 + + +-- !query 0 +select * from dummy(3) +-- !query 0 schema +struct<> +-- !query 0 output +org.apache.spark.sql.AnalysisException +could not resolve `dummy` to a table-valued function; line 1 pos 14 + + +-- !query 1 +select * from range(6 + cos(3)) +-- !query 1 schema +struct +-- !query 1 output +0 +1 +2 +3 +4 + + +-- !query 2 +select * from range(5, 10) +-- !query 2 schema +struct +-- !query 2 output +5 +6 +7 +8 +9 + + +-- !query 3 +select * from range(0, 10, 2) +-- !query 3 schema +struct +-- !query 3 output +0 +2 +4 +6 +8 + + +-- !query 4 +select * from range(0, 10, 1, 200) +-- !query 4 schema +struct +-- !query 4 output +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 + + +-- !query 5 +select * from range(1, 1, 1, 1, 1) +-- !query 5 schema +struct<> +-- !query 5 output +org.apache.spark.sql.AnalysisException +error: table-valued function range with alternatives: + (end: long) + (start: long, end: long) + (start: long, end: long, step: long) + (start: long, end: long, step: long, numPartitions: integer) +cannot be applied to: (integer, integer, integer, integer, integer); line 1 pos 14 + + +-- !query 6 +select * from range(1, null) 
+-- !query 6 schema +struct<> +-- !query 6 output +java.lang.IllegalArgumentException +Invalid arguments for resolved function: 1, null From 68f5087d2107d6afec5d5745f0cb0e9e3bdd6a0b Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Thu, 18 Aug 2016 13:44:13 +0200 Subject: [PATCH 020/270] [SPARK-17117][SQL] 1 / NULL should not fail analysis ## What changes were proposed in this pull request? This patch fixes the problem described in SPARK-17117, i.e. "SELECT 1 / NULL" throws an analysis exception: ``` org.apache.spark.sql.AnalysisException: cannot resolve '(1 / NULL)' due to data type mismatch: differing types in '(1 / NULL)' (int and null). ``` The problem is that division type coercion did not take null type into account. ## How was this patch tested? A unit test for the type coercion, and a few end-to-end test cases using SQLQueryTestSuite. Author: petermaxlee Closes #14695 from petermaxlee/SPARK-17117. --- .../sql/catalyst/analysis/TypeCoercion.scala | 7 +- .../catalyst/analysis/TypeCoercionSuite.scala | 9 +- .../resources/sql-tests/inputs/arithmetic.sql | 12 ++- .../sql-tests/results/arithmetic.sql.out | 84 +++++++++++++++---- 4 files changed, 89 insertions(+), 23 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala index 021952e7166f9..21e96aaf53844 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala @@ -543,11 +543,14 @@ object TypeCoercion { // Decimal and Double remain the same case d: Divide if d.dataType == DoubleType => d case d: Divide if d.dataType.isInstanceOf[DecimalType] => d - case Divide(left, right) if isNumeric(left) && isNumeric(right) => + case Divide(left, right) if isNumericOrNull(left) && isNumericOrNull(right) => Divide(Cast(left, DoubleType), Cast(right, DoubleType)) } - private def isNumeric(ex: Expression): Boolean = ex.dataType.isInstanceOf[NumericType] + private def isNumericOrNull(ex: Expression): Boolean = { + // We need to handle null types in case a query contains null literals. + ex.dataType.isInstanceOf[NumericType] || ex.dataType == NullType + } } /** diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala index a13c45fe2ffee..9560563a8ca56 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql.catalyst.analysis import java.sql.Timestamp -import org.apache.spark.sql.catalyst.analysis.TypeCoercion.{Division, FunctionArgumentConversion} +import org.apache.spark.sql.catalyst.analysis.TypeCoercion._ import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.PlanTest @@ -730,6 +730,13 @@ class TypeCoercionSuite extends PlanTest { // the right expression to Decimal. 
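A quick, hypothetical way to confirm the coercion end to end (assuming a local session): both operands are now cast to double, so the expressions analyze and evaluate to NULL instead of failing.

```
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("null-divide").getOrCreate()
// Before this fix the first query failed analysis with a type-mismatch error.
spark.sql("SELECT 1 / NULL, NULL / 5").show()   // both columns are NULL
spark.sql("SELECT 5 div NULL").show()           // integral division yields NULL as well
```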
ruleTest(rules, sum(Divide(Decimal(4.0), 3)), sum(Divide(Decimal(4.0), 3))) } + + test("SPARK-17117 null type coercion in divide") { + val rules = Seq(FunctionArgumentConversion, Division, ImplicitTypeCasts) + val nullLit = Literal.create(null, NullType) + ruleTest(rules, Divide(1L, nullLit), Divide(Cast(1L, DoubleType), Cast(nullLit, DoubleType))) + ruleTest(rules, Divide(nullLit, 1L), Divide(Cast(nullLit, DoubleType), Cast(1L, DoubleType))) + } } diff --git a/sql/core/src/test/resources/sql-tests/inputs/arithmetic.sql b/sql/core/src/test/resources/sql-tests/inputs/arithmetic.sql index cbe40410cdc10..f62b10ca0037b 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/arithmetic.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/arithmetic.sql @@ -16,11 +16,19 @@ select + + 100; select - - max(key) from testdata; select + - key from testdata where key = 33; +-- div +select 5 / 2; +select 5 / 0; +select 5 / null; +select null / 5; +select 5 div 2; +select 5 div 0; +select 5 div null; +select null div 5; + -- other arithmetics select 1 + 2; select 1 - 2; select 2 * 5; -select 5 / 2; -select 5 div 2; select 5 % 3; select pmod(-7, 3); diff --git a/sql/core/src/test/resources/sql-tests/results/arithmetic.sql.out b/sql/core/src/test/resources/sql-tests/results/arithmetic.sql.out index f2b40a00d062d..6abe048af477d 100644 --- a/sql/core/src/test/resources/sql-tests/results/arithmetic.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/arithmetic.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 22 +-- Number of queries: 28 -- !query 0 @@ -123,35 +123,35 @@ struct<(- key):int> -- !query 15 -select 1 + 2 +select 5 / 2 -- !query 15 schema -struct<(1 + 2):int> +struct<(CAST(5 AS DOUBLE) / CAST(2 AS DOUBLE)):double> -- !query 15 output -3 +2.5 -- !query 16 -select 1 - 2 +select 5 / 0 -- !query 16 schema -struct<(1 - 2):int> +struct<(CAST(5 AS DOUBLE) / CAST(0 AS DOUBLE)):double> -- !query 16 output --1 +NULL -- !query 17 -select 2 * 5 +select 5 / null -- !query 17 schema -struct<(2 * 5):int> +struct<(CAST(5 AS DOUBLE) / CAST(NULL AS DOUBLE)):double> -- !query 17 output -10 +NULL -- !query 18 -select 5 / 2 +select null / 5 -- !query 18 schema -struct<(CAST(5 AS DOUBLE) / CAST(2 AS DOUBLE)):double> +struct<(CAST(NULL AS DOUBLE) / CAST(5 AS DOUBLE)):double> -- !query 18 output -2.5 +NULL -- !query 19 @@ -163,16 +163,64 @@ struct -- !query 20 -select 5 % 3 +select 5 div 0 -- !query 20 schema -struct<(5 % 3):int> +struct -- !query 20 output -2 +NULL -- !query 21 -select pmod(-7, 3) +select 5 div null -- !query 21 schema -struct +struct -- !query 21 output +NULL + + +-- !query 22 +select null div 5 +-- !query 22 schema +struct +-- !query 22 output +NULL + + +-- !query 23 +select 1 + 2 +-- !query 23 schema +struct<(1 + 2):int> +-- !query 23 output +3 + + +-- !query 24 +select 1 - 2 +-- !query 24 schema +struct<(1 - 2):int> +-- !query 24 output +-1 + + +-- !query 25 +select 2 * 5 +-- !query 25 schema +struct<(2 * 5):int> +-- !query 25 output +10 + + +-- !query 26 +select 5 % 3 +-- !query 26 schema +struct<(5 % 3):int> +-- !query 26 output +2 + + +-- !query 27 +select pmod(-7, 3) +-- !query 27 schema +struct +-- !query 27 output 2 From b72bb62d421840f82d663c6b8e3922bd14383fbb Mon Sep 17 00:00:00 2001 From: Xusen Yin Date: Thu, 18 Aug 2016 05:33:52 -0700 Subject: [PATCH 021/270] [SPARK-16447][ML][SPARKR] LDA wrapper in SparkR ## What changes were proposed in this pull request? 
Add LDA Wrapper in SparkR with the following interfaces: - spark.lda(data, ...) - spark.posterior(object, newData, ...) - spark.perplexity(object, ...) - summary(object) - write.ml(object) - read.ml(path) ## How was this patch tested? Test with SparkR unit test. Author: Xusen Yin Closes #14229 from yinxusen/SPARK-16447. --- R/pkg/NAMESPACE | 3 + R/pkg/R/generics.R | 14 ++ R/pkg/R/mllib.R | 166 +++++++++++++- R/pkg/inst/tests/testthat/test_mllib.R | 87 +++++++ .../org/apache/spark/ml/clustering/LDA.scala | 4 + .../org/apache/spark/ml/r/LDAWrapper.scala | 216 ++++++++++++++++++ .../org/apache/spark/ml/r/RWrappers.scala | 2 + 7 files changed, 490 insertions(+), 2 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/LDAWrapper.scala diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index c71eec5ce0437..4404cffc292aa 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -25,6 +25,9 @@ exportMethods("glm", "fitted", "spark.naiveBayes", "spark.survreg", + "spark.lda", + "spark.posterior", + "spark.perplexity", "spark.isoreg", "spark.gaussianMixture") diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 06bb25d62d34d..fe04bcfc7d14d 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -1304,6 +1304,19 @@ setGeneric("spark.naiveBayes", function(data, formula, ...) { standardGeneric("s #' @export setGeneric("spark.survreg", function(data, formula, ...) { standardGeneric("spark.survreg") }) +#' @rdname spark.lda +#' @param ... Additional parameters to tune LDA. +#' @export +setGeneric("spark.lda", function(data, ...) { standardGeneric("spark.lda") }) + +#' @rdname spark.lda +#' @export +setGeneric("spark.posterior", function(object, newData) { standardGeneric("spark.posterior") }) + +#' @rdname spark.lda +#' @export +setGeneric("spark.perplexity", function(object, data) { standardGeneric("spark.perplexity") }) + #' @rdname spark.isoreg #' @export setGeneric("spark.isoreg", function(data, formula, ...) { standardGeneric("spark.isoreg") }) @@ -1315,6 +1328,7 @@ setGeneric("spark.gaussianMixture", standardGeneric("spark.gaussianMixture") }) +#' write.ml #' @rdname write.ml #' @export setGeneric("write.ml", function(object, path, ...) 
{ standardGeneric("write.ml") }) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index db74046056a99..b9527410a9853 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -39,6 +39,13 @@ setClass("GeneralizedLinearRegressionModel", representation(jobj = "jobj")) #' @note NaiveBayesModel since 2.0.0 setClass("NaiveBayesModel", representation(jobj = "jobj")) +#' S4 class that represents an LDAModel +#' +#' @param jobj a Java object reference to the backing Scala LDAWrapper +#' @export +#' @note LDAModel since 2.1.0 +setClass("LDAModel", representation(jobj = "jobj")) + #' S4 class that represents a AFTSurvivalRegressionModel #' #' @param jobj a Java object reference to the backing Scala AFTSurvivalRegressionWrapper @@ -75,7 +82,7 @@ setClass("GaussianMixtureModel", representation(jobj = "jobj")) #' @name write.ml #' @export #' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} -#' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} +#' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg}, \link{spark.lda} #' @seealso \link{spark.isoreg} #' @seealso \link{read.ml} NULL @@ -315,6 +322,94 @@ setMethod("summary", signature(object = "NaiveBayesModel"), return(list(apriori = apriori, tables = tables)) }) +# Returns posterior probabilities from a Latent Dirichlet Allocation model produced by spark.lda() + +#' @param newData A SparkDataFrame for testing +#' @return \code{spark.posterior} returns a SparkDataFrame containing posterior probabilities +#' vectors named "topicDistribution" +#' @rdname spark.lda +#' @aliases spark.posterior,LDAModel,SparkDataFrame-method +#' @export +#' @note spark.posterior(LDAModel) since 2.1.0 +setMethod("spark.posterior", signature(object = "LDAModel", newData = "SparkDataFrame"), + function(object, newData) { + return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + }) + +# Returns the summary of a Latent Dirichlet Allocation model produced by \code{spark.lda} + +#' @param object A Latent Dirichlet Allocation model fitted by \code{spark.lda}. +#' @param maxTermsPerTopic Maximum number of terms to collect for each topic. Default value of 10. 
+#' @return \code{summary} returns a list containing +#' \item{\code{docConcentration}}{concentration parameter commonly named \code{alpha} for +#' the prior placed on documents distributions over topics \code{theta}} +#' \item{\code{topicConcentration}}{concentration parameter commonly named \code{beta} or +#' \code{eta} for the prior placed on topic distributions over terms} +#' \item{\code{logLikelihood}}{log likelihood of the entire corpus} +#' \item{\code{logPerplexity}}{log perplexity} +#' \item{\code{isDistributed}}{TRUE for distributed model while FALSE for local model} +#' \item{\code{vocabSize}}{number of terms in the corpus} +#' \item{\code{topics}}{top 10 terms and their weights of all topics} +#' \item{\code{vocabulary}}{whole terms of the training corpus, NULL if libsvm format file +#' used as training set} +#' @rdname spark.lda +#' @aliases summary,LDAModel-method +#' @export +#' @note summary(LDAModel) since 2.1.0 +setMethod("summary", signature(object = "LDAModel"), + function(object, maxTermsPerTopic) { + maxTermsPerTopic <- as.integer(ifelse(missing(maxTermsPerTopic), 10, maxTermsPerTopic)) + jobj <- object@jobj + docConcentration <- callJMethod(jobj, "docConcentration") + topicConcentration <- callJMethod(jobj, "topicConcentration") + logLikelihood <- callJMethod(jobj, "logLikelihood") + logPerplexity <- callJMethod(jobj, "logPerplexity") + isDistributed <- callJMethod(jobj, "isDistributed") + vocabSize <- callJMethod(jobj, "vocabSize") + topics <- dataFrame(callJMethod(jobj, "topics", maxTermsPerTopic)) + vocabulary <- callJMethod(jobj, "vocabulary") + return(list(docConcentration = unlist(docConcentration), + topicConcentration = topicConcentration, + logLikelihood = logLikelihood, logPerplexity = logPerplexity, + isDistributed = isDistributed, vocabSize = vocabSize, + topics = topics, + vocabulary = unlist(vocabulary))) + }) + +# Returns the log perplexity of a Latent Dirichlet Allocation model produced by \code{spark.lda} + +#' @return \code{spark.perplexity} returns the log perplexity of given SparkDataFrame, or the log +#' perplexity of the training data if missing argument "data". +#' @rdname spark.lda +#' @aliases spark.perplexity,LDAModel-method +#' @export +#' @note spark.perplexity(LDAModel) since 2.1.0 +setMethod("spark.perplexity", signature(object = "LDAModel", data = "SparkDataFrame"), + function(object, data) { + return(ifelse(missing(data), callJMethod(object@jobj, "logPerplexity"), + callJMethod(object@jobj, "computeLogPerplexity", data@sdf))) + }) + +# Saves the Latent Dirichlet Allocation model to the input path. + +#' @param path The directory where the model is saved +#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE +#' which means throw exception if the output path exists. +#' +#' @rdname spark.lda +#' @aliases write.ml,LDAModel,character-method +#' @export +#' @seealso \link{read.ml} +#' @note write.ml(LDAModel, character) since 2.1.0 +setMethod("write.ml", signature(object = "LDAModel", path = "character"), + function(object, path, overwrite = FALSE) { + writer <- callJMethod(object@jobj, "write") + if (overwrite) { + writer <- callJMethod(writer, "overwrite") + } + invisible(callJMethod(writer, "save", path)) + }) + #' Isotonic Regression Model #' #' Fits an Isotonic Regression model against a Spark DataFrame, similarly to R's isoreg(). 
@@ -700,6 +795,8 @@ read.ml <- function(path) { return(new("GeneralizedLinearRegressionModel", jobj = jobj)) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.KMeansWrapper")) { return(new("KMeansModel", jobj = jobj)) + } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.LDAWrapper")) { + return(new("LDAModel", jobj = jobj)) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.IsotonicRegressionWrapper")) { return(new("IsotonicRegressionModel", jobj = jobj)) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.GaussianMixtureWrapper")) { @@ -751,6 +848,71 @@ setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = "formula return(new("AFTSurvivalRegressionModel", jobj = jobj)) }) +#' Latent Dirichlet Allocation +#' +#' \code{spark.lda} fits a Latent Dirichlet Allocation model on a SparkDataFrame. Users can call +#' \code{summary} to get a summary of the fitted LDA model, \code{spark.posterior} to compute +#' posterior probabilities on new data, \code{spark.perplexity} to compute log perplexity on new +#' data and \code{write.ml}/\code{read.ml} to save/load fitted models. +#' +#' @param data A SparkDataFrame for training +#' @param features Features column name, default "features". Either libSVM-format column or +#' character-format column is valid. +#' @param k Number of topics, default 10 +#' @param maxIter Maximum iterations, default 20 +#' @param optimizer Optimizer to train an LDA model, "online" or "em", default "online" +#' @param subsamplingRate (For online optimizer) Fraction of the corpus to be sampled and used in +#' each iteration of mini-batch gradient descent, in range (0, 1], default 0.05 +#' @param topicConcentration concentration parameter (commonly named \code{beta} or \code{eta}) for +#' the prior placed on topic distributions over terms, default -1 to set automatically on the +#' Spark side. Use \code{summary} to retrieve the effective topicConcentration. Only 1-size +#' numeric is accepted. +#' @param docConcentration concentration parameter (commonly named \code{alpha}) for the +#' prior placed on documents distributions over topics (\code{theta}), default -1 to set +#' automatically on the Spark side. Use \code{summary} to retrieve the effective +#' docConcentration. Only 1-size or \code{k}-size numeric is accepted. +#' @param customizedStopWords stopwords that need to be removed from the given corpus. Ignore the +#' parameter if libSVM-format column is used as the features column. 
+#' @param maxVocabSize maximum vocabulary size, default 1 << 18 +#' @return \code{spark.lda} returns a fitted Latent Dirichlet Allocation model +#' @rdname spark.lda +#' @aliases spark.lda,SparkDataFrame-method +#' @seealso topicmodels: \url{https://cran.r-project.org/web/packages/topicmodels/} +#' @export +#' @examples +#' \dontrun{ +#' text <- read.df("path/to/data", source = "libsvm") +#' model <- spark.lda(data = text, optimizer = "em") +#' +#' # get a summary of the model +#' summary(model) +#' +#' # compute posterior probabilities +#' posterior <- spark.posterior(model, df) +#' showDF(posterior) +#' +#' # compute perplexity +#' perplexity <- spark.perplexity(model, df) +#' +#' # save and load the model +#' path <- "path/to/model" +#' write.ml(model, path) +#' savedModel <- read.ml(path) +#' summary(savedModel) +#' } +#' @note spark.lda since 2.1.0 +setMethod("spark.lda", signature(data = "SparkDataFrame"), + function(data, features = "features", k = 10, maxIter = 20, optimizer = c("online", "em"), + subsamplingRate = 0.05, topicConcentration = -1, docConcentration = -1, + customizedStopWords = "", maxVocabSize = bitwShiftL(1, 18)) { + optimizer <- match.arg(optimizer) + jobj <- callJStatic("org.apache.spark.ml.r.LDAWrapper", "fit", data@sdf, features, + as.integer(k), as.integer(maxIter), optimizer, + as.numeric(subsamplingRate), topicConcentration, + as.array(docConcentration), as.array(customizedStopWords), + maxVocabSize) + return(new("LDAModel", jobj = jobj)) + }) # Returns a summary of the AFT survival regression model produced by spark.survreg, # similarly to R's summary(). @@ -891,4 +1053,4 @@ setMethod("summary", signature(object = "GaussianMixtureModel"), setMethod("predict", signature(object = "GaussianMixtureModel"), function(object, newData) { return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) - }) + }) \ No newline at end of file diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index 96179864a88bf..8c380fbf150f4 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -570,4 +570,91 @@ test_that("spark.gaussianMixture", { unlink(modelPath) }) +test_that("spark.lda with libsvm", { + text <- read.df("data/mllib/sample_lda_libsvm_data.txt", source = "libsvm") + model <- spark.lda(text, optimizer = "em") + + stats <- summary(model, 10) + isDistributed <- stats$isDistributed + logLikelihood <- stats$logLikelihood + logPerplexity <- stats$logPerplexity + vocabSize <- stats$vocabSize + topics <- stats$topicTopTerms + weights <- stats$topicTopTermsWeights + vocabulary <- stats$vocabulary + + expect_false(isDistributed) + expect_true(logLikelihood <= 0 & is.finite(logLikelihood)) + expect_true(logPerplexity >= 0 & is.finite(logPerplexity)) + expect_equal(vocabSize, 11) + expect_true(is.null(vocabulary)) + + # Test model save/load + modelPath <- tempfile(pattern = "spark-lda", fileext = ".tmp") + write.ml(model, modelPath) + expect_error(write.ml(model, modelPath)) + write.ml(model, modelPath, overwrite = TRUE) + model2 <- read.ml(modelPath) + stats2 <- summary(model2) + + expect_false(stats2$isDistributed) + expect_equal(logLikelihood, stats2$logLikelihood) + expect_equal(logPerplexity, stats2$logPerplexity) + expect_equal(vocabSize, stats2$vocabSize) + expect_equal(vocabulary, stats2$vocabulary) + + unlink(modelPath) +}) + +test_that("spark.lda with text input", { + text <- read.text("data/mllib/sample_lda_data.txt") + model <- spark.lda(text, optimizer = "online", features 
= "value") + + stats <- summary(model) + isDistributed <- stats$isDistributed + logLikelihood <- stats$logLikelihood + logPerplexity <- stats$logPerplexity + vocabSize <- stats$vocabSize + topics <- stats$topicTopTerms + weights <- stats$topicTopTermsWeights + vocabulary <- stats$vocabulary + + expect_false(isDistributed) + expect_true(logLikelihood <= 0 & is.finite(logLikelihood)) + expect_true(logPerplexity >= 0 & is.finite(logPerplexity)) + expect_equal(vocabSize, 10) + expect_true(setequal(stats$vocabulary, c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"))) + + # Test model save/load + modelPath <- tempfile(pattern = "spark-lda-text", fileext = ".tmp") + write.ml(model, modelPath) + expect_error(write.ml(model, modelPath)) + write.ml(model, modelPath, overwrite = TRUE) + model2 <- read.ml(modelPath) + stats2 <- summary(model2) + + expect_false(stats2$isDistributed) + expect_equal(logLikelihood, stats2$logLikelihood) + expect_equal(logPerplexity, stats2$logPerplexity) + expect_equal(vocabSize, stats2$vocabSize) + expect_true(all.equal(vocabulary, stats2$vocabulary)) + + unlink(modelPath) +}) + +test_that("spark.posterior and spark.perplexity", { + text <- read.text("data/mllib/sample_lda_data.txt") + model <- spark.lda(text, features = "value", k = 3) + + # Assert perplexities are equal + stats <- summary(model) + logPerplexity <- spark.perplexity(model, text) + expect_equal(logPerplexity, stats$logPerplexity) + + # Assert the sum of every topic distribution is equal to 1 + posterior <- spark.posterior(model, text) + local.posterior <- collect(posterior)$topicDistribution + expect_equal(length(local.posterior), sum(unlist(local.posterior))) +}) + sparkR.session.stop() diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala index 034f2c3fa2fd9..b5a764b5863f1 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala @@ -386,6 +386,10 @@ sealed abstract class LDAModel private[ml] ( @Since("1.6.0") protected def getModel: OldLDAModel + private[ml] def getEffectiveDocConcentration: Array[Double] = getModel.docConcentration.toArray + + private[ml] def getEffectiveTopicConcentration: Double = getModel.topicConcentration + /** * The features for LDA should be a [[Vector]] representing the word counts in a document. * The vector should be of length vocabSize, with counts for each term (word). diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/LDAWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/LDAWrapper.scala new file mode 100644 index 0000000000000..cbe6a705007d1 --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/LDAWrapper.scala @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.r + +import scala.collection.mutable + +import org.apache.hadoop.fs.Path +import org.json4s._ +import org.json4s.JsonDSL._ +import org.json4s.jackson.JsonMethods._ + +import org.apache.spark.SparkException +import org.apache.spark.ml.{Pipeline, PipelineModel, PipelineStage} +import org.apache.spark.ml.clustering.{LDA, LDAModel} +import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel, RegexTokenizer, StopWordsRemover} +import org.apache.spark.ml.linalg.{Vector, VectorUDT} +import org.apache.spark.ml.param.ParamPair +import org.apache.spark.ml.util._ +import org.apache.spark.sql.{DataFrame, Dataset} +import org.apache.spark.sql.functions._ +import org.apache.spark.sql.types.StringType + + +private[r] class LDAWrapper private ( + val pipeline: PipelineModel, + val logLikelihood: Double, + val logPerplexity: Double, + val vocabulary: Array[String]) extends MLWritable { + + import LDAWrapper._ + + private val lda: LDAModel = pipeline.stages.last.asInstanceOf[LDAModel] + private val preprocessor: PipelineModel = + new PipelineModel(s"${Identifiable.randomUID(pipeline.uid)}", pipeline.stages.dropRight(1)) + + def transform(data: Dataset[_]): DataFrame = { + val vec2ary = udf { vec: Vector => vec.toArray } + val outputCol = lda.getTopicDistributionCol + val tempCol = s"${Identifiable.randomUID(outputCol)}" + val preprocessed = preprocessor.transform(data) + lda.transform(preprocessed, ParamPair(lda.topicDistributionCol, tempCol)) + .withColumn(outputCol, vec2ary(col(tempCol))) + .drop(TOKENIZER_COL, STOPWORDS_REMOVER_COL, COUNT_VECTOR_COL, tempCol) + } + + def computeLogPerplexity(data: Dataset[_]): Double = { + lda.logPerplexity(preprocessor.transform(data)) + } + + def topics(maxTermsPerTopic: Int): DataFrame = { + val topicIndices: DataFrame = lda.describeTopics(maxTermsPerTopic) + if (vocabulary.isEmpty || vocabulary.length < vocabSize) { + topicIndices + } else { + val index2term = udf { indices: mutable.WrappedArray[Int] => indices.map(i => vocabulary(i)) } + topicIndices + .select(col("topic"), index2term(col("termIndices")).as("term"), col("termWeights")) + } + } + + lazy val isDistributed: Boolean = lda.isDistributed + lazy val vocabSize: Int = lda.vocabSize + lazy val docConcentration: Array[Double] = lda.getEffectiveDocConcentration + lazy val topicConcentration: Double = lda.getEffectiveTopicConcentration + + override def write: MLWriter = new LDAWrapper.LDAWrapperWriter(this) +} + +private[r] object LDAWrapper extends MLReadable[LDAWrapper] { + + val TOKENIZER_COL = s"${Identifiable.randomUID("rawTokens")}" + val STOPWORDS_REMOVER_COL = s"${Identifiable.randomUID("tokens")}" + val COUNT_VECTOR_COL = s"${Identifiable.randomUID("features")}" + + private def getPreStages( + features: String, + customizedStopWords: Array[String], + maxVocabSize: Int): Array[PipelineStage] = { + val tokenizer = new RegexTokenizer() + .setInputCol(features) + .setOutputCol(TOKENIZER_COL) + val stopWordsRemover = new StopWordsRemover() + .setInputCol(TOKENIZER_COL) + .setOutputCol(STOPWORDS_REMOVER_COL) + stopWordsRemover.setStopWords(stopWordsRemover.getStopWords ++ customizedStopWords) + val countVectorizer = new CountVectorizer() + .setVocabSize(maxVocabSize) + .setInputCol(STOPWORDS_REMOVER_COL) + .setOutputCol(COUNT_VECTOR_COL) + + Array(tokenizer, stopWordsRemover, countVectorizer) + } + + def fit( + data: DataFrame, + features: String, + k: 
Int, + maxIter: Int, + optimizer: String, + subsamplingRate: Double, + topicConcentration: Double, + docConcentration: Array[Double], + customizedStopWords: Array[String], + maxVocabSize: Int): LDAWrapper = { + + val lda = new LDA() + .setK(k) + .setMaxIter(maxIter) + .setSubsamplingRate(subsamplingRate) + + val featureSchema = data.schema(features) + val stages = featureSchema.dataType match { + case d: StringType => + getPreStages(features, customizedStopWords, maxVocabSize) ++ + Array(lda.setFeaturesCol(COUNT_VECTOR_COL)) + case d: VectorUDT => + Array(lda.setFeaturesCol(features)) + case _ => + throw new SparkException( + s"Unsupported input features type of ${featureSchema.dataType.typeName}," + + s" only String type and Vector type are supported now.") + } + + if (topicConcentration != -1) { + lda.setTopicConcentration(topicConcentration) + } else { + // Auto-set topicConcentration + } + + if (docConcentration.length == 1) { + if (docConcentration.head != -1) { + lda.setDocConcentration(docConcentration.head) + } else { + // Auto-set docConcentration + } + } else { + lda.setDocConcentration(docConcentration) + } + + val pipeline = new Pipeline().setStages(stages) + val model = pipeline.fit(data) + + val vocabulary: Array[String] = featureSchema.dataType match { + case d: StringType => + val countVectorModel = model.stages(2).asInstanceOf[CountVectorizerModel] + countVectorModel.vocabulary + case _ => Array.empty[String] + } + + val ldaModel: LDAModel = model.stages.last.asInstanceOf[LDAModel] + val preprocessor: PipelineModel = + new PipelineModel(s"${Identifiable.randomUID(pipeline.uid)}", model.stages.dropRight(1)) + + val preprocessedData = preprocessor.transform(data) + + new LDAWrapper( + model, + ldaModel.logLikelihood(preprocessedData), + ldaModel.logPerplexity(preprocessedData), + vocabulary) + } + + override def read: MLReader[LDAWrapper] = new LDAWrapperReader + + override def load(path: String): LDAWrapper = super.load(path) + + class LDAWrapperWriter(instance: LDAWrapper) extends MLWriter { + + override protected def saveImpl(path: String): Unit = { + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + + val rMetadata = ("class" -> instance.getClass.getName) ~ + ("logLikelihood" -> instance.logLikelihood) ~ + ("logPerplexity" -> instance.logPerplexity) ~ + ("vocabulary" -> instance.vocabulary.toList) + val rMetadataJson: String = compact(render(rMetadata)) + sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath) + + instance.pipeline.save(pipelinePath) + } + } + + class LDAWrapperReader extends MLReader[LDAWrapper] { + + override def load(path: String): LDAWrapper = { + implicit val format = DefaultFormats + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + + val rMetadataStr = sc.textFile(rMetadataPath, 1).first() + val rMetadata = parse(rMetadataStr) + val logLikelihood = (rMetadata \ "logLikelihood").extract[Double] + val logPerplexity = (rMetadata \ "logPerplexity").extract[Double] + val vocabulary = (rMetadata \ "vocabulary").extract[List[String]].toArray + + val pipeline = PipelineModel.load(pipelinePath) + new LDAWrapper(pipeline, logLikelihood, logPerplexity, vocabulary) + } + } +} diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala index 88ac26bc5e351..e23af51df5718 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala +++ 
b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala @@ -44,6 +44,8 @@ private[r] object RWrappers extends MLReader[Object] { GeneralizedLinearRegressionWrapper.load(path) case "org.apache.spark.ml.r.KMeansWrapper" => KMeansWrapper.load(path) + case "org.apache.spark.ml.r.LDAWrapper" => + LDAWrapper.load(path) case "org.apache.spark.ml.r.IsotonicRegressionWrapper" => IsotonicRegressionWrapper.load(path) case "org.apache.spark.ml.r.GaussianMixtureWrapper" => From f5472dda51b980a726346587257c22873ff708e3 Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Fri, 19 Aug 2016 09:19:47 +0800 Subject: [PATCH 022/270] [SPARK-16947][SQL] Support type coercion and foldable expression for inline tables ## What changes were proposed in this pull request? This patch improves inline table support with the following: 1. Support type coercion. 2. Support using foldable expressions. Previously only literals were supported. 3. Improve error message handling. 4. Improve test coverage. ## How was this patch tested? Added a new unit test suite ResolveInlineTablesSuite and a new file-based end-to-end test inline-table.sql. Author: petermaxlee Closes #14676 from petermaxlee/SPARK-16947. --- .../sql/catalyst/analysis/Analyzer.scala | 1 + .../analysis/ResolveInlineTables.scala | 112 ++++++++++++++ .../sql/catalyst/analysis/TypeCoercion.scala | 2 +- .../sql/catalyst/analysis/unresolved.scala | 26 +++- .../sql/catalyst/parser/AstBuilder.scala | 41 ++--- .../analysis/ResolveInlineTablesSuite.scala | 101 ++++++++++++ .../sql/catalyst/parser/PlanParserSuite.scala | 22 +-- .../sql-tests/inputs/inline-table.sql | 48 ++++++ .../sql-tests/results/inline-table.sql.out | 145 ++++++++++++++++++ 9 files changed, 452 insertions(+), 46 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala create mode 100644 sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTablesSuite.scala create mode 100644 sql/core/src/test/resources/sql-tests/inputs/inline-table.sql create mode 100644 sql/core/src/test/resources/sql-tests/results/inline-table.sql.out diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index 333dd4d9a4f2a..41e0e6d65e9ad 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -108,6 +108,7 @@ class Analyzer( GlobalAggregates :: ResolveAggregateFunctions :: TimeWindowing :: + ResolveInlineTables :: TypeCoercion.typeCoercionRules ++ extendedResolutionRules : _*), Batch("Nondeterministic", Once, diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala new file mode 100644 index 0000000000000..7323197b10f6e --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.analysis + +import scala.util.control.NonFatal + +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.expressions.Cast +import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan} +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.types.{StructField, StructType} + +/** + * An analyzer rule that replaces [[UnresolvedInlineTable]] with [[LocalRelation]]. + */ +object ResolveInlineTables extends Rule[LogicalPlan] { + override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { + case table: UnresolvedInlineTable if table.expressionsResolved => + validateInputDimension(table) + validateInputEvaluable(table) + convert(table) + } + + /** + * Validates the input data dimension: + * 1. All rows have the same cardinality. + * 2. The number of column aliases defined is consistent with the number of columns in data. + * + * This is package visible for unit testing. + */ + private[analysis] def validateInputDimension(table: UnresolvedInlineTable): Unit = { + if (table.rows.nonEmpty) { + val numCols = table.names.size + table.rows.zipWithIndex.foreach { case (row, ri) => + if (row.size != numCols) { + table.failAnalysis(s"expected $numCols columns but found ${row.size} columns in row $ri") + } + } + } + } + + /** + * Validates that all inline table data are valid expressions that can be evaluated + * (in this they must be foldable). + * + * This is package visible for unit testing. + */ + private[analysis] def validateInputEvaluable(table: UnresolvedInlineTable): Unit = { + table.rows.foreach { row => + row.foreach { e => + // Note that nondeterministic expressions are not supported since they are not foldable. + if (!e.resolved || !e.foldable) { + e.failAnalysis(s"cannot evaluate expression ${e.sql} in inline table definition") + } + } + } + } + + /** + * Convert a valid (with right shape and foldable inputs) [[UnresolvedInlineTable]] + * into a [[LocalRelation]]. + * + * This function attempts to coerce inputs into consistent types. + * + * This is package visible for unit testing. + */ + private[analysis] def convert(table: UnresolvedInlineTable): LocalRelation = { + // For each column, traverse all the values and find a common data type and nullability. 
+ val fields = table.rows.transpose.zip(table.names).map { case (column, name) => + val inputTypes = column.map(_.dataType) + val tpe = TypeCoercion.findWiderTypeWithoutStringPromotion(inputTypes).getOrElse { + table.failAnalysis(s"incompatible types found in column $name for inline table") + } + StructField(name, tpe, nullable = column.exists(_.nullable)) + } + val attributes = StructType(fields).toAttributes + assert(fields.size == table.names.size) + + val newRows: Seq[InternalRow] = table.rows.map { row => + InternalRow.fromSeq(row.zipWithIndex.map { case (e, ci) => + val targetType = fields(ci).dataType + try { + if (e.dataType.sameType(targetType)) { + e.eval() + } else { + Cast(e, targetType).eval() + } + } catch { + case NonFatal(ex) => + table.failAnalysis(s"failed to evaluate expression ${e.sql}: ${ex.getMessage}") + } + }) + } + + LocalRelation(attributes, newRows) + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala index 21e96aaf53844..193c3ec4e585a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala @@ -150,7 +150,7 @@ object TypeCoercion { * [[findTightestCommonType]], but can handle decimal types. If the wider decimal type exceeds * system limitation, this rule will truncate the decimal type before return it. */ - private def findWiderTypeWithoutStringPromotion(types: Seq[DataType]): Option[DataType] = { + def findWiderTypeWithoutStringPromotion(types: Seq[DataType]): Option[DataType] = { types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match { case Some(d) => findTightestCommonTypeOfTwo(d, c).orElse((d, c) match { case (t1: DecimalType, t2: DecimalType) => diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala index 3735a1501cbfa..235ae04782455 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala @@ -50,10 +50,30 @@ case class UnresolvedRelation( } /** - * Holds a table-valued function call that has yet to be resolved. + * An inline table that has not been resolved yet. Once resolved, it is turned by the analyzer into + * a [[org.apache.spark.sql.catalyst.plans.logical.LocalRelation]]. + * + * @param names list of column names + * @param rows expressions for the data + */ +case class UnresolvedInlineTable( + names: Seq[String], + rows: Seq[Seq[Expression]]) + extends LeafNode { + + lazy val expressionsResolved: Boolean = rows.forall(_.forall(_.resolved)) + override lazy val resolved = false + override def output: Seq[Attribute] = Nil +} + +/** + * A table-valued function, e.g. 
+ * {{{ + * select * from range(10); + * }}} */ -case class UnresolvedTableValuedFunction( - functionName: String, functionArgs: Seq[Expression]) extends LeafNode { +case class UnresolvedTableValuedFunction(functionName: String, functionArgs: Seq[Expression]) + extends LeafNode { override def output: Seq[Attribute] = Nil diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 01322ae327e4a..283e4d43ba2b9 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -670,39 +670,24 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { */ override def visitInlineTable(ctx: InlineTableContext): LogicalPlan = withOrigin(ctx) { // Get the backing expressions. - val expressions = ctx.expression.asScala.map { eCtx => - val e = expression(eCtx) - validate(e.foldable, "All expressions in an inline table must be constants.", eCtx) - e - } - - // Validate and evaluate the rows. - val (structType, structConstructor) = expressions.head.dataType match { - case st: StructType => - (st, (e: Expression) => e) - case dt => - val st = CreateStruct(Seq(expressions.head)).dataType - (st, (e: Expression) => CreateStruct(Seq(e))) - } - val rows = expressions.map { - case expression => - val safe = Cast(structConstructor(expression), structType) - safe.eval().asInstanceOf[InternalRow] + val rows = ctx.expression.asScala.map { e => + expression(e) match { + // inline table comes in two styles: + // style 1: values (1), (2), (3) -- multiple columns are supported + // style 2: values 1, 2, 3 -- only a single column is supported here + case CreateStruct(children) => children // style 1 + case child => Seq(child) // style 2 + } } - // Construct attributes. - val baseAttributes = structType.toAttributes.map(_.withNullability(true)) - val attributes = if (ctx.identifierList != null) { - val aliases = visitIdentifierList(ctx.identifierList) - validate(aliases.size == baseAttributes.size, - "Number of aliases must match the number of fields in an inline table.", ctx) - baseAttributes.zip(aliases).map(p => p._1.withName(p._2)) + val aliases = if (ctx.identifierList != null) { + visitIdentifierList(ctx.identifierList) } else { - baseAttributes + Seq.tabulate(rows.head.size)(i => s"col${i + 1}") } - // Create plan and add an alias if a name has been defined. - LocalRelation(attributes, rows).optionalMap(ctx.identifier)(aliasPlan) + val table = UnresolvedInlineTable(aliases, rows) + table.optionalMap(ctx.identifier)(aliasPlan) } /** diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTablesSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTablesSuite.scala new file mode 100644 index 0000000000000..920c6ea50f4ba --- /dev/null +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTablesSuite.scala @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.analysis + +import org.scalatest.BeforeAndAfter + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.catalyst.expressions.{Literal, Rand} +import org.apache.spark.sql.catalyst.expressions.aggregate.Count +import org.apache.spark.sql.catalyst.plans.PlanTest +import org.apache.spark.sql.types.{LongType, NullType} + +/** + * Unit tests for [[ResolveInlineTables]]. Note that there are also test cases defined in + * end-to-end tests (in sql/core module) for verifying the correct error messages are shown + * in negative cases. + */ +class ResolveInlineTablesSuite extends PlanTest with BeforeAndAfter { + + private def lit(v: Any): Literal = Literal(v) + + test("validate inputs are foldable") { + ResolveInlineTables.validateInputEvaluable( + UnresolvedInlineTable(Seq("c1", "c2"), Seq(Seq(lit(1))))) + + // nondeterministic (rand) should not work + intercept[AnalysisException] { + ResolveInlineTables.validateInputEvaluable( + UnresolvedInlineTable(Seq("c1"), Seq(Seq(Rand(1))))) + } + + // aggregate should not work + intercept[AnalysisException] { + ResolveInlineTables.validateInputEvaluable( + UnresolvedInlineTable(Seq("c1"), Seq(Seq(Count(lit(1)))))) + } + + // unresolved attribute should not work + intercept[AnalysisException] { + ResolveInlineTables.validateInputEvaluable( + UnresolvedInlineTable(Seq("c1"), Seq(Seq(UnresolvedAttribute("A"))))) + } + } + + test("validate input dimensions") { + ResolveInlineTables.validateInputDimension( + UnresolvedInlineTable(Seq("c1"), Seq(Seq(lit(1)), Seq(lit(2))))) + + // num alias != data dimension + intercept[AnalysisException] { + ResolveInlineTables.validateInputDimension( + UnresolvedInlineTable(Seq("c1", "c2"), Seq(Seq(lit(1)), Seq(lit(2))))) + } + + // num alias == data dimension, but data themselves are inconsistent + intercept[AnalysisException] { + ResolveInlineTables.validateInputDimension( + UnresolvedInlineTable(Seq("c1"), Seq(Seq(lit(1)), Seq(lit(21), lit(22))))) + } + } + + test("do not fire the rule if not all expressions are resolved") { + val table = UnresolvedInlineTable(Seq("c1", "c2"), Seq(Seq(UnresolvedAttribute("A")))) + assert(ResolveInlineTables(table) == table) + } + + test("convert") { + val table = UnresolvedInlineTable(Seq("c1"), Seq(Seq(lit(1)), Seq(lit(2L)))) + val converted = ResolveInlineTables.convert(table) + + assert(converted.output.map(_.dataType) == Seq(LongType)) + assert(converted.data.size == 2) + assert(converted.data(0).getLong(0) == 1L) + assert(converted.data(1).getLong(0) == 2L) + } + + test("nullability inference in convert") { + val table1 = UnresolvedInlineTable(Seq("c1"), Seq(Seq(lit(1)), Seq(lit(2L)))) + val converted1 = ResolveInlineTables.convert(table1) + assert(!converted1.schema.fields(0).nullable) + + val table2 = UnresolvedInlineTable(Seq("c1"), Seq(Seq(lit(1)), Seq(Literal(null, NullType)))) + val converted2 = ResolveInlineTables.convert(table2) + assert(converted2.schema.fields(0).nullable) + } +} diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala index cbe4a022e730d..2fcbfc7067a13 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala @@ -17,9 +17,8 @@ package org.apache.spark.sql.catalyst.parser -import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.FunctionIdentifier -import org.apache.spark.sql.catalyst.analysis.{UnresolvedGenerator, UnresolvedTableValuedFunction} +import org.apache.spark.sql.catalyst.analysis.{UnresolvedGenerator, UnresolvedInlineTable, UnresolvedTableValuedFunction} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ @@ -433,19 +432,14 @@ class PlanParserSuite extends PlanTest { } test("inline table") { - assertEqual("values 1, 2, 3, 4", LocalRelation.fromExternalRows( - Seq('col1.int), - Seq(1, 2, 3, 4).map(x => Row(x)))) + assertEqual("values 1, 2, 3, 4", + UnresolvedInlineTable(Seq("col1"), Seq(1, 2, 3, 4).map(x => Seq(Literal(x))))) + assertEqual( - "values (1, 'a'), (2, 'b'), (3, 'c') as tbl(a, b)", - LocalRelation.fromExternalRows( - Seq('a.int, 'b.string), - Seq((1, "a"), (2, "b"), (3, "c")).map(x => Row(x._1, x._2))).as("tbl")) - intercept("values (a, 'a'), (b, 'b')", - "All expressions in an inline table must be constants.") - intercept("values (1, 'a'), (2, 'b') as tbl(a, b, c)", - "Number of aliases must match the number of fields in an inline table.") - intercept[ArrayIndexOutOfBoundsException](parsePlan("values (1, 'a'), (2, 'b', 5Y)")) + "values (1, 'a'), (2, 'b') as tbl(a, b)", + UnresolvedInlineTable( + Seq("a", "b"), + Seq(Literal(1), Literal("a")) :: Seq(Literal(2), Literal("b")) :: Nil).as("tbl")) } test("simple select query with !> and !<") { diff --git a/sql/core/src/test/resources/sql-tests/inputs/inline-table.sql b/sql/core/src/test/resources/sql-tests/inputs/inline-table.sql new file mode 100644 index 0000000000000..5107fa4d55537 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/inline-table.sql @@ -0,0 +1,48 @@ + +-- single row, without table and column alias +select * from values ("one", 1); + +-- single row, without column alias +select * from values ("one", 1) as data; + +-- single row +select * from values ("one", 1) as data(a, b); + +-- single column multiple rows +select * from values 1, 2, 3 as data(a); + +-- three rows +select * from values ("one", 1), ("two", 2), ("three", null) as data(a, b); + +-- null type +select * from values ("one", null), ("two", null) as data(a, b); + +-- int and long coercion +select * from values ("one", 1), ("two", 2L) as data(a, b); + +-- foldable expressions +select * from values ("one", 1 + 0), ("two", 1 + 3L) as data(a, b); + +-- complex types +select * from values ("one", array(0, 1)), ("two", array(2, 3)) as data(a, b); + +-- decimal and double coercion +select * from values ("one", 2.0), ("two", 3.0D) as data(a, b); + +-- error reporting: nondeterministic function rand +select * from values ("one", rand(5)), ("two", 3.0D) as data(a, b); + +-- error reporting: different number of columns +select * from values ("one", 2.0), ("two") as data(a, b); + +-- error reporting: types that are incompatible +select * from values ("one", array(0, 1)), ("two", struct(1, 2)) as data(a, b); + +-- error reporting: number aliases different from number data values +select * from values ("one"), ("two") as data(a, 
b); + +-- error reporting: unresolved expression +select * from values ("one", random_not_exist_func(1)), ("two", 2) as data(a, b); + +-- error reporting: aggregate expression +select * from values ("one", count(1)), ("two", 2) as data(a, b); diff --git a/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out b/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out new file mode 100644 index 0000000000000..de6f01b8de772 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out @@ -0,0 +1,145 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 16 + + +-- !query 0 +select * from values ("one", 1) +-- !query 0 schema +struct +-- !query 0 output +one 1 + + +-- !query 1 +select * from values ("one", 1) as data +-- !query 1 schema +struct +-- !query 1 output +one 1 + + +-- !query 2 +select * from values ("one", 1) as data(a, b) +-- !query 2 schema +struct +-- !query 2 output +one 1 + + +-- !query 3 +select * from values 1, 2, 3 as data(a) +-- !query 3 schema +struct +-- !query 3 output +1 +2 +3 + + +-- !query 4 +select * from values ("one", 1), ("two", 2), ("three", null) as data(a, b) +-- !query 4 schema +struct +-- !query 4 output +one 1 +three NULL +two 2 + + +-- !query 5 +select * from values ("one", null), ("two", null) as data(a, b) +-- !query 5 schema +struct +-- !query 5 output +one NULL +two NULL + + +-- !query 6 +select * from values ("one", 1), ("two", 2L) as data(a, b) +-- !query 6 schema +struct +-- !query 6 output +one 1 +two 2 + + +-- !query 7 +select * from values ("one", 1 + 0), ("two", 1 + 3L) as data(a, b) +-- !query 7 schema +struct +-- !query 7 output +one 1 +two 4 + + +-- !query 8 +select * from values ("one", array(0, 1)), ("two", array(2, 3)) as data(a, b) +-- !query 8 schema +struct> +-- !query 8 output +one [0,1] +two [2,3] + + +-- !query 9 +select * from values ("one", 2.0), ("two", 3.0D) as data(a, b) +-- !query 9 schema +struct +-- !query 9 output +one 2.0 +two 3.0 + + +-- !query 10 +select * from values ("one", rand(5)), ("two", 3.0D) as data(a, b) +-- !query 10 schema +struct<> +-- !query 10 output +org.apache.spark.sql.AnalysisException +cannot evaluate expression rand(5) in inline table definition; line 1 pos 29 + + +-- !query 11 +select * from values ("one", 2.0), ("two") as data(a, b) +-- !query 11 schema +struct<> +-- !query 11 output +org.apache.spark.sql.AnalysisException +expected 2 columns but found 1 columns in row 1; line 1 pos 14 + + +-- !query 12 +select * from values ("one", array(0, 1)), ("two", struct(1, 2)) as data(a, b) +-- !query 12 schema +struct<> +-- !query 12 output +org.apache.spark.sql.AnalysisException +incompatible types found in column b for inline table; line 1 pos 14 + + +-- !query 13 +select * from values ("one"), ("two") as data(a, b) +-- !query 13 schema +struct<> +-- !query 13 output +org.apache.spark.sql.AnalysisException +expected 2 columns but found 1 columns in row 0; line 1 pos 14 + + +-- !query 14 +select * from values ("one", random_not_exist_func(1)), ("two", 2) as data(a, b) +-- !query 14 schema +struct<> +-- !query 14 output +org.apache.spark.sql.AnalysisException +Undefined function: 'random_not_exist_func'. 
This function is neither a registered temporary function nor a permanent function registered in the database 'default'.; line 1 pos 29 + + +-- !query 15 +select * from values ("one", count(1)), ("two", 2) as data(a, b) +-- !query 15 schema +struct<> +-- !query 15 output +org.apache.spark.sql.AnalysisException +cannot evaluate expression count(1) in inline table definition; line 1 pos 29 From b482c09fa22c5762a355f95820e4ba3e2517fb77 Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Thu, 18 Aug 2016 19:02:32 -0700 Subject: [PATCH 023/270] HOTFIX: compilation broken due to protected ctor. --- .../org/apache/spark/sql/catalyst/expressions/literals.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala index 95ed68fbb0528..7040008769a32 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala @@ -163,8 +163,7 @@ object DecimalLiteral { /** * In order to do type checking, use Literal.create() instead of constructor */ -case class Literal protected (value: Any, dataType: DataType) - extends LeafExpression with CodegenFallback { +case class Literal (value: Any, dataType: DataType) extends LeafExpression with CodegenFallback { override def foldable: Boolean = true override def nullable: Boolean = value == null From 287bea13050b8eedc3b8b6b3491f1b5e5bc24d7a Mon Sep 17 00:00:00 2001 From: sethah Date: Thu, 18 Aug 2016 22:16:48 -0700 Subject: [PATCH 024/270] [SPARK-7159][ML] Add multiclass logistic regression to Spark ML ## What changes were proposed in this pull request? This patch adds a new estimator/transformer `MultinomialLogisticRegression` to spark ML. JIRA: [SPARK-7159](https://issues.apache.org/jira/browse/SPARK-7159) ## How was this patch tested? Added new test suite `MultinomialLogisticRegressionSuite`. ## Approach ### Do not use a "pivot" class in the algorithm formulation Many implementations of multinomial logistic regression treat the problem as K - 1 independent binary logistic regression models where K is the number of possible outcomes in the output variable. In this case, one outcome is chosen as a "pivot" and the other K - 1 outcomes are regressed against the pivot. This is somewhat undesirable since the coefficients returned will be different for different choices of pivot variables. An alternative approach to the problem models class conditional probabilites using the softmax function and will return uniquely identifiable coefficients (assuming regularization is applied). This second approach is used in R's glmnet and was also recommended by dbtsai. ### Separate multinomial logistic regression and binary logistic regression The initial design makes multinomial logistic regression a separate estimator/transformer than the existing LogisticRegression estimator/transformer. An alternative design would be to merge them into one. **Arguments for:** * The multinomial case without pivot is distinctly different than the current binary case since the binary case uses a pivot class. * The current logistic regression model in ML uses a vector of coefficients and a scalar intercept. In the multinomial case, we require a matrix of coefficients and a vector of intercepts. 
There are potential workarounds for this issue if we were to merge the two estimators, but none are particularly elegant. **Arguments against:** * It may be inconvenient for users to have to switch the estimator class when transitioning between binary and multiclass (although the new multinomial estimator can be used for two class outcomes). * Some portions of the code are repeated. This is a major design point and warrants more discussion. ### Mean centering When no regularization is applied, the coefficients will not be uniquely identifiable. This is not hard to show and is discussed in further detail [here](https://core.ac.uk/download/files/153/6287975.pdf). R's glmnet deals with this by choosing the minimum l2 regularized solution (i.e. mean centering). Additionally, the intercepts are never regularized so they are always mean centered. This is the approach taken in this PR as well. ### Feature scaling In current ML logistic regression, the features are always standardized when running the optimization algorithm. They are always returned to the user in the original feature space, however. This same approach is maintained in this patch as well, but the implementation details are different. In ML logistic regression, the unregularized feature values are divided by the column standard deviation in every gradient update iteration. In contrast, MLlib transforms the entire input dataset to the scaled space _before_ optimizaton. In ML, this means that `numFeatures * numClasses` extra scalar divisions are required in every iteration. Performance testing shows that this has significant (4x in some cases) slow downs in each iteration. This can be avoided by transforming the input to the scaled space ala MLlib once, before iteration begins. This does add some overhead initially, but can make significant time savings in some cases. One issue with this approach is that if the input data is already cached, there may not be enough memory to cache the transformed data, which would make the algorithm _much_ slower. The tradeoffs here merit more discussion. ### Specifying and inferring the number of outcome classes The estimator checks the dataframe label column for metadata which specifies the number of values. If they are not specified, the length of the `histogram` variable is used, which is essentially the maximum value found in the column. The assumption then, is that the labels are zero-indexed when they are provided to the algorithm. ## Performance Below are some performance tests I have run so far. I am happy to add more cases or trials if we deem them necessary. 
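Before the performance numbers, a minimal sketch of the class-count inference described in the previous subsection. It mirrors the estimator code added in this patch; `dataset`, `labelCol`, and `histogram` are assumed to be in scope as they are inside `train()`:

```scala
import org.apache.spark.ml.util.MetadataUtils

// Prefer the ML attribute metadata on the label column; otherwise fall back to the label
// histogram, assuming zero-indexed labels (so its length is max label + 1).
val numClasses = MetadataUtils.getNumClasses(dataset.schema($(labelCol))) match {
  case Some(n: Int) =>
    require(n >= histogram.length, s"Specified number of classes $n was " +
      s"less than the number of unique labels ${histogram.length}")
    n
  case None => histogram.length
}
```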
Test cluster: 4 bare metal nodes, 128 GB RAM each, 48 cores each

Notes:

* Time in units of seconds
* Metric is classification accuracy

| algo | elasticNetParam | fitIntercept | metric | maxIter | numPoints | numClasses | numFeatures | time | standardization | regParam |
|--------|-------------------|----------------|----------|-----------|-------------|--------------|---------------|---------|-------------------|------------|
| ml | 0 | true | 0.746415 | 30 | 100000 | 3 | 100000 | 327.923 | true | 0 |
| mllib | 0 | true | 0.743785 | 30 | 100000 | 3 | 100000 | 390.217 | true | 0 |

| algo | elasticNetParam | fitIntercept | metric | maxIter | numPoints | numClasses | numFeatures | time | standardization | regParam |
|--------|-------------------|----------------|----------|-----------|-------------|--------------|---------------|---------|-------------------|------------|
| ml | 0 | true | 0.973238 | 30 | 2000000 | 3 | 10000 | 385.476 | true | 0 |
| mllib | 0 | true | 0.949828 | 30 | 2000000 | 3 | 10000 | 550.403 | true | 0 |

| algo | elasticNetParam | fitIntercept | metric | maxIter | numPoints | numClasses | numFeatures | time | standardization | regParam |
|--------|-------------------|----------------|----------|-----------|-------------|--------------|---------------|---------|-------------------|------------|
| mllib | 0 | true | 0.864358 | 30 | 2000000 | 3 | 10000 | 543.359 | true | 0.1 |
| ml | 0 | true | 0.867418 | 30 | 2000000 | 3 | 10000 | 401.955 | true | 0.1 |

| algo | elasticNetParam | fitIntercept | metric | maxIter | numPoints | numClasses | numFeatures | time | standardization | regParam |
|--------|-------------------|----------------|----------|-----------|-------------|--------------|---------------|---------|-------------------|------------|
| ml | 1 | true | 0.807449 | 30 | 2000000 | 3 | 10000 | 334.892 | true | 0.05 |

| algo | elasticNetParam | fitIntercept | metric | maxIter | numPoints | numClasses | numFeatures | time | standardization | regParam |
|--------|-------------------|----------------|----------|-----------|-------------|--------------|---------------|---------|-------------------|------------|
| ml | 0 | true | 0.602006 | 30 | 2000000 | 500 | 100 | 112.319 | true | 0 |
| mllib | 0 | true | 0.567226 | 30 | 2000000 | 500 | 100 | 263.768 | true | 0 |

## References

Friedman, et al.
["Regularization Paths for Generalized Linear Models via Coordinate Descent"](https://core.ac.uk/download/files/153/6287975.pdf) [http://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html](http://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html) ## Follow up items * Consider using level 2 BLAS routines in the gradient computations - [SPARK-17134](https://issues.apache.org/jira/browse/SPARK-17134) * Add model summary for MLOR - [SPARK-17139](https://issues.apache.org/jira/browse/SPARK-17139) * Add initial model to MLOR and add test for intercept priors - [SPARK-17140](https://issues.apache.org/jira/browse/SPARK-17140) * Python API - [SPARK-17138](https://issues.apache.org/jira/browse/SPARK-17138) * Consider changing the tree aggregation level for MLOR/BLOR or making it user configurable to avoid memory problems with high dimensional data - [SPARK-17090](https://issues.apache.org/jira/browse/SPARK-17090) * Refactor helper classes out of `LogisticRegression.scala` - [SPARK-17135](https://issues.apache.org/jira/browse/SPARK-17135) * Design optimizer interface for added flexibility in ML algos - [SPARK-17136](https://issues.apache.org/jira/browse/SPARK-17136) * Support compressing the coefficients and intercepts for MLOR models - [SPARK-17137](https://issues.apache.org/jira/browse/SPARK-17137) Author: sethah Closes #13796 from sethah/SPARK-7159_M. --- .../classification/LogisticRegression.scala | 425 +++++-- .../MultinomialLogisticRegression.scala | 620 ++++++++++ .../MultinomialLogisticRegressionSuite.scala | 1056 +++++++++++++++++ .../apache/spark/ml/util/MLTestingUtils.scala | 49 +- 4 files changed, 2062 insertions(+), 88 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala create mode 100644 mllib/src/test/scala/org/apache/spark/ml/classification/MultinomialLogisticRegressionSuite.scala diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala index fce3935d396fe..ea31c68e4c943 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala @@ -63,6 +63,7 @@ private[classification] trait LogisticRegressionParams extends ProbabilisticClas * equivalent. * * Default is 0.5. + * * @group setParam */ def setThreshold(value: Double): this.type = { @@ -131,6 +132,7 @@ private[classification] trait LogisticRegressionParams extends ProbabilisticClas /** * If [[threshold]] and [[thresholds]] are both set, ensures they are consistent. + * * @throws IllegalArgumentException if [[threshold]] and [[thresholds]] are not equivalent */ protected def checkThresholdConsistency(): Unit = { @@ -153,8 +155,8 @@ private[classification] trait LogisticRegressionParams extends ProbabilisticClas /** * Logistic regression. - * Currently, this class only supports binary classification. It will support multiclass - * in the future. + * Currently, this class only supports binary classification. For multiclass classification, + * use [[MultinomialLogisticRegression]] */ @Since("1.2.0") class LogisticRegression @Since("1.2.0") ( @@ -168,6 +170,7 @@ class LogisticRegression @Since("1.2.0") ( /** * Set the regularization parameter. * Default is 0.0. + * * @group setParam */ @Since("1.2.0") @@ -179,6 +182,7 @@ class LogisticRegression @Since("1.2.0") ( * For alpha = 0, the penalty is an L2 penalty. 
For alpha = 1, it is an L1 penalty. * For 0 < alpha < 1, the penalty is a combination of L1 and L2. * Default is 0.0 which is an L2 penalty. + * * @group setParam */ @Since("1.4.0") @@ -188,6 +192,7 @@ class LogisticRegression @Since("1.2.0") ( /** * Set the maximum number of iterations. * Default is 100. + * * @group setParam */ @Since("1.2.0") @@ -198,6 +203,7 @@ class LogisticRegression @Since("1.2.0") ( * Set the convergence tolerance of iterations. * Smaller value will lead to higher accuracy with the cost of more iterations. * Default is 1E-6. + * * @group setParam */ @Since("1.4.0") @@ -207,6 +213,7 @@ class LogisticRegression @Since("1.2.0") ( /** * Whether to fit an intercept term. * Default is true. + * * @group setParam */ @Since("1.4.0") @@ -220,6 +227,7 @@ class LogisticRegression @Since("1.2.0") ( * the models should be always converged to the same solution when no regularization * is applied. In R's GLMNET package, the default behavior is true as well. * Default is true. + * * @group setParam */ @Since("1.5.0") @@ -233,9 +241,10 @@ class LogisticRegression @Since("1.2.0") ( override def getThreshold: Double = super.getThreshold /** - * Whether to over-/under-sample training instances according to the given weights in weightCol. - * If not set or empty String, all instances are treated equally (weight 1.0). + * Sets the value of param [[weightCol]]. + * If this is not set or empty, we treat all instance weights as 1.0. * Default is not set, so all instances have weight one. + * * @group setParam */ @Since("1.6.0") @@ -310,12 +319,15 @@ class LogisticRegression @Since("1.2.0") ( throw new SparkException(msg) } + val isConstantLabel = histogram.count(_ != 0) == 1 + if (numClasses > 2) { - val msg = s"Currently, LogisticRegression with ElasticNet in ML package only supports " + - s"binary classification. Found $numClasses in the input dataset." + val msg = s"LogisticRegression with ElasticNet in ML package only supports " + + s"binary classification. Found $numClasses in the input dataset. Consider using " + + s"MultinomialLogisticRegression instead." logError(msg) throw new SparkException(msg) - } else if ($(fitIntercept) && numClasses == 2 && histogram(0) == 0.0) { + } else if ($(fitIntercept) && numClasses == 2 && isConstantLabel) { logWarning(s"All labels are one and fitIntercept=true, so the coefficients will be " + s"zeros and the intercept will be positive infinity; as a result, " + s"training is not needed.") @@ -326,12 +338,9 @@ class LogisticRegression @Since("1.2.0") ( s"training is not needed.") (Vectors.sparse(numFeatures, Seq()), Double.NegativeInfinity, Array.empty[Double]) } else { - if (!$(fitIntercept) && numClasses == 2 && histogram(0) == 0.0) { - logWarning(s"All labels are one and fitIntercept=false. It's a dangerous ground, " + - s"so the algorithm may not converge.") - } else if (!$(fitIntercept) && numClasses == 1) { - logWarning(s"All labels are zero and fitIntercept=false. It's a dangerous ground, " + - s"so the algorithm may not converge.") + if (!$(fitIntercept) && isConstantLabel) { + logWarning(s"All labels belong to a single class and fitIntercept=false. 
It's a " + + s"dangerous ground, so the algorithm may not converge.") } val featuresMean = summarizer.mean.toArray @@ -349,7 +358,7 @@ class LogisticRegression @Since("1.2.0") ( val bcFeaturesStd = instances.context.broadcast(featuresStd) val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept), - $(standardization), bcFeaturesStd, regParamL2) + $(standardization), bcFeaturesStd, regParamL2, multinomial = false) val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) { new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol)) @@ -416,7 +425,7 @@ class LogisticRegression @Since("1.2.0") ( /* Note that in Logistic Regression, the objective history (loss + regularization) - is log-likelihood which is invariance under feature standardization. As a result, + is log-likelihood which is invariant under feature standardization. As a result, the objective history from optimizer is the same as the one in the original space. */ val arrayBuilder = mutable.ArrayBuilder.make[Double] @@ -559,6 +568,7 @@ class LogisticRegressionModel private[spark] ( /** * Evaluates the model on a test dataset. + * * @param dataset Test dataset to evaluate model on. */ @Since("2.0.0") @@ -681,6 +691,7 @@ object LogisticRegressionModel extends MLReadable[LogisticRegressionModel] { val data = sparkSession.read.format("parquet").load(dataPath) // We will need numClasses, numFeatures in the future for multinomial logreg support. + // TODO: remove numClasses and numFeatures fields? val Row(numClasses: Int, numFeatures: Int, intercept: Double, coefficients: Vector) = MLUtils.convertVectorColumnsToML(data, "coefficients") .select("numClasses", "numFeatures", "intercept", "coefficients") @@ -710,6 +721,7 @@ private[classification] class MultiClassSummarizer extends Serializable { /** * Add a new label into this MultilabelSummarizer, and update the distinct map. + * * @param label The label for this data point. * @param weight The weight of this instances. * @return This MultilabelSummarizer @@ -933,32 +945,310 @@ class BinaryLogisticRegressionSummary private[classification] ( } /** - * LogisticAggregator computes the gradient and loss for binary logistic loss function, as used - * in binary classification for instances in sparse or dense vector in an online fashion. - * - * Note that multinomial logistic loss is not supported yet! + * LogisticAggregator computes the gradient and loss for binary or multinomial logistic (softmax) + * loss function, as used in classification for instances in sparse or dense vector in an online + * fashion. * - * Two LogisticAggregator can be merged together to have a summary of loss and gradient of + * Two LogisticAggregators can be merged together to have a summary of loss and gradient of * the corresponding joint dataset. * + * For improving the convergence rate during the optimization process and also to prevent against + * features with very large variances exerting an overly large influence during model training, + * packages like R's GLMNET perform the scaling to unit variance and remove the mean in order to + * reduce the condition number. The model is then trained in this scaled space, but returns the + * coefficients in the original scale. See page 9 in + * http://cran.r-project.org/web/packages/glmnet/glmnet.pdf + * + * However, we don't want to apply the [[org.apache.spark.ml.feature.StandardScaler]] on the + * training dataset, and then cache the standardized dataset since it will create a lot of overhead. 
+ * As a result, we perform the scaling implicitly when we compute the objective function (though + * we do not subtract the mean). + * + * Note that there is a difference between multinomial (softmax) and binary loss. The binary case + * uses one outcome class as a "pivot" and regresses the other class against the pivot. In the + * multinomial case, the softmax loss function is used to model each class probability + * independently. Using softmax loss produces `K` sets of coefficients, while using a pivot class + * produces `K - 1` sets of coefficients (a single coefficient vector in the binary case). In the + * binary case, we can say that the coefficients are shared between the positive and negative + * classes. When regularization is applied, multinomial (softmax) loss will produce a result + * different from binary loss since the positive and negative don't share the coefficients while the + * binary regression shares the coefficients between positive and negative. + * + * The following is a mathematical derivation for the multinomial (softmax) loss. + * + * The probability of the multinomial outcome $y$ taking on any of the K possible outcomes is: + * + *
+ * $$ + * P(y_i=0|\vec{x}_i, \beta) = \frac{e^{\vec{x}_i^T \vec{\beta}_0}}{\sum_{k=0}^{K-1} + * e^{\vec{x}_i^T \vec{\beta}_k}} \\ + * P(y_i=1|\vec{x}_i, \beta) = \frac{e^{\vec{x}_i^T \vec{\beta}_1}}{\sum_{k=0}^{K-1} + * e^{\vec{x}_i^T \vec{\beta}_k}}\\ + * P(y_i=K-1|\vec{x}_i, \beta) = \frac{e^{\vec{x}_i^T \vec{\beta}_{K-1}}\,}{\sum_{k=0}^{K-1} + * e^{\vec{x}_i^T \vec{\beta}_k}} + * $$ + *
+ * + * The model coefficients $\beta = (\beta_0, \beta_1, \beta_2, ..., \beta_{K-1})$ become a matrix + * which has dimension of $K \times (N+1)$ if the intercepts are added. If the intercepts are not + * added, the dimension will be $K \times N$. + * + * Note that the coefficients in the model above lack identifiability. That is, any constant scalar + * can be added to all of the coefficients and the probabilities remain the same. + * + *
+ * $$ + * \begin{align} + * \frac{e^{\vec{x}_i^T \left(\vec{\beta}_0 + \vec{c}\right)}}{\sum_{k=0}^{K-1} + * e^{\vec{x}_i^T \left(\vec{\beta}_k + \vec{c}\right)}} + * = \frac{e^{\vec{x}_i^T \vec{\beta}_0}e^{\vec{x}_i^T \vec{c}}\,}{e^{\vec{x}_i^T \vec{c}} + * \sum_{k=0}^{K-1} e^{\vec{x}_i^T \vec{\beta}_k}} + * = \frac{e^{\vec{x}_i^T \vec{\beta}_0}}{\sum_{k=0}^{K-1} e^{\vec{x}_i^T \vec{\beta}_k}} + * \end{align} + * $$ + *
+ * + * However, when regularization is added to the loss function, the coefficients are indeed + * identifiable because there is only one set of coefficients which minimizes the regularization + * term. When no regularization is applied, we choose the coefficients with the minimum L2 + * penalty for consistency and reproducibility. For further discussion see: + * + * Friedman, et al. "Regularization Paths for Generalized Linear Models via Coordinate Descent" + * + * The loss of objective function for a single instance of data (we do not include the + * regularization term here for simplicity) can be written as + * + *
+ * $$ + * \begin{align} + * \ell\left(\beta, x_i\right) &= -log{P\left(y_i \middle| \vec{x}_i, \beta\right)} \\ + * &= log\left(\sum_{k=0}^{K-1}e^{\vec{x}_i^T \vec{\beta}_k}\right) - \vec{x}_i^T \vec{\beta}_y\\ + * &= log\left(\sum_{k=0}^{K-1} e^{margins_k}\right) - margins_y + * \end{align} + * $$ + *
+ * + * where ${margins}_k = \vec{x}_i^T \vec{\beta}_k$. + * + * For optimization, we have to calculate the first derivative of the loss function, and a simple + * calculation shows that + * + *
+ * $$ + * \begin{align} + * \frac{\partial \ell(\beta, \vec{x}_i, w_i)}{\partial \beta_{j, k}} + * &= x_{i,j} \cdot w_i \cdot \left(\frac{e^{\vec{x}_i \cdot \vec{\beta}_k}}{\sum_{k'=0}^{K-1} + * e^{\vec{x}_i \cdot \vec{\beta}_{k'}}\,} - I_{y=k}\right) \\ + * &= x_{i, j} \cdot w_i \cdot multiplier_k + * \end{align} + * $$ + *
+ * + * where $w_i$ is the sample weight, $I_{y=k}$ is an indicator function + * + *
+ * $$ + * I_{y=k} = \begin{cases} + * 1 & y = k \\ + * 0 & else + * \end{cases} + * $$ + *
+ * + * and + * + *
+ * $$ + * multiplier_k = \left(\frac{e^{\vec{x}_i \cdot \vec{\beta}_k}}{\sum_{k=0}^{K-1} + * e^{\vec{x}_i \cdot \vec{\beta}_k}} - I_{y=k}\right) + * $$ + *
+ * + * If any of margins is larger than 709.78, the numerical computation of multiplier and loss + * function will suffer from arithmetic overflow. This issue occurs when there are outliers in + * data which are far away from the hyperplane, and this will cause the failing of training once + * infinity is introduced. Note that this is only a concern when max(margins) > 0. + * + * Fortunately, when max(margins) = maxMargin > 0, the loss function and the multiplier can easily + * be rewritten into the following equivalent numerically stable formula. + * + *
+ * $$ + * \ell\left(\beta, x\right) = log\left(\sum_{k=0}^{K-1} e^{margins_k - maxMargin}\right) - + * margins_{y} + maxMargin + * $$ + *
+ * + * Note that each term, $(margins_k - maxMargin)$ in the exponential is no greater than zero; as a + * result, overflow will not happen with this formula. + * + * For $multiplier$, a similar trick can be applied as the following, + * + *
+ * $$ + * multiplier_k = \left(\frac{e^{\vec{x}_i \cdot \vec{\beta}_k - maxMargin}}{\sum_{k'=0}^{K-1} + * e^{\vec{x}_i \cdot \vec{\beta}_{k'} - maxMargin}} - I_{y=k}\right) + * $$ + *
+ * * @param bcCoefficients The broadcast coefficients corresponding to the features. * @param bcFeaturesStd The broadcast standard deviation values of the features. * @param numClasses the number of possible outcomes for k classes classification problem in * Multinomial Logistic Regression. * @param fitIntercept Whether to fit an intercept term. + * @param multinomial Whether to use multinomial (softmax) or binary loss */ private class LogisticAggregator( - val bcCoefficients: Broadcast[Vector], - val bcFeaturesStd: Broadcast[Array[Double]], - private val numFeatures: Int, + bcCoefficients: Broadcast[Vector], + bcFeaturesStd: Broadcast[Array[Double]], numClasses: Int, - fitIntercept: Boolean) extends Serializable { + fitIntercept: Boolean, + multinomial: Boolean) extends Serializable with Logging { + + private val numFeatures = bcFeaturesStd.value.length + private val numFeaturesPlusIntercept = if (fitIntercept) numFeatures + 1 else numFeatures + private val coefficientSize = bcCoefficients.value.size + if (multinomial) { + require(numClasses == coefficientSize / numFeaturesPlusIntercept, s"The number of " + + s"coefficients should be ${numClasses * numFeaturesPlusIntercept} but was $coefficientSize") + } else { + require(coefficientSize == numFeaturesPlusIntercept, s"Expected $numFeaturesPlusIntercept " + + s"coefficients but got $coefficientSize") + require(numClasses == 1 || numClasses == 2, s"Binary logistic aggregator requires numClasses " + + s"in {1, 2} but found $numClasses.") + } private var weightSum = 0.0 private var lossSum = 0.0 - private val gradientSumArray = - Array.ofDim[Double](if (fitIntercept) numFeatures + 1 else numFeatures) + private val gradientSumArray = Array.ofDim[Double](coefficientSize) + + if (multinomial && numClasses <= 2) { + logInfo(s"Multinomial logistic regression for binary classification yields separate " + + s"coefficients for positive and negative classes. When no regularization is applied, the" + + s"result will be effectively the same as binary logistic regression. When regularization" + + s"is applied, multinomial loss will produce a result different from binary loss.") + } + + /** Update gradient and loss using binary loss function. */ + private def binaryUpdateInPlace( + features: Vector, + weight: Double, + label: Double): Unit = { + + val localFeaturesStd = bcFeaturesStd.value + val localCoefficients = bcCoefficients.value + val localGradientArray = gradientSumArray + val margin = - { + var sum = 0.0 + features.foreachActive { (index, value) => + if (localFeaturesStd(index) != 0.0 && value != 0.0) { + sum += localCoefficients(index) * value / localFeaturesStd(index) + } + } + if (fitIntercept) sum += localCoefficients(numFeaturesPlusIntercept - 1) + sum + } + + val multiplier = weight * (1.0 / (1.0 + math.exp(margin)) - label) + + features.foreachActive { (index, value) => + if (localFeaturesStd(index) != 0.0 && value != 0.0) { + localGradientArray(index) += multiplier * value / localFeaturesStd(index) + } + } + + if (fitIntercept) { + localGradientArray(numFeaturesPlusIntercept - 1) += multiplier + } + + if (label > 0) { + // The following is equivalent to log(1 + exp(margin)) but more numerically stable. + lossSum += weight * MLUtils.log1pExp(margin) + } else { + lossSum += weight * (MLUtils.log1pExp(margin) - margin) + } + } + + /** Update gradient and loss using multinomial (softmax) loss function. 
*/ + private def multinomialUpdateInPlace( + features: Vector, + weight: Double, + label: Double): Unit = { + // TODO: use level 2 BLAS operations + /* + Note: this can still be used when numClasses = 2 for binary + logistic regression without pivoting. + */ + val localFeaturesStd = bcFeaturesStd.value + val localCoefficients = bcCoefficients.value + val localGradientArray = gradientSumArray + + // marginOfLabel is margins(label) in the formula + var marginOfLabel = 0.0 + var maxMargin = Double.NegativeInfinity + + val margins = Array.tabulate(numClasses) { i => + var margin = 0.0 + features.foreachActive { (index, value) => + if (localFeaturesStd(index) != 0.0 && value != 0.0) { + margin += localCoefficients(i * numFeaturesPlusIntercept + index) * + value / localFeaturesStd(index) + } + } + + if (fitIntercept) { + margin += localCoefficients(i * numFeaturesPlusIntercept + numFeatures) + } + if (i == label.toInt) marginOfLabel = margin + if (margin > maxMargin) { + maxMargin = margin + } + margin + } + + /** + * When maxMargin > 0, the original formula could cause overflow. + * We address this by subtracting maxMargin from all the margins, so it's guaranteed + * that all of the new margins will be smaller than zero to prevent arithmetic overflow. + */ + val sum = { + var temp = 0.0 + if (maxMargin > 0) { + for (i <- 0 until numClasses) { + margins(i) -= maxMargin + temp += math.exp(margins(i)) + } + } else { + for (i <- 0 until numClasses) { + temp += math.exp(margins(i)) + } + } + temp + } + + for (i <- 0 until numClasses) { + val multiplier = math.exp(margins(i)) / sum - { + if (label == i) 1.0 else 0.0 + } + features.foreachActive { (index, value) => + if (localFeaturesStd(index) != 0.0 && value != 0.0) { + localGradientArray(i * numFeaturesPlusIntercept + index) += + weight * multiplier * value / localFeaturesStd(index) + } + } + if (fitIntercept) { + localGradientArray(i * numFeaturesPlusIntercept + numFeatures) += weight * multiplier + } + } + + val loss = if (maxMargin > 0) { + math.log(sum) - marginOfLabel + maxMargin + } else { + math.log(sum) - marginOfLabel + } + lossSum += weight * loss + } /** * Add a new training instance to this LogisticAggregator, and update the loss and gradient @@ -975,52 +1265,10 @@ private class LogisticAggregator( if (weight == 0.0) return this - val coefficientsArray = bcCoefficients.value match { - case dv: DenseVector => dv.values - case _ => - throw new IllegalArgumentException( - "coefficients only supports dense vector" + - s"but got type ${bcCoefficients.value.getClass}.") - } - val localGradientSumArray = gradientSumArray - - val featuresStd = bcFeaturesStd.value - numClasses match { - case 2 => - // For Binary Logistic Regression. - val margin = - { - var sum = 0.0 - features.foreachActive { (index, value) => - if (featuresStd(index) != 0.0 && value != 0.0) { - sum += coefficientsArray(index) * (value / featuresStd(index)) - } - } - sum + { - if (fitIntercept) coefficientsArray(numFeatures) else 0.0 - } - } - - val multiplier = weight * (1.0 / (1.0 + math.exp(margin)) - label) - - features.foreachActive { (index, value) => - if (featuresStd(index) != 0.0 && value != 0.0) { - localGradientSumArray(index) += multiplier * (value / featuresStd(index)) - } - } - - if (fitIntercept) { - localGradientSumArray(numFeatures) += multiplier - } - - if (label > 0) { - // The following is equivalent to log(1 + exp(margin)) but more numerically stable. 
- lossSum += weight * MLUtils.log1pExp(margin) - } else { - lossSum += weight * (MLUtils.log1pExp(margin) - margin) - } - case _ => - new NotImplementedError("LogisticRegression with ElasticNet in ML package " + - "only supports binary classification for now.") + if (multinomial) { + multinomialUpdateInPlace(features, weight, label) + } else { + binaryUpdateInPlace(features, weight, label) } weightSum += weight this @@ -1071,8 +1319,8 @@ private class LogisticAggregator( } /** - * LogisticCostFun implements Breeze's DiffFunction[T] for a multinomial logistic loss function, - * as used in multi-class classification (it is also used in binary logistic regression). + * LogisticCostFun implements Breeze's DiffFunction[T] for a multinomial (softmax) logistic loss + * function, as used in multi-class classification (it is also used in binary logistic regression). * It returns the loss and gradient with L2 regularization at a particular point (coefficients). * It's used in Breeze's convex optimization routines. */ @@ -1082,36 +1330,36 @@ private class LogisticCostFun( fitIntercept: Boolean, standardization: Boolean, bcFeaturesStd: Broadcast[Array[Double]], - regParamL2: Double) extends DiffFunction[BDV[Double]] { + regParamL2: Double, + multinomial: Boolean) extends DiffFunction[BDV[Double]] { - val featuresStd = bcFeaturesStd.value override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = { - val numFeatures = featuresStd.length val coeffs = Vectors.fromBreeze(coefficients) val bcCoeffs = instances.context.broadcast(coeffs) - val n = coeffs.size + val featuresStd = bcFeaturesStd.value + val numFeatures = featuresStd.length val logisticAggregator = { val seqOp = (c: LogisticAggregator, instance: Instance) => c.add(instance) val combOp = (c1: LogisticAggregator, c2: LogisticAggregator) => c1.merge(c2) instances.treeAggregate( - new LogisticAggregator(bcCoeffs, bcFeaturesStd, numFeatures, numClasses, fitIntercept) + new LogisticAggregator(bcCoeffs, bcFeaturesStd, numClasses, fitIntercept, + multinomial) )(seqOp, combOp) } val totalGradientArray = logisticAggregator.gradient.toArray - // regVal is the sum of coefficients squares excluding intercept for L2 regularization. val regVal = if (regParamL2 == 0.0) { 0.0 } else { var sum = 0.0 - coeffs.foreachActive { (index, value) => - // If `fitIntercept` is true, the last term which is intercept doesn't - // contribute to the regularization. - if (index != numFeatures) { + coeffs.foreachActive { case (index, value) => + // We do not apply regularization to the intercepts + val isIntercept = fitIntercept && ((index + 1) % (numFeatures + 1) == 0) + if (!isIntercept) { // The following code will compute the loss of the regularization; also // the gradient of the regularization, and add back to totalGradientArray. sum += { @@ -1119,13 +1367,18 @@ private class LogisticCostFun( totalGradientArray(index) += regParamL2 * value value * value } else { - if (featuresStd(index) != 0.0) { + val featureIndex = if (fitIntercept) { + index % (numFeatures + 1) + } else { + index % numFeatures + } + if (featuresStd(featureIndex) != 0.0) { // If `standardization` is false, we still standardize the data // to improve the rate of convergence; as a result, we have to // perform this reverse standardization by penalizing each component // differently to get effectively the same objective function when // the training dataset is not standardized. 
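+          // Added note (not in the original patch): the coefficient vector is laid out as one or
+          // more rows of (numFeatures + 1) slots when an intercept is fit (K rows for the softmax
+          // case, a single row for binary), so index % (numFeatures + 1) recovers the feature
+          // position and the last slot of each row is the intercept, which is never regularized.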
- val temp = value / (featuresStd(index) * featuresStd(index)) + val temp = value / (featuresStd(featureIndex) * featuresStd(featureIndex)) totalGradientArray(index) += regParamL2 * temp value * temp } else { diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala new file mode 100644 index 0000000000000..dfadd68c5f476 --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala @@ -0,0 +1,620 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.classification + +import scala.collection.mutable + +import breeze.linalg.{DenseVector => BDV} +import breeze.optimize.{CachedDiffFunction, LBFGS => BreezeLBFGS, OWLQN => BreezeOWLQN} +import org.apache.hadoop.fs.Path + +import org.apache.spark.SparkException +import org.apache.spark.annotation.{Experimental, Since} +import org.apache.spark.internal.Logging +import org.apache.spark.ml.feature.Instance +import org.apache.spark.ml.linalg._ +import org.apache.spark.ml.param._ +import org.apache.spark.ml.param.shared._ +import org.apache.spark.ml.util._ +import org.apache.spark.mllib.linalg.VectorImplicits._ +import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer +import org.apache.spark.rdd.RDD +import org.apache.spark.sql.{Dataset, Row} +import org.apache.spark.sql.functions.{col, lit} +import org.apache.spark.sql.types.DoubleType +import org.apache.spark.storage.StorageLevel + +/** + * Params for multinomial logistic (softmax) regression. + */ +private[classification] trait MultinomialLogisticRegressionParams + extends ProbabilisticClassifierParams with HasRegParam with HasElasticNetParam with HasMaxIter + with HasFitIntercept with HasTol with HasStandardization with HasWeightCol { + + /** + * Set thresholds in multiclass (or binary) classification to adjust the probability of + * predicting each class. Array must have length equal to the number of classes, with values >= 0. + * The class with largest value p/t is predicted, where p is the original probability of that + * class and t is the class' threshold. + * + * @group setParam + */ + def setThresholds(value: Array[Double]): this.type = { + set(thresholds, value) + } + + /** + * Get thresholds for binary or multiclass classification. + * + * @group getParam + */ + override def getThresholds: Array[Double] = { + $(thresholds) + } +} + +/** + * :: Experimental :: + * Multinomial Logistic (softmax) regression. 
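+ *
+ * A minimal usage sketch (illustrative only; the training/test datasets and their column
+ * values are assumptions, not part of this patch):
+ * {{{
+ *   // "label" must contain class indices 0, 1, ..., K-1 and "features" ML Vectors.
+ *   val mlor = new MultinomialLogisticRegression()
+ *     .setRegParam(0.1)
+ *     .setElasticNetParam(0.0)
+ *   val model = mlor.fit(training)
+ *   model.transform(test).select("probability", "prediction")
+ * }}}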
+ */ +@Since("2.1.0") +@Experimental +class MultinomialLogisticRegression @Since("2.1.0") ( + @Since("2.1.0") override val uid: String) + extends ProbabilisticClassifier[Vector, + MultinomialLogisticRegression, MultinomialLogisticRegressionModel] + with MultinomialLogisticRegressionParams with DefaultParamsWritable with Logging { + + @Since("2.1.0") + def this() = this(Identifiable.randomUID("mlogreg")) + + /** + * Set the regularization parameter. + * Default is 0.0. + * + * @group setParam + */ + @Since("2.1.0") + def setRegParam(value: Double): this.type = set(regParam, value) + setDefault(regParam -> 0.0) + + /** + * Set the ElasticNet mixing parameter. + * For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty. + * For 0 < alpha < 1, the penalty is a combination of L1 and L2. + * Default is 0.0 which is an L2 penalty. + * + * @group setParam + */ + @Since("2.1.0") + def setElasticNetParam(value: Double): this.type = set(elasticNetParam, value) + setDefault(elasticNetParam -> 0.0) + + /** + * Set the maximum number of iterations. + * Default is 100. + * + * @group setParam + */ + @Since("2.1.0") + def setMaxIter(value: Int): this.type = set(maxIter, value) + setDefault(maxIter -> 100) + + /** + * Set the convergence tolerance of iterations. + * Smaller value will lead to higher accuracy with the cost of more iterations. + * Default is 1E-6. + * + * @group setParam + */ + @Since("2.1.0") + def setTol(value: Double): this.type = set(tol, value) + setDefault(tol -> 1E-6) + + /** + * Whether to fit an intercept term. + * Default is true. + * + * @group setParam + */ + @Since("2.1.0") + def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value) + setDefault(fitIntercept -> true) + + /** + * Whether to standardize the training features before fitting the model. + * The coefficients of models will be always returned on the original scale, + * so it will be transparent for users. Note that with/without standardization, + * the models should always converge to the same solution when no regularization + * is applied. In R's GLMNET package, the default behavior is true as well. + * Default is true. + * + * @group setParam + */ + @Since("2.1.0") + def setStandardization(value: Boolean): this.type = set(standardization, value) + setDefault(standardization -> true) + + /** + * Sets the value of param [[weightCol]]. + * If this is not set or empty, we treat all instance weights as 1.0. + * Default is not set, so all instances have weight one. 
+ * + * @group setParam + */ + @Since("2.1.0") + def setWeightCol(value: String): this.type = set(weightCol, value) + + @Since("2.1.0") + override def setThresholds(value: Array[Double]): this.type = super.setThresholds(value) + + override protected[spark] def train(dataset: Dataset[_]): MultinomialLogisticRegressionModel = { + val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol)) + val instances: RDD[Instance] = + dataset.select(col($(labelCol)).cast(DoubleType), w, col($(featuresCol))).rdd.map { + case Row(label: Double, weight: Double, features: Vector) => + Instance(label, weight, features) + } + + val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE + if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK) + + val instr = Instrumentation.create(this, instances) + instr.logParams(regParam, elasticNetParam, standardization, thresholds, + maxIter, tol, fitIntercept) + + val (summarizer, labelSummarizer) = { + val seqOp = (c: (MultivariateOnlineSummarizer, MultiClassSummarizer), + instance: Instance) => + (c._1.add(instance.features, instance.weight), c._2.add(instance.label, instance.weight)) + + val combOp = (c1: (MultivariateOnlineSummarizer, MultiClassSummarizer), + c2: (MultivariateOnlineSummarizer, MultiClassSummarizer)) => + (c1._1.merge(c2._1), c1._2.merge(c2._2)) + + instances.treeAggregate( + new MultivariateOnlineSummarizer, new MultiClassSummarizer)(seqOp, combOp) + } + + val histogram = labelSummarizer.histogram + val numInvalid = labelSummarizer.countInvalid + val numFeatures = summarizer.mean.size + val numFeaturesPlusIntercept = if (getFitIntercept) numFeatures + 1 else numFeatures + + val numClasses = MetadataUtils.getNumClasses(dataset.schema($(labelCol))) match { + case Some(n: Int) => + require(n >= histogram.length, s"Specified number of classes $n was " + + s"less than the number of unique labels ${histogram.length}") + n + case None => histogram.length + } + + instr.logNumClasses(numClasses) + instr.logNumFeatures(numFeatures) + + val (coefficients, intercepts, objectiveHistory) = { + if (numInvalid != 0) { + val msg = s"Classification labels should be in {0 to ${numClasses - 1} " + + s"Found $numInvalid invalid labels." + logError(msg) + throw new SparkException(msg) + } + + val isConstantLabel = histogram.count(_ != 0) == 1 + + if ($(fitIntercept) && isConstantLabel) { + // we want to produce a model that will always predict the constant label so all the + // coefficients will be zero, and the constant label class intercept will be +inf + val constantLabelIndex = Vectors.dense(histogram).argmax + (Matrices.sparse(numClasses, numFeatures, Array.fill(numFeatures + 1)(0), + Array.empty[Int], Array.empty[Double]), + Vectors.sparse(numClasses, Seq((constantLabelIndex, Double.PositiveInfinity))), + Array.empty[Double]) + } else { + if (!$(fitIntercept) && isConstantLabel) { + logWarning(s"All labels belong to a single class and fitIntercept=false. It's" + + s"a dangerous ground, so the algorithm may not converge.") + } + + val featuresStd = summarizer.variance.toArray.map(math.sqrt) + val featuresMean = summarizer.mean.toArray + if (!$(fitIntercept) && (0 until numFeatures).exists { i => + featuresStd(i) == 0.0 && featuresMean(i) != 0.0 }) { + logWarning("Fitting MultinomialLogisticRegressionModel without intercept on dataset " + + "with constant nonzero column, Spark MLlib outputs zero coefficients for constant " + + "nonzero columns. 
This behavior is the same as R glmnet but different from LIBSVM.") + } + + val regParamL1 = $(elasticNetParam) * $(regParam) + val regParamL2 = (1.0 - $(elasticNetParam)) * $(regParam) + + val bcFeaturesStd = instances.context.broadcast(featuresStd) + val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept), + $(standardization), bcFeaturesStd, regParamL2, multinomial = true) + + val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) { + new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol)) + } else { + val standardizationParam = $(standardization) + def regParamL1Fun = (index: Int) => { + // Remove the L1 penalization on the intercept + val isIntercept = $(fitIntercept) && ((index + 1) % numFeaturesPlusIntercept == 0) + if (isIntercept) { + 0.0 + } else { + if (standardizationParam) { + regParamL1 + } else { + val featureIndex = if ($(fitIntercept)) { + index % numFeaturesPlusIntercept + } else { + index % numFeatures + } + // If `standardization` is false, we still standardize the data + // to improve the rate of convergence; as a result, we have to + // perform this reverse standardization by penalizing each component + // differently to get effectively the same objective function when + // the training dataset is not standardized. + if (featuresStd(featureIndex) != 0.0) { + regParamL1 / featuresStd(featureIndex) + } else { + 0.0 + } + } + } + } + new BreezeOWLQN[Int, BDV[Double]]($(maxIter), 10, regParamL1Fun, $(tol)) + } + + val initialCoefficientsWithIntercept = Vectors.zeros(numClasses * numFeaturesPlusIntercept) + + if ($(fitIntercept)) { + /* + For multinomial logistic regression, when we initialize the coefficients as zeros, + it will converge faster if we initialize the intercepts such that + it follows the distribution of the labels. + {{{ + P(1) = \exp(b_1) / Z + ... + P(K) = \exp(b_K) / Z + where Z = \sum_{k=1}^{K} \exp(b_k) + }}} + Since this doesn't have a unique solution, one of the solutions that satisfies the + above equations is + {{{ + \exp(b_k) = count_k * \exp(\lambda) + b_k = \log(count_k) * \lambda + }}} + \lambda is a free parameter, so choose the phase \lambda such that the + mean is centered. This yields + {{{ + b_k = \log(count_k) + b_k' = b_k - \mean(b_k) + }}} + */ + val rawIntercepts = histogram.map(c => math.log(c + 1)) // add 1 for smoothing + val rawMean = rawIntercepts.sum / rawIntercepts.length + rawIntercepts.indices.foreach { i => + initialCoefficientsWithIntercept.toArray(i * numFeaturesPlusIntercept + numFeatures) = + rawIntercepts(i) - rawMean + } + } + + val states = optimizer.iterations(new CachedDiffFunction(costFun), + initialCoefficientsWithIntercept.asBreeze.toDenseVector) + + /* + Note that in Multinomial Logistic Regression, the objective history + (loss + regularization) is log-likelihood which is invariant under feature + standardization. As a result, the objective history from optimizer is the same as the + one in the original space. + */ + val arrayBuilder = mutable.ArrayBuilder.make[Double] + var state: optimizer.State = null + while (states.hasNext) { + state = states.next() + arrayBuilder += state.adjustedValue + } + + if (state == null) { + val msg = s"${optimizer.getClass.getName} failed." + logError(msg) + throw new SparkException(msg) + } + bcFeaturesStd.destroy(blocking = false) + + /* + The coefficients are trained in the scaled space; we're converting them back to + the original space. + Note that the intercept in scaled space and original space is the same; + as a result, no scaling is needed. 
+ */ + val rawCoefficients = state.x.toArray + val interceptsArray: Array[Double] = if ($(fitIntercept)) { + Array.tabulate(numClasses) { i => + val coefIndex = (i + 1) * numFeaturesPlusIntercept - 1 + rawCoefficients(coefIndex) + } + } else { + Array[Double]() + } + + val coefficientArray: Array[Double] = Array.tabulate(numClasses * numFeatures) { i => + // flatIndex will loop though rawCoefficients, and skip the intercept terms. + val flatIndex = if ($(fitIntercept)) i + i / numFeatures else i + val featureIndex = i % numFeatures + if (featuresStd(featureIndex) != 0.0) { + rawCoefficients(flatIndex) / featuresStd(featureIndex) + } else { + 0.0 + } + } + val coefficientMatrix = + new DenseMatrix(numClasses, numFeatures, coefficientArray, isTransposed = true) + + /* + When no regularization is applied, the coefficients lack identifiability because + we do not use a pivot class. We can add any constant value to the coefficients and + get the same likelihood. So here, we choose the mean centered coefficients for + reproducibility. This method follows the approach in glmnet, described here: + + Friedman, et al. "Regularization Paths for Generalized Linear Models via + Coordinate Descent," https://core.ac.uk/download/files/153/6287975.pdf + */ + if ($(regParam) == 0.0) { + val coefficientMean = coefficientMatrix.values.sum / (numClasses * numFeatures) + coefficientMatrix.update(_ - coefficientMean) + } + /* + The intercepts are never regularized, so we always center the mean. + */ + val interceptVector = if (interceptsArray.nonEmpty) { + val interceptMean = interceptsArray.sum / numClasses + interceptsArray.indices.foreach { i => interceptsArray(i) -= interceptMean } + Vectors.dense(interceptsArray) + } else { + Vectors.sparse(numClasses, Seq()) + } + + (coefficientMatrix, interceptVector, arrayBuilder.result()) + } + } + + if (handlePersistence) instances.unpersist() + + val model = copyValues( + new MultinomialLogisticRegressionModel(uid, coefficients, intercepts, numClasses)) + instr.logSuccess(model) + model + } + + @Since("2.1.0") + override def copy(extra: ParamMap): MultinomialLogisticRegression = defaultCopy(extra) +} + +@Since("2.1.0") +object MultinomialLogisticRegression extends DefaultParamsReadable[MultinomialLogisticRegression] { + + @Since("2.1.0") + override def load(path: String): MultinomialLogisticRegression = super.load(path) +} + +/** + * :: Experimental :: + * Model produced by [[MultinomialLogisticRegression]]. + */ +@Since("2.1.0") +@Experimental +class MultinomialLogisticRegressionModel private[spark] ( + @Since("2.1.0") override val uid: String, + @Since("2.1.0") val coefficients: Matrix, + @Since("2.1.0") val intercepts: Vector, + @Since("2.1.0") val numClasses: Int) + extends ProbabilisticClassificationModel[Vector, MultinomialLogisticRegressionModel] + with MultinomialLogisticRegressionParams with MLWritable { + + @Since("2.1.0") + override def setThresholds(value: Array[Double]): this.type = super.setThresholds(value) + + @Since("2.1.0") + override def getThresholds: Array[Double] = super.getThresholds + + @Since("2.1.0") + override val numFeatures: Int = coefficients.numCols + + /** Margin (rawPrediction) for each class label. */ + private val margins: Vector => Vector = (features) => { + val m = intercepts.toDense.copy + BLAS.gemv(1.0, coefficients, features, 1.0, m) + m + } + + /** Score (probability) for each class label. 
*/ + private val scores: Vector => Vector = (features) => { + val m = margins(features) + val maxMarginIndex = m.argmax + val marginArray = m.toArray + val maxMargin = marginArray(maxMarginIndex) + + // adjust margins for overflow + val sum = { + var temp = 0.0 + var k = 0 + while (k < numClasses) { + marginArray(k) = if (maxMargin > 0) { + math.exp(marginArray(k) - maxMargin) + } else { + math.exp(marginArray(k)) + } + temp += marginArray(k) + k += 1 + } + temp + } + + val scores = Vectors.dense(marginArray) + BLAS.scal(1 / sum, scores) + scores + } + + /** + * Predict label for the given feature vector. + * The behavior of this can be adjusted using [[thresholds]]. + */ + override protected def predict(features: Vector): Double = { + if (isDefined(thresholds)) { + val thresholds: Array[Double] = getThresholds + val probabilities = scores(features).toArray + var argMax = 0 + var max = Double.NegativeInfinity + var i = 0 + while (i < numClasses) { + if (thresholds(i) == 0.0) { + max = Double.PositiveInfinity + argMax = i + } else { + val scaled = probabilities(i) / thresholds(i) + if (scaled > max) { + max = scaled + argMax = i + } + } + i += 1 + } + argMax + } else { + scores(features).argmax + } + } + + override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = { + rawPrediction match { + case dv: DenseVector => + val size = dv.size + val values = dv.values + + // get the maximum margin + val maxMarginIndex = rawPrediction.argmax + val maxMargin = rawPrediction(maxMarginIndex) + + if (maxMargin == Double.PositiveInfinity) { + var k = 0 + while (k < size) { + values(k) = if (k == maxMarginIndex) 1.0 else 0.0 + k += 1 + } + } else { + val sum = { + var temp = 0.0 + var k = 0 + while (k < numClasses) { + values(k) = if (maxMargin > 0) { + math.exp(values(k) - maxMargin) + } else { + math.exp(values(k)) + } + temp += values(k) + k += 1 + } + temp + } + BLAS.scal(1 / sum, dv) + } + dv + case sv: SparseVector => + throw new RuntimeException("Unexpected error in MultinomialLogisticRegressionModel:" + + " raw2probabilitiesInPlace encountered SparseVector") + } + } + + override protected def predictRaw(features: Vector): Vector = margins(features) + + @Since("2.1.0") + override def copy(extra: ParamMap): MultinomialLogisticRegressionModel = { + val newModel = + copyValues( + new MultinomialLogisticRegressionModel(uid, coefficients, intercepts, numClasses), extra) + newModel.setParent(parent) + } + + /** + * Returns a [[org.apache.spark.ml.util.MLWriter]] instance for this ML instance. + * + * This does not save the [[parent]] currently. 
+ */ + @Since("2.1.0") + override def write: MLWriter = + new MultinomialLogisticRegressionModel.MultinomialLogisticRegressionModelWriter(this) +} + + +@Since("2.1.0") +object MultinomialLogisticRegressionModel extends MLReadable[MultinomialLogisticRegressionModel] { + + @Since("2.1.0") + override def read: MLReader[MultinomialLogisticRegressionModel] = + new MultinomialLogisticRegressionModelReader + + @Since("2.1.0") + override def load(path: String): MultinomialLogisticRegressionModel = super.load(path) + + /** [[MLWriter]] instance for [[MultinomialLogisticRegressionModel]] */ + private[MultinomialLogisticRegressionModel] + class MultinomialLogisticRegressionModelWriter(instance: MultinomialLogisticRegressionModel) + extends MLWriter with Logging { + + private case class Data( + numClasses: Int, + numFeatures: Int, + intercepts: Vector, + coefficients: Matrix) + + override protected def saveImpl(path: String): Unit = { + // Save metadata and Params + DefaultParamsWriter.saveMetadata(instance, path, sc) + // Save model data: numClasses, numFeatures, intercept, coefficients + val data = Data(instance.numClasses, instance.numFeatures, instance.intercepts, + instance.coefficients) + val dataPath = new Path(path, "data").toString + sqlContext.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath) + } + } + + private class MultinomialLogisticRegressionModelReader + extends MLReader[MultinomialLogisticRegressionModel] { + + /** Checked against metadata when loading model */ + private val className = classOf[MultinomialLogisticRegressionModel].getName + + override def load(path: String): MultinomialLogisticRegressionModel = { + val metadata = DefaultParamsReader.loadMetadata(path, sc, className) + + val dataPath = new Path(path, "data").toString + val data = sqlContext.read.format("parquet").load(dataPath) + .select("numClasses", "numFeatures", "intercepts", "coefficients").head() + val numClasses = data.getAs[Int](data.fieldIndex("numClasses")) + val intercepts = data.getAs[Vector](data.fieldIndex("intercepts")) + val coefficients = data.getAs[Matrix](data.fieldIndex("coefficients")) + val model = + new MultinomialLogisticRegressionModel(metadata.uid, coefficients, intercepts, numClasses) + + DefaultParamsReader.getAndSetParams(model, metadata) + model + } + } +} diff --git a/mllib/src/test/scala/org/apache/spark/ml/classification/MultinomialLogisticRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/classification/MultinomialLogisticRegressionSuite.scala new file mode 100644 index 0000000000000..0913fe559c562 --- /dev/null +++ b/mllib/src/test/scala/org/apache/spark/ml/classification/MultinomialLogisticRegressionSuite.scala @@ -0,0 +1,1056 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.ml.classification + +import scala.language.existentials + +import org.apache.spark.SparkFunSuite +import org.apache.spark.ml.attribute.NominalAttribute +import org.apache.spark.ml.classification.LogisticRegressionSuite._ +import org.apache.spark.ml.feature.LabeledPoint +import org.apache.spark.ml.linalg._ +import org.apache.spark.ml.param.ParamsSuite +import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils} +import org.apache.spark.ml.util.TestingUtils._ +import org.apache.spark.mllib.util.MLlibTestSparkContext +import org.apache.spark.sql.{DataFrame, Dataset, Row} + +class MultinomialLogisticRegressionSuite + extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest { + + @transient var dataset: Dataset[_] = _ + @transient var multinomialDataset: DataFrame = _ + private val eps: Double = 1e-5 + + override def beforeAll(): Unit = { + super.beforeAll() + + dataset = { + val nPoints = 100 + val coefficients = Array( + -0.57997, 0.912083, -0.371077, + -0.16624, -0.84355, -0.048509) + + val xMean = Array(5.843, 3.057) + val xVariance = Array(0.6856, 0.1899) + + val testData = generateMultinomialLogisticInput( + coefficients, xMean, xVariance, addIntercept = true, nPoints, 42) + + val df = spark.createDataFrame(sc.parallelize(testData, 4)) + df.cache() + df + } + + multinomialDataset = { + val nPoints = 10000 + val coefficients = Array( + -0.57997, 0.912083, -0.371077, -0.819866, 2.688191, + -0.16624, -0.84355, -0.048509, -0.301789, 4.170682) + + val xMean = Array(5.843, 3.057, 3.758, 1.199) + val xVariance = Array(0.6856, 0.1899, 3.116, 0.581) + + val testData = generateMultinomialLogisticInput( + coefficients, xMean, xVariance, addIntercept = true, nPoints, 42) + + val df = spark.createDataFrame(sc.parallelize(testData, 4)) + df.cache() + df + } + } + + /** + * Enable the ignored test to export the dataset into CSV format, + * so we can validate the training accuracy compared with R's glmnet package. 
+ */ + ignore("export test data into CSV format") { + val rdd = multinomialDataset.rdd.map { case Row(label: Double, features: Vector) => + label + "," + features.toArray.mkString(",") + }.repartition(1) + rdd.saveAsTextFile("target/tmp/MultinomialLogisticRegressionSuite/multinomialDataset") + } + + test("params") { + ParamsSuite.checkParams(new MultinomialLogisticRegression) + val model = new MultinomialLogisticRegressionModel("mLogReg", + Matrices.dense(2, 1, Array(0.0, 0.0)), Vectors.dense(0.0, 0.0), 2) + ParamsSuite.checkParams(model) + } + + test("multinomial logistic regression: default params") { + val mlr = new MultinomialLogisticRegression + assert(mlr.getLabelCol === "label") + assert(mlr.getFeaturesCol === "features") + assert(mlr.getPredictionCol === "prediction") + assert(mlr.getRawPredictionCol === "rawPrediction") + assert(mlr.getProbabilityCol === "probability") + assert(!mlr.isDefined(mlr.weightCol)) + assert(!mlr.isDefined(mlr.thresholds)) + assert(mlr.getFitIntercept) + assert(mlr.getStandardization) + val model = mlr.fit(dataset) + model.transform(dataset) + .select("label", "probability", "prediction", "rawPrediction") + .collect() + assert(model.getFeaturesCol === "features") + assert(model.getPredictionCol === "prediction") + assert(model.getRawPredictionCol === "rawPrediction") + assert(model.getProbabilityCol === "probability") + assert(model.intercepts !== Vectors.dense(0.0, 0.0)) + assert(model.hasParent) + } + + test("multinomial logistic regression with intercept without regularization") { + + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(0.0).setRegParam(0.0).setStandardization(true).setMaxIter(100) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(0.0).setRegParam(0.0).setStandardization(false) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + + /* + Using the following R code to load the data and train the model using glmnet package. 
+ > library("glmnet") + > data <- read.csv("path", header=FALSE) + > label = as.factor(data$V1) + > features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + > coefficients = coef(glmnet(features, label, family="multinomial", alpha = 0, lambda = 0)) + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -2.24493379 + V2 0.25096771 + V3 -0.03915938 + V4 0.14766639 + V5 0.36810817 + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.3778931 + V2 -0.3327489 + V3 0.8893666 + V4 -0.2306948 + V5 -0.4442330 + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 1.86704066 + V2 0.08178121 + V3 -0.85020722 + V4 0.08302840 + V5 0.07612480 + */ + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.2509677, -0.0391594, 0.1476664, 0.3681082, + -0.3327489, 0.8893666, -0.2306948, -0.4442330, + 0.0817812, -0.8502072, 0.0830284, 0.0761248), isTransposed = true) + val interceptsR = Vectors.dense(-2.2449338, 0.3778931, 1.8670407) + + assert(model1.coefficients ~== coefficientsR relTol 0.05) + assert(model1.coefficients.toArray.sum ~== 0.0 absTol eps) + assert(model1.intercepts ~== interceptsR relTol 0.05) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR relTol 0.05) + assert(model2.coefficients.toArray.sum ~== 0.0 absTol eps) + assert(model2.intercepts ~== interceptsR relTol 0.05) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression without intercept without regularization") { + + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(0.0).setRegParam(0.0).setStandardization(true) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(0.0).setRegParam(0.0).setStandardization(false) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + + /* + Using the following R code to load the data and train the model using glmnet package. + library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 0, lambda = 0, + intercept=F)) + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 0.06992464 + V3 -0.36562784 + V4 0.12142680 + V5 0.32052211 + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 -0.3036269 + V3 0.9449630 + V4 -0.2271038 + V5 -0.4364839 + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . 
+ V2 0.2337022 + V3 -0.5793351 + V4 0.1056770 + V5 0.1159618 + */ + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.0699246, -0.3656278, 0.1214268, 0.3205221, + -0.3036269, 0.9449630, -0.2271038, -0.4364839, + 0.2337022, -0.5793351, 0.1056770, 0.1159618), isTransposed = true) + + assert(model1.coefficients ~== coefficientsR relTol 0.05) + assert(model1.coefficients.toArray.sum ~== 0.0 absTol eps) + assert(model1.intercepts.toArray === Array.fill(3)(0.0)) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR relTol 0.05) + assert(model2.coefficients.toArray.sum ~== 0.0 absTol eps) + assert(model2.intercepts.toArray === Array.fill(3)(0.0)) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression with intercept with L1 regularization") { + + // use tighter constraints because OWL-QN solver takes longer to converge + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(1.0).setRegParam(0.05).setStandardization(true) + .setMaxIter(300).setTol(1e-10) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(1.0).setRegParam(0.05).setStandardization(false) + .setMaxIter(300).setTol(1e-10) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + + /* + Use the following R code to load the data and train the model using glmnet package. + library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficientsStd = coef(glmnet(features, label, family="multinomial", alpha = 1, + lambda = 0.05, standardization=T)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 1, lambda = 0.05, + standardization=F)) + > coefficientsStd + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.68988825 + V2 . + V3 . + V4 . + V5 0.09404023 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.2303499 + V2 -0.1232443 + V3 0.3258380 + V4 -0.1564688 + V5 -0.2053965 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.9202381 + V2 . + V3 -0.4803856 + V4 . + V5 . + + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.44893320 + V2 . + V3 . + V4 0.01933812 + V5 0.03666044 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.7376760 + V2 -0.0577182 + V3 . + V4 -0.2081718 + V5 -0.1304592 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.2887428 + V2 . + V3 . + V4 . + V5 . 
+ */ + + val coefficientsRStd = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.0, 0.09404023, + -0.1232443, 0.3258380, -0.1564688, -0.2053965, + 0.0, -0.4803856, 0.0, 0.0), isTransposed = true) + val interceptsRStd = Vectors.dense(-0.68988825, -0.2303499, 0.9202381) + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.01933812, 0.03666044, + -0.0577182, 0.0, -0.2081718, -0.1304592, + 0.0, 0.0, 0.0, 0.0), isTransposed = true) + val interceptsR = Vectors.dense(-0.44893320, 0.7376760, -0.2887428) + + assert(model1.coefficients ~== coefficientsRStd absTol 0.02) + assert(model1.intercepts ~== interceptsRStd relTol 0.1) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR absTol 0.02) + assert(model2.intercepts ~== interceptsR relTol 0.1) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression without intercept with L1 regularization") { + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(1.0).setRegParam(0.05).setStandardization(true) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(1.0).setRegParam(0.05).setStandardization(false) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + /* + Use the following R code to load the data and train the model using glmnet package. + library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficientsStd = coef(glmnet(features, label, family="multinomial", alpha = 1, + lambda = 0.05, intercept=F, standardization=T)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 1, lambda = 0.05, + intercept=F, standardization=F)) + > coefficientsStd + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 . + V4 . + V5 0.01525105 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 -0.1502410 + V3 0.5134658 + V4 -0.1601146 + V5 -0.2500232 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 0.003301875 + V3 . + V4 . + V5 . + + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 . + V4 . + V5 . + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 0.1943624 + V4 -0.1902577 + V5 -0.1028789 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 . + V4 . + V5 . 
+ */ + + val coefficientsRStd = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.0, 0.01525105, + -0.1502410, 0.5134658, -0.1601146, -0.2500232, + 0.003301875, 0.0, 0.0, 0.0), isTransposed = true) + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.0, 0.0, + 0.0, 0.1943624, -0.1902577, -0.1028789, + 0.0, 0.0, 0.0, 0.0), isTransposed = true) + + assert(model1.coefficients ~== coefficientsRStd absTol 0.01) + assert(model1.intercepts.toArray === Array.fill(3)(0.0)) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR absTol 0.01) + assert(model2.intercepts.toArray === Array.fill(3)(0.0)) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression with intercept with L2 regularization") { + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(0.0).setRegParam(0.1).setStandardization(true) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(0.0).setRegParam(0.1).setStandardization(false) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + /* + Use the following R code to load the data and train the model using glmnet package. + library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficientsStd = coef(glmnet(features, label, family="multinomial", alpha = 0, + lambda = 0.1, intercept=T, standardization=T)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 0, + lambda = 0.1, intercept=T, standardization=F)) + > coefficientsStd + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -1.70040424 + V2 0.17576070 + V3 0.01527894 + V4 0.10216108 + V5 0.26099531 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.2438590 + V2 -0.2238875 + V3 0.5967610 + V4 -0.1555496 + V5 -0.3010479 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 1.45654525 + V2 0.04812679 + V3 -0.61203992 + V4 0.05338850 + V5 0.04005258 + + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -1.65488543 + V2 0.15715048 + V3 0.01992903 + V4 0.12428858 + V5 0.22130317 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 1.1297533 + V2 -0.1974768 + V3 0.2776373 + V4 -0.1869445 + V5 -0.2510320 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.52513212 + V2 0.04032627 + V3 -0.29756637 + V4 0.06265594 + V5 0.02972883 + */ + + val coefficientsRStd = new DenseMatrix(3, 4, Array( + 0.17576070, 0.01527894, 0.10216108, 0.26099531, + -0.2238875, 0.5967610, -0.1555496, -0.3010479, + 0.04812679, -0.61203992, 0.05338850, 0.04005258), isTransposed = true) + val interceptsRStd = Vectors.dense(-1.70040424, 0.2438590, 1.45654525) + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.15715048, 0.01992903, 0.12428858, 0.22130317, + -0.1974768, 0.2776373, -0.1869445, -0.2510320, + 0.04032627, -0.29756637, 0.06265594, 0.02972883), isTransposed = true) + val interceptsR = Vectors.dense(-1.65488543, 1.1297533, 0.52513212) + + assert(model1.coefficients ~== coefficientsRStd relTol 0.05) + assert(model1.intercepts ~== interceptsRStd relTol 0.05) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR relTol 0.05) + assert(model2.intercepts ~== interceptsR relTol 0.05) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression without 
intercept with L2 regularization") { + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(0.0).setRegParam(0.1).setStandardization(true) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(0.0).setRegParam(0.1).setStandardization(false) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + /* + Use the following R code to load the data and train the model using glmnet package. + library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficientsStd = coef(glmnet(features, label, family="multinomial", alpha = 0, + lambda = 0.1, intercept=F, standardization=T)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 0, + lambda = 0.1, intercept=F, standardization=F)) + > coefficientsStd + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 0.03904171 + V3 -0.23354322 + V4 0.08288096 + V5 0.22706393 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 -0.2061848 + V3 0.6341398 + V4 -0.1530059 + V5 -0.2958455 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 0.16714312 + V3 -0.40059658 + V4 0.07012496 + V5 0.06878158 + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 -0.005704542 + V3 -0.144466409 + V4 0.092080736 + V5 0.182927657 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 -0.08469036 + V3 0.38996748 + V4 -0.16468436 + V5 -0.22522976 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 0.09039490 + V3 -0.24550107 + V4 0.07260362 + V5 0.04230210 + */ + val coefficientsRStd = new DenseMatrix(3, 4, Array( + 0.03904171, -0.23354322, 0.08288096, 0.2270639, + -0.2061848, 0.6341398, -0.1530059, -0.2958455, + 0.16714312, -0.40059658, 0.07012496, 0.06878158), isTransposed = true) + + val coefficientsR = new DenseMatrix(3, 4, Array( + -0.005704542, -0.144466409, 0.092080736, 0.182927657, + -0.08469036, 0.38996748, -0.16468436, -0.22522976, + 0.0903949, -0.24550107, 0.07260362, 0.0423021), isTransposed = true) + + assert(model1.coefficients ~== coefficientsRStd absTol 0.01) + assert(model1.intercepts.toArray === Array.fill(3)(0.0)) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR absTol 0.01) + assert(model2.intercepts.toArray === Array.fill(3)(0.0)) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression with intercept with elasticnet regularization") { + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(0.5).setRegParam(0.1).setStandardization(true) + .setMaxIter(300).setTol(1e-10) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(true) + .setElasticNetParam(0.5).setRegParam(0.1).setStandardization(false) + .setMaxIter(300).setTol(1e-10) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + /* + Use the following R code to load the data and train the model using glmnet package. 
+ library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficientsStd = coef(glmnet(features, label, family="multinomial", alpha = 0.5, + lambda = 0.1, intercept=T, standardization=T)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 0.5, + lambda = 0.1, intercept=T, standardization=F)) + > coefficientsStd + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.5521819483 + V2 0.0003092611 + V3 . + V4 . + V5 0.0913818490 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.27531989 + V2 -0.09790029 + V3 0.28502034 + V4 -0.12416487 + V5 -0.16513373 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.8275018 + V2 . + V3 -0.4044859 + V4 . + V5 . + + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.39876213 + V2 . + V3 . + V4 0.02547520 + V5 0.03893991 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + 0.61089869 + V2 -0.04224269 + V3 . + V4 -0.18923970 + V5 -0.09104249 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + -0.2121366 + V2 . + V3 . + V4 . + V5 . + */ + + val coefficientsRStd = new DenseMatrix(3, 4, Array( + 0.0003092611, 0.0, 0.0, 0.091381849, + -0.09790029, 0.28502034, -0.12416487, -0.16513373, + 0.0, -0.4044859, 0.0, 0.0), isTransposed = true) + val interceptsRStd = Vectors.dense(-0.5521819483, -0.27531989, 0.8275018) + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.0254752, 0.03893991, + -0.04224269, 0.0, -0.1892397, -0.09104249, + 0.0, 0.0, 0.0, 0.0), isTransposed = true) + val interceptsR = Vectors.dense(-0.39876213, 0.61089869, -0.2121366) + + assert(model1.coefficients ~== coefficientsRStd absTol 0.01) + assert(model1.intercepts ~== interceptsRStd absTol 0.01) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR absTol 0.01) + assert(model2.intercepts ~== interceptsR absTol 0.01) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + test("multinomial logistic regression without intercept with elasticnet regularization") { + val trainer1 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(0.5).setRegParam(0.1).setStandardization(true) + .setMaxIter(300).setTol(1e-10) + val trainer2 = (new MultinomialLogisticRegression).setFitIntercept(false) + .setElasticNetParam(0.5).setRegParam(0.1).setStandardization(false) + .setMaxIter(300).setTol(1e-10) + + val model1 = trainer1.fit(multinomialDataset) + val model2 = trainer2.fit(multinomialDataset) + /* + Use the following R code to load the data and train the model using glmnet package. + library("glmnet") + data <- read.csv("path", header=FALSE) + label = as.factor(data$V1) + features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5)) + coefficientsStd = coef(glmnet(features, label, family="multinomial", alpha = 0.5, + lambda = 0.1, intercept=F, standardization=T)) + coefficients = coef(glmnet(features, label, family="multinomial", alpha = 0.5, + lambda = 0.1, intercept=F, standardization=F)) + > coefficientsStd + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 . + V4 . + V5 0.03543706 + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 -0.1187387 + V3 0.4025482 + V4 -0.1270969 + V5 -0.1918386 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 0.00774365 + V3 . + V4 . + V5 . + + > coefficients + $`0` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 . 
+ V4 . + V5 . + + $`1` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 0.14666497 + V4 -0.16570638 + V5 -0.05982875 + + $`2` + 5 x 1 sparse Matrix of class "dgCMatrix" + s0 + . + V2 . + V3 . + V4 . + V5 . + */ + val coefficientsRStd = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.0, 0.03543706, + -0.1187387, 0.4025482, -0.1270969, -0.1918386, + 0.0, 0.0, 0.0, 0.00774365), isTransposed = true) + + val coefficientsR = new DenseMatrix(3, 4, Array( + 0.0, 0.0, 0.0, 0.0, + 0.0, 0.14666497, -0.16570638, -0.05982875, + 0.0, 0.0, 0.0, 0.0), isTransposed = true) + + assert(model1.coefficients ~== coefficientsRStd absTol 0.01) + assert(model1.intercepts.toArray === Array.fill(3)(0.0)) + assert(model1.intercepts.toArray.sum ~== 0.0 absTol eps) + assert(model2.coefficients ~== coefficientsR absTol 0.01) + assert(model2.intercepts.toArray === Array.fill(3)(0.0)) + assert(model2.intercepts.toArray.sum ~== 0.0 absTol eps) + } + + /* + test("multinomial logistic regression with intercept with strong L1 regularization") { + // TODO: implement this test to check that the priors on the intercepts are correct + // TODO: when initial model becomes available + } + */ + + test("prediction") { + val model = new MultinomialLogisticRegressionModel("mLogReg", + Matrices.dense(3, 2, Array(0.0, 0.0, 0.0, 1.0, 2.0, 3.0)), + Vectors.dense(0.0, 0.0, 0.0), 3) + val overFlowData = spark.createDataFrame(Seq( + LabeledPoint(1.0, Vectors.dense(0.0, 1000.0)), + LabeledPoint(1.0, Vectors.dense(0.0, -1.0)) + )) + val results = model.transform(overFlowData).select("rawPrediction", "probability").collect() + + // probabilities are correct when margins have to be adjusted + val raw1 = results(0).getAs[Vector](0) + val prob1 = results(0).getAs[Vector](1) + assert(raw1 === Vectors.dense(1000.0, 2000.0, 3000.0)) + assert(prob1 ~== Vectors.dense(0.0, 0.0, 1.0) absTol eps) + + // probabilities are correct when margins don't have to be adjusted + val raw2 = results(1).getAs[Vector](0) + val prob2 = results(1).getAs[Vector](1) + assert(raw2 === Vectors.dense(-1.0, -2.0, -3.0)) + assert(prob2 ~== Vectors.dense(0.66524096, 0.24472847, 0.09003057) relTol eps) + } + + test("multinomial logistic regression: Predictor, Classifier methods") { + val mlr = new MultinomialLogisticRegression + + val model = mlr.fit(dataset) + assert(model.numClasses === 3) + val numFeatures = dataset.select("features").first().getAs[Vector](0).size + assert(model.numFeatures === numFeatures) + + val results = model.transform(dataset) + // check that raw prediction is coefficients dot features + intercept + results.select("rawPrediction", "features").collect().foreach { + case Row(raw: Vector, features: Vector) => + assert(raw.size === 3) + val margins = Array.tabulate(3) { k => + var margin = 0.0 + features.foreachActive { (index, value) => + margin += value * model.coefficients(k, index) + } + margin += model.intercepts(k) + margin + } + assert(raw ~== Vectors.dense(margins) relTol eps) + } + + // Compare rawPrediction with probability + results.select("rawPrediction", "probability").collect().foreach { + case Row(raw: Vector, prob: Vector) => + assert(raw.size === 3) + assert(prob.size === 3) + val max = raw.toArray.max + val subtract = if (max > 0) max else 0.0 + val sum = raw.toArray.map(x => math.exp(x - subtract)).sum + val probFromRaw0 = math.exp(raw(0) - subtract) / sum + val probFromRaw1 = math.exp(raw(1) - subtract) / sum + assert(prob(0) ~== probFromRaw0 relTol eps) + assert(prob(1) ~== probFromRaw1 relTol eps) + assert(prob(2) ~== 1.0 - 
probFromRaw1 - probFromRaw0 relTol eps) + } + + // Compare prediction with probability + results.select("prediction", "probability").collect().foreach { + case Row(pred: Double, prob: Vector) => + val predFromProb = prob.toArray.zipWithIndex.maxBy(_._1)._2 + assert(pred == predFromProb) + } + } + + test("multinomial logistic regression coefficients should be centered") { + val mlr = new MultinomialLogisticRegression().setMaxIter(1) + val model = mlr.fit(dataset) + assert(model.intercepts.toArray.sum ~== 0.0 absTol 1e-6) + assert(model.coefficients.toArray.sum ~== 0.0 absTol 1e-6) + } + + test("numClasses specified in metadata/inferred") { + val mlr = new MultinomialLogisticRegression().setMaxIter(1) + + // specify more classes than unique label values + val labelMeta = NominalAttribute.defaultAttr.withName("label").withNumValues(4).toMetadata() + val df = dataset.select(dataset("label").as("label", labelMeta), dataset("features")) + val model1 = mlr.fit(df) + assert(model1.numClasses === 4) + assert(model1.intercepts.size === 4) + + // specify two classes when there are really three + val labelMeta1 = NominalAttribute.defaultAttr.withName("label").withNumValues(2).toMetadata() + val df1 = dataset.select(dataset("label").as("label", labelMeta1), dataset("features")) + val thrown = intercept[IllegalArgumentException] { + mlr.fit(df1) + } + assert(thrown.getMessage.contains("less than the number of unique labels")) + + // mlr should infer the number of classes if not specified + val model3 = mlr.fit(dataset) + assert(model3.numClasses === 3) + } + + test("all labels the same") { + val constantData = spark.createDataFrame(Seq( + LabeledPoint(4.0, Vectors.dense(0.0)), + LabeledPoint(4.0, Vectors.dense(1.0)), + LabeledPoint(4.0, Vectors.dense(2.0))) + ) + val mlr = new MultinomialLogisticRegression + val model = mlr.fit(constantData) + val results = model.transform(constantData) + results.select("rawPrediction", "probability", "prediction").collect().foreach { + case Row(raw: Vector, prob: Vector, pred: Double) => + assert(raw === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, Double.PositiveInfinity))) + assert(prob === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, 1.0))) + assert(pred === 4.0) + } + + // force the model to be trained with only one class + val constantZeroData = spark.createDataFrame(Seq( + LabeledPoint(0.0, Vectors.dense(0.0)), + LabeledPoint(0.0, Vectors.dense(1.0)), + LabeledPoint(0.0, Vectors.dense(2.0))) + ) + val modelZeroLabel = mlr.setFitIntercept(false).fit(constantZeroData) + val resultsZero = modelZeroLabel.transform(constantZeroData) + resultsZero.select("rawPrediction", "probability", "prediction").collect().foreach { + case Row(raw: Vector, prob: Vector, pred: Double) => + assert(prob === Vectors.dense(Array(1.0))) + assert(pred === 0.0) + } + + // ensure that the correct value is predicted when numClasses passed through metadata + val labelMeta = NominalAttribute.defaultAttr.withName("label").withNumValues(6).toMetadata() + val constantDataWithMetadata = constantData + .select(constantData("label").as("label", labelMeta), constantData("features")) + val modelWithMetadata = mlr.setFitIntercept(true).fit(constantDataWithMetadata) + val resultsWithMetadata = modelWithMetadata.transform(constantDataWithMetadata) + resultsWithMetadata.select("rawPrediction", "probability", "prediction").collect().foreach { + case Row(raw: Vector, prob: Vector, pred: Double) => + assert(raw === Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, Double.PositiveInfinity, 0.0))) + assert(prob === 
Vectors.dense(Array(0.0, 0.0, 0.0, 0.0, 1.0, 0.0))) + assert(pred === 4.0) + } + // TODO: check num iters is zero when it become available in the model + } + + test("weighted data") { + val numClasses = 5 + val numPoints = 40 + val outlierData = MLTestingUtils.genClassificationInstancesWithWeightedOutliers(spark, + numClasses, numPoints) + val testData = spark.createDataFrame(Array.tabulate[LabeledPoint](numClasses) { i => + LabeledPoint(i.toDouble, Vectors.dense(i.toDouble)) + }) + val mlr = new MultinomialLogisticRegression().setWeightCol("weight") + val model = mlr.fit(outlierData) + val results = model.transform(testData).select("label", "prediction").collect() + + // check that the predictions are the one to one mapping + results.foreach { case Row(label: Double, pred: Double) => + assert(label === pred) + } + val (overSampledData, weightedData) = + MLTestingUtils.genEquivalentOversampledAndWeightedInstances(outlierData, "label", "features", + 42L) + val weightedModel = mlr.fit(weightedData) + val overSampledModel = mlr.setWeightCol("").fit(overSampledData) + assert(weightedModel.coefficients ~== overSampledModel.coefficients relTol 0.01) + } + + test("thresholds prediction") { + val mlr = new MultinomialLogisticRegression + val model = mlr.fit(dataset) + val basePredictions = model.transform(dataset).select("prediction").collect() + + // should predict all zeros + model.setThresholds(Array(1, 1000, 1000)) + val zeroPredictions = model.transform(dataset).select("prediction").collect() + assert(zeroPredictions.forall(_.getDouble(0) === 0.0)) + + // should predict all ones + model.setThresholds(Array(1000, 1, 1000)) + val onePredictions = model.transform(dataset).select("prediction").collect() + assert(onePredictions.forall(_.getDouble(0) === 1.0)) + + // should predict all twos + model.setThresholds(Array(1000, 1000, 1)) + val twoPredictions = model.transform(dataset).select("prediction").collect() + assert(twoPredictions.forall(_.getDouble(0) === 2.0)) + + // constant threshold scaling is the same as no thresholds + model.setThresholds(Array(1000, 1000, 1000)) + val scaledPredictions = model.transform(dataset).select("prediction").collect() + assert(scaledPredictions.zip(basePredictions).forall { case (scaled, base) => + scaled.getDouble(0) === base.getDouble(0) + }) + } + + test("read/write") { + def checkModelData( + model: MultinomialLogisticRegressionModel, + model2: MultinomialLogisticRegressionModel): Unit = { + assert(model.intercepts === model2.intercepts) + assert(model.coefficients.toArray === model2.coefficients.toArray) + assert(model.numClasses === model2.numClasses) + assert(model.numFeatures === model2.numFeatures) + } + val mlr = new MultinomialLogisticRegression() + testEstimatorAndModelReadWrite(mlr, dataset, + MultinomialLogisticRegressionSuite.allParamSettings, + checkModelData) + } + + test("should support all NumericType labels and not support other types") { + val mlr = new MultinomialLogisticRegression().setMaxIter(1) + MLTestingUtils + .checkNumericTypes[MultinomialLogisticRegressionModel, MultinomialLogisticRegression]( + mlr, spark) { (expected, actual) => + assert(expected.intercepts === actual.intercepts) + assert(expected.coefficients.toArray === actual.coefficients.toArray) + } + } +} + +object MultinomialLogisticRegressionSuite { + + /** + * Mapping from all Params to valid settings which differ from the defaults. + * This is useful for tests which need to exercise all Params, such as save/load. + * This excludes input columns to simplify some tests. 
+ */ + val allParamSettings: Map[String, Any] = ProbabilisticClassifierSuite.allParamSettings ++ Map( + "probabilityCol" -> "myProbability", + "thresholds" -> Array(0.4, 0.6), + "regParam" -> 0.01, + "elasticNetParam" -> 0.1, + "maxIter" -> 2, // intentionally small + "fitIntercept" -> true, + "tol" -> 0.8, + "standardization" -> false + ) +} diff --git a/mllib/src/test/scala/org/apache/spark/ml/util/MLTestingUtils.scala b/mllib/src/test/scala/org/apache/spark/ml/util/MLTestingUtils.scala index 80b976914cbdf..472a5af06e7a2 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/util/MLTestingUtils.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/util/MLTestingUtils.scala @@ -19,12 +19,14 @@ package org.apache.spark.ml.util import org.apache.spark.SparkFunSuite import org.apache.spark.ml.{Estimator, Model} +import org.apache.spark.ml.attribute.NominalAttribute import org.apache.spark.ml.evaluation.Evaluator -import org.apache.spark.ml.linalg.Vectors +import org.apache.spark.ml.feature.Instance +import org.apache.spark.ml.linalg.{Vector, Vectors} import org.apache.spark.ml.param.ParamMap import org.apache.spark.ml.recommendation.{ALS, ALSModel} import org.apache.spark.ml.tree.impl.TreeTests -import org.apache.spark.sql.{DataFrame, SparkSession} +import org.apache.spark.sql.{DataFrame, Row, SparkSession} import org.apache.spark.sql.functions._ import org.apache.spark.sql.types._ @@ -179,4 +181,47 @@ object MLTestingUtils extends SparkFunSuite { .map(t => t -> df.select(col(labelColName).cast(t), col(predictionColName))) .toMap } + + def genClassificationInstancesWithWeightedOutliers( + spark: SparkSession, + numClasses: Int, + numInstances: Int): DataFrame = { + val data = Array.tabulate[Instance](numInstances) { i => + val feature = i % numClasses + if (i < numInstances / 3) { + // give large weights to minority of data with 1 to 1 mapping feature to label + Instance(feature, 1.0, Vectors.dense(feature)) + } else { + // give small weights to majority of data points with reverse mapping + Instance(numClasses - feature - 1, 0.01, Vectors.dense(feature)) + } + } + val labelMeta = + NominalAttribute.defaultAttr.withName("label").withNumValues(numClasses).toMetadata() + spark.createDataFrame(data).select(col("label").as("label", labelMeta), col("weight"), + col("features")) + } + + def genEquivalentOversampledAndWeightedInstances( + data: DataFrame, + labelCol: String, + featuresCol: String, + seed: Long): (DataFrame, DataFrame) = { + import data.sparkSession.implicits._ + val rng = scala.util.Random + rng.setSeed(seed) + val sample: () => Int = () => rng.nextInt(10) + 1 + val sampleUDF = udf(sample) + val rawData = data.select(labelCol, featuresCol).withColumn("samples", sampleUDF()) + val overSampledData = rawData.rdd.flatMap { + case Row(label: Double, features: Vector, n: Int) => + Iterator.fill(n)(Instance(label, 1.0, features)) + }.toDF() + rng.setSeed(seed) + val weightedData = rawData.rdd.map { + case Row(label: Double, features: Vector, n: Int) => + Instance(label, n.toDouble, features) + }.toDF() + (overSampledData, weightedData) + } } From 5377fc62360d5e9b5c94078e41d10a96e0e8a535 Mon Sep 17 00:00:00 2001 From: Nick Lavers Date: Fri, 19 Aug 2016 10:11:59 +0100 Subject: [PATCH 025/270] [SPARK-16961][CORE] Fixed off-by-one error that biased randomizeInPlace JIRA issue link: https://issues.apache.org/jira/browse/SPARK-16961 Changed one line of Utils.randomizeInPlace to allow elements to stay in place. 
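To make the one-line change concrete, the corrected loop is just a standard (unbiased) Fisher-Yates shuffle. A minimal standalone sketch, assuming nothing beyond the description above (the helper name here is illustrative; the actual change is the single `nextInt(i + 1)` line in `Utils.randomizeInPlace` shown in the diff below):

```scala
import scala.util.Random

// Unbiased Fisher-Yates: at step i, the swap target j is drawn from 0..i *inclusive*,
// so arr(i) may stay where it is. Drawing j from 0..i-1 (the old off-by-one) forbids
// that case and skews the distribution of permutations.
def shuffleInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
  for (i <- (arr.length - 1) to 1 by -1) {
    val j = rand.nextInt(i + 1) // inclusive upper bound is the fix
    val tmp = arr(j)
    arr(j) = arr(i)
    arr(i) = tmp
  }
  arr
}
```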
Created a unit test that runs a Pearson's chi squared test to determine whether the output diverges significantly from a uniform distribution. Author: Nick Lavers Closes #14551 from nicklavers/SPARK-16961-randomizeInPlace. --- R/pkg/inst/tests/testthat/test_mllib.R | 12 +++---- .../scala/org/apache/spark/util/Utils.scala | 2 +- .../org/apache/spark/util/UtilsSuite.scala | 35 +++++++++++++++++++ python/pyspark/ml/clustering.py | 12 +++---- python/pyspark/mllib/clustering.py | 2 +- python/pyspark/mllib/tests.py | 2 +- 6 files changed, 50 insertions(+), 15 deletions(-) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index 8c380fbf150f4..dfb7a185cd5a3 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -546,15 +546,15 @@ test_that("spark.gaussianMixture", { df <- createDataFrame(data, c("x1", "x2")) model <- spark.gaussianMixture(df, ~ x1 + x2, k = 2) stats <- summary(model) - rLambda <- c(0.4, 0.6) - rMu <- c(-0.2614822, 0.5128697, 2.647284, 4.544682) - rSigma <- c(0.08427399, 0.00548772, 0.00548772, 0.09090715, - 0.1641373, -0.1673806, -0.1673806, 0.7508951) - expect_equal(stats$lambda, rLambda) + rLambda <- c(0.50861, 0.49139) + rMu <- c(0.267, 1.195, 2.743, 4.730) + rSigma <- c(1.099, 1.339, 1.339, 1.798, + 0.145, -0.309, -0.309, 0.716) + expect_equal(stats$lambda, rLambda, tolerance = 1e-3) expect_equal(unlist(stats$mu), rMu, tolerance = 1e-3) expect_equal(unlist(stats$sigma), rSigma, tolerance = 1e-3) p <- collect(select(predict(model, df), "prediction")) - expect_equal(p$prediction, c(0, 0, 0, 0, 1, 1, 1, 1, 1, 1)) + expect_equal(p$prediction, c(0, 0, 0, 0, 0, 1, 1, 1, 1, 1)) # Test model save/load modelPath <- tempfile(pattern = "spark-gaussianMixture", fileext = ".tmp") diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala index 0ae44a2ed7865..9b4274a27b3be 100644 --- a/core/src/main/scala/org/apache/spark/util/Utils.scala +++ b/core/src/main/scala/org/apache/spark/util/Utils.scala @@ -824,7 +824,7 @@ private[spark] object Utils extends Logging { */ def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = { for (i <- (arr.length - 1) to 1 by -1) { - val j = rand.nextInt(i) + val j = rand.nextInt(i + 1) val tmp = arr(j) arr(j) = arr(i) arr(i) = tmp diff --git a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala index 30952a9458345..4715fd29375d6 100644 --- a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala @@ -31,6 +31,7 @@ import scala.util.Random import com.google.common.io.Files import org.apache.commons.lang3.SystemUtils +import org.apache.commons.math3.stat.inference.ChiSquareTest import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path @@ -874,4 +875,38 @@ class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging { } } } + + test("chi square test of randomizeInPlace") { + // Parameters + val arraySize = 10 + val numTrials = 1000 + val threshold = 0.05 + val seed = 1L + + // results(i)(j): how many times Utils.randomize moves an element from position j to position i + val results = Array.ofDim[Long](arraySize, arraySize) + + // This must be seeded because even a fair random process will fail this test with + // probability equal to the value of `threshold`, which is inconvenient for a unit test. 
+ val rand = new java.util.Random(seed) + val range = 0 until arraySize + + for { + _ <- 0 until numTrials + trial = Utils.randomizeInPlace(range.toArray, rand) + i <- range + } results(i)(trial(i)) += 1L + + val chi = new ChiSquareTest() + + // We expect an even distribution; this array will be rescaled by `chiSquareTest` + val expected = Array.fill(arraySize * arraySize)(1.0) + val observed = results.flatten + + // Performs Pearson's chi-squared test. Using the sum-of-squares as the test statistic, gives + // the probability of a uniform distribution producing results as extreme as `observed` + val pValue = chi.chiSquareTest(expected, observed) + + assert(pValue > threshold) + } } diff --git a/python/pyspark/ml/clustering.py b/python/pyspark/ml/clustering.py index 75d9a0e8cac18..4dab83362a0a4 100644 --- a/python/pyspark/ml/clustering.py +++ b/python/pyspark/ml/clustering.py @@ -99,9 +99,9 @@ class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIte +--------------------+--------------------+ | mean| cov| +--------------------+--------------------+ - |[-0.0550000000000...|0.002025000000000...| - |[0.82499999999999...|0.005625000000000...| - |[-0.87,-0.7200000...|0.001600000000000...| + |[0.82500000140229...|0.005625000000006...| + |[-0.4777098016092...|0.167969502720916...| + |[-0.4472625243352...|0.167304119758233...| +--------------------+--------------------+ ... >>> transformed = model.transform(df).select("features", "prediction") @@ -124,9 +124,9 @@ class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIte +--------------------+--------------------+ | mean| cov| +--------------------+--------------------+ - |[-0.0550000000000...|0.002025000000000...| - |[0.82499999999999...|0.005625000000000...| - |[-0.87,-0.7200000...|0.001600000000000...| + |[0.82500000140229...|0.005625000000006...| + |[-0.4777098016092...|0.167969502720916...| + |[-0.4472625243352...|0.167304119758233...| +--------------------+--------------------+ ... diff --git a/python/pyspark/mllib/clustering.py b/python/pyspark/mllib/clustering.py index c8c3c42774f21..29aa615125770 100644 --- a/python/pyspark/mllib/clustering.py +++ b/python/pyspark/mllib/clustering.py @@ -416,7 +416,7 @@ class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader): ... 4.5605, 5.2043, 6.2734]) >>> clusterdata_2 = sc.parallelize(data.reshape(5,3)) >>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001, - ... maxIterations=150, seed=10) + ... maxIterations=150, seed=4) >>> labels = model.predict(clusterdata_2).collect() >>> labels[0]==labels[1] True diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py index 99bf50b5a1640..3f3dfd186c10d 100644 --- a/python/pyspark/mllib/tests.py +++ b/python/pyspark/mllib/tests.py @@ -550,7 +550,7 @@ def test_gmm(self): [-6, -7], ]) clusters = GaussianMixture.train(data, 2, convergenceTol=0.001, - maxIterations=10, seed=56) + maxIterations=10, seed=1) labels = clusters.predict(data).collect() self.assertEqual(labels[0], labels[1]) self.assertEqual(labels[2], labels[3]) From 864be9359ae2f8409e6dbc38a7a18593f9cc5692 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Fri, 19 Aug 2016 03:23:16 -0700 Subject: [PATCH 026/270] [SPARK-17141][ML] MinMaxScaler should remain NaN value. ## What changes were proposed in this pull request? In the existing code, ```MinMaxScaler``` handle ```NaN``` value indeterminately. 
* If a column has identity value, that is ```max == min```, ```MinMaxScalerModel``` transformation will output ```0.5``` for all rows even the original value is ```NaN```. * Otherwise, it will remain ```NaN``` after transformation. I think we should unify the behavior by remaining ```NaN``` value at any condition, since we don't know how to transform a ```NaN``` value. In Python sklearn, it will throw exception when there is ```NaN``` in the dataset. ## How was this patch tested? Unit tests. Author: Yanbo Liang Closes #14716 from yanboliang/spark-17141. --- .../spark/ml/feature/MinMaxScaler.scala | 6 +++-- .../spark/ml/feature/MinMaxScalerSuite.scala | 27 +++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala index 9f3d2ca6db0c1..28cbe1cb01e9a 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/MinMaxScaler.scala @@ -186,8 +186,10 @@ class MinMaxScalerModel private[ml] ( val size = values.length var i = 0 while (i < size) { - val raw = if (originalRange(i) != 0) (values(i) - minArray(i)) / originalRange(i) else 0.5 - values(i) = raw * scale + $(min) + if (!values(i).isNaN) { + val raw = if (originalRange(i) != 0) (values(i) - minArray(i)) / originalRange(i) else 0.5 + values(i) = raw * scale + $(min) + } i += 1 } Vectors.dense(values) diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/MinMaxScalerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/MinMaxScalerSuite.scala index 5da84711758c6..9f376b70035c5 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/feature/MinMaxScalerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/feature/MinMaxScalerSuite.scala @@ -90,4 +90,31 @@ class MinMaxScalerSuite extends SparkFunSuite with MLlibTestSparkContext with De assert(newInstance.originalMin === instance.originalMin) assert(newInstance.originalMax === instance.originalMax) } + + test("MinMaxScaler should remain NaN value") { + val data = Array( + Vectors.dense(1, Double.NaN, 2.0, 2.0), + Vectors.dense(2, 2.0, 0.0, 3.0), + Vectors.dense(3, Double.NaN, 0.0, 1.0), + Vectors.dense(6, 2.0, 2.0, Double.NaN)) + + val expected: Array[Vector] = Array( + Vectors.dense(-5.0, Double.NaN, 5.0, 0.0), + Vectors.dense(-3.0, 0.0, -5.0, 5.0), + Vectors.dense(-1.0, Double.NaN, -5.0, -5.0), + Vectors.dense(5.0, 0.0, 5.0, Double.NaN)) + + val df = spark.createDataFrame(data.zip(expected)).toDF("features", "expected") + val scaler = new MinMaxScaler() + .setInputCol("features") + .setOutputCol("scaled") + .setMin(-5) + .setMax(5) + + val model = scaler.fit(df) + model.transform(df).select("expected", "scaled").collect() + .foreach { case Row(vector1: Vector, vector2: Vector) => + assert(vector1.equals(vector2), "Transformed vector is different with expected.") + } + } } From 072acf5e1460d66d4b60b536d5b2ccddeee80794 Mon Sep 17 00:00:00 2001 From: Jeff Zhang Date: Fri, 19 Aug 2016 12:38:15 +0100 Subject: [PATCH 027/270] [SPARK-16965][MLLIB][PYSPARK] Fix bound checking for SparseVector. ## What changes were proposed in this pull request? 1. In scala, add negative low bound checking and put all the low/upper bound checking in one place 2. In python, add low/upper bound checking of indices. ## How was this patch tested? unit test added Author: Jeff Zhang Closes #14555 from zjffdu/SPARK-16965. 
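As a quick illustration of the new validation, here is a minimal Scala sketch based on the test case added in this patch (illustrative only, not additional patch content):
```scala
import org.apache.spark.ml.linalg.Vectors

// A well-formed sparse vector: size 4, non-zero entries at indices 1 and 3.
val ok = Vectors.sparse(4, Array(1, 3), Array(1.0, 5.5))

// With the added checks, malformed vectors now fail fast with IllegalArgumentException:
// Vectors.sparse(3, Array(-1, 1), Array(3.0, 5.0))  // negative index
// Vectors.sparse(3, Array(1, 4), Array(3.0, 5.0))   // index 4 out of bounds for size 3
```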
--- .../org/apache/spark/ml/linalg/Vectors.scala | 34 +++++++++++-------- .../apache/spark/ml/linalg/VectorsSuite.scala | 6 ++++ python/pyspark/ml/linalg/__init__.py | 15 ++++++++ 3 files changed, 40 insertions(+), 15 deletions(-) diff --git a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala index 0659324aad1fa..2e4a58dc6291c 100644 --- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala +++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Vectors.scala @@ -208,17 +208,7 @@ object Vectors { */ @Since("2.0.0") def sparse(size: Int, elements: Seq[(Int, Double)]): Vector = { - require(size > 0, "The size of the requested sparse vector must be greater than 0.") - val (indices, values) = elements.sortBy(_._1).unzip - var prev = -1 - indices.foreach { i => - require(prev < i, s"Found duplicate indices: $i.") - prev = i - } - require(prev < size, s"You may not write an element to index $prev because the declared " + - s"size of your vector is $size") - new SparseVector(size, indices.toArray, values.toArray) } @@ -560,11 +550,25 @@ class SparseVector @Since("2.0.0") ( @Since("2.0.0") val indices: Array[Int], @Since("2.0.0") val values: Array[Double]) extends Vector { - require(indices.length == values.length, "Sparse vectors require that the dimension of the" + - s" indices match the dimension of the values. You provided ${indices.length} indices and " + - s" ${values.length} values.") - require(indices.length <= size, s"You provided ${indices.length} indices and values, " + - s"which exceeds the specified vector size ${size}.") + // validate the data + { + require(size >= 0, "The size of the requested sparse vector must be greater than 0.") + require(indices.length == values.length, "Sparse vectors require that the dimension of the" + + s" indices match the dimension of the values. 
You provided ${indices.length} indices and " + + s" ${values.length} values.") + require(indices.length <= size, s"You provided ${indices.length} indices and values, " + + s"which exceeds the specified vector size ${size}.") + + if (indices.nonEmpty) { + require(indices(0) >= 0, s"Found negative index: ${indices(0)}.") + } + var prev = -1 + indices.foreach { i => + require(prev < i, s"Index $i follows $prev and is not strictly increasing") + prev = i + } + require(prev < size, s"Index $prev out of bounds for vector of size $size") + } override def toString: String = s"($size,${indices.mkString("[", ",", "]")},${values.mkString("[", ",", "]")})" diff --git a/mllib-local/src/test/scala/org/apache/spark/ml/linalg/VectorsSuite.scala b/mllib-local/src/test/scala/org/apache/spark/ml/linalg/VectorsSuite.scala index 614be460a414a..ea22c2787fb3c 100644 --- a/mllib-local/src/test/scala/org/apache/spark/ml/linalg/VectorsSuite.scala +++ b/mllib-local/src/test/scala/org/apache/spark/ml/linalg/VectorsSuite.scala @@ -72,6 +72,12 @@ class VectorsSuite extends SparkMLFunSuite { } } + test("sparse vector construction with negative indices") { + intercept[IllegalArgumentException] { + Vectors.sparse(3, Array(-1, 1), Array(3.0, 5.0)) + } + } + test("dense to array") { val vec = Vectors.dense(arr).asInstanceOf[DenseVector] assert(vec.toArray.eq(arr)) diff --git a/python/pyspark/ml/linalg/__init__.py b/python/pyspark/ml/linalg/__init__.py index f42c589b92255..05c0ac862fb7f 100644 --- a/python/pyspark/ml/linalg/__init__.py +++ b/python/pyspark/ml/linalg/__init__.py @@ -478,6 +478,14 @@ def __init__(self, size, *args): SparseVector(4, {1: 1.0, 3: 5.5}) >>> SparseVector(4, [1, 3], [1.0, 5.5]) SparseVector(4, {1: 1.0, 3: 5.5}) + >>> SparseVector(4, {1:1.0, 6:2.0}) + Traceback (most recent call last): + ... + AssertionError: Index 6 is out of the the size of vector with size=4 + >>> SparseVector(4, {-1:1.0}) + Traceback (most recent call last): + ... + AssertionError: Contains negative index -1 """ self.size = int(size) """ Size of the vector. """ @@ -511,6 +519,13 @@ def __init__(self, size, *args): "Indices %s and %s are not strictly increasing" % (self.indices[i], self.indices[i + 1])) + if self.indices.size > 0: + assert np.max(self.indices) < self.size, \ + "Index %d is out of the the size of vector with size=%d" \ + % (np.max(self.indices), self.size) + assert np.min(self.indices) >= 0, \ + "Contains negative index %d" % (np.min(self.indices)) + def numNonzeros(self): """ Number of nonzero elements. This scans all active values and count non zeros. From 67e59d464f782ff5f509234212aa072a7653d7bf Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Fri, 19 Aug 2016 21:11:35 +0800 Subject: [PATCH 028/270] [SPARK-16994][SQL] Whitelist operators for predicate pushdown ## What changes were proposed in this pull request? This patch changes predicate pushdown optimization rule (PushDownPredicate) from using a blacklist to a whitelist. That is to say, operators must be explicitly allowed. This approach is more future-proof: previously it was possible for us to introduce a new operator and then render the optimization rule incorrect. This also fixes the bug that previously we allowed pushing filter beneath limit, which was incorrect. That is to say, before this patch, the optimizer would rewrite ``` select * from (select * from range(10) limit 5) where id > 3 to select * from range(10) where id > 3 limit 5 ``` ## How was this patch tested? 
- a unit test case in FilterPushdownSuite - an end-to-end test in limit.sql Author: Reynold Xin Closes #14713 from rxin/SPARK-16994. --- .../sql/catalyst/optimizer/Optimizer.scala | 23 ++++++++++++++----- .../optimizer/FilterPushdownSuite.scala | 6 +++++ .../test/resources/sql-tests/inputs/limit.sql | 3 +++ .../resources/sql-tests/results/limit.sql.out | 10 +++++++- 4 files changed, 35 insertions(+), 7 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index f7aa6da0a5bdc..ce57f05868fe1 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -1208,17 +1208,28 @@ object PushDownPredicate extends Rule[LogicalPlan] with PredicateHelper { filter } - // two filters should be combine together by other rules - case filter @ Filter(_, _: Filter) => filter - // should not push predicates through sample, or will generate different results. - case filter @ Filter(_, _: Sample) => filter - - case filter @ Filter(condition, u: UnaryNode) if u.expressions.forall(_.deterministic) => + case filter @ Filter(condition, u: UnaryNode) + if canPushThrough(u) && u.expressions.forall(_.deterministic) => pushDownPredicate(filter, u.child) { predicate => u.withNewChildren(Seq(Filter(predicate, u.child))) } } + private def canPushThrough(p: UnaryNode): Boolean = p match { + // Note that some operators (e.g. project, aggregate, union) are being handled separately + // (earlier in this rule). + case _: AppendColumns => true + case _: BroadcastHint => true + case _: Distinct => true + case _: Generate => true + case _: Pivot => true + case _: RedistributeData => true + case _: Repartition => true + case _: ScriptTransformation => true + case _: Sort => true + case _ => false + } + private def pushDownPredicate( filter: Filter, grandchild: LogicalPlan)(insertFilter: Expression => LogicalPlan): LogicalPlan = { diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala index 596b8fcea194b..9f25e9d8e9ac8 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala @@ -111,6 +111,12 @@ class FilterPushdownSuite extends PlanTest { assert(optimized == correctAnswer) } + test("SPARK-16994: filter should not be pushed through limit") { + val originalQuery = testRelation.limit(10).where('a === 1).analyze + val optimized = Optimize.execute(originalQuery) + comparePlans(optimized, originalQuery) + } + test("can't push without rewrite") { val originalQuery = testRelation diff --git a/sql/core/src/test/resources/sql-tests/inputs/limit.sql b/sql/core/src/test/resources/sql-tests/inputs/limit.sql index 892a1bb4b559f..2ea35f7f3a5c8 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/limit.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/limit.sql @@ -18,3 +18,6 @@ select * from testdata limit key > 3; -- limit must be integer select * from testdata limit true; select * from testdata limit 'a'; + +-- limit within a subquery +select * from (select * from range(10) limit 5) where id > 3; diff --git a/sql/core/src/test/resources/sql-tests/results/limit.sql.out 
b/sql/core/src/test/resources/sql-tests/results/limit.sql.out index b71b05886986c..cb4e4d04810d0 100644 --- a/sql/core/src/test/resources/sql-tests/results/limit.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/limit.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 9 +-- Number of queries: 10 -- !query 0 @@ -81,3 +81,11 @@ struct<> -- !query 8 output org.apache.spark.sql.AnalysisException The limit expression must be integer type, but got string; + + +-- !query 9 +select * from (select * from range(10) limit 5) where id > 3 +-- !query 9 schema +struct +-- !query 9 output +4 From e98eb2146f1363956bfc3e5adcc11c246182d617 Mon Sep 17 00:00:00 2001 From: Alex Bozarth Date: Fri, 19 Aug 2016 10:04:20 -0500 Subject: [PATCH 029/270] [SPARK-16673][WEB UI] New Executor Page removed conditional for Logs and Thread Dump columns ## What changes were proposed in this pull request? When #13670 switched `ExecutorsPage` to use JQuery DataTables it incidentally removed the conditional for the Logs and Thread Dump columns. I reimplemented the conditional display of the Logs and Thread dump columns as it was before the switch. ## How was this patch tested? Manually tested and dev/run-tests ![both](https://cloud.githubusercontent.com/assets/13952758/17186879/da8dd1a8-53eb-11e6-8b0c-d0ff0156a9a7.png) ![dump](https://cloud.githubusercontent.com/assets/13952758/17186881/dab08a04-53eb-11e6-8b1c-50ffd0bf2ae8.png) ![logs](https://cloud.githubusercontent.com/assets/13952758/17186880/dab04d00-53eb-11e6-8754-68dd64d6d9f4.png) Author: Alex Bozarth Closes #14382 from ajbozarth/spark16673. --- .../apache/spark/ui/static/executorspage.js | 38 +++++++++++++++---- .../apache/spark/ui/exec/ExecutorsPage.scala | 7 ++-- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/core/src/main/resources/org/apache/spark/ui/static/executorspage.js b/core/src/main/resources/org/apache/spark/ui/static/executorspage.js index b2b2363d3ac69..1df67337ea031 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/executorspage.js +++ b/core/src/main/resources/org/apache/spark/ui/static/executorspage.js @@ -15,6 +15,16 @@ * limitations under the License. */ +var threadDumpEnabled = false; + +function setThreadDumpEnabled(val) { + threadDumpEnabled = val; +} + +function getThreadDumpEnabled() { + return threadDumpEnabled; +} + function formatStatus(status, type) { if (type !== 'display') return status; if (status) { @@ -116,6 +126,12 @@ function formatLogsCells(execLogs, type) { return result; } +function logsExist(execs) { + return execs.some(function(exec) { + return !($.isEmptyObject(exec["executorLogs"])); + }); +} + // Determine Color Opacity from 0.5-1 // activeTasks range from 0 to maxTasks function activeTasksAlpha(activeTasks, maxTasks) { @@ -143,18 +159,16 @@ function totalDurationAlpha(totalGCTime, totalDuration) { (Math.min(totalGCTime / totalDuration + 0.5, 1)) : 1; } +// When GCTimePercent is edited change ToolTips.TASK_TIME to match +var GCTimePercent = 0.1; + function totalDurationStyle(totalGCTime, totalDuration) { // Red if GC time over GCTimePercent of total time - // When GCTimePercent is edited change ToolTips.TASK_TIME to match - var GCTimePercent = 0.1; return (totalGCTime > GCTimePercent * totalDuration) ? 
("hsla(0, 100%, 50%, " + totalDurationAlpha(totalGCTime, totalDuration) + ")") : ""; } function totalDurationColor(totalGCTime, totalDuration) { - // Red if GC time over GCTimePercent of total time - // When GCTimePercent is edited change ToolTips.TASK_TIME to match - var GCTimePercent = 0.1; return (totalGCTime > GCTimePercent * totalDuration) ? "white" : "black"; } @@ -392,8 +406,18 @@ $(document).ready(function () { {data: 'executorLogs', render: formatLogsCells}, { data: 'id', render: function (data, type) { - return type === 'display' ? ("Thread Dump" ) : data; + return type === 'display' ? ("Thread Dump" ) : data; + } } + ], + "columnDefs": [ + { + "targets": [ 15 ], + "visible": logsExist(response) + }, + { + "targets": [ 16 ], + "visible": getThreadDumpEnabled() } ], "order": [[0, "asc"]] @@ -458,7 +482,7 @@ $(document).ready(function () { "paging": false, "searching": false, "info": false - + }; $(sumSelector).DataTable(sumConf); diff --git a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala index 287390b87bd73..982e8915a8ded 100644 --- a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala +++ b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala @@ -50,16 +50,15 @@ private[ui] class ExecutorsPage( threadDumpEnabled: Boolean) extends WebUIPage("") { private val listener = parent.listener - // When GCTimePercent is edited change ToolTips.TASK_TIME to match - private val GCTimePercent = 0.1 def render(request: HttpServletRequest): Seq[Node] = { val content =
[render() markup garbled in extraction: the page <div> with the active-executors placeholder and the utils.js / executorspage.js script includes; per the JavaScript changes above, this patch also threads the threadDumpEnabled flag into the page via a setThreadDumpEnabled(...) script call]
; From 071eaaf9d2b63589f2e66e5279a16a5a484de6f5 Mon Sep 17 00:00:00 2001 From: Kousuke Saruta Date: Fri, 19 Aug 2016 10:11:25 -0500 Subject: [PATCH 030/270] [SPARK-11227][CORE] UnknownHostException can be thrown when NameNode HA is enabled. ## What changes were proposed in this pull request? If the following conditions are satisfied, executors don't load properties in `hdfs-site.xml` and UnknownHostException can be thrown. (1) NameNode HA is enabled (2) spark.eventLogging is disabled or logging path is NOT on HDFS (3) Using Standalone or Mesos for the cluster manager (4) There are no code to load `HdfsCondition` class in the driver regardless of directly or indirectly. (5) The tasks access to HDFS (There might be some more conditions...) For example, following code causes UnknownHostException when the conditions above are satisfied. ``` sc.textFile("").collect ``` ``` java.lang.IllegalArgumentException: java.net.UnknownHostException: hacluster at org.apache.hadoop.security.SecurityUtil.buildTokenService(SecurityUtil.java:378) at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:310) at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:176) at org.apache.hadoop.hdfs.DFSClient.(DFSClient.java:678) at org.apache.hadoop.hdfs.DFSClient.(DFSClient.java:619) at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:149) at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2653) at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:92) at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2687) at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2669) at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:371) at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:170) at org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:656) at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:438) at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:411) at org.apache.spark.SparkContext$$anonfun$hadoopFile$1$$anonfun$32.apply(SparkContext.scala:986) at org.apache.spark.SparkContext$$anonfun$hadoopFile$1$$anonfun$32.apply(SparkContext.scala:986) at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:177) at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:177) at scala.Option.map(Option.scala:146) at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:177) at org.apache.spark.rdd.HadoopRDD$$anon$1.(HadoopRDD.scala:213) at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:209) at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:102) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:318) at org.apache.spark.rdd.RDD.iterator(RDD.scala:282) at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:318) at org.apache.spark.rdd.RDD.iterator(RDD.scala:282) at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70) at org.apache.spark.scheduler.Task.run(Task.scala:85) at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) Caused by: java.net.UnknownHostException: hacluster ``` But following code doesn't cause the Exception because `textFile` 
method loads `HdfsConfiguration` indirectly. ``` sc.textFile("").collect ``` When a job includes some operations which access to HDFS, the object of `org.apache.hadoop.Configuration` is wrapped by `SerializableConfiguration`, serialized and broadcasted from driver to executors and each executor deserialize the object with `loadDefaults` false so HDFS related properties should be set before broadcasted. ## How was this patch tested? Tested manually on my standalone cluster. Author: Kousuke Saruta Closes #13738 from sarutak/SPARK-11227. --- .../scala/org/apache/spark/SparkContext.scala | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 60f042f1e07c5..2eaeab1d807b4 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -35,7 +35,7 @@ import scala.util.control.NonFatal import com.google.common.collect.MapMaker import org.apache.commons.lang3.SerializationUtils import org.apache.hadoop.conf.Configuration -import org.apache.hadoop.fs.Path +import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable} import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, @@ -961,6 +961,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() + + // This is a hack to enforce loading hdfs-site.xml. + // See SPARK-11227 for details. + FileSystem.getLocal(conf) + // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) @@ -981,6 +986,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() + + // This is a hack to enforce loading hdfs-site.xml. + // See SPARK-11227 for details. + FileSystem.get(new URI(path), hadoopConfiguration) + // A Hadoop configuration can be about 10 KB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) @@ -1065,6 +1075,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() + + // This is a hack to enforce loading hdfs-site.xml. + // See SPARK-11227 for details. + FileSystem.get(new URI(path), hadoopConfiguration) + // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) @@ -1099,6 +1114,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() + + // This is a hack to enforce loading hdfs-site.xml. + // See SPARK-11227 for details. + FileSystem.getLocal(conf) + // Add necessary security credentials to the JobConf. 
Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) From cf0cce90364d17afe780ff9a5426dfcefa298535 Mon Sep 17 00:00:00 2001 From: Sital Kedia Date: Fri, 19 Aug 2016 11:27:30 -0700 Subject: [PATCH 031/270] [SPARK-17113] [SHUFFLE] Job failure due to Executor OOM in offheap mode ## What changes were proposed in this pull request? This PR fixes executor OOM in offheap mode due to bug in Cooperative Memory Management for UnsafeExternSorter. UnsafeExternalSorter was checking if memory page is being used by upstream by comparing the base object address of the current page with the base object address of upstream. However, in case of offheap memory allocation, the base object addresses are always null, so there was no spilling happening and eventually the operator would OOM. Following is the stack trace this issue addresses - java.lang.OutOfMemoryError: Unable to acquire 1220 bytes of memory, got 0 at org.apache.spark.memory.MemoryConsumer.allocatePage(MemoryConsumer.java:120) at org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter.acquireNewPageIfNecessary(UnsafeExternalSorter.java:341) at org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter.insertRecord(UnsafeExternalSorter.java:362) at org.apache.spark.sql.execution.UnsafeExternalRowSorter.insertRow(UnsafeExternalRowSorter.java:93) at org.apache.spark.sql.execution.UnsafeExternalRowSorter.sort(UnsafeExternalRowSorter.java:170) ## How was this patch tested? Tested by running the failing job. Author: Sital Kedia Closes #14693 from sitalkedia/fix_offheap_oom. --- .../util/collection/unsafe/sort/UnsafeExternalSorter.java | 2 +- .../util/collection/unsafe/sort/UnsafeInMemorySorter.java | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java index 8d596f87d213b..ccf76643db2b4 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java @@ -522,7 +522,7 @@ public long spill() throws IOException { // is accessing the current record. We free this page in that caller's next loadNext() // call. 
for (MemoryBlock page : allocatedPages) { - if (!loaded || page.getBaseObject() != upstream.getBaseObject()) { + if (!loaded || page.pageNumber != ((UnsafeInMemorySorter.SortedIterator)upstream).getCurrentPageNumber()) { released += page.size(); freePage(page); } else { diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java index 78da38927878b..30d0f3006a04e 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java @@ -248,6 +248,7 @@ public final class SortedIterator extends UnsafeSorterIterator implements Clonea private long baseOffset; private long keyPrefix; private int recordLength; + private long currentPageNumber; private SortedIterator(int numRecords, int offset) { this.numRecords = numRecords; @@ -262,6 +263,7 @@ public SortedIterator clone() { iter.baseOffset = baseOffset; iter.keyPrefix = keyPrefix; iter.recordLength = recordLength; + iter.currentPageNumber = currentPageNumber; return iter; } @@ -279,6 +281,7 @@ public boolean hasNext() { public void loadNext() { // This pointer points to a 4-byte record length, followed by the record's bytes final long recordPointer = array.get(offset + position); + currentPageNumber = memoryManager.decodePageNumber(recordPointer); baseObject = memoryManager.getPage(recordPointer); baseOffset = memoryManager.getOffsetInPage(recordPointer) + 4; // Skip over record length recordLength = Platform.getInt(baseObject, baseOffset - 4); @@ -292,6 +295,10 @@ public void loadNext() { @Override public long getBaseOffset() { return baseOffset; } + public long getCurrentPageNumber() { + return currentPageNumber; + } + @Override public int getRecordLength() { return recordLength; } From acac7a508a29d0f75d86ee2e4ca83ebf01a36cf8 Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Fri, 19 Aug 2016 14:24:09 -0700 Subject: [PATCH 032/270] [SPARK-16443][SPARKR] Alternating Least Squares (ALS) wrapper ## What changes were proposed in this pull request? Add Alternating Least Squares wrapper in SparkR. Unit tests have been updated. ## How was this patch tested? SparkR unit tests. (If this patch involves UI changes, please attach a screenshot; otherwise, remove this) ![screen shot 2016-07-27 at 3 50 31 pm](https://cloud.githubusercontent.com/assets/15318264/17195347/f7a6352a-5411-11e6-8e21-61a48070192a.png) ![screen shot 2016-07-27 at 3 50 46 pm](https://cloud.githubusercontent.com/assets/15318264/17195348/f7a7d452-5411-11e6-845f-6d292283bc28.png) Author: Junyang Qian Closes #14384 from junyangq/SPARK-16443. 
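For reference, a minimal Scala sketch of the underlying `ml` ALS estimator that the new R wrapper delegates to (the data and column names mirror the R unit test; an active `SparkSession` named `spark` is assumed, and the snippet is illustrative rather than part of the patch):
```scala
import org.apache.spark.ml.recommendation.ALS

// Ratings mirroring the R test: integer user/item ids and a double rating column.
val ratings = spark.createDataFrame(Seq(
  (0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0),
  (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)
)).toDF("user", "item", "rating")

val als = new ALS()
  .setUserCol("user")
  .setItemCol("item")
  .setRatingCol("rating")
  .setRank(10)
  .setRegParam(0.1)
  .setMaxIter(5)
  .setSeed(0L)

val model = als.fit(ratings)
model.userFactors.show()          // latent user factors, surfaced as summary(model)$userFactors in R
model.transform(ratings).show()   // predictions, surfaced via predict() in R
```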
--- R/pkg/NAMESPACE | 3 +- R/pkg/R/generics.R | 4 + R/pkg/R/mllib.R | 159 +++++++++++++++++- R/pkg/inst/tests/testthat/test_mllib.R | 40 +++++ .../org/apache/spark/ml/r/ALSWrapper.scala | 119 +++++++++++++ .../org/apache/spark/ml/r/RWrappers.scala | 2 + 6 files changed, 322 insertions(+), 5 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/ALSWrapper.scala diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index 4404cffc292aa..e1b87b28d35ae 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -29,7 +29,8 @@ exportMethods("glm", "spark.posterior", "spark.perplexity", "spark.isoreg", - "spark.gaussianMixture") + "spark.gaussianMixture", + "spark.als") # Job group lifecycle management methods export("setJobGroup", diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index fe04bcfc7d14d..693aa31d3ecab 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -1332,3 +1332,7 @@ setGeneric("spark.gaussianMixture", #' @rdname write.ml #' @export setGeneric("write.ml", function(object, path, ...) { standardGeneric("write.ml") }) + +#' @rdname spark.als +#' @export +setGeneric("spark.als", function(data, ...) { standardGeneric("spark.als") }) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index b9527410a9853..36f38fc73a510 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -74,6 +74,13 @@ setClass("IsotonicRegressionModel", representation(jobj = "jobj")) #' @note GaussianMixtureModel since 2.1.0 setClass("GaussianMixtureModel", representation(jobj = "jobj")) +#' S4 class that represents an ALSModel +#' +#' @param jobj a Java object reference to the backing Scala ALSWrapper +#' @export +#' @note ALSModel since 2.1.0 +setClass("ALSModel", representation(jobj = "jobj")) + #' Saves the MLlib model to the input path #' #' Saves the MLlib model to the input path. For more information, see the specific @@ -82,8 +89,8 @@ setClass("GaussianMixtureModel", representation(jobj = "jobj")) #' @name write.ml #' @export #' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} -#' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg}, \link{spark.lda} -#' @seealso \link{spark.isoreg} +#' @seealso \link{spark.als}, \link{spark.kmeans}, \link{spark.lda}, \link{spark.naiveBayes} +#' @seealso \link{spark.survreg}, \link{spark.isoreg} #' @seealso \link{read.ml} NULL @@ -95,10 +102,11 @@ NULL #' @name predict #' @export #' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} -#' @seealso \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} +#' @seealso \link{spark.als}, \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} #' @seealso \link{spark.isoreg} NULL + #' Generalized Linear Models #' #' Fits generalized linear model against a Spark DataFrame. 
@@ -801,6 +809,8 @@ read.ml <- function(path) { return(new("IsotonicRegressionModel", jobj = jobj)) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.GaussianMixtureWrapper")) { return(new("GaussianMixtureModel", jobj = jobj)) + } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.ALSWrapper")) { + return(new("ALSModel", jobj = jobj)) } else { stop(paste("Unsupported model: ", jobj)) } @@ -1053,4 +1063,145 @@ setMethod("summary", signature(object = "GaussianMixtureModel"), setMethod("predict", signature(object = "GaussianMixtureModel"), function(object, newData) { return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) - }) \ No newline at end of file + }) + +#' Alternating Least Squares (ALS) for Collaborative Filtering +#' +#' \code{spark.als} learns latent factors in collaborative filtering via alternating least +#' squares. Users can call \code{summary} to obtain fitted latent factors, \code{predict} +#' to make predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. +#' +#' For more details, see +#' \href{http://spark.apache.org/docs/latest/ml-collaborative-filtering.html}{MLlib: +#' Collaborative Filtering}. +#' +#' @param data a SparkDataFrame for training. +#' @param ratingCol column name for ratings. +#' @param userCol column name for user ids. Ids must be (or can be coerced into) integers. +#' @param itemCol column name for item ids. Ids must be (or can be coerced into) integers. +#' @param rank rank of the matrix factorization (> 0). +#' @param reg regularization parameter (>= 0). +#' @param maxIter maximum number of iterations (>= 0). +#' @param nonnegative logical value indicating whether to apply nonnegativity constraints. +#' @param implicitPrefs logical value indicating whether to use implicit preference. +#' @param alpha alpha parameter in the implicit preference formulation (>= 0). +#' @param seed integer seed for random number generation. +#' @param numUserBlocks number of user blocks used to parallelize computation (> 0). +#' @param numItemBlocks number of item blocks used to parallelize computation (> 0). +#' @param checkpointInterval number of checkpoint intervals (>= 1) or disable checkpoint (-1). 
+#' +#' @return \code{spark.als} returns a fitted ALS model +#' @rdname spark.als +#' @aliases spark.als,SparkDataFrame-method +#' @name spark.als +#' @export +#' @examples +#' \dontrun{ +#' ratings <- list(list(0, 0, 4.0), list(0, 1, 2.0), list(1, 1, 3.0), list(1, 2, 4.0), +#' list(2, 1, 1.0), list(2, 2, 5.0)) +#' df <- createDataFrame(ratings, c("user", "item", "rating")) +#' model <- spark.als(df, "rating", "user", "item") +#' +#' # extract latent factors +#' stats <- summary(model) +#' userFactors <- stats$userFactors +#' itemFactors <- stats$itemFactors +#' +#' # make predictions +#' predicted <- predict(model, df) +#' showDF(predicted) +#' +#' # save and load the model +#' path <- "path/to/model" +#' write.ml(model, path) +#' savedModel <- read.ml(path) +#' summary(savedModel) +#' +#' # set other arguments +#' modelS <- spark.als(df, "rating", "user", "item", rank = 20, +#' reg = 0.1, nonnegative = TRUE) +#' statsS <- summary(modelS) +#' } +#' @note spark.als since 2.1.0 +setMethod("spark.als", signature(data = "SparkDataFrame"), + function(data, ratingCol = "rating", userCol = "user", itemCol = "item", + rank = 10, reg = 1.0, maxIter = 10, nonnegative = FALSE, + implicitPrefs = FALSE, alpha = 1.0, numUserBlocks = 10, numItemBlocks = 10, + checkpointInterval = 10, seed = 0) { + + if (!is.numeric(rank) || rank <= 0) { + stop("rank should be a positive number.") + } + if (!is.numeric(reg) || reg < 0) { + stop("reg should be a nonnegative number.") + } + if (!is.numeric(maxIter) || maxIter <= 0) { + stop("maxIter should be a positive number.") + } + + jobj <- callJStatic("org.apache.spark.ml.r.ALSWrapper", + "fit", data@sdf, ratingCol, userCol, itemCol, as.integer(rank), + reg, as.integer(maxIter), implicitPrefs, alpha, nonnegative, + as.integer(numUserBlocks), as.integer(numItemBlocks), + as.integer(checkpointInterval), as.integer(seed)) + return(new("ALSModel", jobj = jobj)) + }) + +# Returns a summary of the ALS model produced by spark.als. + +#' @param object a fitted ALS model. +#' @return \code{summary} returns a list containing the names of the user column, +#' the item column and the rating column, the estimated user and item factors, +#' rank, regularization parameter and maximum number of iterations used in training. +#' @rdname spark.als +#' @aliases summary,ALSModel-method +#' @export +#' @note summary(ALSModel) since 2.1.0 +setMethod("summary", signature(object = "ALSModel"), +function(object, ...) { + jobj <- object@jobj + user <- callJMethod(jobj, "userCol") + item <- callJMethod(jobj, "itemCol") + rating <- callJMethod(jobj, "ratingCol") + userFactors <- dataFrame(callJMethod(jobj, "userFactors")) + itemFactors <- dataFrame(callJMethod(jobj, "itemFactors")) + rank <- callJMethod(jobj, "rank") + return(list(user = user, item = item, rating = rating, userFactors = userFactors, + itemFactors = itemFactors, rank = rank)) +}) + + +# Makes predictions from an ALS model or a model produced by spark.als. + +#' @param newData a SparkDataFrame for testing. +#' @return \code{predict} returns a SparkDataFrame containing predicted values. +#' @rdname spark.als +#' @aliases predict,ALSModel-method +#' @export +#' @note predict(ALSModel) since 2.1.0 +setMethod("predict", signature(object = "ALSModel"), +function(object, newData) { + return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) +}) + + +# Saves the ALS model to the input path. + +#' @param path the directory where the model is saved. 
+#' @param overwrite logical value indicating whether to overwrite if the output path +#' already exists. Default is FALSE which means throw exception +#' if the output path exists. +#' +#' @rdname spark.als +#' @aliases write.ml,ALSModel,character-method +#' @export +#' @seealso \link{read.ml} +#' @note write.ml(ALSModel, character) since 2.1.0 +setMethod("write.ml", signature(object = "ALSModel", path = "character"), +function(object, path, overwrite = FALSE) { + writer <- callJMethod(object@jobj, "write") + if (overwrite) { + writer <- callJMethod(writer, "overwrite") + } + invisible(callJMethod(writer, "save", path)) +}) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index dfb7a185cd5a3..67a3099101cf1 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -657,4 +657,44 @@ test_that("spark.posterior and spark.perplexity", { expect_equal(length(local.posterior), sum(unlist(local.posterior))) }) +test_that("spark.als", { + data <- list(list(0, 0, 4.0), list(0, 1, 2.0), list(1, 1, 3.0), list(1, 2, 4.0), + list(2, 1, 1.0), list(2, 2, 5.0)) + df <- createDataFrame(data, c("user", "item", "score")) + model <- spark.als(df, ratingCol = "score", userCol = "user", itemCol = "item", + rank = 10, maxIter = 5, seed = 0, reg = 0.1) + stats <- summary(model) + expect_equal(stats$rank, 10) + test <- createDataFrame(list(list(0, 2), list(1, 0), list(2, 0)), c("user", "item")) + predictions <- collect(predict(model, test)) + + expect_equal(predictions$prediction, c(-0.1380762, 2.6258414, -1.5018409), + tolerance = 1e-4) + + # Test model save/load + modelPath <- tempfile(pattern = "spark-als", fileext = ".tmp") + write.ml(model, modelPath) + expect_error(write.ml(model, modelPath)) + write.ml(model, modelPath, overwrite = TRUE) + model2 <- read.ml(modelPath) + stats2 <- summary(model2) + expect_equal(stats2$rating, "score") + userFactors <- collect(stats$userFactors) + itemFactors <- collect(stats$itemFactors) + userFactors2 <- collect(stats2$userFactors) + itemFactors2 <- collect(stats2$itemFactors) + + orderUser <- order(userFactors$id) + orderUser2 <- order(userFactors2$id) + expect_equal(userFactors$id[orderUser], userFactors2$id[orderUser2]) + expect_equal(userFactors$features[orderUser], userFactors2$features[orderUser2]) + + orderItem <- order(itemFactors$id) + orderItem2 <- order(itemFactors2$id) + expect_equal(itemFactors$id[orderItem], itemFactors2$id[orderItem2]) + expect_equal(itemFactors$features[orderItem], itemFactors2$features[orderItem2]) + + unlink(modelPath) +}) + sparkR.session.stop() diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/ALSWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/ALSWrapper.scala new file mode 100644 index 0000000000000..ad13cced4667b --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/ALSWrapper.scala @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.r + +import org.apache.hadoop.fs.Path +import org.json4s._ +import org.json4s.JsonDSL._ +import org.json4s.jackson.JsonMethods._ + +import org.apache.spark.ml.recommendation.{ALS, ALSModel} +import org.apache.spark.ml.util._ +import org.apache.spark.sql.{DataFrame, Dataset} + +private[r] class ALSWrapper private ( + val alsModel: ALSModel, + val ratingCol: String) extends MLWritable { + + lazy val userCol: String = alsModel.getUserCol + lazy val itemCol: String = alsModel.getItemCol + lazy val userFactors: DataFrame = alsModel.userFactors + lazy val itemFactors: DataFrame = alsModel.itemFactors + lazy val rank: Int = alsModel.rank + + def transform(dataset: Dataset[_]): DataFrame = { + alsModel.transform(dataset) + } + + override def write: MLWriter = new ALSWrapper.ALSWrapperWriter(this) +} + +private[r] object ALSWrapper extends MLReadable[ALSWrapper] { + + def fit( // scalastyle:ignore + data: DataFrame, + ratingCol: String, + userCol: String, + itemCol: String, + rank: Int, + regParam: Double, + maxIter: Int, + implicitPrefs: Boolean, + alpha: Double, + nonnegative: Boolean, + numUserBlocks: Int, + numItemBlocks: Int, + checkpointInterval: Int, + seed: Int): ALSWrapper = { + + val als = new ALS() + .setRatingCol(ratingCol) + .setUserCol(userCol) + .setItemCol(itemCol) + .setRank(rank) + .setRegParam(regParam) + .setMaxIter(maxIter) + .setImplicitPrefs(implicitPrefs) + .setAlpha(alpha) + .setNonnegative(nonnegative) + .setNumBlocks(numUserBlocks) + .setNumItemBlocks(numItemBlocks) + .setCheckpointInterval(checkpointInterval) + .setSeed(seed.toLong) + + val alsModel: ALSModel = als.fit(data) + + new ALSWrapper(alsModel, ratingCol) + } + + override def read: MLReader[ALSWrapper] = new ALSWrapperReader + + override def load(path: String): ALSWrapper = super.load(path) + + class ALSWrapperWriter(instance: ALSWrapper) extends MLWriter { + + override protected def saveImpl(path: String): Unit = { + val rMetadataPath = new Path(path, "rMetadata").toString + val modelPath = new Path(path, "model").toString + + val rMetadata = ("class" -> instance.getClass.getName) ~ + ("ratingCol" -> instance.ratingCol) + val rMetadataJson: String = compact(render(rMetadata)) + sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath) + + instance.alsModel.save(modelPath) + } + } + + class ALSWrapperReader extends MLReader[ALSWrapper] { + + override def load(path: String): ALSWrapper = { + implicit val format = DefaultFormats + val rMetadataPath = new Path(path, "rMetadata").toString + val modelPath = new Path(path, "model").toString + + val rMetadataStr = sc.textFile(rMetadataPath, 1).first() + val rMetadata = parse(rMetadataStr) + val ratingCol = (rMetadata \ "ratingCol").extract[String] + val alsModel = ALSModel.load(modelPath) + + new ALSWrapper(alsModel, ratingCol) + } + } + +} diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala index e23af51df5718..51a65f7fc4fe8 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala +++ 
b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala @@ -50,6 +50,8 @@ private[r] object RWrappers extends MLReader[Object] { IsotonicRegressionWrapper.load(path) case "org.apache.spark.ml.r.GaussianMixtureWrapper" => GaussianMixtureWrapper.load(path) + case "org.apache.spark.ml.r.ALSWrapper" => + ALSWrapper.load(path) case _ => throw new SparkException(s"SparkR read.ml does not support load $className") } From a117afa7c2d94f943106542ec53d74ba2b5f1058 Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Fri, 19 Aug 2016 18:14:45 -0700 Subject: [PATCH 033/270] [SPARK-17149][SQL] array.sql for testing array related functions ## What changes were proposed in this pull request? This patch creates array.sql in SQLQueryTestSuite for testing array related functions, including: - indexing - array creation - size - array_contains - sort_array ## How was this patch tested? The patch itself is about adding tests. Author: petermaxlee Closes #14708 from petermaxlee/SPARK-17149. --- .../catalyst/analysis/FunctionRegistry.scala | 12 +- .../test/resources/sql-tests/inputs/array.sql | 86 +++++++++++ .../resources/sql-tests/results/array.sql.out | 144 ++++++++++++++++++ .../org/apache/spark/sql/SQLQuerySuite.scala | 16 -- .../apache/spark/sql/SQLQueryTestSuite.scala | 10 ++ .../execution/HiveCompatibilitySuite.scala | 4 +- .../sql/hive/execution/HiveQuerySuite.scala | 9 -- 7 files changed, 248 insertions(+), 33 deletions(-) create mode 100644 sql/core/src/test/resources/sql-tests/inputs/array.sql create mode 100644 sql/core/src/test/resources/sql-tests/results/array.sql.out diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala index c5f91c1590542..35fd800df4a4f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala @@ -161,7 +161,6 @@ object FunctionRegistry { val expressions: Map[String, (ExpressionInfo, FunctionBuilder)] = Map( // misc non-aggregate functions expression[Abs]("abs"), - expression[CreateArray]("array"), expression[Coalesce]("coalesce"), expression[Explode]("explode"), expression[Greatest]("greatest"), @@ -172,10 +171,6 @@ object FunctionRegistry { expression[IsNull]("isnull"), expression[IsNotNull]("isnotnull"), expression[Least]("least"), - expression[CreateMap]("map"), - expression[MapKeys]("map_keys"), - expression[MapValues]("map_values"), - expression[CreateNamedStruct]("named_struct"), expression[NaNvl]("nanvl"), expression[NullIf]("nullif"), expression[Nvl]("nvl"), @@ -184,7 +179,6 @@ object FunctionRegistry { expression[Rand]("rand"), expression[Randn]("randn"), expression[Stack]("stack"), - expression[CreateStruct]("struct"), expression[CaseWhen]("when"), // math functions @@ -354,9 +348,15 @@ object FunctionRegistry { expression[TimeWindow]("window"), // collection functions + expression[CreateArray]("array"), expression[ArrayContains]("array_contains"), + expression[CreateMap]("map"), + expression[CreateNamedStruct]("named_struct"), + expression[MapKeys]("map_keys"), + expression[MapValues]("map_values"), expression[Size]("size"), expression[SortArray]("sort_array"), + expression[CreateStruct]("struct"), // misc functions expression[AssertTrue]("assert_true"), diff --git a/sql/core/src/test/resources/sql-tests/inputs/array.sql b/sql/core/src/test/resources/sql-tests/inputs/array.sql new file mode 
100644 index 0000000000000..4038a0da41d2b --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/array.sql @@ -0,0 +1,86 @@ +-- test cases for array functions + +create temporary view data as select * from values + ("one", array(11, 12, 13), array(array(111, 112, 113), array(121, 122, 123))), + ("two", array(21, 22, 23), array(array(211, 212, 213), array(221, 222, 223))) + as data(a, b, c); + +select * from data; + +-- index into array +select a, b[0], b[0] + b[1] from data; + +-- index into array of arrays +select a, c[0][0] + c[0][0 + 1] from data; + + +create temporary view primitive_arrays as select * from values ( + array(true), + array(2Y, 1Y), + array(2S, 1S), + array(2, 1), + array(2L, 1L), + array(9223372036854775809, 9223372036854775808), + array(2.0D, 1.0D), + array(float(2.0), float(1.0)), + array(date '2016-03-14', date '2016-03-13'), + array(timestamp '2016-11-15 20:54:00.000', timestamp '2016-11-12 20:54:00.000') +) as primitive_arrays( + boolean_array, + tinyint_array, + smallint_array, + int_array, + bigint_array, + decimal_array, + double_array, + float_array, + date_array, + timestamp_array +); + +select * from primitive_arrays; + +-- array_contains on all primitive types: result should alternate between true and false +select + array_contains(boolean_array, true), array_contains(boolean_array, false), + array_contains(tinyint_array, 2Y), array_contains(tinyint_array, 0Y), + array_contains(smallint_array, 2S), array_contains(smallint_array, 0S), + array_contains(int_array, 2), array_contains(int_array, 0), + array_contains(bigint_array, 2L), array_contains(bigint_array, 0L), + array_contains(decimal_array, 9223372036854775809), array_contains(decimal_array, 1), + array_contains(double_array, 2.0D), array_contains(double_array, 0.0D), + array_contains(float_array, float(2.0)), array_contains(float_array, float(0.0)), + array_contains(date_array, date '2016-03-14'), array_contains(date_array, date '2016-01-01'), + array_contains(timestamp_array, timestamp '2016-11-15 20:54:00.000'), array_contains(timestamp_array, timestamp '2016-01-01 20:54:00.000') +from primitive_arrays; + +-- array_contains on nested arrays +select array_contains(b, 11), array_contains(c, array(111, 112, 113)) from data; + +-- sort_array +select + sort_array(boolean_array), + sort_array(tinyint_array), + sort_array(smallint_array), + sort_array(int_array), + sort_array(bigint_array), + sort_array(decimal_array), + sort_array(double_array), + sort_array(float_array), + sort_array(date_array), + sort_array(timestamp_array) +from primitive_arrays; + +-- size +select + size(boolean_array), + size(tinyint_array), + size(smallint_array), + size(int_array), + size(bigint_array), + size(decimal_array), + size(double_array), + size(float_array), + size(date_array), + size(timestamp_array) +from primitive_arrays; diff --git a/sql/core/src/test/resources/sql-tests/results/array.sql.out b/sql/core/src/test/resources/sql-tests/results/array.sql.out new file mode 100644 index 0000000000000..4a1d149c1f362 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/array.sql.out @@ -0,0 +1,144 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 10 + + +-- !query 0 +create temporary view data as select * from values + ("one", array(11, 12, 13), array(array(111, 112, 113), array(121, 122, 123))), + ("two", array(21, 22, 23), array(array(211, 212, 213), array(221, 222, 223))) + as data(a, b, c) +-- !query 0 schema +struct<> +-- !query 0 output + + + +-- !query 1 +select * from data 
+-- !query 1 schema +struct,c:array>> +-- !query 1 output +one [11,12,13] [[111,112,113],[121,122,123]] +two [21,22,23] [[211,212,213],[221,222,223]] + + +-- !query 2 +select a, b[0], b[0] + b[1] from data +-- !query 2 schema +struct +-- !query 2 output +one 11 23 +two 21 43 + + +-- !query 3 +select a, c[0][0] + c[0][0 + 1] from data +-- !query 3 schema +struct +-- !query 3 output +one 223 +two 423 + + +-- !query 4 +create temporary view primitive_arrays as select * from values ( + array(true), + array(2Y, 1Y), + array(2S, 1S), + array(2, 1), + array(2L, 1L), + array(9223372036854775809, 9223372036854775808), + array(2.0D, 1.0D), + array(float(2.0), float(1.0)), + array(date '2016-03-14', date '2016-03-13'), + array(timestamp '2016-11-15 20:54:00.000', timestamp '2016-11-12 20:54:00.000') +) as primitive_arrays( + boolean_array, + tinyint_array, + smallint_array, + int_array, + bigint_array, + decimal_array, + double_array, + float_array, + date_array, + timestamp_array +) +-- !query 4 schema +struct<> +-- !query 4 output + + + +-- !query 5 +select * from primitive_arrays +-- !query 5 schema +struct,tinyint_array:array,smallint_array:array,int_array:array,bigint_array:array,decimal_array:array,double_array:array,float_array:array,date_array:array,timestamp_array:array> +-- !query 5 output +[true] [2,1] [2,1] [2,1] [2,1] [9223372036854775809,9223372036854775808] [2.0,1.0] [2.0,1.0] [2016-03-14,2016-03-13] [2016-11-15 20:54:00.0,2016-11-12 20:54:00.0] + + +-- !query 6 +select + array_contains(boolean_array, true), array_contains(boolean_array, false), + array_contains(tinyint_array, 2Y), array_contains(tinyint_array, 0Y), + array_contains(smallint_array, 2S), array_contains(smallint_array, 0S), + array_contains(int_array, 2), array_contains(int_array, 0), + array_contains(bigint_array, 2L), array_contains(bigint_array, 0L), + array_contains(decimal_array, 9223372036854775809), array_contains(decimal_array, 1), + array_contains(double_array, 2.0D), array_contains(double_array, 0.0D), + array_contains(float_array, float(2.0)), array_contains(float_array, float(0.0)), + array_contains(date_array, date '2016-03-14'), array_contains(date_array, date '2016-01-01'), + array_contains(timestamp_array, timestamp '2016-11-15 20:54:00.000'), array_contains(timestamp_array, timestamp '2016-01-01 20:54:00.000') +from primitive_arrays +-- !query 6 schema +struct +-- !query 6 output +true false true false true false true false true false true false true false true false true false true false + + +-- !query 7 +select array_contains(b, 11), array_contains(c, array(111, 112, 113)) from data +-- !query 7 schema +struct +-- !query 7 output +false false +true true + + +-- !query 8 +select + sort_array(boolean_array), + sort_array(tinyint_array), + sort_array(smallint_array), + sort_array(int_array), + sort_array(bigint_array), + sort_array(decimal_array), + sort_array(double_array), + sort_array(float_array), + sort_array(date_array), + sort_array(timestamp_array) +from primitive_arrays +-- !query 8 schema +struct,sort_array(tinyint_array, true):array,sort_array(smallint_array, true):array,sort_array(int_array, true):array,sort_array(bigint_array, true):array,sort_array(decimal_array, true):array,sort_array(double_array, true):array,sort_array(float_array, true):array,sort_array(date_array, true):array,sort_array(timestamp_array, true):array> +-- !query 8 output +[true] [1,2] [1,2] [1,2] [1,2] [9223372036854775808,9223372036854775809] [1.0,2.0] [1.0,2.0] [2016-03-13,2016-03-14] [2016-11-12 20:54:00.0,2016-11-15 
20:54:00.0] + + +-- !query 9 +select + size(boolean_array), + size(tinyint_array), + size(smallint_array), + size(int_array), + size(bigint_array), + size(decimal_array), + size(double_array), + size(float_array), + size(date_array), + size(timestamp_array) +from primitive_arrays +-- !query 9 schema +struct +-- !query 9 output +1 2 2 2 2 2 2 2 2 2 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala index 4fcde58833d76..eac266cba55b8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala @@ -445,12 +445,6 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext { Nil) } - test("index into array") { - checkAnswer( - sql("SELECT data, data[0], data[0] + data[1], data[0 + 1] FROM arrayData"), - arrayData.map(d => Row(d.data, d.data(0), d.data(0) + d.data(1), d.data(1))).collect()) - } - test("left semi greater than predicate") { withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") { checkAnswer( @@ -472,16 +466,6 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext { ) } - test("index into array of arrays") { - checkAnswer( - sql( - "SELECT nestedData, nestedData[0][0], nestedData[0][0] + nestedData[0][1] FROM arrayData"), - arrayData.map(d => - Row(d.nestedData, - d.nestedData(0)(0), - d.nestedData(0)(0) + d.nestedData(0)(1))).collect().toSeq) - } - test("agg") { checkAnswer( sql("SELECT a, SUM(b) FROM testData2 GROUP BY a"), diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala index 069a9b665eb36..55d5a56f1040a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala @@ -35,6 +35,16 @@ import org.apache.spark.sql.types.StructType * Each case is loaded from a file in "spark/sql/core/src/test/resources/sql-tests/inputs". * Each case has a golden result file in "spark/sql/core/src/test/resources/sql-tests/results". 
* + * To run the entire test suite: + * {{{ + * build/sbt "sql/test-only *SQLQueryTestSuite" + * }}} + * + * To run a single test file upon change: + * {{{ + * build/sbt "~sql/test-only *SQLQueryTestSuite -- -z inline-table.sql" + * }}} + * * To re-generate golden files, run: * {{{ * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite" diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala index 13d18fdec0e9d..a54d234876256 100644 --- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala +++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala @@ -979,8 +979,8 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "udf_PI", "udf_acos", "udf_add", - "udf_array", - "udf_array_contains", + // "udf_array", -- done in array.sql + // "udf_array_contains", -- done in array.sql "udf_ascii", "udf_asin", "udf_atan", diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala index 6785167d3dfba..3c7dbb449c521 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala @@ -216,15 +216,6 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter { assert(new Timestamp(1000) == r1.getTimestamp(0)) } - createQueryTest("constant array", - """ - |SELECT sort_array( - | sort_array( - | array("hadoop distributed file system", - | "enterprise databases", "hadoop map-reduce"))) - |FROM src LIMIT 1; - """.stripMargin) - createQueryTest("null case", "SELECT case when(true) then 1 else null end FROM src LIMIT 1") From ba1737c21aab91ff3f1a1737aa2d6b07575e36a3 Mon Sep 17 00:00:00 2001 From: Srinath Shankar Date: Fri, 19 Aug 2016 19:54:26 -0700 Subject: [PATCH 034/270] [SPARK-17158][SQL] Change error message for out of range numeric literals ## What changes were proposed in this pull request? Modifies error message for numeric literals to Numeric literal does not fit in range [min, max] for type ## How was this patch tested? Fixed up the error messages for literals.sql in SqlQueryTestSuite and re-ran via sbt. Also fixed up error messages in ExpressionParserSuite Author: Srinath Shankar Closes #14721 from srinathshankar/sc4296. --- .../sql/catalyst/parser/AstBuilder.scala | 29 ++++++++++++------- .../parser/ExpressionParserSuite.scala | 9 ++++-- .../sql-tests/results/literals.sql.out | 6 ++-- 3 files changed, 27 insertions(+), 17 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 283e4d43ba2b9..8b98efcbf33c8 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -1278,10 +1278,17 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { } /** Create a numeric literal expression. 
*/ - private def numericLiteral(ctx: NumberContext)(f: String => Any): Literal = withOrigin(ctx) { - val raw = ctx.getText + private def numericLiteral + (ctx: NumberContext, minValue: BigDecimal, maxValue: BigDecimal, typeName: String) + (converter: String => Any): Literal = withOrigin(ctx) { + val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1) try { - Literal(f(raw.substring(0, raw.length - 1))) + val rawBigDecimal = BigDecimal(rawStrippedQualifier) + if (rawBigDecimal < minValue || rawBigDecimal > maxValue) { + throw new ParseException(s"Numeric literal ${rawStrippedQualifier} does not " + + s"fit in range [${minValue}, ${maxValue}] for type ${typeName}", ctx) + } + Literal(converter(rawStrippedQualifier)) } catch { case e: NumberFormatException => throw new ParseException(e.getMessage, ctx) @@ -1291,29 +1298,29 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { /** * Create a Byte Literal expression. */ - override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = numericLiteral(ctx) { - _.toByte + override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = { + numericLiteral(ctx, Byte.MinValue, Byte.MaxValue, ByteType.simpleString)(_.toByte) } /** * Create a Short Literal expression. */ - override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = numericLiteral(ctx) { - _.toShort + override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = { + numericLiteral(ctx, Short.MinValue, Short.MaxValue, ShortType.simpleString)(_.toShort) } /** * Create a Long Literal expression. */ - override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = numericLiteral(ctx) { - _.toLong + override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = { + numericLiteral(ctx, Long.MinValue, Long.MaxValue, LongType.simpleString)(_.toLong) } /** * Create a Double Literal expression. */ - override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = numericLiteral(ctx) { - _.toDouble + override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = { + numericLiteral(ctx, Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble) } /** diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala index 849d96212822c..401d9cd9d288c 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala @@ -375,18 +375,21 @@ class ExpressionParserSuite extends PlanTest { // Tiny Int Literal assertEqual("10Y", Literal(10.toByte)) - intercept("-1000Y") + intercept("-1000Y", s"does not fit in range [${Byte.MinValue}, ${Byte.MaxValue}]") // Small Int Literal assertEqual("10S", Literal(10.toShort)) - intercept("40000S") + intercept("40000S", s"does not fit in range [${Short.MinValue}, ${Short.MaxValue}]") // Long Int Literal assertEqual("10L", Literal(10L)) - intercept("78732472347982492793712334L") + intercept("78732472347982492793712334L", + s"does not fit in range [${Long.MinValue}, ${Long.MaxValue}]") // Double Literal assertEqual("10.0D", Literal(10.0D)) + intercept("-1.8E308D", s"does not fit in range") + intercept("1.8E308D", s"does not fit in range") // TODO we need to figure out if we should throw an exception here! 
assertEqual("1E309", Literal(Double.PositiveInfinity)) } diff --git a/sql/core/src/test/resources/sql-tests/results/literals.sql.out b/sql/core/src/test/resources/sql-tests/results/literals.sql.out index b964a6fc0921f..67e6d78dfbf24 100644 --- a/sql/core/src/test/resources/sql-tests/results/literals.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/literals.sql.out @@ -41,7 +41,7 @@ struct<> -- !query 4 output org.apache.spark.sql.catalyst.parser.ParseException -Value out of range. Value:"128" Radix:10(line 1, pos 7) +Numeric literal 128 does not fit in range [-128, 127] for type tinyint(line 1, pos 7) == SQL == select 128Y @@ -71,7 +71,7 @@ struct<> -- !query 7 output org.apache.spark.sql.catalyst.parser.ParseException -Value out of range. Value:"32768" Radix:10(line 1, pos 7) +Numeric literal 32768 does not fit in range [-32768, 32767] for type smallint(line 1, pos 7) == SQL == select 32768S @@ -101,7 +101,7 @@ struct<> -- !query 10 output org.apache.spark.sql.catalyst.parser.ParseException -For input string: "9223372036854775808"(line 1, pos 7) +Numeric literal 9223372036854775808 does not fit in range [-9223372036854775808, 9223372036854775807] for type bigint(line 1, pos 7) == SQL == select 9223372036854775808L From 45d40d9f66c666eec6df926db23937589d67225d Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Sat, 20 Aug 2016 13:19:38 +0800 Subject: [PATCH 035/270] [SPARK-17150][SQL] Support SQL generation for inline tables ## What changes were proposed in this pull request? This patch adds support for SQL generation for inline tables. With this, it would be possible to create a view that depends on inline tables. ## How was this patch tested? Added a test case in LogicalPlanToSQLSuite. Author: petermaxlee Closes #14709 from petermaxlee/SPARK-17150. 
--- .../catalyst/plans/logical/LocalRelation.scala | 17 +++++++++++++++-- .../apache/spark/sql/catalyst/SQLBuilder.scala | 3 +++ .../src/test/resources/sqlgen/inline_tables.sql | 4 ++++ .../sql/catalyst/LogicalPlanToSQLSuite.scala | 8 ++++++++ 4 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 sql/hive/src/test/resources/sqlgen/inline_tables.sql diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LocalRelation.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LocalRelation.scala index 9d64f35efcc6a..890865d177845 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LocalRelation.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LocalRelation.scala @@ -18,8 +18,9 @@ package org.apache.spark.sql.catalyst.plans.logical import org.apache.spark.sql.Row -import org.apache.spark.sql.catalyst.{analysis, CatalystTypeConverters, InternalRow} -import org.apache.spark.sql.catalyst.expressions.Attribute +import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow} +import org.apache.spark.sql.catalyst.analysis +import org.apache.spark.sql.catalyst.expressions.{Attribute, Literal} import org.apache.spark.sql.types.{StructField, StructType} object LocalRelation { @@ -75,4 +76,16 @@ case class LocalRelation(output: Seq[Attribute], data: Seq[InternalRow] = Nil) override lazy val statistics = Statistics(sizeInBytes = output.map(_.dataType.defaultSize).sum * data.length) + + def toSQL(inlineTableName: String): String = { + require(data.nonEmpty) + val types = output.map(_.dataType) + val rows = data.map { row => + val cells = row.toSeq(types).zip(types).map { case (v, tpe) => Literal(v, tpe).sql } + cells.mkString("(", ", ", ")") + } + "VALUES " + rows.mkString(", ") + + " AS " + inlineTableName + + output.map(_.name).mkString("(", ", ", ")") + } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala index 0f51aa58d63ba..af1de511da060 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala @@ -205,6 +205,9 @@ class SQLBuilder private ( case p: ScriptTransformation => scriptTransformationToSQL(p) + case p: LocalRelation => + p.toSQL(newSubqueryName()) + case OneRowRelation => "" diff --git a/sql/hive/src/test/resources/sqlgen/inline_tables.sql b/sql/hive/src/test/resources/sqlgen/inline_tables.sql new file mode 100644 index 0000000000000..602551e69da6e --- /dev/null +++ b/sql/hive/src/test/resources/sqlgen/inline_tables.sql @@ -0,0 +1,4 @@ +-- This file is automatically generated by LogicalPlanToSQLSuite. 
+select * from values ("one", 1), ("two", 2), ("three", null) as data(a, b) where b > 1 +-------------------------------------------------------------------------------- +SELECT `gen_attr_0` AS `a`, `gen_attr_1` AS `b` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (VALUES ("one", 1), ("two", 2), ("three", CAST(NULL AS INT)) AS gen_subquery_0(gen_attr_0, gen_attr_1)) AS data WHERE (`gen_attr_1` > 1)) AS data diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala index 4e5a51155defd..742b065891a8e 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala @@ -1102,4 +1102,12 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { checkSQL("select * from orc_t", "select_orc_table") } } + + test("inline tables") { + checkSQL( + """ + |select * from values ("one", 1), ("two", 2), ("three", null) as data(a, b) where b > 1 + """.stripMargin, + "inline_tables") + } } From 39f328ba3519b01940a7d1cdee851ba4e75ef31f Mon Sep 17 00:00:00 2001 From: Bryan Cutler Date: Fri, 19 Aug 2016 23:46:36 -0700 Subject: [PATCH 036/270] [SPARK-15018][PYSPARK][ML] Improve handling of PySpark Pipeline when used without stages ## What changes were proposed in this pull request? When fitting a PySpark Pipeline without the `stages` param set, a confusing NoneType error is raised as attempts to iterate over the pipeline stages. A pipeline with no stages should act as an identity transform, however the `stages` param still needs to be set to an empty list. This change improves the error output when the `stages` param is not set and adds a better description of what the API expects as input. Also minor cleanup of related code. ## How was this patch tested? Added new unit tests to verify an empty Pipeline acts as an identity transformer Author: Bryan Cutler Closes #12790 from BryanCutler/pipeline-identity-SPARK-15018. --- python/pyspark/ml/pipeline.py | 11 +++-------- python/pyspark/ml/tests.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/python/pyspark/ml/pipeline.py b/python/pyspark/ml/pipeline.py index a48f4bb2ad1ba..4307ad02a0ebd 100644 --- a/python/pyspark/ml/pipeline.py +++ b/python/pyspark/ml/pipeline.py @@ -44,21 +44,19 @@ class Pipeline(Estimator, MLReadable, MLWritable): the dataset for the next stage. The fitted model from a :py:class:`Pipeline` is a :py:class:`PipelineModel`, which consists of fitted models and transformers, corresponding to the - pipeline stages. If there are no stages, the pipeline acts as an + pipeline stages. If stages is an empty list, the pipeline acts as an identity transformer. .. versionadded:: 1.3.0 """ - stages = Param(Params._dummy(), "stages", "pipeline stages") + stages = Param(Params._dummy(), "stages", "a list of pipeline stages") @keyword_only def __init__(self, stages=None): """ __init__(self, stages=None) """ - if stages is None: - stages = [] super(Pipeline, self).__init__() kwargs = self.__init__._input_kwargs self.setParams(**kwargs) @@ -78,8 +76,7 @@ def getStages(self): """ Get pipeline stages. """ - if self.stages in self._paramMap: - return self._paramMap[self.stages] + return self.getOrDefault(self.stages) @keyword_only @since("1.3.0") @@ -88,8 +85,6 @@ def setParams(self, stages=None): setParams(self, stages=None) Sets params for Pipeline. 
""" - if stages is None: - stages = [] kwargs = self.setParams._input_kwargs return self._set(**kwargs) diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py index 4bcb2c400c4aa..6886ed321ee82 100755 --- a/python/pyspark/ml/tests.py +++ b/python/pyspark/ml/tests.py @@ -230,6 +230,17 @@ def test_pipeline(self): self.assertEqual(5, transformer3.dataset_index) self.assertEqual(6, dataset.index) + def test_identity_pipeline(self): + dataset = MockDataset() + + def doTransform(pipeline): + pipeline_model = pipeline.fit(dataset) + return pipeline_model.transform(dataset) + # check that empty pipeline did not perform any transformation + self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index) + # check that failure to set stages param will raise KeyError for missing param + self.assertRaises(KeyError, lambda: doTransform(Pipeline())) + class TestParams(HasMaxIter, HasInputCol, HasSeed): """ From 01401e965b58f7e8ab615764a452d7d18f1d4bf0 Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Sat, 20 Aug 2016 06:59:23 -0700 Subject: [PATCH 037/270] [SPARK-16508][SPARKR] Fix CRAN undocumented/duplicated arguments warnings. ## What changes were proposed in this pull request? This PR tries to fix all the remaining "undocumented/duplicated arguments" warnings given by CRAN-check. One left is doc for R `stats::glm` exported in SparkR. To mute that warning, we have to also provide document for all arguments of that non-SparkR function. Some previous conversation is in #14558. ## How was this patch tested? R unit test and `check-cran.sh` script (with no-test). Author: Junyang Qian Closes #14705 from junyangq/SPARK-16508-master. --- R/pkg/R/DataFrame.R | 221 +++++++++++++++++++++++++------------------ R/pkg/R/SQLContext.R | 30 +++--- R/pkg/R/WindowSpec.R | 11 ++- R/pkg/R/column.R | 18 +++- R/pkg/R/functions.R | 173 +++++++++++++++++++++------------ R/pkg/R/generics.R | 62 +++++++++--- R/pkg/R/group.R | 7 +- R/pkg/R/mllib.R | 113 +++++++++++----------- R/pkg/R/schema.R | 5 +- R/pkg/R/sparkR.R | 21 ++-- R/pkg/R/stats.R | 25 +++-- 11 files changed, 419 insertions(+), 267 deletions(-) diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index 09be06de06b52..540dc3122dd6d 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -120,8 +120,9 @@ setMethod("schema", #' #' Print the logical and physical Catalyst plans to the console for debugging. #' -#' @param x A SparkDataFrame +#' @param x a SparkDataFrame. #' @param extended Logical. If extended is FALSE, explain() only prints the physical plan. +#' @param ... further arguments to be passed to or from other methods. #' @family SparkDataFrame functions #' @aliases explain,SparkDataFrame-method #' @rdname explain @@ -177,11 +178,13 @@ setMethod("isLocal", #' #' Print the first numRows rows of a SparkDataFrame #' -#' @param x A SparkDataFrame -#' @param numRows The number of rows to print. Defaults to 20. -#' @param truncate Whether truncate long strings. If true, strings more than 20 characters will be -#' truncated. However, if set greater than zero, truncates strings longer than `truncate` -#' characters and all cells will be aligned right. +#' @param x a SparkDataFrame. +#' @param numRows the number of rows to print. Defaults to 20. +#' @param truncate whether truncate long strings. If \code{TRUE}, strings more than +#' 20 characters will be truncated. However, if set greater than zero, +#' truncates strings longer than `truncate` characters and all cells +#' will be aligned right. +#' @param ... 
further arguments to be passed to or from other methods. #' @family SparkDataFrame functions #' @aliases showDF,SparkDataFrame-method #' @rdname showDF @@ -211,7 +214,7 @@ setMethod("showDF", #' #' Print the SparkDataFrame column names and types #' -#' @param x A SparkDataFrame +#' @param object a SparkDataFrame. #' #' @family SparkDataFrame functions #' @rdname show @@ -262,11 +265,11 @@ setMethod("dtypes", }) }) -#' Column names +#' Column Names of SparkDataFrame #' -#' Return all column names as a list +#' Return all column names as a list. #' -#' @param x A SparkDataFrame +#' @param x a SparkDataFrame. #' #' @family SparkDataFrame functions #' @rdname columns @@ -323,6 +326,8 @@ setMethod("colnames", columns(x) }) +#' @param value a character vector. Must have the same length as the number +#' of columns in the SparkDataFrame. #' @rdname columns #' @aliases colnames<-,SparkDataFrame-method #' @name colnames<- @@ -514,9 +519,10 @@ setMethod("registerTempTable", #' #' Insert the contents of a SparkDataFrame into a table registered in the current SparkSession. #' -#' @param x A SparkDataFrame -#' @param tableName A character vector containing the name of the table -#' @param overwrite A logical argument indicating whether or not to overwrite +#' @param x a SparkDataFrame. +#' @param tableName a character vector containing the name of the table. +#' @param overwrite a logical argument indicating whether or not to overwrite. +#' @param ... further arguments to be passed to or from other methods. #' the existing rows in the table. #' #' @family SparkDataFrame functions @@ -575,7 +581,9 @@ setMethod("cache", #' supported storage levels, refer to #' \url{http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence}. #' -#' @param x The SparkDataFrame to persist +#' @param x the SparkDataFrame to persist. +#' @param newLevel storage level chosen for the persistance. See available options in +#' the description. #' #' @family SparkDataFrame functions #' @rdname persist @@ -603,8 +611,9 @@ setMethod("persist", #' Mark this SparkDataFrame as non-persistent, and remove all blocks for it from memory and #' disk. #' -#' @param x The SparkDataFrame to unpersist -#' @param blocking Whether to block until all blocks are deleted +#' @param x the SparkDataFrame to unpersist. +#' @param blocking whether to block until all blocks are deleted. +#' @param ... further arguments to be passed to or from other methods. #' #' @family SparkDataFrame functions #' @rdname unpersist-methods @@ -638,9 +647,10 @@ setMethod("unpersist", #' \item{3.} {Return a new SparkDataFrame partitioned by the given column(s), #' using `spark.sql.shuffle.partitions` as number of partitions.} #'} -#' @param x A SparkDataFrame -#' @param numPartitions The number of partitions to use. -#' @param col The column by which the partitioning will be performed. +#' @param x a SparkDataFrame. +#' @param numPartitions the number of partitions to use. +#' @param col the column by which the partitioning will be performed. +#' @param ... additional column(s) to be used in the partitioning. #' #' @family SparkDataFrame functions #' @rdname repartition @@ -919,11 +929,10 @@ setMethod("sample_frac", #' Returns the number of rows in a SparkDataFrame #' -#' @param x A SparkDataFrame -#' +#' @param x a SparkDataFrame. 
#' @family SparkDataFrame functions #' @rdname nrow -#' @name count +#' @name nrow #' @aliases count,SparkDataFrame-method #' @export #' @examples @@ -999,9 +1008,10 @@ setMethod("dim", #' Collects all the elements of a SparkDataFrame and coerces them into an R data.frame. #' -#' @param x A SparkDataFrame -#' @param stringsAsFactors (Optional) A logical indicating whether or not string columns +#' @param x a SparkDataFrame. +#' @param stringsAsFactors (Optional) a logical indicating whether or not string columns #' should be converted to factors. FALSE by default. +#' @param ... further arguments to be passed to or from other methods. #' #' @family SparkDataFrame functions #' @rdname collect @@ -1096,8 +1106,10 @@ setMethod("limit", dataFrame(res) }) -#' Take the first NUM rows of a SparkDataFrame and return a the results as a R data.frame +#' Take the first NUM rows of a SparkDataFrame and return the results as a R data.frame #' +#' @param x a SparkDataFrame. +#' @param num number of rows to take. #' @family SparkDataFrame functions #' @rdname take #' @name take @@ -1124,9 +1136,9 @@ setMethod("take", #' then head() returns the first 6 rows in keeping with the current data.frame #' convention in R. #' -#' @param x A SparkDataFrame -#' @param num The number of rows to return. Default is 6. -#' @return A data.frame +#' @param x a SparkDataFrame. +#' @param num the number of rows to return. Default is 6. +#' @return A data.frame. #' #' @family SparkDataFrame functions #' @aliases head,SparkDataFrame-method @@ -1150,7 +1162,8 @@ setMethod("head", #' Return the first row of a SparkDataFrame #' -#' @param x A SparkDataFrame +#' @param x a SparkDataFrame or a column used in aggregation function. +#' @param ... further arguments to be passed to or from other methods. #' #' @family SparkDataFrame functions #' @aliases first,SparkDataFrame-method @@ -1201,8 +1214,9 @@ setMethod("toRDD", #' #' Groups the SparkDataFrame using the specified columns, so we can run aggregation on them. #' -#' @param x a SparkDataFrame -#' @return a GroupedData +#' @param x a SparkDataFrame. +#' @param ... variable(s) (character names(s) or Column(s)) to group on. +#' @return A GroupedData. #' @family SparkDataFrame functions #' @aliases groupBy,SparkDataFrame-method #' @rdname groupBy @@ -1244,7 +1258,6 @@ setMethod("group_by", #' #' Compute aggregates by specifying a list of columns #' -#' @param x a SparkDataFrame #' @family SparkDataFrame functions #' @aliases agg,SparkDataFrame-method #' @rdname summarize @@ -1391,16 +1404,15 @@ setMethod("dapplyCollect", #' Groups the SparkDataFrame using the specified columns and applies the R function to each #' group. #' -#' @param x A SparkDataFrame -#' @param cols Grouping columns -#' @param func A function to be applied to each group partition specified by grouping +#' @param cols grouping columns. +#' @param func a function to be applied to each group partition specified by grouping #' column of the SparkDataFrame. The function `func` takes as argument #' a key - grouping columns and a data frame - a local R data.frame. #' The output of `func` is a local R data.frame. -#' @param schema The schema of the resulting SparkDataFrame after the function is applied. +#' @param schema the schema of the resulting SparkDataFrame after the function is applied. #' The schema must match to output of `func`. It has to be defined for each #' output column with preferred output column name and corresponding data type. -#' @return a SparkDataFrame +#' @return A SparkDataFrame. 
#' @family SparkDataFrame functions #' @aliases gapply,SparkDataFrame-method #' @rdname gapply @@ -1483,13 +1495,12 @@ setMethod("gapply", #' Groups the SparkDataFrame using the specified columns, applies the R function to each #' group and collects the result back to R as data.frame. #' -#' @param x A SparkDataFrame -#' @param cols Grouping columns -#' @param func A function to be applied to each group partition specified by grouping +#' @param cols grouping columns. +#' @param func a function to be applied to each group partition specified by grouping #' column of the SparkDataFrame. The function `func` takes as argument #' a key - grouping columns and a data frame - a local R data.frame. #' The output of `func` is a local R data.frame. -#' @return a data.frame +#' @return A data.frame. #' @family SparkDataFrame functions #' @aliases gapplyCollect,SparkDataFrame-method #' @rdname gapplyCollect @@ -1636,6 +1647,7 @@ getColumn <- function(x, c) { column(callJMethod(x@sdf, "col", c)) } +#' @param name name of a Column (without being wrapped by \code{""}). #' @rdname select #' @name $ #' @aliases $,SparkDataFrame-method @@ -1645,6 +1657,7 @@ setMethod("$", signature(x = "SparkDataFrame"), getColumn(x, name) }) +#' @param value a Column or NULL. If NULL, the specified Column is dropped. #' @rdname select #' @name $<- #' @aliases $<-,SparkDataFrame-method @@ -1719,12 +1732,13 @@ setMethod("[", signature(x = "SparkDataFrame"), #' Subset #' #' Return subsets of SparkDataFrame according to given conditions -#' @param x A SparkDataFrame -#' @param subset (Optional) A logical expression to filter on rows -#' @param select expression for the single Column or a list of columns to select from the SparkDataFrame +#' @param x a SparkDataFrame. +#' @param i,subset (Optional) a logical expression to filter on rows. +#' @param j,select expression for the single Column or a list of columns to select from the SparkDataFrame. #' @param drop if TRUE, a Column will be returned if the resulting dataset has only one column. -#' Otherwise, a SparkDataFrame will always be returned. -#' @return A new SparkDataFrame containing only the rows that meet the condition with selected columns +#' Otherwise, a SparkDataFrame will always be returned. +#' @param ... currently not used. +#' @return A new SparkDataFrame containing only the rows that meet the condition with selected columns. #' @export #' @family SparkDataFrame functions #' @aliases subset,SparkDataFrame-method @@ -1759,9 +1773,12 @@ setMethod("subset", signature(x = "SparkDataFrame"), #' Select #' #' Selects a set of columns with names or Column expressions. -#' @param x A SparkDataFrame -#' @param col A list of columns or single Column or name -#' @return A new SparkDataFrame with selected columns +#' @param x a SparkDataFrame. +#' @param col a list of columns or single Column or name. +#' @param ... additional column(s) if only one column is specified in \code{col}. +#' If more than one column is assigned in \code{col}, \code{...} +#' should be left empty. +#' @return A new SparkDataFrame with selected columns. #' @export #' @family SparkDataFrame functions #' @rdname select @@ -1858,9 +1875,9 @@ setMethod("selectExpr", #' Return a new SparkDataFrame by adding a column or replacing the existing column #' that has the same name. #' -#' @param x A SparkDataFrame -#' @param colName A column name. -#' @param col A Column expression. +#' @param x a SparkDataFrame. +#' @param colName a column name. +#' @param col a Column expression. 
#' @return A SparkDataFrame with the new column added or the existing column replaced. #' @family SparkDataFrame functions #' @aliases withColumn,SparkDataFrame,character,Column-method @@ -1889,8 +1906,8 @@ setMethod("withColumn", #' #' Return a new SparkDataFrame with the specified columns added or replaced. #' -#' @param .data A SparkDataFrame -#' @param col a named argument of the form name = col +#' @param .data a SparkDataFrame. +#' @param ... additional column argument(s) each in the form name = col. #' @return A new SparkDataFrame with the new columns added or replaced. #' @family SparkDataFrame functions #' @aliases mutate,SparkDataFrame-method @@ -1967,6 +1984,7 @@ setMethod("mutate", do.call(select, c(x, colList, deDupCols)) }) +#' @param _data a SparkDataFrame. #' @export #' @rdname mutate #' @aliases transform,SparkDataFrame-method @@ -2278,11 +2296,18 @@ setMethod("join", #' specified, the common column names in \code{x} and \code{y} will be used. #' @param by.x a character vector specifying the joining columns for x. #' @param by.y a character vector specifying the joining columns for y. +#' @param all a boolean value setting \code{all.x} and \code{all.y} +#' if any of them are unset. #' @param all.x a boolean value indicating whether all the rows in x should #' be including in the join #' @param all.y a boolean value indicating whether all the rows in y should #' be including in the join #' @param sort a logical argument indicating whether the resulting columns should be sorted +#' @param suffixes a string vector of length 2 used to make colnames of +#' \code{x} and \code{y} unique. +#' The first element is appended to each colname of \code{x}. +#' The second element is appended to each colname of \code{y}. +#' @param ... additional argument(s) passed to the method. #' @details If all.x and all.y are set to FALSE, a natural join will be returned. If #' all.x is set to TRUE and all.y is set to FALSE, a left outer join will #' be returned. If all.x is set to FALSE and all.y is set to TRUE, a right @@ -2311,7 +2336,7 @@ setMethod("merge", signature(x = "SparkDataFrame", y = "SparkDataFrame"), function(x, y, by = intersect(names(x), names(y)), by.x = by, by.y = by, all = FALSE, all.x = all, all.y = all, - sort = TRUE, suffixes = c("_x", "_y"), ... ) { + sort = TRUE, suffixes = c("_x", "_y"), ...) { if (length(suffixes) != 2) { stop("suffixes must have length 2") @@ -2464,8 +2489,10 @@ setMethod("unionAll", #' Union two or more SparkDataFrames. This is equivalent to `UNION ALL` in SQL. #' Note that this does not remove duplicate rows across the two SparkDataFrames. #' -#' @param x A SparkDataFrame -#' @param ... Additional SparkDataFrame +#' @param x a SparkDataFrame. +#' @param ... additional SparkDataFrame(s). +#' @param deparse.level currently not used (put here to match the signature of +#' the base implementation). #' @return A SparkDataFrame containing the result of the union. #' @family SparkDataFrame functions #' @aliases rbind,SparkDataFrame-method @@ -2522,8 +2549,8 @@ setMethod("intersect", #' Return a new SparkDataFrame containing rows in this SparkDataFrame #' but not in another SparkDataFrame. This is equivalent to `EXCEPT` in SQL. #' -#' @param x A SparkDataFrame -#' @param y A SparkDataFrame +#' @param x a SparkDataFrame. +#' @param y a SparkDataFrame. #' @return A SparkDataFrame containing the result of the except operation. 
#' @family SparkDataFrame functions #' @aliases except,SparkDataFrame,SparkDataFrame-method @@ -2564,10 +2591,11 @@ setMethod("except", #' and to not change the existing data. #' } #' -#' @param df A SparkDataFrame -#' @param path A name for the table -#' @param source A name for external data source -#' @param mode One of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default) +#' @param df a SparkDataFrame. +#' @param path a name for the table. +#' @param source a name for external data source. +#' @param mode one of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default) +#' @param ... additional argument(s) passed to the method. #' #' @family SparkDataFrame functions #' @aliases write.df,SparkDataFrame,character-method @@ -2626,10 +2654,11 @@ setMethod("saveDF", #' ignore: The save operation is expected to not save the contents of the SparkDataFrame #' and to not change the existing data. \cr #' -#' @param df A SparkDataFrame -#' @param tableName A name for the table -#' @param source A name for external data source -#' @param mode One of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default) +#' @param df a SparkDataFrame. +#' @param tableName a name for the table. +#' @param source a name for external data source. +#' @param mode one of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default). +#' @param ... additional option(s) passed to the method. #' #' @family SparkDataFrame functions #' @aliases saveAsTable,SparkDataFrame,character-method @@ -2665,10 +2694,10 @@ setMethod("saveAsTable", #' Computes statistics for numeric and string columns. #' If no columns are given, this function computes statistics for all numerical or string columns. #' -#' @param x A SparkDataFrame to be computed. -#' @param col A string of name -#' @param ... Additional expressions -#' @return A SparkDataFrame +#' @param x a SparkDataFrame to be computed. +#' @param col a string of name. +#' @param ... additional expressions. +#' @return A SparkDataFrame. #' @family SparkDataFrame functions #' @aliases describe,SparkDataFrame,character-method describe,SparkDataFrame,ANY-method #' @rdname summary @@ -2703,6 +2732,7 @@ setMethod("describe", dataFrame(sdf) }) +#' @param object a SparkDataFrame to be summarized. #' @rdname summary #' @name summary #' @aliases summary,SparkDataFrame-method @@ -2718,16 +2748,20 @@ setMethod("summary", #' #' dropna, na.omit - Returns a new SparkDataFrame omitting rows with null values. #' -#' @param x A SparkDataFrame. +#' @param x a SparkDataFrame. #' @param how "any" or "all". #' if "any", drop a row if it contains any nulls. #' if "all", drop a row only if all its values are null. #' if minNonNulls is specified, how is ignored. -#' @param minNonNulls If specified, drop rows that have less than +#' @param minNonNulls if specified, drop rows that have less than #' minNonNulls non-null values. #' This overwrites the how parameter. -#' @param cols Optional list of column names to consider. -#' @return A SparkDataFrame +#' @param cols optional list of column names to consider. In `fillna`, +#' columns specified in cols that do not have matching data +#' type are ignored. For example, if value is a character, and +#' subset contains a non-character column, then the non-character +#' column is simply ignored. +#' @return A SparkDataFrame. #' #' @family SparkDataFrame functions #' @rdname nafunctions @@ -2759,6 +2793,8 @@ setMethod("dropna", dataFrame(sdf) }) +#' @param object a SparkDataFrame. 
+#' @param ... further arguments to be passed to or from other methods. #' @rdname nafunctions #' @name na.omit #' @aliases na.omit,SparkDataFrame-method @@ -2772,18 +2808,12 @@ setMethod("na.omit", #' fillna - Replace null values. #' -#' @param x A SparkDataFrame. -#' @param value Value to replace null values with. +#' @param value value to replace null values with. #' Should be an integer, numeric, character or named list. #' If the value is a named list, then cols is ignored and #' value must be a mapping from column name (character) to #' replacement value. The replacement value must be an #' integer, numeric or character. -#' @param cols optional list of column names to consider. -#' Columns specified in cols that do not have matching data -#' type are ignored. For example, if value is a character, and -#' subset contains a non-character column, then the non-character -#' column is simply ignored. #' #' @rdname nafunctions #' @name fillna @@ -2848,8 +2878,11 @@ setMethod("fillna", #' Since data.frames are held in memory, ensure that you have enough memory #' in your system to accommodate the contents. #' -#' @param x a SparkDataFrame -#' @return a data.frame +#' @param x a SparkDataFrame. +#' @param row.names NULL or a character vector giving the row names for the data frame. +#' @param optional If `TRUE`, converting column names is optional. +#' @param ... additional arguments to pass to base::as.data.frame. +#' @return A data.frame. #' @family SparkDataFrame functions #' @aliases as.data.frame,SparkDataFrame-method #' @rdname as.data.frame @@ -3003,9 +3036,10 @@ setMethod("str", #' Returns a new SparkDataFrame with columns dropped. #' This is a no-op if schema doesn't contain column name(s). #' -#' @param x A SparkDataFrame. -#' @param cols A character vector of column names or a Column. -#' @return A SparkDataFrame +#' @param x a SparkDataFrame. +#' @param col a character vector of column names or a Column. +#' @param ... further arguments to be passed to or from other methods. +#' @return A SparkDataFrame. #' #' @family SparkDataFrame functions #' @rdname drop @@ -3024,7 +3058,7 @@ setMethod("str", #' @note drop since 2.0.0 setMethod("drop", signature(x = "SparkDataFrame"), - function(x, col) { + function(x, col, ...) { stopifnot(class(col) == "character" || class(col) == "Column") if (class(col) == "Column") { @@ -3052,8 +3086,8 @@ setMethod("drop", #' #' @name histogram #' @param nbins the number of bins (optional). Default value is 10. +#' @param col the column as Character string or a Column to build the histogram from. #' @param df the SparkDataFrame containing the Column to build the histogram from. -#' @param colname the name of the column to build the histogram from. #' @return a data.frame with the histogram statistics, i.e., counts and centroids. #' @rdname histogram #' @aliases histogram,SparkDataFrame,characterOrColumn-method @@ -3184,10 +3218,11 @@ setMethod("histogram", #' and to not change the existing data. #' } #' -#' @param x A SparkDataFrame -#' @param url JDBC database url of the form `jdbc:subprotocol:subname` -#' @param tableName The name of the table in the external database -#' @param mode One of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default) +#' @param x s SparkDataFrame. +#' @param url JDBC database url of the form `jdbc:subprotocol:subname`. +#' @param tableName yhe name of the table in the external database. +#' @param mode one of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default). +#' @param ... 
additional JDBC database connection properties. #' @family SparkDataFrame functions #' @rdname write.jdbc #' @name write.jdbc diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R index 0c06bba639d9b..a9cd2d85f898c 100644 --- a/R/pkg/R/SQLContext.R +++ b/R/pkg/R/SQLContext.R @@ -165,9 +165,9 @@ getDefaultSqlSource <- function() { #' #' Converts R data.frame or list into SparkDataFrame. #' -#' @param data An RDD or list or data.frame -#' @param schema a list of column names or named list (StructType), optional -#' @return a SparkDataFrame +#' @param data an RDD or list or data.frame. +#' @param schema a list of column names or named list (StructType), optional. +#' @return A SparkDataFrame. #' @rdname createDataFrame #' @export #' @examples @@ -257,23 +257,25 @@ createDataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0) { } createDataFrame <- function(x, ...) { - dispatchFunc("createDataFrame(data, schema = NULL, samplingRatio = 1.0)", x, ...) + dispatchFunc("createDataFrame(data, schema = NULL)", x, ...) } +#' @param samplingRatio Currently not used. #' @rdname createDataFrame #' @aliases createDataFrame #' @export #' @method as.DataFrame default #' @note as.DataFrame since 1.6.0 as.DataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0) { - createDataFrame(data, schema, samplingRatio) + createDataFrame(data, schema) } +#' @param ... additional argument(s). #' @rdname createDataFrame #' @aliases as.DataFrame #' @export -as.DataFrame <- function(x, ...) { - dispatchFunc("as.DataFrame(data, schema = NULL, samplingRatio = 1.0)", x, ...) +as.DataFrame <- function(data, ...) { + dispatchFunc("as.DataFrame(data, schema = NULL)", data, ...) } #' toDF @@ -398,7 +400,7 @@ read.orc <- function(path) { #' #' Loads a Parquet file, returning the result as a SparkDataFrame. #' -#' @param path Path of file to read. A vector of multiple paths is allowed. +#' @param path path of file to read. A vector of multiple paths is allowed. #' @return SparkDataFrame #' @rdname read.parquet #' @export @@ -418,6 +420,7 @@ read.parquet <- function(x, ...) { dispatchFunc("read.parquet(...)", x, ...) } +#' @param ... argument(s) passed to the method. #' @rdname read.parquet #' @name parquetFile #' @export @@ -727,6 +730,7 @@ dropTempView <- function(viewName) { #' @param source The name of external data source #' @param schema The data schema defined in structType #' @param na.strings Default string value for NA when source is "csv" +#' @param ... additional external data source specific named properties. #' @return SparkDataFrame #' @rdname read.df #' @name read.df @@ -791,10 +795,11 @@ loadDF <- function(x, ...) { #' If `source` is not specified, the default data source configured by #' "spark.sql.sources.default" will be used. #' -#' @param tableName A name of the table -#' @param path The path of files to load -#' @param source the name of external data source -#' @return SparkDataFrame +#' @param tableName a name of the table. +#' @param path the path of files to load. +#' @param source the name of external data source. +#' @param ... additional argument(s) passed to the method. +#' @return A SparkDataFrame. #' @rdname createExternalTable #' @export #' @examples @@ -840,6 +845,7 @@ createExternalTable <- function(x, ...) { #' clause expressions used to split the column `partitionColumn` evenly. #' This defaults to SparkContext.defaultParallelism when unset. #' @param predicates a list of conditions in the where clause; each one defines one partition +#' @param ... 
additional JDBC database connection named propertie(s). #' @return SparkDataFrame #' @rdname read.jdbc #' @name read.jdbc diff --git a/R/pkg/R/WindowSpec.R b/R/pkg/R/WindowSpec.R index 751ba3fde954d..b55356b07d5e3 100644 --- a/R/pkg/R/WindowSpec.R +++ b/R/pkg/R/WindowSpec.R @@ -54,8 +54,10 @@ setMethod("show", "WindowSpec", #' #' Defines the partitioning columns in a WindowSpec. #' -#' @param x a WindowSpec -#' @return a WindowSpec +#' @param x a WindowSpec. +#' @param col a column to partition on (desribed by the name or Column). +#' @param ... additional column(s) to partition on. +#' @return A WindowSpec. #' @rdname partitionBy #' @name partitionBy #' @aliases partitionBy,WindowSpec-method @@ -86,7 +88,7 @@ setMethod("partitionBy", #' #' Defines the ordering columns in a WindowSpec. #' @param x a WindowSpec -#' @param col a character or Column object indicating an ordering column +#' @param col a character or Column indicating an ordering column #' @param ... additional sorting fields #' @return A WindowSpec. #' @name orderBy @@ -192,6 +194,9 @@ setMethod("rangeBetween", #' #' Define a windowing column. #' +#' @param x a Column, usually one returned by window function(s). +#' @param window a WindowSpec object. Can be created by `windowPartitionBy` or +#' `windowOrderBy` and configured by other WindowSpec methods. #' @rdname over #' @name over #' @aliases over,Column,WindowSpec-method diff --git a/R/pkg/R/column.R b/R/pkg/R/column.R index 0edb9d2ae5c45..af486e1ce212d 100644 --- a/R/pkg/R/column.R +++ b/R/pkg/R/column.R @@ -163,8 +163,9 @@ setMethod("alias", #' @family colum_func #' @aliases substr,Column-method #' -#' @param start starting position -#' @param stop ending position +#' @param x a Column. +#' @param start starting position. +#' @param stop ending position. #' @note substr since 1.4.0 setMethod("substr", signature(x = "Column"), function(x, start, stop) { @@ -219,6 +220,7 @@ setMethod("endsWith", signature(x = "Column"), #' @family colum_func #' @aliases between,Column-method #' +#' @param x a Column #' @param bounds lower and upper bounds #' @note between since 1.5.0 setMethod("between", signature(x = "Column"), @@ -233,6 +235,11 @@ setMethod("between", signature(x = "Column"), #' Casts the column to a different data type. #' +#' @param x a Column. +#' @param dataType a character object describing the target data type. +#' See +#' \href{https://spark.apache.org/docs/latest/sparkr.html#data-type-mapping-between-r-and-spark}{ +#' Spark Data Types} for available data types. #' @rdname cast #' @name cast #' @family colum_func @@ -254,10 +261,12 @@ setMethod("cast", #' Match a column with given values. #' +#' @param x a Column. +#' @param table a collection of values (coercible to list) to compare with. #' @rdname match #' @name %in% #' @aliases %in%,Column-method -#' @return a matched values as a result of comparing with given values. +#' @return A matched values as a result of comparing with given values. #' @export #' @examples #' \dontrun{ @@ -277,6 +286,9 @@ setMethod("%in%", #' If values in the specified column are null, returns the value. #' Can be used in conjunction with `when` to specify a default value for expressions. #' +#' @param x a Column. +#' @param value value to replace when the corresponding entry in \code{x} is NA. +#' Can be a single value or a Column. 
#' @rdname otherwise #' @name otherwise #' @family colum_func diff --git a/R/pkg/R/functions.R b/R/pkg/R/functions.R index 573c915a5c67a..b3c10de71f3fe 100644 --- a/R/pkg/R/functions.R +++ b/R/pkg/R/functions.R @@ -23,6 +23,7 @@ NULL #' A new \linkS4class{Column} is created to represent the literal value. #' If the parameter is a \linkS4class{Column}, it is returned unchanged. #' +#' @param x a literal value or a Column. #' @family normal_funcs #' @rdname lit #' @name lit @@ -89,8 +90,6 @@ setMethod("acos", #' Returns the approximate number of distinct items in a group. This is a column #' aggregate function. #' -#' @param x Column to compute on. -#' #' @rdname approxCountDistinct #' @name approxCountDistinct #' @return the approximate number of distinct items in a group. @@ -171,8 +170,6 @@ setMethod("atan", #' #' Aggregate function: returns the average of the values in a group. #' -#' @param x Column to compute on. -#' #' @rdname avg #' @name avg #' @family agg_funcs @@ -319,7 +316,7 @@ setMethod("column", #' #' Computes the Pearson Correlation Coefficient for two Columns. #' -#' @param x Column to compute on. +#' @param col2 a (second) Column. #' #' @rdname corr #' @name corr @@ -339,8 +336,6 @@ setMethod("corr", signature(x = "Column"), #' #' Compute the sample covariance between two expressions. #' -#' @param x Column to compute on. -#' #' @rdname cov #' @name cov #' @family math_funcs @@ -362,8 +357,8 @@ setMethod("cov", signature(x = "characterOrColumn"), #' @rdname cov #' -#' @param col1 First column to compute cov_samp. -#' @param col2 Second column to compute cov_samp. +#' @param col1 the first Column. +#' @param col2 the second Column. #' @name covar_samp #' @aliases covar_samp,characterOrColumn,characterOrColumn-method #' @note covar_samp since 2.0.0 @@ -451,9 +446,7 @@ setMethod("cosh", #' #' Returns the number of items in a group. This is a column aggregate function. #' -#' @param x Column to compute on. -#' -#' @rdname nrow +#' @rdname count #' @name count #' @family agg_funcs #' @aliases count,Column-method @@ -493,6 +486,7 @@ setMethod("crc32", #' Calculates the hash code of given columns, and returns the result as a int column. #' #' @param x Column to compute on. +#' @param ... additional Column(s) to be included. #' #' @rdname hash #' @name hash @@ -663,7 +657,8 @@ setMethod("factorial", #' The function by default returns the first values it sees. It will return the first non-missing #' value it sees when na.rm is set to true. If all values are missing, then NA is returned. #' -#' @param x Column to compute on. +#' @param na.rm a logical value indicating whether NA values should be stripped +#' before the computation proceeds. #' #' @rdname first #' @name first @@ -832,7 +827,10 @@ setMethod("kurtosis", #' The function by default returns the last values it sees. It will return the last non-missing #' value it sees when na.rm is set to true. If all values are missing, then NA is returned. #' -#' @param x Column to compute on. +#' @param x column to compute on. +#' @param na.rm a logical value indicating whether NA values should be stripped +#' before the computation proceeds. +#' @param ... further arguments to be passed to or from other methods. 
#' #' @rdname last #' @name last @@ -1143,7 +1141,7 @@ setMethod("minute", #' @export #' @examples \dontrun{select(df, monotonically_increasing_id())} setMethod("monotonically_increasing_id", - signature(x = "missing"), + signature("missing"), function() { jc <- callJStatic("org.apache.spark.sql.functions", "monotonically_increasing_id") column(jc) @@ -1272,13 +1270,16 @@ setMethod("round", #' bround #' -#' Returns the value of the column `e` rounded to `scale` decimal places using HALF_EVEN rounding -#' mode if `scale` >= 0 or at integral part when `scale` < 0. +#' Returns the value of the column \code{e} rounded to \code{scale} decimal places using HALF_EVEN rounding +#' mode if \code{scale} >= 0 or at integer part when \code{scale} < 0. #' Also known as Gaussian rounding or bankers' rounding that rounds to the nearest even number. #' bround(2.5, 0) = 2, bround(3.5, 0) = 4. #' #' @param x Column to compute on. -#' +#' @param scale round to \code{scale} digits to the right of the decimal point when \code{scale} > 0, +#' the nearest even number when \code{scale} = 0, and \code{scale} digits to the left +#' of the decimal point when \code{scale} < 0. +#' @param ... further arguments to be passed to or from other methods. #' @rdname bround #' @name bround #' @family math_funcs @@ -1319,7 +1320,7 @@ setMethod("rtrim", #' Aggregate function: alias for \link{stddev_samp} #' #' @param x Column to compute on. -#' +#' @param na.rm currently not used. #' @rdname sd #' @name sd #' @family agg_funcs @@ -1497,7 +1498,7 @@ setMethod("soundex", #' \dontrun{select(df, spark_partition_id())} #' @note spark_partition_id since 2.0.0 setMethod("spark_partition_id", - signature(x = "missing"), + signature("missing"), function() { jc <- callJStatic("org.apache.spark.sql.functions", "spark_partition_id") column(jc) @@ -1560,7 +1561,8 @@ setMethod("stddev_samp", #' #' Creates a new struct column that composes multiple input columns. #' -#' @param x Column to compute on. +#' @param x a column to compute on. +#' @param ... optional column(s) to be included. #' #' @rdname struct #' @name struct @@ -1831,8 +1833,8 @@ setMethod("upper", #' #' Aggregate function: alias for \link{var_samp}. #' -#' @param x Column to compute on. -#' +#' @param x a Column to compute on. +#' @param y,na.rm,use currently not used. #' @rdname var #' @name var #' @family agg_funcs @@ -2114,7 +2116,9 @@ setMethod("pmod", signature(y = "Column"), #' @rdname approxCountDistinct #' @name approxCountDistinct #' +#' @param x Column to compute on. #' @param rsd maximum estimation error allowed (default = 0.05) +#' @param ... further arguments to be passed to or from other methods. #' #' @aliases approxCountDistinct,Column-method #' @export @@ -2127,7 +2131,7 @@ setMethod("approxCountDistinct", column(jc) }) -#' Count Distinct +#' Count Distinct Values #' #' @param x Column to compute on #' @param ... other columns @@ -2156,7 +2160,7 @@ setMethod("countDistinct", #' concat #' #' Concatenates multiple input string columns together into a single string column. -#' +#' #' @param x Column to compute on #' @param ... other columns #' @@ -2246,7 +2250,6 @@ setMethod("ceiling", }) #' @rdname sign -#' @param x Column to compute on #' #' @name sign #' @aliases sign,Column-method @@ -2262,9 +2265,6 @@ setMethod("sign", signature(x = "Column"), #' #' Aggregate function: returns the number of distinct items in a group. #' -#' @param x Column to compute on -#' @param ... 
other columns -#' #' @rdname countDistinct #' @name n_distinct #' @aliases n_distinct,Column-method @@ -2276,9 +2276,7 @@ setMethod("n_distinct", signature(x = "Column"), countDistinct(x, ...) }) -#' @rdname nrow -#' @param x Column to compute on -#' +#' @rdname count #' @name n #' @aliases n,Column-method #' @export @@ -2300,8 +2298,8 @@ setMethod("n", signature(x = "Column"), #' NOTE: Use when ever possible specialized functions like \code{year}. These benefit from a #' specialized implementation. #' -#' @param y Column to compute on -#' @param x date format specification +#' @param y Column to compute on. +#' @param x date format specification. #' #' @family datetime_funcs #' @rdname date_format @@ -2320,8 +2318,8 @@ setMethod("date_format", signature(y = "Column", x = "character"), #' #' Assumes given timestamp is UTC and converts to given timezone. #' -#' @param y Column to compute on -#' @param x time zone to use +#' @param y Column to compute on. +#' @param x time zone to use. #' #' @family datetime_funcs #' @rdname from_utc_timestamp @@ -2370,8 +2368,8 @@ setMethod("instr", signature(y = "Column", x = "character"), #' Day of the week parameter is case insensitive, and accepts first three or two characters: #' "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun". #' -#' @param y Column to compute on -#' @param x Day of the week string +#' @param y Column to compute on. +#' @param x Day of the week string. #' #' @family datetime_funcs #' @rdname next_day @@ -2637,6 +2635,7 @@ setMethod("conv", signature(x = "Column", fromBase = "numeric", toBase = "numeri #' Parses the expression string into the column that it represents, similar to #' SparkDataFrame.selectExpr #' +#' @param x an expression character object to be parsed. #' @family normal_funcs #' @rdname expr #' @aliases expr,character-method @@ -2654,6 +2653,9 @@ setMethod("expr", signature(x = "character"), #' #' Formats the arguments in printf-style and returns the result as a string column. #' +#' @param format a character object of format strings. +#' @param x a Column. +#' @param ... additional Column(s). #' @family string_funcs #' @rdname format_string #' @name format_string @@ -2676,6 +2678,11 @@ setMethod("format_string", signature(format = "character", x = "Column"), #' representing the timestamp of that moment in the current system time zone in the given #' format. #' +#' @param x a Column of unix timestamp. +#' @param format the target format. See +#' \href{http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html}{ +#' Customizing Formats} for available options. +#' @param ... further arguments to be passed to or from other methods. #' @family datetime_funcs #' @rdname from_unixtime #' @name from_unixtime @@ -2702,19 +2709,21 @@ setMethod("from_unixtime", signature(x = "Column"), #' [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in #' the order of months are not supported. #' -#' The time column must be of TimestampType. -#' -#' Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid -#' interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'. -#' If the `slideDuration` is not provided, the windows will be tumbling windows. -#' -#' The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start -#' window intervals. For example, in order to have hourly tumbling windows that start 15 minutes -#' past the hour, e.g. 12:15-13:15, 13:15-14:15... 
provide `startTime` as `15 minutes`. -#' -#' The output column will be a struct called 'window' by default with the nested columns 'start' -#' and 'end'. -#' +#' @param x a time Column. Must be of TimestampType. +#' @param windowDuration a string specifying the width of the window, e.g. '1 second', +#' '1 day 12 hours', '2 minutes'. Valid interval strings are 'week', +#' 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'. +#' @param slideDuration a string specifying the sliding interval of the window. Same format as +#' \code{windowDuration}. A new window will be generated every +#' \code{slideDuration}. Must be less than or equal to +#' the \code{windowDuration}. +#' @param startTime the offset with respect to 1970-01-01 00:00:00 UTC with which to start +#' window intervals. For example, in order to have hourly tumbling windows +#' that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide +#' \code{startTime} as \code{"15 minutes"}. +#' @param ... further arguments to be passed to or from other methods. +#' @return An output column of struct called 'window' by default with the nested columns 'start' +#' and 'end'. #' @family datetime_funcs #' @rdname window #' @name window @@ -2766,6 +2775,10 @@ setMethod("window", signature(x = "Column"), #' NOTE: The position is not zero based, but 1 based index, returns 0 if substr #' could not be found in str. #' +#' @param substr a character string to be matched. +#' @param str a Column where matches are sought for each entry. +#' @param pos start position of search. +#' @param ... further arguments to be passed to or from other methods. #' @family string_funcs #' @rdname locate #' @aliases locate,character,Column-method @@ -2785,6 +2798,9 @@ setMethod("locate", signature(substr = "character", str = "Column"), #' #' Left-pad the string column with #' +#' @param x the string Column to be left-padded. +#' @param len maximum length of each output result. +#' @param pad a character string to be padded with. #' @family string_funcs #' @rdname lpad #' @aliases lpad,Column,numeric,character-method @@ -2804,6 +2820,7 @@ setMethod("lpad", signature(x = "Column", len = "numeric", pad = "character"), #' #' Generate a random column with i.i.d. samples from U[0.0, 1.0]. #' +#' @param seed a random seed. Can be missing. #' @family normal_funcs #' @rdname rand #' @name rand @@ -2832,6 +2849,7 @@ setMethod("rand", signature(seed = "numeric"), #' #' Generate a column with i.i.d. samples from the standard normal distribution. #' +#' @param seed a random seed. Can be missing. #' @family normal_funcs #' @rdname randn #' @name randn @@ -2860,6 +2878,9 @@ setMethod("randn", signature(seed = "numeric"), #' #' Extract a specific(idx) group identified by a java regex, from the specified string column. #' +#' @param x a string Column. +#' @param pattern a regular expression. +#' @param idx a group index. #' @family string_funcs #' @rdname regexp_extract #' @name regexp_extract @@ -2880,6 +2901,9 @@ setMethod("regexp_extract", #' #' Replace all substrings of the specified string value that match regexp with rep. #' +#' @param x a string Column. +#' @param pattern a regular expression. +#' @param replacement a character string that a matched \code{pattern} is replaced with. #' @family string_funcs #' @rdname regexp_replace #' @name regexp_replace @@ -2900,6 +2924,9 @@ setMethod("regexp_replace", #' #' Right-padded with pad to a length of len. #' +#' @param x the string Column to be right-padded. 
+#' @param len maximum length of each output result. +#' @param pad a character string to be padded with. #' @family string_funcs #' @rdname rpad #' @name rpad @@ -2922,6 +2949,11 @@ setMethod("rpad", signature(x = "Column", len = "numeric", pad = "character"), #' returned. If count is negative, every to the right of the final delimiter (counting from the #' right) is returned. substring_index performs a case-sensitive match when searching for delim. #' +#' @param x a Column. +#' @param delim a delimiter string. +#' @param count number of occurrences of \code{delim} before the substring is returned. +#' A positive number means counting from the left, while negative means +#' counting from the right. #' @family string_funcs #' @rdname substring_index #' @aliases substring_index,Column,character,numeric-method @@ -2949,6 +2981,11 @@ setMethod("substring_index", #' The translate will happen when any character in the string matching with the character #' in the matchingString. #' +#' @param x a string Column. +#' @param matchingString a source string where each character will be translated. +#' @param replaceString a target string where each \code{matchingString} character will +#' be replaced by the character in \code{replaceString} +#' at the same location, if any. #' @family string_funcs #' @rdname translate #' @name translate @@ -2997,6 +3034,10 @@ setMethod("unix_timestamp", signature(x = "Column", format = "missing"), column(jc) }) +#' @param x a Column of date, in string, date or timestamp type. +#' @param format the target format. See +#' \href{http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html}{ +#' Customizing Formats} for available options. #' @rdname unix_timestamp #' @name unix_timestamp #' @aliases unix_timestamp,Column,character-method @@ -3012,6 +3053,8 @@ setMethod("unix_timestamp", signature(x = "Column", format = "character"), #' Evaluates a list of conditions and returns one of multiple possible result expressions. #' For unmatched expressions null is returned. #' +#' @param condition the condition to test on. Must be a Column expression. +#' @param value result expression. #' @family normal_funcs #' @rdname when #' @name when @@ -3033,6 +3076,9 @@ setMethod("when", signature(condition = "Column", value = "ANY"), #' Evaluates a list of conditions and returns \code{yes} if the conditions are satisfied. #' Otherwise \code{no} is returned for unmatched conditions. #' +#' @param test a Column expression that describes the condition. +#' @param yes return values for \code{TRUE} elements of test. +#' @param no return values for \code{FALSE} elements of test. 
#' @family normal_funcs #' @rdname ifelse #' @name ifelse @@ -3074,10 +3120,14 @@ setMethod("ifelse", #' @family window_funcs #' @aliases cume_dist,missing-method #' @export -#' @examples \dontrun{cume_dist()} +#' @examples \dontrun{ +#' df <- createDataFrame(iris) +#' ws <- orderBy(windowPartitionBy("Species"), "Sepal_Length") +#' out <- select(df, over(cume_dist(), ws), df$Sepal_Length, df$Species) +#' } #' @note cume_dist since 1.6.0 setMethod("cume_dist", - signature(x = "missing"), + signature("missing"), function() { jc <- callJStatic("org.apache.spark.sql.functions", "cume_dist") column(jc) @@ -3101,7 +3151,7 @@ setMethod("cume_dist", #' @examples \dontrun{dense_rank()} #' @note dense_rank since 1.6.0 setMethod("dense_rank", - signature(x = "missing"), + signature("missing"), function() { jc <- callJStatic("org.apache.spark.sql.functions", "dense_rank") column(jc) @@ -3115,6 +3165,11 @@ setMethod("dense_rank", #' #' This is equivalent to the LAG function in SQL. #' +#' @param x the column as a character string or a Column to compute on. +#' @param offset the number of rows back from the current row from which to obtain a value. +#' If not specified, the default is 1. +#' @param defaultValue default to use when the offset row does not exist. +#' @param ... further arguments to be passed to or from other methods. #' @rdname lag #' @name lag #' @aliases lag,characterOrColumn-method @@ -3143,7 +3198,7 @@ setMethod("lag", #' an `offset` of one will return the next row at any given point in the window partition. #' #' This is equivalent to the LEAD function in SQL. -#' +#' #' @param x Column to compute on #' @param offset Number of rows to offset #' @param defaultValue (Optional) default value to use @@ -3211,7 +3266,7 @@ setMethod("ntile", #' @examples \dontrun{percent_rank()} #' @note percent_rank since 1.6.0 setMethod("percent_rank", - signature(x = "missing"), + signature("missing"), function() { jc <- callJStatic("org.apache.spark.sql.functions", "percent_rank") column(jc) @@ -3243,6 +3298,8 @@ setMethod("rank", }) # Expose rank() in the R base package +#' @param x a numeric, complex, character or logical vector. +#' @param ... additional argument(s) passed to the method. #' @name rank #' @rdname rank #' @aliases rank,ANY-method @@ -3267,7 +3324,7 @@ setMethod("rank", #' @examples \dontrun{row_number()} #' @note row_number since 1.6.0 setMethod("row_number", - signature(x = "missing"), + signature("missing"), function() { jc <- callJStatic("org.apache.spark.sql.functions", "row_number") column(jc) @@ -3318,7 +3375,7 @@ setMethod("explode", #' size #' #' Returns length of array or map. -#' +#' #' @param x Column to compute on #' #' @rdname size diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 693aa31d3ecab..6610a25c8c05a 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -380,6 +380,9 @@ setGeneric("value", function(bcast) { standardGeneric("value") }) #################### SparkDataFrame Methods ######################## +#' @param x a SparkDataFrame or GroupedData. +#' @param ... further arguments to be passed to or from other methods. +#' @return A SparkDataFrame. #' @rdname summarize #' @export setGeneric("agg", function (x, ...) { standardGeneric("agg") }) @@ -407,6 +410,8 @@ setGeneric("cache", function(x) { standardGeneric("cache") }) #' @export setGeneric("collect", function(x, ...) { standardGeneric("collect") }) +#' @param do.NULL currently not used. +#' @param prefix currently not used. 
#' @rdname columns #' @export setGeneric("colnames", function(x, do.NULL = TRUE, prefix = "col") { standardGeneric("colnames") }) @@ -427,15 +432,24 @@ setGeneric("coltypes<-", function(x, value) { standardGeneric("coltypes<-") }) #' @export setGeneric("columns", function(x) {standardGeneric("columns") }) -#' @rdname nrow +#' @param x a GroupedData or Column. +#' @rdname count #' @export setGeneric("count", function(x) { standardGeneric("count") }) #' @rdname cov +#' @param x a Column object or a SparkDataFrame. +#' @param ... additional argument(s). If `x` is a Column object, a Column object +#' should be provided. If `x` is a SparkDataFrame, two column names should +#' be provided. #' @export setGeneric("cov", function(x, ...) {standardGeneric("cov") }) #' @rdname corr +#' @param x a Column object or a SparkDataFrame. +#' @param ... additional argument(s). If `x` is a Column object, a Column object +#' should be provided. If `x` is a SparkDataFrame, two column names should +#' be provided. #' @export setGeneric("corr", function(x, ...) {standardGeneric("corr") }) @@ -462,10 +476,14 @@ setGeneric("dapply", function(x, func, schema) { standardGeneric("dapply") }) #' @export setGeneric("dapplyCollect", function(x, func) { standardGeneric("dapplyCollect") }) +#' @param x a SparkDataFrame or GroupedData. +#' @param ... additional argument(s) passed to the method. #' @rdname gapply #' @export setGeneric("gapply", function(x, ...) { standardGeneric("gapply") }) +#' @param x a SparkDataFrame or GroupedData. +#' @param ... additional argument(s) passed to the method. #' @rdname gapplyCollect #' @export setGeneric("gapplyCollect", function(x, ...) { standardGeneric("gapplyCollect") }) @@ -667,8 +685,8 @@ setGeneric("selectExpr", function(x, expr, ...) { standardGeneric("selectExpr") #' @export setGeneric("showDF", function(x, ...) { standardGeneric("showDF") }) -# @rdname subset -# @export +#' @rdname subset +#' @export setGeneric("subset", function(x, ...) { standardGeneric("subset") }) #' @rdname summarize @@ -735,6 +753,8 @@ setGeneric("between", function(x, bounds) { standardGeneric("between") }) setGeneric("cast", function(x, dataType) { standardGeneric("cast") }) #' @rdname columnfunctions +#' @param x a Column object. +#' @param ... additional argument(s). #' @export setGeneric("contains", function(x, ...) { standardGeneric("contains") }) @@ -830,6 +850,8 @@ setGeneric("array_contains", function(x, value) { standardGeneric("array_contain #' @export setGeneric("ascii", function(x) { standardGeneric("ascii") }) +#' @param x Column to compute on or a GroupedData object. +#' @param ... additional argument(s) when `x` is a GroupedData object. #' @rdname avg #' @export setGeneric("avg", function(x, ...) { standardGeneric("avg") }) @@ -886,9 +908,10 @@ setGeneric("crc32", function(x) { standardGeneric("crc32") }) #' @export setGeneric("hash", function(x, ...) { standardGeneric("hash") }) +#' @param x empty. Should be used with no argument. #' @rdname cume_dist #' @export -setGeneric("cume_dist", function(x) { standardGeneric("cume_dist") }) +setGeneric("cume_dist", function(x = "missing") { standardGeneric("cume_dist") }) #' @rdname datediff #' @export @@ -918,9 +941,10 @@ setGeneric("dayofyear", function(x) { standardGeneric("dayofyear") }) #' @export setGeneric("decode", function(x, charset) { standardGeneric("decode") }) +#' @param x empty. Should be used with no argument. 
#' @rdname dense_rank #' @export -setGeneric("dense_rank", function(x) { standardGeneric("dense_rank") }) +setGeneric("dense_rank", function(x = "missing") { standardGeneric("dense_rank") }) #' @rdname encode #' @export @@ -1034,10 +1058,11 @@ setGeneric("md5", function(x) { standardGeneric("md5") }) #' @export setGeneric("minute", function(x) { standardGeneric("minute") }) +#' @param x empty. Should be used with no argument. #' @rdname monotonically_increasing_id #' @export setGeneric("monotonically_increasing_id", - function(x) { standardGeneric("monotonically_increasing_id") }) + function(x = "missing") { standardGeneric("monotonically_increasing_id") }) #' @rdname month #' @export @@ -1047,7 +1072,7 @@ setGeneric("month", function(x) { standardGeneric("month") }) #' @export setGeneric("months_between", function(y, x) { standardGeneric("months_between") }) -#' @rdname nrow +#' @rdname count #' @export setGeneric("n", function(x) { standardGeneric("n") }) @@ -1071,9 +1096,10 @@ setGeneric("ntile", function(x) { standardGeneric("ntile") }) #' @export setGeneric("n_distinct", function(x, ...) { standardGeneric("n_distinct") }) +#' @param x empty. Should be used with no argument. #' @rdname percent_rank #' @export -setGeneric("percent_rank", function(x) { standardGeneric("percent_rank") }) +setGeneric("percent_rank", function(x = "missing") { standardGeneric("percent_rank") }) #' @rdname pmod #' @export @@ -1114,11 +1140,12 @@ setGeneric("reverse", function(x) { standardGeneric("reverse") }) #' @rdname rint #' @export -setGeneric("rint", function(x, ...) { standardGeneric("rint") }) +setGeneric("rint", function(x) { standardGeneric("rint") }) +#' @param x empty. Should be used with no argument. #' @rdname row_number #' @export -setGeneric("row_number", function(x) { standardGeneric("row_number") }) +setGeneric("row_number", function(x = "missing") { standardGeneric("row_number") }) #' @rdname rpad #' @export @@ -1176,9 +1203,10 @@ setGeneric("sort_array", function(x, asc = TRUE) { standardGeneric("sort_array") #' @export setGeneric("soundex", function(x) { standardGeneric("soundex") }) +#' @param x empty. Should be used with no argument. #' @rdname spark_partition_id #' @export -setGeneric("spark_partition_id", function(x) { standardGeneric("spark_partition_id") }) +setGeneric("spark_partition_id", function(x = "missing") { standardGeneric("spark_partition_id") }) #' @rdname sd #' @export @@ -1276,10 +1304,16 @@ setGeneric("year", function(x) { standardGeneric("year") }) #' @export setGeneric("spark.glm", function(data, formula, ...) { standardGeneric("spark.glm") }) +#' @param x,y For \code{glm}: logical values indicating whether the response vector +#' and model matrix used in the fitting process should be returned as +#' components of the returned value. +#' @inheritParams stats::glm #' @rdname glm #' @export setGeneric("glm") +#' @param object a fitted ML model object. +#' @param ... additional argument(s) passed to the method. #' @rdname predict #' @export setGeneric("predict", function(object, ...) { standardGeneric("predict") }) @@ -1302,7 +1336,7 @@ setGeneric("spark.naiveBayes", function(data, formula, ...) { standardGeneric("s #' @rdname spark.survreg #' @export -setGeneric("spark.survreg", function(data, formula, ...) { standardGeneric("spark.survreg") }) +setGeneric("spark.survreg", function(data, formula) { standardGeneric("spark.survreg") }) #' @rdname spark.lda #' @param ... Additional parameters to tune LDA. 
@@ -1328,7 +1362,9 @@ setGeneric("spark.gaussianMixture", standardGeneric("spark.gaussianMixture") }) -#' write.ml +#' @param object a fitted ML model object. +#' @param path the directory where the model is saved. +#' @param ... additional argument(s) passed to the method. #' @rdname write.ml #' @export setGeneric("write.ml", function(object, path, ...) { standardGeneric("write.ml") }) diff --git a/R/pkg/R/group.R b/R/pkg/R/group.R index 85348ae76baa7..3c85ada91a444 100644 --- a/R/pkg/R/group.R +++ b/R/pkg/R/group.R @@ -59,8 +59,7 @@ setMethod("show", "GroupedData", #' Count the number of rows for each group. #' The resulting SparkDataFrame will also contain the grouping columns. #' -#' @param x a GroupedData -#' @return a SparkDataFrame +#' @return A SparkDataFrame. #' @rdname count #' @aliases count,GroupedData-method #' @export @@ -83,8 +82,6 @@ setMethod("count", #' df2 <- agg(df, = ) #' df2 <- agg(df, newColName = aggFunction(column)) #' -#' @param x a GroupedData -#' @return a SparkDataFrame #' @rdname summarize #' @aliases agg,GroupedData-method #' @name agg @@ -201,7 +198,6 @@ createMethods() #' gapply #' -#' @param x A GroupedData #' @rdname gapply #' @aliases gapply,GroupedData-method #' @name gapply @@ -216,7 +212,6 @@ setMethod("gapply", #' gapplyCollect #' -#' @param x A GroupedData #' @rdname gapplyCollect #' @aliases gapplyCollect,GroupedData-method #' @name gapplyCollect diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index 36f38fc73a510..9a53c80aecded 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -113,17 +113,18 @@ NULL #' Users can call \code{summary} to print a summary of the fitted model, \code{predict} to make #' predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. #' -#' @param data SparkDataFrame for training. -#' @param formula A symbolic description of the model to be fitted. Currently only a few formula +#' @param data a SparkDataFrame for training. +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula #' operators are supported, including '~', '.', ':', '+', and '-'. -#' @param family A description of the error distribution and link function to be used in the model. +#' @param family a description of the error distribution and link function to be used in the model. #' This can be a character string naming a family function, a family function or #' the result of a call to a family function. Refer R family at #' \url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/family.html}. -#' @param tol Positive convergence tolerance of iterations. -#' @param maxIter Integer giving the maximal number of IRLS iterations. -#' @param weightCol The weight column name. If this is not set or NULL, we treat all instance +#' @param weightCol the weight column name. If this is not set or NULL, we treat all instance #' weights as 1.0. +#' @param tol positive convergence tolerance of iterations. +#' @param maxIter integer giving the maximal number of IRLS iterations. +#' @param ... additional arguments passed to the method. #' @aliases spark.glm,SparkDataFrame,formula-method #' @return \code{spark.glm} returns a fitted generalized linear model #' @rdname spark.glm @@ -178,17 +179,17 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"), #' Generalized Linear Models (R-compliant) #' #' Fits a generalized linear model, similarly to R's glm(). -#' @param formula A symbolic description of the model to be fitted. 
Currently only a few formula +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula #' operators are supported, including '~', '.', ':', '+', and '-'. -#' @param data SparkDataFrame for training. -#' @param family A description of the error distribution and link function to be used in the model. +#' @param data a SparkDataFrame or R's glm data for training. +#' @param family a description of the error distribution and link function to be used in the model. #' This can be a character string naming a family function, a family function or #' the result of a call to a family function. Refer R family at #' \url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/family.html}. -#' @param epsilon Positive convergence tolerance of iterations. -#' @param maxit Integer giving the maximal number of IRLS iterations. -#' @param weightCol The weight column name. If this is not set or NULL, we treat all instance +#' @param weightCol the weight column name. If this is not set or NULL, we treat all instance #' weights as 1.0. +#' @param epsilon positive convergence tolerance of iterations. +#' @param maxit integer giving the maximal number of IRLS iterations. #' @return \code{glm} returns a fitted generalized linear model. #' @rdname glm #' @export @@ -209,7 +210,7 @@ setMethod("glm", signature(formula = "formula", family = "ANY", data = "SparkDat # Returns the summary of a model produced by glm() or spark.glm(), similarly to R's summary(). -#' @param object A fitted generalized linear model +#' @param object a fitted generalized linear model. #' @return \code{summary} returns a summary object of the fitted model, a list of components #' including at least the coefficients, null/residual deviance, null/residual degrees #' of freedom, AIC and number of iterations IRLS takes. @@ -250,7 +251,7 @@ setMethod("summary", signature(object = "GeneralizedLinearRegressionModel"), # Prints the summary of GeneralizedLinearRegressionModel #' @rdname spark.glm -#' @param x Summary object of fitted generalized linear model returned by \code{summary} function +#' @param x summary object of fitted generalized linear model returned by \code{summary} function #' @export #' @note print.summary.GeneralizedLinearRegressionModel since 2.0.0 print.summary.GeneralizedLinearRegressionModel <- function(x, ...) { @@ -282,7 +283,7 @@ print.summary.GeneralizedLinearRegressionModel <- function(x, ...) { # Makes predictions from a generalized linear model produced by glm() or spark.glm(), # similarly to R's predict(). -#' @param newData SparkDataFrame for testing +#' @param newData a SparkDataFrame for testing. #' @return \code{predict} returns a SparkDataFrame containing predicted labels in a column named #' "prediction" #' @rdname spark.glm @@ -296,7 +297,7 @@ setMethod("predict", signature(object = "GeneralizedLinearRegressionModel"), # Makes predictions from a naive Bayes model or a model produced by spark.naiveBayes(), # similarly to R package e1071's predict. -#' @param newData A SparkDataFrame for testing +#' @param newData a SparkDataFrame for testing. 
#' @return \code{predict} returns a SparkDataFrame containing predicted labeled in a column named #' "prediction" #' @rdname spark.naiveBayes @@ -309,9 +310,9 @@ setMethod("predict", signature(object = "NaiveBayesModel"), # Returns the summary of a naive Bayes model produced by \code{spark.naiveBayes} -#' @param object A naive Bayes model fitted by \code{spark.naiveBayes} +#' @param object a naive Bayes model fitted by \code{spark.naiveBayes}. #' @return \code{summary} returns a list containing \code{apriori}, the label distribution, and -#' \code{tables}, conditional probabilities given the target label +#' \code{tables}, conditional probabilities given the target label. #' @rdname spark.naiveBayes #' @export #' @note summary(NaiveBayesModel) since 2.0.0 @@ -491,7 +492,6 @@ setMethod("predict", signature(object = "IsotonicRegressionModel"), # Get the summary of an IsotonicRegressionModel model -#' @param object a fitted IsotonicRegressionModel #' @param ... Other optional arguments to summary of an IsotonicRegressionModel #' @return \code{summary} returns the model's boundaries and prediction as lists #' @rdname spark.isoreg @@ -512,14 +512,15 @@ setMethod("summary", signature(object = "IsotonicRegressionModel"), #' Users can call \code{summary} to print a summary of the fitted model, \code{predict} to make #' predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. #' -#' @param data SparkDataFrame for training -#' @param formula A symbolic description of the model to be fitted. Currently only a few formula +#' @param data a SparkDataFrame for training. +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula #' operators are supported, including '~', '.', ':', '+', and '-'. #' Note that the response variable of formula is empty in spark.kmeans. -#' @param k Number of centers -#' @param maxIter Maximum iteration number -#' @param initMode The initialization algorithm choosen to fit the model -#' @return \code{spark.kmeans} returns a fitted k-means model +#' @param k number of centers. +#' @param maxIter maximum iteration number. +#' @param initMode the initialization algorithm choosen to fit the model. +#' @param ... additional argument(s) passed to the method. +#' @return \code{spark.kmeans} returns a fitted k-means model. #' @rdname spark.kmeans #' @aliases spark.kmeans,SparkDataFrame,formula-method #' @name spark.kmeans @@ -560,8 +561,11 @@ setMethod("spark.kmeans", signature(data = "SparkDataFrame", formula = "formula" #' Get fitted result from a k-means model, similarly to R's fitted(). #' Note: A saved-loaded model does not support this method. #' -#' @param object A fitted k-means model -#' @return \code{fitted} returns a SparkDataFrame containing fitted values +#' @param object a fitted k-means model. +#' @param method type of fitted results, \code{"centers"} for cluster centers +#' or \code{"classes"} for assigned classes. +#' @param ... additional argument(s) passed to the method. +#' @return \code{fitted} returns a SparkDataFrame containing fitted values. #' @rdname fitted #' @export #' @examples @@ -585,8 +589,8 @@ setMethod("fitted", signature(object = "KMeansModel"), # Get the summary of a k-means model -#' @param object A fitted k-means model -#' @return \code{summary} returns the model's coefficients, size and cluster +#' @param object a fitted k-means model. +#' @return \code{summary} returns the model's coefficients, size and cluster. 
#' @rdname spark.kmeans #' @export #' @note summary(KMeansModel) since 2.0.0 @@ -612,7 +616,8 @@ setMethod("summary", signature(object = "KMeansModel"), # Predicted values based on a k-means model -#' @return \code{predict} returns the predicted values based on a k-means model +#' @param newData a SparkDataFrame for testing. +#' @return \code{predict} returns the predicted values based on a k-means model. #' @rdname spark.kmeans #' @export #' @note predict(KMeansModel) since 2.0.0 @@ -628,11 +633,12 @@ setMethod("predict", signature(object = "KMeansModel"), #' predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. #' Only categorical data is supported. #' -#' @param data A \code{SparkDataFrame} of observations and labels for model fitting -#' @param formula A symbolic description of the model to be fitted. Currently only a few formula +#' @param data a \code{SparkDataFrame} of observations and labels for model fitting. +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula #' operators are supported, including '~', '.', ':', '+', and '-'. -#' @param smoothing Smoothing parameter -#' @return \code{spark.naiveBayes} returns a fitted naive Bayes model +#' @param smoothing smoothing parameter. +#' @param ... additional argument(s) passed to the method. Currently only \code{smoothing}. +#' @return \code{spark.naiveBayes} returns a fitted naive Bayes model. #' @rdname spark.naiveBayes #' @aliases spark.naiveBayes,SparkDataFrame,formula-method #' @name spark.naiveBayes @@ -668,8 +674,8 @@ setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "form # Saves the Bernoulli naive Bayes model to the input path. -#' @param path The directory where the model is saved -#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE +#' @param path the directory where the model is saved +#' @param overwrite overwrites or not if the output path already exists. Default is FALSE #' which means throw exception if the output path exists. #' #' @rdname spark.naiveBayes @@ -687,10 +693,9 @@ setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"), # Saves the AFT survival regression model to the input path. -#' @param path The directory where the model is saved -#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE +#' @param path the directory where the model is saved. +#' @param overwrite overwrites or not if the output path already exists. Default is FALSE #' which means throw exception if the output path exists. -#' #' @rdname spark.survreg #' @export #' @note write.ml(AFTSurvivalRegressionModel, character) since 2.0.0 @@ -706,8 +711,8 @@ setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = "c # Saves the generalized linear model to the input path. -#' @param path The directory where the model is saved -#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE +#' @param path the directory where the model is saved. +#' @param overwrite overwrites or not if the output path already exists. Default is FALSE #' which means throw exception if the output path exists. #' #' @rdname spark.glm @@ -724,8 +729,8 @@ setMethod("write.ml", signature(object = "GeneralizedLinearRegressionModel", pat # Save fitted MLlib model to the input path -#' @param path The directory where the model is saved -#' @param overwrite Overwrites or not if the output path already exists. 
Default is FALSE +#' @param path the directory where the model is saved. +#' @param overwrite overwrites or not if the output path already exists. Default is FALSE #' which means throw exception if the output path exists. #' #' @rdname spark.kmeans @@ -780,8 +785,8 @@ setMethod("write.ml", signature(object = "GaussianMixtureModel", path = "charact #' Load a fitted MLlib model from the input path. #' -#' @param path Path of the model to read. -#' @return a fitted MLlib model +#' @param path path of the model to read. +#' @return A fitted MLlib model. #' @rdname read.ml #' @name read.ml #' @export @@ -823,11 +828,11 @@ read.ml <- function(path) { #' \code{predict} to make predictions on new data, and \code{write.ml}/\code{read.ml} to #' save/load fitted models. #' -#' @param data A SparkDataFrame for training -#' @param formula A symbolic description of the model to be fitted. Currently only a few formula +#' @param data a SparkDataFrame for training. +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula #' operators are supported, including '~', ':', '+', and '-'. -#' Note that operator '.' is not supported currently -#' @return \code{spark.survreg} returns a fitted AFT survival regression model +#' Note that operator '.' is not supported currently. +#' @return \code{spark.survreg} returns a fitted AFT survival regression model. #' @rdname spark.survreg #' @seealso survival: \url{https://cran.r-project.org/web/packages/survival/} #' @export @@ -851,7 +856,7 @@ read.ml <- function(path) { #' } #' @note spark.survreg since 2.0.0 setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = "formula"), - function(data, formula, ...) { + function(data, formula) { formula <- paste(deparse(formula), collapse = "") jobj <- callJStatic("org.apache.spark.ml.r.AFTSurvivalRegressionWrapper", "fit", formula, data@sdf) @@ -927,14 +932,14 @@ setMethod("spark.lda", signature(data = "SparkDataFrame"), # Returns a summary of the AFT survival regression model produced by spark.survreg, # similarly to R's summary(). -#' @param object A fitted AFT survival regression model +#' @param object a fitted AFT survival regression model. #' @return \code{summary} returns a list containing the model's coefficients, #' intercept and log(scale) #' @rdname spark.survreg #' @export #' @note summary(AFTSurvivalRegressionModel) since 2.0.0 setMethod("summary", signature(object = "AFTSurvivalRegressionModel"), - function(object, ...) { + function(object) { jobj <- object@jobj features <- callJMethod(jobj, "rFeatures") coefficients <- callJMethod(jobj, "rCoefficients") @@ -947,9 +952,9 @@ setMethod("summary", signature(object = "AFTSurvivalRegressionModel"), # Makes predictions from an AFT survival regression model or a model produced by # spark.survreg, similarly to R package survival's predict. -#' @param newData A SparkDataFrame for testing +#' @param newData a SparkDataFrame for testing. #' @return \code{predict} returns a SparkDataFrame containing predicted values -#' on the original scale of the data (mean predicted value at scale = 1.0) +#' on the original scale of the data (mean predicted value at scale = 1.0). #' @rdname spark.survreg #' @export #' @note predict(AFTSurvivalRegressionModel) since 2.0.0 diff --git a/R/pkg/R/schema.R b/R/pkg/R/schema.R index b429f5de13b87..cb5bdb90175bf 100644 --- a/R/pkg/R/schema.R +++ b/R/pkg/R/schema.R @@ -92,8 +92,9 @@ print.structType <- function(x, ...) 
{ #' #' Create a structField object that contains the metadata for a single field in a schema. #' -#' @param x The name of the field -#' @return a structField object +#' @param x the name of the field. +#' @param ... additional argument(s) passed to the method. +#' @return A structField object. #' @rdname structField #' @export #' @examples diff --git a/R/pkg/R/sparkR.R b/R/pkg/R/sparkR.R index f8bdee739ef02..85815af1f3639 100644 --- a/R/pkg/R/sparkR.R +++ b/R/pkg/R/sparkR.R @@ -320,14 +320,15 @@ sparkRHive.init <- function(jsc = NULL) { #' For details on how to initialize and use SparkR, refer to SparkR programming guide at #' \url{http://spark.apache.org/docs/latest/sparkr.html#starting-up-sparksession}. #' -#' @param master The Spark master URL -#' @param appName Application name to register with cluster manager -#' @param sparkHome Spark Home directory -#' @param sparkConfig Named list of Spark configuration to set on worker nodes -#' @param sparkJars Character vector of jar files to pass to the worker nodes -#' @param sparkPackages Character vector of packages from spark-packages.org -#' @param enableHiveSupport Enable support for Hive, fallback if not built with Hive support; once +#' @param master the Spark master URL. +#' @param appName application name to register with cluster manager. +#' @param sparkHome Spark Home directory. +#' @param sparkConfig named list of Spark configuration to set on worker nodes. +#' @param sparkJars character vector of jar files to pass to the worker nodes. +#' @param sparkPackages character vector of packages from spark-packages.org +#' @param enableHiveSupport enable support for Hive, fallback if not built with Hive support; once #' set, this cannot be turned off on an existing session +#' @param ... named Spark properties passed to the method. #' @export #' @examples #'\dontrun{ @@ -413,9 +414,9 @@ sparkR.session <- function( #' Assigns a group ID to all the jobs started by this thread until the group ID is set to a #' different value or cleared. #' -#' @param groupid the ID to be assigned to job groups -#' @param description description for the job group ID -#' @param interruptOnCancel flag to indicate if the job is interrupted on job cancellation +#' @param groupId the ID to be assigned to job groups. +#' @param description description for the job group ID. +#' @param interruptOnCancel flag to indicate if the job is interrupted on job cancellation. #' @rdname setJobGroup #' @name setJobGroup #' @examples diff --git a/R/pkg/R/stats.R b/R/pkg/R/stats.R index 2b4ce195cbddb..8ea24d81729ec 100644 --- a/R/pkg/R/stats.R +++ b/R/pkg/R/stats.R @@ -25,6 +25,7 @@ setOldClass("jobj") #' table. The number of distinct values for each column should be less than 1e4. At most 1e6 #' non-zero pair frequencies will be returned. #' +#' @param x a SparkDataFrame #' @param col1 name of the first column. Distinct items will make the first item of each row. #' @param col2 name of the second column. Distinct items will make the column names of the output. #' @return a local R data.frame representing the contingency table. The first column of each row @@ -53,10 +54,9 @@ setMethod("crosstab", #' Calculate the sample covariance of two numerical columns of a SparkDataFrame. #' -#' @param x A SparkDataFrame -#' @param col1 the name of the first column -#' @param col2 the name of the second column -#' @return the covariance of the two columns. 
+#' @param colName1 the name of the first column +#' @param colName2 the name of the second column +#' @return The covariance of the two columns. #' #' @rdname cov #' @name cov @@ -71,19 +71,18 @@ setMethod("crosstab", #' @note cov since 1.6.0 setMethod("cov", signature(x = "SparkDataFrame"), - function(x, col1, col2) { - stopifnot(class(col1) == "character" && class(col2) == "character") + function(x, colName1, colName2) { + stopifnot(class(colName1) == "character" && class(colName2) == "character") statFunctions <- callJMethod(x@sdf, "stat") - callJMethod(statFunctions, "cov", col1, col2) + callJMethod(statFunctions, "cov", colName1, colName2) }) #' Calculates the correlation of two columns of a SparkDataFrame. #' Currently only supports the Pearson Correlation Coefficient. #' For Spearman Correlation, consider using RDD methods found in MLlib's Statistics. #' -#' @param x A SparkDataFrame -#' @param col1 the name of the first column -#' @param col2 the name of the second column +#' @param colName1 the name of the first column +#' @param colName2 the name of the second column #' @param method Optional. A character specifying the method for calculating the correlation. #' only "pearson" is allowed now. #' @return The Pearson Correlation Coefficient as a Double. @@ -102,10 +101,10 @@ setMethod("cov", #' @note corr since 1.6.0 setMethod("corr", signature(x = "SparkDataFrame"), - function(x, col1, col2, method = "pearson") { - stopifnot(class(col1) == "character" && class(col2) == "character") + function(x, colName1, colName2, method = "pearson") { + stopifnot(class(colName1) == "character" && class(colName2) == "character") statFunctions <- callJMethod(x@sdf, "stat") - callJMethod(statFunctions, "corr", col1, col2, method) + callJMethod(statFunctions, "corr", colName1, colName2, method) }) From 3e5fdeb3fb084cc9d25ce2f3f8cbf07a0aa2c573 Mon Sep 17 00:00:00 2001 From: "wm624@hotmail.com" Date: Sat, 20 Aug 2016 07:00:51 -0700 Subject: [PATCH 038/270] [SPARKR][EXAMPLE] change example APP name ## What changes were proposed in this pull request? In the R SQL example the appName is "MyApp", while the Scala, Java and Python examples use "x Spark SQL basic example". This change makes the R example consistent with the other examples. ## How was this patch tested? Manual test. Author: wm624@hotmail.com Closes #14703 from wangmiao1981/example. --- examples/src/main/r/RSparkSQLExample.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/src/main/r/RSparkSQLExample.R b/examples/src/main/r/RSparkSQLExample.R index de489e1bda2c3..4e0267a03851b 100644 --- a/examples/src/main/r/RSparkSQLExample.R +++ b/examples/src/main/r/RSparkSQLExample.R @@ -18,7 +18,7 @@ library(SparkR) # $example on:init_session$ -sparkR.session(appName = "MyApp", sparkConfig = list(spark.some.config.option = "some-value")) +sparkR.session(appName = "R Spark SQL basic example", sparkConfig = list(spark.some.config.option = "some-value")) # $example off:init_session$ From 31a015572024046f4deaa6cec66bb6fab110f31d Mon Sep 17 00:00:00 2001 From: Liang-Chi Hsieh Date: Sat, 20 Aug 2016 23:29:48 +0800 Subject: [PATCH 039/270] [SPARK-17104][SQL] LogicalRelation.newInstance should follow the semantics of MultiInstanceRelation ## What changes were proposed in this pull request?
Currently `LogicalRelation.newInstance()` simply creates another `LogicalRelation` object with the same parameters. However, the `newInstance()` method inherited from `MultiInstanceRelation` should return a copy of object with unique expression ids. Current `LogicalRelation.newInstance()` can cause failure when doing self-join. ## How was this patch tested? Jenkins tests. Author: Liang-Chi Hsieh Closes #14682 from viirya/fix-localrelation. --- .../sql/execution/datasources/LogicalRelation.scala | 11 +++++++++-- .../org/apache/spark/sql/hive/parquetSuites.scala | 7 +++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala index 90711f2b1dde4..2a8e147011f55 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala @@ -79,11 +79,18 @@ case class LogicalRelation( /** Used to lookup original attribute capitalization */ val attributeMap: AttributeMap[AttributeReference] = AttributeMap(output.map(o => (o, o))) - def newInstance(): this.type = + /** + * Returns a new instance of this LogicalRelation. According to the semantics of + * MultiInstanceRelation, this method returns a copy of this object with + * unique expression ids. We respect the `expectedOutputAttributes` and create + * new instances of attributes in it. + */ + override def newInstance(): this.type = { LogicalRelation( relation, - expectedOutputAttributes, + expectedOutputAttributes.map(_.map(_.newInstance())), metastoreTableIdentifier).asInstanceOf[this.type] + } override def refresh(): Unit = relation match { case fs: HadoopFsRelation => fs.refresh() diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala index 31b6197d56fc7..e92bbdea75a7b 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala @@ -589,6 +589,13 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest { } } } + + test("self-join") { + val table = spark.table("normal_parquet") + val selfJoin = table.as("t1").join(table.as("t2")) + checkAnswer(selfJoin, + sql("SELECT * FROM normal_parquet x JOIN normal_parquet y")) + } } /** From 9560c8d29542a5dcaaa07b7af9ef5ddcdbb5d14d Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Sun, 21 Aug 2016 00:25:55 +0800 Subject: [PATCH 040/270] [SPARK-17124][SQL] RelationalGroupedDataset.agg should preserve order and allow multiple aggregates per column ## What changes were proposed in this pull request? This patch fixes a longstanding issue with one of the RelationalGroupedDataset.agg function. Even though the signature accepts vararg of pairs, the underlying implementation turns the seq into a map, and thus not order preserving nor allowing multiple aggregates per column. This change also allows users to use this function to run multiple different aggregations for a single column, e.g. ``` agg("age" -> "max", "age" -> "count") ``` ## How was this patch tested? Added a test case in DataFrameAggregateSuite. Author: petermaxlee Closes #14697 from petermaxlee/SPARK-17124. 
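For illustration only (not part of the patch), a minimal sketch in the spirit of the added DataFrameAggregateSuite test; the toy data and column names below are made up:

```scala
import org.apache.spark.sql.SparkSession

object MultiAggSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("multi-agg-sketch").getOrCreate()
    import spark.implicits._

    // Hypothetical toy data: the same "age" column aggregated several ways.
    val df = Seq(("a", 1), ("a", 5), ("b", 2)).toDF("key", "age")

    // With this change the result columns keep the order of the pairs, and the
    // same input column may appear with several different aggregate functions.
    val agged = df.groupBy("key").agg("age" -> "max", "age" -> "count", "age" -> "min")
    agged.show()
    // Expected columns: key, max(age), count(age), min(age)

    spark.stop()
  }
}
```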
--- .../apache/spark/sql/RelationalGroupedDataset.scala | 6 ++++-- .../org/apache/spark/sql/DataFrameAggregateSuite.scala | 10 ++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala index 7cfd1cdc7d5d1..53d732403f979 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala @@ -128,7 +128,7 @@ class RelationalGroupedDataset protected[sql]( } /** - * (Scala-specific) Compute aggregates by specifying a map from column name to + * (Scala-specific) Compute aggregates by specifying the column names and * aggregate methods. The resulting [[DataFrame]] will also contain the grouping columns. * * The available aggregate methods are `avg`, `max`, `min`, `sum`, `count`. @@ -143,7 +143,9 @@ class RelationalGroupedDataset protected[sql]( * @since 1.3.0 */ def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = { - agg((aggExpr +: aggExprs).toMap) + toDF((aggExpr +: aggExprs).map { case (colName, expr) => + strToExpr(expr)(df(colName).expr) + }) } /** diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala index 92aa7b95434dc..69a3b5f278fd8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala @@ -87,6 +87,16 @@ class DataFrameAggregateSuite extends QueryTest with SharedSQLContext { ) } + test("SPARK-17124 agg should be ordering preserving") { + val df = spark.range(2) + val ret = df.groupBy("id").agg("id" -> "sum", "id" -> "count", "id" -> "min") + assert(ret.schema.map(_.name) == Seq("id", "sum(id)", "count(id)", "min(id)")) + checkAnswer( + ret, + Row(0, 0, 1, 0) :: Row(1, 1, 1, 1) :: Nil + ) + } + test("rollup") { checkAnswer( courseSales.rollup("course", "year").sum("earnings"), From 9f37d4eac28dd179dd523fa7d645be97bb52af9c Mon Sep 17 00:00:00 2001 From: Bryan Cutler Date: Sat, 20 Aug 2016 13:45:26 -0700 Subject: [PATCH 041/270] [SPARK-12666][CORE] SparkSubmit packages fix for when 'default' conf doesn't exist in dependent module ## What changes were proposed in this pull request? Adding a "(runtime)" to the dependency configuration will set a fallback configuration to be used if the requested one is not found. E.g. with the setting "default(runtime)", Ivy will look for the conf "default" in the module ivy file and if not found will look for the conf "runtime". This can help with the case when using "sbt publishLocal" which does not write a "default" conf in the published ivy.xml file. ## How was this patch tested? used spark-submit with --packages option for a package published locally with no default conf, and a package resolved from Maven central. Author: Bryan Cutler Closes #13428 from BryanCutler/fallback-package-conf-SPARK-12666. 
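For illustration only (not part of the patch), a minimal sketch of what the Ivy fallback-configuration syntax does, mirroring how SparkSubmitUtils builds its dependency descriptors; the module coordinates are hypothetical:

```scala
import org.apache.ivy.core.module.descriptor.DefaultDependencyDescriptor
import org.apache.ivy.core.module.id.ModuleRevisionId

object FallbackConfSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical coordinates for an artifact published with `sbt publishLocal`,
    // whose ivy.xml typically declares no "default" configuration.
    val ri = ModuleRevisionId.newInstance("com.example", "my-lib_2.11", "0.1.0-SNAPSHOT")
    val dd = new DefaultDependencyDescriptor(ri, false, false)

    // "default(runtime)" asks Ivy for the dependency's "default" conf and, if the
    // module does not declare one, falls back to its "runtime" conf instead of
    // failing the resolution.
    dd.addDependencyConfiguration("default", "default(runtime)")

    println(dd.getDependencyConfigurations("default").mkString(", "))
  }
}
```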
--- .../scala/org/apache/spark/deploy/SparkSubmit.scala | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala index 9feafc99ac07f..7b6d5a394bc35 100644 --- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala +++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala @@ -897,9 +897,12 @@ private[spark] object SparkSubmitUtils { val localIvyRoot = new File(ivySettings.getDefaultIvyUserDir, "local") localIvy.setLocal(true) localIvy.setRepository(new FileRepository(localIvyRoot)) - val ivyPattern = Seq("[organisation]", "[module]", "[revision]", "[type]s", - "[artifact](-[classifier]).[ext]").mkString(File.separator) - localIvy.addIvyPattern(localIvyRoot.getAbsolutePath + File.separator + ivyPattern) + val ivyPattern = Seq(localIvyRoot.getAbsolutePath, "[organisation]", "[module]", "[revision]", + "ivys", "ivy.xml").mkString(File.separator) + localIvy.addIvyPattern(ivyPattern) + val artifactPattern = Seq(localIvyRoot.getAbsolutePath, "[organisation]", "[module]", + "[revision]", "[type]s", "[artifact](-[classifier]).[ext]").mkString(File.separator) + localIvy.addArtifactPattern(artifactPattern) localIvy.setName("local-ivy-cache") cr.add(localIvy) @@ -944,7 +947,7 @@ private[spark] object SparkSubmitUtils { artifacts.foreach { mvn => val ri = ModuleRevisionId.newInstance(mvn.groupId, mvn.artifactId, mvn.version) val dd = new DefaultDependencyDescriptor(ri, false, false) - dd.addDependencyConfiguration(ivyConfName, ivyConfName) + dd.addDependencyConfiguration(ivyConfName, ivyConfName + "(runtime)") // scalastyle:off println printStream.println(s"${dd.getDependencyId} added as a dependency") // scalastyle:on println From 61ef74f2272faa7ce8f2badc7e00039908e3551f Mon Sep 17 00:00:00 2001 From: hqzizania Date: Sat, 20 Aug 2016 18:52:44 -0700 Subject: [PATCH 042/270] [SPARK-17090][ML] Make tree aggregation level in linear/logistic regression configurable ## What changes were proposed in this pull request? Linear/logistic regression use treeAggregate with a default depth (always = 2) for collecting coefficient gradient updates to the driver. For high-dimensional problems, this can cause an OOM error on the driver. This patch makes the aggregation depth configurable to avoid this problem when users' input data has many features. It adds a HasAggregationDepth API in `sharedParams.scala`, and extends it to both linear regression and logistic regression in .ml Author: hqzizania Closes #14717 from hqzizania/SPARK-17090.
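For illustration only (not part of the patch), a minimal usage sketch of the new expert parameter; it assumes the sample_libsvm_data.txt dataset shipped with Spark and otherwise made-up settings:

```scala
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.sql.SparkSession

object AggregationDepthSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("aggregation-depth-sketch").getOrCreate()

    // Assumed input: a wide, sparse dataset such as Spark's bundled sample_libsvm_data.txt.
    val training = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    val lr = new LogisticRegression()
      .setMaxIter(10)
      .setRegParam(0.01)
      // A deeper aggregation tree adds shuffle rounds but keeps the partial
      // gradients sent to the driver smaller, which helps avoid driver OOM
      // when the feature dimension (and hence the gradient vector) is very large.
      .setAggregationDepth(3)

    val model = lr.fit(training)
    println(s"Number of coefficients: ${model.coefficients.size}")

    spark.stop()
  }
}
```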
--- .../classification/LogisticRegression.scala | 24 +++++++++++++----- .../MultinomialLogisticRegression.scala | 16 ++++++++++-- .../ml/param/shared/SharedParamsCodeGen.scala | 4 ++- .../spark/ml/param/shared/sharedParams.scala | 25 ++++++++++++++++--- .../ml/regression/LinearRegression.scala | 22 +++++++++++++--- 5 files changed, 74 insertions(+), 17 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala index ea31c68e4c943..757d52052d87f 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala @@ -48,7 +48,7 @@ import org.apache.spark.storage.StorageLevel */ private[classification] trait LogisticRegressionParams extends ProbabilisticClassifierParams with HasRegParam with HasElasticNetParam with HasMaxIter with HasFitIntercept with HasTol - with HasStandardization with HasWeightCol with HasThreshold { + with HasStandardization with HasWeightCol with HasThreshold with HasAggregationDepth { /** * Set threshold in binary classification, in range [0, 1]. @@ -256,6 +256,17 @@ class LogisticRegression @Since("1.2.0") ( @Since("1.5.0") override def getThresholds: Array[Double] = super.getThresholds + /** + * Suggested depth for treeAggregate (>= 2). + * If the dimensions of features or the number of partitions are large, + * this param could be adjusted to a larger size. + * Default is 2. + * @group expertSetParam + */ + @Since("2.1.0") + def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value) + setDefault(aggregationDepth -> 2) + private var optInitialModel: Option[LogisticRegressionModel] = None /** @group setParam */ @@ -294,7 +305,8 @@ class LogisticRegression @Since("1.2.0") ( (c1._1.merge(c2._1), c1._2.merge(c2._2)) instances.treeAggregate( - new MultivariateOnlineSummarizer, new MultiClassSummarizer)(seqOp, combOp) + new MultivariateOnlineSummarizer, new MultiClassSummarizer + )(seqOp, combOp, $(aggregationDepth)) } val histogram = labelSummarizer.histogram @@ -358,7 +370,7 @@ class LogisticRegression @Since("1.2.0") ( val bcFeaturesStd = instances.context.broadcast(featuresStd) val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept), - $(standardization), bcFeaturesStd, regParamL2, multinomial = false) + $(standardization), bcFeaturesStd, regParamL2, multinomial = false, $(aggregationDepth)) val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) { new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol)) @@ -1331,8 +1343,8 @@ private class LogisticCostFun( standardization: Boolean, bcFeaturesStd: Broadcast[Array[Double]], regParamL2: Double, - multinomial: Boolean) extends DiffFunction[BDV[Double]] { - + multinomial: Boolean, + aggregationDepth: Int) extends DiffFunction[BDV[Double]] { override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = { val coeffs = Vectors.fromBreeze(coefficients) @@ -1347,7 +1359,7 @@ private class LogisticCostFun( instances.treeAggregate( new LogisticAggregator(bcCoeffs, bcFeaturesStd, numClasses, fitIntercept, multinomial) - )(seqOp, combOp) + )(seqOp, combOp, aggregationDepth) } val totalGradientArray = logisticAggregator.gradient.toArray diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala index 
dfadd68c5f476..f85ac76a8d129 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala @@ -44,7 +44,8 @@ import org.apache.spark.storage.StorageLevel */ private[classification] trait MultinomialLogisticRegressionParams extends ProbabilisticClassifierParams with HasRegParam with HasElasticNetParam with HasMaxIter - with HasFitIntercept with HasTol with HasStandardization with HasWeightCol { + with HasFitIntercept with HasTol with HasStandardization with HasWeightCol + with HasAggregationDepth { /** * Set thresholds in multiclass (or binary) classification to adjust the probability of @@ -163,6 +164,17 @@ class MultinomialLogisticRegression @Since("2.1.0") ( @Since("2.1.0") override def setThresholds(value: Array[Double]): this.type = super.setThresholds(value) + /** + * Suggested depth for treeAggregate (>= 2). + * If the dimensions of features or the number of partitions are large, + * this param could be adjusted to a larger size. + * Default is 2. + * @group expertSetParam + */ + @Since("2.1.0") + def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value) + setDefault(aggregationDepth -> 2) + override protected[spark] def train(dataset: Dataset[_]): MultinomialLogisticRegressionModel = { val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol)) val instances: RDD[Instance] = @@ -245,7 +257,7 @@ class MultinomialLogisticRegression @Since("2.1.0") ( val bcFeaturesStd = instances.context.broadcast(featuresStd) val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept), - $(standardization), bcFeaturesStd, regParamL2, multinomial = true) + $(standardization), bcFeaturesStd, regParamL2, multinomial = true, $(aggregationDepth)) val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) { new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol)) diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala index 4ab0c16a1b4d0..0f48a16a429ff 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala @@ -78,7 +78,9 @@ private[shared] object SharedParamsCodeGen { ParamDesc[String]("weightCol", "weight column name. If this is not set or empty, we treat " + "all instance weights as 1.0"), ParamDesc[String]("solver", "the solver algorithm for optimization. 
If this is not set or " + - "empty, default value is 'auto'", Some("\"auto\""))) + "empty, default value is 'auto'", Some("\"auto\"")), + ParamDesc[Int]("aggregationDepth", "suggested depth for treeAggregate (>= 2)", Some("2"), + isValid = "ParamValidators.gtEq(2)")) val code = genSharedParams(params) val file = "src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala" diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala index 64d6af2766ca9..6803772c63d62 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala @@ -334,10 +334,10 @@ private[ml] trait HasElasticNetParam extends Params { private[ml] trait HasTol extends Params { /** - * Param for the convergence tolerance for iterative algorithms. + * Param for the convergence tolerance for iterative algorithms (>= 0). * @group param */ - final val tol: DoubleParam = new DoubleParam(this, "tol", "the convergence tolerance for iterative algorithms") + final val tol: DoubleParam = new DoubleParam(this, "tol", "the convergence tolerance for iterative algorithms (>= 0)", ParamValidators.gtEq(0)) /** @group getParam */ final def getTol: Double = $(tol) @@ -349,10 +349,10 @@ private[ml] trait HasTol extends Params { private[ml] trait HasStepSize extends Params { /** - * Param for Step size to be used for each iteration of optimization. + * Param for Step size to be used for each iteration of optimization (> 0). * @group param */ - final val stepSize: DoubleParam = new DoubleParam(this, "stepSize", "Step size to be used for each iteration of optimization") + final val stepSize: DoubleParam = new DoubleParam(this, "stepSize", "Step size to be used for each iteration of optimization (> 0)", ParamValidators.gt(0)) /** @group getParam */ final def getStepSize: Double = $(stepSize) @@ -389,4 +389,21 @@ private[ml] trait HasSolver extends Params { /** @group getParam */ final def getSolver: String = $(solver) } + +/** + * Trait for shared param aggregationDepth (default: 2). + */ +private[ml] trait HasAggregationDepth extends Params { + + /** + * Param for suggested depth for treeAggregate (>= 2). + * @group param + */ + final val aggregationDepth: IntParam = new IntParam(this, "aggregationDepth", "suggested depth for treeAggregate (>= 2)", ParamValidators.gtEq(2)) + + setDefault(aggregationDepth, 2) + + /** @group getParam */ + final def getAggregationDepth: Int = $(aggregationDepth) +} // scalastyle:on diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala index 76be4204e9050..b1bb9b9fe0058 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala @@ -53,6 +53,7 @@ import org.apache.spark.storage.StorageLevel private[regression] trait LinearRegressionParams extends PredictorParams with HasRegParam with HasElasticNetParam with HasMaxIter with HasTol with HasFitIntercept with HasStandardization with HasWeightCol with HasSolver + with HasAggregationDepth /** * Linear regression. 
@@ -172,6 +173,17 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String def setSolver(value: String): this.type = set(solver, value) setDefault(solver -> "auto") + /** + * Suggested depth for treeAggregate (>= 2). + * If the dimensions of features or the number of partitions are large, + * this param could be adjusted to a larger size. + * Default is 2. + * @group expertSetParam + */ + @Since("2.1.0") + def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value) + setDefault(aggregationDepth -> 2) + override protected def train(dataset: Dataset[_]): LinearRegressionModel = { // Extract the number of features before deciding optimization solver. val numFeatures = dataset.select(col($(featuresCol))).first().getAs[Vector](0).size @@ -230,7 +242,8 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String (c1._1.merge(c2._1), c1._2.merge(c2._2)) instances.treeAggregate( - new MultivariateOnlineSummarizer, new MultivariateOnlineSummarizer)(seqOp, combOp) + new MultivariateOnlineSummarizer, new MultivariateOnlineSummarizer + )(seqOp, combOp, $(aggregationDepth)) } val yMean = ySummarizer.mean(0) @@ -296,7 +309,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String val effectiveL2RegParam = (1.0 - $(elasticNetParam)) * effectiveRegParam val costFun = new LeastSquaresCostFun(instances, yStd, yMean, $(fitIntercept), - $(standardization), bcFeaturesStd, bcFeaturesMean, effectiveL2RegParam) + $(standardization), bcFeaturesStd, bcFeaturesMean, effectiveL2RegParam, $(aggregationDepth)) val optimizer = if ($(elasticNetParam) == 0.0 || effectiveRegParam == 0.0) { new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol)) @@ -1016,7 +1029,8 @@ private class LeastSquaresCostFun( standardization: Boolean, bcFeaturesStd: Broadcast[Array[Double]], bcFeaturesMean: Broadcast[Array[Double]], - effectiveL2regParam: Double) extends DiffFunction[BDV[Double]] { + effectiveL2regParam: Double, + aggregationDepth: Int) extends DiffFunction[BDV[Double]] { override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = { val coeffs = Vectors.fromBreeze(coefficients) @@ -1029,7 +1043,7 @@ private class LeastSquaresCostFun( instances.treeAggregate( new LeastSquaresAggregator(bcCoeffs, labelStd, labelMean, fitIntercept, bcFeaturesStd, - bcFeaturesMean))(seqOp, combOp) + bcFeaturesMean))(seqOp, combOp, aggregationDepth) } val totalGradientArray = leastSquaresAggregator.gradient.toArray From 7f08a60b6e9acb89482fa0e268b192250d9ba6e4 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Sun, 21 Aug 2016 02:23:31 -0700 Subject: [PATCH 043/270] [SPARK-16961][FOLLOW-UP][SPARKR] More robust test case for spark.gaussianMixture. ## What changes were proposed in this pull request? #14551 fixed off-by-one bug in ```randomizeInPlace``` and some test failure caused by this fix. But for SparkR ```spark.gaussianMixture``` test case, the fix is inappropriate. It only changed the output result of native R which should be compared by SparkR, however, it did not change the R code in annotation which is used for reproducing the result in native R. It will confuse users who can not reproduce the same result in native R. This PR sends a more robust test case which can produce same result between SparkR and native R. ## How was this patch tested? Unit test update. Author: Yanbo Liang Closes #14730 from yanboliang/spark-16961-followup. 
--- R/pkg/inst/tests/testthat/test_mllib.R | 47 ++++++++++++++------------ 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index 67a3099101cf1..d15c2393b94ac 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -512,49 +512,52 @@ test_that("spark.gaussianMixture", { # R code to reproduce the result. # nolint start #' library(mvtnorm) - #' set.seed(100) - #' a <- rmvnorm(4, c(0, 0)) - #' b <- rmvnorm(6, c(3, 4)) + #' set.seed(1) + #' a <- rmvnorm(7, c(0, 0)) + #' b <- rmvnorm(8, c(10, 10)) #' data <- rbind(a, b) #' model <- mvnormalmixEM(data, k = 2) #' model$lambda # - # [1] 0.4 0.6 + # [1] 0.4666667 0.5333333 # #' model$mu # - # [1] -0.2614822 0.5128697 - # [1] 2.647284 4.544682 + # [1] 0.11731091 -0.06192351 + # [1] 10.363673 9.897081 # #' model$sigma # # [[1]] - # [,1] [,2] - # [1,] 0.08427399 0.00548772 - # [2,] 0.00548772 0.09090715 + # [,1] [,2] + # [1,] 0.62049934 0.06880802 + # [2,] 0.06880802 1.27431874 # # [[2]] - # [,1] [,2] - # [1,] 0.1641373 -0.1673806 - # [2,] -0.1673806 0.7508951 + # [,1] [,2] + # [1,] 0.2961543 0.160783 + # [2,] 0.1607830 1.008878 # nolint end - data <- list(list(-0.50219235, 0.1315312), list(-0.07891709, 0.8867848), - list(0.11697127, 0.3186301), list(-0.58179068, 0.7145327), - list(2.17474057, 3.6401379), list(3.08988614, 4.0962745), - list(2.79836605, 4.7398405), list(3.12337950, 3.9706833), - list(2.61114575, 4.5108563), list(2.08618581, 6.3102968)) + data <- list(list(-0.6264538, 0.1836433), list(-0.8356286, 1.5952808), + list(0.3295078, -0.8204684), list(0.4874291, 0.7383247), + list(0.5757814, -0.3053884), list(1.5117812, 0.3898432), + list(-0.6212406, -2.2146999), list(11.1249309, 9.9550664), + list(9.9838097, 10.9438362), list(10.8212212, 10.5939013), + list(10.9189774, 10.7821363), list(10.0745650, 8.0106483), + list(10.6198257, 9.9438713), list(9.8442045, 8.5292476), + list(9.5218499, 10.4179416)) df <- createDataFrame(data, c("x1", "x2")) model <- spark.gaussianMixture(df, ~ x1 + x2, k = 2) stats <- summary(model) - rLambda <- c(0.50861, 0.49139) - rMu <- c(0.267, 1.195, 2.743, 4.730) - rSigma <- c(1.099, 1.339, 1.339, 1.798, - 0.145, -0.309, -0.309, 0.716) + rLambda <- c(0.4666667, 0.5333333) + rMu <- c(0.11731091, -0.06192351, 10.363673, 9.897081) + rSigma <- c(0.62049934, 0.06880802, 0.06880802, 1.27431874, + 0.2961543, 0.160783, 0.1607830, 1.008878) expect_equal(stats$lambda, rLambda, tolerance = 1e-3) expect_equal(unlist(stats$mu), rMu, tolerance = 1e-3) expect_equal(unlist(stats$sigma), rSigma, tolerance = 1e-3) p <- collect(select(predict(model, df), "prediction")) - expect_equal(p$prediction, c(0, 0, 0, 0, 0, 1, 1, 1, 1, 1)) + expect_equal(p$prediction, c(0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1)) # Test model save/load modelPath <- tempfile(pattern = "spark-gaussianMixture", fileext = ".tmp") From e328f577e81363f6b3f892931f20dbf68f7d29cf Mon Sep 17 00:00:00 2001 From: "wm624@hotmail.com" Date: Sun, 21 Aug 2016 11:51:46 +0100 Subject: [PATCH 044/270] [SPARK-17002][CORE] Document that spark.ssl.protocol. is required for SSL ## What changes were proposed in this pull request? `spark.ssl.enabled`=true, but failing to set `spark.ssl.protocol` will fail and throw meaningless exception. `spark.ssl.protocol` is required when `spark.ssl.enabled`. Improvement: require `spark.ssl.protocol` when initializing SSLContext, otherwise throws an exception to indicate that. Remove the OrElse("default"). 
Document this requirement in configuration.md ## How was this patch tested? Manual tests: build the documentation and check it. Configure `spark.ssl.enabled` only; it throws the exception below: 16/08/16 16:04:37 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(mwang); groups with view permissions: Set(); users with modify permissions: Set(mwang); groups with modify permissions: Set() Exception in thread "main" java.lang.IllegalArgumentException: requirement failed: spark.ssl.protocol is required when enabling SSL connections. at scala.Predef$.require(Predef.scala:224) at org.apache.spark.SecurityManager.<init>(SecurityManager.scala:285) at org.apache.spark.deploy.master.Master$.startRpcEnvAndEndpoint(Master.scala:1026) at org.apache.spark.deploy.master.Master$.main(Master.scala:1011) at org.apache.spark.deploy.master.Master.main(Master.scala) Configure both `spark.ssl.enabled` and `spark.ssl.protocol`; it works fine. Author: wm624@hotmail.com Closes #14674 from wangmiao1981/ssl. --- core/src/main/scala/org/apache/spark/SecurityManager.scala | 5 ++++- docs/configuration.md | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/org/apache/spark/SecurityManager.scala b/core/src/main/scala/org/apache/spark/SecurityManager.scala index f72c7ded5ea52..a6550b6ca8c94 100644 --- a/core/src/main/scala/org/apache/spark/SecurityManager.scala +++ b/core/src/main/scala/org/apache/spark/SecurityManager.scala @@ -282,7 +282,10 @@ private[spark] class SecurityManager(sparkConf: SparkConf) }: TrustManager }) - val sslContext = SSLContext.getInstance(fileServerSSLOptions.protocol.getOrElse("Default")) + require(fileServerSSLOptions.protocol.isDefined, + "spark.ssl.protocol is required when enabling SSL connections.") + + val sslContext = SSLContext.getInstance(fileServerSSLOptions.protocol.get) sslContext.init(null, trustStoreManagers.getOrElse(credulousTrustStoreManagers), null) val hostVerifier = new HostnameVerifier { diff --git a/docs/configuration.md b/docs/configuration.md index 96e8c6d08a1e3..4bda464b98bf6 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1472,6 +1472,9 @@ Apart from these, the following properties are also available, and may be useful

    Whether to enable SSL connections on all supported protocols.

+
+    When spark.ssl.enabled is configured, spark.ssl.protocol
+    is required.

All the SSL settings like spark.ssl.xxx where xxx is a particular configuration property, denote the global configuration for all the supported protocols. In order to override the global configuration for the particular protocol, From ab7143463daf2056736c85e3a943c826b5992623 Mon Sep 17 00:00:00 2001 From: Xiangrui Meng Date: Sun, 21 Aug 2016 10:31:25 -0700 Subject: [PATCH 045/270] [MINOR][R] add SparkR.Rcheck/ and SparkR_*.tar.gz to R/.gitignore ## What changes were proposed in this pull request? Ignore temp files generated by `check-cran.sh`. Author: Xiangrui Meng Closes #14740 from mengxr/R-gitignore. --- R/.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/R/.gitignore b/R/.gitignore index 9a5889ba28b2a..c98504ab07781 100644 --- a/R/.gitignore +++ b/R/.gitignore @@ -4,3 +4,5 @@ lib pkg/man pkg/html +SparkR.Rcheck/ +SparkR_*.tar.gz From 91c2397684ab791572ac57ffb2a924ff058bb64f Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Sun, 21 Aug 2016 22:07:47 +0200 Subject: [PATCH 046/270] [SPARK-17098][SQL] Fix `NullPropagation` optimizer to handle `COUNT(NULL) OVER` correctly ## What changes were proposed in this pull request? Currently, `NullPropagation` optimizer replaces `COUNT` on null literals in a bottom-up fashion. During that, `WindowExpression` is not covered properly. This PR adds the missing propagation logic. **Before** ```scala scala> sql("SELECT COUNT(1 + NULL) OVER ()").show java.lang.UnsupportedOperationException: Cannot evaluate expression: cast(0 as bigint) windowspecdefinition(ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) ``` **After** ```scala scala> sql("SELECT COUNT(1 + NULL) OVER ()").show +----------------------------------------------------------------------------------------------+ |count((1 + CAST(NULL AS INT))) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)| +----------------------------------------------------------------------------------------------+ | 0| +----------------------------------------------------------------------------------------------+ ``` ## How was this patch tested? Pass the Jenkins test with a new test case. Author: Dongjoon Hyun Closes #14689 from dongjoon-hyun/SPARK-17098. 
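A short way to confirm the rewrite described above — a hedged sketch assuming a running SparkSession named `spark` with this fix applied, not part of the patch itself:

```scala
// Inspect the optimized logical plan for the previously failing query.
spark.sql("SELECT COUNT(1 + NULL) OVER () FROM VALUES 1, 2, 3").explain(true)
// With the fix, the optimized plan should project cast(0 as bigint) instead of a window
// expression over a count of a null literal, so .show() returns 0 for every input row.
```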
--- .../sql/catalyst/optimizer/Optimizer.scala | 2 + .../sql-tests/inputs/null-propagation.sql | 9 +++++ .../results/null-propagation.sql.out | 38 +++++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 sql/core/src/test/resources/sql-tests/inputs/null-propagation.sql create mode 100644 sql/core/src/test/resources/sql-tests/results/null-propagation.sql.out diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index ce57f05868fe1..9a0ff8a9b3211 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -619,6 +619,8 @@ object NullPropagation extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case q: LogicalPlan => q transformExpressionsUp { + case e @ WindowExpression(Cast(Literal(0L, _), _), _) => + Cast(Literal(0L), e.dataType) case e @ AggregateExpression(Count(exprs), _, _, _) if !exprs.exists(nonNullLiteral) => Cast(Literal(0L), e.dataType) case e @ IsNull(c) if !c.nullable => Literal.create(false, BooleanType) diff --git a/sql/core/src/test/resources/sql-tests/inputs/null-propagation.sql b/sql/core/src/test/resources/sql-tests/inputs/null-propagation.sql new file mode 100644 index 0000000000000..66549da7971d3 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/null-propagation.sql @@ -0,0 +1,9 @@ + +-- count(null) should be 0 +SELECT COUNT(NULL) FROM VALUES 1, 2, 3; +SELECT COUNT(1 + NULL) FROM VALUES 1, 2, 3; + +-- count(null) on window should be 0 +SELECT COUNT(NULL) OVER () FROM VALUES 1, 2, 3; +SELECT COUNT(1 + NULL) OVER () FROM VALUES 1, 2, 3; + diff --git a/sql/core/src/test/resources/sql-tests/results/null-propagation.sql.out b/sql/core/src/test/resources/sql-tests/results/null-propagation.sql.out new file mode 100644 index 0000000000000..ed3a651aa6614 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/null-propagation.sql.out @@ -0,0 +1,38 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 4 + + +-- !query 0 +SELECT COUNT(NULL) FROM VALUES 1, 2, 3 +-- !query 0 schema +struct +-- !query 0 output +0 + + +-- !query 1 +SELECT COUNT(1 + NULL) FROM VALUES 1, 2, 3 +-- !query 1 schema +struct +-- !query 1 output +0 + + +-- !query 2 +SELECT COUNT(NULL) OVER () FROM VALUES 1, 2, 3 +-- !query 2 schema +struct +-- !query 2 output +0 +0 +0 + + +-- !query 3 +SELECT COUNT(1 + NULL) OVER () FROM VALUES 1, 2, 3 +-- !query 3 schema +struct +-- !query 3 output +0 +0 +0 From b2074b664a9c269c4103760d40c4a14e7aeb1e83 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Sun, 21 Aug 2016 22:23:14 -0700 Subject: [PATCH 047/270] [SPARK-16498][SQL] move hive hack for data source table into HiveExternalCatalog ## What changes were proposed in this pull request? Spark SQL doesn't have its own meta store yet, and use hive's currently. However, hive's meta store has some limitations(e.g. columns can't be too many, not case-preserving, bad decimal type support, etc.), so we have some hacks to successfully store data source table metadata into hive meta store, i.e. put all the information in table properties. This PR moves these hacks to `HiveExternalCatalog`, tries to isolate hive specific logic in one place. changes overview: 1. **before this PR**: we need to put metadata(schema, partition columns, etc.) 
of data source tables to table properties before saving it to external catalog, even the external catalog doesn't use hive metastore(e.g. `InMemoryCatalog`) **after this PR**: the table properties tricks are only in `HiveExternalCatalog`, the caller side doesn't need to take care of it anymore. 2. **before this PR**: because the table properties tricks are done outside of external catalog, so we also need to revert these tricks when we read the table metadata from external catalog and use it. e.g. in `DescribeTableCommand` we will read schema and partition columns from table properties. **after this PR**: The table metadata read from external catalog is exactly the same with what we saved to it. bonus: now we can create data source table using `SessionCatalog`, if schema is specified. breaks: `schemaStringLengthThreshold` is not configurable anymore. `hive.default.rcfile.serde` is not configurable anymore. ## How was this patch tested? existing tests. Author: Wenchen Fan Closes #14155 from cloud-fan/catalog-table. --- .../ml/source/libsvm/LibSVMRelation.scala | 3 +- .../spark/sql/execution/SparkSqlParser.scala | 4 +- .../command/createDataSourceTables.scala | 255 ++------------ .../spark/sql/execution/command/ddl.scala | 94 +---- .../spark/sql/execution/command/tables.scala | 59 +--- .../datasources/DataSourceStrategy.scala | 22 +- .../datasources/WriterContainer.scala | 16 +- .../datasources/csv/CSVRelation.scala | 5 +- .../datasources/json/JsonFileFormat.scala | 3 +- .../parquet/ParquetFileFormat.scala | 4 +- .../datasources/text/TextFileFormat.scala | 3 +- .../apache/spark/sql/internal/HiveSerDe.scala | 6 +- .../execution/command/DDLCommandSuite.scala | 6 +- .../sql/execution/command/DDLSuite.scala | 110 +----- .../sources/CreateTableAsSelectSuite.scala | 5 +- .../spark/sql/hive/HiveExternalCatalog.scala | 328 +++++++++++++++++- .../spark/sql/hive/HiveMetastoreCatalog.scala | 67 +--- .../sql/hive/client/HiveClientImpl.scala | 16 +- .../spark/sql/hive/orc/OrcFileFormat.scala | 3 +- .../sql/hive/MetastoreDataSourcesSuite.scala | 110 +++--- .../sql/hive/execution/HiveCommandSuite.scala | 40 ++- .../sql/hive/execution/HiveDDLSuite.scala | 23 ++ .../sql/hive/execution/SQLQuerySuite.scala | 4 +- .../sql/sources/SimpleTextRelation.scala | 3 +- 24 files changed, 536 insertions(+), 653 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala b/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala index 034223e115389..5c79c6905801c 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala @@ -33,7 +33,6 @@ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.encoders.RowEncoder import org.apache.spark.sql.catalyst.expressions.AttributeReference import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.sources._ import org.apache.spark.sql.types._ @@ -51,7 +50,7 @@ private[libsvm] class LibSVMOutputWriter( new TextOutputFormat[NullWritable, Text]() { override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { val configuration = context.getConfiguration - val uniqueWriteJobId = configuration.get(CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = 
configuration.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val split = taskAttemptId.getTaskID.getId new Path(path, f"part-r-$split%05d-$uniqueWriteJobId$extension") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala index 71c3bd31e02e4..e32d30178eeb1 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala @@ -971,7 +971,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { // Storage format val defaultStorage: CatalogStorageFormat = { val defaultStorageType = conf.getConfString("hive.default.fileformat", "textfile") - val defaultHiveSerde = HiveSerDe.sourceToSerDe(defaultStorageType, conf) + val defaultHiveSerde = HiveSerDe.sourceToSerDe(defaultStorageType) CatalogStorageFormat( locationUri = None, inputFormat = defaultHiveSerde.flatMap(_.inputFormat) @@ -1115,7 +1115,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { override def visitGenericFileFormat( ctx: GenericFileFormatContext): CatalogStorageFormat = withOrigin(ctx) { val source = ctx.identifier.getText - HiveSerDe.sourceToSerDe(source, conf) match { + HiveSerDe.sourceToSerDe(source) match { case Some(s) => CatalogStorageFormat.empty.copy( inputFormat = s.inputFormat, diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala index 7b028e72ed0a8..7400a0e7bb1f1 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala @@ -17,10 +17,6 @@ package org.apache.spark.sql.execution.command -import scala.collection.mutable -import scala.util.control.NonFatal - -import org.apache.spark.internal.Logging import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases @@ -28,7 +24,6 @@ import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.plans.QueryPlan import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.execution.datasources._ -import org.apache.spark.sql.internal.HiveSerDe import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation} import org.apache.spark.sql.types._ @@ -97,16 +92,19 @@ case class CreateDataSourceTableCommand( } } - CreateDataSourceTableUtils.createDataSourceTable( - sparkSession = sparkSession, - tableIdent = tableIdent, + val table = CatalogTable( + identifier = tableIdent, + tableType = if (isExternal) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED, + storage = CatalogStorageFormat.empty.copy(properties = optionsWithPath), schema = dataSource.schema, - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, - provider = provider, - options = optionsWithPath, - isExternal = isExternal) - + provider = Some(provider), + partitionColumnNames = partitionColumns, + bucketSpec = bucketSpec + ) + + // We will return Nil or throw exception at the beginning if the table already exists, so when + // we reach here, the table should not exist and we should set `ignoreIfExists` to false. 
+ sessionState.catalog.createTable(table, ignoreIfExists = false) Seq.empty[Row] } } @@ -193,7 +191,7 @@ case class CreateDataSourceTableAsSelectCommand( } existingSchema = Some(l.schema) case s: SimpleCatalogRelation if DDLUtils.isDatasourceTable(s.metadata) => - existingSchema = Some(DDLUtils.getSchemaFromTableProperties(s.metadata)) + existingSchema = Some(s.metadata.schema) case o => throw new AnalysisException(s"Saving data in ${o.toString} is not supported.") } @@ -233,15 +231,17 @@ case class CreateDataSourceTableAsSelectCommand( // We will use the schema of resolved.relation as the schema of the table (instead of // the schema of df). It is important since the nullability may be changed by the relation // provider (for example, see org.apache.spark.sql.parquet.DefaultSource). - CreateDataSourceTableUtils.createDataSourceTable( - sparkSession = sparkSession, - tableIdent = tableIdent, - schema = result.schema, - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, - provider = provider, - options = optionsWithPath, - isExternal = isExternal) + val schema = result.schema + val table = CatalogTable( + identifier = tableIdent, + tableType = if (isExternal) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED, + storage = CatalogStorageFormat.empty.copy(properties = optionsWithPath), + schema = schema, + provider = Some(provider), + partitionColumnNames = partitionColumns, + bucketSpec = bucketSpec + ) + sessionState.catalog.createTable(table, ignoreIfExists = false) } // Refresh the cache of the table in the catalog. @@ -249,210 +249,3 @@ case class CreateDataSourceTableAsSelectCommand( Seq.empty[Row] } } - - -object CreateDataSourceTableUtils extends Logging { - - val DATASOURCE_PREFIX = "spark.sql.sources." - val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider" - val DATASOURCE_WRITEJOBUUID = DATASOURCE_PREFIX + "writeJobUUID" - val DATASOURCE_OUTPUTPATH = DATASOURCE_PREFIX + "output.path" - val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema" - val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "." - val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts" - val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols" - val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols" - val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets" - val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols" - val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part." - val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol." - val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol." - val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol." - - def createDataSourceTable( - sparkSession: SparkSession, - tableIdent: TableIdentifier, - schema: StructType, - partitionColumns: Array[String], - bucketSpec: Option[BucketSpec], - provider: String, - options: Map[String, String], - isExternal: Boolean): Unit = { - val tableProperties = new mutable.HashMap[String, String] - tableProperties.put(DATASOURCE_PROVIDER, provider) - - // Serialized JSON schema string may be too long to be stored into a single metastore table - // property. In this case, we split the JSON string and store each part as a separate table - // property. - val threshold = sparkSession.sessionState.conf.schemaStringLengthThreshold - val schemaJsonString = schema.json - // Split the JSON string. 
- val parts = schemaJsonString.grouped(threshold).toSeq - tableProperties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString) - parts.zipWithIndex.foreach { case (part, index) => - tableProperties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part) - } - - if (partitionColumns.length > 0) { - tableProperties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString) - partitionColumns.zipWithIndex.foreach { case (partCol, index) => - tableProperties.put(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index", partCol) - } - } - - if (bucketSpec.isDefined) { - val BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) = bucketSpec.get - - tableProperties.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString) - tableProperties.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString) - bucketColumnNames.zipWithIndex.foreach { case (bucketCol, index) => - tableProperties.put(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index", bucketCol) - } - - if (sortColumnNames.nonEmpty) { - tableProperties.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString) - sortColumnNames.zipWithIndex.foreach { case (sortCol, index) => - tableProperties.put(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index", sortCol) - } - } - } - - val tableType = if (isExternal) { - tableProperties.put("EXTERNAL", "TRUE") - CatalogTableType.EXTERNAL - } else { - tableProperties.put("EXTERNAL", "FALSE") - CatalogTableType.MANAGED - } - - val maybeSerDe = HiveSerDe.sourceToSerDe(provider, sparkSession.sessionState.conf) - val dataSource = - DataSource( - sparkSession, - userSpecifiedSchema = Some(schema), - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, - className = provider, - options = options) - - def newSparkSQLSpecificMetastoreTable(): CatalogTable = { - CatalogTable( - identifier = tableIdent, - tableType = tableType, - schema = new StructType, - provider = Some(provider), - storage = CatalogStorageFormat( - locationUri = None, - inputFormat = None, - outputFormat = None, - serde = None, - compressed = false, - properties = options - ), - properties = tableProperties.toMap) - } - - def newHiveCompatibleMetastoreTable( - relation: HadoopFsRelation, - serde: HiveSerDe): CatalogTable = { - assert(partitionColumns.isEmpty) - assert(relation.partitionSchema.isEmpty) - - CatalogTable( - identifier = tableIdent, - tableType = tableType, - storage = CatalogStorageFormat( - locationUri = Some(relation.location.paths.map(_.toUri.toString).head), - inputFormat = serde.inputFormat, - outputFormat = serde.outputFormat, - serde = serde.serde, - compressed = false, - properties = options - ), - schema = relation.schema, - provider = Some(provider), - properties = tableProperties.toMap, - viewText = None) - } - - // TODO: Support persisting partitioned data source relations in Hive compatible format - val qualifiedTableName = tableIdent.quotedString - val skipHiveMetadata = options.getOrElse("skipHiveMetadata", "false").toBoolean - val resolvedRelation = dataSource.resolveRelation(checkPathExist = false) - val (hiveCompatibleTable, logMessage) = (maybeSerDe, resolvedRelation) match { - case _ if skipHiveMetadata => - val message = - s"Persisting partitioned data source relation $qualifiedTableName into " + - "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive." 
- (None, message) - - case (Some(serde), relation: HadoopFsRelation) if relation.location.paths.length == 1 && - relation.partitionSchema.isEmpty && relation.bucketSpec.isEmpty => - val hiveTable = newHiveCompatibleMetastoreTable(relation, serde) - val message = - s"Persisting data source relation $qualifiedTableName with a single input path " + - s"into Hive metastore in Hive compatible format. Input path: " + - s"${relation.location.paths.head}." - (Some(hiveTable), message) - - case (Some(serde), relation: HadoopFsRelation) if relation.partitionSchema.nonEmpty => - val message = - s"Persisting partitioned data source relation $qualifiedTableName into " + - "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " + - "Input path(s): " + relation.location.paths.mkString("\n", "\n", "") - (None, message) - - case (Some(serde), relation: HadoopFsRelation) if relation.bucketSpec.nonEmpty => - val message = - s"Persisting bucketed data source relation $qualifiedTableName into " + - "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " + - "Input path(s): " + relation.location.paths.mkString("\n", "\n", "") - (None, message) - - case (Some(serde), relation: HadoopFsRelation) => - val message = - s"Persisting data source relation $qualifiedTableName with multiple input paths into " + - "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " + - s"Input paths: " + relation.location.paths.mkString("\n", "\n", "") - (None, message) - - case (Some(serde), _) => - val message = - s"Data source relation $qualifiedTableName is not a " + - s"${classOf[HadoopFsRelation].getSimpleName}. Persisting it into Hive metastore " + - "in Spark SQL specific format, which is NOT compatible with Hive." - (None, message) - - case _ => - val message = - s"Couldn't find corresponding Hive SerDe for data source provider $provider. " + - s"Persisting data source relation $qualifiedTableName into Hive metastore in " + - s"Spark SQL specific format, which is NOT compatible with Hive." - (None, message) - } - - (hiveCompatibleTable, logMessage) match { - case (Some(table), message) => - // We first try to save the metadata of the table in a Hive compatible way. - // If Hive throws an error, we fall back to save its metadata in the Spark SQL - // specific way. - try { - logInfo(message) - sparkSession.sessionState.catalog.createTable(table, ignoreIfExists = false) - } catch { - case NonFatal(e) => - val warningMessage = - s"Could not persist $qualifiedTableName in a Hive compatible way. Persisting " + - s"it into Hive metastore in Spark SQL specific format." 
- logWarning(warningMessage, e) - val table = newSparkSQLSpecificMetastoreTable() - sparkSession.sessionState.catalog.createTable(table, ignoreIfExists = false) - } - - case (None, message) => - logWarning(message) - val table = newSparkSQLSpecificMetastoreTable() - sparkSession.sessionState.catalog.createTable(table, ignoreIfExists = false) - } - } -} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala index 2eff9337bc14a..3817f919f3a5a 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala @@ -27,10 +27,9 @@ import org.apache.hadoop.mapred.{FileInputFormat, JobConf} import org.apache.spark.sql.{AnalysisException, Row, SparkSession} import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogDatabase, CatalogTable, CatalogTablePartition, CatalogTableType, SessionCatalog} -import org.apache.spark.sql.catalyst.catalog.CatalogTypes._ +import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTable, CatalogTablePartition, CatalogTableType, SessionCatalog} +import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference} -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ import org.apache.spark.sql.execution.datasources.PartitioningUtils import org.apache.spark.sql.types._ @@ -234,10 +233,8 @@ case class AlterTableSetPropertiesCommand( extends RunnableCommand { override def run(sparkSession: SparkSession): Seq[Row] = { - val ident = if (isView) "VIEW" else "TABLE" val catalog = sparkSession.sessionState.catalog DDLUtils.verifyAlterTableType(catalog, tableName, isView) - DDLUtils.verifyTableProperties(properties.keys.toSeq, s"ALTER $ident") val table = catalog.getTableMetadata(tableName) // This overrides old properties val newTable = table.copy(properties = table.properties ++ properties) @@ -264,10 +261,8 @@ case class AlterTableUnsetPropertiesCommand( extends RunnableCommand { override def run(sparkSession: SparkSession): Seq[Row] = { - val ident = if (isView) "VIEW" else "TABLE" val catalog = sparkSession.sessionState.catalog DDLUtils.verifyAlterTableType(catalog, tableName, isView) - DDLUtils.verifyTableProperties(propKeys, s"ALTER $ident") val table = catalog.getTableMetadata(tableName) if (!ifExists) { propKeys.foreach { k => @@ -445,11 +440,11 @@ case class AlterTableRecoverPartitionsCommand( if (!catalog.tableExists(tableName)) { throw new AnalysisException(s"Table $tableName in $cmd does not exist.") } - val table = catalog.getTableMetadata(tableName) if (catalog.isTemporaryTable(tableName)) { throw new AnalysisException( s"Operation not allowed: $cmd on temporary tables: $tableName") } + val table = catalog.getTableMetadata(tableName) if (DDLUtils.isDatasourceTable(table)) { throw new AnalysisException( s"Operation not allowed: $cmd on datasource tables: $tableName") @@ -458,7 +453,7 @@ case class AlterTableRecoverPartitionsCommand( throw new AnalysisException( s"Operation not allowed: $cmd only works on external tables: $tableName") } - if (!DDLUtils.isTablePartitioned(table)) { + if (table.partitionColumnNames.isEmpty) { throw new AnalysisException( s"Operation not allowed: $cmd only works on partitioned tables: $tableName") } @@ -584,13 +579,8 @@ case class 
AlterTableSetLocationCommand( object DDLUtils { - - def isDatasourceTable(props: Map[String, String]): Boolean = { - props.contains(DATASOURCE_PROVIDER) - } - def isDatasourceTable(table: CatalogTable): Boolean = { - isDatasourceTable(table.properties) + table.provider.isDefined && table.provider.get != "hive" } /** @@ -611,78 +601,4 @@ object DDLUtils { case _ => }) } - - /** - * If the given table properties (or SerDe properties) contains datasource properties, - * throw an exception. - */ - def verifyTableProperties(propKeys: Seq[String], operation: String): Unit = { - val datasourceKeys = propKeys.filter(_.startsWith(DATASOURCE_PREFIX)) - if (datasourceKeys.nonEmpty) { - throw new AnalysisException(s"Operation not allowed: $operation property keys may not " + - s"start with '$DATASOURCE_PREFIX': ${datasourceKeys.mkString("[", ", ", "]")}") - } - } - - def isTablePartitioned(table: CatalogTable): Boolean = { - table.partitionColumnNames.nonEmpty || table.properties.contains(DATASOURCE_SCHEMA_NUMPARTCOLS) - } - - // A persisted data source table always store its schema in the catalog. - def getSchemaFromTableProperties(metadata: CatalogTable): StructType = { - require(isDatasourceTable(metadata)) - val msgSchemaCorrupted = "Could not read schema from the metastore because it is corrupted." - val props = metadata.properties - props.get(DATASOURCE_SCHEMA).map { schema => - // Originally, we used spark.sql.sources.schema to store the schema of a data source table. - // After SPARK-6024, we removed this flag. - // Although we are not using spark.sql.sources.schema any more, we need to still support. - DataType.fromJson(schema).asInstanceOf[StructType] - } getOrElse { - props.get(DATASOURCE_SCHEMA_NUMPARTS).map { numParts => - val parts = (0 until numParts.toInt).map { index => - val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull - if (part == null) { - throw new AnalysisException(msgSchemaCorrupted + - s" (missing part $index of the schema, $numParts parts are expected).") - } - part - } - // Stick all parts back to a single schema string. - DataType.fromJson(parts.mkString).asInstanceOf[StructType] - } getOrElse(throw new AnalysisException(msgSchemaCorrupted)) - } - } - - private def getColumnNamesByType( - props: Map[String, String], colType: String, typeName: String): Seq[String] = { - require(isDatasourceTable(props)) - - for { - numCols <- props.get(s"spark.sql.sources.schema.num${colType.capitalize}Cols").toSeq - index <- 0 until numCols.toInt - } yield props.getOrElse( - s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index", - throw new AnalysisException( - s"Corrupted $typeName in catalog: $numCols parts expected, but part $index is missing." 
- ) - ) - } - - def getPartitionColumnsFromTableProperties(metadata: CatalogTable): Seq[String] = { - getColumnNamesByType(metadata.properties, "part", "partitioning columns") - } - - def getBucketSpecFromTableProperties(metadata: CatalogTable): Option[BucketSpec] = { - if (isDatasourceTable(metadata)) { - metadata.properties.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { numBuckets => - BucketSpec( - numBuckets.toInt, - getColumnNamesByType(metadata.properties, "bucket", "bucketing columns"), - getColumnNamesByType(metadata.properties, "sort", "sorting columns")) - } - } else { - None - } - } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 720399ecc596a..af2b5ffd1c427 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -119,11 +119,9 @@ case class CreateTableLikeCommand( case class CreateTableCommand(table: CatalogTable, ifNotExists: Boolean) extends RunnableCommand { override def run(sparkSession: SparkSession): Seq[Row] = { - DDLUtils.verifyTableProperties(table.properties.keys.toSeq, "CREATE TABLE") sparkSession.sessionState.catalog.createTable(table, ifNotExists) Seq.empty[Row] } - } @@ -414,8 +412,8 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF describeSchema(catalog.lookupRelation(table).schema, result) } else { val metadata = catalog.getTableMetadata(table) + describeSchema(metadata.schema, result) - describeSchema(metadata, result) if (isExtended) { describeExtended(metadata, result) } else if (isFormatted) { @@ -429,20 +427,10 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF } private def describePartitionInfo(table: CatalogTable, buffer: ArrayBuffer[Row]): Unit = { - if (DDLUtils.isDatasourceTable(table)) { - val partColNames = DDLUtils.getPartitionColumnsFromTableProperties(table) - if (partColNames.nonEmpty) { - val userSpecifiedSchema = DDLUtils.getSchemaFromTableProperties(table) - append(buffer, "# Partition Information", "", "") - append(buffer, s"# ${output.head.name}", output(1).name, output(2).name) - describeSchema(StructType(partColNames.map(userSpecifiedSchema(_))), buffer) - } - } else { - if (table.partitionColumnNames.nonEmpty) { - append(buffer, "# Partition Information", "", "") - append(buffer, s"# ${output.head.name}", output(1).name, output(2).name) - describeSchema(table.partitionSchema, buffer) - } + if (table.partitionColumnNames.nonEmpty) { + append(buffer, "# Partition Information", "", "") + append(buffer, s"# ${output.head.name}", output(1).name, output(2).name) + describeSchema(table.partitionSchema, buffer) } } @@ -466,11 +454,7 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF append(buffer, "Table Type:", table.tableType.name, "") append(buffer, "Table Parameters:", "", "") - table.properties.filterNot { - // Hides schema properties that hold user-defined schema, partition columns, and bucketing - // information since they are already extracted and shown in other parts. 
- case (key, _) => key.startsWith(CreateDataSourceTableUtils.DATASOURCE_SCHEMA) - }.foreach { case (key, value) => + table.properties.foreach { case (key, value) => append(buffer, s" $key", value, "") } @@ -493,7 +477,7 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF } private def describeBucketingInfo(metadata: CatalogTable, buffer: ArrayBuffer[Row]): Unit = { - def appendBucketInfo(bucketSpec: Option[BucketSpec]) = bucketSpec match { + metadata.bucketSpec match { case Some(BucketSpec(numBuckets, bucketColumnNames, sortColumnNames)) => append(buffer, "Num Buckets:", numBuckets.toString, "") append(buffer, "Bucket Columns:", bucketColumnNames.mkString("[", ", ", "]"), "") @@ -501,23 +485,6 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF case _ => } - - if (DDLUtils.isDatasourceTable(metadata)) { - appendBucketInfo(DDLUtils.getBucketSpecFromTableProperties(metadata)) - } else { - appendBucketInfo(metadata.bucketSpec) - } - } - - private def describeSchema( - tableDesc: CatalogTable, - buffer: ArrayBuffer[Row]): Unit = { - if (DDLUtils.isDatasourceTable(tableDesc)) { - val schema = DDLUtils.getSchemaFromTableProperties(tableDesc) - describeSchema(schema, buffer) - } else { - describeSchema(tableDesc.schema, buffer) - } } private def describeSchema(schema: StructType, buffer: ArrayBuffer[Row]): Unit = { @@ -678,7 +645,7 @@ case class ShowPartitionsCommand( s"SHOW PARTITIONS is not allowed on a view or index table: ${tab.qualifiedName}") } - if (!DDLUtils.isTablePartitioned(tab)) { + if (tab.partitionColumnNames.isEmpty) { throw new AnalysisException( s"SHOW PARTITIONS is not allowed on a table that is not partitioned: ${tab.qualifiedName}") } @@ -729,6 +696,7 @@ case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableComman val tableMetadata = catalog.getTableMetadata(table) + // TODO: unify this after we unify the CREATE TABLE syntax for hive serde and data source table. 
val stmt = if (DDLUtils.isDatasourceTable(tableMetadata)) { showCreateDataSourceTable(tableMetadata) } else { @@ -872,15 +840,14 @@ case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableComman private def showDataSourceTableDataColumns( metadata: CatalogTable, builder: StringBuilder): Unit = { - val schema = DDLUtils.getSchemaFromTableProperties(metadata) - val columns = schema.fields.map(f => s"${quoteIdentifier(f.name)} ${f.dataType.sql}") + val columns = metadata.schema.fields.map(f => s"${quoteIdentifier(f.name)} ${f.dataType.sql}") builder ++= columns.mkString("(", ", ", ")\n") } private def showDataSourceTableOptions(metadata: CatalogTable, builder: StringBuilder): Unit = { val props = metadata.properties - builder ++= s"USING ${props(CreateDataSourceTableUtils.DATASOURCE_PROVIDER)}\n" + builder ++= s"USING ${metadata.provider.get}\n" val dataSourceOptions = metadata.storage.properties.filterNot { case (key, value) => @@ -900,12 +867,12 @@ case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableComman private def showDataSourceTableNonDataColumns( metadata: CatalogTable, builder: StringBuilder): Unit = { - val partCols = DDLUtils.getPartitionColumnsFromTableProperties(metadata) + val partCols = metadata.partitionColumnNames if (partCols.nonEmpty) { builder ++= s"PARTITIONED BY ${partCols.mkString("(", ", ", ")")}\n" } - DDLUtils.getBucketSpecFromTableProperties(metadata).foreach { spec => + metadata.bucketSpec.foreach { spec => if (spec.bucketColumnNames.nonEmpty) { builder ++= s"CLUSTERED BY ${spec.bucketColumnNames.mkString("(", ", ", ")")}\n" diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala index 733ba185287e1..5eba7df060c4e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala @@ -34,7 +34,7 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project} import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, UnknownPartitioning} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.execution.{RowDataSourceScanExec, SparkPlan} -import org.apache.spark.sql.execution.command.{CreateDataSourceTableUtils, DDLUtils, ExecutedCommandExec} +import org.apache.spark.sql.execution.command.{DDLUtils, ExecutedCommandExec} import org.apache.spark.sql.sources._ import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.UTF8String @@ -204,24 +204,14 @@ case class DataSourceAnalysis(conf: CatalystConf) extends Rule[LogicalPlan] { */ class FindDataSourceTable(sparkSession: SparkSession) extends Rule[LogicalPlan] { private def readDataSourceTable(sparkSession: SparkSession, table: CatalogTable): LogicalPlan = { - val schema = DDLUtils.getSchemaFromTableProperties(table) - - // We only need names at here since userSpecifiedSchema we loaded from the metastore - // contains partition columns. We can always get datatypes of partitioning columns - // from userSpecifiedSchema. 
- val partitionColumns = DDLUtils.getPartitionColumnsFromTableProperties(table) - - val bucketSpec = DDLUtils.getBucketSpecFromTableProperties(table) - - val options = table.storage.properties val dataSource = DataSource( sparkSession, - userSpecifiedSchema = Some(schema), - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, - className = table.properties(CreateDataSourceTableUtils.DATASOURCE_PROVIDER), - options = options) + userSpecifiedSchema = Some(table.schema), + partitionColumns = table.partitionColumnNames, + bucketSpec = table.bucketSpec, + className = table.provider.get, + options = table.storage.properties) LogicalRelation( dataSource.resolveRelation(), diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala index 447c237e3a1b0..7880c7cfa16f8 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala @@ -33,7 +33,6 @@ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.execution.UnsafeKVExternalSorter -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType} import org.apache.spark.util.{SerializableConfiguration, Utils} @@ -48,6 +47,11 @@ private[datasources] case class WriteRelation( prepareJobForWrite: Job => OutputWriterFactory, bucketSpec: Option[BucketSpec]) +object WriterContainer { + val DATASOURCE_WRITEJOBUUID = "spark.sql.sources.writeJobUUID" + val DATASOURCE_OUTPUTPATH = "spark.sql.sources.output.path" +} + private[datasources] abstract class BaseWriterContainer( @transient val relation: WriteRelation, @transient private val job: Job, @@ -94,7 +98,7 @@ private[datasources] abstract class BaseWriterContainer( // This UUID is sent to executor side together with the serialized `Configuration` object within // the `Job` instance. `OutputWriters` on the executor side should use this UUID to generate // unique task output files. - job.getConfiguration.set(DATASOURCE_WRITEJOBUUID, uniqueWriteJobId.toString) + job.getConfiguration.set(WriterContainer.DATASOURCE_WRITEJOBUUID, uniqueWriteJobId.toString) // Order of the following two lines is important. For Hadoop 1, TaskAttemptContext constructor // clones the Configuration object passed in. 
If we initialize the TaskAttemptContext first, @@ -244,7 +248,7 @@ private[datasources] class DefaultWriterContainer( def writeRows(taskContext: TaskContext, iterator: Iterator[InternalRow]): Unit = { executorSideSetup(taskContext) val configuration = taskAttemptContext.getConfiguration - configuration.set(DATASOURCE_OUTPUTPATH, outputPath) + configuration.set(WriterContainer.DATASOURCE_OUTPUTPATH, outputPath) var writer = newOutputWriter(getWorkPath) writer.initConverter(dataSchema) @@ -352,10 +356,12 @@ private[datasources] class DynamicPartitionWriterContainer( val configuration = taskAttemptContext.getConfiguration val path = if (partitionColumns.nonEmpty) { val partitionPath = getPartitionString(key).getString(0) - configuration.set(DATASOURCE_OUTPUTPATH, new Path(outputPath, partitionPath).toString) + configuration.set( + WriterContainer.DATASOURCE_OUTPUTPATH, + new Path(outputPath, partitionPath).toString) new Path(getWorkPath, partitionPath).toString } else { - configuration.set(DATASOURCE_OUTPUTPATH, outputPath) + configuration.set(WriterContainer.DATASOURCE_OUTPUTPATH, outputPath) getWorkPath } val bucketId = getBucketIdFromKey(key) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala index 6b2f9fc61e677..de2d633c0bcf4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala @@ -30,8 +30,7 @@ import org.apache.spark.rdd.RDD import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.GenericMutableRow -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils -import org.apache.spark.sql.execution.datasources.{OutputWriter, OutputWriterFactory, PartitionedFile} +import org.apache.spark.sql.execution.datasources.{OutputWriter, OutputWriterFactory, PartitionedFile, WriterContainer} import org.apache.spark.sql.types._ object CSVRelation extends Logging { @@ -192,7 +191,7 @@ private[csv] class CsvOutputWriter( new TextOutputFormat[NullWritable, Text]() { override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { val configuration = context.getConfiguration - val uniqueWriteJobId = configuration.get(CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = configuration.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val split = taskAttemptId.getTaskID.getId new Path(path, f"part-r-$split%05d-$uniqueWriteJobId.csv$extension") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala index 27910e2cddad8..16150b91d6452 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala @@ -31,7 +31,6 @@ import org.apache.spark.internal.Logging import org.apache.spark.rdd.RDD import org.apache.spark.sql.{AnalysisException, Row, SparkSession} import org.apache.spark.sql.catalyst.InternalRow -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.sources._ import 
org.apache.spark.sql.types.StructType @@ -164,7 +163,7 @@ private[json] class JsonOutputWriter( new TextOutputFormat[NullWritable, Text]() { override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { val configuration = context.getConfiguration - val uniqueWriteJobId = configuration.get(CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = configuration.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val split = taskAttemptId.getTaskID.getId val bucketString = bucketId.map(BucketingUtils.bucketIdToString).getOrElse("") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala index 9c4778acf53d7..9208c82179d8d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala @@ -44,7 +44,6 @@ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection import org.apache.spark.sql.catalyst.parser.LegacyTypeStringParser -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.execution.metric.SQLMetric import org.apache.spark.sql.internal.SQLConf @@ -547,8 +546,7 @@ private[parquet] class ParquetOutputWriter( // partitions in the case of dynamic partitioning. override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { val configuration = context.getConfiguration - val uniqueWriteJobId = configuration.get( - CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = configuration.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val split = taskAttemptId.getTaskID.getId val bucketString = bucketId.map(BucketingUtils.bucketIdToString).getOrElse("") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala index abb6059f75ba8..a0c3fd53fb53b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala @@ -27,7 +27,6 @@ import org.apache.spark.sql.{AnalysisException, Row, SparkSession} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.UnsafeRow import org.apache.spark.sql.catalyst.expressions.codegen.{BufferHolder, UnsafeRowWriter} -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.sources._ import org.apache.spark.sql.types.{StringType, StructType} @@ -131,7 +130,7 @@ class TextOutputWriter(path: String, dataSchema: StructType, context: TaskAttemp new TextOutputFormat[NullWritable, Text]() { override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { val configuration = context.getConfiguration - val uniqueWriteJobId = configuration.get(CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = 
configuration.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val split = taskAttemptId.getTaskID.getId new Path(path, f"part-r-$split%05d-$uniqueWriteJobId.txt$extension") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/HiveSerDe.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/HiveSerDe.scala index ad69137f7401b..52e648a917d8b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/HiveSerDe.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/HiveSerDe.scala @@ -28,10 +28,9 @@ object HiveSerDe { * * @param source Currently the source abbreviation can be one of the following: * SequenceFile, RCFile, ORC, PARQUET, and case insensitive. - * @param conf SQLConf * @return HiveSerDe associated with the specified source */ - def sourceToSerDe(source: String, conf: SQLConf): Option[HiveSerDe] = { + def sourceToSerDe(source: String): Option[HiveSerDe] = { val serdeMap = Map( "sequencefile" -> HiveSerDe( @@ -42,8 +41,7 @@ object HiveSerDe { HiveSerDe( inputFormat = Option("org.apache.hadoop.hive.ql.io.RCFileInputFormat"), outputFormat = Option("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"), - serde = Option(conf.getConfString("hive.default.rcfile.serde", - "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"))), + serde = Option("org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe")), "orc" -> HiveSerDe( diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala index be1bccbd990a0..8dd883b37bde0 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala @@ -243,7 +243,7 @@ class DDLCommandSuite extends PlanTest { allSources.foreach { s => val query = s"CREATE TABLE my_tab STORED AS $s" val ct = parseAs[CreateTable](query) - val hiveSerde = HiveSerDe.sourceToSerDe(s, new SQLConf) + val hiveSerde = HiveSerDe.sourceToSerDe(s) assert(hiveSerde.isDefined) assert(ct.tableDesc.storage.serde == hiveSerde.get.serde) assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) @@ -276,7 +276,7 @@ class DDLCommandSuite extends PlanTest { val query = s"CREATE TABLE my_tab ROW FORMAT SERDE 'anything' STORED AS $s" if (supportedSources.contains(s)) { val ct = parseAs[CreateTable](query) - val hiveSerde = HiveSerDe.sourceToSerDe(s, new SQLConf) + val hiveSerde = HiveSerDe.sourceToSerDe(s) assert(hiveSerde.isDefined) assert(ct.tableDesc.storage.serde == Some("anything")) assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) @@ -295,7 +295,7 @@ class DDLCommandSuite extends PlanTest { val query = s"CREATE TABLE my_tab ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS $s" if (supportedSources.contains(s)) { val ct = parseAs[CreateTable](query) - val hiveSerde = HiveSerDe.sourceToSerDe(s, new SQLConf) + val hiveSerde = HiveSerDe.sourceToSerDe(s) assert(hiveSerde.isDefined) assert(ct.tableDesc.storage.serde == hiveSerde.get.serde) assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index 0f7fda7666a3b..e6ae42258d4c5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -30,7 +30,6 @@ import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogDatabase, Catal import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogTableType} import org.apache.spark.sql.catalyst.catalog.{CatalogTablePartition, SessionCatalog} import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types._ @@ -93,7 +92,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { .add("col2", "string") .add("a", "int") .add("b", "int"), - provider = Some("parquet"), + provider = Some("hive"), partitionColumnNames = Seq("a", "b"), createTime = 0L) } @@ -277,10 +276,8 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { """.stripMargin) val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName)) - assert(expectedSchema == - DDLUtils.getSchemaFromTableProperties(tableMetadata)) - assert(expectedPartitionCols == - DDLUtils.getPartitionColumnsFromTableProperties(tableMetadata)) + assert(expectedSchema == tableMetadata.schema) + assert(expectedPartitionCols == tableMetadata.partitionColumnNames) } } @@ -399,41 +396,6 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { assert(e.message == "Found duplicate column(s) in bucket: a") } - test("Describe Table with Corrupted Schema") { - import testImplicits._ - - val tabName = "tab1" - withTempPath { dir => - val path = dir.getCanonicalPath - val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("col1", "col2") - df.write.format("json").save(path) - val uri = dir.toURI - - withTable(tabName) { - sql( - s""" - |CREATE TABLE $tabName - |USING json - |OPTIONS ( - | path '$uri' - |) - """.stripMargin) - - val catalog = spark.sessionState.catalog - val table = catalog.getTableMetadata(TableIdentifier(tabName)) - val newProperties = table.properties.filterKeys(key => - key != CreateDataSourceTableUtils.DATASOURCE_SCHEMA_NUMPARTS) - val newTable = table.copy(properties = newProperties) - catalog.alterTable(newTable) - - val e = intercept[AnalysisException] { - sql(s"DESC $tabName") - }.getMessage - assert(e.contains(s"Could not read schema from the metastore because it is corrupted")) - } - } - } - test("Refresh table after changing the data source table partitioning") { import testImplicits._ @@ -460,10 +422,8 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { |) """.stripMargin) val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName)) - val tableSchema = DDLUtils.getSchemaFromTableProperties(tableMetadata) - assert(tableSchema == schema) - val partCols = DDLUtils.getPartitionColumnsFromTableProperties(tableMetadata) - assert(partCols == partitionCols) + assert(tableMetadata.schema == schema) + assert(tableMetadata.partitionColumnNames == partitionCols) // Change the schema val newDF = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)) @@ -472,23 +432,15 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { // No change on the schema val tableMetadataBeforeRefresh = catalog.getTableMetadata(TableIdentifier(tabName)) - val tableSchemaBeforeRefresh = - DDLUtils.getSchemaFromTableProperties(tableMetadataBeforeRefresh) - 
assert(tableSchemaBeforeRefresh == schema) - val partColsBeforeRefresh = - DDLUtils.getPartitionColumnsFromTableProperties(tableMetadataBeforeRefresh) - assert(partColsBeforeRefresh == partitionCols) + assert(tableMetadataBeforeRefresh.schema == schema) + assert(tableMetadataBeforeRefresh.partitionColumnNames == partitionCols) // Refresh does not affect the schema spark.catalog.refreshTable(tabName) val tableMetadataAfterRefresh = catalog.getTableMetadata(TableIdentifier(tabName)) - val tableSchemaAfterRefresh = - DDLUtils.getSchemaFromTableProperties(tableMetadataAfterRefresh) - assert(tableSchemaAfterRefresh == schema) - val partColsAfterRefresh = - DDLUtils.getPartitionColumnsFromTableProperties(tableMetadataAfterRefresh) - assert(partColsAfterRefresh == partitionCols) + assert(tableMetadataAfterRefresh.schema == schema) + assert(tableMetadataAfterRefresh.partitionColumnNames == partitionCols) } } } @@ -641,7 +593,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { val table = catalog.getTableMetadata(TableIdentifier("tbl")) assert(table.tableType == CatalogTableType.MANAGED) assert(table.schema == new StructType().add("a", "int").add("b", "int")) - assert(table.properties(DATASOURCE_PROVIDER) == "parquet") + assert(table.provider == Some("parquet")) } } @@ -651,12 +603,9 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { sql("CREATE TABLE tbl(a INT, b INT) USING parquet PARTITIONED BY (a)") val table = catalog.getTableMetadata(TableIdentifier("tbl")) assert(table.tableType == CatalogTableType.MANAGED) - assert(table.schema.isEmpty) // partitioned datasource table is not hive-compatible - assert(table.properties(DATASOURCE_PROVIDER) == "parquet") - assert(DDLUtils.getSchemaFromTableProperties(table) == - new StructType().add("a", IntegerType).add("b", IntegerType)) - assert(DDLUtils.getPartitionColumnsFromTableProperties(table) == - Seq("a")) + assert(table.provider == Some("parquet")) + assert(table.schema == new StructType().add("a", IntegerType).add("b", IntegerType)) + assert(table.partitionColumnNames == Seq("a")) } } @@ -667,12 +616,9 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { "CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS") val table = catalog.getTableMetadata(TableIdentifier("tbl")) assert(table.tableType == CatalogTableType.MANAGED) - assert(table.schema.isEmpty) // partitioned datasource table is not hive-compatible - assert(table.properties(DATASOURCE_PROVIDER) == "parquet") - assert(DDLUtils.getSchemaFromTableProperties(table) == - new StructType().add("a", IntegerType).add("b", IntegerType)) - assert(DDLUtils.getBucketSpecFromTableProperties(table) == - Some(BucketSpec(5, Seq("a"), Seq("b")))) + assert(table.provider == Some("parquet")) + assert(table.schema == new StructType().add("a", IntegerType).add("b", IntegerType)) + assert(table.bucketSpec == Some(BucketSpec(5, Seq("a"), Seq("b")))) } } @@ -1096,7 +1042,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { catalog: SessionCatalog, tableIdent: TableIdentifier): Unit = { catalog.alterTable(catalog.getTableMetadata(tableIdent).copy( - properties = Map(DATASOURCE_PROVIDER -> "csv"))) + provider = Some("csv"))) } private def testSetProperties(isDatasourceTable: Boolean): Unit = { @@ -1108,9 +1054,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { convertToDatasourceTable(catalog, tableIdent) } def getProps: Map[String, String] = { - 
catalog.getTableMetadata(tableIdent).properties.filterKeys { k => - !isDatasourceTable || !k.startsWith(DATASOURCE_PREFIX) - } + catalog.getTableMetadata(tableIdent).properties } assert(getProps.isEmpty) // set table properties @@ -1124,11 +1068,6 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { intercept[AnalysisException] { sql("ALTER TABLE does_not_exist SET TBLPROPERTIES ('winner' = 'loser')") } - // datasource table property keys are not allowed - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE tab1 SET TBLPROPERTIES ('${DATASOURCE_PREFIX}foo' = 'loser')") - } - assert(e.getMessage.contains(DATASOURCE_PREFIX + "foo")) } private def testUnsetProperties(isDatasourceTable: Boolean): Unit = { @@ -1140,9 +1079,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { convertToDatasourceTable(catalog, tableIdent) } def getProps: Map[String, String] = { - catalog.getTableMetadata(tableIdent).properties.filterKeys { k => - !isDatasourceTable || !k.startsWith(DATASOURCE_PREFIX) - } + catalog.getTableMetadata(tableIdent).properties } // unset table properties sql("ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('j' = 'am', 'p' = 'an', 'c' = 'lan', 'x' = 'y')") @@ -1164,11 +1101,6 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { // property to unset does not exist, but "IF EXISTS" is specified sql("ALTER TABLE tab1 UNSET TBLPROPERTIES IF EXISTS ('c', 'xyz')") assert(getProps == Map("x" -> "y")) - // datasource table property keys are not allowed - val e2 = intercept[AnalysisException] { - sql(s"ALTER TABLE tab1 UNSET TBLPROPERTIES ('${DATASOURCE_PREFIX}foo')") - } - assert(e2.getMessage.contains(DATASOURCE_PREFIX + "foo")) } private def testSetLocation(isDatasourceTable: Boolean): Unit = { @@ -1573,10 +1505,6 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { } } - test("create table with datasource properties (not allowed)") { - assertUnsupported("CREATE TABLE my_tab TBLPROPERTIES ('spark.sql.sources.me'='anything')") - } - test("Create Hive Table As Select") { import testImplicits._ withTable("t", "t1") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala index 49153f77362b7..729c9fdda543e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala @@ -201,7 +201,7 @@ class CreateTableAsSelectSuite """.stripMargin ) val table = catalog.getTableMetadata(TableIdentifier("t")) - assert(DDLUtils.getPartitionColumnsFromTableProperties(table) == Seq("a")) + assert(table.partitionColumnNames == Seq("a")) } } @@ -217,8 +217,7 @@ class CreateTableAsSelectSuite """.stripMargin ) val table = catalog.getTableMetadata(TableIdentifier("t")) - assert(DDLUtils.getBucketSpecFromTableProperties(table) == - Option(BucketSpec(5, Seq("a"), Seq("b")))) + assert(table.bucketSpec == Option(BucketSpec(5, Seq("a"), Seq("b")))) } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index 8302e3e98ad34..de3e60a44d920 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -30,7 +30,11 @@ import 
org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog._ +import org.apache.spark.sql.execution.command.DDLUtils +import org.apache.spark.sql.execution.datasources.CaseInsensitiveMap import org.apache.spark.sql.hive.client.HiveClient +import org.apache.spark.sql.internal.HiveSerDe +import org.apache.spark.sql.types.{DataType, StructType} /** @@ -41,6 +45,8 @@ private[spark] class HiveExternalCatalog(client: HiveClient, hadoopConf: Configu extends ExternalCatalog with Logging { import CatalogTypes.TablePartitionSpec + import HiveExternalCatalog._ + import CatalogTableType._ // Exceptions thrown by the hive client that we would like to wrap private val clientExceptions = Set( @@ -81,6 +87,20 @@ private[spark] class HiveExternalCatalog(client: HiveClient, hadoopConf: Configu withClient { getTable(db, table) } } + /** + * If the given table properties contains datasource properties, throw an exception. We will do + * this check when create or alter a table, i.e. when we try to write table metadata to Hive + * metastore. + */ + private def verifyTableProperties(table: CatalogTable): Unit = { + val datasourceKeys = table.properties.keys.filter(_.startsWith(DATASOURCE_PREFIX)) + if (datasourceKeys.nonEmpty) { + throw new AnalysisException(s"Cannot persistent ${table.qualifiedName} into hive metastore " + + s"as table property keys may not start with '$DATASOURCE_PREFIX': " + + datasourceKeys.mkString("[", ", ", "]")) + } + } + // -------------------------------------------------------------------------- // Databases // -------------------------------------------------------------------------- @@ -144,16 +164,162 @@ private[spark] class HiveExternalCatalog(client: HiveClient, hadoopConf: Configu assert(tableDefinition.identifier.database.isDefined) val db = tableDefinition.identifier.database.get requireDbExists(db) + verifyTableProperties(tableDefinition) + + // Before saving data source table metadata into Hive metastore, we should: + // 1. Put table schema, partition column names and bucket specification in table properties. + // 2. Check if this table is hive compatible + // 2.1 If it's not hive compatible, set schema, partition columns and bucket spec to empty + // and save table metadata to Hive. + // 2.1 If it's hive compatible, set serde information in table metadata and try to save + // it to Hive. If it fails, treat it as not hive compatible and go back to 2.1 + if (DDLUtils.isDatasourceTable(tableDefinition)) { + // data source table always have a provider, it's guaranteed by `DDLUtils.isDatasourceTable`. + val provider = tableDefinition.provider.get + val partitionColumns = tableDefinition.partitionColumnNames + val bucketSpec = tableDefinition.bucketSpec + + val tableProperties = new scala.collection.mutable.HashMap[String, String] + tableProperties.put(DATASOURCE_PROVIDER, provider) + + // Serialized JSON schema string may be too long to be stored into a single metastore table + // property. In this case, we split the JSON string and store each part as a separate table + // property. + // TODO: the threshold should be set by `spark.sql.sources.schemaStringLengthThreshold`, + // however the current SQLConf is session isolated, which is not applicable to external + // catalog. We should re-enable this conf instead of hard code the value here, after we have + // global SQLConf. 
+ val threshold = 4000 + val schemaJsonString = tableDefinition.schema.json + // Split the JSON string. + val parts = schemaJsonString.grouped(threshold).toSeq + tableProperties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString) + parts.zipWithIndex.foreach { case (part, index) => + tableProperties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part) + } + + if (partitionColumns.nonEmpty) { + tableProperties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString) + partitionColumns.zipWithIndex.foreach { case (partCol, index) => + tableProperties.put(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index", partCol) + } + } + + if (bucketSpec.isDefined) { + val BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) = bucketSpec.get + + tableProperties.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString) + tableProperties.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString) + bucketColumnNames.zipWithIndex.foreach { case (bucketCol, index) => + tableProperties.put(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index", bucketCol) + } + + if (sortColumnNames.nonEmpty) { + tableProperties.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString) + sortColumnNames.zipWithIndex.foreach { case (sortCol, index) => + tableProperties.put(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index", sortCol) + } + } + } + + // converts the table metadata to Spark SQL specific format, i.e. set schema, partition column + // names and bucket specification to empty. + def newSparkSQLSpecificMetastoreTable(): CatalogTable = { + tableDefinition.copy( + schema = new StructType, + partitionColumnNames = Nil, + bucketSpec = None, + properties = tableDefinition.properties ++ tableProperties) + } + + // converts the table metadata to Hive compatible format, i.e. set the serde information. + def newHiveCompatibleMetastoreTable(serde: HiveSerDe, path: String): CatalogTable = { + tableDefinition.copy( + storage = tableDefinition.storage.copy( + locationUri = Some(new Path(path).toUri.toString), + inputFormat = serde.inputFormat, + outputFormat = serde.outputFormat, + serde = serde.serde + ), + properties = tableDefinition.properties ++ tableProperties) + } + + val qualifiedTableName = tableDefinition.identifier.quotedString + val maybeSerde = HiveSerDe.sourceToSerDe(tableDefinition.provider.get) + val maybePath = new CaseInsensitiveMap(tableDefinition.storage.properties).get("path") + val skipHiveMetadata = tableDefinition.storage.properties + .getOrElse("skipHiveMetadata", "false").toBoolean + + val (hiveCompatibleTable, logMessage) = (maybeSerde, maybePath) match { + case _ if skipHiveMetadata => + val message = + s"Persisting data source table $qualifiedTableName into Hive metastore in" + + "Spark SQL specific format, which is NOT compatible with Hive." + (None, message) + + // our bucketing is un-compatible with hive(different hash function) + case _ if tableDefinition.bucketSpec.nonEmpty => + val message = + s"Persisting bucketed data source table $qualifiedTableName into " + + "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " + (None, message) + + case (Some(serde), Some(path)) => + val message = + s"Persisting file based data source table $qualifiedTableName with an input path " + + s"into Hive metastore in Hive compatible format." + (Some(newHiveCompatibleMetastoreTable(serde, path)), message) + + case (Some(_), None) => + val message = + s"Data source table $qualifiedTableName is not file based. 
Persisting it into " + + s"Hive metastore in Spark SQL specific format, which is NOT compatible with Hive." + (None, message) + + case _ => + val provider = tableDefinition.provider.get + val message = + s"Couldn't find corresponding Hive SerDe for data source provider $provider. " + + s"Persisting data source table $qualifiedTableName into Hive metastore in " + + s"Spark SQL specific format, which is NOT compatible with Hive." + (None, message) + } + + (hiveCompatibleTable, logMessage) match { + case (Some(table), message) => + // We first try to save the metadata of the table in a Hive compatible way. + // If Hive throws an error, we fall back to save its metadata in the Spark SQL + // specific way. + try { + logInfo(message) + saveTableIntoHive(table, ignoreIfExists) + } catch { + case NonFatal(e) => + val warningMessage = + s"Could not persist ${tableDefinition.identifier.quotedString} in a Hive " + + "compatible way. Persisting it into Hive metastore in Spark SQL specific format." + logWarning(warningMessage, e) + saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists) + } + + case (None, message) => + logWarning(message) + saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists) + } + } else { + client.createTable(tableDefinition, ignoreIfExists) + } + } - if ( + private def saveTableIntoHive(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = { + assert(DDLUtils.isDatasourceTable(tableDefinition), + "saveTableIntoHive only takes data source table.") // If this is an external data source table... - tableDefinition.properties.contains("spark.sql.sources.provider") && - tableDefinition.tableType == CatalogTableType.EXTERNAL && - // ... that is not persisted as Hive compatible format (external tables in Hive compatible - // format always set `locationUri` to the actual data location and should NOT be hacked as - // following.) - tableDefinition.storage.locationUri.isEmpty - ) { + if (tableDefinition.tableType == EXTERNAL && + // ... that is not persisted as Hive compatible format (external tables in Hive compatible + // format always set `locationUri` to the actual data location and should NOT be hacked as + // following.) + tableDefinition.storage.locationUri.isEmpty) { // !! HACK ALERT !! // // Due to a restriction of Hive metastore, here we have to set `locationUri` to a temporary @@ -200,22 +366,79 @@ private[spark] class HiveExternalCatalog(client: HiveClient, hadoopConf: Configu * Alter a table whose name that matches the one specified in `tableDefinition`, * assuming the table exists. * - * Note: As of now, this only supports altering table properties, serde properties, - * and num buckets! + * Note: As of now, this doesn't support altering table schema, partition column names and bucket + * specification. We will ignore them even if users do specify different values for these fields. */ override def alterTable(tableDefinition: CatalogTable): Unit = withClient { assert(tableDefinition.identifier.database.isDefined) val db = tableDefinition.identifier.database.get requireTableExists(db, tableDefinition.identifier.table) - client.alterTable(tableDefinition) + verifyTableProperties(tableDefinition) + + if (DDLUtils.isDatasourceTable(tableDefinition)) { + val oldDef = client.getTable(db, tableDefinition.identifier.table) + // Sets the `schema`, `partitionColumnNames` and `bucketSpec` from the old table definition, + // to retain the spark specific format if it is. 
Also add old data source properties to table + // properties, to retain the data source table format. + val oldDataSourceProps = oldDef.properties.filter(_._1.startsWith(DATASOURCE_PREFIX)) + val newDef = tableDefinition.copy( + schema = oldDef.schema, + partitionColumnNames = oldDef.partitionColumnNames, + bucketSpec = oldDef.bucketSpec, + properties = oldDataSourceProps ++ tableDefinition.properties) + + client.alterTable(newDef) + } else { + client.alterTable(tableDefinition) + } } override def getTable(db: String, table: String): CatalogTable = withClient { - client.getTable(db, table) + restoreTableMetadata(client.getTable(db, table)) } override def getTableOption(db: String, table: String): Option[CatalogTable] = withClient { - client.getTableOption(db, table) + client.getTableOption(db, table).map(restoreTableMetadata) + } + + /** + * Restores table metadata from the table properties if it's a datasouce table. This method is + * kind of a opposite version of [[createTable]]. + * + * It reads table schema, provider, partition column names and bucket specification from table + * properties, and filter out these special entries from table properties. + */ + private def restoreTableMetadata(table: CatalogTable): CatalogTable = { + if (table.tableType == VIEW) { + table + } else { + getProviderFromTableProperties(table).map { provider => + assert(provider != "hive", "Hive serde table should not save provider in table properties.") + // SPARK-15269: Persisted data source tables always store the location URI as a storage + // property named "path" instead of standard Hive `dataLocation`, because Hive only + // allows directory paths as location URIs while Spark SQL data source tables also + // allows file paths. So the standard Hive `dataLocation` is meaningless for Spark SQL + // data source tables. + // Spark SQL may also save external data source in Hive compatible format when + // possible, so that these tables can be directly accessed by Hive. For these tables, + // `dataLocation` is still necessary. Here we also check for input format because only + // these Hive compatible tables set this field. + val storage = if (table.tableType == EXTERNAL && table.storage.inputFormat.isEmpty) { + table.storage.copy(locationUri = None) + } else { + table.storage + } + table.copy( + storage = storage, + schema = getSchemaFromTableProperties(table), + provider = Some(provider), + partitionColumnNames = getPartitionColumnsFromTableProperties(table), + bucketSpec = getBucketSpecFromTableProperties(table), + properties = getOriginalTableProperties(table)) + } getOrElse { + table.copy(provider = Some("hive")) + } + } } override def tableExists(db: String, table: String): Boolean = withClient { @@ -363,3 +586,82 @@ private[spark] class HiveExternalCatalog(client: HiveClient, hadoopConf: Configu } } + +object HiveExternalCatalog { + val DATASOURCE_PREFIX = "spark.sql.sources." + val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider" + val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema" + val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "." + val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts" + val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols" + val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols" + val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets" + val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols" + val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part." 
+ val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol." + val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol." + val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol." + + def getProviderFromTableProperties(metadata: CatalogTable): Option[String] = { + metadata.properties.get(DATASOURCE_PROVIDER) + } + + def getOriginalTableProperties(metadata: CatalogTable): Map[String, String] = { + metadata.properties.filterNot { case (key, _) => key.startsWith(DATASOURCE_PREFIX) } + } + + // A persisted data source table always store its schema in the catalog. + def getSchemaFromTableProperties(metadata: CatalogTable): StructType = { + val errorMessage = "Could not read schema from the hive metastore because it is corrupted." + val props = metadata.properties + props.get(DATASOURCE_SCHEMA).map { schema => + // Originally, we used `spark.sql.sources.schema` to store the schema of a data source table. + // After SPARK-6024, we removed this flag. + // Although we are not using `spark.sql.sources.schema` any more, we need to still support. + DataType.fromJson(schema).asInstanceOf[StructType] + } getOrElse { + props.get(DATASOURCE_SCHEMA_NUMPARTS).map { numParts => + val parts = (0 until numParts.toInt).map { index => + val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull + if (part == null) { + throw new AnalysisException(errorMessage + + s" (missing part $index of the schema, $numParts parts are expected).") + } + part + } + // Stick all parts back to a single schema string. + DataType.fromJson(parts.mkString).asInstanceOf[StructType] + } getOrElse { + throw new AnalysisException(errorMessage) + } + } + } + + private def getColumnNamesByType( + props: Map[String, String], + colType: String, + typeName: String): Seq[String] = { + for { + numCols <- props.get(s"spark.sql.sources.schema.num${colType.capitalize}Cols").toSeq + index <- 0 until numCols.toInt + } yield props.getOrElse( + s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index", + throw new AnalysisException( + s"Corrupted $typeName in catalog: $numCols parts expected, but part $index is missing." 
+ ) + ) + } + + def getPartitionColumnsFromTableProperties(metadata: CatalogTable): Seq[String] = { + getColumnNamesByType(metadata.properties, "part", "partitioning columns") + } + + def getBucketSpecFromTableProperties(metadata: CatalogTable): Option[BucketSpec] = { + metadata.properties.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { numBuckets => + BucketSpec( + numBuckets.toInt, + getColumnNamesByType(metadata.properties, "bucket", "bucketing columns"), + getColumnNamesByType(metadata.properties, "sort", "sorting columns")) + } + } +} diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index 7118edabb83cf..181f470b2a100 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules._ -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ +import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.{Partition => _, _} import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat, ParquetOptions} import org.apache.spark.sql.hive.orc.OrcFileFormat @@ -68,64 +68,16 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log val cacheLoader = new CacheLoader[QualifiedTableName, LogicalPlan]() { override def load(in: QualifiedTableName): LogicalPlan = { logDebug(s"Creating new cached data source for $in") - val table = client.getTable(in.database, in.name) + val table = sparkSession.sharedState.externalCatalog.getTable(in.database, in.name) - // TODO: the following code is duplicated with FindDataSourceTable.readDataSourceTable - - def schemaStringFromParts: Option[String] = { - table.properties.get(DATASOURCE_SCHEMA_NUMPARTS).map { numParts => - val parts = (0 until numParts.toInt).map { index => - val part = table.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull - if (part == null) { - throw new AnalysisException( - "Could not read schema from the metastore because it is corrupted " + - s"(missing part $index of the schema, $numParts parts are expected).") - } - - part - } - // Stick all parts back to a single schema string. - parts.mkString - } - } - - def getColumnNames(colType: String): Seq[String] = { - table.properties.get(s"$DATASOURCE_SCHEMA.num${colType.capitalize}Cols").map { - numCols => (0 until numCols.toInt).map { index => - table.properties.getOrElse(s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index", - throw new AnalysisException( - s"Could not read $colType columns from the metastore because it is corrupted " + - s"(missing part $index of it, $numCols parts are expected).")) - } - }.getOrElse(Nil) - } - - // Originally, we used spark.sql.sources.schema to store the schema of a data source table. - // After SPARK-6024, we removed this flag. - // Although we are not using spark.sql.sources.schema any more, we need to still support. 
- val schemaString = table.properties.get(DATASOURCE_SCHEMA).orElse(schemaStringFromParts) - - val userSpecifiedSchema = - schemaString.map(s => DataType.fromJson(s).asInstanceOf[StructType]) - - // We only need names at here since userSpecifiedSchema we loaded from the metastore - // contains partition columns. We can always get data types of partitioning columns - // from userSpecifiedSchema. - val partitionColumns = getColumnNames("part") - - val bucketSpec = table.properties.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { n => - BucketSpec(n.toInt, getColumnNames("bucket"), getColumnNames("sort")) - } - - val options = table.storage.properties val dataSource = DataSource( sparkSession, - userSpecifiedSchema = userSpecifiedSchema, - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, - className = table.properties(DATASOURCE_PROVIDER), - options = options) + userSpecifiedSchema = Some(table.schema), + partitionColumns = table.partitionColumnNames, + bucketSpec = table.bucketSpec, + className = table.provider.get, + options = table.storage.properties) LogicalRelation( dataSource.resolveRelation(checkPathExist = true), @@ -158,9 +110,10 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log tableIdent: TableIdentifier, alias: Option[String]): LogicalPlan = { val qualifiedTableName = getQualifiedTableName(tableIdent) - val table = client.getTable(qualifiedTableName.database, qualifiedTableName.name) + val table = sparkSession.sharedState.externalCatalog.getTable( + qualifiedTableName.database, qualifiedTableName.name) - if (table.properties.get(DATASOURCE_PROVIDER).isDefined) { + if (DDLUtils.isDatasourceTable(table)) { val dataSourceTable = cachedDataSourceTables(qualifiedTableName) val qualifiedTable = SubqueryAlias(qualifiedTableName.name, dataSourceTable, None) // Then, if alias is specified, wrap the table with a Subquery using the alias. diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index f8204e183f03a..9b7afd462841c 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -45,7 +45,6 @@ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParseException} import org.apache.spark.sql.execution.QueryExecutionException -import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.types.{StructField, StructType} import org.apache.spark.util.{CircularBuffer, Utils} @@ -392,20 +391,7 @@ private[hive] class HiveClientImpl( createTime = h.getTTable.getCreateTime.toLong * 1000, lastAccessTime = h.getLastAccessTime.toLong * 1000, storage = CatalogStorageFormat( - locationUri = shim.getDataLocation(h).filterNot { _ => - // SPARK-15269: Persisted data source tables always store the location URI as a SerDe - // property named "path" instead of standard Hive `dataLocation`, because Hive only - // allows directory paths as location URIs while Spark SQL data source tables also - // allows file paths. So the standard Hive `dataLocation` is meaningless for Spark SQL - // data source tables. 
- DDLUtils.isDatasourceTable(properties) && - h.getTableType == HiveTableType.EXTERNAL_TABLE && - // Spark SQL may also save external data source in Hive compatible format when - // possible, so that these tables can be directly accessed by Hive. For these tables, - // `dataLocation` is still necessary. Here we also check for input format class - // because only these Hive compatible tables set this field. - h.getInputFormatClass == null - }, + locationUri = shim.getDataLocation(h), inputFormat = Option(h.getInputFormatClass).map(_.getName), outputFormat = Option(h.getOutputFormatClass).map(_.getName), serde = Option(h.getSerializationLib), diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala index c74d948a6fa52..286197b50e229 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala @@ -34,7 +34,6 @@ import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit} import org.apache.spark.sql.{Row, SparkSession} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.hive.{HiveInspectors, HiveShim} import org.apache.spark.sql.sources.{Filter, _} @@ -222,7 +221,7 @@ private[orc] class OrcOutputWriter( private lazy val recordWriter: RecordWriter[NullWritable, Writable] = { recordWriterInstantiated = true - val uniqueWriteJobId = conf.get(CreateDataSourceTableUtils.DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = conf.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val partition = taskAttemptId.getTaskID.getId val bucketString = bucketId.map(BucketingUtils.bucketIdToString).getOrElse("") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala index 3892fe87e2a80..571ba49d115f8 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala @@ -26,9 +26,9 @@ import org.apache.hadoop.fs.Path import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} -import org.apache.spark.sql.catalyst.parser.CatalystSqlParser -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation} +import org.apache.spark.sql.hive.HiveExternalCatalog._ +import org.apache.spark.sql.hive.client.HiveClient import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SQLTestUtils @@ -49,6 +49,10 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv jsonFilePath = Utils.getSparkClassLoader.getResource("sample.json").getFile } + // To test `HiveExternalCatalog`, we need to read the raw table metadata(schema, partition + // columns and bucket specification are still in table properties) from hive client. 
+ private def hiveClient: HiveClient = sharedState.asInstanceOf[HiveSharedState].metadataHive + test("persistent JSON table") { withTable("jsonTable") { sql( @@ -697,18 +701,18 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv withTable("wide_schema") { withTempDir { tempDir => // We will need 80 splits for this schema if the threshold is 4000. - val schema = StructType((1 to 5000).map(i => StructField(s"c_$i", StringType, true))) - - // Manually create a metastore data source table. - createDataSourceTable( - sparkSession = spark, - tableIdent = TableIdentifier("wide_schema"), + val schema = StructType((1 to 5000).map(i => StructField(s"c_$i", StringType))) + + val tableDesc = CatalogTable( + identifier = TableIdentifier("wide_schema"), + tableType = CatalogTableType.EXTERNAL, + storage = CatalogStorageFormat.empty.copy( + properties = Map("path" -> tempDir.getCanonicalPath) + ), schema = schema, - partitionColumns = Array.empty[String], - bucketSpec = None, - provider = "json", - options = Map("path" -> tempDir.getCanonicalPath), - isExternal = false) + provider = Some("json") + ) + spark.sessionState.catalog.createTable(tableDesc, ignoreIfExists = false) sessionState.refreshTable("wide_schema") @@ -741,7 +745,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv DATASOURCE_SCHEMA -> schema.json, "EXTERNAL" -> "FALSE")) - sharedState.externalCatalog.createTable(hiveTable, ignoreIfExists = false) + hiveClient.createTable(hiveTable, ignoreIfExists = false) sessionState.refreshTable(tableName) val actualSchema = table(tableName).schema @@ -759,7 +763,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv withTable(tableName) { df.write.format("parquet").partitionBy("d", "b").saveAsTable(tableName) sessionState.refreshTable(tableName) - val metastoreTable = sharedState.externalCatalog.getTable("default", tableName) + val metastoreTable = hiveClient.getTable("default", tableName) val expectedPartitionColumns = StructType(df.schema("d") :: df.schema("b") :: Nil) val numPartCols = metastoreTable.properties(DATASOURCE_SCHEMA_NUMPARTCOLS).toInt @@ -794,7 +798,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv .sortBy("c") .saveAsTable(tableName) sessionState.refreshTable(tableName) - val metastoreTable = sharedState.externalCatalog.getTable("default", tableName) + val metastoreTable = hiveClient.getTable("default", tableName) val expectedBucketByColumns = StructType(df.schema("d") :: df.schema("b") :: Nil) val expectedSortByColumns = StructType(df.schema("c") :: Nil) @@ -985,35 +989,37 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv withTempDir { tempPath => val schema = StructType((1 to 5).map(i => StructField(s"c_$i", StringType))) - createDataSourceTable( - sparkSession = spark, - tableIdent = TableIdentifier("not_skip_hive_metadata"), + val tableDesc1 = CatalogTable( + identifier = TableIdentifier("not_skip_hive_metadata"), + tableType = CatalogTableType.EXTERNAL, + storage = CatalogStorageFormat.empty.copy( + properties = Map("path" -> tempPath.getCanonicalPath, "skipHiveMetadata" -> "false") + ), schema = schema, - partitionColumns = Array.empty[String], - bucketSpec = None, - provider = "parquet", - options = Map("path" -> tempPath.getCanonicalPath, "skipHiveMetadata" -> "false"), - isExternal = false) + provider = Some("parquet") + ) + spark.sessionState.catalog.createTable(tableDesc1, ignoreIfExists = false) // As a proxy 
for verifying that the table was stored in Hive compatible format, // we verify that each column of the table is of native type StringType. - assert(sharedState.externalCatalog.getTable("default", "not_skip_hive_metadata").schema + assert(hiveClient.getTable("default", "not_skip_hive_metadata").schema .forall(_.dataType == StringType)) - createDataSourceTable( - sparkSession = spark, - tableIdent = TableIdentifier("skip_hive_metadata"), + val tableDesc2 = CatalogTable( + identifier = TableIdentifier("skip_hive_metadata", Some("default")), + tableType = CatalogTableType.EXTERNAL, + storage = CatalogStorageFormat.empty.copy( + properties = Map("path" -> tempPath.getCanonicalPath, "skipHiveMetadata" -> "true") + ), schema = schema, - partitionColumns = Array.empty[String], - bucketSpec = None, - provider = "parquet", - options = Map("path" -> tempPath.getCanonicalPath, "skipHiveMetadata" -> "true"), - isExternal = false) + provider = Some("parquet") + ) + spark.sessionState.catalog.createTable(tableDesc2, ignoreIfExists = false) // As a proxy for verifying that the table was stored in SparkSQL format, // we verify that the table has a column type as array of StringType. - assert(sharedState.externalCatalog.getTable("default", "skip_hive_metadata") - .schema.forall(_.dataType == ArrayType(StringType))) + assert(hiveClient.getTable("default", "skip_hive_metadata").schema + .forall(_.dataType == ArrayType(StringType))) } } @@ -1030,7 +1036,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv """.stripMargin ) - val metastoreTable = sharedState.externalCatalog.getTable("default", "t") + val metastoreTable = hiveClient.getTable("default", "t") assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMPARTCOLS).toInt === 1) assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMBUCKETS)) assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMBUCKETCOLS)) @@ -1054,7 +1060,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv """.stripMargin ) - val metastoreTable = sharedState.externalCatalog.getTable("default", "t") + val metastoreTable = hiveClient.getTable("default", "t") assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMPARTCOLS)) assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt === 2) assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt === 1) @@ -1076,7 +1082,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv """.stripMargin ) - val metastoreTable = sharedState.externalCatalog.getTable("default", "t") + val metastoreTable = hiveClient.getTable("default", "t") assert(!metastoreTable.properties.contains(DATASOURCE_SCHEMA_NUMPARTCOLS)) assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt === 2) assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt === 1) @@ -1101,7 +1107,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv """.stripMargin ) - val metastoreTable = sharedState.externalCatalog.getTable("default", "t") + val metastoreTable = hiveClient.getTable("default", "t") assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMPARTCOLS).toInt === 1) assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETS).toInt === 2) assert(metastoreTable.properties(DATASOURCE_SCHEMA_NUMBUCKETCOLS).toInt === 1) @@ -1168,7 +1174,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv ) sql("insert into t values (2, 3, 4)") checkAnswer(table("t"), 
Seq(Row(1, 2, 3), Row(2, 3, 4))) - val catalogTable = sharedState.externalCatalog.getTable("default", "t") + val catalogTable = hiveClient.getTable("default", "t") // there should not be a lowercase key 'path' now assert(catalogTable.storage.properties.get("path").isEmpty) assert(catalogTable.storage.properties.get("PATH").isDefined) @@ -1188,4 +1194,28 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv } } } + + test("read table with corrupted schema") { + try { + val schema = StructType(StructField("int", IntegerType, true) :: Nil) + val hiveTable = CatalogTable( + identifier = TableIdentifier("t", Some("default")), + tableType = CatalogTableType.MANAGED, + schema = new StructType, + storage = CatalogStorageFormat.empty, + properties = Map( + DATASOURCE_PROVIDER -> "json", + // no DATASOURCE_SCHEMA_NUMPARTS + DATASOURCE_SCHEMA_PART_PREFIX + 0 -> schema.json)) + + hiveClient.createTable(hiveTable, ignoreIfExists = false) + + val e = intercept[AnalysisException] { + sharedState.externalCatalog.getTable("default", "t") + }.getMessage + assert(e.contains(s"Could not read schema from the hive metastore because it is corrupted")) + } finally { + hiveClient.dropTable("default", "t", ignoreIfNotExists = true, purge = true) + } + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala index 5d510197c4d95..76aa84b19410d 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala @@ -18,21 +18,32 @@ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} +import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.NoSuchTableException -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ +import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.test.SQLTestUtils +import org.apache.spark.sql.types.StructType class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { import testImplicits._ protected override def beforeAll(): Unit = { super.beforeAll() - sql( - """ - |CREATE TABLE parquet_tab1 (c1 INT, c2 STRING) - |USING org.apache.spark.sql.parquet.DefaultSource - """.stripMargin) + + // Use catalog to create table instead of SQL string here, because we don't support specifying + // table properties for data source table with SQL API now. 
+ hiveContext.sessionState.catalog.createTable( + CatalogTable( + identifier = TableIdentifier("parquet_tab1"), + tableType = CatalogTableType.MANAGED, + storage = CatalogStorageFormat.empty, + schema = new StructType().add("c1", "int").add("c2", "string"), + provider = Some("parquet"), + properties = Map("my_key1" -> "v1") + ), + ignoreIfExists = false + ) sql( """ @@ -101,23 +112,14 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto test("show tblproperties of data source tables - basic") { checkAnswer( - sql("SHOW TBLPROPERTIES parquet_tab1").filter(s"key = '$DATASOURCE_PROVIDER'"), - Row(DATASOURCE_PROVIDER, "org.apache.spark.sql.parquet.DefaultSource") :: Nil + sql("SHOW TBLPROPERTIES parquet_tab1").filter(s"key = 'my_key1'"), + Row("my_key1", "v1") :: Nil ) checkAnswer( - sql(s"SHOW TBLPROPERTIES parquet_tab1($DATASOURCE_PROVIDER)"), - Row("org.apache.spark.sql.parquet.DefaultSource") :: Nil + sql(s"SHOW TBLPROPERTIES parquet_tab1('my_key1')"), + Row("v1") :: Nil ) - - checkAnswer( - sql("SHOW TBLPROPERTIES parquet_tab1").filter(s"key = '$DATASOURCE_SCHEMA_NUMPARTS'"), - Row(DATASOURCE_SCHEMA_NUMPARTS, "1") :: Nil - ) - - checkAnswer( - sql(s"SHOW TBLPROPERTIES parquet_tab1('$DATASOURCE_SCHEMA_NUMPARTS')"), - Row("1")) } test("show tblproperties for datasource table - errors") { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index 970b6885f6254..f00a99b6d0b3d 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -692,4 +692,27 @@ class HiveDDLSuite )) } } + + test("datasource table property keys are not allowed") { + import org.apache.spark.sql.hive.HiveExternalCatalog.DATASOURCE_PREFIX + + withTable("tbl") { + sql("CREATE TABLE tbl(a INT) STORED AS parquet") + + val e = intercept[AnalysisException] { + sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${DATASOURCE_PREFIX}foo' = 'loser')") + } + assert(e.getMessage.contains(DATASOURCE_PREFIX + "foo")) + + val e2 = intercept[AnalysisException] { + sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${DATASOURCE_PREFIX}foo')") + } + assert(e2.getMessage.contains(DATASOURCE_PREFIX + "foo")) + + val e3 = intercept[AnalysisException] { + sql(s"CREATE TABLE tbl TBLPROPERTIES ('${DATASOURCE_PREFIX}foo'='anything')") + } + assert(e3.getMessage.contains(DATASOURCE_PREFIX + "foo")) + } + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala index e6fe47aa65f34..4ca882f840a58 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala @@ -29,7 +29,6 @@ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, FunctionRegistry} import org.apache.spark.sql.catalyst.catalog.CatalogTableType import org.apache.spark.sql.catalyst.parser.ParseException -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation} import org.apache.spark.sql.functions._ import org.apache.spark.sql.hive.{HiveUtils, MetastoreRelation} @@ -436,8 +435,7 @@ class SQLQuerySuite extends QueryTest with 
SQLTestUtils with TestHiveSingleton { assert(r.options("path") === location) case None => // OK. } - assert( - catalogTable.properties(CreateDataSourceTableUtils.DATASOURCE_PROVIDER) === format) + assert(catalogTable.provider.get === format) case r: MetastoreRelation => if (isDataSourceParquet) { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala index 67a58a3859b84..906de6bbcbee5 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala @@ -29,7 +29,6 @@ import org.apache.spark.sql.{sources, Row, SparkSession} import org.apache.spark.sql.catalyst.{expressions, InternalRow} import org.apache.spark.sql.catalyst.expressions.{Cast, Expression, GenericInternalRow, InterpretedPredicate, InterpretedProjection, JoinedRow, Literal} import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection -import org.apache.spark.sql.execution.command.CreateDataSourceTableUtils._ import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.types.{DataType, StructType} import org.apache.spark.util.SerializableConfiguration @@ -145,7 +144,7 @@ class AppendingTextOutputFormat(outputFile: Path) extends TextOutputFormat[NullW override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { val configuration = context.getConfiguration - val uniqueWriteJobId = configuration.get(DATASOURCE_WRITEJOBUUID) + val uniqueWriteJobId = configuration.get(WriterContainer.DATASOURCE_WRITEJOBUUID) val taskAttemptId = context.getTaskAttemptID val split = taskAttemptId.getTaskID.getId val name = FileOutputFormat.getOutputName(context) From 083de00cb608a7414aae99a639825482bebfea8a Mon Sep 17 00:00:00 2001 From: Richael Date: Mon, 22 Aug 2016 09:01:50 +0100 Subject: [PATCH 048/270] [SPARK-17127] Make unaligned access in unsafe available for AArch64 ## # What changes were proposed in this pull request? From the spark of version 2.0.0 , when MemoryMode.OFF_HEAP is set , whether the architecture supports unaligned access or not is checked. If the check doesn't pass, exception is raised. We know that AArch64 also supports unaligned access , but now only i386, x86, amd64, and X86_64 are included. I think we should include aarch64 when performing the check. ## How was this patch tested? Unit test suite Author: Richael Closes #14700 from yimuxi/zym_change_unsafe. --- .../unsafe/src/main/java/org/apache/spark/unsafe/Platform.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/Platform.java b/common/unsafe/src/main/java/org/apache/spark/unsafe/Platform.java index a2ee45c37e2b3..c892b9cdaf49c 100644 --- a/common/unsafe/src/main/java/org/apache/spark/unsafe/Platform.java +++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/Platform.java @@ -55,7 +55,7 @@ public final class Platform { // We at least know x86 and x64 support unaligned access. 
String arch = System.getProperty("os.arch", ""); //noinspection DynamicRegexReplaceableByCompiledPattern - _unaligned = arch.matches("^(i[3-6]86|x86(_64)?|x64|amd64)$"); + _unaligned = arch.matches("^(i[3-6]86|x86(_64)?|x64|amd64|aarch64)$"); } unaligned = _unaligned; } From 4b6c2cbcb109c7cef6087bae32d87cc3ddb69cf9 Mon Sep 17 00:00:00 2001 From: GraceH Date: Mon, 22 Aug 2016 09:03:46 +0100 Subject: [PATCH 049/270] [SPARK-16968] Document additional options in jdbc Writer ## What changes were proposed in this pull request? (Please fill in changes proposed in this fix) This is the document for previous JDBC Writer options. ## How was this patch tested? (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) Unit test has been added in previous PR. (If this patch involves UI changes, please attach a screenshot; otherwise, remove this) Author: GraceH Closes #14683 from GraceH/jdbc_options. --- docs/sql-programming-guide.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md index c89286d0e49d0..28cc88c322b7e 100644 --- a/docs/sql-programming-guide.md +++ b/docs/sql-programming-guide.md @@ -1058,6 +1058,20 @@ the Data Sources API. The following options are supported: The JDBC fetch size, which determines how many rows to fetch per round trip. This can help performance on JDBC drivers which default to low fetch size (eg. Oracle with 10 rows). + + + truncate + + This is a JDBC writer related option. When SaveMode.Overwrite is enabled, this option causes Spark to truncate an existing table instead of dropping and recreating it. This can be more efficient, and prevents the table metadata (e.g. indices) from being removed. However, it will not work in some cases, such as when the new data has a different schema. It defaults to false. + + + + + createTableOptions + + This is a JDBC writer related option. If specified, this option allows setting of database-specific table and partition options when creating a table. For example: CREATE TABLE t (name string) ENGINE=InnoDB. + +

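As a quick illustration of the two writer options documented above (`truncate` and `createTableOptions`), here is a minimal Scala sketch; it is not part of the patch, and the JDBC URL, table name, and credentials are placeholder values.

```scala
import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

object JdbcWriterOptionsExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("jdbc-writer-options").getOrCreate()
    import spark.implicits._

    val df = Seq(("alice", 1), ("bob", 2)).toDF("name", "id")

    // Placeholder connection settings.
    val props = new Properties()
    props.setProperty("user", "test")
    props.setProperty("password", "test")

    df.write
      .mode(SaveMode.Overwrite)
      // On overwrite, truncate the existing table instead of dropping and recreating it.
      .option("truncate", "true")
      // Database-specific clause appended to CREATE TABLE when Spark has to create the table.
      .option("createTableOptions", "ENGINE=InnoDB")
      .jdbc("jdbc:mysql://localhost:3306/test", "people", props)

    spark.stop()
  }
}
```

With `truncate` enabled, an overwrite keeps the existing table definition (and metadata such as indices) and only removes the rows, which fails if the new data has a different schema; `createTableOptions` only takes effect when Spark creates the table itself.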
From 8d35a6f68d6d733212674491cbf31bed73fada0f Mon Sep 17 00:00:00 2001 From: Davies Liu Date: Mon, 22 Aug 2016 16:16:03 +0800 Subject: [PATCH 050/270] [SPARK-17115][SQL] decrease the threshold when split expressions ## What changes were proposed in this pull request? In 2.0, we change the threshold of splitting expressions from 16K to 64K, which cause very bad performance on wide table, because the generated method can't be JIT compiled by default (above the limit of 8K bytecode). This PR will decrease it to 1K, based on the benchmark results for a wide table with 400 columns of LongType. It also fix a bug around splitting expression in whole-stage codegen (it should not split them). ## How was this patch tested? Added benchmark suite. Author: Davies Liu Closes #14692 from davies/split_exprs. --- .../expressions/codegen/CodeGenerator.scala | 9 ++-- .../aggregate/HashAggregateExec.scala | 2 - .../benchmark/BenchmarkWideTable.scala | 53 +++++++++++++++++++ 3 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BenchmarkWideTable.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala index 16fb1f683710f..4bd9ee03f96dd 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala @@ -584,15 +584,18 @@ class CodegenContext { * @param expressions the codes to evaluate expressions. */ def splitExpressions(row: String, expressions: Seq[String]): String = { - if (row == null) { + if (row == null || currentVars != null) { // Cannot split these expressions because they are not created from a row object. return expressions.mkString("\n") } val blocks = new ArrayBuffer[String]() val blockBuilder = new StringBuilder() for (code <- expressions) { - // We can't know how many byte code will be generated, so use the number of bytes as limit - if (blockBuilder.length > 64 * 1000) { + // We can't know how many bytecode will be generated, so use the length of source code + // as metric. A method should not go beyond 8K, otherwise it will not be JITted, should + // also not be too small, or it will have many function calls (for wide table), see the + // results in BenchmarkWideTable. 
+ if (blockBuilder.length > 1024) { blocks.append(blockBuilder.toString()) blockBuilder.clear() } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala index cfc47aba889aa..bd7efa606e0ce 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala @@ -603,8 +603,6 @@ case class HashAggregateExec( // create grouping key ctx.currentVars = input - // make sure that the generated code will not be splitted as multiple functions - ctx.INPUT_ROW = null val unsafeRowKeyCode = GenerateUnsafeProjection.createCode( ctx, groupingExpressions.map(e => BindReferences.bindReference[Expression](e, child.output))) val vectorizedRowKeys = ctx.generateExpressions( diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BenchmarkWideTable.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BenchmarkWideTable.scala new file mode 100644 index 0000000000000..9dcaca0ca93ee --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BenchmarkWideTable.scala @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.benchmark + +import org.apache.spark.util.Benchmark + + +/** + * Benchmark to measure performance for wide table. + * To run this: + * build/sbt "sql/test-only *benchmark.BenchmarkWideTable" + * + * Benchmarks in this file are skipped in normal builds. + */ +class BenchmarkWideTable extends BenchmarkBase { + + ignore("project on wide table") { + val N = 1 << 20 + val df = sparkSession.range(N) + val columns = (0 until 400).map{ i => s"id as id$i"} + val benchmark = new Benchmark("projection on wide table", N) + benchmark.addCase("wide table", numIters = 5) { iter => + df.selectExpr(columns : _*).queryExecution.toRdd.count() + } + benchmark.run() + + /** + * Here are some numbers with different split threshold: + * + * Split threshold methods Rate(M/s) Per Row(ns) + * 10 400 0.4 2279 + * 100 200 0.6 1554 + * 1k 37 0.9 1116 + * 8k 5 0.5 2025 + * 64k 1 0.0 21649 + */ + } +} From bd9655063bdba8836b4ec96ed115e5653e246b65 Mon Sep 17 00:00:00 2001 From: Jagadeesan Date: Mon, 22 Aug 2016 09:30:31 +0100 Subject: [PATCH 051/270] [SPARK-17085][STREAMING][DOCUMENTATION AND ACTUAL CODE DIFFERS - UNSUPPORTED OPERATIONS] Changes in Spark Stuctured Streaming doc in this link https://spark.apache.org/docs/2.0.0/structured-streaming-programming-guide.html#unsupported-operations Author: Jagadeesan Closes #14715 from jagadeesanas2/SPARK-17085. 
--- docs/structured-streaming-programming-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/structured-streaming-programming-guide.md b/docs/structured-streaming-programming-guide.md index e2c881bf4a604..226ff740a5d67 100644 --- a/docs/structured-streaming-programming-guide.md +++ b/docs/structured-streaming-programming-guide.md @@ -726,9 +726,9 @@ However, note that all of the operations applicable on static DataFrames/Dataset + Full outer join with a streaming Dataset is not supported - + Left outer join with a streaming Dataset on the left is not supported + + Left outer join with a streaming Dataset on the right is not supported - + Right outer join with a streaming Dataset on the right is not supported + + Right outer join with a streaming Dataset on the left is not supported - Any kind of joins between two streaming Datasets are not yet supported. From b264cbb16fb97116e630fb593adf5898a5a0e8fa Mon Sep 17 00:00:00 2001 From: Holden Karau Date: Mon, 22 Aug 2016 12:21:22 +0200 Subject: [PATCH 052/270] [SPARK-15113][PYSPARK][ML] Add missing num features num classes ## What changes were proposed in this pull request? Add missing `numFeatures` and `numClasses` to the wrapped Java models in PySpark ML pipelines. Also tag `DecisionTreeClassificationModel` as Experimental to match Scala doc. ## How was this patch tested? Extended doctests Author: Holden Karau Closes #12889 from holdenk/SPARK-15113-add-missing-numFeatures-numClasses. --- .../GeneralizedLinearRegression.scala | 2 + python/pyspark/ml/classification.py | 37 ++++++++++++++++--- python/pyspark/ml/regression.py | 22 ++++++++--- python/pyspark/ml/util.py | 16 ++++++++ 4 files changed, 66 insertions(+), 11 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala index 2bdc09e1db246..1d4dfd1147589 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala @@ -788,6 +788,8 @@ class GeneralizedLinearRegressionModel private[ml] ( @Since("2.0.0") override def write: MLWriter = new GeneralizedLinearRegressionModel.GeneralizedLinearRegressionModelWriter(this) + + override val numFeatures: Int = coefficients.size } @Since("2.0.0") diff --git a/python/pyspark/ml/classification.py b/python/pyspark/ml/classification.py index 6468007045691..33ada27454b72 100644 --- a/python/pyspark/ml/classification.py +++ b/python/pyspark/ml/classification.py @@ -43,6 +43,23 @@ 'OneVsRest', 'OneVsRestModel'] +@inherit_doc +class JavaClassificationModel(JavaPredictionModel): + """ + (Private) Java Model produced by a ``Classifier``. + Classes are indexed {0, 1, ..., numClasses - 1}. + To be mixed in with class:`pyspark.ml.JavaModel` + """ + + @property + @since("2.1.0") + def numClasses(self): + """ + Number of classes (values which the label can take).
+ """ + return self._call_java("numClasses") + + @inherit_doc class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol, @@ -212,7 +229,7 @@ def _checkThresholdConsistency(self): " threshold (%g) and thresholds (equivalent to %g)" % (t2, t)) -class LogisticRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable): +class LogisticRegressionModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ Model fitted by LogisticRegression. @@ -522,6 +539,10 @@ class DecisionTreeClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPred 1 >>> model.featureImportances SparseVector(1, {0: 1.0}) + >>> model.numFeatures + 1 + >>> model.numClasses + 2 >>> print(model.toDebugString) DecisionTreeClassificationModel (uid=...) of depth 1 with 3 nodes... >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) @@ -595,7 +616,8 @@ def _create_model(self, java_model): @inherit_doc -class DecisionTreeClassificationModel(DecisionTreeModel, JavaMLWritable, JavaMLReadable): +class DecisionTreeClassificationModel(DecisionTreeModel, JavaClassificationModel, JavaMLWritable, + JavaMLReadable): """ Model fitted by DecisionTreeClassifier. @@ -722,7 +744,8 @@ def _create_model(self, java_model): return RandomForestClassificationModel(java_model) -class RandomForestClassificationModel(TreeEnsembleModel, JavaMLWritable, JavaMLReadable): +class RandomForestClassificationModel(TreeEnsembleModel, JavaClassificationModel, JavaMLWritable, + JavaMLReadable): """ Model fitted by RandomForestClassifier. @@ -873,7 +896,8 @@ def getLossType(self): return self.getOrDefault(self.lossType) -class GBTClassificationModel(TreeEnsembleModel, JavaMLWritable, JavaMLReadable): +class GBTClassificationModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, + JavaMLReadable): """ Model fitted by GBTClassifier. @@ -1027,7 +1051,7 @@ def getModelType(self): return self.getOrDefault(self.modelType) -class NaiveBayesModel(JavaModel, JavaMLWritable, JavaMLReadable): +class NaiveBayesModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ Model fitted by NaiveBayes. @@ -1226,7 +1250,8 @@ def getInitialWeights(self): return self.getOrDefault(self.initialWeights) -class MultilayerPerceptronClassificationModel(JavaModel, JavaMLWritable, JavaMLReadable): +class MultilayerPerceptronClassificationModel(JavaModel, JavaPredictionModel, JavaMLWritable, + JavaMLReadable): """ .. note:: Experimental diff --git a/python/pyspark/ml/regression.py b/python/pyspark/ml/regression.py index 1ae2bd4e400e8..56312f672f71d 100644 --- a/python/pyspark/ml/regression.py +++ b/python/pyspark/ml/regression.py @@ -88,6 +88,8 @@ class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPrediction True >>> model.intercept == model2.intercept True + >>> model.numFeatures + 1 .. versionadded:: 1.4.0 """ @@ -126,7 +128,7 @@ def _create_model(self, java_model): return LinearRegressionModel(java_model) -class LinearRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable): +class LinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`LinearRegression`. 
@@ -654,6 +656,8 @@ class DecisionTreeRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredi 3 >>> model.featureImportances SparseVector(1, {0: 1.0}) + >>> model.numFeatures + 1 >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.transform(test0).head().prediction 0.0 @@ -719,7 +723,7 @@ def _create_model(self, java_model): @inherit_doc -class DecisionTreeModel(JavaModel): +class DecisionTreeModel(JavaModel, JavaPredictionModel): """ Abstraction for Decision Tree models. @@ -843,6 +847,8 @@ class RandomForestRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredi >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.transform(test0).head().prediction 0.0 + >>> model.numFeatures + 1 >>> model.trees [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...] >>> model.getNumTrees @@ -909,7 +915,8 @@ def _create_model(self, java_model): return RandomForestRegressionModel(java_model) -class RandomForestRegressionModel(TreeEnsembleModel, JavaMLWritable, JavaMLReadable): +class RandomForestRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, + JavaMLReadable): """ Model fitted by :class:`RandomForestRegressor`. @@ -958,6 +965,8 @@ class GBTRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, >>> model = gbt.fit(df) >>> model.featureImportances SparseVector(1, {0: 1.0}) + >>> model.numFeatures + 1 >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1]) True >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) @@ -1047,7 +1056,7 @@ def getLossType(self): return self.getOrDefault(self.lossType) -class GBTRegressionModel(TreeEnsembleModel, JavaMLWritable, JavaMLReadable): +class GBTRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`GBTRegressor`. @@ -1307,6 +1316,8 @@ class GeneralizedLinearRegression(JavaEstimator, HasLabelCol, HasFeaturesCol, Ha True >>> model.coefficients DenseVector([1.5..., -1.0...]) + >>> model.numFeatures + 2 >>> abs(model.intercept - 1.5) < 0.001 True >>> glr_path = temp_path + "/glr" @@ -1412,7 +1423,8 @@ def getLink(self): return self.getOrDefault(self.link) -class GeneralizedLinearRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable): +class GeneralizedLinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable, + JavaMLReadable): """ .. note:: Experimental diff --git a/python/pyspark/ml/util.py b/python/pyspark/ml/util.py index 4a31a298096fc..7d39c30122350 100644 --- a/python/pyspark/ml/util.py +++ b/python/pyspark/ml/util.py @@ -238,3 +238,19 @@ class JavaMLReadable(MLReadable): def read(cls): """Returns an MLReader instance for this class.""" return JavaMLReader(cls) + + +@inherit_doc +class JavaPredictionModel(): + """ + (Private) Java Model for prediction tasks (regression and classification). + To be mixed in with class:`pyspark.ml.JavaModel` + """ + + @property + @since("2.1.0") + def numFeatures(self): + """ + Returns the number of features the model was trained on. If unknown, returns -1 + """ + return self._call_java("numFeatures") From 209e1b3c0683a9106428e269e5041980b6cc327f Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Mon, 22 Aug 2016 10:03:48 -0700 Subject: [PATCH 053/270] [SPARKR][MINOR] Fix Cache Folder Path in Windows ## What changes were proposed in this pull request? This PR tries to fix the scheme of local cache folder in Windows. 
The name of the environment variable should be `LOCALAPPDATA` rather than `%LOCALAPPDATA%`. ## How was this patch tested? Manual test in Windows 7. Author: Junyang Qian Closes #14743 from junyangq/SPARKR-FixWindowsInstall. --- R/pkg/R/install.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/pkg/R/install.R b/R/pkg/R/install.R index 987bac7bebc0e..ff81e86835ff8 100644 --- a/R/pkg/R/install.R +++ b/R/pkg/R/install.R @@ -212,7 +212,7 @@ hadoop_version_name <- function(hadoopVersion) { # adapt to Spark context spark_cache_path <- function() { if (.Platform$OS.type == "windows") { - winAppPath <- Sys.getenv("%LOCALAPPDATA%", unset = NA) + winAppPath <- Sys.getenv("LOCALAPPDATA", unset = NA) if (is.na(winAppPath)) { msg <- paste("%LOCALAPPDATA% not found.", "Please define the environment variable", From 342278c09cf6e79ed4f63422988a6bbd1e7d8a91 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Mon, 22 Aug 2016 11:15:53 -0700 Subject: [PATCH 054/270] [SPARK-16320][DOC] Document G1 heap region's effect on spark 2.0 vs 1.6 ## What changes were proposed in this pull request? Collect GC discussion in one section, and documenting findings about G1 GC heap region size. ## How was this patch tested? Jekyll doc build Author: Sean Owen Closes #14732 from srowen/SPARK-16320. --- docs/tuning.md | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/docs/tuning.md b/docs/tuning.md index 976f2eb8a7b23..cbf37213aa724 100644 --- a/docs/tuning.md +++ b/docs/tuning.md @@ -122,21 +122,8 @@ large records. `R` is the storage space within `M` where cached blocks immune to being evicted by execution. The value of `spark.memory.fraction` should be set in order to fit this amount of heap space -comfortably within the JVM's old or "tenured" generation. Otherwise, when much of this space is -used for caching and execution, the tenured generation will be full, which causes the JVM to -significantly increase time spent in garbage collection. See -Java GC sizing documentation -for more information. - -The tenured generation size is controlled by the JVM's `NewRatio` parameter, which defaults to 2, -meaning that the tenured generation is 2 times the size of the new generation (the rest of the heap). -So, by default, the tenured generation occupies 2/3 or about 0.66 of the heap. A value of -0.6 for `spark.memory.fraction` keeps storage and execution memory within the old generation with -room to spare. If `spark.memory.fraction` is increased to, say, 0.8, then `NewRatio` may have to -increase to 6 or more. - -`NewRatio` is set as a JVM flag for executors, which means adding -`spark.executor.extraJavaOptions=-XX:NewRatio=x` to a Spark job's configuration. +comfortably within the JVM's old or "tenured" generation. See the discussion of advanced GC +tuning below for details. ## Determining Memory Consumption @@ -217,14 +204,22 @@ temporary objects created during task execution. Some steps which may be useful * Check if there are too many garbage collections by collecting GC stats. If a full GC is invoked multiple times for before a task completes, it means that there isn't enough memory available for executing tasks. -* In the GC stats that are printed, if the OldGen is close to being full, reduce the amount of - memory used for caching by lowering `spark.memory.storageFraction`; it is better to cache fewer - objects than to slow down task execution! - * If there are too many minor collections but not many major GCs, allocating more memory for Eden would help. 
You can set the size of the Eden to be an over-estimate of how much memory each task will need. If the size of Eden is determined to be `E`, then you can set the size of the Young generation using the option `-Xmn=4/3*E`. (The scaling up by 4/3 is to account for space used by survivor regions as well.) + +* In the GC stats that are printed, if the OldGen is close to being full, reduce the amount of + memory used for caching by lowering `spark.memory.fraction`; it is better to cache fewer + objects than to slow down task execution. Alternatively, consider decreasing the size of + the Young generation. This means lowering `-Xmn` if you've set it as above. If not, try changing the + value of the JVM's `NewRatio` parameter. Many JVMs default this to 2, meaning that the Old generation + occupies 2/3 of the heap. It should be large enough such that this fraction exceeds `spark.memory.fraction`. + +* Try the G1GC garbage collector with `-XX:+UseG1GC`. It can improve performance in some situations where + garbage collection is a bottleneck. Note that with large executor heap sizes, it may be important to + increase the [G1 region size](https://blogs.oracle.com/g1gc/entry/g1_gc_tuning_a_case) + with `-XX:G1HeapRegionSize` * As an example, if your task is reading data from HDFS, the amount of memory used by the task can be estimated using the size of the data block read from HDFS. Note that the size of a decompressed block is often 2 or 3 times the @@ -237,6 +232,9 @@ Our experience suggests that the effect of GC tuning depends on your application There are [many more tuning options](http://www.oracle.com/technetwork/java/javase/gc-tuning-6-140523.html) described online, but at a high level, managing how frequently full GC takes place can help in reducing the overhead. +GC tuning flags for executors can be specified by setting `spark.executor.extraJavaOptions` in +a job's configuration. + # Other Considerations ## Level of Parallelism From 0583ecda1b63a7e3f126c3276059e4f99548a741 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Mon, 22 Aug 2016 12:27:33 -0700 Subject: [PATCH 055/270] [SPARK-17173][SPARKR] R MLlib refactor, cleanup, reformat, fix deprecation in test ## What changes were proposed in this pull request? refactor, cleanup, reformat, fix deprecation in test ## How was this patch tested? unit tests, manual tests Author: Felix Cheung Closes #14735 from felixcheung/rmllibutil. 
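Relating back to the GC guidance in the `docs/tuning.md` change above (SPARK-16320): the executor GC flags it mentions are supplied through Spark configuration rather than on the driver JVM. A minimal Scala sketch, where the particular flag values are illustrative assumptions and not recommendations from that patch:

```scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

// Executor JVM options must be in place before executors start, so set them in the
// application's configuration (or equivalently via spark-submit --conf).
val conf = new SparkConf()
  .set("spark.executor.extraJavaOptions",
    "-XX:+UseG1GC -XX:G1HeapRegionSize=16m -verbose:gc -XX:+PrintGCDetails")

val spark = SparkSession.builder().config(conf).getOrCreate()
```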
--- R/pkg/R/mllib.R | 205 +++++++++++-------------- R/pkg/inst/tests/testthat/test_mllib.R | 10 +- 2 files changed, 98 insertions(+), 117 deletions(-) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index 9a53c80aecded..b36fbcee17671 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -88,9 +88,9 @@ setClass("ALSModel", representation(jobj = "jobj")) #' @rdname write.ml #' @name write.ml #' @export -#' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} -#' @seealso \link{spark.als}, \link{spark.kmeans}, \link{spark.lda}, \link{spark.naiveBayes} -#' @seealso \link{spark.survreg}, \link{spark.isoreg} +#' @seealso \link{spark.glm}, \link{glm}, +#' @seealso \link{spark.als}, \link{spark.gaussianMixture}, \link{spark.isoreg}, \link{spark.kmeans}, +#' @seealso \link{spark.lda}, \link{spark.naiveBayes}, \link{spark.survreg}, #' @seealso \link{read.ml} NULL @@ -101,11 +101,22 @@ NULL #' @rdname predict #' @name predict #' @export -#' @seealso \link{spark.glm}, \link{glm}, \link{spark.gaussianMixture} -#' @seealso \link{spark.als}, \link{spark.kmeans}, \link{spark.naiveBayes}, \link{spark.survreg} -#' @seealso \link{spark.isoreg} +#' @seealso \link{spark.glm}, \link{glm}, +#' @seealso \link{spark.als}, \link{spark.gaussianMixture}, \link{spark.isoreg}, \link{spark.kmeans}, +#' @seealso \link{spark.naiveBayes}, \link{spark.survreg}, NULL +write_internal <- function(object, path, overwrite = FALSE) { + writer <- callJMethod(object@jobj, "write") + if (overwrite) { + writer <- callJMethod(writer, "overwrite") + } + invisible(callJMethod(writer, "save", path)) +} + +predict_internal <- function(object, newData) { + dataFrame(callJMethod(object@jobj, "transform", newData@sdf)) +} #' Generalized Linear Models #' @@ -173,7 +184,7 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"), jobj <- callJStatic("org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper", "fit", formula, data@sdf, family$family, family$link, tol, as.integer(maxIter), as.character(weightCol)) - return(new("GeneralizedLinearRegressionModel", jobj = jobj)) + new("GeneralizedLinearRegressionModel", jobj = jobj) }) #' Generalized Linear Models (R-compliant) @@ -219,7 +230,7 @@ setMethod("glm", signature(formula = "formula", family = "ANY", data = "SparkDat #' @export #' @note summary(GeneralizedLinearRegressionModel) since 2.0.0 setMethod("summary", signature(object = "GeneralizedLinearRegressionModel"), - function(object, ...) { + function(object) { jobj <- object@jobj is.loaded <- callJMethod(jobj, "isLoaded") features <- callJMethod(jobj, "rFeatures") @@ -245,7 +256,7 @@ setMethod("summary", signature(object = "GeneralizedLinearRegressionModel"), deviance = deviance, df.null = df.null, df.residual = df.residual, aic = aic, iter = iter, family = family, is.loaded = is.loaded) class(ans) <- "summary.GeneralizedLinearRegressionModel" - return(ans) + ans }) # Prints the summary of GeneralizedLinearRegressionModel @@ -275,8 +286,7 @@ print.summary.GeneralizedLinearRegressionModel <- function(x, ...) { " on", format(unlist(x[c("df.null", "df.residual")])), " degrees of freedom\n"), 1L, paste, collapse = " "), sep = "") cat("AIC: ", format(x$aic, digits = 4L), "\n\n", - "Number of Fisher Scoring iterations: ", x$iter, "\n", sep = "") - cat("\n") + "Number of Fisher Scoring iterations: ", x$iter, "\n\n", sep = "") invisible(x) } @@ -291,7 +301,7 @@ print.summary.GeneralizedLinearRegressionModel <- function(x, ...) 
{ #' @note predict(GeneralizedLinearRegressionModel) since 1.5.0 setMethod("predict", signature(object = "GeneralizedLinearRegressionModel"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) # Makes predictions from a naive Bayes model or a model produced by spark.naiveBayes(), @@ -305,7 +315,7 @@ setMethod("predict", signature(object = "GeneralizedLinearRegressionModel"), #' @note predict(NaiveBayesModel) since 2.0.0 setMethod("predict", signature(object = "NaiveBayesModel"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) # Returns the summary of a naive Bayes model produced by \code{spark.naiveBayes} @@ -317,7 +327,7 @@ setMethod("predict", signature(object = "NaiveBayesModel"), #' @export #' @note summary(NaiveBayesModel) since 2.0.0 setMethod("summary", signature(object = "NaiveBayesModel"), - function(object, ...) { + function(object) { jobj <- object@jobj features <- callJMethod(jobj, "features") labels <- callJMethod(jobj, "labels") @@ -328,7 +338,7 @@ setMethod("summary", signature(object = "NaiveBayesModel"), tables <- matrix(tables, nrow = length(labels)) rownames(tables) <- unlist(labels) colnames(tables) <- unlist(features) - return(list(apriori = apriori, tables = tables)) + list(apriori = apriori, tables = tables) }) # Returns posterior probabilities from a Latent Dirichlet Allocation model produced by spark.lda() @@ -342,7 +352,7 @@ setMethod("summary", signature(object = "NaiveBayesModel"), #' @note spark.posterior(LDAModel) since 2.1.0 setMethod("spark.posterior", signature(object = "LDAModel", newData = "SparkDataFrame"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) # Returns the summary of a Latent Dirichlet Allocation model produced by \code{spark.lda} @@ -377,12 +387,11 @@ setMethod("summary", signature(object = "LDAModel"), vocabSize <- callJMethod(jobj, "vocabSize") topics <- dataFrame(callJMethod(jobj, "topics", maxTermsPerTopic)) vocabulary <- callJMethod(jobj, "vocabulary") - return(list(docConcentration = unlist(docConcentration), - topicConcentration = topicConcentration, - logLikelihood = logLikelihood, logPerplexity = logPerplexity, - isDistributed = isDistributed, vocabSize = vocabSize, - topics = topics, - vocabulary = unlist(vocabulary))) + list(docConcentration = unlist(docConcentration), + topicConcentration = topicConcentration, + logLikelihood = logLikelihood, logPerplexity = logPerplexity, + isDistributed = isDistributed, vocabSize = vocabSize, + topics = topics, vocabulary = unlist(vocabulary)) }) # Returns the log perplexity of a Latent Dirichlet Allocation model produced by \code{spark.lda} @@ -395,8 +404,8 @@ setMethod("summary", signature(object = "LDAModel"), #' @note spark.perplexity(LDAModel) since 2.1.0 setMethod("spark.perplexity", signature(object = "LDAModel", data = "SparkDataFrame"), function(object, data) { - return(ifelse(missing(data), callJMethod(object@jobj, "logPerplexity"), - callJMethod(object@jobj, "computeLogPerplexity", data@sdf))) + ifelse(missing(data), callJMethod(object@jobj, "logPerplexity"), + callJMethod(object@jobj, "computeLogPerplexity", data@sdf)) }) # Saves the Latent Dirichlet Allocation model to the input path. 
@@ -412,11 +421,7 @@ setMethod("spark.perplexity", signature(object = "LDAModel", data = "SparkDataFr #' @note write.ml(LDAModel, character) since 2.1.0 setMethod("write.ml", signature(object = "LDAModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) #' Isotonic Regression Model @@ -471,9 +476,9 @@ setMethod("spark.isoreg", signature(data = "SparkDataFrame", formula = "formula" } jobj <- callJStatic("org.apache.spark.ml.r.IsotonicRegressionWrapper", "fit", - data@sdf, formula, as.logical(isotonic), as.integer(featureIndex), - as.character(weightCol)) - return(new("IsotonicRegressionModel", jobj = jobj)) + data@sdf, formula, as.logical(isotonic), as.integer(featureIndex), + as.character(weightCol)) + new("IsotonicRegressionModel", jobj = jobj) }) # Predicted values based on an isotonicRegression model @@ -487,7 +492,7 @@ setMethod("spark.isoreg", signature(data = "SparkDataFrame", formula = "formula" #' @note predict(IsotonicRegressionModel) since 2.1.0 setMethod("predict", signature(object = "IsotonicRegressionModel"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) # Get the summary of an IsotonicRegressionModel model @@ -499,11 +504,11 @@ setMethod("predict", signature(object = "IsotonicRegressionModel"), #' @export #' @note summary(IsotonicRegressionModel) since 2.1.0 setMethod("summary", signature(object = "IsotonicRegressionModel"), - function(object, ...) { + function(object) { jobj <- object@jobj boundaries <- callJMethod(jobj, "boundaries") predictions <- callJMethod(jobj, "predictions") - return(list(boundaries = boundaries, predictions = predictions)) + list(boundaries = boundaries, predictions = predictions) }) #' K-Means Clustering Model @@ -553,7 +558,7 @@ setMethod("spark.kmeans", signature(data = "SparkDataFrame", formula = "formula" initMode <- match.arg(initMode) jobj <- callJStatic("org.apache.spark.ml.r.KMeansWrapper", "fit", data@sdf, formula, as.integer(k), as.integer(maxIter), initMode) - return(new("KMeansModel", jobj = jobj)) + new("KMeansModel", jobj = jobj) }) #' Get fitted result from a k-means model @@ -576,14 +581,14 @@ setMethod("spark.kmeans", signature(data = "SparkDataFrame", formula = "formula" #'} #' @note fitted since 2.0.0 setMethod("fitted", signature(object = "KMeansModel"), - function(object, method = c("centers", "classes"), ...) { + function(object, method = c("centers", "classes")) { method <- match.arg(method) jobj <- object@jobj is.loaded <- callJMethod(jobj, "isLoaded") if (is.loaded) { - stop(paste("Saved-loaded k-means model does not support 'fitted' method")) + stop("Saved-loaded k-means model does not support 'fitted' method") } else { - return(dataFrame(callJMethod(jobj, "fitted", method))) + dataFrame(callJMethod(jobj, "fitted", method)) } }) @@ -595,7 +600,7 @@ setMethod("fitted", signature(object = "KMeansModel"), #' @export #' @note summary(KMeansModel) since 2.0.0 setMethod("summary", signature(object = "KMeansModel"), - function(object, ...) 
{ + function(object) { jobj <- object@jobj is.loaded <- callJMethod(jobj, "isLoaded") features <- callJMethod(jobj, "features") @@ -610,8 +615,8 @@ setMethod("summary", signature(object = "KMeansModel"), } else { dataFrame(callJMethod(jobj, "cluster")) } - return(list(coefficients = coefficients, size = size, - cluster = cluster, is.loaded = is.loaded)) + list(coefficients = coefficients, size = size, + cluster = cluster, is.loaded = is.loaded) }) # Predicted values based on a k-means model @@ -623,7 +628,7 @@ setMethod("summary", signature(object = "KMeansModel"), #' @note predict(KMeansModel) since 2.0.0 setMethod("predict", signature(object = "KMeansModel"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) #' Naive Bayes Models @@ -665,11 +670,11 @@ setMethod("predict", signature(object = "KMeansModel"), #' } #' @note spark.naiveBayes since 2.0.0 setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "formula"), - function(data, formula, smoothing = 1.0, ...) { + function(data, formula, smoothing = 1.0) { formula <- paste(deparse(formula), collapse = "") jobj <- callJStatic("org.apache.spark.ml.r.NaiveBayesWrapper", "fit", formula, data@sdf, smoothing) - return(new("NaiveBayesModel", jobj = jobj)) + new("NaiveBayesModel", jobj = jobj) }) # Saves the Bernoulli naive Bayes model to the input path. @@ -684,11 +689,7 @@ setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "form #' @note write.ml(NaiveBayesModel, character) since 2.0.0 setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) # Saves the AFT survival regression model to the input path. @@ -702,11 +703,7 @@ setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"), #' @seealso \link{read.ml} setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) # Saves the generalized linear model to the input path. 
@@ -720,11 +717,7 @@ setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = "c #' @note write.ml(GeneralizedLinearRegressionModel, character) since 2.0.0 setMethod("write.ml", signature(object = "GeneralizedLinearRegressionModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) # Save fitted MLlib model to the input path @@ -738,11 +731,7 @@ setMethod("write.ml", signature(object = "GeneralizedLinearRegressionModel", pat #' @note write.ml(KMeansModel, character) since 2.0.0 setMethod("write.ml", signature(object = "KMeansModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) # Save fitted IsotonicRegressionModel to the input path @@ -757,11 +746,7 @@ setMethod("write.ml", signature(object = "KMeansModel", path = "character"), #' @note write.ml(IsotonicRegression, character) since 2.1.0 setMethod("write.ml", signature(object = "IsotonicRegressionModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) # Save fitted MLlib model to the input path @@ -776,11 +761,7 @@ setMethod("write.ml", signature(object = "IsotonicRegressionModel", path = "char #' @note write.ml(GaussianMixtureModel, character) since 2.1.0 setMethod("write.ml", signature(object = "GaussianMixtureModel", path = "character"), function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) + write_internal(object, path, overwrite) }) #' Load a fitted MLlib model from the input path. 
@@ -801,21 +782,21 @@ read.ml <- function(path) { path <- suppressWarnings(normalizePath(path)) jobj <- callJStatic("org.apache.spark.ml.r.RWrappers", "load", path) if (isInstanceOf(jobj, "org.apache.spark.ml.r.NaiveBayesWrapper")) { - return(new("NaiveBayesModel", jobj = jobj)) + new("NaiveBayesModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.AFTSurvivalRegressionWrapper")) { - return(new("AFTSurvivalRegressionModel", jobj = jobj)) + new("AFTSurvivalRegressionModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper")) { - return(new("GeneralizedLinearRegressionModel", jobj = jobj)) + new("GeneralizedLinearRegressionModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.KMeansWrapper")) { - return(new("KMeansModel", jobj = jobj)) + new("KMeansModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.LDAWrapper")) { - return(new("LDAModel", jobj = jobj)) + new("LDAModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.IsotonicRegressionWrapper")) { - return(new("IsotonicRegressionModel", jobj = jobj)) + new("IsotonicRegressionModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.GaussianMixtureWrapper")) { - return(new("GaussianMixtureModel", jobj = jobj)) + new("GaussianMixtureModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.ALSWrapper")) { - return(new("ALSModel", jobj = jobj)) + new("ALSModel", jobj = jobj) } else { stop(paste("Unsupported model: ", jobj)) } @@ -860,7 +841,7 @@ setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = "formula formula <- paste(deparse(formula), collapse = "") jobj <- callJStatic("org.apache.spark.ml.r.AFTSurvivalRegressionWrapper", "fit", formula, data@sdf) - return(new("AFTSurvivalRegressionModel", jobj = jobj)) + new("AFTSurvivalRegressionModel", jobj = jobj) }) #' Latent Dirichlet Allocation @@ -926,7 +907,7 @@ setMethod("spark.lda", signature(data = "SparkDataFrame"), as.numeric(subsamplingRate), topicConcentration, as.array(docConcentration), as.array(customizedStopWords), maxVocabSize) - return(new("LDAModel", jobj = jobj)) + new("LDAModel", jobj = jobj) }) # Returns a summary of the AFT survival regression model produced by spark.survreg, @@ -946,7 +927,7 @@ setMethod("summary", signature(object = "AFTSurvivalRegressionModel"), coefficients <- as.matrix(unlist(coefficients)) colnames(coefficients) <- c("Value") rownames(coefficients) <- unlist(features) - return(list(coefficients = coefficients)) + list(coefficients = coefficients) }) # Makes predictions from an AFT survival regression model or a model produced by @@ -960,7 +941,7 @@ setMethod("summary", signature(object = "AFTSurvivalRegressionModel"), #' @note predict(AFTSurvivalRegressionModel) since 2.0.0 setMethod("predict", signature(object = "AFTSurvivalRegressionModel"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) #' Multivariate Gaussian Mixture Model (GMM) @@ -1014,7 +995,7 @@ setMethod("spark.gaussianMixture", signature(data = "SparkDataFrame", formula = formula <- paste(deparse(formula), collapse = "") jobj <- callJStatic("org.apache.spark.ml.r.GaussianMixtureWrapper", "fit", data@sdf, formula, as.integer(k), as.integer(maxIter), as.numeric(tol)) - return(new("GaussianMixtureModel", jobj = jobj)) + new("GaussianMixtureModel", jobj = jobj) }) # Get the summary of a multivariate gaussian mixture model @@ 
-1027,7 +1008,7 @@ setMethod("spark.gaussianMixture", signature(data = "SparkDataFrame", formula = #' @export #' @note summary(GaussianMixtureModel) since 2.1.0 setMethod("summary", signature(object = "GaussianMixtureModel"), - function(object, ...) { + function(object) { jobj <- object@jobj is.loaded <- callJMethod(jobj, "isLoaded") lambda <- unlist(callJMethod(jobj, "lambda")) @@ -1052,8 +1033,8 @@ setMethod("summary", signature(object = "GaussianMixtureModel"), } else { dataFrame(callJMethod(jobj, "posterior")) } - return(list(lambda = lambda, mu = mu, sigma = sigma, - posterior = posterior, is.loaded = is.loaded)) + list(lambda = lambda, mu = mu, sigma = sigma, + posterior = posterior, is.loaded = is.loaded) }) # Predicted values based on a gaussian mixture model @@ -1067,7 +1048,7 @@ setMethod("summary", signature(object = "GaussianMixtureModel"), #' @note predict(GaussianMixtureModel) since 2.1.0 setMethod("predict", signature(object = "GaussianMixtureModel"), function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) + predict_internal(object, newData) }) #' Alternating Least Squares (ALS) for Collaborative Filtering @@ -1149,7 +1130,7 @@ setMethod("spark.als", signature(data = "SparkDataFrame"), reg, as.integer(maxIter), implicitPrefs, alpha, nonnegative, as.integer(numUserBlocks), as.integer(numItemBlocks), as.integer(checkpointInterval), as.integer(seed)) - return(new("ALSModel", jobj = jobj)) + new("ALSModel", jobj = jobj) }) # Returns a summary of the ALS model produced by spark.als. @@ -1163,17 +1144,17 @@ setMethod("spark.als", signature(data = "SparkDataFrame"), #' @export #' @note summary(ALSModel) since 2.1.0 setMethod("summary", signature(object = "ALSModel"), -function(object, ...) { - jobj <- object@jobj - user <- callJMethod(jobj, "userCol") - item <- callJMethod(jobj, "itemCol") - rating <- callJMethod(jobj, "ratingCol") - userFactors <- dataFrame(callJMethod(jobj, "userFactors")) - itemFactors <- dataFrame(callJMethod(jobj, "itemFactors")) - rank <- callJMethod(jobj, "rank") - return(list(user = user, item = item, rating = rating, userFactors = userFactors, - itemFactors = itemFactors, rank = rank)) -}) + function(object) { + jobj <- object@jobj + user <- callJMethod(jobj, "userCol") + item <- callJMethod(jobj, "itemCol") + rating <- callJMethod(jobj, "ratingCol") + userFactors <- dataFrame(callJMethod(jobj, "userFactors")) + itemFactors <- dataFrame(callJMethod(jobj, "itemFactors")) + rank <- callJMethod(jobj, "rank") + list(user = user, item = item, rating = rating, userFactors = userFactors, + itemFactors = itemFactors, rank = rank) + }) # Makes predictions from an ALS model or a model produced by spark.als. @@ -1185,9 +1166,9 @@ function(object, ...) { #' @export #' @note predict(ALSModel) since 2.1.0 setMethod("predict", signature(object = "ALSModel"), -function(object, newData) { - return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf))) -}) + function(object, newData) { + predict_internal(object, newData) + }) # Saves the ALS model to the input path. 
@@ -1203,10 +1184,6 @@ function(object, newData) { #' @seealso \link{read.ml} #' @note write.ml(ALSModel, character) since 2.1.0 setMethod("write.ml", signature(object = "ALSModel", path = "character"), -function(object, path, overwrite = FALSE) { - writer <- callJMethod(object@jobj, "write") - if (overwrite) { - writer <- callJMethod(writer, "overwrite") - } - invisible(callJMethod(writer, "save", path)) -}) + function(object, path, overwrite = FALSE) { + write_internal(object, path, overwrite) + }) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index d15c2393b94ac..de9bd48662c3a 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -95,6 +95,10 @@ test_that("spark.glm summary", { expect_equal(stats$df.residual, rStats$df.residual) expect_equal(stats$aic, rStats$aic) + out <- capture.output(print(stats)) + expect_match(out[2], "Deviance Residuals:") + expect_true(any(grepl("AIC: 59.22", out))) + # binomial family df <- suppressWarnings(createDataFrame(iris)) training <- df[df$Species %in% c("versicolor", "virginica"), ] @@ -409,7 +413,7 @@ test_that("spark.naiveBayes", { # Test e1071::naiveBayes if (requireNamespace("e1071", quietly = TRUE)) { - expect_that(m <- e1071::naiveBayes(Survived ~ ., data = t1), not(throws_error())) + expect_error(m <- e1071::naiveBayes(Survived ~ ., data = t1), NA) expect_equal(as.character(predict(m, t1[1, ])), "Yes") } }) @@ -487,7 +491,7 @@ test_that("spark.isotonicRegression", { weightCol = "weight") # only allow one variable on the right hand side of the formula expect_error(model2 <- spark.isoreg(df, ~., isotonic = FALSE)) - result <- summary(model, df) + result <- summary(model) expect_equal(result$predictions, list(7, 5, 4, 4, 1)) # Test model prediction @@ -503,7 +507,7 @@ test_that("spark.isotonicRegression", { expect_error(write.ml(model, modelPath)) write.ml(model, modelPath, overwrite = TRUE) model2 <- read.ml(modelPath) - expect_equal(result, summary(model2, df)) + expect_equal(result, summary(model2)) unlink(modelPath) }) From 6f3cd36f93c11265449fdce3323e139fec8ab22d Mon Sep 17 00:00:00 2001 From: Shivaram Venkataraman Date: Mon, 22 Aug 2016 12:53:52 -0700 Subject: [PATCH 056/270] [SPARKR][MINOR] Add Xiangrui and Felix to maintainers ## What changes were proposed in this pull request? This change adds Xiangrui Meng and Felix Cheung to the maintainers field in the package description. ## How was this patch tested? (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) (If this patch involves UI changes, please attach a screenshot; otherwise, remove this) Author: Shivaram Venkataraman Closes #14758 from shivaram/sparkr-maintainers. --- R/pkg/DESCRIPTION | 2 ++ 1 file changed, 2 insertions(+) diff --git a/R/pkg/DESCRIPTION b/R/pkg/DESCRIPTION index 357ab007931f5..d81f1a3d4de68 100644 --- a/R/pkg/DESCRIPTION +++ b/R/pkg/DESCRIPTION @@ -5,6 +5,8 @@ Version: 2.0.0 Date: 2016-07-07 Author: The Apache Software Foundation Maintainer: Shivaram Venkataraman + Xiangrui Meng + Felix Cheung Depends: R (>= 3.0), methods From 929cb8beed9b7014231580cc002853236a5337d6 Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Mon, 22 Aug 2016 13:31:38 -0700 Subject: [PATCH 057/270] [MINOR][SQL] Fix some typos in comments and test hints ## What changes were proposed in this pull request? Fix some typos in comments and test hints ## How was this patch tested? N/A. Author: Sean Zhong Closes #14755 from clockfly/fix_minor_typo. 
--- .../apache/spark/sql/execution/UnsafeKVExternalSorter.java | 2 +- .../execution/aggregate/TungstenAggregationIterator.scala | 6 +++--- .../src/test/scala/org/apache/spark/sql/QueryTest.scala | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java index eb105bd09a3ea..0d51dc9ff8a85 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java @@ -99,7 +99,7 @@ public UnsafeKVExternalSorter( // The array will be used to do in-place sort, which require half of the space to be empty. assert(map.numKeys() <= map.getArray().size() / 2); // During spilling, the array in map will not be used, so we can borrow that and use it - // as the underline array for in-memory sorter (it's always large enough). + // as the underlying array for in-memory sorter (it's always large enough). // Since we will not grow the array, it's fine to pass `null` as consumer. final UnsafeInMemorySorter inMemSorter = new UnsafeInMemorySorter( null, taskMemoryManager, recordComparator, prefixComparator, map.getArray(), diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala index 4b8adf5230717..4e072a92cc772 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala @@ -32,9 +32,9 @@ import org.apache.spark.unsafe.KVIterator * An iterator used to evaluate aggregate functions. It operates on [[UnsafeRow]]s. * * This iterator first uses hash-based aggregation to process input rows. It uses - * a hash map to store groups and their corresponding aggregation buffers. If we - * this map cannot allocate memory from memory manager, it spill the map into disk - * and create a new one. After processed all the input, then merge all the spills + * a hash map to store groups and their corresponding aggregation buffers. If + * this map cannot allocate memory from memory manager, it spills the map into disk + * and creates a new one. After processed all the input, then merge all the spills * together using external sorter, and do sort-based aggregation. 
* * The process has the following step: diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala index 484e4380331f8..c7af40227d45f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala @@ -358,11 +358,11 @@ abstract class QueryTest extends PlanTest { */ def assertEmptyMissingInput(query: Dataset[_]): Unit = { assert(query.queryExecution.analyzed.missingInput.isEmpty, - s"The analyzed logical plan has missing inputs: ${query.queryExecution.analyzed}") + s"The analyzed logical plan has missing inputs:\n${query.queryExecution.analyzed}") assert(query.queryExecution.optimizedPlan.missingInput.isEmpty, - s"The optimized logical plan has missing inputs: ${query.queryExecution.optimizedPlan}") + s"The optimized logical plan has missing inputs:\n${query.queryExecution.optimizedPlan}") assert(query.queryExecution.executedPlan.missingInput.isEmpty, - s"The physical plan has missing inputs: ${query.queryExecution.executedPlan}") + s"The physical plan has missing inputs:\n${query.queryExecution.executedPlan}") } } From 84770b59f773f132073cd2af4204957fc2d7bf35 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Mon, 22 Aug 2016 15:48:35 -0700 Subject: [PATCH 058/270] [SPARK-17162] Range does not support SQL generation ## What changes were proposed in this pull request? The range operator previously didn't support SQL generation, which made it not possible to use in views. ## How was this patch tested? Unit tests. cc hvanhovell Author: Eric Liang Closes #14724 from ericl/spark-17162. --- .../ResolveTableValuedFunctions.scala | 11 ++++------ .../plans/logical/basicLogicalOperators.scala | 21 ++++++++++++------- .../spark/sql/catalyst/SQLBuilder.scala | 3 +++ .../execution/basicPhysicalOperators.scala | 2 +- .../spark/sql/execution/command/views.scala | 3 +-- sql/hive/src/test/resources/sqlgen/range.sql | 4 ++++ .../resources/sqlgen/range_with_splits.sql | 4 ++++ .../sql/catalyst/LogicalPlanToSQLSuite.scala | 14 ++++++++++++- 8 files changed, 44 insertions(+), 18 deletions(-) create mode 100644 sql/hive/src/test/resources/sqlgen/range.sql create mode 100644 sql/hive/src/test/resources/sqlgen/range_with_splits.sql diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala index 7fdf7fa0c06a3..6b3bb68538dd1 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveTableValuedFunctions.scala @@ -28,9 +28,6 @@ import org.apache.spark.sql.types.{DataType, IntegerType, LongType} * Rule that resolves table-valued function references. */ object ResolveTableValuedFunctions extends Rule[LogicalPlan] { - private lazy val defaultParallelism = - SparkContext.getOrCreate(new SparkConf(false)).defaultParallelism - /** * List of argument names and their types, used to declare a function. 
*/ @@ -84,25 +81,25 @@ object ResolveTableValuedFunctions extends Rule[LogicalPlan] { "range" -> Map( /* range(end) */ tvf("end" -> LongType) { case Seq(end: Long) => - Range(0, end, 1, defaultParallelism) + Range(0, end, 1, None) }, /* range(start, end) */ tvf("start" -> LongType, "end" -> LongType) { case Seq(start: Long, end: Long) => - Range(start, end, 1, defaultParallelism) + Range(start, end, 1, None) }, /* range(start, end, step) */ tvf("start" -> LongType, "end" -> LongType, "step" -> LongType) { case Seq(start: Long, end: Long, step: Long) => - Range(start, end, step, defaultParallelism) + Range(start, end, step, None) }, /* range(start, end, step, numPartitions) */ tvf("start" -> LongType, "end" -> LongType, "step" -> LongType, "numPartitions" -> IntegerType) { case Seq(start: Long, end: Long, step: Long, numPartitions: Int) => - Range(start, end, step, numPartitions) + Range(start, end, step, Some(numPartitions)) }) ) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala index af1736e60799b..010aec7ba1a42 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala @@ -422,17 +422,20 @@ case class Sort( /** Factory for constructing new `Range` nodes. */ object Range { - def apply(start: Long, end: Long, step: Long, numSlices: Int): Range = { + def apply(start: Long, end: Long, step: Long, numSlices: Option[Int]): Range = { val output = StructType(StructField("id", LongType, nullable = false) :: Nil).toAttributes new Range(start, end, step, numSlices, output) } + def apply(start: Long, end: Long, step: Long, numSlices: Int): Range = { + Range(start, end, step, Some(numSlices)) + } } case class Range( start: Long, end: Long, step: Long, - numSlices: Int, + numSlices: Option[Int], output: Seq[Attribute]) extends LeafNode with MultiInstanceRelation { @@ -449,6 +452,14 @@ case class Range( } } + def toSQL(): String = { + if (numSlices.isDefined) { + s"SELECT id AS `${output.head.name}` FROM range($start, $end, $step, ${numSlices.get})" + } else { + s"SELECT id AS `${output.head.name}` FROM range($start, $end, $step)" + } + } + override def newInstance(): Range = copy(output = output.map(_.newInstance())) override lazy val statistics: Statistics = { @@ -457,11 +468,7 @@ case class Range( } override def simpleString: String = { - if (step == 1) { - s"Range ($start, $end, splits=$numSlices)" - } else { - s"Range ($start, $end, step=$step, splits=$numSlices)" - } + s"Range ($start, $end, step=$step, splits=$numSlices)" } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala index af1de511da060..dde91b0a8606e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala @@ -208,6 +208,9 @@ class SQLBuilder private ( case p: LocalRelation => p.toSQL(newSubqueryName()) + case p: Range => + p.toSQL() + case OneRowRelation => "" diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala index ad8a71689895b..3562083b06740 100644 --- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala @@ -318,7 +318,7 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range) def start: Long = range.start def step: Long = range.step - def numSlices: Int = range.numSlices + def numSlices: Int = range.numSlices.getOrElse(sparkContext.defaultParallelism) def numElements: BigInt = range.numElements override val output: Seq[Attribute] = range.output diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala index e397cfa058e24..f0d7b64c3c160 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala @@ -179,8 +179,7 @@ case class CreateViewCommand( sparkSession.sql(viewSQL).queryExecution.assertAnalyzed() } catch { case NonFatal(e) => - throw new RuntimeException( - "Failed to analyze the canonicalized SQL. It is possible there is a bug in Spark.", e) + throw new RuntimeException(s"Failed to analyze the canonicalized SQL: ${viewSQL}", e) } val viewSchema = if (userSpecifiedColumns.isEmpty) { diff --git a/sql/hive/src/test/resources/sqlgen/range.sql b/sql/hive/src/test/resources/sqlgen/range.sql new file mode 100644 index 0000000000000..53c72ea71e6ac --- /dev/null +++ b/sql/hive/src/test/resources/sqlgen/range.sql @@ -0,0 +1,4 @@ +-- This file is automatically generated by LogicalPlanToSQLSuite. +select * from range(100) +-------------------------------------------------------------------------------- +SELECT `gen_attr_0` AS `id` FROM (SELECT `gen_attr_0` FROM (SELECT id AS `gen_attr_0` FROM range(0, 100, 1)) AS gen_subquery_0) AS gen_subquery_1 diff --git a/sql/hive/src/test/resources/sqlgen/range_with_splits.sql b/sql/hive/src/test/resources/sqlgen/range_with_splits.sql new file mode 100644 index 0000000000000..83d637d54a302 --- /dev/null +++ b/sql/hive/src/test/resources/sqlgen/range_with_splits.sql @@ -0,0 +1,4 @@ +-- This file is automatically generated by LogicalPlanToSQLSuite. 
+select * from range(1, 100, 20, 10) +-------------------------------------------------------------------------------- +SELECT `gen_attr_0` AS `id` FROM (SELECT `gen_attr_0` FROM (SELECT id AS `gen_attr_0` FROM range(1, 100, 20, 10)) AS gen_subquery_0) AS gen_subquery_1 diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala index 742b065891a8e..9c6da6a628dcf 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala @@ -23,7 +23,10 @@ import java.nio.file.{Files, NoSuchFileException, Paths} import scala.util.control.NonFatal import org.apache.spark.sql.Column +import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation +import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.catalyst.plans.logical.LeafNode import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SQLTestUtils @@ -180,7 +183,11 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { } test("Test should fail if the SQL query cannot be regenerated") { - spark.range(10).createOrReplaceTempView("not_sql_gen_supported_table_so_far") + case class Unsupported() extends LeafNode with MultiInstanceRelation { + override def newInstance(): Unsupported = copy() + override def output: Seq[Attribute] = Nil + } + Unsupported().createOrReplaceTempView("not_sql_gen_supported_table_so_far") sql("select * from not_sql_gen_supported_table_so_far") val m3 = intercept[org.scalatest.exceptions.TestFailedException] { checkSQL("select * from not_sql_gen_supported_table_so_far", "in") @@ -196,6 +203,11 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { } } + test("range") { + checkSQL("select * from range(100)", "range") + checkSQL("select * from range(1, 100, 20, 10)", "range_with_splits") + } + test("in") { checkSQL("SELECT id FROM parquet_t0 WHERE id IN (1, 2, 3)", "in") } From 71afeeea4ec8e67edc95b5d504c557c88a2598b9 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Mon, 22 Aug 2016 15:53:10 -0700 Subject: [PATCH 059/270] [SPARK-16508][SPARKR] doc updates and more CRAN check fixes ## What changes were proposed in this pull request? replace ``` ` ``` in code doc with `\code{thing}` remove added `...` for drop(DataFrame) fix remaining CRAN check warnings ## How was this patch tested? create doc with knitr junyangq Author: Felix Cheung Closes #14734 from felixcheung/rdoccleanup. 
--- R/pkg/NAMESPACE | 6 +++- R/pkg/R/DataFrame.R | 71 ++++++++++++++++++++++---------------------- R/pkg/R/RDD.R | 10 +++---- R/pkg/R/SQLContext.R | 30 +++++++++---------- R/pkg/R/WindowSpec.R | 23 +++++++------- R/pkg/R/column.R | 2 +- R/pkg/R/functions.R | 36 +++++++++++----------- R/pkg/R/generics.R | 15 +++++----- R/pkg/R/group.R | 1 + R/pkg/R/mllib.R | 19 ++++++------ R/pkg/R/pairRDD.R | 6 ++-- R/pkg/R/stats.R | 14 ++++----- 12 files changed, 119 insertions(+), 114 deletions(-) diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index e1b87b28d35ae..709057675e578 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -1,5 +1,9 @@ # Imports from base R -importFrom(methods, setGeneric, setMethod, setOldClass) +# Do not include stats:: "rpois", "runif" - causes error at runtime +importFrom("methods", "setGeneric", "setMethod", "setOldClass") +importFrom("methods", "is", "new", "signature", "show") +importFrom("stats", "gaussian", "setNames") +importFrom("utils", "download.file", "packageVersion", "untar") # Disable native libraries till we figure out how to package it # See SPARKR-7839 diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index 540dc3122dd6d..52a6628ad7b32 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -150,7 +150,7 @@ setMethod("explain", #' isLocal #' -#' Returns True if the `collect` and `take` methods can be run locally +#' Returns True if the \code{collect} and \code{take} methods can be run locally #' (without any Spark executors). #' #' @param x A SparkDataFrame @@ -182,7 +182,7 @@ setMethod("isLocal", #' @param numRows the number of rows to print. Defaults to 20. #' @param truncate whether truncate long strings. If \code{TRUE}, strings more than #' 20 characters will be truncated. However, if set greater than zero, -#' truncates strings longer than `truncate` characters and all cells +#' truncates strings longer than \code{truncate} characters and all cells #' will be aligned right. #' @param ... further arguments to be passed to or from other methods. #' @family SparkDataFrame functions @@ -642,10 +642,10 @@ setMethod("unpersist", #' The following options for repartition are possible: #' \itemize{ #' \item{1.} {Return a new SparkDataFrame partitioned by -#' the given columns into `numPartitions`.} -#' \item{2.} {Return a new SparkDataFrame that has exactly `numPartitions`.} +#' the given columns into \code{numPartitions}.} +#' \item{2.} {Return a new SparkDataFrame that has exactly \code{numPartitions}.} #' \item{3.} {Return a new SparkDataFrame partitioned by the given column(s), -#' using `spark.sql.shuffle.partitions` as number of partitions.} +#' using \code{spark.sql.shuffle.partitions} as number of partitions.} #'} #' @param x a SparkDataFrame. #' @param numPartitions the number of partitions to use. @@ -1132,9 +1132,8 @@ setMethod("take", #' Head #' -#' Return the first NUM rows of a SparkDataFrame as a R data.frame. If NUM is NULL, -#' then head() returns the first 6 rows in keeping with the current data.frame -#' convention in R. +#' Return the first \code{num} rows of a SparkDataFrame as a R data.frame. If \code{num} is not +#' specified, then head() returns the first 6 rows as with R data.frame. #' #' @param x a SparkDataFrame. #' @param num the number of rows to return. Default is 6. @@ -1406,11 +1405,11 @@ setMethod("dapplyCollect", #' #' @param cols grouping columns. #' @param func a function to be applied to each group partition specified by grouping -#' column of the SparkDataFrame. 
The function `func` takes as argument +#' column of the SparkDataFrame. The function \code{func} takes as argument #' a key - grouping columns and a data frame - a local R data.frame. -#' The output of `func` is a local R data.frame. +#' The output of \code{func} is a local R data.frame. #' @param schema the schema of the resulting SparkDataFrame after the function is applied. -#' The schema must match to output of `func`. It has to be defined for each +#' The schema must match to output of \code{func}. It has to be defined for each #' output column with preferred output column name and corresponding data type. #' @return A SparkDataFrame. #' @family SparkDataFrame functions @@ -1497,9 +1496,9 @@ setMethod("gapply", #' #' @param cols grouping columns. #' @param func a function to be applied to each group partition specified by grouping -#' column of the SparkDataFrame. The function `func` takes as argument +#' column of the SparkDataFrame. The function \code{func} takes as argument #' a key - grouping columns and a data frame - a local R data.frame. -#' The output of `func` is a local R data.frame. +#' The output of \code{func} is a local R data.frame. #' @return A data.frame. #' @family SparkDataFrame functions #' @aliases gapplyCollect,SparkDataFrame-method @@ -1657,7 +1656,7 @@ setMethod("$", signature(x = "SparkDataFrame"), getColumn(x, name) }) -#' @param value a Column or NULL. If NULL, the specified Column is dropped. +#' @param value a Column or \code{NULL}. If \code{NULL}, the specified Column is dropped. #' @rdname select #' @name $<- #' @aliases $<-,SparkDataFrame-method @@ -1747,7 +1746,7 @@ setMethod("[", signature(x = "SparkDataFrame"), #' @family subsetting functions #' @examples #' \dontrun{ -#' # Columns can be selected using `[[` and `[` +#' # Columns can be selected using [[ and [ #' df[[2]] == df[["age"]] #' df[,2] == df[,"age"] #' df[,c("name", "age")] @@ -1792,7 +1791,7 @@ setMethod("subset", signature(x = "SparkDataFrame"), #' select(df, df$name, df$age + 1) #' select(df, c("col1", "col2")) #' select(df, list(df$name, df$age + 1)) -#' # Similar to R data frames columns can also be selected using `$` +#' # Similar to R data frames columns can also be selected using $ #' df[,df$age] #' } #' @note select(SparkDataFrame, character) since 1.4.0 @@ -2443,7 +2442,7 @@ generateAliasesForIntersectedCols <- function (x, intersectedColNames, suffix) { #' Return a new SparkDataFrame containing the union of rows #' #' Return a new SparkDataFrame containing the union of rows in this SparkDataFrame -#' and another SparkDataFrame. This is equivalent to `UNION ALL` in SQL. +#' and another SparkDataFrame. This is equivalent to \code{UNION ALL} in SQL. #' Note that this does not remove duplicate rows across the two SparkDataFrames. #' #' @param x A SparkDataFrame @@ -2486,7 +2485,7 @@ setMethod("unionAll", #' Union two or more SparkDataFrames #' -#' Union two or more SparkDataFrames. This is equivalent to `UNION ALL` in SQL. +#' Union two or more SparkDataFrames. This is equivalent to \code{UNION ALL} in SQL. #' Note that this does not remove duplicate rows across the two SparkDataFrames. #' #' @param x a SparkDataFrame. @@ -2519,7 +2518,7 @@ setMethod("rbind", #' Intersect #' #' Return a new SparkDataFrame containing rows only in both this SparkDataFrame -#' and another SparkDataFrame. This is equivalent to `INTERSECT` in SQL. +#' and another SparkDataFrame. This is equivalent to \code{INTERSECT} in SQL. 
#' #' @param x A SparkDataFrame #' @param y A SparkDataFrame @@ -2547,7 +2546,7 @@ setMethod("intersect", #' except #' #' Return a new SparkDataFrame containing rows in this SparkDataFrame -#' but not in another SparkDataFrame. This is equivalent to `EXCEPT` in SQL. +#' but not in another SparkDataFrame. This is equivalent to \code{EXCEPT} in SQL. #' #' @param x a SparkDataFrame. #' @param y a SparkDataFrame. @@ -2576,8 +2575,8 @@ setMethod("except", #' Save the contents of SparkDataFrame to a data source. #' -#' The data source is specified by the `source` and a set of options (...). -#' If `source` is not specified, the default data source configured by +#' The data source is specified by the \code{source} and a set of options (...). +#' If \code{source} is not specified, the default data source configured by #' spark.sql.sources.default will be used. #' #' Additionally, mode is used to specify the behavior of the save operation when data already @@ -2613,7 +2612,7 @@ setMethod("except", #' @note write.df since 1.4.0 setMethod("write.df", signature(df = "SparkDataFrame", path = "character"), - function(df, path, source = NULL, mode = "error", ...){ + function(df, path, source = NULL, mode = "error", ...) { if (is.null(source)) { source <- getDefaultSqlSource() } @@ -2635,14 +2634,14 @@ setMethod("write.df", #' @note saveDF since 1.4.0 setMethod("saveDF", signature(df = "SparkDataFrame", path = "character"), - function(df, path, source = NULL, mode = "error", ...){ + function(df, path, source = NULL, mode = "error", ...) { write.df(df, path, source, mode, ...) }) #' Save the contents of the SparkDataFrame to a data source as a table #' -#' The data source is specified by the `source` and a set of options (...). -#' If `source` is not specified, the default data source configured by +#' The data source is specified by the \code{source} and a set of options (...). +#' If \code{source} is not specified, the default data source configured by #' spark.sql.sources.default will be used. #' #' Additionally, mode is used to specify the behavior of the save operation when @@ -2675,7 +2674,7 @@ setMethod("saveDF", #' @note saveAsTable since 1.4.0 setMethod("saveAsTable", signature(df = "SparkDataFrame", tableName = "character"), - function(df, tableName, source = NULL, mode="error", ...){ + function(df, tableName, source = NULL, mode="error", ...) { if (is.null(source)) { source <- getDefaultSqlSource() } @@ -2752,11 +2751,11 @@ setMethod("summary", #' @param how "any" or "all". #' if "any", drop a row if it contains any nulls. #' if "all", drop a row only if all its values are null. -#' if minNonNulls is specified, how is ignored. +#' if \code{minNonNulls} is specified, how is ignored. #' @param minNonNulls if specified, drop rows that have less than -#' minNonNulls non-null values. +#' \code{minNonNulls} non-null values. #' This overwrites the how parameter. -#' @param cols optional list of column names to consider. In `fillna`, +#' @param cols optional list of column names to consider. In \code{fillna}, #' columns specified in cols that do not have matching data #' type are ignored. For example, if value is a character, and #' subset contains a non-character column, then the non-character @@ -2879,8 +2878,8 @@ setMethod("fillna", #' in your system to accommodate the contents. #' #' @param x a SparkDataFrame. -#' @param row.names NULL or a character vector giving the row names for the data frame. -#' @param optional If `TRUE`, converting column names is optional. 
+#' @param row.names \code{NULL} or a character vector giving the row names for the data frame. +#' @param optional If \code{TRUE}, converting column names is optional. #' @param ... additional arguments to pass to base::as.data.frame. #' @return A data.frame. #' @family SparkDataFrame functions @@ -3058,7 +3057,7 @@ setMethod("str", #' @note drop since 2.0.0 setMethod("drop", signature(x = "SparkDataFrame"), - function(x, col, ...) { + function(x, col) { stopifnot(class(col) == "character" || class(col) == "Column") if (class(col) == "Column") { @@ -3218,8 +3217,8 @@ setMethod("histogram", #' and to not change the existing data. #' } #' -#' @param x s SparkDataFrame. -#' @param url JDBC database url of the form `jdbc:subprotocol:subname`. +#' @param x a SparkDataFrame. +#' @param url JDBC database url of the form \code{jdbc:subprotocol:subname}. #' @param tableName yhe name of the table in the external database. #' @param mode one of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default). #' @param ... additional JDBC database connection properties. @@ -3237,7 +3236,7 @@ setMethod("histogram", #' @note write.jdbc since 2.0.0 setMethod("write.jdbc", signature(x = "SparkDataFrame", url = "character", tableName = "character"), - function(x, url, tableName, mode = "error", ...){ + function(x, url, tableName, mode = "error", ...) { jmode <- convertToJSaveMode(mode) jprops <- varargsToJProperties(...) write <- callJMethod(x@sdf, "write") diff --git a/R/pkg/R/RDD.R b/R/pkg/R/RDD.R index 6b254bb0d302c..6cd0704003f1a 100644 --- a/R/pkg/R/RDD.R +++ b/R/pkg/R/RDD.R @@ -887,17 +887,17 @@ setMethod("sampleRDD", # Discards some random values to ensure each partition has a # different random seed. - runif(partIndex) + stats::runif(partIndex) for (elem in part) { if (withReplacement) { - count <- rpois(1, fraction) + count <- stats::rpois(1, fraction) if (count > 0) { res[ (len + 1) : (len + count) ] <- rep(list(elem), count) len <- len + count } } else { - if (runif(1) < fraction) { + if (stats::runif(1) < fraction) { len <- len + 1 res[[len]] <- elem } @@ -965,7 +965,7 @@ setMethod("takeSample", signature(x = "RDD", withReplacement = "logical", set.seed(seed) samples <- collectRDD(sampleRDD(x, withReplacement, fraction, - as.integer(ceiling(runif(1, + as.integer(ceiling(stats::runif(1, -MAXINT, MAXINT))))) # If the first sample didn't turn out large enough, keep trying to @@ -973,7 +973,7 @@ setMethod("takeSample", signature(x = "RDD", withReplacement = "logical", # multiplier for thei initial size while (length(samples) < total) samples <- collectRDD(sampleRDD(x, withReplacement, fraction, - as.integer(ceiling(runif(1, + as.integer(ceiling(stats::runif(1, -MAXINT, MAXINT))))) diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R index a9cd2d85f898c..572e71e25b80b 100644 --- a/R/pkg/R/SQLContext.R +++ b/R/pkg/R/SQLContext.R @@ -115,7 +115,7 @@ infer_type <- function(x) { #' Get Runtime Config from the current active SparkSession #' #' Get Runtime Config from the current active SparkSession. -#' To change SparkSession Runtime Config, please see `sparkR.session()`. +#' To change SparkSession Runtime Config, please see \code{sparkR.session()}. 
#' #' @param key (optional) The key of the config to get, if omitted, all config is returned #' @param defaultValue (optional) The default value of the config to return if they config is not @@ -720,11 +720,11 @@ dropTempView <- function(viewName) { #' #' Returns the dataset in a data source as a SparkDataFrame #' -#' The data source is specified by the `source` and a set of options(...). -#' If `source` is not specified, the default data source configured by +#' The data source is specified by the \code{source} and a set of options(...). +#' If \code{source} is not specified, the default data source configured by #' "spark.sql.sources.default" will be used. \cr -#' Similar to R read.csv, when `source` is "csv", by default, a value of "NA" will be interpreted -#' as NA. +#' Similar to R read.csv, when \code{source} is "csv", by default, a value of "NA" will be +#' interpreted as NA. #' #' @param path The path of files to load #' @param source The name of external data source @@ -791,8 +791,8 @@ loadDF <- function(x, ...) { #' Creates an external table based on the dataset in a data source, #' Returns a SparkDataFrame associated with the external table. #' -#' The data source is specified by the `source` and a set of options(...). -#' If `source` is not specified, the default data source configured by +#' The data source is specified by the \code{source} and a set of options(...). +#' If \code{source} is not specified, the default data source configured by #' "spark.sql.sources.default" will be used. #' #' @param tableName a name of the table. @@ -830,22 +830,22 @@ createExternalTable <- function(x, ...) { #' Additional JDBC database connection properties can be set (...) #' #' Only one of partitionColumn or predicates should be set. Partitions of the table will be -#' retrieved in parallel based on the `numPartitions` or by the predicates. +#' retrieved in parallel based on the \code{numPartitions} or by the predicates. #' #' Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash #' your external database systems. #' -#' @param url JDBC database url of the form `jdbc:subprotocol:subname` +#' @param url JDBC database url of the form \code{jdbc:subprotocol:subname} #' @param tableName the name of the table in the external database #' @param partitionColumn the name of a column of integral type that will be used for partitioning -#' @param lowerBound the minimum value of `partitionColumn` used to decide partition stride -#' @param upperBound the maximum value of `partitionColumn` used to decide partition stride -#' @param numPartitions the number of partitions, This, along with `lowerBound` (inclusive), -#' `upperBound` (exclusive), form partition strides for generated WHERE -#' clause expressions used to split the column `partitionColumn` evenly. +#' @param lowerBound the minimum value of \code{partitionColumn} used to decide partition stride +#' @param upperBound the maximum value of \code{partitionColumn} used to decide partition stride +#' @param numPartitions the number of partitions, This, along with \code{lowerBound} (inclusive), +#' \code{upperBound} (exclusive), form partition strides for generated WHERE +#' clause expressions used to split the column \code{partitionColumn} evenly. #' This defaults to SparkContext.defaultParallelism when unset. #' @param predicates a list of conditions in the where clause; each one defines one partition -#' @param ... additional JDBC database connection named propertie(s). +#' @param ... 
additional JDBC database connection named properties. #' @return SparkDataFrame #' @rdname read.jdbc #' @name read.jdbc diff --git a/R/pkg/R/WindowSpec.R b/R/pkg/R/WindowSpec.R index b55356b07d5e3..ddd2ef2fcdee5 100644 --- a/R/pkg/R/WindowSpec.R +++ b/R/pkg/R/WindowSpec.R @@ -44,6 +44,7 @@ windowSpec <- function(sws) { } #' @rdname show +#' @export #' @note show(WindowSpec) since 2.0.0 setMethod("show", "WindowSpec", function(object) { @@ -125,11 +126,11 @@ setMethod("orderBy", #' rowsBetween #' -#' Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive). +#' Defines the frame boundaries, from \code{start} (inclusive) to \code{end} (inclusive). #' -#' Both `start` and `end` are relative positions from the current row. For example, "0" means -#' "current row", while "-1" means the row before the current row, and "5" means the fifth row -#' after the current row. +#' Both \code{start} and \code{end} are relative positions from the current row. For example, +#' "0" means "current row", while "-1" means the row before the current row, and "5" means the +#' fifth row after the current row. #' #' @param x a WindowSpec #' @param start boundary start, inclusive. @@ -157,12 +158,12 @@ setMethod("rowsBetween", #' rangeBetween #' -#' Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive). +#' Defines the frame boundaries, from \code{start} (inclusive) to \code{end} (inclusive). +#' +#' Both \code{start} and \code{end} are relative from the current row. For example, "0" means +#' "current row", while "-1" means one off before the current row, and "5" means the five off +#' after the current row. #' -#' Both `start` and `end` are relative from the current row. For example, "0" means "current row", -#' while "-1" means one off before the current row, and "5" means the five off after the -#' current row. - #' @param x a WindowSpec #' @param start boundary start, inclusive. #' The frame is unbounded if this is the minimum long value. @@ -195,8 +196,8 @@ setMethod("rangeBetween", #' Define a windowing column. #' #' @param x a Column, usually one returned by window function(s). -#' @param window a WindowSpec object. Can be created by `windowPartitionBy` or -#' `windowOrderBy` and configured by other WindowSpec methods. +#' @param window a WindowSpec object. Can be created by \code{windowPartitionBy} or +#' \code{windowOrderBy} and configured by other WindowSpec methods. #' @rdname over #' @name over #' @aliases over,Column,WindowSpec-method diff --git a/R/pkg/R/column.R b/R/pkg/R/column.R index af486e1ce212d..539d91b0f8797 100644 --- a/R/pkg/R/column.R +++ b/R/pkg/R/column.R @@ -284,7 +284,7 @@ setMethod("%in%", #' otherwise #' #' If values in the specified column are null, returns the value. -#' Can be used in conjunction with `when` to specify a default value for expressions. +#' Can be used in conjunction with \code{when} to specify a default value for expressions. #' #' @param x a Column. #' @param value value to replace when the corresponding entry in \code{x} is NA. diff --git a/R/pkg/R/functions.R b/R/pkg/R/functions.R index b3c10de71f3fe..f042adddef91f 100644 --- a/R/pkg/R/functions.R +++ b/R/pkg/R/functions.R @@ -1250,7 +1250,7 @@ setMethod("rint", #' round #' -#' Returns the value of the column `e` rounded to 0 decimal places using HALF_UP rounding mode. +#' Returns the value of the column \code{e} rounded to 0 decimal places using HALF_UP rounding mode. #' #' @param x Column to compute on. 
#' @@ -1974,7 +1974,7 @@ setMethod("atan2", signature(y = "Column"), #' datediff #' -#' Returns the number of days from `start` to `end`. +#' Returns the number of days from \code{start} to \code{end}. #' #' @param x start Column to use. #' @param y end Column to use. @@ -2043,7 +2043,7 @@ setMethod("levenshtein", signature(y = "Column"), #' months_between #' -#' Returns number of months between dates `date1` and `date2`. +#' Returns number of months between dates \code{date1} and \code{date2}. #' #' @param x start Column to use. #' @param y end Column to use. @@ -2430,7 +2430,7 @@ setMethod("add_months", signature(y = "Column", x = "numeric"), #' date_add #' -#' Returns the date that is `days` days after `start` +#' Returns the date that is \code{x} days after #' #' @param y Column to compute on #' @param x Number of days to add @@ -2450,7 +2450,7 @@ setMethod("date_add", signature(y = "Column", x = "numeric"), #' date_sub #' -#' Returns the date that is `days` days before `start` +#' Returns the date that is \code{x} days before #' #' @param y Column to compute on #' @param x Number of days to substract @@ -3113,7 +3113,7 @@ setMethod("ifelse", #' N = total number of rows in the partition #' cume_dist(x) = number of values before (and including) x / N #' -#' This is equivalent to the CUME_DIST function in SQL. +#' This is equivalent to the \code{CUME_DIST} function in SQL. #' #' @rdname cume_dist #' @name cume_dist @@ -3141,7 +3141,7 @@ setMethod("cume_dist", #' and had three people tie for second place, you would say that all three were in second #' place and that the next person came in third. #' -#' This is equivalent to the DENSE_RANK function in SQL. +#' This is equivalent to the \code{DENSE_RANK} function in SQL. #' #' @rdname dense_rank #' @name dense_rank @@ -3159,11 +3159,11 @@ setMethod("dense_rank", #' lag #' -#' Window function: returns the value that is `offset` rows before the current row, and -#' `defaultValue` if there is less than `offset` rows before the current row. For example, -#' an `offset` of one will return the previous row at any given point in the window partition. +#' Window function: returns the value that is \code{offset} rows before the current row, and +#' \code{defaultValue} if there is less than \code{offset} rows before the current row. For example, +#' an \code{offset} of one will return the previous row at any given point in the window partition. #' -#' This is equivalent to the LAG function in SQL. +#' This is equivalent to the \code{LAG} function in SQL. #' #' @param x the column as a character string or a Column to compute on. #' @param offset the number of rows back from the current row from which to obtain a value. @@ -3193,11 +3193,11 @@ setMethod("lag", #' lead #' -#' Window function: returns the value that is `offset` rows after the current row, and -#' `null` if there is less than `offset` rows after the current row. For example, -#' an `offset` of one will return the next row at any given point in the window partition. +#' Window function: returns the value that is \code{offset} rows after the current row, and +#' NULL if there is less than \code{offset} rows after the current row. For example, +#' an \code{offset} of one will return the next row at any given point in the window partition. #' -#' This is equivalent to the LEAD function in SQL. +#' This is equivalent to the \code{LEAD} function in SQL. 
#' #' @param x Column to compute on #' @param offset Number of rows to offset @@ -3226,11 +3226,11 @@ setMethod("lead", #' ntile #' -#' Window function: returns the ntile group id (from 1 to `n` inclusive) in an ordered window -#' partition. For example, if `n` is 4, the first quarter of the rows will get value 1, the second +#' Window function: returns the ntile group id (from 1 to n inclusive) in an ordered window +#' partition. For example, if n is 4, the first quarter of the rows will get value 1, the second #' quarter will get 2, the third quarter will get 3, and the last quarter will get 4. #' -#' This is equivalent to the NTILE function in SQL. +#' This is equivalent to the \code{NTILE} function in SQL. #' #' @param x Number of ntile groups #' diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 6610a25c8c05a..88884e62575df 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -438,17 +438,17 @@ setGeneric("columns", function(x) {standardGeneric("columns") }) setGeneric("count", function(x) { standardGeneric("count") }) #' @rdname cov -#' @param x a Column object or a SparkDataFrame. -#' @param ... additional argument(s). If `x` is a Column object, a Column object -#' should be provided. If `x` is a SparkDataFrame, two column names should +#' @param x a Column or a SparkDataFrame. +#' @param ... additional argument(s). If \code{x} is a Column, a Column +#' should be provided. If \code{x} is a SparkDataFrame, two column names should #' be provided. #' @export setGeneric("cov", function(x, ...) {standardGeneric("cov") }) #' @rdname corr -#' @param x a Column object or a SparkDataFrame. -#' @param ... additional argument(s). If `x` is a Column object, a Column object -#' should be provided. If `x` is a SparkDataFrame, two column names should +#' @param x a Column or a SparkDataFrame. +#' @param ... additional argument(s). If \code{x} is a Column, a Column +#' should be provided. If \code{x} is a SparkDataFrame, two column names should #' be provided. #' @export setGeneric("corr", function(x, ...) {standardGeneric("corr") }) @@ -851,7 +851,7 @@ setGeneric("array_contains", function(x, value) { standardGeneric("array_contain setGeneric("ascii", function(x) { standardGeneric("ascii") }) #' @param x Column to compute on or a GroupedData object. -#' @param ... additional argument(s) when `x` is a GroupedData object. +#' @param ... additional argument(s) when \code{x} is a GroupedData object. #' @rdname avg #' @export setGeneric("avg", function(x, ...) { standardGeneric("avg") }) @@ -1339,7 +1339,6 @@ setGeneric("spark.naiveBayes", function(data, formula, ...) { standardGeneric("s setGeneric("spark.survreg", function(data, formula) { standardGeneric("spark.survreg") }) #' @rdname spark.lda -#' @param ... Additional parameters to tune LDA. #' @export setGeneric("spark.lda", function(data, ...) { standardGeneric("spark.lda") }) diff --git a/R/pkg/R/group.R b/R/pkg/R/group.R index 3c85ada91a444..e3479ef5fa583 100644 --- a/R/pkg/R/group.R +++ b/R/pkg/R/group.R @@ -48,6 +48,7 @@ groupedData <- function(sgd) { #' @rdname show #' @aliases show,GroupedData-method +#' @export #' @note show(GroupedData) since 1.4.0 setMethod("show", "GroupedData", function(object) { diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index b36fbcee17671..a40310d194d27 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -131,7 +131,7 @@ predict_internal <- function(object, newData) { #' This can be a character string naming a family function, a family function or #' the result of a call to a family function. 
Refer R family at #' \url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/family.html}. -#' @param weightCol the weight column name. If this is not set or NULL, we treat all instance +#' @param weightCol the weight column name. If this is not set or \code{NULL}, we treat all instance #' weights as 1.0. #' @param tol positive convergence tolerance of iterations. #' @param maxIter integer giving the maximal number of IRLS iterations. @@ -197,7 +197,7 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"), #' This can be a character string naming a family function, a family function or #' the result of a call to a family function. Refer R family at #' \url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/family.html}. -#' @param weightCol the weight column name. If this is not set or NULL, we treat all instance +#' @param weightCol the weight column name. If this is not set or \code{NULL}, we treat all instance #' weights as 1.0. #' @param epsilon positive convergence tolerance of iterations. #' @param maxit integer giving the maximal number of IRLS iterations. @@ -434,8 +434,8 @@ setMethod("write.ml", signature(object = "LDAModel", path = "character"), #' operators are supported, including '~', '.', ':', '+', and '-'. #' @param isotonic Whether the output sequence should be isotonic/increasing (TRUE) or #' antitonic/decreasing (FALSE) -#' @param featureIndex The index of the feature if \code{featuresCol} is a vector column (default: `0`), -#' no effect otherwise +#' @param featureIndex The index of the feature if \code{featuresCol} is a vector column +#' (default: 0), no effect otherwise #' @param weightCol The weight column name. #' @return \code{spark.isoreg} returns a fitted Isotonic Regression model #' @rdname spark.isoreg @@ -647,7 +647,7 @@ setMethod("predict", signature(object = "KMeansModel"), #' @rdname spark.naiveBayes #' @aliases spark.naiveBayes,SparkDataFrame,formula-method #' @name spark.naiveBayes -#' @seealso e1071: \url{https://cran.r-project.org/web/packages/e1071/} +#' @seealso e1071: \url{https://cran.r-project.org/package=e1071} #' @export #' @examples #' \dontrun{ @@ -815,7 +815,7 @@ read.ml <- function(path) { #' Note that operator '.' is not supported currently. #' @return \code{spark.survreg} returns a fitted AFT survival regression model. #' @rdname spark.survreg -#' @seealso survival: \url{https://cran.r-project.org/web/packages/survival/} +#' @seealso survival: \url{https://cran.r-project.org/package=survival} #' @export #' @examples #' \dontrun{ @@ -870,10 +870,11 @@ setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = "formula #' @param customizedStopWords stopwords that need to be removed from the given corpus. Ignore the #' parameter if libSVM-format column is used as the features column. #' @param maxVocabSize maximum vocabulary size, default 1 << 18 +#' @param ... additional argument(s) passed to the method. #' @return \code{spark.lda} returns a fitted Latent Dirichlet Allocation model #' @rdname spark.lda #' @aliases spark.lda,SparkDataFrame-method -#' @seealso topicmodels: \url{https://cran.r-project.org/web/packages/topicmodels/} +#' @seealso topicmodels: \url{https://cran.r-project.org/package=topicmodels} #' @export #' @examples #' \dontrun{ @@ -962,7 +963,7 @@ setMethod("predict", signature(object = "AFTSurvivalRegressionModel"), #' @return \code{spark.gaussianMixture} returns a fitted multivariate gaussian mixture model. 
#' @rdname spark.gaussianMixture #' @name spark.gaussianMixture -#' @seealso mixtools: \url{https://cran.r-project.org/web/packages/mixtools/} +#' @seealso mixtools: \url{https://cran.r-project.org/package=mixtools} #' @export #' @examples #' \dontrun{ @@ -1075,7 +1076,7 @@ setMethod("predict", signature(object = "GaussianMixtureModel"), #' @param numUserBlocks number of user blocks used to parallelize computation (> 0). #' @param numItemBlocks number of item blocks used to parallelize computation (> 0). #' @param checkpointInterval number of checkpoint intervals (>= 1) or disable checkpoint (-1). -#' +#' @param ... additional argument(s) passed to the method. #' @return \code{spark.als} returns a fitted ALS model #' @rdname spark.als #' @aliases spark.als,SparkDataFrame-method diff --git a/R/pkg/R/pairRDD.R b/R/pkg/R/pairRDD.R index f0605db1e9e83..4dee3245f9b75 100644 --- a/R/pkg/R/pairRDD.R +++ b/R/pkg/R/pairRDD.R @@ -917,19 +917,19 @@ setMethod("sampleByKey", len <- 0 # mixing because the initial seeds are close to each other - runif(10) + stats::runif(10) for (elem in part) { if (elem[[1]] %in% names(fractions)) { frac <- as.numeric(fractions[which(elem[[1]] == names(fractions))]) if (withReplacement) { - count <- rpois(1, frac) + count <- stats::rpois(1, frac) if (count > 0) { res[ (len + 1) : (len + count) ] <- rep(list(elem), count) len <- len + count } } else { - if (runif(1) < frac) { + if (stats::runif(1) < frac) { len <- len + 1 res[[len]] <- elem } diff --git a/R/pkg/R/stats.R b/R/pkg/R/stats.R index 8ea24d81729ec..dcd7198f41ea7 100644 --- a/R/pkg/R/stats.R +++ b/R/pkg/R/stats.R @@ -29,9 +29,9 @@ setOldClass("jobj") #' @param col1 name of the first column. Distinct items will make the first item of each row. #' @param col2 name of the second column. Distinct items will make the column names of the output. #' @return a local R data.frame representing the contingency table. The first column of each row -#' will be the distinct values of `col1` and the column names will be the distinct values -#' of `col2`. The name of the first column will be `$col1_$col2`. Pairs that have no -#' occurrences will have zero as their counts. +#' will be the distinct values of \code{col1} and the column names will be the distinct values +#' of \code{col2}. The name of the first column will be "\code{col1}_\code{col2}". Pairs +#' that have no occurrences will have zero as their counts. #' #' @rdname crosstab #' @name crosstab @@ -116,7 +116,7 @@ setMethod("corr", #' #' @param x A SparkDataFrame. #' @param cols A vector column names to search frequent items in. -#' @param support (Optional) The minimum frequency for an item to be considered `frequent`. +#' @param support (Optional) The minimum frequency for an item to be considered \code{frequent}. #' Should be greater than 1e-4. Default support = 0.01. #' @return a local R data.frame with the frequent items in each column #' @@ -142,9 +142,9 @@ setMethod("freqItems", signature(x = "SparkDataFrame", cols = "character"), #' #' Calculates the approximate quantiles of a numerical column of a SparkDataFrame. #' The result of this algorithm has the following deterministic bound: -#' If the SparkDataFrame has N elements and if we request the quantile at probability `p` up to -#' error `err`, then the algorithm will return a sample `x` from the SparkDataFrame so that the -#' *exact* rank of `x` is close to (p * N). 
More precisely, +#' If the SparkDataFrame has N elements and if we request the quantile at probability p up to +#' error err, then the algorithm will return a sample x from the SparkDataFrame so that the +#' *exact* rank of x is close to (p * N). More precisely, #' floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). #' This method implements a variation of the Greenwald-Khanna algorithm (with some speed #' optimizations). The algorithm was first present in [[http://dx.doi.org/10.1145/375663.375670 From 8e223ea67acf5aa730ccf688802f17f6fc10907c Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Mon, 22 Aug 2016 16:32:14 -0700 Subject: [PATCH 060/270] [SPARK-16550][SPARK-17042][CORE] Certain classes fail to deserialize in block manager replication ## What changes were proposed in this pull request? This is a straightforward clone of JoshRosen 's original patch. I have follow-up changes to fix block replication for repl-defined classes as well, but those appear to be flaking tests so I'm going to leave that for SPARK-17042 ## How was this patch tested? End-to-end test in ReplSuite (also more tests in DistributedSuite from the original patch). Author: Eric Liang Closes #14311 from ericl/spark-16550. --- .../spark/serializer/SerializerManager.scala | 14 +++- .../apache/spark/storage/BlockManager.scala | 13 +++- .../org/apache/spark/DistributedSuite.scala | 77 ++++++------------- .../org/apache/spark/repl/ReplSuite.scala | 14 ++++ 4 files changed, 60 insertions(+), 58 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala b/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala index 9dc274c9fe288..07caadbe40438 100644 --- a/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala +++ b/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala @@ -68,7 +68,7 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar * loaded yet. */ private lazy val compressionCodec: CompressionCodec = CompressionCodec.createCodec(conf) - private def canUseKryo(ct: ClassTag[_]): Boolean = { + def canUseKryo(ct: ClassTag[_]): Boolean = { primitiveAndPrimitiveArrayClassTags.contains(ct) || ct == stringClassTag } @@ -128,8 +128,18 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar /** Serializes into a chunked byte buffer. */ def dataSerialize[T: ClassTag](blockId: BlockId, values: Iterator[T]): ChunkedByteBuffer = { + dataSerializeWithExplicitClassTag(blockId, values, implicitly[ClassTag[T]]) + } + + /** Serializes into a chunked byte buffer. 
*/ + def dataSerializeWithExplicitClassTag( + blockId: BlockId, + values: Iterator[_], + classTag: ClassTag[_]): ChunkedByteBuffer = { val bbos = new ChunkedByteBufferOutputStream(1024 * 1024 * 4, ByteBuffer.allocate) - dataSerializeStream(blockId, bbos, values) + val byteStream = new BufferedOutputStream(bbos) + val ser = getSerializer(classTag).newInstance() + ser.serializeStream(wrapForCompression(blockId, byteStream)).writeAll(values).close() bbos.toChunkedByteBuffer } diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index 015e71d1260ea..fe8465279860d 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -498,7 +498,8 @@ private[spark] class BlockManager( diskStore.getBytes(blockId) } else if (level.useMemory && memoryStore.contains(blockId)) { // The block was not found on disk, so serialize an in-memory copy: - serializerManager.dataSerialize(blockId, memoryStore.getValues(blockId).get) + serializerManager.dataSerializeWithExplicitClassTag( + blockId, memoryStore.getValues(blockId).get, info.classTag) } else { handleLocalReadFailure(blockId) } @@ -973,8 +974,16 @@ private[spark] class BlockManager( if (level.replication > 1) { val remoteStartTime = System.currentTimeMillis val bytesToReplicate = doGetLocalBytes(blockId, info) + // [SPARK-16550] Erase the typed classTag when using default serialization, since + // NettyBlockRpcServer crashes when deserializing repl-defined classes. + // TODO(ekl) remove this once the classloader issue on the remote end is fixed. + val remoteClassTag = if (!serializerManager.canUseKryo(classTag)) { + scala.reflect.classTag[Any] + } else { + classTag + } try { - replicate(blockId, bytesToReplicate, level, classTag) + replicate(blockId, bytesToReplicate, level, remoteClassTag) } finally { bytesToReplicate.dispose() } diff --git a/core/src/test/scala/org/apache/spark/DistributedSuite.scala b/core/src/test/scala/org/apache/spark/DistributedSuite.scala index 6beae842b04d1..4ee0e00fde506 100644 --- a/core/src/test/scala/org/apache/spark/DistributedSuite.scala +++ b/core/src/test/scala/org/apache/spark/DistributedSuite.scala @@ -149,61 +149,16 @@ class DistributedSuite extends SparkFunSuite with Matchers with LocalSparkContex sc.parallelize(1 to 10).count() } - test("caching") { + private def testCaching(storageLevel: StorageLevel): Unit = { sc = new SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).cache() - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) - } - - test("caching on disk") { - sc = new SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).persist(StorageLevel.DISK_ONLY) - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) - } - - test("caching in memory, replicated") { - sc = new SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).persist(StorageLevel.MEMORY_ONLY_2) - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) - } - - test("caching in memory, serialized, replicated") { - sc = new SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).persist(StorageLevel.MEMORY_ONLY_SER_2) - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) - } - - test("caching on disk, replicated") { - sc = new 
SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).persist(StorageLevel.DISK_ONLY_2) - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) - } - - test("caching in memory and disk, replicated") { - sc = new SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).persist(StorageLevel.MEMORY_AND_DISK_2) - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) - } - - test("caching in memory and disk, serialized, replicated") { - sc = new SparkContext(clusterUrl, "test") - val data = sc.parallelize(1 to 1000, 10).persist(StorageLevel.MEMORY_AND_DISK_SER_2) - - assert(data.count() === 1000) - assert(data.count() === 1000) - assert(data.count() === 1000) + sc.jobProgressListener.waitUntilExecutorsUp(2, 30000) + val data = sc.parallelize(1 to 1000, 10) + val cachedData = data.persist(storageLevel) + assert(cachedData.count === 1000) + assert(sc.getExecutorStorageStatus.map(_.rddBlocksById(cachedData.id).size).sum === + storageLevel.replication * data.getNumPartitions) + assert(cachedData.count === 1000) + assert(cachedData.count === 1000) // Get all the locations of the first partition and try to fetch the partitions // from those locations. @@ -221,6 +176,20 @@ class DistributedSuite extends SparkFunSuite with Matchers with LocalSparkContex } } + Seq( + "caching" -> StorageLevel.MEMORY_ONLY, + "caching on disk" -> StorageLevel.DISK_ONLY, + "caching in memory, replicated" -> StorageLevel.MEMORY_ONLY_2, + "caching in memory, serialized, replicated" -> StorageLevel.MEMORY_ONLY_SER_2, + "caching on disk, replicated" -> StorageLevel.DISK_ONLY_2, + "caching in memory and disk, replicated" -> StorageLevel.MEMORY_AND_DISK_2, + "caching in memory and disk, serialized, replicated" -> StorageLevel.MEMORY_AND_DISK_SER_2 + ).foreach { case (testName, storageLevel) => + test(testName) { + testCaching(storageLevel) + } + } + test("compute without caching when no partitions fit in memory") { val size = 10000 val conf = new SparkConf() diff --git a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala index c10db947bcb44..06b09f3158d77 100644 --- a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala +++ b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala @@ -396,6 +396,20 @@ class ReplSuite extends SparkFunSuite { assertContains("ret: Array[(Int, Iterable[Foo])] = Array((1,", output) } + test("replicating blocks of object with class defined in repl") { + val output = runInterpreter("local-cluster[2,1,1024]", + """ + |import org.apache.spark.storage.StorageLevel._ + |case class Foo(i: Int) + |val ret = sc.parallelize((1 to 100).map(Foo), 10).persist(MEMORY_ONLY_2) + |ret.count() + |sc.getExecutorStorageStatus.map(s => s.rddBlocksById(ret.id).size).sum + """.stripMargin) + assertDoesNotContain("error:", output) + assertDoesNotContain("Exception", output) + assertContains(": Int = 20", output) + } + test("line wrapper only initialized once when used as encoder outer scope") { val output = runInterpreter("local", """ From 6d93f9e0236aa61e39a1abfb0f7f7c558fb7d5d5 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Tue, 23 Aug 2016 08:03:08 +0800 Subject: [PATCH 061/270] [SPARK-17144][SQL] Removal of useless CreateHiveTableAsSelectLogicalPlan ## What changes were proposed in this pull request? `CreateHiveTableAsSelectLogicalPlan` is a dead code after refactoring. 
## How was this patch tested? N/A Author: gatorsmile Closes #14707 from gatorsmile/removeCreateHiveTable. --- .../spark/sql/execution/command/tables.scala | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index af2b5ffd1c427..21544a37d9975 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -33,28 +33,11 @@ import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogTable, CatalogT import org.apache.spark.sql.catalyst.catalog.CatalogTableType._ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference} -import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan, UnaryNode} import org.apache.spark.sql.catalyst.util.quoteIdentifier -import org.apache.spark.sql.execution.datasources.{PartitioningUtils} +import org.apache.spark.sql.execution.datasources.PartitioningUtils import org.apache.spark.sql.types._ import org.apache.spark.util.Utils -case class CreateHiveTableAsSelectLogicalPlan( - tableDesc: CatalogTable, - child: LogicalPlan, - allowExisting: Boolean) extends UnaryNode with Command { - - override def output: Seq[Attribute] = Seq.empty[Attribute] - - override lazy val resolved: Boolean = - tableDesc.identifier.database.isDefined && - tableDesc.schema.nonEmpty && - tableDesc.storage.serde.isDefined && - tableDesc.storage.inputFormat.isDefined && - tableDesc.storage.outputFormat.isDefined && - childrenResolved -} - /** * A command to create a table with the same definition of the given existing table. * From 37f0ab70d25802b609317bc93421d2fe3ee9db6e Mon Sep 17 00:00:00 2001 From: hqzizania Date: Mon, 22 Aug 2016 17:09:08 -0700 Subject: [PATCH 062/270] [SPARK-17090][FOLLOW-UP][ML] Add expert param support to SharedParamsCodeGen ## What changes were proposed in this pull request? Add expert param support to SharedParamsCodeGen where aggregationDepth a expert param is added. Author: hqzizania Closes #14738 from hqzizania/SPARK-17090-minor. --- .../ml/param/shared/SharedParamsCodeGen.scala | 14 ++++++++++---- .../spark/ml/param/shared/sharedParams.scala | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala index 0f48a16a429ff..480b03d0f35c4 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala @@ -80,7 +80,7 @@ private[shared] object SharedParamsCodeGen { ParamDesc[String]("solver", "the solver algorithm for optimization. 
If this is not set or " + "empty, default value is 'auto'", Some("\"auto\"")), ParamDesc[Int]("aggregationDepth", "suggested depth for treeAggregate (>= 2)", Some("2"), - isValid = "ParamValidators.gtEq(2)")) + isValid = "ParamValidators.gtEq(2)", isExpertParam = true)) val code = genSharedParams(params) val file = "src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala" @@ -95,7 +95,8 @@ private[shared] object SharedParamsCodeGen { doc: String, defaultValueStr: Option[String] = None, isValid: String = "", - finalMethods: Boolean = true) { + finalMethods: Boolean = true, + isExpertParam: Boolean = false) { require(name.matches("[a-z][a-zA-Z0-9]*"), s"Param name $name is invalid.") require(doc.nonEmpty) // TODO: more rigorous on doc @@ -153,6 +154,11 @@ private[shared] object SharedParamsCodeGen { } else { "" } + val groupStr = if (param.isExpertParam) { + Array("expertParam", "expertGetParam") + } else { + Array("param", "getParam") + } val methodStr = if (param.finalMethods) { "final def" } else { @@ -167,11 +173,11 @@ private[shared] object SharedParamsCodeGen { | | /** | * Param for $doc. - | * @group param + | * @group ${groupStr(0)} | */ | final val $name: $Param = new $Param(this, "$name", "$doc"$isValid) |$setDefault - | /** @group getParam */ + | /** @group ${groupStr(1)} */ | $methodStr get$Name: $T = $$($name) |} |""".stripMargin diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala index 6803772c63d62..9125d9e19bf09 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala @@ -397,13 +397,13 @@ private[ml] trait HasAggregationDepth extends Params { /** * Param for suggested depth for treeAggregate (>= 2). - * @group param + * @group expertParam */ final val aggregationDepth: IntParam = new IntParam(this, "aggregationDepth", "suggested depth for treeAggregate (>= 2)", ParamValidators.gtEq(2)) setDefault(aggregationDepth, 2) - /** @group getParam */ + /** @group expertGetParam */ final def getAggregationDepth: Int = $(aggregationDepth) } // scalastyle:on From 920806ab272ba58a369072a5eeb89df5e9b470a6 Mon Sep 17 00:00:00 2001 From: Shivaram Venkataraman Date: Mon, 22 Aug 2016 17:09:32 -0700 Subject: [PATCH 063/270] [SPARK-16577][SPARKR] Add CRAN documentation checks to run-tests.sh ## What changes were proposed in this pull request? (Please fill in changes proposed in this fix) ## How was this patch tested? This change adds CRAN documentation checks to be run as a part of `R/run-tests.sh` . As this script is also used by Jenkins this means that we will get documentation checks on every PR going forward. (If this patch involves UI changes, please attach a screenshot; otherwise, remove this) Author: Shivaram Venkataraman Closes #14759 from shivaram/sparkr-cran-jenkins. --- R/check-cran.sh | 18 +++++++++++++++--- R/run-tests.sh | 27 ++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/R/check-cran.sh b/R/check-cran.sh index 5c90fd07f28e4..bb331466ae931 100755 --- a/R/check-cran.sh +++ b/R/check-cran.sh @@ -43,10 +43,22 @@ $FWDIR/create-docs.sh "$R_SCRIPT_PATH/"R CMD build $FWDIR/pkg # Run check as-cran. 
-# TODO(shivaram): Remove the skip tests once we figure out the install mechanism - VERSION=`grep Version $FWDIR/pkg/DESCRIPTION | awk '{print $NF}'` -"$R_SCRIPT_PATH/"R CMD check --as-cran SparkR_"$VERSION".tar.gz +CRAN_CHECK_OPTIONS="--as-cran" + +if [ -n "$NO_TESTS" ] +then + CRAN_CHECK_OPTIONS=$CRAN_CHECK_OPTIONS" --no-tests" +fi + +if [ -n "$NO_MANUAL" ] +then + CRAN_CHECK_OPTIONS=$CRAN_CHECK_OPTIONS" --no-manual" +fi + +echo "Running CRAN check with $CRAN_CHECK_OPTIONS options" + +"$R_SCRIPT_PATH/"R CMD check $CRAN_CHECK_OPTIONS SparkR_"$VERSION".tar.gz popd > /dev/null diff --git a/R/run-tests.sh b/R/run-tests.sh index 9dcf0ace7d97e..1a1e8ab9ffe18 100755 --- a/R/run-tests.sh +++ b/R/run-tests.sh @@ -26,6 +26,17 @@ rm -f $LOGFILE SPARK_TESTING=1 $FWDIR/../bin/spark-submit --driver-java-options "-Dlog4j.configuration=file:$FWDIR/log4j.properties" --conf spark.hadoop.fs.default.name="file:///" $FWDIR/pkg/tests/run-all.R 2>&1 | tee -a $LOGFILE FAILED=$((PIPESTATUS[0]||$FAILED)) +# Also run the documentation tests for CRAN +CRAN_CHECK_LOG_FILE=$FWDIR/cran-check.out +rm -f $CRAN_CHECK_LOG_FILE + +NO_TESTS=1 NO_MANUAL=1 $FWDIR/check-cran.sh 2>&1 | tee -a $CRAN_CHECK_LOG_FILE +FAILED=$((PIPESTATUS[0]||$FAILED)) + +NUM_CRAN_WARNING="$(grep -c WARNING$ $CRAN_CHECK_LOG_FILE)" +NUM_CRAN_ERROR="$(grep -c ERROR$ $CRAN_CHECK_LOG_FILE)" +NUM_CRAN_NOTES="$(grep -c NOTE$ $CRAN_CHECK_LOG_FILE)" + if [[ $FAILED != 0 ]]; then cat $LOGFILE echo -en "\033[31m" # Red @@ -33,7 +44,17 @@ if [[ $FAILED != 0 ]]; then echo -en "\033[0m" # No color exit -1 else - echo -en "\033[32m" # Green - echo "Tests passed." - echo -en "\033[0m" # No color + # We have 2 existing NOTEs for new maintainer, attach() + # We have one more NOTE in Jenkins due to "No repository set" + if [[ $NUM_CRAN_WARNING != 0 || $NUM_CRAN_ERROR != 0 || $NUM_CRAN_NOTES -gt 3 ]]; then + cat $CRAN_CHECK_LOG_FILE + echo -en "\033[31m" # Red + echo "Had CRAN check errors; see logs." + echo -en "\033[0m" # No color + exit -1 + else + echo -en "\033[32m" # Green + echo "Tests passed." + echo -en "\033[0m" # No color + fi fi From 2cdd92a7cd6f85186c846635b422b977bdafbcdd Mon Sep 17 00:00:00 2001 From: Cheng Lian Date: Tue, 23 Aug 2016 09:11:47 +0800 Subject: [PATCH 064/270] [SPARK-17182][SQL] Mark Collect as non-deterministic ## What changes were proposed in this pull request? This PR marks the abstract class `Collect` as non-deterministic since the results of `CollectList` and `CollectSet` depend on the actual order of input rows. ## How was this patch tested? Existing test cases should be enough. Author: Cheng Lian Closes #14749 from liancheng/spark-17182-non-deterministic-collect. --- .../spark/sql/catalyst/expressions/aggregate/collect.scala | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/collect.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/collect.scala index ac2cefaddcf59..896ff61b23093 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/collect.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/collect.scala @@ -54,6 +54,10 @@ abstract class Collect extends ImperativeAggregate { override def inputAggBufferAttributes: Seq[AttributeReference] = Nil + // Both `CollectList` and `CollectSet` are non-deterministic since their results depend on the + // actual order of input rows. 
+ override def deterministic: Boolean = false + protected[this] val buffer: Growable[Any] with Iterable[Any] override def initialize(b: MutableRow): Unit = { From d2b3d3e63e1a9217de6ef507c350308017664a62 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Mon, 22 Aug 2016 20:15:03 -0700 Subject: [PATCH 065/270] [SPARKR][MINOR] Update R DESCRIPTION file ## What changes were proposed in this pull request? Update DESCRIPTION ## How was this patch tested? Run install and CRAN tests Author: Felix Cheung Closes #14764 from felixcheung/rpackagedescription. --- R/pkg/DESCRIPTION | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/R/pkg/DESCRIPTION b/R/pkg/DESCRIPTION index d81f1a3d4de68..e5afed2d0a93e 100644 --- a/R/pkg/DESCRIPTION +++ b/R/pkg/DESCRIPTION @@ -3,10 +3,15 @@ Type: Package Title: R Frontend for Apache Spark Version: 2.0.0 Date: 2016-07-07 -Author: The Apache Software Foundation -Maintainer: Shivaram Venkataraman - Xiangrui Meng - Felix Cheung +Authors@R: c(person("Shivaram", "Venkataraman", role = c("aut", "cre"), + email = "shivaram@cs.berkeley.edu"), + person("Xiangrui", "Meng", role = "aut", + email = "meng@databricks.com"), + person("Felix", "Cheung", role = "aut", + email = "felixcheung@apache.org"), + person(family = "The Apache Software Foundation", role = c("aut", "cph"))) +URL: http://www.apache.org/ http://spark.apache.org/ +BugReports: https://issues.apache.org/jira/secure/CreateIssueDetails!init.jspa?pid=12315420&components=12325400&issuetype=4 Depends: R (>= 3.0), methods From cc33460a51d2890fe8f50f5b6b87003d6d210f04 Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Tue, 23 Aug 2016 14:57:00 +0800 Subject: [PATCH 066/270] [SPARK-17188][SQL] Moves class QuantileSummaries to project catalyst for implementing percentile_approx ## What changes were proposed in this pull request? This is a sub-task of [SPARK-16283](https://issues.apache.org/jira/browse/SPARK-16283) (Implement percentile_approx SQL function), which moves class QuantileSummaries to project catalyst so that it can be reused when implementing aggregation function `percentile_approx`. ## How was this patch tested? This PR only does class relocation, class implementation is not changed. Author: Sean Zhong Closes #14754 from clockfly/move_QuantileSummaries_to_catalyst. --- .../sql/catalyst/util/QuantileSummaries.scala | 264 ++++++++++++++++++ .../util/QuantileSummariesSuite.scala} | 7 +- .../sql/execution/stat/StatFunctions.scala | 247 +--------------- 3 files changed, 267 insertions(+), 251 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala rename sql/{core/src/test/scala/org/apache/spark/sql/execution/stat/ApproxQuantileSuite.scala => catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala} (96%) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala new file mode 100644 index 0000000000000..493b5faf9e50a --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.util + +import scala.collection.mutable.ArrayBuffer + +import org.apache.spark.sql.catalyst.util.QuantileSummaries.Stats + +/** + * Helper class to compute approximate quantile summary. + * This implementation is based on the algorithm proposed in the paper: + * "Space-efficient Online Computation of Quantile Summaries" by Greenwald, Michael + * and Khanna, Sanjeev. (http://dx.doi.org/10.1145/375663.375670) + * + * In order to optimize for speed, it maintains an internal buffer of the last seen samples, + * and only inserts them after crossing a certain size threshold. This guarantees a near-constant + * runtime complexity compared to the original algorithm. + * + * @param compressThreshold the compression threshold. + * After the internal buffer of statistics crosses this size, it attempts to compress the + * statistics together. + * @param relativeError the target relative error. + * It is uniform across the complete range of values. + * @param sampled a buffer of quantile statistics. + * See the G-K article for more details. + * @param count the count of all the elements *inserted in the sampled buffer* + * (excluding the head buffer) + */ +class QuantileSummaries( + val compressThreshold: Int, + val relativeError: Double, + val sampled: Array[Stats] = Array.empty, + val count: Long = 0L) extends Serializable { + + // a buffer of latest samples seen so far + private val headSampled: ArrayBuffer[Double] = ArrayBuffer.empty + + import QuantileSummaries._ + + /** + * Returns a summary with the given observation inserted into the summary. + * This method may either modify in place the current summary (and return the same summary, + * modified in place), or it may create a new summary from scratch it necessary. + * @param x the new observation to insert into the summary + */ + def insert(x: Double): QuantileSummaries = { + headSampled.append(x) + if (headSampled.size >= defaultHeadSize) { + this.withHeadBufferInserted + } else { + this + } + } + + /** + * Inserts an array of (unsorted samples) in a batch, sorting the array first to traverse + * the summary statistics in a single batch. + * + * This method does not modify the current object and returns if necessary a new copy. + * + * @return a new quantile summary object. + */ + private def withHeadBufferInserted: QuantileSummaries = { + if (headSampled.isEmpty) { + return this + } + var currentCount = count + val sorted = headSampled.toArray.sorted + val newSamples: ArrayBuffer[Stats] = new ArrayBuffer[Stats]() + // The index of the next element to insert + var sampleIdx = 0 + // The index of the sample currently being inserted. + var opsIdx: Int = 0 + while(opsIdx < sorted.length) { + val currentSample = sorted(opsIdx) + // Add all the samples before the next observation. 
+ while(sampleIdx < sampled.size && sampled(sampleIdx).value <= currentSample) { + newSamples.append(sampled(sampleIdx)) + sampleIdx += 1 + } + + // If it is the first one to insert, of if it is the last one + currentCount += 1 + val delta = + if (newSamples.isEmpty || (sampleIdx == sampled.size && opsIdx == sorted.length - 1)) { + 0 + } else { + math.floor(2 * relativeError * currentCount).toInt + } + + val tuple = Stats(currentSample, 1, delta) + newSamples.append(tuple) + opsIdx += 1 + } + + // Add all the remaining existing samples + while(sampleIdx < sampled.size) { + newSamples.append(sampled(sampleIdx)) + sampleIdx += 1 + } + new QuantileSummaries(compressThreshold, relativeError, newSamples.toArray, currentCount) + } + + /** + * Returns a new summary that compresses the summary statistics and the head buffer. + * + * This implements the COMPRESS function of the GK algorithm. It does not modify the object. + * + * @return a new summary object with compressed statistics + */ + def compress(): QuantileSummaries = { + // Inserts all the elements first + val inserted = this.withHeadBufferInserted + assert(inserted.headSampled.isEmpty) + assert(inserted.count == count + headSampled.size) + val compressed = + compressImmut(inserted.sampled, mergeThreshold = 2 * relativeError * inserted.count) + new QuantileSummaries(compressThreshold, relativeError, compressed, inserted.count) + } + + private def shallowCopy: QuantileSummaries = { + new QuantileSummaries(compressThreshold, relativeError, sampled, count) + } + + /** + * Merges two (compressed) summaries together. + * + * Returns a new summary. + */ + def merge(other: QuantileSummaries): QuantileSummaries = { + require(headSampled.isEmpty, "Current buffer needs to be compressed before merge") + require(other.headSampled.isEmpty, "Other buffer needs to be compressed before merge") + if (other.count == 0) { + this.shallowCopy + } else if (count == 0) { + other.shallowCopy + } else { + // Merge the two buffers. + // The GK algorithm is a bit unclear about it, but it seems there is no need to adjust the + // statistics during the merging: the invariants are still respected after the merge. + // TODO: could replace full sort by ordered merge, the two lists are known to be sorted + // already. + val res = (sampled ++ other.sampled).sortBy(_.value) + val comp = compressImmut(res, mergeThreshold = 2 * relativeError * count) + new QuantileSummaries( + other.compressThreshold, other.relativeError, comp, other.count + count) + } + } + + /** + * Runs a query for a given quantile. + * The result follows the approximation guarantees detailed above. + * The query can only be run on a compressed summary: you need to call compress() before using + * it. 
+ * + * @param quantile the target quantile + * @return + */ + def query(quantile: Double): Double = { + require(quantile >= 0 && quantile <= 1.0, "quantile should be in the range [0.0, 1.0]") + require(headSampled.isEmpty, + "Cannot operate on an uncompressed summary, call compress() first") + + if (quantile <= relativeError) { + return sampled.head.value + } + + if (quantile >= 1 - relativeError) { + return sampled.last.value + } + + // Target rank + val rank = math.ceil(quantile * count).toInt + val targetError = math.ceil(relativeError * count) + // Minimum rank at current sample + var minRank = 0 + var i = 1 + while (i < sampled.size - 1) { + val curSample = sampled(i) + minRank += curSample.g + val maxRank = minRank + curSample.delta + if (maxRank - targetError <= rank && rank <= minRank + targetError) { + return curSample.value + } + i += 1 + } + sampled.last.value + } +} + +object QuantileSummaries { + // TODO(tjhunter) more tuning could be done one the constants here, but for now + // the main cost of the algorithm is accessing the data in SQL. + /** + * The default value for the compression threshold. + */ + val defaultCompressThreshold: Int = 10000 + + /** + * The size of the head buffer. + */ + val defaultHeadSize: Int = 50000 + + /** + * The default value for the relative error (1%). + * With this value, the best extreme percentiles that can be approximated are 1% and 99%. + */ + val defaultRelativeError: Double = 0.01 + + /** + * Statistics from the Greenwald-Khanna paper. + * @param value the sampled value + * @param g the minimum rank jump from the previous value's minimum rank + * @param delta the maximum span of the rank. + */ + case class Stats(value: Double, g: Int, delta: Int) + + private def compressImmut( + currentSamples: IndexedSeq[Stats], + mergeThreshold: Double): Array[Stats] = { + if (currentSamples.isEmpty) { + return Array.empty[Stats] + } + val res: ArrayBuffer[Stats] = ArrayBuffer.empty + // Start for the last element, which is always part of the set. + // The head contains the current new head, that may be merged with the current element. + var head = currentSamples.last + var i = currentSamples.size - 2 + // Do not compress the last element + while (i >= 1) { + // The current sample: + val sample1 = currentSamples(i) + // Do we need to compress? + if (sample1.g + head.g + head.delta < mergeThreshold) { + // Do not insert yet, just merge the current element into the head. + head = head.copy(g = head.g + sample1.g) + } else { + // Prepend the current head, and keep the current sample as target for merging. + res.prepend(head) + head = sample1 + } + i -= 1 + } + res.prepend(head) + // If necessary, add the minimum element: + res.prepend(currentSamples.head) + res.toArray + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/stat/ApproxQuantileSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala similarity index 96% rename from sql/core/src/test/scala/org/apache/spark/sql/execution/stat/ApproxQuantileSuite.scala rename to sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala index 0a989d026ce1c..89b2a22a3de45 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/stat/ApproxQuantileSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala @@ -15,15 +15,13 @@ * limitations under the License. 
*/ -package org.apache.spark.sql.execution.stat +package org.apache.spark.sql.catalyst.util import scala.util.Random import org.apache.spark.SparkFunSuite -import org.apache.spark.sql.execution.stat.StatFunctions.QuantileSummaries - -class ApproxQuantileSuite extends SparkFunSuite { +class QuantileSummariesSuite extends SparkFunSuite { private val r = new Random(1) private val n = 100 @@ -125,5 +123,4 @@ class ApproxQuantileSuite extends SparkFunSuite { checkQuantile(0.001, data, s) } } - } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala index 7c58c4897fcd5..822f49ecab47b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala @@ -17,20 +17,17 @@ package org.apache.spark.sql.execution.stat -import scala.collection.mutable.ArrayBuffer - import org.apache.spark.internal.Logging import org.apache.spark.sql.{Column, DataFrame, Dataset, Row} import org.apache.spark.sql.catalyst.expressions.{Cast, GenericMutableRow} import org.apache.spark.sql.catalyst.plans.logical.LocalRelation +import org.apache.spark.sql.catalyst.util.QuantileSummaries import org.apache.spark.sql.functions._ import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.UTF8String object StatFunctions extends Logging { - import QuantileSummaries.Stats - /** * Calculates the approximate quantiles of multiple numerical columns of a DataFrame in one pass. * @@ -95,248 +92,6 @@ object StatFunctions extends Logging { summaries.map { summary => probabilities.map(summary.query) } } - /** - * Helper class to compute approximate quantile summary. - * This implementation is based on the algorithm proposed in the paper: - * "Space-efficient Online Computation of Quantile Summaries" by Greenwald, Michael - * and Khanna, Sanjeev. (http://dx.doi.org/10.1145/375663.375670) - * - * In order to optimize for speed, it maintains an internal buffer of the last seen samples, - * and only inserts them after crossing a certain size threshold. This guarantees a near-constant - * runtime complexity compared to the original algorithm. - * - * @param compressThreshold the compression threshold. - * After the internal buffer of statistics crosses this size, it attempts to compress the - * statistics together. - * @param relativeError the target relative error. - * It is uniform across the complete range of values. - * @param sampled a buffer of quantile statistics. - * See the G-K article for more details. - * @param count the count of all the elements *inserted in the sampled buffer* - * (excluding the head buffer) - */ - class QuantileSummaries( - val compressThreshold: Int, - val relativeError: Double, - val sampled: Array[Stats] = Array.empty, - val count: Long = 0L) extends Serializable { - - // a buffer of latest samples seen so far - private val headSampled: ArrayBuffer[Double] = ArrayBuffer.empty - - import QuantileSummaries._ - - /** - * Returns a summary with the given observation inserted into the summary. - * This method may either modify in place the current summary (and return the same summary, - * modified in place), or it may create a new summary from scratch it necessary. 
- * @param x the new observation to insert into the summary - */ - def insert(x: Double): QuantileSummaries = { - headSampled.append(x) - if (headSampled.size >= defaultHeadSize) { - this.withHeadBufferInserted - } else { - this - } - } - - /** - * Inserts an array of (unsorted samples) in a batch, sorting the array first to traverse - * the summary statistics in a single batch. - * - * This method does not modify the current object and returns if necessary a new copy. - * - * @return a new quantile summary object. - */ - private def withHeadBufferInserted: QuantileSummaries = { - if (headSampled.isEmpty) { - return this - } - var currentCount = count - val sorted = headSampled.toArray.sorted - val newSamples: ArrayBuffer[Stats] = new ArrayBuffer[Stats]() - // The index of the next element to insert - var sampleIdx = 0 - // The index of the sample currently being inserted. - var opsIdx: Int = 0 - while(opsIdx < sorted.length) { - val currentSample = sorted(opsIdx) - // Add all the samples before the next observation. - while(sampleIdx < sampled.size && sampled(sampleIdx).value <= currentSample) { - newSamples.append(sampled(sampleIdx)) - sampleIdx += 1 - } - - // If it is the first one to insert, of if it is the last one - currentCount += 1 - val delta = - if (newSamples.isEmpty || (sampleIdx == sampled.size && opsIdx == sorted.length - 1)) { - 0 - } else { - math.floor(2 * relativeError * currentCount).toInt - } - - val tuple = Stats(currentSample, 1, delta) - newSamples.append(tuple) - opsIdx += 1 - } - - // Add all the remaining existing samples - while(sampleIdx < sampled.size) { - newSamples.append(sampled(sampleIdx)) - sampleIdx += 1 - } - new QuantileSummaries(compressThreshold, relativeError, newSamples.toArray, currentCount) - } - - /** - * Returns a new summary that compresses the summary statistics and the head buffer. - * - * This implements the COMPRESS function of the GK algorithm. It does not modify the object. - * - * @return a new summary object with compressed statistics - */ - def compress(): QuantileSummaries = { - // Inserts all the elements first - val inserted = this.withHeadBufferInserted - assert(inserted.headSampled.isEmpty) - assert(inserted.count == count + headSampled.size) - val compressed = - compressImmut(inserted.sampled, mergeThreshold = 2 * relativeError * inserted.count) - new QuantileSummaries(compressThreshold, relativeError, compressed, inserted.count) - } - - private def shallowCopy: QuantileSummaries = { - new QuantileSummaries(compressThreshold, relativeError, sampled, count) - } - - /** - * Merges two (compressed) summaries together. - * - * Returns a new summary. - */ - def merge(other: QuantileSummaries): QuantileSummaries = { - require(headSampled.isEmpty, "Current buffer needs to be compressed before merge") - require(other.headSampled.isEmpty, "Other buffer needs to be compressed before merge") - if (other.count == 0) { - this.shallowCopy - } else if (count == 0) { - other.shallowCopy - } else { - // Merge the two buffers. - // The GK algorithm is a bit unclear about it, but it seems there is no need to adjust the - // statistics during the merging: the invariants are still respected after the merge. - // TODO: could replace full sort by ordered merge, the two lists are known to be sorted - // already. 
- val res = (sampled ++ other.sampled).sortBy(_.value) - val comp = compressImmut(res, mergeThreshold = 2 * relativeError * count) - new QuantileSummaries( - other.compressThreshold, other.relativeError, comp, other.count + count) - } - } - - /** - * Runs a query for a given quantile. - * The result follows the approximation guarantees detailed above. - * The query can only be run on a compressed summary: you need to call compress() before using - * it. - * - * @param quantile the target quantile - * @return - */ - def query(quantile: Double): Double = { - require(quantile >= 0 && quantile <= 1.0, "quantile should be in the range [0.0, 1.0]") - require(headSampled.isEmpty, - "Cannot operate on an uncompressed summary, call compress() first") - - if (quantile <= relativeError) { - return sampled.head.value - } - - if (quantile >= 1 - relativeError) { - return sampled.last.value - } - - // Target rank - val rank = math.ceil(quantile * count).toInt - val targetError = math.ceil(relativeError * count) - // Minimum rank at current sample - var minRank = 0 - var i = 1 - while (i < sampled.size - 1) { - val curSample = sampled(i) - minRank += curSample.g - val maxRank = minRank + curSample.delta - if (maxRank - targetError <= rank && rank <= minRank + targetError) { - return curSample.value - } - i += 1 - } - sampled.last.value - } - } - - object QuantileSummaries { - // TODO(tjhunter) more tuning could be done one the constants here, but for now - // the main cost of the algorithm is accessing the data in SQL. - /** - * The default value for the compression threshold. - */ - val defaultCompressThreshold: Int = 10000 - - /** - * The size of the head buffer. - */ - val defaultHeadSize: Int = 50000 - - /** - * The default value for the relative error (1%). - * With this value, the best extreme percentiles that can be approximated are 1% and 99%. - */ - val defaultRelativeError: Double = 0.01 - - /** - * Statistics from the Greenwald-Khanna paper. - * @param value the sampled value - * @param g the minimum rank jump from the previous value's minimum rank - * @param delta the maximum span of the rank. - */ - case class Stats(value: Double, g: Int, delta: Int) - - private def compressImmut( - currentSamples: IndexedSeq[Stats], - mergeThreshold: Double): Array[Stats] = { - if (currentSamples.isEmpty) { - return Array.empty[Stats] - } - val res: ArrayBuffer[Stats] = ArrayBuffer.empty - // Start for the last element, which is always part of the set. - // The head contains the current new head, that may be merged with the current element. - var head = currentSamples.last - var i = currentSamples.size - 2 - // Do not compress the last element - while (i >= 1) { - // The current sample: - val sample1 = currentSamples(i) - // Do we need to compress? - if (sample1.g + head.g + head.delta < mergeThreshold) { - // Do not insert yet, just merge the current element into the head. - head = head.copy(g = head.g + sample1.g) - } else { - // Prepend the current head, and keep the current sample as target for merging. 
- res.prepend(head) - head = sample1 - } - i -= 1 - } - res.prepend(head) - // If necessary, add the minimum element: - res.prepend(currentSamples.head) - res.toArray - } - } - /** Calculate the Pearson Correlation Coefficient for the given columns */ def pearsonCorrelation(df: DataFrame, cols: Seq[String]): Double = { val counts = collectStatisticalData(df, cols, "correlation") From 9d376ad76ca702ae3fc6ffd0567e7590d9a8daf3 Mon Sep 17 00:00:00 2001 From: Jacek Laskowski Date: Tue, 23 Aug 2016 12:59:25 +0200 Subject: [PATCH 067/270] [SPARK-17199] Use CatalystConf.resolver for case-sensitivity comparison ## What changes were proposed in this pull request? Use `CatalystConf.resolver` consistently for case-sensitivity comparison (removed dups). ## How was this patch tested? Local build. Waiting for Jenkins to ensure clean build and test. Author: Jacek Laskowski Closes #14771 from jaceklaskowski/17199-catalystconf-resolver. --- .../apache/spark/sql/catalyst/analysis/Analyzer.scala | 8 +------- .../spark/sql/execution/datasources/DataSource.scala | 10 ++-------- .../sql/execution/datasources/DataSourceStrategy.scala | 8 +------- .../spark/sql/execution/streaming/FileStreamSink.scala | 6 +----- 4 files changed, 5 insertions(+), 27 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index 41e0e6d65e9ad..e559f235c5a38 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -64,13 +64,7 @@ class Analyzer( this(catalog, conf, conf.optimizerMaxIterations) } - def resolver: Resolver = { - if (conf.caseSensitiveAnalysis) { - caseSensitiveResolution - } else { - caseInsensitiveResolution - } - } + def resolver: Resolver = conf.resolver protected val fixedPoint = FixedPoint(maxIterations) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala index 5ad6ae0956e1c..b783d699745b1 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala @@ -394,13 +394,7 @@ case class DataSource( sparkSession, globbedPaths, options, partitionSchema, !checkPathExist) val dataSchema = userSpecifiedSchema.map { schema => - val equality = - if (sparkSession.sessionState.conf.caseSensitiveAnalysis) { - org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution - } else { - org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution - } - + val equality = sparkSession.sessionState.conf.resolver StructType(schema.filterNot(f => partitionColumns.exists(equality(_, f.name)))) }.orElse { format.inferSchema( @@ -430,7 +424,7 @@ case class DataSource( relation } - /** Writes the give [[DataFrame]] out to this [[DataSource]]. */ + /** Writes the given [[DataFrame]] out to this [[DataSource]]. 
*/ def write( mode: SaveMode, data: DataFrame): BaseRelation = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala index 5eba7df060c4e..a6621054fc74b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala @@ -45,13 +45,7 @@ import org.apache.spark.unsafe.types.UTF8String */ case class DataSourceAnalysis(conf: CatalystConf) extends Rule[LogicalPlan] { - def resolver: Resolver = { - if (conf.caseSensitiveAnalysis) { - caseSensitiveResolution - } else { - caseInsensitiveResolution - } - } + def resolver: Resolver = conf.resolver // Visible for testing. def convertStaticPartitions( diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala index 117d6672ee2f7..0f7d958136835 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala @@ -102,11 +102,7 @@ class FileStreamSinkWriter( // Get the actual partition columns as attributes after matching them by name with // the given columns names. private val partitionColumns = partitionColumnNames.map { col => - val nameEquality = if (data.sparkSession.sessionState.conf.caseSensitiveAnalysis) { - org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution - } else { - org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution - } + val nameEquality = data.sparkSession.sessionState.conf.resolver data.logicalPlan.output.find(f => nameEquality(f.name, col)).getOrElse { throw new RuntimeException(s"Partition column $col not found in schema $dataSchema") } From 97d461b75badbfa323d7f1508b20600ea189bb95 Mon Sep 17 00:00:00 2001 From: Jagadeesan Date: Tue, 23 Aug 2016 12:23:30 +0100 Subject: [PATCH 068/270] [SPARK-17095] [Documentation] [Latex and Scala doc do not play nicely] ## What changes were proposed in this pull request? In Latex, it is common to find "}}}" when closing several expressions at once. [SPARK-16822](https://issues.apache.org/jira/browse/SPARK-16822) added Mathjax to render Latex equations in scaladoc. However, when scala doc sees "}}}" or "{{{" it treats it as a special character for code block. This results in some very strange output. Author: Jagadeesan Closes #14688 from jagadeesanas2/SPARK-17095. --- .../spark/ml/feature/PolynomialExpansion.scala | 8 +++++--- .../ml/regression/GeneralizedLinearRegression.scala | 8 +++++--- .../spark/ml/regression/LinearRegression.scala | 9 ++++++--- .../spark/mllib/clustering/StreamingKMeans.scala | 12 ++++++++---- 4 files changed, 24 insertions(+), 13 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala index 6e872c1f2cada..25fb6be5afd81 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala @@ -76,9 +76,11 @@ class PolynomialExpansion @Since("1.4.0") (@Since("1.4.0") override val uid: Str * (n + d choose d) (including 1 and first-order values). 
For example, let f([a, b, c], 3) be the * function that expands [a, b, c] to their monomials of degree 3. We have the following recursion: * - * {{{ - * f([a, b, c], 3) = f([a, b], 3) ++ f([a, b], 2) * c ++ f([a, b], 1) * c^2 ++ [c^3] - * }}} + *
+ * $$ + * f([a, b, c], 3) &= f([a, b], 3) ++ f([a, b], 2) * c ++ f([a, b], 1) * c^2 ++ [c^3] + * $$ + *
* * To handle sparsity, if c is zero, we can skip all monomials that contain it. We remember the * current index and increment it properly for sparse input. diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala index 1d4dfd1147589..02b27fb650979 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala @@ -196,9 +196,11 @@ class GeneralizedLinearRegression @Since("2.0.0") (@Since("2.0.0") override val /** * Sets the regularization parameter for L2 regularization. * The regularization term is - * {{{ - * 0.5 * regParam * L2norm(coefficients)^2 - * }}} + *
+ * $$ + * 0.5 * regParam * L2norm(coefficients)^2 + * $$ + *
* Default is 0.0. * * @group setParam diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala index b1bb9b9fe0058..7fddfd9b10f84 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala @@ -338,9 +338,12 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String /* Note that in Linear Regression, the objective history (loss + regularization) returned from optimizer is computed in the scaled space given by the following formula. - {{{ - L = 1/2n||\sum_i w_i(x_i - \bar{x_i}) / \hat{x_i} - (y - \bar{y}) / \hat{y}||^2 + regTerms - }}} +
+ $$ + L &= 1/2n||\sum_i w_i(x_i - \bar{x_i}) / \hat{x_i} - (y - \bar{y}) / \hat{y}||^2 + + regTerms \\ + $$ +
*/ val arrayBuilder = mutable.ArrayBuilder.make[Double] var state: optimizer.State = null diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/StreamingKMeans.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/StreamingKMeans.scala index 52bdccb919a61..f20ab09bf0b42 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/StreamingKMeans.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/StreamingKMeans.scala @@ -39,10 +39,14 @@ import org.apache.spark.util.random.XORShiftRandom * generalized to incorporate forgetfullness (i.e. decay). * The update rule (for each cluster) is: * - * {{{ - * c_t+1 = [(c_t * n_t * a) + (x_t * m_t)] / [n_t + m_t] - * n_t+t = n_t * a + m_t - * }}} + *
+ * $$ + * \begin{align} + * c_t+1 &= [(c_t * n_t * a) + (x_t * m_t)] / [n_t + m_t] \\ + * n_t+t &= n_t * a + m_t + * \end{align} + * $$ + *
* * Where c_t is the previously estimated centroid for that cluster, * n_t is the number of points assigned to it thus far, x_t is the centroid From 9afdfc94f49395e69a7959e881c19d787ce00c3e Mon Sep 17 00:00:00 2001 From: Davies Liu Date: Tue, 23 Aug 2016 09:45:13 -0700 Subject: [PATCH 069/270] [SPARK-13286] [SQL] add the next expression of SQLException as cause ## What changes were proposed in this pull request? Some JDBC driver (for example PostgreSQL) does not use the underlying exception as cause, but have another APIs (getNextException) to access that, so it it's included in the error logging, making us hard to find the root cause, especially in batch mode. This PR will pull out the next exception and add it as cause (if it's different) or suppressed (if there is another different cause). ## How was this patch tested? Can't reproduce this on the default JDBC driver, so did not add a regression test. Author: Davies Liu Closes #14722 from davies/keep_cause. --- .../execution/datasources/jdbc/JdbcUtils.scala | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala index a33c26d81354f..cbd504603bbf4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.execution.datasources.jdbc -import java.sql.{Connection, Driver, DriverManager, PreparedStatement} +import java.sql.{Connection, Driver, DriverManager, PreparedStatement, SQLException} import java.util.Properties import scala.collection.JavaConverters._ @@ -289,7 +289,7 @@ object JdbcUtils extends Logging { } val stmt = insertStatement(conn, table, rddSchema, dialect) val setters: Array[JDBCValueSetter] = rddSchema.fields.map(_.dataType) - .map(makeSetter(conn, dialect, _)).toArray + .map(makeSetter(conn, dialect, _)).toArray try { var rowCount = 0 @@ -322,6 +322,17 @@ object JdbcUtils extends Logging { conn.commit() } committed = true + } catch { + case e: SQLException => + val cause = e.getNextException + if (e.getCause != cause) { + if (e.getCause == null) { + e.initCause(cause) + } else { + e.addSuppressed(cause) + } + } + throw e } finally { if (!committed) { // The stage must fail. We got here through an exception path, so From 8fd63e808e15c8a7e78fef847183c86f332daa91 Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Tue, 23 Aug 2016 11:22:32 -0700 Subject: [PATCH 070/270] [SPARKR][MINOR] Remove reference link for common Windows environment variables ## What changes were proposed in this pull request? The PR removes reference link in the doc for environment variables for common Windows folders. The cran check gave code 503: service unavailable on the original link. ## How was this patch tested? Manual check. Author: Junyang Qian Closes #14767 from junyangq/SPARKR-RemoveLink. --- R/pkg/R/install.R | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/R/pkg/R/install.R b/R/pkg/R/install.R index ff81e86835ff8..c6ed88e032a71 100644 --- a/R/pkg/R/install.R +++ b/R/pkg/R/install.R @@ -50,9 +50,7 @@ #' \itemize{ #' \item Mac OS X: \file{~/Library/Caches/spark} #' \item Unix: \env{$XDG_CACHE_HOME} if defined, otherwise \file{~/.cache/spark} -#' \item Windows: \file{\%LOCALAPPDATA\%\\spark\\spark\\Cache}. 
See -#' \href{https://www.microsoft.com/security/portal/mmpc/shared/variables.aspx}{ -#' Windows Common Folder Variables} about \%LOCALAPPDATA\% +#' \item Windows: \file{\%LOCALAPPDATA\%\\spark\\spark\\Cache}. #' } #' @param overwrite If \code{TRUE}, download and overwrite the existing tar file in localDir #' and force re-install Spark (in case the local directory or file is corrupted) From 588559911de94bbe0932526ee1e1dd36a581a423 Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Tue, 23 Aug 2016 21:21:43 +0100 Subject: [PATCH 071/270] [MINOR][DOC] Use standard quotes instead of "curly quote" marks from Mac in structured streaming programming guides MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What changes were proposed in this pull request? This PR fixes curly quotes (`“` and `”` ) to standard quotes (`"`). This will be a actual problem when users copy and paste the examples. This would not work. This seems only happening in `structured-streaming-programming-guide.md`. ## How was this patch tested? Manually built. This will change some examples to be correctly marked down as below: ![2016-08-23 3 24 13](https://cloud.githubusercontent.com/assets/6477701/17882878/2a38332e-694a-11e6-8e84-76bdb89151e0.png) to ![2016-08-23 3 26 06](https://cloud.githubusercontent.com/assets/6477701/17882888/376eaa28-694a-11e6-8b88-32ea83997037.png) Author: hyukjinkwon Closes #14770 from HyukjinKwon/minor-quotes. --- .../structured-streaming-programming-guide.md | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/structured-streaming-programming-guide.md b/docs/structured-streaming-programming-guide.md index 226ff740a5d67..090b14f4ce2bc 100644 --- a/docs/structured-streaming-programming-guide.md +++ b/docs/structured-streaming-programming-guide.md @@ -88,7 +88,7 @@ val words = lines.as[String].flatMap(_.split(" ")) val wordCounts = words.groupBy("value").count() {% endhighlight %} -This `lines` DataFrame represents an unbounded table containing the streaming text data. This table contains one column of strings named “value”, and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have converted the DataFrame to a Dataset of String using `.as[String]`, so that we can apply the `flatMap` operation to split each line into multiple words. The resultant `words` Dataset contains all the words. Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream. +This `lines` DataFrame represents an unbounded table containing the streaming text data. This table contains one column of strings named "value", and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have converted the DataFrame to a Dataset of String using `.as[String]`, so that we can apply the `flatMap` operation to split each line into multiple words. The resultant `words` Dataset contains all the words. Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream.
@@ -117,7 +117,7 @@ Dataset words = lines Dataset wordCounts = words.groupBy("value").count(); {% endhighlight %} -This `lines` DataFrame represents an unbounded table containing the streaming text data. This table contains one column of strings named “value”, and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have converted the DataFrame to a Dataset of String using `.as(Encoders.STRING())`, so that we can apply the `flatMap` operation to split each line into multiple words. The resultant `words` Dataset contains all the words. Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream. +This `lines` DataFrame represents an unbounded table containing the streaming text data. This table contains one column of strings named "value", and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have converted the DataFrame to a Dataset of String using `.as(Encoders.STRING())`, so that we can apply the `flatMap` operation to split each line into multiple words. The resultant `words` Dataset contains all the words. Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream.
@@ -142,12 +142,12 @@ words = lines.select( wordCounts = words.groupBy('word').count() {% endhighlight %} -This `lines` DataFrame represents an unbounded table containing the streaming text data. This table contains one column of strings named “value”, and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have used two built-in SQL functions - split and explode, to split each line into multiple rows with a word each. In addition, we use the function `alias` to name the new column as “word”. Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream. +This `lines` DataFrame represents an unbounded table containing the streaming text data. This table contains one column of strings named "value", and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have used two built-in SQL functions - split and explode, to split each line into multiple rows with a word each. In addition, we use the function `alias` to name the new column as "word". Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream.
-We have now set up the query on the streaming data. All that is left is to actually start receiving data and computing the counts. To do this, we set it up to print the complete set of counts (specified by `outputMode(“complete”)`) to the console every time they are updated. And then start the streaming computation using `start()`. +We have now set up the query on the streaming data. All that is left is to actually start receiving data and computing the counts. To do this, we set it up to print the complete set of counts (specified by `outputMode("complete")`) to the console every time they are updated. And then start the streaming computation using `start()`.
@@ -361,16 +361,16 @@ table, and Spark runs it as an *incremental* query on the *unbounded* input table. Let’s understand this model in more detail. ## Basic Concepts -Consider the input data stream as the “Input Table”. Every data item that is +Consider the input data stream as the "Input Table". Every data item that is arriving on the stream is like a new row being appended to the Input Table. ![Stream as a Table](img/structured-streaming-stream-as-a-table.png "Stream as a Table") -A query on the input will generate the “Result Table”. Every trigger interval (say, every 1 second), new rows get appended to the Input Table, which eventually updates the Result Table. Whenever the result table gets updated, we would want to write the changed result rows to an external sink. +A query on the input will generate the "Result Table". Every trigger interval (say, every 1 second), new rows get appended to the Input Table, which eventually updates the Result Table. Whenever the result table gets updated, we would want to write the changed result rows to an external sink. ![Model](img/structured-streaming-model.png) -The “Output” is defined as what gets written out to the external storage. The output can be defined in different modes +The "Output" is defined as what gets written out to the external storage. The output can be defined in different modes - *Complete Mode* - The entire updated Result Table will be written to the external storage. It is up to the storage connector to decide how to handle writing of the entire table. @@ -386,7 +386,7 @@ the final `wordCounts` DataFrame is the result table. Note that the query on streaming `lines` DataFrame to generate `wordCounts` is *exactly the same* as it would be a static DataFrame. However, when this query is started, Spark will continuously check for new data from the socket connection. If there is -new data, Spark will run an “incremental” query that combines the previous +new data, Spark will run an "incremental" query that combines the previous running counts with the new data to compute updated counts, as shown below. ![Model](img/structured-streaming-example-model.png) @@ -682,8 +682,8 @@ Streaming DataFrames can be joined with static DataFrames to create new streamin val staticDf = spark.read. ... val streamingDf = spark.readStream. ... -streamingDf.join(staticDf, “type”) // inner equi-join with a static DF -streamingDf.join(staticDf, “type”, “right_join”) // right outer join with a static DF +streamingDf.join(staticDf, "type") // inner equi-join with a static DF +streamingDf.join(staticDf, "type", "right_join") // right outer join with a static DF {% endhighlight %} @@ -789,7 +789,7 @@ Here is a table of all the sinks, and the corresponding settings. File Sink
(only parquet in Spark 2.0) Append -
writeStream
.format(“parquet”)
.start()
+
writeStream
.format("parquet")
.start()
Yes Supports writes to partitioned tables. Partitioning by time may be useful. @@ -803,14 +803,14 @@ Here is a table of all the sinks, and the corresponding settings. Console Sink Append, Complete -
writeStream
.format(“console”)
.start()
+
writeStream
.format("console")
.start()
No Memory Sink Append, Complete -
writeStream
.format(“memory”)
.queryName(“table”)
.start()
+
writeStream
.format("memory")
.queryName("table")
.start()
No Saves the output data as a table, for interactive querying. Table name is the query name. @@ -839,7 +839,7 @@ noAggDF .start() // ========== DF with aggregation ========== -val aggDF = df.groupBy(“device”).count() +val aggDF = df.groupBy("device").count() // Print updated aggregations to console aggDF @@ -879,7 +879,7 @@ noAggDF .start(); // ========== DF with aggregation ========== -Dataset aggDF = df.groupBy(“device”).count(); +Dataset aggDF = df.groupBy("device").count(); // Print updated aggregations to console aggDF @@ -919,7 +919,7 @@ noAggDF\ .start() # ========== DF with aggregation ========== -aggDF = df.groupBy(“device”).count() +aggDF = df.groupBy("device").count() # Print updated aggregations to console aggDF\ @@ -1095,7 +1095,7 @@ In case of a failure or intentional shutdown, you can recover the previous progr aggDF .writeStream .outputMode("complete") - .option(“checkpointLocation”, “path/to/HDFS/dir”) + .option("checkpointLocation", "path/to/HDFS/dir") .format("memory") .start() {% endhighlight %} @@ -1107,7 +1107,7 @@ aggDF aggDF .writeStream() .outputMode("complete") - .option(“checkpointLocation”, “path/to/HDFS/dir”) + .option("checkpointLocation", "path/to/HDFS/dir") .format("memory") .start(); {% endhighlight %} @@ -1119,7 +1119,7 @@ aggDF aggDF\ .writeStream()\ .outputMode("complete")\ - .option(“checkpointLocation”, “path/to/HDFS/dir”)\ + .option("checkpointLocation", "path/to/HDFS/dir")\ .format("memory")\ .start() {% endhighlight %} From 6555ef0ccbecd09c3071670e10f0c1e2d7713bfe Mon Sep 17 00:00:00 2001 From: Zheng RuiFeng Date: Tue, 23 Aug 2016 21:25:04 +0100 Subject: [PATCH 072/270] [TRIVIAL] Typo Fix ## What changes were proposed in this pull request? Fix a typo ## How was this patch tested? no tests Author: Zheng RuiFeng Closes #14772 from zhengruifeng/minor_numClasses. --- .../scala/org/apache/spark/ml/classification/Classifier.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala index 6decea72719fd..d1b21b16f2342 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala @@ -83,7 +83,7 @@ abstract class Classifier[ case Row(label: Double, features: Vector) => require(label % 1 == 0 && label >= 0 && label < numClasses, s"Classifier was given" + s" dataset with invalid label $label. Labels must be integers in range" + - s" [0, 1, ..., $numClasses), where numClasses=$numClasses.") + s" [0, $numClasses).") LabeledPoint(label, features) } } From bf8ff833e30b39e5e5e35ba8dcac31b79323838c Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Tue, 23 Aug 2016 22:31:58 +0200 Subject: [PATCH 073/270] [SPARK-17194] Use single quotes when generating SQL for string literals When Spark emits SQL for a string literal, it should wrap the string in single quotes, not double quotes. Databases which adhere more strictly to the ANSI SQL standards, such as Postgres, allow only single-quotes to be used for denoting string literals (see http://stackoverflow.com/a/1992331/590203). Author: Josh Rosen Closes #14763 from JoshRosen/SPARK-17194. 
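For illustration, a minimal standalone sketch of the quoting rule this patch adopts (a hypothetical helper, not the actual `Literal.sql` code path shown in the diff below): escape backslashes and single quotes, then wrap the value in single quotes so that stricter databases such as Postgres can parse the generated literal.

```scala
// Hypothetical helper mirroring the new rule: escape backslashes and
// single quotes, then wrap the string in single quotes.
object SqlStringLiteralSketch {
  def quote(s: String): String =
    "'" + s.replace("\\", "\\\\").replace("'", "\\'") + "'"

  def main(args: Array[String]): Unit = {
    println(quote("val_9"))   // prints: 'val_9'
    println(quote("it's"))    // prints: 'it\'s'
    println(quote("a\\b"))    // prints: 'a\\b'
  }
}
```

Double-quoted literals remain valid in Hive and Spark SQL, but single quotes are the portable choice, which is why the generated SQL in the sqlgen golden files below switches from "..." to '...'.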
--- .../org/apache/spark/sql/catalyst/expressions/literals.scala | 4 ++-- .../src/test/resources/sqlgen/broadcast_join_subquery.sql | 2 +- sql/hive/src/test/resources/sqlgen/case_with_key.sql | 2 +- .../src/test/resources/sqlgen/case_with_key_and_else.sql | 2 +- sql/hive/src/test/resources/sqlgen/inline_tables.sql | 2 +- .../src/test/resources/sqlgen/json_tuple_generator_1.sql | 2 +- .../src/test/resources/sqlgen/json_tuple_generator_2.sql | 2 +- sql/hive/src/test/resources/sqlgen/not_like.sql | 2 +- sql/hive/src/test/resources/sqlgen/subquery_exists_1.sql | 2 +- sql/hive/src/test/resources/sqlgen/subquery_exists_2.sql | 2 +- .../src/test/resources/sqlgen/subquery_exists_having_1.sql | 2 +- .../src/test/resources/sqlgen/subquery_exists_having_2.sql | 2 +- .../src/test/resources/sqlgen/subquery_exists_having_3.sql | 2 +- sql/hive/src/test/resources/sqlgen/subquery_in_having_1.sql | 2 +- sql/hive/src/test/resources/sqlgen/subquery_in_having_2.sql | 2 +- sql/hive/src/test/resources/sqlgen/subquery_not_exists_1.sql | 2 +- sql/hive/src/test/resources/sqlgen/subquery_not_exists_2.sql | 2 +- .../test/resources/sqlgen/subquery_not_exists_having_1.sql | 2 +- .../test/resources/sqlgen/subquery_not_exists_having_2.sql | 2 +- .../spark/sql/catalyst/ExpressionSQLBuilderSuite.scala | 5 +++-- 20 files changed, 23 insertions(+), 22 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala index 7040008769a32..55fd9c0834fcc 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala @@ -245,8 +245,8 @@ case class Literal (value: Any, dataType: DataType) extends LeafExpression with case (_, NullType | _: ArrayType | _: MapType | _: StructType) if value == null => "NULL" case _ if value == null => s"CAST(NULL AS ${dataType.sql})" case (v: UTF8String, StringType) => - // Escapes all backslashes and double quotes. - "\"" + v.toString.replace("\\", "\\\\").replace("\"", "\\\"") + "\"" + // Escapes all backslashes and single quotes. 
+ "'" + v.toString.replace("\\", "\\\\").replace("'", "\\'") + "'" case (v: Byte, ByteType) => v + "Y" case (v: Short, ShortType) => v + "S" case (v: Long, LongType) => v + "L" diff --git a/sql/hive/src/test/resources/sqlgen/broadcast_join_subquery.sql b/sql/hive/src/test/resources/sqlgen/broadcast_join_subquery.sql index 3e2111d58a3c6..ec881a216e0b0 100644 --- a/sql/hive/src/test/resources/sqlgen/broadcast_join_subquery.sql +++ b/sql/hive/src/test/resources/sqlgen/broadcast_join_subquery.sql @@ -5,4 +5,4 @@ FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) ORDER BY subq.key1, z.value -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key1`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_5` AS `gen_attr_0`, `gen_attr_7` AS `gen_attr_6`, `gen_attr_9` AS `gen_attr_8`, `gen_attr_11` AS `gen_attr_10` FROM (SELECT `key` AS `gen_attr_5`, `value` AS `gen_attr_7` FROM `default`.`src1`) AS gen_subquery_0 INNER JOIN (SELECT `key` AS `gen_attr_9`, `value` AS `gen_attr_11` FROM `default`.`src`) AS gen_subquery_1 ON (`gen_attr_5` = `gen_attr_9`)) AS subq INNER JOIN (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_1`, `ds` AS `gen_attr_3`, `hr` AS `gen_attr_4` FROM `default`.`srcpart`) AS gen_subquery_2 ON (((`gen_attr_0` = `gen_attr_2`) AND (`gen_attr_3` = "2008-04-08")) AND (CAST(`gen_attr_4` AS DOUBLE) = CAST(11 AS DOUBLE))) ORDER BY `gen_attr_0` ASC, `gen_attr_1` ASC) AS gen_subquery_3 +SELECT `gen_attr_0` AS `key1`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_5` AS `gen_attr_0`, `gen_attr_7` AS `gen_attr_6`, `gen_attr_9` AS `gen_attr_8`, `gen_attr_11` AS `gen_attr_10` FROM (SELECT `key` AS `gen_attr_5`, `value` AS `gen_attr_7` FROM `default`.`src1`) AS gen_subquery_0 INNER JOIN (SELECT `key` AS `gen_attr_9`, `value` AS `gen_attr_11` FROM `default`.`src`) AS gen_subquery_1 ON (`gen_attr_5` = `gen_attr_9`)) AS subq INNER JOIN (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_1`, `ds` AS `gen_attr_3`, `hr` AS `gen_attr_4` FROM `default`.`srcpart`) AS gen_subquery_2 ON (((`gen_attr_0` = `gen_attr_2`) AND (`gen_attr_3` = '2008-04-08')) AND (CAST(`gen_attr_4` AS DOUBLE) = CAST(11 AS DOUBLE))) ORDER BY `gen_attr_0` ASC, `gen_attr_1` ASC) AS gen_subquery_3 diff --git a/sql/hive/src/test/resources/sqlgen/case_with_key.sql b/sql/hive/src/test/resources/sqlgen/case_with_key.sql index dff65f10835f3..e991ebafdc90e 100644 --- a/sql/hive/src/test/resources/sqlgen/case_with_key.sql +++ b/sql/hive/src/test/resources/sqlgen/case_with_key.sql @@ -1,4 +1,4 @@ -- This file is automatically generated by LogicalPlanToSQLSuite. 
SELECT CASE id WHEN 0 THEN 'foo' WHEN 1 THEN 'bar' END FROM parquet_t0 -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `CASE WHEN (id = CAST(0 AS BIGINT)) THEN foo WHEN (id = CAST(1 AS BIGINT)) THEN bar END` FROM (SELECT CASE WHEN (`gen_attr_1` = CAST(0 AS BIGINT)) THEN "foo" WHEN (`gen_attr_1` = CAST(1 AS BIGINT)) THEN "bar" END AS `gen_attr_0` FROM (SELECT `id` AS `gen_attr_1` FROM `default`.`parquet_t0`) AS gen_subquery_0) AS gen_subquery_1 +SELECT `gen_attr_0` AS `CASE WHEN (id = CAST(0 AS BIGINT)) THEN foo WHEN (id = CAST(1 AS BIGINT)) THEN bar END` FROM (SELECT CASE WHEN (`gen_attr_1` = CAST(0 AS BIGINT)) THEN 'foo' WHEN (`gen_attr_1` = CAST(1 AS BIGINT)) THEN 'bar' END AS `gen_attr_0` FROM (SELECT `id` AS `gen_attr_1` FROM `default`.`parquet_t0`) AS gen_subquery_0) AS gen_subquery_1 diff --git a/sql/hive/src/test/resources/sqlgen/case_with_key_and_else.sql b/sql/hive/src/test/resources/sqlgen/case_with_key_and_else.sql index af3e169b54315..492777e376ecc 100644 --- a/sql/hive/src/test/resources/sqlgen/case_with_key_and_else.sql +++ b/sql/hive/src/test/resources/sqlgen/case_with_key_and_else.sql @@ -1,4 +1,4 @@ -- This file is automatically generated by LogicalPlanToSQLSuite. SELECT CASE id WHEN 0 THEN 'foo' WHEN 1 THEN 'bar' ELSE 'baz' END FROM parquet_t0 -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `CASE WHEN (id = CAST(0 AS BIGINT)) THEN foo WHEN (id = CAST(1 AS BIGINT)) THEN bar ELSE baz END` FROM (SELECT CASE WHEN (`gen_attr_1` = CAST(0 AS BIGINT)) THEN "foo" WHEN (`gen_attr_1` = CAST(1 AS BIGINT)) THEN "bar" ELSE "baz" END AS `gen_attr_0` FROM (SELECT `id` AS `gen_attr_1` FROM `default`.`parquet_t0`) AS gen_subquery_0) AS gen_subquery_1 +SELECT `gen_attr_0` AS `CASE WHEN (id = CAST(0 AS BIGINT)) THEN foo WHEN (id = CAST(1 AS BIGINT)) THEN bar ELSE baz END` FROM (SELECT CASE WHEN (`gen_attr_1` = CAST(0 AS BIGINT)) THEN 'foo' WHEN (`gen_attr_1` = CAST(1 AS BIGINT)) THEN 'bar' ELSE 'baz' END AS `gen_attr_0` FROM (SELECT `id` AS `gen_attr_1` FROM `default`.`parquet_t0`) AS gen_subquery_0) AS gen_subquery_1 diff --git a/sql/hive/src/test/resources/sqlgen/inline_tables.sql b/sql/hive/src/test/resources/sqlgen/inline_tables.sql index 602551e69da6e..18803a3ee59b9 100644 --- a/sql/hive/src/test/resources/sqlgen/inline_tables.sql +++ b/sql/hive/src/test/resources/sqlgen/inline_tables.sql @@ -1,4 +1,4 @@ -- This file is automatically generated by LogicalPlanToSQLSuite. 
select * from values ("one", 1), ("two", 2), ("three", null) as data(a, b) where b > 1 -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `a`, `gen_attr_1` AS `b` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (VALUES ("one", 1), ("two", 2), ("three", CAST(NULL AS INT)) AS gen_subquery_0(gen_attr_0, gen_attr_1)) AS data WHERE (`gen_attr_1` > 1)) AS data +SELECT `gen_attr_0` AS `a`, `gen_attr_1` AS `b` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (VALUES ('one', 1), ('two', 2), ('three', CAST(NULL AS INT)) AS gen_subquery_0(gen_attr_0, gen_attr_1)) AS data WHERE (`gen_attr_1` > 1)) AS data diff --git a/sql/hive/src/test/resources/sqlgen/json_tuple_generator_1.sql b/sql/hive/src/test/resources/sqlgen/json_tuple_generator_1.sql index 6f5562a20cccd..11e45a48f1b89 100644 --- a/sql/hive/src/test/resources/sqlgen/json_tuple_generator_1.sql +++ b/sql/hive/src/test/resources/sqlgen/json_tuple_generator_1.sql @@ -3,4 +3,4 @@ SELECT c0, c1, c2 FROM parquet_t3 LATERAL VIEW JSON_TUPLE(json, 'f1', 'f2', 'f3') jt -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `c0`, `gen_attr_1` AS `c1`, `gen_attr_2` AS `c2` FROM (SELECT `gen_attr_0`, `gen_attr_1`, `gen_attr_2` FROM (SELECT `arr` AS `gen_attr_4`, `arr2` AS `gen_attr_5`, `json` AS `gen_attr_3`, `id` AS `gen_attr_6` FROM `default`.`parquet_t3`) AS gen_subquery_0 LATERAL VIEW json_tuple(`gen_attr_3`, "f1", "f2", "f3") gen_subquery_1 AS `gen_attr_0`, `gen_attr_1`, `gen_attr_2`) AS jt +SELECT `gen_attr_0` AS `c0`, `gen_attr_1` AS `c1`, `gen_attr_2` AS `c2` FROM (SELECT `gen_attr_0`, `gen_attr_1`, `gen_attr_2` FROM (SELECT `arr` AS `gen_attr_4`, `arr2` AS `gen_attr_5`, `json` AS `gen_attr_3`, `id` AS `gen_attr_6` FROM `default`.`parquet_t3`) AS gen_subquery_0 LATERAL VIEW json_tuple(`gen_attr_3`, 'f1', 'f2', 'f3') gen_subquery_1 AS `gen_attr_0`, `gen_attr_1`, `gen_attr_2`) AS jt diff --git a/sql/hive/src/test/resources/sqlgen/json_tuple_generator_2.sql b/sql/hive/src/test/resources/sqlgen/json_tuple_generator_2.sql index 0d4f67f18426b..d86b39df57442 100644 --- a/sql/hive/src/test/resources/sqlgen/json_tuple_generator_2.sql +++ b/sql/hive/src/test/resources/sqlgen/json_tuple_generator_2.sql @@ -3,4 +3,4 @@ SELECT a, b, c FROM parquet_t3 LATERAL VIEW JSON_TUPLE(json, 'f1', 'f2', 'f3') jt AS a, b, c -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `a`, `gen_attr_1` AS `b`, `gen_attr_2` AS `c` FROM (SELECT `gen_attr_0`, `gen_attr_1`, `gen_attr_2` FROM (SELECT `arr` AS `gen_attr_4`, `arr2` AS `gen_attr_5`, `json` AS `gen_attr_3`, `id` AS `gen_attr_6` FROM `default`.`parquet_t3`) AS gen_subquery_0 LATERAL VIEW json_tuple(`gen_attr_3`, "f1", "f2", "f3") gen_subquery_1 AS `gen_attr_0`, `gen_attr_1`, `gen_attr_2`) AS jt +SELECT `gen_attr_0` AS `a`, `gen_attr_1` AS `b`, `gen_attr_2` AS `c` FROM (SELECT `gen_attr_0`, `gen_attr_1`, `gen_attr_2` FROM (SELECT `arr` AS `gen_attr_4`, `arr2` AS `gen_attr_5`, `json` AS `gen_attr_3`, `id` AS `gen_attr_6` FROM `default`.`parquet_t3`) AS gen_subquery_0 LATERAL VIEW json_tuple(`gen_attr_3`, 'f1', 'f2', 'f3') gen_subquery_1 AS `gen_attr_0`, `gen_attr_1`, `gen_attr_2`) AS jt diff --git a/sql/hive/src/test/resources/sqlgen/not_like.sql b/sql/hive/src/test/resources/sqlgen/not_like.sql index da39a62225a53..22485045e212e 100644 --- a/sql/hive/src/test/resources/sqlgen/not_like.sql +++ b/sql/hive/src/test/resources/sqlgen/not_like.sql @@ -1,4 +1,4 @@ -- This file is 
automatically generated by LogicalPlanToSQLSuite. SELECT id FROM t0 WHERE id + 5 NOT LIKE '1%' -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `id` FROM (SELECT `gen_attr_0` FROM (SELECT `id` AS `gen_attr_0` FROM `default`.`t0`) AS gen_subquery_0 WHERE (NOT CAST((`gen_attr_0` + CAST(5 AS BIGINT)) AS STRING) LIKE "1%")) AS t0 +SELECT `gen_attr_0` AS `id` FROM (SELECT `gen_attr_0` FROM (SELECT `id` AS `gen_attr_0` FROM `default`.`t0`) AS gen_subquery_0 WHERE (NOT CAST((`gen_attr_0` + CAST(5 AS BIGINT)) AS STRING) LIKE '1%')) AS t0 diff --git a/sql/hive/src/test/resources/sqlgen/subquery_exists_1.sql b/sql/hive/src/test/resources/sqlgen/subquery_exists_1.sql index d598e4c036a29..bd28d8dca94c2 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_exists_1.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_exists_1.sql @@ -5,4 +5,4 @@ where exists (select a.key from src a where b.value = a.value and a.key = b.key and a.value > 'val_9') -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_3`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > "val_9")) AS gen_subquery_1 WHERE ((`gen_attr_1` = `gen_attr_2`) AND (`gen_attr_3` = `gen_attr_0`))) AS gen_subquery_3)) AS b +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_3`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > 'val_9')) AS gen_subquery_1 WHERE ((`gen_attr_1` = `gen_attr_2`) AND (`gen_attr_3` = `gen_attr_0`))) AS gen_subquery_3)) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_exists_2.sql b/sql/hive/src/test/resources/sqlgen/subquery_exists_2.sql index a353c33af21a6..d2965fc0b9b77 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_exists_2.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_exists_2.sql @@ -6,4 +6,4 @@ from (select * from src a where b.value = a.value and a.key = b.key and a.value > 'val_9')) a -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_3`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > "val_9")) AS gen_subquery_1 WHERE ((`gen_attr_1` = `gen_attr_2`) AND (`gen_attr_3` = `gen_attr_0`))) AS gen_subquery_3)) AS a) AS a +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE EXISTS(SELECT `gen_attr_4` 
AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_3`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > 'val_9')) AS gen_subquery_1 WHERE ((`gen_attr_1` = `gen_attr_2`) AND (`gen_attr_3` = `gen_attr_0`))) AS gen_subquery_3)) AS a) AS a diff --git a/sql/hive/src/test/resources/sqlgen/subquery_exists_having_1.sql b/sql/hive/src/test/resources/sqlgen/subquery_exists_having_1.sql index f6873d24e16ec..93ce902b75994 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_exists_having_1.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_exists_having_1.sql @@ -6,4 +6,4 @@ having exists (select a.key from src a where a.key = b.key and a.value > 'val_9') -------------------------------------------------------------------------------- -SELECT `gen_attr_1` AS `key`, `gen_attr_2` AS `count(1)` FROM (SELECT `gen_attr_1`, count(1) AS `gen_attr_2` FROM (SELECT `key` AS `gen_attr_1`, `value` AS `gen_attr_3` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_1` HAVING EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_0` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_5` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_5` > "val_9")) AS gen_subquery_1 WHERE (`gen_attr_0` = `gen_attr_1`)) AS gen_subquery_3)) AS b +SELECT `gen_attr_1` AS `key`, `gen_attr_2` AS `count(1)` FROM (SELECT `gen_attr_1`, count(1) AS `gen_attr_2` FROM (SELECT `key` AS `gen_attr_1`, `value` AS `gen_attr_3` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_1` HAVING EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_0` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_5` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_5` > 'val_9')) AS gen_subquery_1 WHERE (`gen_attr_0` = `gen_attr_1`)) AS gen_subquery_3)) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_exists_having_2.sql b/sql/hive/src/test/resources/sqlgen/subquery_exists_having_2.sql index 8452ef946f61d..411e073f0d280 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_exists_having_2.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_exists_having_2.sql @@ -7,4 +7,4 @@ from (select b.key, count(*) from src a where a.key = b.key and a.value > 'val_9')) a -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `count(1)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, count(1) AS `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_3` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_2` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_5` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_5` > "val_9")) AS gen_subquery_1 WHERE (`gen_attr_2` = `gen_attr_0`)) AS gen_subquery_3)) AS a) AS a +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `count(1)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, count(1) AS `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_3` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_2` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_5` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_5` > 'val_9')) AS gen_subquery_1 WHERE 
(`gen_attr_2` = `gen_attr_0`)) AS gen_subquery_3)) AS a) AS a diff --git a/sql/hive/src/test/resources/sqlgen/subquery_exists_having_3.sql b/sql/hive/src/test/resources/sqlgen/subquery_exists_having_3.sql index 2ef38ce42944f..b2ed0b0557aff 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_exists_having_3.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_exists_having_3.sql @@ -6,4 +6,4 @@ having exists (select a.key from src a where a.value > 'val_9' and a.value = min(b.value)) -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `min(value)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, min(`gen_attr_4`) AS `gen_attr_1`, min(`gen_attr_4`) AS `gen_attr_3` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_4` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING EXISTS(SELECT `gen_attr_5` AS `1` FROM (SELECT 1 AS `gen_attr_5` FROM (SELECT `gen_attr_6`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_6`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_3 WHERE (`gen_attr_2` > "val_9")) AS gen_subquery_2 WHERE (`gen_attr_2` = `gen_attr_3`)) AS gen_subquery_4)) AS gen_subquery_1) AS b +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `min(value)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, min(`gen_attr_4`) AS `gen_attr_1`, min(`gen_attr_4`) AS `gen_attr_3` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_4` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING EXISTS(SELECT `gen_attr_5` AS `1` FROM (SELECT 1 AS `gen_attr_5` FROM (SELECT `gen_attr_6`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_6`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_3 WHERE (`gen_attr_2` > 'val_9')) AS gen_subquery_2 WHERE (`gen_attr_2` = `gen_attr_3`)) AS gen_subquery_4)) AS gen_subquery_1) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_in_having_1.sql b/sql/hive/src/test/resources/sqlgen/subquery_in_having_1.sql index bfa58211b12f1..9894f5ab39c76 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_in_having_1.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_in_having_1.sql @@ -5,4 +5,4 @@ group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key) order by key -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `count(1)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, count(1) AS `gen_attr_1`, count(1) AS `gen_attr_2` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_4` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING (`gen_attr_2` IN (SELECT `gen_attr_5` AS `_c0` FROM (SELECT `gen_attr_3` AS `gen_attr_5` FROM (SELECT count(1) AS `gen_attr_3` FROM (SELECT `key` AS `gen_attr_6`, `value` AS `gen_attr_7` FROM `default`.`src`) AS gen_subquery_3 WHERE (CAST(`gen_attr_6` AS DOUBLE) = CAST("90" AS DOUBLE)) GROUP BY `gen_attr_6`) AS gen_subquery_2) AS gen_subquery_4))) AS gen_subquery_1 ORDER BY `gen_attr_0` ASC) AS src +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `count(1)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, count(1) AS `gen_attr_1`, count(1) AS `gen_attr_2` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_4` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING (`gen_attr_2` IN (SELECT `gen_attr_5` AS `_c0` FROM (SELECT `gen_attr_3` AS `gen_attr_5` FROM (SELECT count(1) AS `gen_attr_3` 
FROM (SELECT `key` AS `gen_attr_6`, `value` AS `gen_attr_7` FROM `default`.`src`) AS gen_subquery_3 WHERE (CAST(`gen_attr_6` AS DOUBLE) = CAST('90' AS DOUBLE)) GROUP BY `gen_attr_6`) AS gen_subquery_2) AS gen_subquery_4))) AS gen_subquery_1 ORDER BY `gen_attr_0` ASC) AS src diff --git a/sql/hive/src/test/resources/sqlgen/subquery_in_having_2.sql b/sql/hive/src/test/resources/sqlgen/subquery_in_having_2.sql index f7503bce068f8..c3a122aa889b9 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_in_having_2.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_in_having_2.sql @@ -7,4 +7,4 @@ having b.key in (select a.key where a.value > 'val_9' and a.value = min(b.value)) order by b.key -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `min(value)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, min(`gen_attr_5`) AS `gen_attr_1`, min(`gen_attr_5`) AS `gen_attr_4` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_5` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING (struct(`gen_attr_0`, `gen_attr_4`) IN (SELECT `gen_attr_6` AS `_c0`, `gen_attr_7` AS `_c1` FROM (SELECT `gen_attr_2` AS `gen_attr_6`, `gen_attr_3` AS `gen_attr_7` FROM (SELECT `gen_attr_2`, `gen_attr_3` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_3` FROM `default`.`src`) AS gen_subquery_3 WHERE (`gen_attr_3` > "val_9")) AS gen_subquery_2) AS gen_subquery_4))) AS gen_subquery_1 ORDER BY `gen_attr_0` ASC) AS b +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `min(value)` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `gen_attr_0`, min(`gen_attr_5`) AS `gen_attr_1`, min(`gen_attr_5`) AS `gen_attr_4` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_5` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_0` HAVING (struct(`gen_attr_0`, `gen_attr_4`) IN (SELECT `gen_attr_6` AS `_c0`, `gen_attr_7` AS `_c1` FROM (SELECT `gen_attr_2` AS `gen_attr_6`, `gen_attr_3` AS `gen_attr_7` FROM (SELECT `gen_attr_2`, `gen_attr_3` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_3` FROM `default`.`src`) AS gen_subquery_3 WHERE (`gen_attr_3` > 'val_9')) AS gen_subquery_2) AS gen_subquery_4))) AS gen_subquery_1 ORDER BY `gen_attr_0` ASC) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_1.sql b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_1.sql index 54a38ec0edb4c..eed20a5d311f3 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_1.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_1.sql @@ -5,4 +5,4 @@ where not exists (select a.key from src a where b.value = a.value and a.key = b.key and a.value > 'val_2') -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE (NOT EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_3`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > "val_2")) AS gen_subquery_1 WHERE ((`gen_attr_1` = `gen_attr_2`) AND (`gen_attr_3` = `gen_attr_0`))) AS gen_subquery_3))) AS b +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 
WHERE (NOT EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_3`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > 'val_2')) AS gen_subquery_1 WHERE ((`gen_attr_1` = `gen_attr_2`) AND (`gen_attr_3` = `gen_attr_0`))) AS gen_subquery_3))) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_2.sql b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_2.sql index c05bb5d991b4b..7040e106e7ba2 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_2.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_2.sql @@ -5,4 +5,4 @@ where not exists (select a.key from src a where b.value = a.value and a.value > 'val_2') -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE (NOT EXISTS(SELECT `gen_attr_3` AS `1` FROM (SELECT 1 AS `gen_attr_3` FROM (SELECT `gen_attr_4`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_4`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > "val_2")) AS gen_subquery_1 WHERE (`gen_attr_1` = `gen_attr_2`)) AS gen_subquery_3))) AS b +SELECT `gen_attr_0` AS `key`, `gen_attr_1` AS `value` FROM (SELECT `gen_attr_0`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_0`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_0 WHERE (NOT EXISTS(SELECT `gen_attr_3` AS `1` FROM (SELECT 1 AS `gen_attr_3` FROM (SELECT `gen_attr_4`, `gen_attr_2` FROM (SELECT `key` AS `gen_attr_4`, `value` AS `gen_attr_2` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_2` > 'val_2')) AS gen_subquery_1 WHERE (`gen_attr_1` = `gen_attr_2`)) AS gen_subquery_3))) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_1.sql b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_1.sql index d6047c52f20fc..3c0e90ed42223 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_1.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_1.sql @@ -6,4 +6,4 @@ having not exists (select a.key from src a where b.value = a.value and a.key = b.key and a.value > 'val_12') -------------------------------------------------------------------------------- -SELECT `gen_attr_3` AS `key`, `gen_attr_0` AS `value` FROM (SELECT `gen_attr_3`, `gen_attr_0` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_0` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_3`, `gen_attr_0` HAVING (NOT EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_2`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_1` > "val_12")) AS gen_subquery_1 WHERE ((`gen_attr_0` = `gen_attr_1`) AND (`gen_attr_2` = `gen_attr_3`))) AS gen_subquery_3))) AS b +SELECT `gen_attr_3` AS `key`, `gen_attr_0` AS `value` FROM (SELECT `gen_attr_3`, `gen_attr_0` FROM (SELECT `key` AS `gen_attr_3`, `value` AS `gen_attr_0` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_3`, `gen_attr_0` HAVING (NOT EXISTS(SELECT `gen_attr_4` AS `1` FROM (SELECT 1 AS `gen_attr_4` FROM (SELECT `gen_attr_2`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_1` > 'val_12')) AS 
gen_subquery_1 WHERE ((`gen_attr_0` = `gen_attr_1`) AND (`gen_attr_2` = `gen_attr_3`))) AS gen_subquery_3))) AS b diff --git a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_2.sql b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_2.sql index 8b5402d8aa77f..0c16f9e58b9b9 100644 --- a/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_2.sql +++ b/sql/hive/src/test/resources/sqlgen/subquery_not_exists_having_2.sql @@ -6,4 +6,4 @@ having not exists (select distinct a.key from src a where b.value = a.value and a.value > 'val_12') -------------------------------------------------------------------------------- -SELECT `gen_attr_2` AS `key`, `gen_attr_0` AS `value` FROM (SELECT `gen_attr_2`, `gen_attr_0` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_0` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_2`, `gen_attr_0` HAVING (NOT EXISTS(SELECT `gen_attr_3` AS `1` FROM (SELECT 1 AS `gen_attr_3` FROM (SELECT DISTINCT `gen_attr_4`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_4`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_1` > "val_12")) AS gen_subquery_1 WHERE (`gen_attr_0` = `gen_attr_1`)) AS gen_subquery_3))) AS b +SELECT `gen_attr_2` AS `key`, `gen_attr_0` AS `value` FROM (SELECT `gen_attr_2`, `gen_attr_0` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_0` FROM `default`.`src`) AS gen_subquery_0 GROUP BY `gen_attr_2`, `gen_attr_0` HAVING (NOT EXISTS(SELECT `gen_attr_3` AS `1` FROM (SELECT 1 AS `gen_attr_3` FROM (SELECT DISTINCT `gen_attr_4`, `gen_attr_1` FROM (SELECT `key` AS `gen_attr_4`, `value` AS `gen_attr_1` FROM `default`.`src`) AS gen_subquery_2 WHERE (`gen_attr_1` > 'val_12')) AS gen_subquery_1 WHERE (`gen_attr_0` = `gen_attr_1`)) AS gen_subquery_3))) AS b diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala index 7249df813b17f..93dc0f493eb7b 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala @@ -24,8 +24,9 @@ import org.apache.spark.sql.catalyst.expressions.{If, Literal, SpecifiedWindowFr class ExpressionSQLBuilderSuite extends SQLBuilderTest { test("literal") { - checkSQL(Literal("foo"), "\"foo\"") - checkSQL(Literal("\"foo\""), "\"\\\"foo\\\"\"") + checkSQL(Literal("foo"), "'foo'") + checkSQL(Literal("\"foo\""), "'\"foo\"'") + checkSQL(Literal("'foo'"), "'\\'foo\\''") checkSQL(Literal(1: Byte), "1Y") checkSQL(Literal(2: Short), "2S") checkSQL(Literal(4: Int), "4") From c1937dd19a23bd096a4707656c7ba19fb5c16966 Mon Sep 17 00:00:00 2001 From: Tejas Patil Date: Tue, 23 Aug 2016 18:48:08 -0700 Subject: [PATCH 074/270] [SPARK-16862] Configurable buffer size in `UnsafeSorterSpillReader` ## What changes were proposed in this pull request? Jira: https://issues.apache.org/jira/browse/SPARK-16862 `BufferedInputStream` used in `UnsafeSorterSpillReader` uses the default 8k buffer to read data off disk. This PR makes it configurable to improve on disk reads. I have made the default value to be 1 MB as with that value I observed improved performance. ## How was this patch tested? I am relying on the existing unit tests. ## Performance After deploying this change to prod and setting the config to 1 mb, there was a 12% reduction in the CPU time and 19.5% reduction in CPU reservation time. 
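A minimal sketch of how a job could opt into the new setting (only the config key, its size-string parsing via `getSizeAsBytes`, and the 1 MB–16 MB bounds come from the patch below; the surrounding application code is illustrative only):

```scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

// Hypothetical driver setup; only the buffer-size config key is taken from this patch.
val conf = new SparkConf()
  .setAppName("spill-reader-buffer-demo")
  // Parsed as a size string; values below 1 MB or above 16 MB fall back to the
  // 1 MB default and log a warning.
  .set("spark.unsafe.sorter.spill.reader.buffer.size", "1m")

val spark = SparkSession.builder().config(conf).getOrCreate()
```

The same key can also be passed on the command line, e.g. `--conf spark.unsafe.sorter.spill.reader.buffer.size=1m` with spark-submit.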
Author: Tejas Patil Closes #14726 from tejasapatil/spill_buffer_2. --- .../unsafe/sort/UnsafeSorterSpillReader.java | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java index 1d588c37c5db0..d048cf7aeb5f1 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java @@ -22,15 +22,21 @@ import com.google.common.io.ByteStreams; import com.google.common.io.Closeables; +import org.apache.spark.SparkEnv; import org.apache.spark.serializer.SerializerManager; import org.apache.spark.storage.BlockId; import org.apache.spark.unsafe.Platform; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Reads spill files written by {@link UnsafeSorterSpillWriter} (see that class for a description * of the file format). */ public final class UnsafeSorterSpillReader extends UnsafeSorterIterator implements Closeable { + private static final Logger logger = LoggerFactory.getLogger(UnsafeSorterSpillReader.class); + private static final int DEFAULT_BUFFER_SIZE_BYTES = 1024 * 1024; // 1 MB + private static final int MAX_BUFFER_SIZE_BYTES = 16777216; // 16 mb private InputStream in; private DataInputStream din; @@ -50,7 +56,21 @@ public UnsafeSorterSpillReader( File file, BlockId blockId) throws IOException { assert (file.length() > 0); - final BufferedInputStream bs = new BufferedInputStream(new FileInputStream(file)); + long bufferSizeBytes = + SparkEnv.get() == null ? + DEFAULT_BUFFER_SIZE_BYTES: + SparkEnv.get().conf().getSizeAsBytes("spark.unsafe.sorter.spill.reader.buffer.size", + DEFAULT_BUFFER_SIZE_BYTES); + if (bufferSizeBytes > MAX_BUFFER_SIZE_BYTES || bufferSizeBytes < DEFAULT_BUFFER_SIZE_BYTES) { + // fall back to a sane default value + logger.warn("Value of config \"spark.unsafe.sorter.spill.reader.buffer.size\" = {} not in " + + "allowed range [{}, {}). Falling back to default value : {} bytes", bufferSizeBytes, + DEFAULT_BUFFER_SIZE_BYTES, MAX_BUFFER_SIZE_BYTES, DEFAULT_BUFFER_SIZE_BYTES); + bufferSizeBytes = DEFAULT_BUFFER_SIZE_BYTES; + } + + final BufferedInputStream bs = + new BufferedInputStream(new FileInputStream(file), (int) bufferSizeBytes); try { this.in = serializerManager.wrapForCompression(blockId, bs); this.din = new DataInputStream(this.in); From b9994ad05628077016331e6b411fbc09017b1e63 Mon Sep 17 00:00:00 2001 From: Weiqing Yang Date: Tue, 23 Aug 2016 23:44:45 -0700 Subject: [PATCH 075/270] [MINOR][SQL] Remove implemented functions from comments of 'HiveSessionCatalog.scala' ## What changes were proposed in this pull request? This PR removes implemented functions from comments of `HiveSessionCatalog.scala`: `java_method`, `posexplode`, `str_to_map`. ## How was this patch tested? Manual. Author: Weiqing Yang Closes #14769 from Sherry302/cleanComment. 
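Since the point of this patch is that `java_method`, `posexplode`, and `str_to_map` are now implemented in Spark SQL, a quick hypothetical spark-shell check (assuming a running `SparkSession` named `spark`) might look like the following sketch:

```scala
// Each function below was dropped from the "not supported" comment because it is
// now implemented (per the commit message above); these calls are expected to work.
spark.sql("SELECT str_to_map('a:1,b:2', ',', ':')").show()
spark.sql("SELECT posexplode(array(10, 20, 30))").show()
spark.sql("SELECT java_method('java.util.UUID', 'randomUUID')").show()
```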
--- .../org/apache/spark/sql/hive/HiveSessionCatalog.scala | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala index ebed9eb6e7dca..ca8c7347f23e9 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala @@ -230,10 +230,8 @@ private[sql] class HiveSessionCatalog( // List of functions we are explicitly not supporting are: // compute_stats, context_ngrams, create_union, // current_user, ewah_bitmap, ewah_bitmap_and, ewah_bitmap_empty, ewah_bitmap_or, field, - // in_file, index, java_method, - // matchpath, ngrams, noop, noopstreaming, noopwithmap, noopwithmapstreaming, - // parse_url_tuple, posexplode, reflect2, - // str_to_map, windowingtablefunction. + // in_file, index, matchpath, ngrams, noop, noopstreaming, noopwithmap, + // noopwithmapstreaming, parse_url_tuple, reflect2, windowingtablefunction. private val hiveFunctions = Seq( "hash", "histogram_numeric", From 52fa45d62a5a0bc832442f38f9e634c5d8e29e08 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Tue, 23 Aug 2016 23:46:09 -0700 Subject: [PATCH 076/270] [SPARK-17186][SQL] remove catalog table type INDEX ## What changes were proposed in this pull request? Spark SQL does not actually support indexes; the catalog table type `INDEX` comes from Hive. Most operations in Spark SQL (e.g. create table, alter table) cannot handle index tables. Logically an index table should be invisible to end users, and Hive generates a special table name for each index table to keep users from accessing it directly. Hive also has dedicated SQL syntax to create/show/drop index tables. On the Spark SQL side we can describe an index table directly, but the result is unreadable, so the dedicated syntax (e.g. `SHOW INDEX ON tbl`) should be used instead. Spark SQL can also read an index table directly, but the result is always empty. (Can Hive read index tables directly?) This PR removes the table type `INDEX` to make it clear that Spark SQL does not currently support indexes. ## How was this patch tested? Existing tests. Author: Wenchen Fan Closes #14752 from cloud-fan/minor2. 
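As a user-visible consequence of this change (mirrored by the `HiveCommandSuite` update in the diff below), `SHOW PARTITIONS` on a view now reports an error that mentions only views. A hypothetical spark-shell check, with a made-up view name and assuming a running `SparkSession` named `spark`:

```scala
import org.apache.spark.sql.AnalysisException

// Hypothetical: `some_view` is an illustrative view name, not one from this patch.
try {
  spark.sql("SHOW PARTITIONS some_view")
} catch {
  case e: AnalysisException =>
    // After this patch the message no longer mentions index tables.
    assert(e.getMessage.contains("is not allowed on a view"))
}
```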
--- .../org/apache/spark/sql/catalyst/catalog/interface.scala | 1 - .../org/apache/spark/sql/execution/command/tables.scala | 8 +++----- .../org/apache/spark/sql/hive/MetastoreRelation.scala | 1 - .../org/apache/spark/sql/hive/client/HiveClientImpl.scala | 4 ++-- .../spark/sql/hive/execution/HiveCommandSuite.scala | 2 +- 5 files changed, 6 insertions(+), 10 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala index f7762e0f8acd3..83e01f95c06af 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala @@ -200,7 +200,6 @@ case class CatalogTableType private(name: String) object CatalogTableType { val EXTERNAL = new CatalogTableType("EXTERNAL") val MANAGED = new CatalogTableType("MANAGED") - val INDEX = new CatalogTableType("INDEX") val VIEW = new CatalogTableType("VIEW") } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 21544a37d9975..b4a15b8b2882e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -620,12 +620,11 @@ case class ShowPartitionsCommand( * Validate and throws an [[AnalysisException]] exception under the following conditions: * 1. If the table is not partitioned. * 2. If it is a datasource table. - * 3. If it is a view or index table. + * 3. If it is a view. */ - if (tab.tableType == VIEW || - tab.tableType == INDEX) { + if (tab.tableType == VIEW) { throw new AnalysisException( - s"SHOW PARTITIONS is not allowed on a view or index table: ${tab.qualifiedName}") + s"SHOW PARTITIONS is not allowed on a view: ${tab.qualifiedName}") } if (tab.partitionColumnNames.isEmpty) { @@ -708,7 +707,6 @@ case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableComman case EXTERNAL => " EXTERNAL TABLE" case VIEW => " VIEW" case MANAGED => " TABLE" - case INDEX => reportUnsupportedError(Seq("index table")) } builder ++= s"CREATE$tableTypeString ${table.quotedString}" diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala index 195fce8354134..d62bc983d0279 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala @@ -80,7 +80,6 @@ private[hive] case class MetastoreRelation( tTable.setTableType(catalogTable.tableType match { case CatalogTableType.EXTERNAL => HiveTableType.EXTERNAL_TABLE.toString case CatalogTableType.MANAGED => HiveTableType.MANAGED_TABLE.toString - case CatalogTableType.INDEX => HiveTableType.INDEX_TABLE.toString case CatalogTableType.VIEW => HiveTableType.VIRTUAL_VIEW.toString }) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index 9b7afd462841c..81d5a124e9d4a 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -379,8 +379,9 @@ private[hive] class HiveClientImpl( tableType = h.getTableType match { case 
HiveTableType.EXTERNAL_TABLE => CatalogTableType.EXTERNAL case HiveTableType.MANAGED_TABLE => CatalogTableType.MANAGED - case HiveTableType.INDEX_TABLE => CatalogTableType.INDEX case HiveTableType.VIRTUAL_VIEW => CatalogTableType.VIEW + case HiveTableType.INDEX_TABLE => + throw new AnalysisException("Hive index table is not supported.") }, schema = schema, partitionColumnNames = partCols.map(_.name), @@ -757,7 +758,6 @@ private[hive] class HiveClientImpl( HiveTableType.EXTERNAL_TABLE case CatalogTableType.MANAGED => HiveTableType.MANAGED_TABLE - case CatalogTableType.INDEX => HiveTableType.INDEX_TABLE case CatalogTableType.VIEW => HiveTableType.VIRTUAL_VIEW }) // Note: In Hive the schema and partition columns must be disjoint sets diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala index 76aa84b19410d..df33731df2d00 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala @@ -424,7 +424,7 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto val message4 = intercept[AnalysisException] { sql("SHOW PARTITIONS parquet_view1") }.getMessage - assert(message4.contains("is not allowed on a view or index table")) + assert(message4.contains("is not allowed on a view")) } } From 673a80d2230602c9e6573a23e35fb0f6b832bfca Mon Sep 17 00:00:00 2001 From: Weiqing Yang Date: Wed, 24 Aug 2016 10:12:44 +0100 Subject: [PATCH 077/270] [MINOR][BUILD] Fix Java CheckStyle Error ## What changes were proposed in this pull request? As Spark 2.0.1 will be released soon (mentioned in the spark dev mailing list), besides the critical bugs, it's better to fix the code style errors before the release. Before: ``` ./dev/lint-java Checkstyle checks failed at following occurrences: [ERROR] src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java:[525] (sizes) LineLength: Line is longer than 100 characters (found 119). [ERROR] src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredNetworkWordCount.java:[64] (sizes) LineLength: Line is longer than 100 characters (found 103). ``` After: ``` ./dev/lint-java Using `mvn` from path: /usr/local/bin/mvn Checkstyle checks passed. ``` ## How was this patch tested? Manual. Author: Weiqing Yang Closes #14768 from Sherry302/fixjavastyle. --- .../collection/unsafe/sort/UnsafeExternalSorter.java | 3 ++- .../sql/streaming/JavaStructuredNetworkWordCount.java | 11 ++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java index ccf76643db2b4..196e67d8b29b6 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java @@ -522,7 +522,8 @@ public long spill() throws IOException { // is accessing the current record. We free this page in that caller's next loadNext() // call. 
for (MemoryBlock page : allocatedPages) { - if (!loaded || page.pageNumber != ((UnsafeInMemorySorter.SortedIterator)upstream).getCurrentPageNumber()) { + if (!loaded || page.pageNumber != + ((UnsafeInMemorySorter.SortedIterator)upstream).getCurrentPageNumber()) { released += page.size(); freePage(page); } else { diff --git a/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredNetworkWordCount.java index c913ee0658504..5f342e1ead6ca 100644 --- a/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredNetworkWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredNetworkWordCount.java @@ -61,11 +61,12 @@ public static void main(String[] args) throws Exception { .load(); // Split the lines into words - Dataset words = lines.as(Encoders.STRING()).flatMap(new FlatMapFunction() { - @Override - public Iterator call(String x) { - return Arrays.asList(x.split(" ")).iterator(); - } + Dataset words = lines.as(Encoders.STRING()) + .flatMap(new FlatMapFunction() { + @Override + public Iterator call(String x) { + return Arrays.asList(x.split(" ")).iterator(); + } }, Encoders.STRING()); // Generate running word count From 92c0eaf348b42b3479610da0be761013f9d81c54 Mon Sep 17 00:00:00 2001 From: VinceShieh Date: Wed, 24 Aug 2016 10:16:58 -0700 Subject: [PATCH 078/270] [SPARK-17086][ML] Fix InvalidArgumentException issue in QuantileDiscretizer when some quantiles are duplicated ## What changes were proposed in this pull request? When QuantileDiscretizer is applied to a numeric column whose computed quantiles contain duplicated values, we now take only the unique elements produced by approxQuantiles as the input splits for Bucketizer. ## How was this patch tested? A unit test is added in QuantileDiscretizerSuite. QuantileDiscretizer.fit previously threw an exception because setSplits was called with duplicated split values; Bucketizer.setSplits only accepts a numeric vector of two or more unique cut points, even though deduplication may produce fewer buckets than requested. Signed-off-by: VinceShieh Author: VinceShieh Closes #14747 from VinceShieh/SPARK-17086. --- .../ml/feature/QuantileDiscretizer.scala | 7 ++++++- .../ml/feature/QuantileDiscretizerSuite.scala | 19 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala index 558a7bbf0a2df..e09800877c694 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala @@ -114,7 +114,12 @@ final class QuantileDiscretizer @Since("1.6.0") (@Since("1.6.0") override val ui splits(0) = Double.NegativeInfinity splits(splits.length - 1) = Double.PositiveInfinity - val bucketizer = new Bucketizer(uid).setSplits(splits) + val distinctSplits = splits.distinct + if (splits.length != distinctSplits.length) { + log.warn(s"Some quantiles were identical. 
Bucketing to ${distinctSplits.length - 1}" + + s" buckets as a result.") + } + val bucketizer = new Bucketizer(uid).setSplits(distinctSplits.sorted) copyValues(bucketizer.setParent(this)) } diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala index b73dbd62328cf..18f1e89ee8148 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala @@ -52,6 +52,25 @@ class QuantileDiscretizerSuite "Bucket sizes are not within expected relative error tolerance.") } + test("Test Bucketizer on duplicated splits") { + val spark = this.spark + import spark.implicits._ + + val datasetSize = 12 + val numBuckets = 5 + val df = sc.parallelize(Array(1.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0)) + .map(Tuple1.apply).toDF("input") + val discretizer = new QuantileDiscretizer() + .setInputCol("input") + .setOutputCol("result") + .setNumBuckets(numBuckets) + val result = discretizer.fit(df).transform(df) + + val observedNumBuckets = result.select("result").distinct.count + assert(2 <= observedNumBuckets && observedNumBuckets <= numBuckets, + "Observed number of buckets are not within expected range.") + } + test("Test transform method on unseen data") { val spark = this.spark import spark.implicits._ From 45b786aca2b5818dc233643e6b3a53b869560563 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Wed, 24 Aug 2016 08:24:16 -0700 Subject: [PATCH 079/270] [MINOR][DOC] Fix wrong ml.feature.Normalizer document. ## What changes were proposed in this pull request? The ```ml.feature.Normalizer``` examples illustrate L1 norm rather than L2, we should correct corresponding document. ![image](https://cloud.githubusercontent.com/assets/1962026/17928637/85aec284-69b0-11e6-9b13-d465ee560581.png) ## How was this patch tested? Doc change, no test. Author: Yanbo Liang Closes #14787 from yanboliang/normalizer. --- docs/ml-features.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ml-features.md b/docs/ml-features.md index 6020114845486..e41bf78521b6e 100644 --- a/docs/ml-features.md +++ b/docs/ml-features.md @@ -734,7 +734,7 @@ for more details on the API. `Normalizer` is a `Transformer` which transforms a dataset of `Vector` rows, normalizing each `Vector` to have unit norm. It takes parameter `p`, which specifies the [p-norm](http://en.wikipedia.org/wiki/Norm_%28mathematics%29#p-norm) used for normalization. ($p = 2$ by default.) This normalization can help standardize your input data and improve the behavior of learning algorithms. -The following example demonstrates how to load a dataset in libsvm format and then normalize each row to have unit $L^2$ norm and unit $L^\infty$ norm. +The following example demonstrates how to load a dataset in libsvm format and then normalize each row to have unit $L^1$ norm and unit $L^\infty$ norm.
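To make the corrected doc sentence concrete, here is a sketch of the corresponding Scala usage (the libsvm path is the standard example dataset and is an assumption, as is a running `SparkSession` named `spark`):

```scala
import org.apache.spark.ml.feature.Normalizer

// Load a libsvm-format dataset into a DataFrame with a "features" vector column.
val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

// Unit L^1 norm: p = 1, matching the corrected sentence.
val normalizer = new Normalizer()
  .setInputCol("features")
  .setOutputCol("normFeatures")
  .setP(1.0)
val l1NormData = normalizer.transform(dataFrame)

// Unit L^infinity norm: override p at transform time instead of building a new stage.
val lInfNormData = normalizer.transform(dataFrame, normalizer.p -> Double.PositiveInfinity)
```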
From d2932a0e987132c694ed59515b7c77adaad052e6 Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Wed, 24 Aug 2016 10:40:09 -0700 Subject: [PATCH 080/270] [SPARKR][MINOR] Fix doc for show method ## What changes were proposed in this pull request? The original doc of `show` put methods for multiple classes together but the text only talks about `SparkDataFrame`. This PR tries to fix this problem. ## How was this patch tested? Manual test. Author: Junyang Qian Closes #14776 from junyangq/SPARK-FixShowDoc. --- R/pkg/R/DataFrame.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index 52a6628ad7b32..e12b58e2eefc5 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -212,9 +212,9 @@ setMethod("showDF", #' show #' -#' Print the SparkDataFrame column names and types +#' Print class and type information of a Spark object. #' -#' @param object a SparkDataFrame. +#' @param object a Spark object. Can be a SparkDataFrame, Column, GroupedData, WindowSpec. #' #' @family SparkDataFrame functions #' @rdname show From 2fbdb606392631b1dff88ec86f388cc2559c28f5 Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Wed, 24 Aug 2016 11:18:10 -0700 Subject: [PATCH 081/270] [SPARK-16445][MLLIB][SPARKR] Multilayer Perceptron Classifier wrapper in SparkR https://issues.apache.org/jira/browse/SPARK-16445 ## What changes were proposed in this pull request? Create Multilayer Perceptron Classifier wrapper in SparkR ## How was this patch tested? Tested manually on local machine Author: Xin Ren Closes #14447 from keypointt/SPARK-16445. --- R/pkg/NAMESPACE | 1 + R/pkg/R/generics.R | 4 + R/pkg/R/mllib.R | 125 +++++++++++++++- R/pkg/inst/tests/testthat/test_mllib.R | 32 +++++ ...ultilayerPerceptronClassifierWrapper.scala | 134 ++++++++++++++++++ .../org/apache/spark/ml/r/RWrappers.scala | 2 + 6 files changed, 293 insertions(+), 5 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/MultilayerPerceptronClassifierWrapper.scala diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index 709057675e578..ad587a6b7d03a 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -27,6 +27,7 @@ exportMethods("glm", "summary", "spark.kmeans", "fitted", + "spark.mlp", "spark.naiveBayes", "spark.survreg", "spark.lda", diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 88884e62575df..7e626be50808d 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -1330,6 +1330,10 @@ setGeneric("spark.kmeans", function(data, formula, ...) { standardGeneric("spark #' @export setGeneric("fitted") +#' @rdname spark.mlp +#' @export +setGeneric("spark.mlp", function(data, ...) { standardGeneric("spark.mlp") }) + #' @rdname spark.naiveBayes #' @export setGeneric("spark.naiveBayes", function(data, formula, ...) 
{ standardGeneric("spark.naiveBayes") }) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index a40310d194d27..a670600ca6938 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -60,6 +60,13 @@ setClass("AFTSurvivalRegressionModel", representation(jobj = "jobj")) #' @note KMeansModel since 2.0.0 setClass("KMeansModel", representation(jobj = "jobj")) +#' S4 class that represents a MultilayerPerceptronClassificationModel +#' +#' @param jobj a Java object reference to the backing Scala MultilayerPerceptronClassifierWrapper +#' @export +#' @note MultilayerPerceptronClassificationModel since 2.1.0 +setClass("MultilayerPerceptronClassificationModel", representation(jobj = "jobj")) + #' S4 class that represents an IsotonicRegressionModel #' #' @param jobj a Java object reference to the backing Scala IsotonicRegressionModel @@ -90,7 +97,7 @@ setClass("ALSModel", representation(jobj = "jobj")) #' @export #' @seealso \link{spark.glm}, \link{glm}, #' @seealso \link{spark.als}, \link{spark.gaussianMixture}, \link{spark.isoreg}, \link{spark.kmeans}, -#' @seealso \link{spark.lda}, \link{spark.naiveBayes}, \link{spark.survreg}, +#' @seealso \link{spark.lda}, \link{spark.mlp}, \link{spark.naiveBayes}, \link{spark.survreg} #' @seealso \link{read.ml} NULL @@ -103,7 +110,7 @@ NULL #' @export #' @seealso \link{spark.glm}, \link{glm}, #' @seealso \link{spark.als}, \link{spark.gaussianMixture}, \link{spark.isoreg}, \link{spark.kmeans}, -#' @seealso \link{spark.naiveBayes}, \link{spark.survreg}, +#' @seealso \link{spark.mlp}, \link{spark.naiveBayes}, \link{spark.survreg} NULL write_internal <- function(object, path, overwrite = FALSE) { @@ -631,6 +638,95 @@ setMethod("predict", signature(object = "KMeansModel"), predict_internal(object, newData) }) +#' Multilayer Perceptron Classification Model +#' +#' \code{spark.mlp} fits a multi-layer perceptron neural network model against a SparkDataFrame. +#' Users can call \code{summary} to print a summary of the fitted model, \code{predict} to make +#' predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. +#' Only categorical data is supported. +#' For more details, see +#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html}{ +#' Multilayer Perceptron} +#' +#' @param data a \code{SparkDataFrame} of observations and labels for model fitting. +#' @param blockSize blockSize parameter. +#' @param layers integer vector containing the number of nodes for each layer +#' @param solver solver parameter, supported options: "gd" (minibatch gradient descent) or "l-bfgs". +#' @param maxIter maximum iteration number. +#' @param tol convergence tolerance of iterations. +#' @param stepSize stepSize parameter. +#' @param seed seed parameter for weights initialization. +#' @param ... additional arguments passed to the method. +#' @return \code{spark.mlp} returns a fitted Multilayer Perceptron Classification Model. 
+#' @rdname spark.mlp +#' @aliases spark.mlp,SparkDataFrame-method +#' @name spark.mlp +#' @seealso \link{read.ml} +#' @export +#' @examples +#' \dontrun{ +#' df <- read.df("data/mllib/sample_multiclass_classification_data.txt", source = "libsvm") +#' +#' # fit a Multilayer Perceptron Classification Model +#' model <- spark.mlp(df, blockSize = 128, layers = c(4, 5, 4, 3), solver = "l-bfgs", +#' maxIter = 100, tol = 0.5, stepSize = 1, seed = 1) +#' +#' # get the summary of the model +#' summary(model) +#' +#' # make predictions +#' predictions <- predict(model, df) +#' +#' # save and load the model +#' path <- "path/to/model" +#' write.ml(model, path) +#' savedModel <- read.ml(path) +#' summary(savedModel) +#' } +#' @note spark.mlp since 2.1.0 +setMethod("spark.mlp", signature(data = "SparkDataFrame"), + function(data, blockSize = 128, layers = c(3, 5, 2), solver = "l-bfgs", maxIter = 100, + tol = 0.5, stepSize = 1, seed = 1) { + jobj <- callJStatic("org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper", + "fit", data@sdf, as.integer(blockSize), as.array(layers), + as.character(solver), as.integer(maxIter), as.numeric(tol), + as.numeric(stepSize), as.integer(seed)) + new("MultilayerPerceptronClassificationModel", jobj = jobj) + }) + +# Makes predictions from a model produced by spark.mlp(). + +#' @param newData a SparkDataFrame for testing. +#' @return \code{predict} returns a SparkDataFrame containing predicted labeled in a column named +#' "prediction". +#' @rdname spark.mlp +#' @aliases predict,MultilayerPerceptronClassificationModel-method +#' @export +#' @note predict(MultilayerPerceptronClassificationModel) since 2.1.0 +setMethod("predict", signature(object = "MultilayerPerceptronClassificationModel"), + function(object, newData) { + predict_internal(object, newData) + }) + +# Returns the summary of a Multilayer Perceptron Classification Model produced by \code{spark.mlp} + +#' @param object a Multilayer Perceptron Classification Model fitted by \code{spark.mlp} +#' @return \code{summary} returns a list containing \code{layers}, the label distribution, and +#' \code{tables}, conditional probabilities given the target label. +#' @rdname spark.mlp +#' @export +#' @aliases summary,MultilayerPerceptronClassificationModel-method +#' @note summary(MultilayerPerceptronClassificationModel) since 2.1.0 +setMethod("summary", signature(object = "MultilayerPerceptronClassificationModel"), + function(object) { + jobj <- object@jobj + labelCount <- callJMethod(jobj, "labelCount") + layers <- unlist(callJMethod(jobj, "layers")) + weights <- callJMethod(jobj, "weights") + weights <- matrix(weights, nrow = length(weights)) + list(labelCount = labelCount, layers = layers, weights = weights) + }) + #' Naive Bayes Models #' #' \code{spark.naiveBayes} fits a Bernoulli naive Bayes model against a SparkDataFrame. 
@@ -685,7 +781,7 @@ setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "form #' #' @rdname spark.naiveBayes #' @export -#' @seealso \link{read.ml} +#' @seealso \link{write.ml} #' @note write.ml(NaiveBayesModel, character) since 2.0.0 setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"), function(object, path, overwrite = FALSE) { @@ -700,7 +796,7 @@ setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"), #' @rdname spark.survreg #' @export #' @note write.ml(AFTSurvivalRegressionModel, character) since 2.0.0 -#' @seealso \link{read.ml} +#' @seealso \link{write.ml} setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = "character"), function(object, path, overwrite = FALSE) { write_internal(object, path, overwrite) @@ -734,6 +830,23 @@ setMethod("write.ml", signature(object = "KMeansModel", path = "character"), write_internal(object, path, overwrite) }) +# Saves the Multilayer Perceptron Classification Model to the input path. + +#' @param path the directory where the model is saved. +#' @param overwrite overwrites or not if the output path already exists. Default is FALSE +#' which means throw exception if the output path exists. +#' +#' @rdname spark.mlp +#' @aliases write.ml,MultilayerPerceptronClassificationModel,character-method +#' @export +#' @seealso \link{write.ml} +#' @note write.ml(MultilayerPerceptronClassificationModel, character) since 2.1.0 +setMethod("write.ml", signature(object = "MultilayerPerceptronClassificationModel", + path = "character"), + function(object, path, overwrite = FALSE) { + write_internal(object, path, overwrite) + }) + # Save fitted IsotonicRegressionModel to the input path #' @param path The directory where the model is saved @@ -791,6 +904,8 @@ read.ml <- function(path) { new("KMeansModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.LDAWrapper")) { new("LDAModel", jobj = jobj) + } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper")) { + new("MultilayerPerceptronClassificationModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.IsotonicRegressionWrapper")) { new("IsotonicRegressionModel", jobj = jobj) } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.GaussianMixtureWrapper")) { @@ -798,7 +913,7 @@ read.ml <- function(path) { } else if (isInstanceOf(jobj, "org.apache.spark.ml.r.ALSWrapper")) { new("ALSModel", jobj = jobj) } else { - stop(paste("Unsupported model: ", jobj)) + stop("Unsupported model: ", jobj) } } diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index de9bd48662c3a..1e6da650d1bb8 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -347,6 +347,38 @@ test_that("spark.kmeans", { unlink(modelPath) }) +test_that("spark.mlp", { + df <- read.df("data/mllib/sample_multiclass_classification_data.txt", source = "libsvm") + model <- spark.mlp(df, blockSize = 128, layers = c(4, 5, 4, 3), solver = "l-bfgs", maxIter = 100, + tol = 0.5, stepSize = 1, seed = 1) + + # Test summary method + summary <- summary(model) + expect_equal(summary$labelCount, 3) + expect_equal(summary$layers, c(4, 5, 4, 3)) + expect_equal(length(summary$weights), 64) + + # Test predict method + mlpTestDF <- df + mlpPredictions <- collect(select(predict(model, mlpTestDF), "prediction")) + expect_equal(head(mlpPredictions$prediction, 6), c(0, 1, 1, 1, 1, 1)) + + # Test model save/load + modelPath <- 
tempfile(pattern = "spark-mlp", fileext = ".tmp") + write.ml(model, modelPath) + expect_error(write.ml(model, modelPath)) + write.ml(model, modelPath, overwrite = TRUE) + model2 <- read.ml(modelPath) + summary2 <- summary(model2) + + expect_equal(summary2$labelCount, 3) + expect_equal(summary2$layers, c(4, 5, 4, 3)) + expect_equal(length(summary2$weights), 64) + + unlink(modelPath) + +}) + test_that("spark.naiveBayes", { # R code to reproduce the result. # We do not support instance weights yet. So we ignore the frequencies. diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/MultilayerPerceptronClassifierWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/MultilayerPerceptronClassifierWrapper.scala new file mode 100644 index 0000000000000..be51e74187faa --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/MultilayerPerceptronClassifierWrapper.scala @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.r + +import org.apache.hadoop.fs.Path +import org.json4s._ +import org.json4s.JsonDSL._ +import org.json4s.jackson.JsonMethods._ + +import org.apache.spark.ml.{Pipeline, PipelineModel} +import org.apache.spark.ml.classification.{MultilayerPerceptronClassificationModel, MultilayerPerceptronClassifier} +import org.apache.spark.ml.util.{MLReadable, MLReader, MLWritable, MLWriter} +import org.apache.spark.sql.{DataFrame, Dataset} + +private[r] class MultilayerPerceptronClassifierWrapper private ( + val pipeline: PipelineModel, + val labelCount: Long, + val layers: Array[Int], + val weights: Array[Double] + ) extends MLWritable { + + def transform(dataset: Dataset[_]): DataFrame = { + pipeline.transform(dataset) + } + + /** + * Returns an [[MLWriter]] instance for this ML instance. 
+ */ + override def write: MLWriter = + new MultilayerPerceptronClassifierWrapper.MultilayerPerceptronClassifierWrapperWriter(this) +} + +private[r] object MultilayerPerceptronClassifierWrapper + extends MLReadable[MultilayerPerceptronClassifierWrapper] { + + val PREDICTED_LABEL_COL = "prediction" + + def fit( + data: DataFrame, + blockSize: Int, + layers: Array[Double], + solver: String, + maxIter: Int, + tol: Double, + stepSize: Double, + seed: Int + ): MultilayerPerceptronClassifierWrapper = { + // get labels and feature names from output schema + val schema = data.schema + + // assemble and fit the pipeline + val mlp = new MultilayerPerceptronClassifier() + .setLayers(layers.map(_.toInt)) + .setBlockSize(blockSize) + .setSolver(solver) + .setMaxIter(maxIter) + .setTol(tol) + .setStepSize(stepSize) + .setSeed(seed) + .setPredictionCol(PREDICTED_LABEL_COL) + val pipeline = new Pipeline() + .setStages(Array(mlp)) + .fit(data) + + val multilayerPerceptronClassificationModel: MultilayerPerceptronClassificationModel = + pipeline.stages.head.asInstanceOf[MultilayerPerceptronClassificationModel] + + val weights = multilayerPerceptronClassificationModel.weights.toArray + val layersFromPipeline = multilayerPerceptronClassificationModel.layers + val labelCount = data.select("label").distinct().count() + + new MultilayerPerceptronClassifierWrapper(pipeline, labelCount, layersFromPipeline, weights) + } + + /** + * Returns an [[MLReader]] instance for this class. + */ + override def read: MLReader[MultilayerPerceptronClassifierWrapper] = + new MultilayerPerceptronClassifierWrapperReader + + override def load(path: String): MultilayerPerceptronClassifierWrapper = super.load(path) + + class MultilayerPerceptronClassifierWrapperReader + extends MLReader[MultilayerPerceptronClassifierWrapper]{ + + override def load(path: String): MultilayerPerceptronClassifierWrapper = { + implicit val format = DefaultFormats + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + + val rMetadataStr = sc.textFile(rMetadataPath, 1).first() + val rMetadata = parse(rMetadataStr) + val labelCount = (rMetadata \ "labelCount").extract[Long] + val layers = (rMetadata \ "layers").extract[Array[Int]] + val weights = (rMetadata \ "weights").extract[Array[Double]] + + val pipeline = PipelineModel.load(pipelinePath) + new MultilayerPerceptronClassifierWrapper(pipeline, labelCount, layers, weights) + } + } + + class MultilayerPerceptronClassifierWrapperWriter(instance: MultilayerPerceptronClassifierWrapper) + extends MLWriter { + + override protected def saveImpl(path: String): Unit = { + val rMetadataPath = new Path(path, "rMetadata").toString + val pipelinePath = new Path(path, "pipeline").toString + + val rMetadata = ("class" -> instance.getClass.getName) ~ + ("labelCount" -> instance.labelCount) ~ + ("layers" -> instance.layers.toSeq) ~ + ("weights" -> instance.weights.toArray.toSeq) + val rMetadataJson: String = compact(render(rMetadata)) + sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath) + + instance.pipeline.save(pipelinePath) + } + } +} diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala index 51a65f7fc4fe8..d64de1b6abb63 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala @@ -44,6 +44,8 @@ private[r] object RWrappers extends MLReader[Object] { 
GeneralizedLinearRegressionWrapper.load(path) case "org.apache.spark.ml.r.KMeansWrapper" => KMeansWrapper.load(path) + case "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper" => + MultilayerPerceptronClassifierWrapper.load(path) case "org.apache.spark.ml.r.LDAWrapper" => LDAWrapper.load(path) case "org.apache.spark.ml.r.IsotonicRegressionWrapper" => From 0b3a4be92ca6b38eef32ea5ca240d9f91f68aa65 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Wed, 24 Aug 2016 20:04:09 +0100 Subject: [PATCH 082/270] [SPARK-16781][PYSPARK] java launched by PySpark as gateway may not be the same java used in the spark environment ## What changes were proposed in this pull request? Update to py4j 0.10.3 to enable JAVA_HOME support ## How was this patch tested? Pyspark tests Author: Sean Owen Closes #14748 from srowen/SPARK-16781. --- LICENSE | 2 +- bin/pyspark | 2 +- bin/pyspark2.cmd | 2 +- core/pom.xml | 2 +- .../apache/spark/api/python/PythonUtils.scala | 2 +- dev/deps/spark-deps-hadoop-2.2 | 2 +- dev/deps/spark-deps-hadoop-2.3 | 2 +- dev/deps/spark-deps-hadoop-2.4 | 2 +- dev/deps/spark-deps-hadoop-2.6 | 2 +- dev/deps/spark-deps-hadoop-2.7 | 2 +- python/docs/Makefile | 2 +- python/lib/py4j-0.10.1-src.zip | Bin 61356 -> 0 bytes python/lib/py4j-0.10.3-src.zip | Bin 0 -> 91275 bytes sbin/spark-config.sh | 2 +- .../org/apache/spark/deploy/yarn/Client.scala | 6 +++--- .../spark/deploy/yarn/YarnClusterSuite.scala | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) delete mode 100644 python/lib/py4j-0.10.1-src.zip create mode 100644 python/lib/py4j-0.10.3-src.zip diff --git a/LICENSE b/LICENSE index 94fd46f568473..d68609cc28733 100644 --- a/LICENSE +++ b/LICENSE @@ -263,7 +263,7 @@ The text of each license is also included at licenses/LICENSE-[project].txt. (New BSD license) Protocol Buffer Java API (org.spark-project.protobuf:protobuf-java:2.4.1-shaded - http://code.google.com/p/protobuf) (The BSD License) Fortran to Java ARPACK (net.sourceforge.f2j:arpack_combined_all:0.1 - http://f2j.sourceforge.net) (The BSD License) xmlenc Library (xmlenc:xmlenc:0.52 - http://xmlenc.sourceforge.net) - (The New BSD License) Py4J (net.sf.py4j:py4j:0.10.1 - http://py4j.sourceforge.net/) + (The New BSD License) Py4J (net.sf.py4j:py4j:0.10.3 - http://py4j.sourceforge.net/) (Two-clause BSD-style license) JUnit-Interface (com.novocode:junit-interface:0.10 - http://github.com/szeiger/junit-interface/) (BSD licence) sbt and sbt-launch-lib.bash (BSD 3 Clause) d3.min.js (https://github.com/mbostock/d3/blob/master/LICENSE) diff --git a/bin/pyspark b/bin/pyspark index a0d7e22e8ad82..7590309b442ed 100755 --- a/bin/pyspark +++ b/bin/pyspark @@ -57,7 +57,7 @@ export PYSPARK_PYTHON # Add the PySpark classes to the Python path: export PYTHONPATH="${SPARK_HOME}/python/:$PYTHONPATH" -export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.1-src.zip:$PYTHONPATH" +export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.3-src.zip:$PYTHONPATH" # Load the PySpark shell.py script when ./pyspark is used interactively: export OLD_PYTHONSTARTUP="$PYTHONSTARTUP" diff --git a/bin/pyspark2.cmd b/bin/pyspark2.cmd index 3e2ff100fb8af..1217a4f2f97a2 100644 --- a/bin/pyspark2.cmd +++ b/bin/pyspark2.cmd @@ -30,7 +30,7 @@ if "x%PYSPARK_DRIVER_PYTHON%"=="x" ( ) set PYTHONPATH=%SPARK_HOME%\python;%PYTHONPATH% -set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.10.1-src.zip;%PYTHONPATH% +set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.10.3-src.zip;%PYTHONPATH% set OLD_PYTHONSTARTUP=%PYTHONSTARTUP% set 
diff --git a/LICENSE b/LICENSE
index 94fd46f568473..d68609cc28733 100644
--- a/LICENSE
+++ b/LICENSE
@@ -263,7 +263,7 @@ The text of each license is also included at licenses/LICENSE-[project].txt.
      (New BSD license) Protocol Buffer Java API (org.spark-project.protobuf:protobuf-java:2.4.1-shaded - http://code.google.com/p/protobuf)
      (The BSD License) Fortran to Java ARPACK (net.sourceforge.f2j:arpack_combined_all:0.1 - http://f2j.sourceforge.net)
      (The BSD License) xmlenc Library (xmlenc:xmlenc:0.52 - http://xmlenc.sourceforge.net)
-     (The New BSD License) Py4J (net.sf.py4j:py4j:0.10.1 - http://py4j.sourceforge.net/)
+     (The New BSD License) Py4J (net.sf.py4j:py4j:0.10.3 - http://py4j.sourceforge.net/)
      (Two-clause BSD-style license) JUnit-Interface (com.novocode:junit-interface:0.10 - http://github.com/szeiger/junit-interface/)
      (BSD licence) sbt and sbt-launch-lib.bash
      (BSD 3 Clause) d3.min.js (https://github.com/mbostock/d3/blob/master/LICENSE)
diff --git a/bin/pyspark b/bin/pyspark
index a0d7e22e8ad82..7590309b442ed 100755
--- a/bin/pyspark
+++ b/bin/pyspark
@@ -57,7 +57,7 @@ export PYSPARK_PYTHON
 
 # Add the PySpark classes to the Python path:
 export PYTHONPATH="${SPARK_HOME}/python/:$PYTHONPATH"
-export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.1-src.zip:$PYTHONPATH"
+export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.3-src.zip:$PYTHONPATH"
 
 # Load the PySpark shell.py script when ./pyspark is used interactively:
 export OLD_PYTHONSTARTUP="$PYTHONSTARTUP"
diff --git a/bin/pyspark2.cmd b/bin/pyspark2.cmd
index 3e2ff100fb8af..1217a4f2f97a2 100644
--- a/bin/pyspark2.cmd
+++ b/bin/pyspark2.cmd
@@ -30,7 +30,7 @@ if "x%PYSPARK_DRIVER_PYTHON%"=="x" (
 )
 
 set PYTHONPATH=%SPARK_HOME%\python;%PYTHONPATH%
-set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.10.1-src.zip;%PYTHONPATH%
+set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.10.3-src.zip;%PYTHONPATH%
 
 set OLD_PYTHONSTARTUP=%PYTHONSTARTUP%
 set PYTHONSTARTUP=%SPARK_HOME%\python\pyspark\shell.py
diff --git a/core/pom.xml b/core/pom.xml
index 04b94a258c71c..ab6c3ce805275 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -326,7 +326,7 @@
     <dependency>
       <groupId>net.sf.py4j</groupId>
       <artifactId>py4j</artifactId>
-      <version>0.10.1</version>
+      <version>0.10.3</version>
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
index 64cf4981714c0..701097ace8974 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
@@ -32,7 +32,7 @@ private[spark] object PythonUtils {
     val pythonPath = new ArrayBuffer[String]
     for (sparkHome <- sys.env.get("SPARK_HOME")) {
       pythonPath += Seq(sparkHome, "python", "lib", "pyspark.zip").mkString(File.separator)
-      pythonPath += Seq(sparkHome, "python", "lib", "py4j-0.10.1-src.zip").mkString(File.separator)
+      pythonPath += Seq(sparkHome, "python", "lib", "py4j-0.10.3-src.zip").mkString(File.separator)
     }
     pythonPath ++= SparkContext.jarOfObject(this)
     pythonPath.mkString(File.pathSeparator)
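For reference, the PythonUtils.sparkPythonPath hunk above only swaps the zip file name; the value it builds is still the pyspark.zip and the Py4J source zip joined with the platform path separator. The sketch below is an illustration only, with a made-up SPARK_HOME of /opt/spark, showing what that path evaluates to after this patch.

import java.io.File

object SparkPythonPathSketch {
  def main(args: Array[String]): Unit = {
    val sparkHome = "/opt/spark" // hypothetical SPARK_HOME for illustration
    val entries = Seq("pyspark.zip", "py4j-0.10.3-src.zip").map { zip =>
      Seq(sparkHome, "python", "lib", zip).mkString(File.separator)
    }
    // On a POSIX system this prints:
    // /opt/spark/python/lib/pyspark.zip:/opt/spark/python/lib/py4j-0.10.3-src.zip
    println(entries.mkString(File.pathSeparator))
  }
}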
diff --git a/dev/deps/spark-deps-hadoop-2.2 b/dev/deps/spark-deps-hadoop-2.2
index e2433bd71822e..326271a7e2b23 100644
--- a/dev/deps/spark-deps-hadoop-2.2
+++ b/dev/deps/spark-deps-hadoop-2.2
@@ -139,7 +139,7 @@ parquet-jackson-1.8.1.jar
 pmml-model-1.2.15.jar
 pmml-schema-1.2.15.jar
 protobuf-java-2.5.0.jar
-py4j-0.10.1.jar
+py4j-0.10.3.jar
 pyrolite-4.9.jar
 scala-compiler-2.11.8.jar
 scala-library-2.11.8.jar
diff --git a/dev/deps/spark-deps-hadoop-2.3 b/dev/deps/spark-deps-hadoop-2.3
index 51eaec5e6ae53..1ff6ecb7342bb 100644
--- a/dev/deps/spark-deps-hadoop-2.3
+++ b/dev/deps/spark-deps-hadoop-2.3
@@ -146,7 +146,7 @@ parquet-jackson-1.8.1.jar
 pmml-model-1.2.15.jar
 pmml-schema-1.2.15.jar
 protobuf-java-2.5.0.jar
-py4j-0.10.1.jar
+py4j-0.10.3.jar
 pyrolite-4.9.jar
 scala-compiler-2.11.8.jar
 scala-library-2.11.8.jar
diff --git a/dev/deps/spark-deps-hadoop-2.4 b/dev/deps/spark-deps-hadoop-2.4
index 43c85fabfd481..68333849cf4c9 100644
--- a/dev/deps/spark-deps-hadoop-2.4
+++ b/dev/deps/spark-deps-hadoop-2.4
@@ -146,7 +146,7 @@ parquet-jackson-1.8.1.jar
 pmml-model-1.2.15.jar
 pmml-schema-1.2.15.jar
 protobuf-java-2.5.0.jar
-py4j-0.10.1.jar
+py4j-0.10.3.jar
 pyrolite-4.9.jar
 scala-compiler-2.11.8.jar
 scala-library-2.11.8.jar
diff --git a/dev/deps/spark-deps-hadoop-2.6 b/dev/deps/spark-deps-hadoop-2.6
index 93f68f3f9e3fe..787d06c3512db 100644
--- a/dev/deps/spark-deps-hadoop-2.6
+++ b/dev/deps/spark-deps-hadoop-2.6
@@ -154,7 +154,7 @@ parquet-jackson-1.8.1.jar
 pmml-model-1.2.15.jar
 pmml-schema-1.2.15.jar
 protobuf-java-2.5.0.jar
-py4j-0.10.1.jar
+py4j-0.10.3.jar
 pyrolite-4.9.jar
 scala-compiler-2.11.8.jar
 scala-library-2.11.8.jar
diff --git a/dev/deps/spark-deps-hadoop-2.7 b/dev/deps/spark-deps-hadoop-2.7
index 9740fc8d59698..386495bf1bbb1 100644
--- a/dev/deps/spark-deps-hadoop-2.7
+++ b/dev/deps/spark-deps-hadoop-2.7
@@ -155,7 +155,7 @@ parquet-jackson-1.8.1.jar
 pmml-model-1.2.15.jar
 pmml-schema-1.2.15.jar
 protobuf-java-2.5.0.jar
-py4j-0.10.1.jar
+py4j-0.10.3.jar
 pyrolite-4.9.jar
 scala-compiler-2.11.8.jar
 scala-library-2.11.8.jar
diff --git a/python/docs/Makefile b/python/docs/Makefile
index 12e397e4507c5..de86e97d862f0 100644
--- a/python/docs/Makefile
+++ b/python/docs/Makefile
@@ -7,7 +7,7 @@ SPHINXBUILD   ?= sphinx-build
 PAPER         ?=
 BUILDDIR      ?= _build
 
-export PYTHONPATH=$(realpath ..):$(realpath ../lib/py4j-0.10.1-src.zip)
+export PYTHONPATH=$(realpath ..):$(realpath ../lib/py4j-0.10.3-src.zip)
 
 # User-friendly check for sphinx-build
 ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
diff --git a/python/lib/py4j-0.10.1-src.zip b/python/lib/py4j-0.10.1-src.zip
deleted file mode 100644
index a54bcae03afb823da3b2b69814811d192db44630..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary payload for the deleted py4j-0.10.1-src.zip (literal 61356 bytes) omitted]

diff --git a/python/lib/py4j-0.10.3-src.zip b/python/lib/py4j-0.10.3-src.zip
new file mode 100644
index 0000000000000000000000000000000000000000..bc54f33af1515c0676bd831bc5a02f112b28e0a3
GIT binary patch
literal 91275
[base85-encoded binary payload for the new py4j-0.10.3-src.zip omitted]
zlo7$Xt6e0{rwg{8ms~f^hy@xQl`|)RkVhXnQeyTC;>(L9%r0TkuFFTTO`F&C;>9V( zuGZUL4t1<}9-udkwFSNg10N`)^I`>yQ`e%Jl1>X|moahokWy&bGYBoMTA$naut722 z^>@M;$9JMI8AY8D8*2JYqcwgU_;CytP?B1pezyp0V^I&8*k&Z?dA@9QMex5 zYq9^VQ{GLQ>|wgMEDn#<`!cyQGszlr;Yym7Ce!89=h@@87m7x#^=VYpvIODgNI=R* z!E_82i`=GGH4TmZ>}T2R_ovw!Nl=m9ecpGFdg;L=d9*I3j;w1zA#0v zZ#_KQQ&lm#XyqpVv@`yucUc2Eeo(~NDWomhc^OaqQB?7;F58~$S-P*0t{eE!x7;DD zDxBcr_dZyV)8e!DNt(5yf24-u#vP>=joC2^cJ0x`8lyR;UH_#%M<^BVLIB3CgLvx_ zglqnofnmmOz2NTY<#v_cZHv!+!#24!!~_9P7=G|*3> zZN8Q|K9S{7bJxsTfnuCYm?DMAdc#L=iNIpT!QB|Ey z62n@fdI9ZNONs1EBWZeq$pailrqI$`)j|#Ydfv1`8nv^^%gSc^#Jg5^y?1-*{33!# za}TO>@Uj0dY+WLkhvK}O!*E<=olF7-wlI%ADNaUh${hzE%C1g~x3&MJLG*6PAT#%Ka!SW5ct91rdo28VjnQTY-51XSKOs^4+dna2y~ z%ytu+7%ws~5beKI{>=g0LGb8sd}b>3qb8d9T>-?MXo;{iPO*(6puu!P@@cEX)9lp@ zn$13XZwQPr5LD-Tg&WdQ-kRAYrGp^ws?5R`IKcL!THXbjaOOk zv2azY-0=bdaHjr{L|y}2RK|McR7A^0CH9S5Q>JGxX%i@pk+#DMIoYG}N*VDjD!f)S zV06B)IjW-QHegyqF``v<7A;26_~O_+ToFu)5K94^h1|9=cs0#JsTE~V+^PURqLB0z zT=ijURdLAS03r3QKvN?DhKC`PKseJvL~~z{dd^#DLbvXFQl=^0y?za&8E)4Rx=%cJ z@wW5b(3nS|Ag@*MNeO9(s8OrwLIfmLMq_YL*+mnT9Q7E8&BMf>2=*DiWU(cfMmr{f zt5v<*Cmsxm?qNC2YTLX%>&N#GyF~%S?4%vXEYNW2wbF0TJ~H0+1iFMXU@?9z}aE_ zpR6p*Ad@DfG5oqyIH+S7FTw2@V%gS>u7QTM)#(GFS@&SW*(*70G42FVzqw3EWO0(< z9%4pRqsl4WuO*b3ZqGsH;!P!=F^M|W7eZ+cm<4*>zi zkwO86Kfry@S`2iaOoLiqJw@R%>Q@IE(ed8WykHfDO$hVfwjbvwR;U2dl)LJ-q8H1X zKiWymGGCKsbF+y~oyn{n+{oS5gZ|LoS~CzLkc*%qaMtYYNkL)4j;s`Wpm&07Tp?}- z8@V+YQC^Y@Ht*gI)o|_TxS1_aSe_1b#Dnyxp^R|#P zsFG=3t-zUag{L8{|Rz>s-dlSI(&;+dTpODtF~$| zAnYn)Rs!7LHRtcKfoGb$#-s|z#lW;LTs0_4ja|24G6gpAhs9daiBmia(I!D(B%H~M z^OzqQyLn@JBzsM5>`q|AH;OdQ!K=nb>TWk)GFwld%l8X2xV3#%qe7awj8dU;X7#_D zNHgN;_EPJ|BxPxv)16~Zusf#~RqX>hTaLjB-eq2zHL8jS-3u5zf|%4#4(g@?a9QY5 zzFAk1v*-xD>t4c{F?Tr6BsIf#1g2rA|3)1NRriv{Q*T-k`+j6cH!Tt_9V1xk1$k?s zQd19?C~C57oKGH7-HBk{^!0hMz`-h`-)6!-%G;y>OBSCWd2`$3dPHoAX z4|}Ws-H*K4z+Bm0Ox#9=rcPjq-lkQ**++z?`^@GHP7R#BSDVND!P=m(>J0^#VNKQC ztzkfPiF>B^UK>FTBVCg5b}V)u=iD51lZSXY2G7x}hQPDr_rqChIbI-x=Uh}QyN^3$ zBDkgT+~vJlcV6d+-9E&-qe>5>UDD@bDI7I3c`LRmTOosj#l!=jaK=YBHQCOK!rEcT z=D8_vvEf(Fr?;vG&!m@B_y>=j&Lk|U+WUr-3H=%ia|p&1-mo83x4HBp9|H{ZgGU+6 z^bKD{Ia=b#FptpQ%R0vXN+x7Z`D1st_moVABqq-yYr6MXsqVG=~w2H0sg=PnK0^l;;QR@l-#_}OZHYw5-C49zd}O& zs2vW-3{6LIreY)t(rJBb-rz>&Lp)5~f~j&

=vSC_}uBpBv(c6s8XJM{5qj)EZIJ z<5(LT&_HGtF|y4S1P8@0INWRy4mE32q`boZwRXkbg%{0Ihp!+d8JB^A+myAuhA*b{ z>yUEUYl(LMx;k@if>vE{u}Zs(p#;}3&oAZ5UhJQq$GJx7zK~d9ym_+K_$(C<3#%@5 zXbo-MArGc5Ph&i?`G%C}Ki$=CJFHfwhsC;oIAs1Q&& zLjwYISPVeczOuC-0^RJ?TI|>K{MtZqOkrrzqB~b>agIgiY|AB$q^mNhrb!4JT~HD! z^T?-c1;Mc@Mj9i#Ys=MiYu(Kw@xg6P*5_{EMA4u2FFW^~87LqYB;NFAOL=~9syCdD zk}Gk)^8@v?@Zj1kZd!_&y2#rzwO7307Kim%8(BAIW?Cr1<|{A+<74c_9IM}-exYO` z1v~@z%NtjJ8x9F#E%8BDPo@sA^vh19o1DExAPBazT8CMwXBG0@9nhCpQUO$9t(nNR>1jzY&my0WJGT(x2)6RV|s^M^`+tr6?HW5Gx`Kw1c0HRV3n^FPoyH*2zdkoTab9 zW3qH_-^tysm;lb}(N|L|-EhH-9)?FNZb+u?(I9wT44>}(Am|;}n9jw&WHFU-MjG29 zK5SC8uj}3lNM|i6+d-{iO|c%wgULgZu!1N2L?X8@rIh>%Qd^Iq>fo6&7aKfxO{-l>Fnq2V)Pl=N(nakEri z=LNt~0z&(Ia&#l&rTe#ez5Q*0KYL)O$8Tr1!wQL_@8{Z6V#6)JW2ewDH#eGS_&PH; zD;=e7F1$$@Oa~v90-2TH;#%&yc{X$9W<)*SUI2VM1zrH1guW1(fc;;@Yy`FWxpcLq z9*$}VK4#;fR;*i4yL&sC?2N}JjR8r;H1kQ|G=;>2r!jPtI#_|0jEQk3HKQ8_uF+ja zxZCdzHJN+${nu``?SH9~{o9_riECES-!?&-(4M@WRO++>!nC``NF`a1hDcBOBrBmD z$dwF!8ALPk^ailu6ZOmw^^miQeCg$WcK%E)=|ON-+OG-^JM)L?0Vv}EWkQs2$Nm%e z!>{#(QZNi~vB!LW3qnbCr*N}`PTa}v!`#)JItxdkc*n@YVx!PS@)h@9)r*PuG^D!E zzdscm1y+kTgfA-73yQLb;gw-1_Cdh{Dm4+NLyx&=$2<|+>lnsmz_=l|QEy)qzejQ7 zc9h<%n0s04UlhJIvGs74MUQWU>KwbdZ~>rA$dVssrY-{mv#8bn*ES;%JxA~Uda?$I zLqo}=)Ue!dS65+6(qOCgM)Jk|jRjt|cH6V~KT2~&MOqAYOwSd_S$4cv;Vym*A=na9 zVkD?e_h-LSE|&grlEj?~E-$6&=s6e^IaJqtf(bPsDG-s?)S2;3LEqf(m%G|`Slb)4 zowyxEn>XSqB{+9(=S4>lk@}Lx(ZMen#mY(bWhKyZleCYR%V6+LUd^x8%4Are(Ji;e znMc`PK0YTlyFZdW%N-?d`ufI+!boO%*b#;XP4_iHIgoFekmkYM(H23hNK$x^(@?C4 zQC9N=-8+7tk21*u$l2`94)Y^W5)epdJZ~I&>}=zuj~5v16|G#rtY5K@{bx}%K4t)K7qGK;f! zfa6L&{*WBJ3#X~UD$0^=>#f`QNZ5}vNf@5nm|IoQIK%6=Gjr$9ukmyp zO)PA|EUDuh88&BBm}b3Fk;;2pSnUtK!zT6<{@kexSpsw6=;SU2Ebhhv0!9tJCUPcn z&QoLLl{di%R`#lL5VA=8l*Q#}121DZS*`-0Wz}USKKd*>Txs70h+q9a1p#+12^jaW zk?Cg|5Tyg{!gVt7+Yr7N1*L=qSg*=WsEtQ=WoY5|1-E5F#iW*uK5ZfxtBsaQ;(66Cw1Dmfs z`qd#o?%kHnnQ=}&DwgJ>w!4!$HB)cPqup{m%Sv1mg~Y1aEdM0MisQ*x`B`b3$L4a` zR(=;DIFA U%{|V;-kAC|~ddl;nn@!THBb6^w>=N!StKMRJq%rNcwl+8U(3b%>Mjnp_N4h|xbQg{ zCt%Hp=SnaWw})a_CpeKY4xvV;WopA3%P&yC+|gS+*SKp-ZNKV|d_K7crI;7&pbXEB ze3Ms(p+W_eHA>(Y=v}4W);zFF(C3P=V5_3SXEQqurGK{+e=P8%1o|(ZpybSN3BI)_ zp&g`|o_TZsgs%nX`uG9SdW<*2fPk%tKVOU*BK>G!Y+V-<7tzBsXoxl_?loGE?FeA5 zpnf%m|2YpAYf+~y$G2xMiE%sX3WrYMa?p`MzLi-}fXZBA4bbIJyOCQXMttrs zWPFVfVGMI=7)&3stXv*Kr$Nj-mMGM*856sJTXH*LfFkNv#bub&y7@uM>@s~9DmK+d zh(5ifh~>VaQiKFvs$)l%1AT)$oLEdo6dP?=N94q(8qZ3w$Xb@kq)L!hyURibDq{)~ z*ll)V1ff*HXYSm#PJFJFRK4^J!Pxc9%Y|PSB=_0kF7C4Vp7S@<(@#g7j8dbeEF!X| z*ZiBe*R+cDCkzx^6J#lr*QBf861NOTeGyMEhMFx1_CPhQ4h zRyzUV|Ca(2?;>*Vw@@GeIJWfnpmYZcWHQimweTL+yJ*=c^+#of+ zq7ZBnihL`u4-;YX#4d(5FH=n{q$el>6P=yA@u!iYgK z5j*m7iR8X==J2$`!QSHp8Nw1H@=3rYg=io~HPqJ^eYG6{7E)OZYAhZFxA?bS`;IKG zErNISq+fK)SHDY(4Z1fx^_uxNZxq%;XXAR);X_Pe#(Vl=GLNs7CZQXpP|SO&Y9}ej zlLKkz_R{cX9z=wz?a*Bf(z*bTz-_o3zj)M26=nbBW{&v&8qFrJY{r~#&zxXpB6-o2 z-K?wD$hMk&8sK9-#p|}xCmT)4P;T{rJtQGTzp?!&a-VhU>(x1JpRjrR`r~8KeaVqx zD`BQ&%KhOD*W}ULo0PFph>VH$#k-k>XUo>za(mw=9sAs%ysB;^MsE5X>uNBmPSkVr zU>^t;R%e+yDEQw<_m$Sg+u}D8vrH@XXK?51u4h+pchbdg952r5Y!t(DGa4ks)2ql| z){sbO*1vkqf&y_^s)UmxW0wQ!wScO3kbr;yLwO3~<(Iq5QM?v6~nGEwM+x z^3<$3*_`_$fP+<*I$J2ukyDZw)Utt9fR_XoQ`*NxiE5WtUduK|)}XR7sWZINQO{i} z0h~rAwlFM4kSCUC2ZzrdhdHcZq=lu8iFGlpY*(nk zZS@JHW{N9AF#WZgtdVXBa#y^0`p%k-iAH4+ip*TVLU#yr*9}igBJ53!Qy!G6-)!Lc zQbbb(+=BDfNu4zg<0xa{DDtudl#NyDFraDw;v(?*`oFp0PEfw+kO;i*=i85=tC9V4 zr=(N257Me2Tdvc6A0WeZFSr`n;5eCH>D)#zqO2ia4KH z81J3Zqt@p9KJGVwd191XI!x6vTU;$UhO0ikJ3?~_s$g_T0%6oFnWCvl2((!2)eX@1J&Yp85|8vVoAPc4Vj z@IG8*fXs;t3Eug2*yxospFJv4PX`MuIw-5Npz7C6d@E+nQ0wf-pHtVJ@7MK=U+s$* 
zUbgR#X=Ml-uZz`wVsC9Gl9^u(qeVCj6yP_0)naap_lpfXW*m{-7L9(E*IH@@Cf-Mut6Y?G8t#uzhYLVr(?7X@l>cja8H@?$D-B4=@xzT&Z zEoqn*PTNq|<%%>aF0E!Yne($4Q6xNcYDqIO{L}{4!Pej2%hQDT)Sc8@(`}2Y&VCOl z;f*E3l#dy(Q{<6^s$ojfzu^~dJT&vdGkKd*eJYs0T6K8O22t$wKv>5vZnyPUFcI!{ zH%EDLD1OHKa~LR&mPYmq4(vo;YaCuO92cr6n7Fw_()?rGGDxj#H^$` zU-$DDwPp1*?Zd7$LT6Lt6c1p;NCP^Rkhvav)-X^vis@Y1sYHm9Lm{PG&Py(km((|s zjagPi~Z{oERm=sCJoJO4@1fvBV> z8C|rl;$yH?mr6y_ou~o9=@$zTNgM6_S+Lw!w+PEc`a_+@CZf}57Y#91uKFr_$=*>! zm3*ohCM)bL!qGM8PSoUqG?mR-bhf;D$x&~D3M6tDfak10_T8V!M^^`lS4*|(8cIZ> zSBgjCGup6f2d*`D5T+ZOp~xOKRz%>+ba-BTA8^}aVP2Qljr9pU%1fYtzT=%CdS_nU z%IStjzech6*Szofl-cTfbSL~7GYqDRPg(C@y#g$}HZ0(}#p?QdWr^+m`wqc?(Je@P z|291xeaY3-dWnz90NozfW>=sgKUCZOSHz6sNov_enFNp~Wl4NQ%OUa!XRE(TxQ-E6 zV~UlUzmhNrrg2PYFDY5iUIi)5sO?|qtCdKbc;w+U*!fFIHF9ee-BI!9`PuSZWmz-Y zUn{j={5G++t2QYL!LS2oC&I%>`%9rjoZ@V)P>2i-0hPJoVzgW>5f_o!yYA$jK8C~+ zuaOy)9FIvF=w=o?J_qVmMWp~zW&lAnlpP*5Mrfgw=3ceth9Y=SCg82vU3%L%t=^2{|ae4kriTd#as8+k=`g5f930>{K_W*1S6Jeuo!3AS>~9c+6&x z-OJ7Q@j^iSMtROZjtY!=l#EV-@_W~|!i)^dxthSB-txS6RZ#vnD>kJi&9v8OCZ9Iv zxfI}<0Sb9cQq{F{|5t&WZ32O|kuLa(FHBk4JbAS8I)`DOkZjQ*amAPy*P>XJ-W~6V zDoKL(`9wsp51N!rELDWKzH(P(HRvg94EN3CsOA)S6ZI2&VqPaOQ1j z+&2>TjQYod_fwL+>mgAlm2>Z7nIWj8m*S#@iJ40~wI$4cCbT+dU>LE{mQs9Cd7BTU zju#oSS^9nU`!XYmRjk?Bf()>Z5MHR{wIq}i`%eLrl+J9VtD-s=N(<$W|8PC4Aa_1i z#hriXbn(?n^uH_7LUMUCuYo{Lyzs#TrFFe~ab*+0A+2>J*0*a>2@6iFR|f26)J)^j zXQ#^}E(EB;VT4SNCl<6`z@o|@W_F9DkJcNdbXlsnLdjJwxgw0}gbJ%%mwD%;JE9G% zr{0^ZOBh6Z8P|(+TP&e_FXG)jf!SMb&C)u#o`rYy3$Zu_R`LCXMAyGk;MP0O5Wvkj zw=5tmolWmr{2S0~?ARXfYd~Ns{2a7BF@US|I)1E^Z8B$9MI#w8Hk#@O7x=Dv$O@mM zrshxNyHC>a=dS^Kazvd(pZ5E0Z*^}?8@_VqmTO7>B`8%fEZ!b&v5CXiM@FuXi_L`v z6W7mbCy7VXg(&3pKn8}1I4e4Qr0WyYENav<>&D8$Vtcfr2yFwi3uKw-K(sNY5Jh+O zNkqiDiF+}k;QqOQyk)p=VyDh?n)`3_3V~4aDUQd3ctyH-Fc=MhwnN|_6!6V4zTnVJ zv>M^+h8sbFPK|I`HWnWVNALH0fQhTK>-*Q)=c1&2<6mJ11CCf5E$_QeeK$N*m(7}j zI@+c=3hGSlM^nm)267PEvt}AxZYCeba;f-44^a({4ieIgJehzq=)!IJj9GxDL@QJv64ukAS#y)X~&Wf=52m!j_&HT*rK^CDV~?(l{3EVeDq<5g0$a{Zl}nyERvVFhL6^{8{1ctZg@cT4jXhQPgs`hch?0^~QJ(V=q1HEhc&>&x%9L zoePfzjrbRe^U8+fFL#I$FQfIK& zID(?6EL$*j>Fy~t1P^A*px+gdQyLx|7c>7Ao zlSwkORF=wlT-CUFYW&0kEwJxJfTduk$!gUeyhQ@i%B3tKVGXpeZn#}4;~FB3%c_n) zH_zAX2vCzXf;vM0oubfhZNTasoyIH8V<*g^b7Z?QF`qF$vsH&``3Kq(pW$h-Imqg)rzO8Lzz&4RNP+_+YrkR7+jv7Ez1j&)F*!(jgb)m*I?mC|A zuU@oSq;M-~6Qs`GRKrqhW|S;1q|aHkiqH_|oDXe1q(G-Ej~#mi&Qc6I|1rA|CY~~I zb92r&D<3!COC!uETVe8{&G#Oj?YxM0pRz=!r2O`<(}*9R#7X4ms}0F2QFi z_>s=_G#4TwS6`XrLL_yz#VB=Z^%ICCb2dsti)&61?sA0J-!~;v9r$qpBafs`kzU*| zoW;vC-bfB3Rar0e?qY|GVvB}~JK?HWsknb{2mA;A?F!QJ)m|S#bmdQs zeS=hGOqa$!9D|7hsY&3pr|`E=y*=in`Ok=h_93mwt+(GW3seIi^5TX*F3#jGXxiUD zJKsLp04z27x2$3JS@+MjfNM(?Uf6|zs_W$h+anxr;WO&XAVW@h49;q1lPBQn{m^Q1 zoY|xpYIL}Zmx9%*XfWVT*rYa*h70-|C#)IFr}65V#sJfNM-?mQnjCnd;FLQ_QiZxG zOBMN4Suwb(2L)0e$3LY$*Wtgdxj0#8-;rp7KYcTSAwtWOWfjiZin-wJgLb>tQ+ z4wO1qS6geYA06MfdRB1`jzT$W>Cj4AmYuNKsF42F+L;oc@vw#paBK&(!S zfW?i|@e2HC>ZAaVIITEiF|N^&jyef57J$uvE$;qqK1V+OBWd82F@Y>ixHNr zIv~O0hG>3^)i*E{9#~ULfjFkpV(X%f-b|t%s!r4dGIPU4o2@Aof>whm&LoS%)6tOS zSSqq$iw5b6&G2ySa82%h^Q!J^k#wxVF$Scat&`{t(aECPb6NNmC)Zb7iN816Q)0v3 z5f&!&Y5Mg=b=r;&4Aob-HzyzXsi*RT2!r9l)e+!(yvkt+q4)U@8-I{XTb;t)px*k( zn~J1vFc7Um4vBvV-N1HlTR$WyyKVd-$a@~tV7!#Graklr-$+JP8(W#VT;i1{U>$6n zzq$o&kq%VG^ufkP8uAGiHMqeO$E3hrnw7~>?dlgvPJW!khE-*IjcnbhWEF8~(hjxV zHYvNO2PJ)nq~^m2W?r$ZQS9LVI81CMx?-kfvL%F9!QEZRg;K;cV6Po(ZENdkdMZV) zysc~2sPSeLC;K0QZGj3aF3UXBy8QDOLIUfY(gt3)&mnHkLc%Psgug58*bhC!lg>G+{I?repC?57mSJJ5uZr6E zn#rn5i|$DJI0iFHZdlqsf}@`UHsC8#d6*n(#Q1i>5EbC@PA7m{vh4YFtnN*D+uf$a zCz+M1iz?E{7leNuw@n%);3mZnDz33$-QTafp!iRS+Yy;#ZQiwA2{aE+OF6LT0hGWnqySLrg3cNy|L$U 
z%&qRLD;>`p77H4ILVk$Vy?$E8nHanWp2i-f=};RO%wll<)F7ovEwTpo(unzq9{YK)0>u0rlZdk)2gAWP=_fs z=LdC@P5$azJc7u^=oTZC&Ng~QAAD_nydLskW1-r&qM-6jek+oL?El=%jK7s9QfB#t z;LNSX7`7{vNjov#Qc@JiSey_PS+lHZCxuWjS-C8*Ma_rrs`4 zlo7hPG~E4*;qDgkVl29T*LWOgnqbzGnyqNonu9AMD(kr6ARGj5bKK0eO-yv#ix=kY zHZNZI(x0K&usG`zZNJR~z8g;icsQ4D0(J&sqFMHW7wQOR=EES`9|Znbv5AHKC=5M3 ze4jM!Zd^P(KOgVnLS^^RXy1Hj)Q6rZD$dnRv}}lyVDgaNq`;oA3959|a?#7SvMv$c ziNrgJ8$zf-iz-cvmlZIE(B4YPxz1qBN{)_ln;Julakc6pm37Y7TTlHfpw_KN5kQ#m zuXyXD&e#1Wv(uueC2Pj;bk}B(fZ~p zmcl~;c(n4$F#VvmB7P$Hp{&^68WRPPk^l^~nSu~cC>D)(R7{v@*HSY+p@GqO`DG1m zmtwitfaDybc&Y>CK3!;F+p6b;M2}c329uTMo+Hb?>>BfwJuv79SL|T-DpjVGHY-0o zYiNPie^KBhd9gS<2aAYpb<|^Q6ZP3Ax&%HxiU2;pfDj6bpD1MinqKhGbDf}=b!OZQ zOQQ%syhA8bR^4CLxz4U=e_o%-^gj59+u11DkOCa{=Uj?O=;`&~>Yx7M;NIKW-B6A? z6BVS@c+r$E*jjRoh9rBC5)ysM_muuqJV1}zc9nV`WfJh04?bybn_Q0_%|3E@)-fxD zW7w`7KDIL|pTjO-ftayr3@MYRqG3@`viwX*X_@VvKc~&Cp~0M7)a=>wDIA+cv zaaRwGs+72smiej+=oMk{)ib49?PqL6h0+VC3##vtLx}>XdI;SipDzg&r2a3Fmv%d| z>5$v+rcAxKK!sVEO#b|YnyybI%YyFx=FD^&TVXyd2Df=cgIw4FuTquDVi7#za)u<8 z5<=g`J8;s_8XchqHuo3v)0q&pWg3S(@xEuN2 zcN6d;wSC`Qw!V;ICi9o?Qw!nazDgq|BQ)6)4=yx|^Q<4~;l5Y%_*r zjBL%8^eqZFsHgOMA%A~_l7U8NPCYTaBeApn0T6G$uaz@UIOhL?RSD|4pdp%hZoC^m7*OPr0GX#{fdO5 zk=ulPnEm$n9E60onWCX(e&WzKCykdW=oz>KC#|`+`I=3&r9CqSt`v)MQXJ(mInTo?@UJ3=<`zFeJu;y}B| zZIQC)E5a=i+V4>o$tpI$Ty2KWP@g?aOpcvnMXzdBsOV5gD>p54T)JxN=>6)C6FY7u zqm?ZZSV`+IPEQmw6If*obl51&%MD2hQ0%5AvucH6&sM4j-9U1a)L2z@WYKd^MFkhu z(Kjx}HCDx?++E*A!wbQxk6foOFfVCGnShT}wTsD8%7`K!mXGa^I}I%6)tQuz)M>1^ zEp^-xXqQ+xRb?PC*Lo}0WDK7Q%-fa_O1AK$@ad0pfd`|-oCTr)BCu?Q#1TKl0q}WBt$6Shjlg+a^*KI2YwG7Z zGQf!7yt^;#ETQL+TOzk`gff&NiYUSD$#D!eati6d7`^eKBVkE~fT)Uf%@q{ffixJ| z(M4knA9aF%^_LoV`0nS8uh6q~yh|BAR;#XGVbBJwy39a7-JY|y`RDMKd zST&P^p5%WVTS$9_5iIOU%oIGjA?BL-uQQ@MFo=RmMHdXpDLFf;5Bw%_P+lg_{hM>Y zJ;i;=y%R>K8=er3_ecV-df*QUjfn@g2RCM$N()c3h??e1^DaCF&41Q4zho!@v!>P7 zH1@L`HI4G|@&@YZ>;4Vce?W>p0Xqrb@UT`8j!4Z$f9NfpsPh=Mb0sh>JGv>gZ@@-Q$8aOItr%l%QzHAinI@?A1wJHlbJ7JFmc`zZ>9|^rhexxS?c~B_Zbjds%yB zQFr2w!q8!=%W8(R2yc(fpMKqBe`P;SJgEl&Y#L(i3g0jk6g@mNJ>--rIa4hPE7Met z>-68)t~}{6{y8QgfbMZ-GqGUsB#hQh*xjk8hMqhb2a2l}W|Pu>>_CHEkTPiVU~j!R zX2Xeg82h_a4bgjx{0g!QrYnZ9$o`524V-W1;{TF4Wp!LnKyeuO6_#*7xz;2ujvTHS zBBsxSpx2D_r_52=_C6E#7irvHXkan~e_(-QLWOE7(L&tykF5I#^3_Rtc`*HD&gw9vx#yPd@qWhx}Qn#N~_@)I{_4G#r#n_~Z$Dhd~we3djZIu)*-v0E|8# z?-c8vt%9J+ecXTFheIK%enz2j%;s>jhQ$oWTx}aYOI70mgb@TOWHN_XLusV@wm@zc zd@@AAYh2)c1w+I&h3dxH_dlWFWCyw$>lc?TgpOh%fy0!KbK~SRY_7ore)ri*Pvr;6 zC;E;4$y0xxf*yr4Duhm=vcEm>oqW3+t%jHxNy*itq%c}qQMWcNSEBMK>G1cm-95?- z{$2o`dsH7H3d1~aF!2%801MJ^7Tv6_i>E{$ySL!5fFp^~H!5W2HyIr=j@E8|=jn&w z1O=Ok^C01DxSNm838(2D7No#Jiomq{>p=YecdQ&_H}Pa?ZlhyDIC<3TKlzvsca3f< z4H6&2R<2li0->WZVC{k8aT2ReFDZNWk>;pChRFp;a#DKzwnM|SgE!}kmBke2N4eN1 zm_+JOQ6S|C6#gMg#YCidc-QJcIdvT-W0Rm(d|5G;&hQm{GtnN`b1&u82ITs#S-8`p zy0{-alo#;8zH0ukS@oK^T>=t0Asqx-Dk5@bs)`AKRe|ul1&oWoekwn}nKig=q^Bh{ zi)@0Gx=6I3GN}x!by95texPY#JsJ+D z^}W5`H4sWf5I8%3TT$J8X?DB_C!!D|-|itne^8)1_@OtWD;mm1KJ6@i1_n=onDF+w6Sxtbm3m3;YaD9XRXx}Ho5AQ z4@WM|CphWJT(DZj{PGq5G+%{3K!##S(*~Me3+{dW1E2pRG$iV`{uLz$1QenQ{7<|O z`s=?hhW~GSi~b*;zyGgghKT>`+P8PKbGG~E+W#*i!xn6-e~$h8E?+RYkCB~r!A0;c znB7wU!}6N0*uPCVyz?~1nx!@^o|v8_Pv~DaY(k2tloLMtv=o`)Oxakou&1Q!tu{0- z(YT8wa-Sq53XPP4qO8_9!+(f5!oU>bAzp6958?C-Gvp8ix`{zgMQhXkp&>p_>XGYv?#vA(oQ1^~elC|l!Xu8t2ZJU+0ZQEIC z+qP|IrES}`ZTr^O`|Nv9pW9#W^S5J+H^%xCE1nf=Js5M&H>STj{%PDyqB)}@f73aB zF`uyby#CbGY1pw-?PdIrbOhe8dcA%Hc&^YOOtWMMf=c`;bjT~J!gA$ zqPRJ_nVKp&sC<-{36|;p!~_x12f_YsX0ht5t`n@%gnMr5gmel=Zp5Lm;^_&5updVK z9rPo!u|46{^~BWnM-b4G7ebOB&gxfV_Hw||Y+G;5k~3uRCm5Dk!It(^+#tDu_SaFa 
zi9P+dcLDokvw$@C@SLcAIrO_>zFlZ`vEJO03U0c=}lb;PO%w}Xe*DL&kWb@A_=p1Gys?K&}>X2F--u{8^-~MCrOq&+~q_tq%GWeNLV1h(E58P9)sz? z9JD^)ZPh-4T7E^R08Q()CLDfLv1RykZ#}#facQ|1U=2)KTe4?K>FG0Pxz@ir9!;g# zN@H&~@ew~i?o>6~9oKBMLa^#LHLCf42<9IC#OmY^^m)_DPN}h}!g^lavFUi&v2Er0 z)WfPs=&g!4hp#b1IPyHcbJa_8`gA^}?KoV=2t)rnmso8_q(b?Y`GuCu2vG3oh;MTv$5^L{lN&-vS4stTU7( zVqVwo*7$g49<`PGX_(cD=P7rB1Hl zcI+dLTZNbe7eQRvEchOQ;CR&mxmZ^=RmWx3R-gP$P)i6sLlmUjQ{# zYeKp^~{t5lhjI>-}Cd_ z#~+6oZP}+OcBmehujX(e7^B-@EmR_);At*bZ+kIfH#-zYnW8ZXLX8yDkCe#+!7OMO zry5hyI8*QGi^_xiHC{B{=$_f5)?jE9e?t=yzu;)%CThF!YkernIqJvwq29}0p||e` z6e=$C-beiS#zq}o84&*|Zb>GZO&31~Q~iS*E`p3Zs_aEG)2Z;~FZf@mS9-KE%ih|@ zxgeE(G6@nzrGllo%M0IPb-)YJa%mZ(Rg{9TjB##Jh02=jM1;GeAPPrCwzd^>klW;Q zq3{KH)RkpN%c&p1^9;d{NTFQbBN_NC7k;xz?Rpg`&Bg$zg921CN_!*rmx)+J5@|m) z?5tQE- z9|q1u6lKx2_6bnYq|&%lgx6S8H9_Eftt6eeS1mUQKH(jacFtI>gP=6E=20rI&j^3- zm!TGt>MQ3oS#JeHRQ;L{Uu?n=p2+Yx8oU7HY)w^e?^cr&TBdov;_3+ zJ=8<=z=&XT22E=hM0-PlkV>2O_Xy?ylW;8dUcNH zD=Q=s_tqhf_DS4y;qNab?Qp{X#wUS*8hS|aa-GQkj%}!zbCmtt-bs%P<;SI-&r&}v zjQ4p(nVPizt*wkqEOl=!z_7Dwd6T&+g+_#pWS;b5GV+5{S%#^�nWEj(}ahv_hqz z%Bk#``WR_doFA(ru{gM?yk%3#Goai+9{j_txxJpb>%eg6QL6$eUMg}ey0Y~P{os$N z2)|HsW(wDCOdVxX2I}COIWXPYv?T#9yjg*IW2v*ay3c5PB^{1tzZ0Q3;L9l^Bpc(LDC^`3q~Q8@PG z`IkGB+JGpE6IZ(`MUihFa!jdHB^ZLMrk>dX*$Zi6S7J-Fcb5gc7u7}?wZTG=t=xI& zByNNNci%h+BLgb6K=}grjEQ*#$r`h`mn9k_zJX=gTF01wC^LW| zZ5pF&EXXFQEPbS0LC&R;3K5l&YG)lm*K919FoyEQGmmH(-U)J33XIiatt#Xu%fklX zZTp+hl{i2SmouOS^iFZHY@S!Pe@s=)+GYrjMH6m&=|UnKWuv3J!;k#L;o`<;2+dO7 z=L<>`2 zjpUL$J2MChe*|^#Y>bUn*8hR_bjCGF=FEzyrL9Fl5O>D!->T22ZL8L@s)9ibjmB4` zJXB__(CB4(HWOJ3_UR%U&mhuZYI0Itz0~YF()P)WWjOLaZ`Bj8O~^vI_)ro#btlZH z+_oDN_SPVTVhbiSW&z#gqoHEu-3CHcogm6V!OtT4ZT`f~<28u8hW<4%ae2vI4k8EF zVlAWGNyTnSI^+y?q?!ATma+tuzh#(pRMZ{EZrPxG+0m6E$=Zjf zvtih`CZOnh`d~zQg5@$0y9@0<+ik5^LEe~%#ZqQjV=~1&gi8v6z}u-ef2uKg5`z5- z4@|%~e*&)2gdis`;cS-4lY8sTYzp^q@=DN!80;HUQyqsT%fa(C_GI)F7KSfqwBAAR z(MdLBv!E{CAtfnNZ86Sz=|;7hOd-Z>BLalTtlU~_c&a)AK1HJ>w?^?pLZWZ{aNL}z zJq&3X5sgyCG5)dsTvJ*b3b81BApOPLuVjq84;3gk--hy(lFpr$mD31QkPb5dunwA+ zD7G<{8JaFop~;v+%TC2x1OH)0v7rxumn8HguOgyCi(f1}+gD=2p0oL>|L4s!r)(?^ zrDz}pSp0R3!S-N{Yq!c>?`l206z8Tg+EgDDB_?MZ_DBnbKRLFK7Rb~k7by8T3_`S8 zI%!!4`=-nBbz__DxTPqBzrbx@B!b#mk2Kni3)(ddes>H&l`O&bZuN8gN&E2U^W(UM z^D%xve#~VmhKiJ%Yh^BAL%SM4zGmx(6%288AYj8iIBD258iltX&>O7!D*^PY=hxx= zV9In%H#@tfSrLH<3C^MX3_n*BIPE6xZ2J1$aAc}<+Be&sz%u|r+qcWd>u5?&Tqkm8 zd?&>-oAH;on%A5q?%^q?RW!=VpiK4l^PIK?Mb?gTr=+v;lDKx0|UGeW8b`r_ZU#J>aH>;SfeKaB82(Zt$ERhpb($DB2ClbGO{E>hvNiR9}lo zHb6x|cR2P<8n-}_bz>Gp7ZOEYjPTMFE+XtGG*!fk(a`&cCFIJm^H{ajalDCFu`f-b zgPPV~B$_h3jQXW@BYQk>Rc@sQ0!&qSh*U0Yy=`!1TcRA~peUZEM+e_If(rG+SBOeSB)#2|GovO_e$#ZjM$m`WUJEg$TZCUgNriS?aKWqtF-#ZZDF}xUEKC)G;eQK}Tz!cmHJzaX`sm4*tig zu!Zs8*+O9b3tNbP12FtYBPekHji5M~oBnZc{cn>(8RJaVzt$rlGC-B8bj&&{eCN3; z{HU-7`xdS{IrPg6p>!=@I)Jo2BoG~qJZn8-S-cYK*Rz*k9I`RjC~6gLviQ_;23!BE z!_(doXoVBeqJIC+4xh(8G!2Su9*?IIn;{u`mxE{+5udoI-;qpI?||bfxX9Pk=LcJM z4jO2K@B1B`cE^G8QW{UfsYno`(O5^!4-nw$WQp|6?zTa_VdMZTzqJ}fo~0#})6v#iFPm7E$W z&~aBuA72@HL|PF168ZHy*VShm+qD}F?h!xr$|xDb%<{;$I=t@RudYbeN#|`F5Ld|0 zqBY02+-4*?za#Ae%yVzBn(JUg>rM-&g(p$0u)zfaQZ&Q>%+xXX8&USl^`ja9a(}uz z?M28{d-nz~oZm_j7?RWllHfEPCyE|~*k*sK=pz!7|?pyyd2 z{!SMk!^SGG1|i&q-0z{Ht% z7Y9-Df~un7F|%{N)9l3@R14rZ>^1+C;x2tz^u~Gci6~c|oJNOB9m+?^^D*%USQ;(M zlLtV-IyGRA6`g?fW5@ZDWb;vg=F{ywwq#Y3KOo*%rYx~UsYoi#t$mQuNVn)-*>{dG zwAvS>dAxaYnFs!!cxK`y=pG-Cfa1fOLTrp4K4B)k5K`*dDPYwv=K9adP7sb< zQBag|U(cBY069Qxb@~iHtSz$HC{YgnGimOz>WCj5J`7BzoW&ion8hx4&$r~~`)+DF zc{mOEu}c@#(rqVcsYo8dHHzCpq*)8J)N$KvfPt8g+3yLZY^}@%;CJo9i5%sMF@T;> 
zDP1rm9$fJ2_S~g=`yO1*I9hzCFB)q&fd$1ps*-n%s4^^M!^gah!IOPNoIozqvs_K7&DXs2l>t7l^oB-Ft+9UMx_TfK4>zm8$=2pZqW zo?=U8vevmb@kNI|g%7q%1IX^9NrOgpO214xXM9F0Cz;{+YB4%L>Z)_g7Ky?sH&y|4 zPmF8iBqXm{j3x^j2vx6T0#<%^E^U4f9g%0e1A`_A$`-c;*3}Hy@p>g?$sNW!Dm#fw zlD!c)uw{LoG5|N07n8iMMysYI&c}h^@t+MnM`mOD}x(LB%3P@4E9tru+%p5E?!8 zK`cmtkx09db_0glN)RL}OjMPK~5%K+(~9eOjj z^oLrdr~lgQEDtPF8!*#}CgFQ@k5XzPOdmj}_gUbDe8!JR0CbfZk=e+kgv&73wVnMN zjiI{(sc4x_Qt#9(ww`(pPTKF%`=moyIZ|L{=Zv3^L(?7WbuXae9qLwvyvysGgOAx8 zEfY5EZpqrKS{n~3&x1$0`r#)>=gtt?ZRKN7L?DKoo*c7A`Lj0RkLBU;F0oW!fPZy6 z!B#x$Mo<6%aD;!??J)mExBL5M@E_9bAOZe*@yVcy|Ks3)y+QqVvvV|daCD$kR)Yr6 z>S$AK{-T`lnO(Z`|z!QAFynVp${YJx{<@Q>wE( zq`ue8KsZH8ys{3`_yHwF2>h)3)zit_i;pvVOwYD92wE9B-s!2X9|`lTtBbs5kz0N; zBVh_BLId#FX%6n4OH^?$2HvuQn>-he@Z1R6x%hez^ZYwhV`w}NVy+_kI4Ci3ds^T< zuP)Y@M<T1^FmZTU5`?8^kk!m%LDE%Je?pf z@eY|S)Edc-01HzlAeWMGl3}q{NW$x_+Y1sEg4BtE3j0$gf=3Y+3lI_+s_cYGm83Nt z>_+!jfjWY7$l`Yq7qWKMNYxZmv7cp3{0=tgjVgE4FW6$1EE7T_RL|Es>fEtq)0h6? z4YRpf%($jkzqL$u0vPoot+{_z-$eS{>?Q(|dz#I*8k z!}oT+RSy=a#Viu-R4oA2uJ!3-574^qW$OfYs$PQQf5?p9s&sU zqpVtpuQB>grhO1Elz7o>ED@BTVu=7htPnu1+8-B##o)I08RkQ`wTVVvy@}3aVK>28 zHP*%pq?w}=t+MRWo_*~Ziyj`Tqq!zKJ9FsnP&>?Y{1v(eSXEK&<<$3-D0UXSePy3` z8E}fFMSrA1^q8!U8p>Kx0jyELs{39UL+`9Q(o&iSmwN%uwZL04VqTueDt3SeuN5nY zc8v#FJTHOt2;^Pr}!=uG@~92Y!ziVnmW0qamRCW|Z$+-D+f*-)viTaPf3?ta;G2J<-*fxvyj0O4i$hQ%J~GG2(=k5aS7_x=k`9 zNWkYZB-+8J_7a14^c&>j=sPBmyk@9xgKP*Q)BUPZ3nGg=8zc(+At_EMlM~y@yMRU* zW1(E&|F*q%{J9&#Yq$WX$jrvVyJJ-i#vx&|h_^Qj50}R;_F!wB-HkJ6YOuw9Q}+2& zMscdob2w;E2gB}Xd5Hoa-|enRhsrMa?C}T8{Tr9j*WH_|Gph=tV-{wk3wWSgU;uX? ze^N1m%(8QnQ%KY747WG|ErkN>i*ku`XAjlaR0fh8@M?d>pD2vo7 zjYntDBX8cLG>?LnLs-mEmy4}7NK-k@s#p|f$QyImK?n^MKan8)EZd&NJ7)nAm>b4N zeR|{={y7}OL9IiJ&!S%TOy#=LrQ7k8gx_2YL~s|386m)@zBq(9lEqX`fb7K!KJ_S) zB6MPQqNuJv6YXn1~*&T$*v53j%9T5h;PuOu_s8RtrjJ_){as@arM;X)I-T?P-#vNDs~)um3(9< z!35j51&Dw7i`yXXI|L`Q@?w?=1g%AE_}M9oROyOtsaj^F)Xuqt_|DL0aGvPh5he9W z9@QeQka0%>u(M-6$}PTGWtIC^&WY2Q*8#py?4W>U^+Q} z>%fi!zpc&bWNPQ_^#>O3BT=ANFKKpaPG?QmCQiuCw zznqS+_{!F3dC#e#qsRTdi>-<6ln-~#W~QuejQZ~*J}&Y^VCtYuGQ+0?x(BB|Mcr}7 z1$5AFYIRAs*dmFITa52LV8ssB;xvZeAq+%Awk$VnoSd8*HzWZR04PdT1SO12x*-Lo zLd6YCaH6K)zHHnlA+UR{RoOpeBMmC7eC7iSxic z7lwA&QY0I}%1ktbv`oHBxxeRgFXXK4BRXVZ&OL;fMV@1b(f|W^pquZ`$VMmX*sMmJ zp61|C=tQq?&C|RE2QBtfy*^&zI$KPxYf3H-A461A6j?h>e7fD=lcjZa8GXYd#@98S zALF}Z=xj@7-tLmuw!iFt{c?Nt2M;5OD6*LXz26r;jOrh)7l;qcIy~VHbf&7F=G?Rt z>U1O5i0b%pxg`@9Q|Jm;uI+3VN`l*nn{ErD{dCRwdv%y%Ssk+;MCydsvKLOed_EO& z$VFYbq%3!UhsNbuuc{v%!l{jBu^ok5%eUd>Sz%zOG$|$QSsYSHs{fnfW<9U6E3Vdd z_x;-L!~$#vQjQCqn)!LgrgdA&#ctj>e>igQUh9md9zoMBd)cNdsOxSe8<}EEoyZ)+ z;DM)tl(qmz=Qhb`wYzR!o^G1cXiWm`bouesQnMWe`TdKC?(eehXrs%r9@ZJ-& z-5}ij>ks8)G$9q~fGgrCloFOhF`DeqzY2k3Wjp4tWkn_p49uc1c(DMzSHoseu*}r- zzeel#m{L5h=d}-7Ph8BuTKMX*qcH=oKIjD*n>t0!?i32c5!=t%H(7h~Y<@zO>GR~+N1URz|H25rED?5uCJ>WZ%( zZicwCwlcf@Kw@~&q+~NxqG|y|RvY`Wme)IsXxS57Kr@xDg=s=er-6rp?gPPwq+P#% zmIBG?zueV{MygyU)JA8^zVDe%Omo5Rd}t#y0N93Hpg>{sj*tq2JbT&_Ev>Zt z9Nv*?m{n0$Zcb>*-#kWp`PiR2z*-^CZL~jC=_;VWMx$g15))v_yvtP&;L>+I9&Qn! z&49m;`w8{qIT8;iw1KsowB{PHZTJGnbk}tb5DdJpS+mIP08~A_4{Kn~C0Ak=tXHBW z=_3p=$x$utGbLeLM3gIy!%NRZLa5_|=e~c}XhHy2^%cV@%&tu^c&b*5ai4P)l_vGz zdEmWE{1Z;Ke?HrS-Q@Vh+ezqwiH4vK!g14j9#E6ri_YWp)?7M2mRrypf&mes(52YwMnNN%!pVh-5aEpihzp;HG;9oGcjxOzF8;7^n;INP%$da1`(ytpRYsBFm*Sdi<#IwH8l_kq5LUKn?zJ5qyjB z1GW~`CMxxafmi3&?KwsfD|3zKB__ML76;&OH~K4fhtGjE2Eech!WX~so+(Z zp_1nyJ*^8u1*WZhg{mub4^NUv2r*zS^N$#7f&odqWKfv#Gc#K)w}ZJ-tfg+Cxd?g8 z)GhMzG0=fY4G_!sA=UyCIBIBvh%zj7h_D ztZ$Fl-5fQrpHd9nYqrN#5ja*;B7Ny7IXAP<0E+ggOaB`d#?k5hIf#y>i>Bv^O zz31w=EuuJM$IkVuq^O+}F9R%qwsq7FD?c

1}HqaHG?PTz`-~mv9-H{Swi$_4d(M zZ#cT59q{zhuuU1RF_=^()xT;Mi=Mz5W}-FTtU+a1az)lFzM%$?l6IrdH7JG4FpsVf z#LaYWI>>kzB=gR_8pc6O2+e3K^kr+H^ZmwbVX;_MR2)}PE5WYRm}Px~-X2FMg#S91 zux{gATEBoMf;Hn@k!6HI3s6B!*v53qLF~=I+g>br)nQ-t*?X7iI)|2bh&uCS32|=% zUHBTZU|*%HYFmUi%>PEBv<$|6VcMKRq%gRwYVn?CF$>-6= z=+%d}K^Bi=pJ0D7Nsj#n=$(>=OcPih0rDS~Vt@3$Y7JFrA#%H~D_{UbjLsx^GPE+I zb>J6V7n%z$6B_k*-P6daz>+>w{5c#aA?yv_^8jgv^|&G=U6K6ZQBu5EPl{d1F5E`#Ykx9Vi+WSLt-s;km!6!=x zqezsp92GZ=z@4*WfdkL}oQ;ga)t7A-J50ODI)*FvVR-fCWL z|8g*aT1H4}u#~B8Gb`FZ{bFvZ`C9S$J^y|nEz&8C-T%CE$_?6|PE)^vP3vg3WOrKe zizr*zpcWsJ+-bNj*nx&n6=s=L8aMi8+<{+o0~3C^sWUU9g`7W za5jXv=g%*zw`l2kZ|d2l!(w&|(CY9F?Rid-QJttcjmo7!2-m&|YM+T$i2(-k{`H9S zBJ0md33t2)o6M@=vu9_Rv3SENuiqFp!HgB zn3?QQ7ekz(h6MLguCk#vlXvmX&jE8?CYnK(aYRN6A6kLD=0u5uUj(|=;|N1m-C;q0 zMD6aLjjn@@tcoW4Jj;sjV}rG8=_TtvbDP~went-me$>U=3wdmnwaarZbM@DSQ_XU2 z9(rx$koQ45X>LF}pgZ#mpSKL{eiBMRrL$KvR~wqW=!mMGa0;JGA{BHqr z8hmkjk8iD=GWvSSKvCU^EGTs%e)XdL@pfWLHo^=6<-~@V{eV*GwNjR-6XYndTCUyZ zRQbb;egbD>>F{BwAh{E&;}5tlMHWuNYJb8kSEf1zd@`(Q> zigI6uNV@^zBs;2kc|cnDd3Hd^;~;|9?)Y42=n<&0wmk>J)iG%rKc(O;8az87HiopetnaJ zdHthoX9jRRvC%Xtw*;?{m)LJM>hcCBoPm_Ji8n8geQilMNl{f>BG4gueqO^ z&Z4D2h%Ky4z4?2)e@g7*r-i@ZM&ZIM8zsoa>X%8f$<*FjAg*N4s?>nMw22Z<sLjDsartZQmM$q&}#{t$#Yut2<306p1zM`{0a9V#q#-mpCK+qwV!KvEvuw0 z2whR-8Q#wQvS|?ka&@WJZIRv`j3p*JN$cARfTlj~Z${DM3MT~03g(noH)u{5yEeGeyfo0#02PAxaZ!gMcrYOgn`J0YmmDIy(HgDMTEM{y)xrPT!$HP|FQU{|*zQ`dduw{|Y4fk4D9?{}~lCG5@2mGI#%bL~K!Y+j5;A-uqdX zU;`}!6e*)G7hY|2DTJSGrL9Ch&?-l>q%KL~k5qN}8u>f%yivTJ_Kz4-+@W9jyC4sEiL}3>g>|oY@zBj2bKlvo3ws>yh!Jvki zbUCA}5M<&pCh@+7h*~8`4fBjf|1ZqRihFMiTM4z*dnZfz**?b&WS zFFHpE*RaH7ri6^2ngv1&LG%|LU0$STQflmH_vOf9pQlX*9e_w1s^$BhuJ^Zk^}_Jd z_Gwb_4Shg-3(g+s^^KId;8<1smn~ME!HiC(np9oJ^;O@wOSes=o*APg{sP4ZY$JRE zX?zdF66ZVAJ)#Ft#JR``F2uos%8;QWVsq@wQH4*X{`e=(+9YaCnTEXR?HKXcGI1R+ zKcH$qYPZOOFA+95I(N6HhE4!}k$0BKtUrTyv)R>nW$wNsaH9#gkcC%oXnUsZ_g#Xl z4Dp~|sndH5GjAhYDRs$m_?%PJ*hU`vtt9mwn{H6{^ai~_R?nKHcrd%a5PCf@dgLdl zobD{Iom;1eSS8u!^LGt4x%Qa6_-N?a{M49011~c28zQHQxgk1jHQ>39A&p@_N79}3IK`VahWZ1levtp8oixv3`U z@JGzqb+4+o0hOfhb~eiNlelS)Qkq(J4|@&@j4*}KI$jT8B>c;Z4}cgDK0Zl8nrkuA zm>acy`=_6BuUQnMhH`9JJS%pRlVlnnhvz1r5veA=EOdBjPILP)JqmMW9!qV zauangwl$TQMZim|zOBvR0f;fYnn)8z# z)@Q}e!jz9|SF8FRQB}(i&-WVsnJ;{p=Ylp)fR*M@(Y9?jo-VJe^JktPPr^5`N5E}~Rv?4I5X#vv zpiJ2$qP4!km6Sa!3yGI@(wKF@v`C_p;{td#NxoYm@j`RW4ORoj>>E};iY|vD#sKGa zn!>u~=3Xy5D{WlH2QPVay4={hyM`k&I`h^doX&kSaHlT#MmfvTT~pFj>t$-dR_$6} z^O&z*l=ZmnrJ#v-C(=PdinQQK_slh*k@?%<K<{V zZ8b4VYRFld*cq3!w&bFgg57!cmYtjB@M7uX!p27LUC=h#XR?u>E}bCx+JI!*)wWO> zy7i&<>eO19>D&|~M9b;g%N9G|y$n{9)c$N~v~l@$@aQ7Xt_4B)$*EnAYVEt}n@pwg z;3E=tp=?T#<^%VBqxMtxjGd>-yJhoX`rFoH^7^ltjoPZZ=lxG9?L+x@GaJd@meT*B zy8K5MQk;J-r2qKe1busZeYd|OFr$0OByWUm*?BW$5Cm6*g6t-w@FV`sRY~T@& zW059>h+|k_%{bb2nV{l>pkW)@whZpgKCU&5(!1?p8tnExx@A1LwfbpzfrI>%dCKw) zu$C=v;J#01Cjc6zkIN9lDcK4?+=q%8v?HEwx^^c(@_ykfK#IyDhGhR@&Zw@2jN)I5 zX`X<*nq^Fb$w_T|zkY|Py#q54HmAM2RO(Z_He9~I&sluB8$`uIob$^)vPcBvE0{Tj z9DiE4x}FCUiGJt_F>?31F?ij@;7vMvq?0G0S5G?}`(^}Y4X@Q!4)@%kOr}PDiY>w zY^Ks?hGk<>#Q0JAKt12eQQ#e=rUvD=A()J<%!EciV7Gc!sp26s-;mbw2L@9;P;l2CDqGZ?JJk zH%TJS4$WlAJM?o}Uon49;YH3z?DQ^=O4i@o!MX2r=$u~x>ys$<8eO||JdAU!a|>eU zJXgi3{xx60wx(RoOVk{5%2oJIXpg~9vZ2U`B!Q}4!B1@t%5U{w{}6n(a5bV}0RaGD zg8jQ(Bmdi6`=1}c|LAo4KO_VHxLQMfD=PziL(9L*xJwP|Khq21SGBI+$g^L1Ii|Qx zHPfkiaa|?x)L<={!ukd=F07CMs#Uxm|6J4BsZaMHl(=u9&c%v!4Tm&o^2p%c>ZU0g zRP3d5JIgLe$$%)ezi~?I1UqazZC^wnsJes}$*ckq4{Ff?EzAiaURg~byT5r$ zJY+q75rZkNI3lL@QU39ngygqWT~M-o!5j+uA=aIP8~^sf3K7(W_A#5GOu+()i%tbI zfwXu$QPlFb_9Hjy5c_0^9kw>&{uN4B?-j|*EozMeMoG}lJ=j%;0$baOCk$;Az{loW 
zW^HZ+DLXhwBE~LTV2)w5Adrn@o4u8Q!ZMj8^iMk;np1;mqF-ml%*51^TwvgU9JRSE zd+|}nAH3V0zqTWNaNCdfU(abJZsJ=;DJ8v}v3jR{V_PCRQ2fg9@eytn#`0w<7T18I z=C5Ibr-SIh^NB%^e06`J|DX?Ho;~f5D@H*$htQr9byN0S>+^L+YwUhBcv0-t3>Qym zyeni=)*bqVEe8iRatVbcIQ)>MUn*({S4n0U>tOcA2Jl#yh(E%3^x4U`YOW(m(jzq5K32`XgzXk#^ zTwB4ZRK>ESw|*>?8h5jm@yH$%#l$?;C% zD^u3gAvSxe@Z+Ln)QFQOIs61sm|8gZOBfK1Pb(fd3X_MpNDN?K4;nK{j>{mH;WDZp zi>bU?_*QLBtM78n_uiSy~_ zSCxL)?nM2N9NSd19b$gEyhj4V?GlAhnrc3+bz{pSp2o&9;y$8`UkI>#1O7Zr`B)E&lSvSv{l;^B^gv%7qpQDRjpc7V6M~! zB|i34Wnn?oig7gsPCx)}hr4>KckePy*X%Lwq-JM&@CikP8^%tpmY4SEw2vZK2y(j6 z1Z}0bSjrH=MvfK5I@s^Vb@9r42$B^vaztqEG-PptU#NRRmY1?9mEmRJ%ixkTe~qxN z)Qv5+QkhPK;kz@{uK&8TR+iOD+Yw4^?YlRpRsPi-tz&#_^ji&7hfz^cW@zNpWi}Ts zC)y+lx{#_vYlHVfOHE&;yf9QjgChRUpq7za&#bF_+pzF~r6ul0Np5jAeZ{H7?ndgU z8J+4OH<`i>2x*0E!p@_m18HkyuXCL~ zN5SN{dbCNG|N4z=xyRC0r`alz_Y3mp*7yE%FWx1tE89S<3qZkr&t9<*UeExT}Dz*jDvQ#@h$c?3BWZ*7!#(h#k@|1ZZlKNaQg%T9G63 zpsH5B{54`W+Gt9Vj4Ap|nPr7MuUr)LYIF?vsDqZGQwvKpoo#hTEB}+;&JWjqsZL;K zge+_cRrVwkqL^QH4w#iiwFAqwP0P?pVe`67*zEnZkYWeuCx_9Dr;e;Kzm#-FDo^s) zW@0`gtON8#lB&c=EDOSWh12Si=7XcGSVrG06b_HB#7eUa20wp@nH@9ze*n$;MXi01g*O<r4W?i&H z8L@~vKS~&fFBhOpRFPMGT+mXi2nzT*eRKT0P<#+<*G#CE=G?GUnS9@ni@Bl3Z=Ulc!%7C z##b<#QI(Jw*eN+()_OqG%eDgx*^1BU7kFy809KCc+|-gV0&MHQP@b8aBuAw!0p5T@ z90-#i^DxR@%cWmr&Ci`lB9THJ4DrbS8E))X!zE=VM#m%fWJzi`%A)xG~+5;XF1ceDmVnkyC?-;GLJbR9!q$kOoliA)KDSNbZM_-?t$Uc5&?rv9u z*;eKKQ{-|(hwZs0t^K^nxQ+iXO6;wCN^QRhHN&8Wc>R-7m2!Md6(<2lda|=E;hW1D zX%!1I;B&Nc&u)53&rDqyW@ zwZ+(iOxr|Ru#d<(?UAMdrdnCco3ER6)3X1E zyLW63wd>M$W81cE+qRvo*tTukwr$(CZLK(2$zD(Qmv?vV=k8tge(L@Q*HtwK=9uF+ z&kv3A(#o^=&+SKvbOEo_F;Apma~oFsvL4);5xvonmPbPks)ihzfwHR%L6fp?yIn=K0G)0|AX^Jv^m!O>- z)g;+Wd7erB`xH@mRBNGrb34ivSc}sy#x7{3SH|BW#fF>=h2MBf!6dExktbDM8TFO- zbCB%ReKQd_;VmG?e?+Qd1acb-jUMl(0$RLv`RsR;7GdJpd8b(M;3vXB4lylj9y9Dg z@u}m2tZJ<<-C&=$=4^i~rhK{E&GWNow;KLdv3l>Tr05yxt1%m-|GCOg-;zgqsLaJ?xQ;t6+5tbodTd9jci$+y9?DzW*8Vyn8&aWL30HBHCpW5Mn|6K6@ zt3mu9-4XwbJO4i>dH?al#uop6e^x_V{+bQJcdo8}P`a>IZ3mzSCjvZDpeM(PDrYGS zig1b~X?TCQ;^w;Xr?;0xI@>s)Fr=3SJ{)t-+X)}raezA5VYgBZU^H9}79FaERn`M6 zT@f^pp+n$U&2g2gUycaEv1r3u{x;RkSk_(;#d^FIJRGQ)-|LVA`l$@$fw}&)TlFhY z9Tr&B0~g*OYXDWA^Gfi0J@?C#5(FI>6I4lZj)aY7kB$c+C{A+805&sl8PWOB0YRtp z2wJSTyRe@rVm5yHGG25IjbIwq>)ECqBau2z=4;8=jS5wfWh50FjGZ_LUogen@&Q6O z10yUEuBP#k+g_ZE&*M6YoX_JjY47IC7+?k!RmrCW)w_11m77z@8#d4(QIKu1 zIX)B=0`RPS;GS>|EO3RlJkBqTvAH(4_{yB<8nZfOBu%~Zi%W~Qt`*`{<;bpi&(71x zQ|&x1vuZPI2-PLBPotpB)|Ml$UH2WXEaWIW<9K3b3yvZnCu}_wzxqe)%E^w5t2sBO z9SAW-A|%wfFk91>wh<-rD`e|Vjly+g3uir*WzpiD@O{GAGZE9%8T2LZ`7I-mklES! 
z=u7W=x?hhgoCKb*T+vN+g(2#t#(o62Q%P)l+jJjbvg(JqnZGn2V}?;HT$UPX*?>VY z$vS!|qSw7%hx@9I6_g`x!X#by4Kp0#HRdow!F!S!Y8a=r*zyQefVkTD&ZnmJ)D+LF z33Dn^O(cX15~~3x*kl2p)L@PjlPOt(wrnp|m-hb3?*#KDdIeDcR3Itn!yQisY#&=L zVH;nCY@W()o^owx)!6T?t}h9;#XJ20d570J{}FS)6O)K}44ATJw5hpUU0Li!!qu#fqUzZouvEk&~bUtaw#e%^xX&S(6t{| z>+J`xc53cbnZ?GT-VoEysn%!Xcu#UmS_cVmnDQJ_SnFtmE8(x~B({T3Zxw7`dwlgd zP793*YGTbE395*o1u?d%)_kk3c!+P{|?u;RpjdR%4Dkc;b~4^BaEN$yT&FPw9+6L*t<;5G}FD0VRT z{SdnWM!u1c{S~Iv6kmY4{r+Qiu#Tmnz5aJj$Bgt($&&1!X9xd~JN`%K$$zPw{?|Nd zX6S6<_U}h^H@sF(TVjcO&(vh|s-Q9y!NDY3iaD)n2_+hu+DarF52+_c0mt!yus|$$ zi5XR&f}h+kak_4E0OnaA8pc~DA5y^m*c=xHByymgcG%Zi?srX%V^Vl)p&Zh2tDcq08FEoPJ-NNDjz z3Y}4+`5rlFo834#xcJ)78-FML@q?MApM`tTb1}(iHcf^(S}5>IFEjt$Mlhbo)un;1OIXoLt)^G^+RtaZhzZ=-ov!nuh+eNW1*by$^Ov1|zGzbHB~wS$^PmBl}|T zDM9(!*WnJL9AAlPns4}k-Tk7{bH13O=EHUrFrGXb7(Ut6@XmJVccQ6JIC`h!27cJ6(ay1&|X)Q1}^pM%Xmx;XWh$JX1CtVk)MG z00^Ilk8|7O>%qvu%h`ocj}XR~$3dVLlmNnHFGkE42fm&m2qaPL4&F;o^1U_;Nh&Kr zY{>tZO`)U->xcrLPs+6?hpyd+8*B)WJs2lZ_4u4V#xD4#$Orf=AUpk;Ltx)`pZs8c zJpJB(#-888H=prVBEA6A>_O*)=|s8?fg05`#m}gZ!{N-X9hxoxO8sOl`BJdCZ_uJ`Lq^ zC-`lZ@gf39@i<}i-Sr?PG;YFj=~2;*+3AkcZ(x?7Vrw2}?V)Nr9DY47{rEQ>T_Qg9 z%t=)s)rJsQCNH(U-O)jXcu~AFrzBtHfSp_uyH_smVAl4RGoW+|alVKVz;#%&^MNwz zS-IHL4WK!t1JwOehcNR>zl2%>vu?Qz7B&hao%$%`m$;%d z`i+q&U{_DqMn8Ms*;u#>>P!v_AA7lzIsLmkd>kHh4&(&f51z2tB*GBNVvDxma2ksR zXifS4bM37|t8v2S#|@pblP2b@ARnE<1{oly)t(%m-P^4uEw)qD@SMhiaV`Y>ms6)X zT+Z{p$gdazzG>r|s{Z@F<07ZwYaxPF0e|j3-$&I0A7{@$#ejP#s%CtC3<0(1Mltfw zjx>C6wH)b1ifEtdu#GiS3+RNklF4^TDv9m%LWUvL>WHngvmx(&4&vFc@8g12enT4^ z2EGW;Mw)Wg#wl+FzH|%)!&Dh0FsGW=Jzh9(I*y<|4yoeTl@r3MBag%Z!Z>uUOsYC% z`FWhy+0&-bWREZ}gb_whHq2+l;4APEhY3ez!H3*W{(U_jDS}D~O?*0Y)xSkSH1k-c z1`&`@oB8T==A4KppvDjSSM1?>h!?1&AXidGX;Z#yBIi_RU#|dv03+21k2ks< zZtOt^VGavZ&hh)#3hllR!Q2(hbh6Ag;8x0D;bg)}Y$z)9jr0kE<{e=h? 
zT8O1FE+zb}TS`%(7c55Os=2qjy{LbFUl9H+Kz}V(5d~neZbDJQc{r$P$O+oVX|F(w zG@X{b|BbPkF1Qv)3VzI961hvPHaUhL&|3|Ifo@n;(Rg2sxT(36uKRH$t$$)9M&RsT zeJ`{+J1NPmKGKe_$UJR1?WoC>@=HQvRt6k~w(>-zLRW-uUt`{BISia8`B9Y1_ny*$XUTnlm%crkeuzJC z9+*#04+|Aor89!Mw;E{i!ns$W3JZG?kLyecfV*={b=jxQ9199WWlX%0cOk>f`<4@7 zwnS8%C3!Lw+EO~=l!{QBV=zQzo(BH!&odOxif`Uf8Iy96Kd ztua8Fe>jk@oTopn-Kt(|f$=nV!iwZ1IP-fvZD~AR( zPyid)68?-|APJ{F{#O+!86C%nHWDC{i5)-F(U-WbeC0|}>-!^|?%_w0YGr9e(g)W} z($s@si{#qmfqWH-Kag)GeUW{iKhcklVO`w2?1r(~cnU7rRP?o(ML4^%@U|upfTuu} zn!fK`7O0{Y^izQetH|KA)TSwTOIj$#=E}OIKW+R#G|t621qQFSmenDsoHZ7K*yv*< z>iZcj(mav|4g&+MJ%e!EoCZUoh^6&0skG!I;&8@N{3u!4Xet70H(kg9k+k_m)9S_u z>J!>mkq5*i2IYGJOoHWsG!SUUV7&16P?I^>m?RLx+Fr%?6XKw5&T@1RC!At+eFMtZ zpOZ4X80Q~oBAcHxkDz$*-~Dk6hjk2J)SR&>Du}pU;qb?Jgz*qusBL32y}*vHw5~3x zoMXKc4D8qjye4vlUnbN%PUhaPnBuCvyepMS6U<{V?#5Kq{yb)EINaQ{Lp5PYikL{( z#6#LJETRt%`E_knUWV1_UhWVO{gShb9vJuMkTnwNpldj5)Q5|}RM+6qow@-ahJxe# zq_*W`W*=`7pjDJl$Y4T4-k7^yg+48+A}C{o5l9VR^y0-K>n7A(i{}*RJ)NpnmggGT z^%i7oURkpsB{`pw!&kmSJ)43~8?UcOm#UJ!&*K`(JnmEVa7&Rr_FBQKyH%+$or&$q zPG|>sDKaIDHH2$u8RxJC@50T>KFCUOGbXIoq`V$fo9#1C>J^BMG@x+3XjS3Te;8>T z5z74RaYIiyzn8h<3JNahCyWE3Ig%~WB(P)avC!xj)QRE`y(bP8M}R(7+1` zn>aGHnals68qq#ycK!gad=#))!+X;l=tZbPH+hQ7@9L#U?*Ma&5IBVR^sSW|=+jKF zefqe}8c{gsMy~fzRS-g&C!;79lHj7EE)F0p=wBYKOjNfUmR)9miO|!qmj@+tb27s&QnZ=A zR$!}%B2~I1*Giws{ASyVuu>td4M&WPxH`P2juRDAO!%0%$3OJQMukOhZyrn-^f`&; zq%%SmEN27QLOE+o3-ruUZzG1>o!BraDgsmKIP%Iar=pq}M+dmH1FUb2nj8+{>bmSL zRf~dh&hPqA7uDOQ64Y9#*!ErQAbR2nMZk zoL1#6U<4Rh5*!Z=bZRJ;No@F^Cbui|hqj2;R09D8?n{MZ8VK|{^O{>2-0>P2w~<~G ztK)zNTx0F*qQm@iVtDsr`7&Z@+f@2E=G&15=1wibO1c=BtrpN*KI@X%8&&SX$rP9v zMN_V`0v0DJuMO57x}bUI5+J?FSTQ2#FTYv4IiD8grLc~3SfV#(0zqH|yw@43fSIX1 zwyt)YwiwbA%UVVn*~M1tb-h)lWQ=jNtmVS_2UKWIZH7b}6*u8D)UnG;5ss*ymse6> z{WO09U%Fc4FgH_i{PMn%`(~v4E(l3(!3v{=Y1}y8-9&LJf{wRdEIpezojL1YFun~4ZWNv;UwnXx#F9~^2Eq8?|ZyTswihB{n=_KOA)A)ACxC3@@ed#UX zcH{#l^08+|fci_+^2|ZHtwr;7`^s#6CA7&1I(?@E-~2-@c$OhU$V z6OH81I;xlfF7CcU*}!%usvV$Ut5y3-mNQi!^ydA}*_}R`G5ypdJR0#k+tgns4K=J# z7qML<*>_8FOZrI4GXdwG~eWed^v^C8;d!C)o*1soKn_gVl z8m0K#>O`2!EUjQ!E3Q*}VZ54bl%*rp21?Dcs0*K(lw_~=^|vc{0B`US4Y6eNm$@kCWce0K|t)CDiy%q!io7B_Eq} zbh&B)0u}evv<*R!Wq~ZJ>2U)eGrH`yZ$UU$JmboD_Ps4`murqIV^v%}bpjRu;E5QXUfTwhzkH)B%9uKY5 z{50b1z*ReJwhmDx!bz}E$hj~^C;(Mbs2-^nu2y3~uLlPLdJT6&Q)`kxOG zEk0u=ojWcO6B`^ay#>O;-vE3r{hqiQ$~1)Ym)orv@$?R-2HI5l5R2|S8P-bNF-h|S z%naj&nO3ta9PS!$RBL0fszJ%MlmZ;_j4K&Ixc=Zi@vHJeCp?AbYbu$@A@9a4 zZ+xm5=SBmNbOXI1omC}LBX`fLcNjFcL(GEm*K_sM z^^MKjil0!NP1nN4)1tvG3&4qU-2?<=8mIHTJnf9{)#Xdp6;Uf}%MEEu9_#9^b+zo5 zMS0JP4mReNf>&3A>*$>dpW2B_g`O2(nkCzBcVR=v3QOi7_R9v+UCYSC+)KqeiYx2z zTTZxR&7IN%81cO z#^ET#K5v+o47Tmz;R0R#1v*aFW0=^(oveg`8!AU#SxW;?P~zWu4H7;zz>AZc3cMV? 
zq-_l&5m&4iv-1m{%eDUJ#~A>Md6DUvFzYL#$vObqZUG#BjU`%&^#OToZlRZ&C?6iWUTKC?$`N0GiDS#@kJSneEjM%U zrI{d5g!X8)GXUiAhhvx&t+*RgI%codrq~=}<{e@>JHz1FqKTrce)i;bIg0>;8D5`8 zemZYshpyrc60e3=GgazwWyeVy#s=O*L6TmPIiH%&qG7oRsBbC)by}-VwBf8M`gb`4 zdq$KwHcxko{o%|=HyRh#&CJ1#%xnqlz}6??DH)d5ymQOmmx~ovY4E=5R^-dhiSnX) z4C2jHcWvFF(H-Eqf6V8Zpnx0fdU)WE_B+Z?)TOgEXN(?07q)B{c@fBN;<1+|=NL?~ z6#a1NOC%fQChcTMryI2`o{e(JEaj%Fex$L6I$v(0&=&RQQHN)bN*)0fhAVZx3>N0` zRGM9IwD}PQ3W=d6OqH(i*snyju7oxS6i;+JAWVGaKzhN z=-n>LA;V!b-p75`SZ+Mnkj8cMYzJ|-UpsOJ0_wcycXM1)5olsC7u%=%gWEcsdV|r% zEWNn*^K!b3556P-6ljd<`9EVGru zk=a=RNVCY$+hoD-^H@zypjj6i3vTzK*UNXljr=*h3Ng_~5JAfoB^L@*0($POJclT& zoGL+!c|gSVx+^)=1qKhRkvq}Gxl`r5D@i$+XxjKukLQsp(~y)5eRUkc{j5X50YMYJ zUI=^8&UVYvzs2%obrfmd>+w8#!Y+F3?Hd#qwSKU$*nc{}s>Wyv{Z_qBay{lWw<+H0#VQ3DxF3e=W6hWL!@YQ{0{MvQKCDm#nSsQLZgY#(lph0L z#<+wna=c&$>L(Q?auAC$_$$Y}g4$}lW`yG_XKkYSmPW8AIcgPElzO*EXawwWP)}0U zVy?1eg=$~KB>$2)L=&s#=;U{Be6)Gqq3QUB!FL!?Lr7|`6gMa z9XUbCW!}5(}0{|>V-mKk3aB2 z&rwf88e4y2Jn~AIN*>MwV9!4)a8eAsECDBOH4!kXVcTtzyFi#jf#CVIDgF7v?UVQI zLddZuag9>7WlFUHZ$<9M^@*ku#`w3bz^s}3bE=TxORb2W6fW}dLY~$*DHQ6qMr8I^ zJ;?Z}55R?08afrmD$icE$1eSCul#@t-!2Uw9akud`MrZCW|wG&ELBn_NQDi3&M?02 zL;c7?*NtkWWpV*mU~MzNejQ?k$`&?;it{U~#TVr+l(G62`oN=M>Kld57z&euka{fbp1eF)*ZI>P|2subVr+ZoSz)^Yp$GtctLZvy zl%WK6uGZDxvcN4B?pR%N^N_oU4=2V)RR<$?kn)qj-h6VFUddN+XeN0(YZ8`5 z6%h`9p1#-W$yfkQg~I&#Ma2xFd!Ayk0u(_3M7jX-&C$vc`Lf2MLYW}MQ!5gx&c2o| z9a&4INIR(bzNKsN1ZqzQzHA*;0Te2?k&`UfHL{G>VOthOhYD>%riQZy>TvR28i&ka z=(yU2&`I2Z{Hr&_kN<2BbDKcBZi28hcHVc$?qstiGK*Nkp-iN9O}8t4CKuDu5^Kks zGz!H`0jSLbd{J5kyeY2&KZv7;n*gbc*r3LkS8kP~mPDeynkZxhzU<4lriZ@hKYSr5 z*DU{?#4Ac2^Te;9YQjXRY_yOVDzY6vi9y(G1o~iS!dt?Tm02X37 zNwt-n>PSha(dCZf*V_tBJ0`7G(8X`nC#KT|6gkCIHNQ z;?C^xXfi3C&(`I!z$5Rz%aC}az121}EwOWb46oVL_AVT-zIStEHO-CsAe<@ODI5_myjDVgw@yF^DZr5l@vmQZ!`e$Fx5NT|nTz;VY1XKr|BR_A&*luRb; z4`2k8Ov(GaTl&rEfUVjTqj@U!EV$)78Z7MBl60h>&6q+r*=UFn$qd&3krg~Zvm*}} zizv$8*73+Gg=@<)s;o9_YvHlU(bbL=g}8wN|2wXZ06s4;50mUT1a%};eRgD4JXf%A z1%96Fc1UcZ#SxG*a~zvYcXK~mri+q}oj~5mkj`5WT+~e$QcI>AGRBr?Fe?FP%=s%*Zg#L1>i(R@fl6Cqt$NOMd>=d8Ai+qb zC2*yTwaOhXBz_`;`IK7*HX^+C5o`G;TX#aEG%R| z7JUN>2!@R{F|XuX4tGtxIPcOZPSi+09{Wt)G5Ao-MH9L3Zb96xq0=`KdUxBN28BUa zDi5zSzRJwC96*;+`XObWLu69*T9q2d^!ACWlxq{pmxv>T%(g^1COHtKkPbyMv;9jI zDFXUBoFGsF1>Qx&+#4WreJ`98lvtJoayl%YL)~PJ%9YYi+nHDN!z3$>(54~j?-v*= zhz&d8bPhX$r-`Y$#*nDw7OYJ^MYMn=W>91H%Yi~!>L{hNx0pjz?e5kv!^E$9_9Hj)rR;Z#pxVUSap7I&v2z9HKMh|jyIXRH60S8r648ZAL7IxFH}SHC@%>P;9?u)g3h zT8^WDKBjzn+Ry>>#h8n-R|)|gOg4e?D3_O2V<|~b)i+k4p)R{jvIXh@!m@~>ZjGX3 zZH*=zqXA40H*bqSDB9@{Jk)amA~<@Jz!?-ctqfe~SeeW(un17m8!j^H2ZXmq_F+xV zMaFi5(qjuUCU{oTf>qx1Ui8YH$?s zf!8P|w2F9k8<2)ny2IX^0UvNXN$)Ow$q`y9`5cR((GXrdX43J|*!LA)VMRC7wh#F- z@NdExHDCxDw`O1fS+)j2%M%LuPn1)805dcg9{@nC{Gaw3sQ!7c;lFw7{~x{E@L!Pe z|JVUDH?g+=cSOWW>VWMQ15D_h2NY2cg{VQkXaEqXBSt7%k9F5I9Ru4DMGCE8Vp6-j zzND)Xw`?m;>*$K?qI~3+jFcXRD!^IPq-rP!xT3iUj!KDys+bOxI9N#z&}>a&3h^$5 zSkVGe`sIgh3P}Ct)JQ_4@MA51ac1mK)uDlpDgkvvBMMq{G zg^Tqg0}wykAZ=X6&C6@%3k(}zb9|p&sGxSbVD9m;@8W5L!zdZAWa(l#Bq77593irr zbhVlM-)%0d1+7QTn7ke|708Mw>iLg9c;tm3F&EIM%K(;Ym|TTtd}j_*ndJFAUk8rql;y+C~p5e&~^mkzX=)qIf51? 
z&AGQNU~S$HB4z1c&Wu(`S6sct{UNBqMJ~z87W~Nl6PY%^xtX+rL-!?p=zHe<^!)zl z{fl=D?z7ChXKZaP+`WY^Q7?{cM~+W-e_=@dk31I2y!ppZl^aCf0qnmpKDJl&z{kIG zq_!~s6h{A^i2iSn)&IH}vbJz?{&z7nqor%N#fIYZT4%6=-r)kG8LK^=9`;OprG=tf z@9G5!6VOoitD~`8NyX^rGa=bn8Kt7#f^1E~AmJ77@%~;fvhajM?hY1o+8{t8aCZ;~ zRv%7b|FiGOv+pRo1|lclN@fGfX#Ls~4xEN^BcDO%l%@Y&ySu8J@4nMbbOj&< zJuUqZ)*dVaNRHCbSnd$Yy@UIQpdURMi7Q^zqe1|ruX@#rqOuZg8}BeL+QCF<{ZJN5 zIDCoan0JQo=-1pm(x7!p6KIFDJ0qCHXOEs8ptLh^bX78=o7`jYGkGH|l+G-OZi=v5 z5uX-PBoPW}_~9{_)E%asG2Ri4aGj&rZu~JC#phr}M@n@rMb7|cGHnzcv%ycXE!ab2 zb@G$Pg7m^NA6%QhTP>MErRCkN&v2O%^^W{-Gb1Zq91YSe^z9NoOBml;>ER^6QkzZ- zn6B#LU^(oFb`pgmC-lmW))J<&@sgK+4T3Oc`B>Q{^0qF@Bn535cP>^$wkX_&{Q3Dl zp>SFAkQ&RA;_rCehX|D4W2E-45b(VxJOMf-NxeP@U(~-?@|cC5AH4CWHv8AwurdGW z%@(eEiBj%Lep8i=;Z2Ya>fK?C&qg+%Kxe@t`Ad7B-969Ja zYnjGij94s{bSa?sL_X-HUhTmTe~zVMe5|9LAYC{l5NNjlo%Pv^Ne4{430+;@YLy^) zpc5@`V<4#yegc!!l2+~k?RPYRhg;z)*cbHw-M0bDG5zy1R}M%)F}W4A?4Xdyy^I45 zhqb@&ey`bd#k}!~_k;2$;!J;mb=n~p(5Q++_zRx611O;c!!~)__yvyLy=zTOXRm68 zjH{f!mRi6n7vz;3iX{3m3rAk)CBHbKRe&`YKTGinsxo~JHN|9!*p~joPwz!_4EnRP zx$er<3nXqC0S^~-h?q58+=qL{%-r}_d1faW`(e+NP*OHSja8{K<=IxsZ+Rg&-aM=# zxn+D@8~#qc%Z?Z8%CK}bvofS|oxq}T5UQ+xnKNti{UQ}t0SSGeJ-|;KaVPiTn!$D# zL+q-_@!G2XUMxAWCgA4Ank=>^IV6P(6_nehia?1Rs_>Xx{)d_;%U0zDZfLEyicvU- zONSSyw`KZ01B$fsRJhVfuaFoJ7haBJ@(VP}+ML4tLM{L|R&yp5@ww)@- z0&`dEFePj4Hy=qZ{Vn?0QcnhE|C{Ne(w&zu`_xmpTy)=nd| zr&!JnlXEd*qj0f#&_?&QnyK?G^|0k#QlZ5)PVz`s|F%}BxorAm>A~1%1|@0jrmCh&6Yy~&LX!!j&x!42wZNjXC^6g?z<>cg zw5FgHY(h>dsoIJ}Cj~nE2r^3dB+!^)eKJ>o*$A}Fv^sA($j9gbwXI*fuVk4W-<6Bf z_hISPM`(o~KXzyo9gvp1yp_K)IWRMHVXJ;P9LWpOR3KjU-1G9*3uA`fb2chay; zA)mRq`!ojJy7R)Qdg=2+$8v7}WE~rsZvzRjbdTJnSLC$n#@jiF-kY$SSUcn;Zv}df z;+ZT2g^AoFyeqaf3i1{|Un93vmK+732Qxp9qNOqNE`ure_6lJipW--}akAi@V-8bI zY8jYc5Ink~7;t`7Kl&3&mupPZr@MJrZ*9HV=%M_TD}&AbT!yx$kDa?pw&c0iidHt} z2{y)#mA^+dw+VBbXN1oe5^909>qbSwv!eXss94b>`^sx3549Vb5|!!hL9SKEAagI) zZ9iV(I%pqML;{lo)a;ibzUvJ1tcWA{L3sGw@*9CWQ~rT)W9}>W;Z0YyKdNI zCHB*}&ORUh`j9#uam3QGKmcTO3*{p*OnEz}xM3{CN8E$V%`;%IE^L_II_OM~alzWZ^BHc1dy{D@J_F zA-KEQ*gc~3ME3?NueG#%aSmur-=={liZkUrbh=T1EZf>UUNbYU@&lo;EOAo`2~$$9 z^uDNsOy1EX;{sZ);K#1uM>av{@%7=JLVY>6gzMrj6V(g)gyena|~LEhub{to`DiwOJFtKa`=T+)ZR8 zyy4#FDEF;8YopCGLhFWa@t3GRw@Dp>2WRsFm^}OY0aJ02K&P`(rrsRLc~`!4m(&MA zkV3g<0Xm`Ad(X`5uNm+9T<^z6h$JT%+1aq1Y}ii>$pH@rcI&y*liyeyf+wt9$^Nwk zqM{juTT(p^uOKF{%LzaxNu?EL4mBr2G?iXvqL-#K^{OwdG@06Fo8va-j$jkqS#L=i z`ZKYaLIpN&4OxYn48p&W^!13crxCtG-r;aUCTTzeMfB0nt2@k}7l|xKvg|lf)s3;?mY-wfOay_a?)#F^^lk z%8|e2=6zO2QG44U?$>#Sx^>z{nCnfZ7>W%O_h(9mo36U<39 z@L#$aaRGrBb=vn|y-w1@%~JtxPZhanoC5^evgyMo=Y!3s}1wG-R!c zrIA`SJsS;*u{eKbj8==g?5b9}W2%l@pUdyxW4)tqb7GQ+>L!Np;}t>^W+vN8H{U68 zaV2^^N%(Fj@hscXb;-*!f--|u3N+kXc8gHj$O zcMN>7m^?)rL&5ib&_{9;FrS90?9RqSJZc8ep-XU+`ppe(Y<%{!0L1wgN}PM3F-y+1 z929?#f{?o{KK7WM#rWGP;LCaJ!R>%+BGT>vRR8%6jxQ&WS|nDIFnDYF@BvC$6Z1*_ zUxj}OOP<2!Z}2`t|6c^}KOE@)b;0Xw?ey>N@Fi;6|GLBbuGJANhmEI~k_vVKfwIuJ zG_eY#aw4li6DO^|}DWzI)Bfk|Q` zv$k!i#+er*GPgxbaV})Z3~CXg?Yv4dB{%1{VWoF-L)hc2x1}~TX*Hol+d_ zM1kv42%y`M-mD?-CeDkM+qy$2{AZP}XhOe^{^Dc<-^|&O?Gpoe$}!(nvqPp1)mO^j z5d~zgpNvaRPHr#X+COlZ%M>R>TrC-?JSlZ^bK3l=meKC&V)#fj?@IRvn)ir-?BKO~ zxQ{nb-l!#}1}H0=J?i4=vYMu3e;Tr8kbZ8-qQ%WyD$#WUq2L-w=OY`k=u}84PkcuG zrp(m_Kv^XLN!4khn$%1)H1z5ns@-bELoTD2j?s*FVLWj97m-n;C8(!Bptrc1V9(b6 zJB?=|Qm!80q$bP3?_6yx*v(-5K!IaEMdarGLMbYh+6j5ei2A49RxSPaFS+@MUo|*9 zMzs`X&T0-cDh1=lK_wOA7LToCrlFiJ0gF+q$+bpEehj8)o@j=IpSscMn5tv%xlsrs~RrElTHdD(O!PZC709`?gb?Exb_QZt&Qb z&-p{mvQnq16H?HqHxTdoZkdop^@D}C*j{N*QQ@l`$+2P&`;O3T-lGSTNk;X#6O%J9l5%3b#)Y(f9#n&0f)lWkFm83l&v2|&)AtgoxVSBMrPi_ z(rSN`8ko?xxa=B;q-Q|sb&{}7nOA9CmcCdIXS~X8a(c~um8O2@y$eT&LV?rh^crI< 
z%6Z91$*i`pX?iaIm4GK+WNk%j+iq1Ye$i>eBa@Aj<1%iy8lWC*oHke*0Q@WNU=T|m zQ5J^^kgbX~jui_N&qe2J?^4jf&7m8#SHP1FCtO?e@-!YM>v>&dNc0MeCFX~qo%vWcG{+$qewbg(+_e+Hbz=huv14xRlQAcR!;S_&+aC*9H)p3fG$5P%z% z_zl5fT@xxs@OM%IM-Mnjy&x3X$lg9azgm$DWyVwsgo=gU2V{&r;~jb`|HMhlw+eO#ig4h zQHBBSkl}+vThe2&GFC4E%aPNKA{Ee9Ss9*A7Lr z7`V(Qto@+Lx?eRj>e#@0jTjUE(Za0h>Rqk{2j_9XP*SJQxX9;R^cR_!7L@6Kip)l{v#P~&^ zEY=dkqw^c&!Mj58|MX)0D-ooW{ntE-;U6cq|4pOz5BcwZbgus|Mr<}FHg=94`qn0f z|1Tk^|5L{DM#si!vlHn%SHFNzf139%n{2T(ZG3vFv{*_{hB)^ZMzO!Vjw2~I z`TlH1`YzgP@(an_+{#a)KUJ-2<(4mwBue+`m|s5rCJxumF`O?}E*IyXn|p0IZ@?z* z;BMNbHl8`_Gg#EkG}TO&VFU6`&RWwKt$jKJd2cj1u)BP*qZ>J;bYO+Yhx{IAl);2s zY1+HNZK=1xme$FL(c6rcNN^M`@QRklgBHJ8;q73XXcdN8#vJ;YX0503+D=%;mQgV` ze6#L@d<=gtZuN_p_e1)2$=^xS8Ka3t4E5W$WLjgBb4Xrm=S|2ri7dXD5$w zrMQ~|Bqs9`MY>OM;(yd>ogF=0zrLU7zaNBuhV~De-c1?Y*#?A-c+I zT36`s=gw+>MZvN4rkYoFF8n&Lz-ksbYZfFY90&FS@v6SMn(7_45yhg-Oac;mW+7gy-yM>z?@_v-QeNkKS1PvMI0 z;t>~D^kmH62WcuQ;afy$X^?)XbX-PERyws&oQ~fUVF}G^YQWkhO9vemc4Bzw3&jNRDJ`l$dU^ujd9Uc|Wrn1D03Co84mX9r|o*`qhP2BBgbKHF2iEyDN=6 zeaW!ILg89RC=Zg$*|5R~@oTWj6$C*1eEQY?BcmED?l|n&kRc2}IoUkoamIDqnwc(3 zT2YAfT&ntV8+2%!)7o-(d-ixqM11OGR1L*Om@r{w3(#r%g^_UekheLX#iGLo1MoT~ zCUhRr=P`oYmIU;x3Oh#yar-AuI^v!J>Um%V);(uozoWar3Tsm#hQJ%C5<9i(DMt&s zz>q^Gc5GUE3icimKMIW=-a@Y<1-Co;CK1d-gPhns{c=eM0Aic%5ef~~y4rk{tDx6H zbqG#M{e&a6`R@L0AFY@HK;M`PwkWh!UKBZ|&ag3f6GMnx#Vyo~PZMK%cRusCyfL(I z5pC4V4;;;4u9m=RX8^(Mx~@M!TZash_)E`KxTA2c4uMEa!= z_-x2gAjGLGgQJW?667wM&)O+oR!KaiHe!5knbuz2(>e0Jl5VdiUm*uuGG6V`cccZhxb7|Y zYQmsKn#1l=g5eM{1YTJ>%EU!#*O-@l~m`JN^O+-$LSZ&N>eJ!E( zk}CKZUD*_%sLETl)JPF`r8YF~6gm|qG9=ki;bD>HrX%3XXV5gEPh>JZYem&1gl-?QUJs{_`!U!7bs=0Mz{jf0mUzE!j4 zMBiCSe|v{-Q+Giln=^S+t260^Lo`(_$}MQgrkplPv!%?7-hr~VF0VIgT6<`Q(|T1E zO*I{N@cEoN{!FPpoSYmTNS7%CFi15j2x?jt(tHrO7%G`PLXp`PG3Rlzp*fLtrVCaQ zMOD_XT&VTe)TzLQtKYe^!aAJcnxH zj_%<&vJBST<$zNprEQ^3zs<0PWUHzabA@&nELQZ$>M~~m9U=-Nn#5TX?G1BLeOH?a zJZcsF>c_S^*7>Zx+It}Kek1xp2c%vz>rmkl6RO8af+BNoNZ$La3=i;YJTt%+=6%gF zZ)B1VpLv6yl5 zt02OOFNES26or@@hvUFF8m8eYYcwU2HxQnhq^B1>0MCrH^DKkC}Sm7pBqVZ=F zX0Q+Q&Av%Bcb8Uiqs>H*7hP-j8ZYjZ9CuN!v;2E)yyUwtN`FGCLoYpn4db5evRV7dY_o2*pVK8eY{s%8P4I` z(O!$T^rSj3>PZnHB`@08m?UYuZx@4j>(l%{)m?R1l-<)_q)|dxx~02IKnW?4PU-HJ z?v)a05EP`llu$|<2>}5?Qc7tF=|=b#-}vpS@WS_>%eB|VA8^k-=b1Az&zW=Pv`ldL zU^Nid$vGNS0f)%6{OnMhmacr!-a^hO5q-g+R>w6H(je}lbS(xM6Vs&`@d|Z~L3d|P zKx*`o37ib0gRzX>BH1KB1h3y8;>>UT(-Tn=Jle;5>K+_vVQR>Z(Z(uok|{*WWV@(7&ZSNr^DTu}cs!UC=u1zNjNN zZrSZ?(Oo>ANG?#7lgx6`j{YqR&3#FBH4nag9w{YX<^7X>{q|RpTehlr1AB5iK}?}s zHQgFF5zt)=xTxlYgE%RR=*oHV1dNvq1i9VJL|5Yr^jrMa?TPP>U8NlAw;NC*K9pLN zJpqOu$OM!vaQTc^>j(fd4hoV<$8@n*cX$F^@~+b&)P~(>hV@smxAGCpL6P>IPR`-j zBKbZ%f7Qn%lMcU>z|mG-=^<5tWZy)n0q{WbZ%Ftj@Dn3}5)H`930PztO0J z0dt*S@7?CU1GaB~xcpIweyr@AIi0`wV&mIl9Td8j!}La-iD19wuT3ItQ%q^Mzm3c9 zA`)jGMrobgYF?cc51!PCU8lRR;aXKkVMaw%vKV4L+F8k6Ih3@x_1LCjaChKo5$0OK z#Qp>ZKKR1;7t;QD&ORYx=w(_Knp^s#9~KwwXx=W|#BW$InU;UPAJuyu?(qq`MJD`H zY|^%z1t;|N6y>4ZCd<22ad&z{REBipQoc?uA4tQ{hz6d#h;N-*8$|I<1Zb%9$n?mw z^8)d;SNc_gx)3dIrmMm}Kz+}}I!XGhVt3`WL3HsVg9VpcZo8-qi8sTBU4)?}SIn>m zwZeMKPQu+I8|8EzRivU6Uwk3~1P<37ig;ly_)=tN8@4bX8K&d!uQxv2Qjg^)>L1hR zxQfnYpBpSHL*%VJC0`_b)O2rJVUWxsTY)2t=;I#l>GD^5Nq1-ic+r(Vhk!V zsm2$j-LcQOSME>A(^Wqig-s*-hVp8s`g{n6^t_p5ZZaOWV!You7lBYFwGt;ILLkz(Ku_oQfqPNnKUu`1n(GQH(bc z5#>w-qtm67n#xr~yRr{GI zO1-K`a`C2qhO!>x@-%geq# z(ujslYDJ#pz2H*$V%qoluEl<+D{a8{z9;D-BcSHIq#@?rgKkfc$K_mwO9>JdkEFG(=omNqHeSYi-I1f-&QZ@B zN9ZCg(kHrh0@>FMsc^9k6(X$<5=q4xe=J_Nk7k*ggBPzIQO=vaXLq;$Z*%^mCoctb zkE+vlRl5Inw>Eawx3;q~{>=sRvGOv~Z-p>wSBvl*?y~uFtDpq4w3nc{K48A&n>=PUYlbUp^jhG>NLU|-oDI#Nu zMf^OPLmzhkiqmzRDayT|>RTZ=MkRKNOO8zgH{ 
z6iO{&HZ1jol%c`rv~TX)3>IK7eQngkou5)usV0!W56cEVv5UM%ceCH$PgiDw->@4v z-;p&-G=(S7%|MPlKatAigrhq++hf~T!fkUf;VOuWdoq|?uf8?!&iN&C-Ud;eOJxd& zn9Unq8_mM)MOG;}a_rYdS$g3WU~@_{0w`fD{dUKZ_9pgMpY)g~e4N}L=etjS1e!9# z*)#?4ZpmD*xt?7lWj=G$@kJlrA8oLd7cf}%-&m}Se6`J6)@x(i*#0yg>-~AYIoHJa zjp$2(@Qu8V7Ha5z=`jbZ6k>D+X#RFH6T&x^va4-~3G?nr~%pTnLIuuD%>g$wWg@E0K1tPl}nR$YVNe6gn1rvW(0}N`{b# zJymv}??tc^jJgyt!fq#W<=Yyz!eDr9i1@$sL+R~ZxL$B$FC}ACJU1hncE@NX>OeP? z5jXCe5{}KcLQGms$|(+0{5TB6Qt9Md*FHHG74io4S{4vFZL;m(&wZ6?UwhrGT%vs4 zv{$L}z{v5(RYa7iCDDrj03ZTvPG_4E%jHbzcfwsg8v$=C*b3EJ6S&`F`w z;w>vi%^^eiO+F0A#k|j=v{_kCFbNj2fN8u-cV3c+N5?F#GTbkZK}e5LH!(3aC9niK zeyPi1y6sRtPa-GaE1y(CqBFhB7ZJDiE^BP#z|?Q&k#iT5+CuM}bXtq{BtNG$#EeP* z&D`ENlTm{eTy!y*^_60nR)3u2lD;&nI9BEmBs|@OkL{vdFx^)^x%T=;k!-W%CSiYi zIwMF9_f3xwVb{n$K(f${lR0%op;zWn9X3~O4y@v0bWQ3LgoH$eo8OohyR1U$DhS@8 z+}c=;=_HXtn30SQc-8hS|4zWCYS%Gbe1kUKbM~J7X_7^zQJsGgc7N6?@c`j(S7kGkywD~ffXTA=` zhE5I-%$<$TvELSD8Ot%&(KZ+ZPzNL^QeF>4JKc-tNZcJ>Xs3%fmW>yu zM^F_Hbc+KSkv)D$x{N!*6!TamH7ztcSYIv5%_=Po)_ovc16@Qvc|fB~ajtkUuVA%= zh5jhR>&83F)RzkU>RDec3&_%NiZ_Z+!9tTh22 z-iDQG_G|kH&`}9ZtgptOrzuZvjNA3eW*y0Pv@dUbX%u#S+i1Sc4QtH;-RbVdqDS7S*`Q%mLth)_WXMW3&-J3W99GH`^s&do zM)`afC9*9igRhTgM0SBA41AAsR#UZ^?W@cuE_&9P4TB=1S2PIUNm$yy5SqfZK2daM zb31GV_lFu!xYy>S84@vW;&5Ds-w{78yxP&QyOm}Bt-W&TJ2EBmE%AM>Cbk{DGElY@}ixt?-b2jR-v6v)b@7?P3aBM_mB-MDBqmCOe)~1i!o_7NRY&;OP*7% zXR8CXIv7q0bYRPUGR)5cayv9?2^O@uM6rDr9i+>6mPseeL88!$sbDK*6k`$Wx*gBn zT0|!AyWH(`wXjYV#6d6{|A^G=ieqFmP z;-82*P#1XuQQi(?Jz=_%DFh=cUo5EP zNEjnosPu<#C2;O=4CHLg##<1eX2bR^Rkf{nzIMLvb}*l*wMNasl*%IBCB{;9f7}k} ziSS6L_mRlt`)YD5|&FJ}WPMh|2%;LMNQK(?xAq z=Y>iNvwOAW%eiPO^0~m-o?jp85&G4t7n6f}Ls(jv5O@31*h-_Bl82;3YcF%aVZh{-6 z6XBm1a?d_Ud=c03N4rvd=p^=mv6acGCq2Cs*4SD_{Te{4=8f8^Bi)7p0&3$%&fj@(Nd@@;p`*UD17W^nK&D$%*uS$_HB?twdV7xt8sQ7Ux)u zy(uxQ=@b;B2(O*zKG!B2qfRhnCc>t{-b!!MPrl!HG+~=ghFQN(bP_?>h*cwb zl~E$3DK?c{GHMm6WVH}C%1)-Xc;z6GzW!SZLul?;jeDGx(_0 zOT^P%a=zklR5`IVQb&y=XvCRCSDbvW$Voaef=&<#k7#Juok&CyyAso_LKR`>VLSFB zH<4rPC*(SN{!eqeh1#85>W&+Jm9aL_zHO$%Z^fvO6LYB$V!jo8*-Bewi0g70l3io- zJ=*Jt5Jw*#1csWsVr^N_T1O2kQ?Ay&Z>eKPjA}x7kv&z`8%~UU!0v$k$X8}$0Ce}> zeN5F`9WNDkH)K_e)!El_5^@%^r=Lm4_<9MNEhjAxFTu3k;T<6@w5HHHx42j_MAG!L0C#85 zk|#B1S~xP=N0Y{b%wF6+G`O?!MA$AX&riPUd%ZyXv}m1q1IJ$3x;Oyci>L%(!vuN39RVdE0$)Ec=14zifL>4r#T& za2t2&tG;ReynM79$E%ZowPd{{dWJp=MeQ)IYN@vU$RJ_uumZ;ZEB0;K;0CLioMY76 z^o+H;WW^I$R7q`qG&kIYQ{0@_x?jDIS4vx*bi?OiLt8;_&hxnSxweh$-60xDY`K1% z^LR1r#A+oM|6U?7C#IJno^SSZk_DV9cCTWRjfq?E1|msWi)Cho@u8%Pn1RUl+sj(6 zOq4+>BBECOJNAR68Af+FL*Dcd4D}U83-x4`9Sp%WdbR|9l3}B6o3AO(=rtL^WN3mr zLZY~~q*{`X9-?$VOhFQv_8A%iaI}-b_Pw26WtXRTj4))kjbhl@zPj z__{Dgap*x=Pxc+1R}xso@(w=Bq5VMKFHEx~gkSS0aDY1cbFnm;jK^tnG6@rBH?8Rgh>b1ot! zN$oqQXpy-_^7Khwur4Ma%bPj}rM)84_CHqw#)3MbfgZ)WhmfM?ajI-qmZsJ7LSX^ z!MFLzwa%^jX9ZR__S)g>Epvc81}oCUH9ItO?Txs>hCq`4*;X9D+@G)v(CTgS+(fC~NsQ0hmT(;F(10B<>dOeEa>v(dY z`+4T%c`%pVu~A@;#38|S1zprI2X3A0?h_e11MI$oppB#`WTL2PBC;o0W`4JC*|{HE z`Yhq_JU9LF-G3>+lIdf%v-#ckX&wfynQI?tXt$z1KhWgh&CZX!^Rf38%8t}_uWO-{ z?x&g{be31i-?~2Fyspusc|ZVbaH|7I7O5uS*Yxl<7jC2Wwq9BEmnv%^AR#vC_p<8~ zL?_za=(eaB_->yJlG}SE=gnEuR~%~6wAY*SpQnWM^ho>gT-PLac*((;oNdykc?kdg zj${lecsq=YAB~Hl;Oj`bX=^6oeb6)S%^g8qjy}$s-95pwej(dlS7YrR?SWhEn%VId z({`Q$gmTF)B~5vs?Xl*LhH2hEbUjQpzrjTBo~i&NNO}!vD3$uX5waw=2~(@5Zv=VL zm7rA9$KeW6xKZ!?Y+XUkxnzFTHl>?koVaDY%s94vi?Yp~9)u|z%NcS7(KqzpRg9_w z5rn8F4keSLF`IUa)-?fSLf*KMZwdAE;;mFaat*(k9ogV$tuA_&AeXdGq<)=kI^5*A zFjz{uit~Q3_rto4d_P}vDg&8ZW4na6?jarSuJTu|#vt2IN9a_sRE&&M7AsEzZ_1{n z#;{1F-#}=AmrDp*jn~js_!74FytCcuWgx9q9iJ>J#_l_5-RsIg!}oRp)53>)OVf`! 
zzi!>GMSoiE?y)ED)G3Ob;)N2&<~65iO+C`;l1*V#MAFW6)1ELxg>aV~h;27#zBo|s zZDbJ76VwbBS&KRJRPT-w2a#WK>QG;^{9G;hlIn8Ss~WED2^~L;+L&utO5T-q*hy+5 zG`^s_f<^KH9|e=(QM36c-u1%mli94+_Oep~Ph3JyBEpt+Pm@rWFosVY;FEX4sOKf2 zr1Lv z(_N$K``p~oE1eSO#FfTP=iqz1?j#?)-dFllrgz}fM&6yF1foU0(9^%s+sRn?UTH)s zSOmwLaQoyj())_(w^PRS3rd+HKx&t-1;+QiCH>#&mZfO(Mm&v!UqrS;yfW)$b@iHy z1@?wSy{s2zAYKCs2{TExbIs~)WFO2=sU>n0onKbZ0fkeS~4Af5fis!K+l{`F1j=_$< ze1lU7vyDDF2{EtKBynuc&D7a>?_ngf6^CuaMm#*71j=W7p-F&;fF3G5pXnF_peuq2 zhenyK&}Sb{y5ok&*6ap>&5+ts(d1wS1%uoR8~yfJEDFa|%_q}0r%qoXjmXFUtJSCe=96_NVlUKM>O~6_)eK_84B@)? zyQV=cWMWq0I6sCGCa1>J z8yNwUXe^#*aU?etxQ>~Q%_+%9`5h{EW(Tc zbXXS-&`Q*JXW@Esh&+r}<7aaBR={1{cJ$7ju(j@FTJTk838Xik;ba`7e(g{nCp*Xq z5_Itu7$b2z`tcs>jmcS84z8-Wo_nt2?A5&gm>7SQ4-k^YkpeGl{`*pV(Wt#M{z+o; zKhj`eu3*E0H%Ea#KWhjN0DFtSK0+(Pr@#D23slsReyqxzP78VI(|-p7kib8u7DuTE zJ1hhKLtF3{1M=TN9RK{guCBR_xudS`4+H!Q>2!VPFQmVSNdGiLks*-23wMK&{vG&J zGTmSzr{D7+(4ZjT!9|UKA^ueg8s~pX`9667hye?o0)Gi1a56!{zu++Io~z&gKnFVr zBI@7)05pQ1M~VsTr+;uV&^NTSvvqL%?MVF(>1=v@NI$E|tb0LF_Z^}P3LpUBq6Po} zA#`2=QGqkbAM^~Z%#Cdv9~e7;C-@~Ol0u+mf)a>-rFaS8^}}#iMDY5m0hkozBVGYL zCi#b^t+k#0WyTO6ir_f;ZwOulQt2SW|09@M9|iz$9RhL%)OO=qmHJn97e!qydFrkLekHgf4glampss)hY5peo*JZ}GR#wJ_j^LQ_;1Z9S0s=7; z1mgOY=Ow{!G+4of$N>NWsxUuUq!TwJ-M`yzUV)g>rGyPaoGf2DXqP|+$<>oTz0LArjL5HVGS{mt&5 zBIeJY9@6U&KmcWe0+WAbcOK^NgU?k6%oSn#l)uUS^-g+T2*UQ=gU11TYOwPH;6Q?a z@}WV_b=1EPM3A2FyD%{IXGQ~CpP1nuQSoeXW zgSn0A|JrK&oH+bG9MVDHW`cV2&Ln*~5o7l&6l%Z|=(REcKn5XV1=N`jMg6Zx23|lp zjggln&j5j(37V}ri~Oa9JQLXgV1Xl(8dwF$aIymWQVT`@-$-&<>NHT)WgE|;eklQr zRxcY4!L*yO!HuLK1gwC>o1kc$+kmab$r_xToR$&YPMyU?yciV(W+o`U^(@ktfUo2) z!zKi$a|8$g05Jso3Mirtit>+GsPEtac{TLKOq>=1HWLKjaTe>#Nw|Jz)u$4iQ@vvS z*}{*(5`LtAfBCwA`hxh7M^>RzD`ada(!V{Mda-B;omx?mpT+x z&I%}u_P>?*>wwV3v|xci&jgLoT}q4p5(IgUgb_l}3TTA>95sG>VB})z@IXk(1Vu7l zOr1*#gxtT+1R-z*^o;2owSK=*|6+RaLWs!(t+HH9uggk=+-nY<+3&Ibsu<)R{EKM; zo!JwypR2{i;2}45LMLmO9KTZjrF?cV9}AtV8StFT`w}uBHv~YgcCLW5d7;SvT=~D) za6@{IOwc@dksNxh{hyTQ;^>g&OK1Zb6M-iD^Dg|04K%cYEP~HGoWb~F@TY|o$n`O7 zs9E$`{Ac)o>y3Yn@9zpLkmWb%b-PxHzefPF&UUc}1v!+NAV$ewdHpx$&$>dA%LL&{ eL16w pythonPath.map("local:" + _).mkString(File.pathSeparator), From 40b30fcf453169534cb53d01cd22236210b13005 Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Wed, 24 Aug 2016 21:14:40 +0200 Subject: [PATCH 083/270] [SPARK-16983][SQL] Add `prettyName` for row_number, dense_rank, percent_rank, cume_dist ## What changes were proposed in this pull request? Currently, two-word window functions like `row_number`, `dense_rank`, `percent_rank`, and `cume_dist` are expressed without `_` in error messages. We had better show the correct names. **Before** ```scala scala> sql("select row_number()").show java.lang.UnsupportedOperationException: Cannot evaluate expression: rownumber() ``` **After** ```scala scala> sql("select row_number()").show java.lang.UnsupportedOperationException: Cannot evaluate expression: row_number() ``` ## How was this patch tested? Pass the Jenkins and manual. Author: Dongjoon Hyun Closes #14571 from dongjoon-hyun/SPARK-16983. 
---
 .../sql/catalyst/expressions/windowExpressions.scala | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
index 6806591f68bc1..b47486f7af7f9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
@@ -477,7 +477,7 @@ object SizeBasedWindowFunction {
     the window partition.""")
 case class RowNumber() extends RowNumberLike {
   override val evaluateExpression = rowNumber
-  override def sql: String = "ROW_NUMBER()"
+  override def prettyName: String = "row_number"
 }

 /**
@@ -497,7 +497,7 @@ case class CumeDist() extends RowNumberLike with SizeBasedWindowFunction {
   // return the same value for equal values in the partition.
   override val frame = SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow)
   override val evaluateExpression = Divide(Cast(rowNumber, DoubleType), Cast(n, DoubleType))
-  override def sql: String = "CUME_DIST()"
+  override def prettyName: String = "cume_dist"
 }

 /**
@@ -628,6 +628,8 @@ abstract class RankLike extends AggregateWindowFunction {
   override val updateExpressions = increaseRank +: increaseRowNumber +: children
   override val evaluateExpression: Expression = rank

+  override def sql: String = s"${prettyName.toUpperCase}()"
+
   def withOrder(order: Seq[Expression]): RankLike
 }

@@ -649,7 +651,6 @@ abstract class RankLike extends AggregateWindowFunction {
 case class Rank(children: Seq[Expression]) extends RankLike {
   def this() = this(Nil)
   override def withOrder(order: Seq[Expression]): Rank = Rank(order)
-  override def sql: String = "RANK()"
 }

 /**
@@ -674,7 +675,7 @@ case class DenseRank(children: Seq[Expression]) extends RankLike {
   override val updateExpressions = increaseRank +: children
   override val aggBufferAttributes = rank +: orderAttrs
   override val initialValues = zero +: orderInit
-  override def sql: String = "DENSE_RANK()"
+  override def prettyName: String = "dense_rank"
 }

 /**
@@ -701,5 +702,5 @@ case class PercentRank(children: Seq[Expression]) extends RankLike with SizeBase
   override val evaluateExpression = If(GreaterThan(n, one),
     Divide(Cast(Subtract(rank, one), DoubleType), Cast(Subtract(n, one), DoubleType)),
     Literal(0.0d))
-  override def sql: String = "PERCENT_RANK()"
+  override def prettyName: String = "percent_rank"
 }

From 891ac2b914fb6f90a62c6fbc0a3960a89d1c1d92 Mon Sep 17 00:00:00 2001
From: Alex Bozarth
Date: Wed, 24 Aug 2016 14:39:41 -0500
Subject: [PATCH 084/270] [SPARK-15083][WEB UI] History Server can OOM due to unlimited TaskUIData

## What changes were proposed in this pull request?

Based on #12990 by tankkyo

Since the History Server currently loads all application's data it can OOM if too many applications have a significant task count. `spark.ui.trimTasks` (default: false) can be set to true to trim tasks by `spark.ui.retainedTasks` (default: 10000)

(This is a "quick fix" to help those running into the problem until an update of how the history server loads app data can be done)

## How was this patch tested?

Manual testing and dev/run-tests

![spark-15083](https://cloud.githubusercontent.com/assets/13952758/17713694/fe82d246-63b0-11e6-9697-b87ea75ff4ef.png)

Author: Alex Bozarth
Closes #14673 from ajbozarth/spark15083.
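Not part of the patch itself, but for readers who only want the knob it introduces: after this change the cap is plain Spark configuration. A hedged usage sketch follows; the key `spark.ui.retainedTasks` and its default of 100000 come from the diff below, while the value 50000 and the application name are illustrative.

```scala
import org.apache.spark.SparkConf

// Assumes a Spark build that already contains this patch.
// Lowering the cap trades per-stage task detail in the UI/History Server for memory.
val conf = new SparkConf()
  .setAppName("retained-tasks-example")   // illustrative name
  .set("spark.ui.retainedTasks", "50000") // default after this patch is 100000

// The equivalent spark-defaults.conf entry would be:
//   spark.ui.retainedTasks   50000
```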
---
 .../spark/internal/config/package.scala | 5 +
 .../spark/ui/jobs/JobProgressListener.scala | 9 +-
 .../org/apache/spark/ui/jobs/StagePage.scala | 12 +-
 .../org/apache/spark/ui/jobs/UIData.scala | 4 +-
 ...stage_task_list_w__sortBy_expectation.json | 130 ++++++-------
 ...tBy_short_names___runtime_expectation.json | 130 ++++++-------
 ...rtBy_short_names__runtime_expectation.json | 182 +++++++++---------
 .../api/v1/AllStagesResourceSuite.scala | 4 +-
 docs/configuration.md | 8 +
 9 files changed, 256 insertions(+), 228 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index be3dac4d24084..47174e4efee81 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -114,4 +114,9 @@ package object config {
   private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
     .stringConf
     .createOptional
+
+  // To limit memory usage, we only track information for a fixed number of tasks
+  private[spark] val UI_RETAINED_TASKS = ConfigBuilder("spark.ui.retainedTasks")
+    .intConf
+    .createWithDefault(100000)
 }
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala b/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala
index 491f7160bc6ac..d3a4f9d3223a7 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala
@@ -19,12 +19,13 @@ package org.apache.spark.ui.jobs

 import java.util.concurrent.TimeoutException

-import scala.collection.mutable.{HashMap, HashSet, ListBuffer}
+import scala.collection.mutable.{HashMap, HashSet, LinkedHashMap, ListBuffer}

 import org.apache.spark._
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.executor.TaskMetrics
 import org.apache.spark.internal.Logging
+import org.apache.spark.internal.config._
 import org.apache.spark.scheduler._
 import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
 import org.apache.spark.storage.BlockManagerId
@@ -93,6 +94,7 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {

   val retainedStages = conf.getInt("spark.ui.retainedStages", SparkUI.DEFAULT_RETAINED_STAGES)
   val retainedJobs = conf.getInt("spark.ui.retainedJobs", SparkUI.DEFAULT_RETAINED_JOBS)
+  val retainedTasks = conf.get(UI_RETAINED_TASKS)

   // We can test for memory leaks by ensuring that collections that track non-active jobs and
   // stages do not grow without bound and that collections for active jobs/stages eventually become
@@ -405,6 +407,11 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {
       taskData.updateTaskMetrics(taskMetrics)
       taskData.errorMessage = errorMessage

+      // If Tasks is too large, remove and garbage collect old tasks
+      if (stageData.taskData.size > retainedTasks) {
+        stageData.taskData = stageData.taskData.drop(stageData.taskData.size - retainedTasks)
+      }
+
       for (
         activeJobsDependentOnStage <- stageIdToActiveJobIds.get(taskEnd.stageId);
         jobId <- activeJobsDependentOnStage;
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
index ea7acc4734dff..a266164587e4e 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
@@ -133,7 +133,14 @@ private[ui] class StagePage(parent: StagesTab) extends WebUIPage("stage") {
StagePage(parent: StagesTab) extends WebUIPage("stage") { val stageData = stageDataOption.get val tasks = stageData.taskData.values.toSeq.sortBy(_.taskInfo.launchTime) - val numCompleted = tasks.count(_.taskInfo.finished) + val numCompleted = stageData.numCompleteTasks + val totalTasks = stageData.numActiveTasks + + stageData.numCompleteTasks + stageData.numFailedTasks + val totalTasksNumStr = if (totalTasks == tasks.size) { + s"$totalTasks" + } else { + s"$totalTasks, showing ${tasks.size}" + } val allAccumulables = progressListener.stageIdToData((stageId, stageAttemptId)).accumulables val externalAccumulables = allAccumulables.values.filter { acc => !acc.internal } @@ -591,7 +598,8 @@ private[ui] class StagePage(parent: StagesTab) extends WebUIPage("stage") {

++ aggMetrics ++ maybeAccumulableTable ++
-        <h4 id="tasks-section">Tasks</h4> ++ taskTableHTML ++ jsForScrollingDownToTaskTable
+        <h4 id="tasks-section">Tasks ({totalTasksNumStr})</h4>
++ + taskTableHTML ++ jsForScrollingDownToTaskTable UIUtils.headerSparkPage(stageHeader, content, parent, showVisualization = true) } } diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala index 20dde7cec827e..66b88129ee414 100644 --- a/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala +++ b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala @@ -18,7 +18,7 @@ package org.apache.spark.ui.jobs import scala.collection.mutable -import scala.collection.mutable.HashMap +import scala.collection.mutable.{HashMap, LinkedHashMap} import org.apache.spark.JobExecutionStatus import org.apache.spark.executor.{ShuffleReadMetrics, ShuffleWriteMetrics, TaskMetrics} @@ -97,7 +97,7 @@ private[spark] object UIData { var description: Option[String] = None var accumulables = new HashMap[Long, AccumulableInfo] - var taskData = new HashMap[Long, TaskUIData] + var taskData = new LinkedHashMap[Long, TaskUIData] var executorSummary = new HashMap[String, ExecutorSummary] def hasInput: Boolean = inputBytes > 0 diff --git a/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_expectation.json index 11eec0b49c40b..96d86b7278ff1 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_expectation.json @@ -39,21 +39,21 @@ } } }, { - "taskId" : 5, - "index" : 5, + "taskId" : 1, + "index" : 1, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.505GMT", + "launchTime" : "2015-05-06T13:03:06.502GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 30, + "executorDeserializeTime" : 31, "executorRunTime" : 350, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 1, + "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -74,26 +74,26 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 3675510, + "writeTime" : 3934399, "recordsWritten" : 10 } } }, { - "taskId" : 1, - "index" : 1, + "taskId" : 5, + "index" : 5, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.502GMT", + "launchTime" : "2015-05-06T13:03:06.505GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 31, + "executorDeserializeTime" : 30, "executorRunTime" : 350, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 0, + "resultSerializationTime" : 1, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -114,22 +114,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 3934399, + "writeTime" : 3675510, "recordsWritten" : 10 } } }, { - "taskId" : 4, - "index" : 4, + "taskId" : 0, + "index" : 0, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.504GMT", + "launchTime" : "2015-05-06T13:03:06.494GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 31, + "executorDeserializeTime" : 32, "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, @@ -137,7 +137,7 @@ "memoryBytesSpilled" : 0, 
"diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 60488, + "bytesRead" : 49294, "recordsRead" : 10000 }, "outputMetrics" : { @@ -154,15 +154,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 83022, + "writeTime" : 3842811, "recordsWritten" : 10 } } }, { - "taskId" : 7, - "index" : 7, + "taskId" : 3, + "index" : 3, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.506GMT", + "launchTime" : "2015-05-06T13:03:06.504GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -173,7 +173,7 @@ "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 0, + "resultSerializationTime" : 2, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -194,13 +194,13 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 2579051, + "writeTime" : 1311694, "recordsWritten" : 10 } } }, { - "taskId" : 3, - "index" : 3, + "taskId" : 4, + "index" : 4, "attempt" : 0, "launchTime" : "2015-05-06T13:03:06.504GMT", "executorId" : "driver", @@ -213,7 +213,7 @@ "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 2, + "resultSerializationTime" : 1, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -234,30 +234,30 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 1311694, + "writeTime" : 83022, "recordsWritten" : 10 } } }, { - "taskId" : 0, - "index" : 0, + "taskId" : 7, + "index" : 7, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.494GMT", + "launchTime" : "2015-05-06T13:03:06.506GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 32, + "executorDeserializeTime" : 31, "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 1, + "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 49294, + "bytesRead" : 60488, "recordsRead" : 10000 }, "outputMetrics" : { @@ -274,7 +274,7 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 3842811, + "writeTime" : 2579051, "recordsWritten" : 10 } } @@ -479,25 +479,25 @@ } } }, { - "taskId" : 16, - "index" : 16, + "taskId" : 9, + "index" : 9, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.001GMT", + "launchTime" : "2015-05-06T13:03:06.915GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 10, + "executorDeserializeTime" : 9, "executorRunTime" : 84, "resultSize" : 2010, - "jvmGcTime" : 5, + "jvmGcTime" : 0, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 70564, + "bytesRead" : 60489, "recordsRead" : 10000 }, "outputMetrics" : { @@ -514,22 +514,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 108320, + "writeTime" : 101664, "recordsWritten" : 10 } } }, { - "taskId" : 19, - "index" : 19, + "taskId" : 16, + "index" : 16, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.012GMT", + "launchTime" : "2015-05-06T13:03:07.001GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 5, + "executorDeserializeTime" : 10, "executorRunTime" : 84, "resultSize" : 2010, "jvmGcTime" : 
5, @@ -554,30 +554,30 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 95788, + "writeTime" : 108320, "recordsWritten" : 10 } } }, { - "taskId" : 9, - "index" : 9, + "taskId" : 19, + "index" : 19, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.915GMT", + "launchTime" : "2015-05-06T13:03:07.012GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 9, + "executorDeserializeTime" : 5, "executorRunTime" : 84, "resultSize" : 2010, - "jvmGcTime" : 0, + "jvmGcTime" : 5, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 60489, + "bytesRead" : 70564, "recordsRead" : 10000 }, "outputMetrics" : { @@ -594,25 +594,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 101664, + "writeTime" : 95788, "recordsWritten" : 10 } } }, { - "taskId" : 20, - "index" : 20, + "taskId" : 14, + "index" : 14, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.014GMT", + "launchTime" : "2015-05-06T13:03:06.925GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 3, + "executorDeserializeTime" : 6, "executorRunTime" : 83, "resultSize" : 2010, - "jvmGcTime" : 5, + "jvmGcTime" : 0, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -634,25 +634,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 97716, + "writeTime" : 95646, "recordsWritten" : 10 } } }, { - "taskId" : 14, - "index" : 14, + "taskId" : 20, + "index" : 20, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.925GMT", + "launchTime" : "2015-05-06T13:03:07.014GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 6, + "executorDeserializeTime" : 3, "executorRunTime" : 83, "resultSize" : 2010, - "jvmGcTime" : 0, + "jvmGcTime" : 5, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -674,7 +674,7 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 95646, + "writeTime" : 97716, "recordsWritten" : 10 } } diff --git a/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names___runtime_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names___runtime_expectation.json index 11eec0b49c40b..96d86b7278ff1 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names___runtime_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names___runtime_expectation.json @@ -39,21 +39,21 @@ } } }, { - "taskId" : 5, - "index" : 5, + "taskId" : 1, + "index" : 1, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.505GMT", + "launchTime" : "2015-05-06T13:03:06.502GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 30, + "executorDeserializeTime" : 31, "executorRunTime" : 350, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 1, + "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -74,26 +74,26 @@ }, 
"shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 3675510, + "writeTime" : 3934399, "recordsWritten" : 10 } } }, { - "taskId" : 1, - "index" : 1, + "taskId" : 5, + "index" : 5, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.502GMT", + "launchTime" : "2015-05-06T13:03:06.505GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 31, + "executorDeserializeTime" : 30, "executorRunTime" : 350, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 0, + "resultSerializationTime" : 1, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -114,22 +114,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 3934399, + "writeTime" : 3675510, "recordsWritten" : 10 } } }, { - "taskId" : 4, - "index" : 4, + "taskId" : 0, + "index" : 0, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.504GMT", + "launchTime" : "2015-05-06T13:03:06.494GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 31, + "executorDeserializeTime" : 32, "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, @@ -137,7 +137,7 @@ "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 60488, + "bytesRead" : 49294, "recordsRead" : 10000 }, "outputMetrics" : { @@ -154,15 +154,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 83022, + "writeTime" : 3842811, "recordsWritten" : 10 } } }, { - "taskId" : 7, - "index" : 7, + "taskId" : 3, + "index" : 3, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.506GMT", + "launchTime" : "2015-05-06T13:03:06.504GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -173,7 +173,7 @@ "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 0, + "resultSerializationTime" : 2, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -194,13 +194,13 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 2579051, + "writeTime" : 1311694, "recordsWritten" : 10 } } }, { - "taskId" : 3, - "index" : 3, + "taskId" : 4, + "index" : 4, "attempt" : 0, "launchTime" : "2015-05-06T13:03:06.504GMT", "executorId" : "driver", @@ -213,7 +213,7 @@ "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 2, + "resultSerializationTime" : 1, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -234,30 +234,30 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 1311694, + "writeTime" : 83022, "recordsWritten" : 10 } } }, { - "taskId" : 0, - "index" : 0, + "taskId" : 7, + "index" : 7, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.494GMT", + "launchTime" : "2015-05-06T13:03:06.506GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 32, + "executorDeserializeTime" : 31, "executorRunTime" : 349, "resultSize" : 2010, "jvmGcTime" : 7, - "resultSerializationTime" : 1, + "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 49294, + "bytesRead" : 60488, "recordsRead" : 10000 }, "outputMetrics" : { @@ -274,7 +274,7 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - 
"writeTime" : 3842811, + "writeTime" : 2579051, "recordsWritten" : 10 } } @@ -479,25 +479,25 @@ } } }, { - "taskId" : 16, - "index" : 16, + "taskId" : 9, + "index" : 9, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.001GMT", + "launchTime" : "2015-05-06T13:03:06.915GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 10, + "executorDeserializeTime" : 9, "executorRunTime" : 84, "resultSize" : 2010, - "jvmGcTime" : 5, + "jvmGcTime" : 0, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 70564, + "bytesRead" : 60489, "recordsRead" : 10000 }, "outputMetrics" : { @@ -514,22 +514,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 108320, + "writeTime" : 101664, "recordsWritten" : 10 } } }, { - "taskId" : 19, - "index" : 19, + "taskId" : 16, + "index" : 16, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.012GMT", + "launchTime" : "2015-05-06T13:03:07.001GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 5, + "executorDeserializeTime" : 10, "executorRunTime" : 84, "resultSize" : 2010, "jvmGcTime" : 5, @@ -554,30 +554,30 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 95788, + "writeTime" : 108320, "recordsWritten" : 10 } } }, { - "taskId" : 9, - "index" : 9, + "taskId" : 19, + "index" : 19, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.915GMT", + "launchTime" : "2015-05-06T13:03:07.012GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 9, + "executorDeserializeTime" : 5, "executorRunTime" : 84, "resultSize" : 2010, - "jvmGcTime" : 0, + "jvmGcTime" : 5, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 60489, + "bytesRead" : 70564, "recordsRead" : 10000 }, "outputMetrics" : { @@ -594,25 +594,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 101664, + "writeTime" : 95788, "recordsWritten" : 10 } } }, { - "taskId" : 20, - "index" : 20, + "taskId" : 14, + "index" : 14, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.014GMT", + "launchTime" : "2015-05-06T13:03:06.925GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 3, + "executorDeserializeTime" : 6, "executorRunTime" : 83, "resultSize" : 2010, - "jvmGcTime" : 5, + "jvmGcTime" : 0, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -634,25 +634,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 97716, + "writeTime" : 95646, "recordsWritten" : 10 } } }, { - "taskId" : 14, - "index" : 14, + "taskId" : 20, + "index" : 20, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:06.925GMT", + "launchTime" : "2015-05-06T13:03:07.014GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 6, + "executorDeserializeTime" : 3, "executorRunTime" : 83, "resultSize" : 2010, - "jvmGcTime" : 0, + "jvmGcTime" : 5, "resultSerializationTime" : 0, 
"memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -674,7 +674,7 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 95646, + "writeTime" : 97716, "recordsWritten" : 10 } } diff --git a/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names__runtime_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names__runtime_expectation.json index 9528d872ef731..e0e9e8140c717 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names__runtime_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_task_list_w__sortBy_short_names__runtime_expectation.json @@ -39,21 +39,21 @@ } } }, { - "taskId" : 86, - "index" : 86, + "taskId" : 41, + "index" : 41, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.374GMT", + "launchTime" : "2015-05-06T13:03:07.200GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 3, + "executorDeserializeTime" : 2, "executorRunTime" : 16, "resultSize" : 2065, "jvmGcTime" : 0, - "resultSerializationTime" : 1, + "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -74,15 +74,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 95848, + "writeTime" : 90765, "recordsWritten" : 10 } } }, { - "taskId" : 41, - "index" : 41, + "taskId" : 43, + "index" : 43, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.200GMT", + "launchTime" : "2015-05-06T13:03:07.204GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -114,22 +114,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 90765, + "writeTime" : 171516, "recordsWritten" : 10 } } }, { - "taskId" : 68, - "index" : 68, + "taskId" : 57, + "index" : 57, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.306GMT", + "launchTime" : "2015-05-06T13:03:07.257GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 2, + "executorDeserializeTime" : 3, "executorRunTime" : 16, "resultSize" : 2065, "jvmGcTime" : 0, @@ -154,7 +154,7 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 101750, + "writeTime" : 96849, "recordsWritten" : 10 } } @@ -199,10 +199,10 @@ } } }, { - "taskId" : 43, - "index" : 43, + "taskId" : 68, + "index" : 68, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.204GMT", + "launchTime" : "2015-05-06T13:03:07.306GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -234,15 +234,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 171516, + "writeTime" : 101750, "recordsWritten" : 10 } } }, { - "taskId" : 57, - "index" : 57, + "taskId" : 86, + "index" : 86, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.257GMT", + "launchTime" : "2015-05-06T13:03:07.374GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -253,7 +253,7 @@ "executorRunTime" : 16, "resultSize" : 2065, "jvmGcTime" : 0, - "resultSerializationTime" : 0, + "resultSerializationTime" : 1, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { @@ -274,15 +274,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 96849, + "writeTime" : 95848, "recordsWritten" : 10 } } }, { 
- "taskId" : 59, - "index" : 59, + "taskId" : 32, + "index" : 32, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.265GMT", + "launchTime" : "2015-05-06T13:03:07.148GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -314,22 +314,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 100753, + "writeTime" : 89603, "recordsWritten" : 10 } } }, { - "taskId" : 32, - "index" : 32, + "taskId" : 39, + "index" : 39, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.148GMT", + "launchTime" : "2015-05-06T13:03:07.180GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 3, + "executorDeserializeTime" : 2, "executorRunTime" : 17, "resultSize" : 2065, "jvmGcTime" : 0, @@ -354,22 +354,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 89603, + "writeTime" : 98748, "recordsWritten" : 10 } } }, { - "taskId" : 87, - "index" : 87, + "taskId" : 42, + "index" : 42, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.374GMT", + "launchTime" : "2015-05-06T13:03:07.203GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 12, + "executorDeserializeTime" : 10, "executorRunTime" : 17, "resultSize" : 2065, "jvmGcTime" : 0, @@ -394,15 +394,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 102159, + "writeTime" : 103713, "recordsWritten" : 10 } } }, { - "taskId" : 99, - "index" : 99, + "taskId" : 51, + "index" : 51, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.426GMT", + "launchTime" : "2015-05-06T13:03:07.242GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -417,7 +417,7 @@ "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 70565, + "bytesRead" : 70564, "recordsRead" : 10000 }, "outputMetrics" : { @@ -434,25 +434,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 133964, + "writeTime" : 96013, "recordsWritten" : 10 } } }, { - "taskId" : 63, - "index" : 63, + "taskId" : 59, + "index" : 59, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.276GMT", + "launchTime" : "2015-05-06T13:03:07.265GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 20, + "executorDeserializeTime" : 3, "executorRunTime" : 17, "resultSize" : 2065, - "jvmGcTime" : 5, + "jvmGcTime" : 0, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -474,25 +474,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 102779, + "writeTime" : 100753, "recordsWritten" : 10 } } }, { - "taskId" : 90, - "index" : 90, + "taskId" : 63, + "index" : 63, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.385GMT", + "launchTime" : "2015-05-06T13:03:07.276GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 2, + "executorDeserializeTime" : 20, "executorRunTime" : 17, "resultSize" : 2065, - "jvmGcTime" : 0, + "jvmGcTime" : 5, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -514,22 +514,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - 
"writeTime" : 98472, + "writeTime" : 102779, "recordsWritten" : 10 } } }, { - "taskId" : 39, - "index" : 39, + "taskId" : 87, + "index" : 87, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.180GMT", + "launchTime" : "2015-05-06T13:03:07.374GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 2, + "executorDeserializeTime" : 12, "executorRunTime" : 17, "resultSize" : 2065, "jvmGcTime" : 0, @@ -554,22 +554,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 98748, + "writeTime" : 102159, "recordsWritten" : 10 } } }, { - "taskId" : 42, - "index" : 42, + "taskId" : 90, + "index" : 90, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.203GMT", + "launchTime" : "2015-05-06T13:03:07.385GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 10, + "executorDeserializeTime" : 2, "executorRunTime" : 17, "resultSize" : 2065, "jvmGcTime" : 0, @@ -594,15 +594,15 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 103713, + "writeTime" : 98472, "recordsWritten" : 10 } } }, { - "taskId" : 51, - "index" : 51, + "taskId" : 99, + "index" : 99, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.242GMT", + "launchTime" : "2015-05-06T13:03:07.426GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", @@ -617,7 +617,7 @@ "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, "inputMetrics" : { - "bytesRead" : 70564, + "bytesRead" : 70565, "recordsRead" : 10000 }, "outputMetrics" : { @@ -634,22 +634,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 96013, + "writeTime" : 133964, "recordsWritten" : 10 } } }, { - "taskId" : 50, - "index" : 50, + "taskId" : 44, + "index" : 44, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.240GMT", + "launchTime" : "2015-05-06T13:03:07.205GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 4, + "executorDeserializeTime" : 3, "executorRunTime" : 18, "resultSize" : 2065, "jvmGcTime" : 0, @@ -674,22 +674,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 90836, + "writeTime" : 98293, "recordsWritten" : 10 } } }, { - "taskId" : 53, - "index" : 53, + "taskId" : 47, + "index" : 47, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.244GMT", + "launchTime" : "2015-05-06T13:03:07.212GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 6, + "executorDeserializeTime" : 2, "executorRunTime" : 18, "resultSize" : 2065, "jvmGcTime" : 0, @@ -714,22 +714,22 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 92835, + "writeTime" : 103015, "recordsWritten" : 10 } } }, { - "taskId" : 44, - "index" : 44, + "taskId" : 50, + "index" : 50, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.205GMT", + "launchTime" : "2015-05-06T13:03:07.240GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 3, + "executorDeserializeTime" : 4, "executorRunTime" : 18, "resultSize" : 2065, "jvmGcTime" : 0, @@ -754,25 
+754,25 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 98293, + "writeTime" : 90836, "recordsWritten" : 10 } } }, { - "taskId" : 80, - "index" : 80, + "taskId" : 52, + "index" : 52, "attempt" : 0, - "launchTime" : "2015-05-06T13:03:07.341GMT", + "launchTime" : "2015-05-06T13:03:07.243GMT", "executorId" : "driver", "host" : "localhost", "taskLocality" : "PROCESS_LOCAL", "speculative" : false, "accumulatorUpdates" : [ ], "taskMetrics" : { - "executorDeserializeTime" : 13, + "executorDeserializeTime" : 5, "executorRunTime" : 18, "resultSize" : 2065, - "jvmGcTime" : 5, + "jvmGcTime" : 0, "resultSerializationTime" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, @@ -794,7 +794,7 @@ }, "shuffleWriteMetrics" : { "bytesWritten" : 1710, - "writeTime" : 98069, + "writeTime" : 89664, "recordsWritten" : 10 } } diff --git a/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala b/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala index f684e16c25f7c..1bfb0c1547ec4 100644 --- a/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala +++ b/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.status.api.v1 import java.util.Date -import scala.collection.mutable.HashMap +import scala.collection.mutable.LinkedHashMap import org.apache.spark.SparkFunSuite import org.apache.spark.scheduler.{StageInfo, TaskInfo, TaskLocality} @@ -28,7 +28,7 @@ import org.apache.spark.ui.jobs.UIData.{StageUIData, TaskUIData} class AllStagesResourceSuite extends SparkFunSuite { def getFirstTaskLaunchTime(taskLaunchTimes: Seq[Long]): Option[Date] = { - val tasks = new HashMap[Long, TaskUIData] + val tasks = new LinkedHashMap[Long, TaskUIData] taskLaunchTimes.zipWithIndex.foreach { case (time, idx) => tasks(idx.toLong) = TaskUIData( new TaskInfo(idx, idx, 1, time, "", "", TaskLocality.ANY, false), None) diff --git a/docs/configuration.md b/docs/configuration.md index 4bda464b98bf6..2f801961050e1 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -626,6 +626,14 @@ Apart from these, the following properties are also available, and may be useful collecting. + + spark.ui.retainedTasks + 100000 + + How many tasks the Spark UI and status APIs remember before garbage + collecting. + + spark.worker.ui.retainedExecutors 1000 From 29952ed096fd2a0a19079933ff691671d6f00835 Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Wed, 24 Aug 2016 22:16:20 +0200 Subject: [PATCH 085/270] [SPARK-16216][SQL] Read/write timestamps and dates in ISO 8601 and dateFormat/timestampFormat option for CSV and JSON ## What changes were proposed in this pull request? ### Default - ISO 8601 Currently, CSV datasource is writing `Timestamp` and `Date` as numeric form and JSON datasource is writing both as below: - CSV ``` // TimestampType 1414459800000000 // DateType 16673 ``` - Json ``` // TimestampType 1970-01-01 11:46:40.0 // DateType 1970-01-01 ``` So, for CSV we can't read back what we write and for JSON it becomes ambiguous because the timezone is being missed. So, this PR make both **write** `Timestamp` and `Date` in ISO 8601 formatted string (please refer the [ISO 8601 specification](https://www.w3.org/TR/NOTE-datetime)). 
- For `Timestamp` it becomes as below: (`yyyy-MM-dd'T'HH:mm:ss.SSSZZ`) ``` 1970-01-01T02:00:01.000-01:00 ``` - For `Date` it becomes as below (`yyyy-MM-dd`) ``` 1970-01-01 ``` ### Custom date format option - `dateFormat` This PR also adds the support to write and read dates and timestamps in a formatted string as below: - **DateType** - With `dateFormat` option (e.g. `yyyy/MM/dd`) ``` +----------+ | date| +----------+ |2015/08/26| |2014/10/27| |2016/01/28| +----------+ ``` ### Custom date format option - `timestampFormat` - **TimestampType** - With `dateFormat` option (e.g. `dd/MM/yyyy HH:mm`) ``` +----------------+ | date| +----------------+ |2015/08/26 18:00| |2014/10/27 18:30| |2016/01/28 20:00| +----------------+ ``` ## How was this patch tested? Unit tests were added in `CSVSuite` and `JsonSuite`. For JSON, existing tests cover the default cases. Author: hyukjinkwon Closes #14279 from HyukjinKwon/SPARK-16216-json-csv. --- python/pyspark/sql/readwriter.py | 56 +++++-- python/pyspark/sql/streaming.py | 30 +++- .../apache/spark/sql/DataFrameReader.scala | 18 +- .../apache/spark/sql/DataFrameWriter.scala | 12 ++ .../datasources/csv/CSVInferSchema.scala | 42 ++--- .../datasources/csv/CSVOptions.scala | 15 +- .../datasources/csv/CSVRelation.scala | 43 ++++- .../datasources/json/JSONOptions.scala | 9 + .../datasources/json/JacksonGenerator.scala | 13 +- .../datasources/json/JacksonParser.scala | 27 ++- .../datasources/json/JsonFileFormat.scala | 5 +- .../sql/streaming/DataStreamReader.scala | 19 ++- .../datasources/csv/CSVInferSchemaSuite.scala | 4 +- .../execution/datasources/csv/CSVSuite.scala | 157 +++++++++++++++++- .../datasources/csv/CSVTypeCastSuite.scala | 17 +- .../datasources/json/JsonSuite.scala | 67 +++++++- .../datasources/json/TestJsonData.scala | 6 + .../sources/JsonHadoopFsRelationSuite.scala | 4 + 18 files changed, 454 insertions(+), 90 deletions(-) diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py index 64de33e8ec0a8..3da6f497e9f12 100644 --- a/python/pyspark/sql/readwriter.py +++ b/python/pyspark/sql/readwriter.py @@ -156,7 +156,7 @@ def load(self, path=None, format=None, schema=None, **options): def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, - mode=None, columnNameOfCorruptRecord=None): + mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None): """ Loads a JSON file (one object per line) or an RDD of Strings storing JSON objects (one object per record) and returns the result as a :class`DataFrame`. @@ -198,6 +198,14 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. + :param dateFormat: sets the string that indicates a date format. Custom date formats + follow the formats at ``java.text.SimpleDateFormat``. This + applies to date type. If None is set, it uses the + default value value, ``yyyy-MM-dd``. + :param timestampFormat: sets the string that indicates a timestamp format. Custom date + formats follow the formats at ``java.text.SimpleDateFormat``. + This applies to timestamp type. If None is set, it uses the + default value value, ``yyyy-MM-dd'T'HH:mm:ss.SSSZZ``. 
>>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes @@ -213,7 +221,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, - mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord) + mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, + timestampFormat=timestampFormat) if isinstance(path, basestring): path = [path] if type(path) == list: @@ -285,8 +294,8 @@ def text(self, paths): def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, - negativeInf=None, dateFormat=None, maxColumns=None, maxCharsPerColumn=None, - maxMalformedLogPerPartition=None, mode=None): + negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, + maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None): """Loads a CSV file and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if @@ -327,9 +336,12 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This - applies to both date type and timestamp type. By default, it is None - which means trying to parse times and date by - ``java.sql.Timestamp.valueOf()`` and ``java.sql.Date.valueOf()``. + applies to date type. If None is set, it uses the + default value value, ``yyyy-MM-dd``. + :param timestampFormat: sets the string that indicates a timestamp format. Custom date + formats follow the formats at ``java.text.SimpleDateFormat``. + This applies to timestamp type. If None is set, it uses the + default value value, ``yyyy-MM-dd'T'HH:mm:ss.SSSZZ``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given @@ -356,7 +368,8 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, - dateFormat=dateFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, + dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, + maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode) if isinstance(path, basestring): path = [path] @@ -571,7 +584,7 @@ def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options) self._jwrite.saveAsTable(name) @since(1.4) - def json(self, path, mode=None, compression=None): + def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None): """Saves the content of the :class:`DataFrame` in JSON format at the specified path. 
:param path: the path in any Hadoop supported file system @@ -584,11 +597,20 @@ def json(self, path, mode=None, compression=None): :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). + :param dateFormat: sets the string that indicates a date format. Custom date formats + follow the formats at ``java.text.SimpleDateFormat``. This + applies to date type. If None is set, it uses the + default value value, ``yyyy-MM-dd``. + :param timestampFormat: sets the string that indicates a timestamp format. Custom date + formats follow the formats at ``java.text.SimpleDateFormat``. + This applies to timestamp type. If None is set, it uses the + default value value, ``yyyy-MM-dd'T'HH:mm:ss.SSSZZ``. >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) - self._set_opts(compression=compression) + self._set_opts( + compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat) self._jwrite.json(path) @since(1.4) @@ -634,7 +656,8 @@ def text(self, path, compression=None): @since(2.0) def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None, - header=None, nullValue=None, escapeQuotes=None, quoteAll=None): + header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None, + timestampFormat=None): """Saves the content of the :class:`DataFrame` in CSV format at the specified path. :param path: the path in any Hadoop supported file system @@ -666,12 +689,21 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=No the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. + :param dateFormat: sets the string that indicates a date format. Custom date formats + follow the formats at ``java.text.SimpleDateFormat``. This + applies to date type. If None is set, it uses the + default value value, ``yyyy-MM-dd``. + :param timestampFormat: sets the string that indicates a timestamp format. Custom date + formats follow the formats at ``java.text.SimpleDateFormat``. + This applies to timestamp type. If None is set, it uses the + default value value, ``yyyy-MM-dd'T'HH:mm:ss.SSSZZ``. >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header, - nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll) + nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll, + dateFormat=dateFormat, timestampFormat=timestampFormat) self._jwrite.csv(path) @since(1.5) diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py index a364555003027..3761d2b1994f0 100644 --- a/python/pyspark/sql/streaming.py +++ b/python/pyspark/sql/streaming.py @@ -338,7 +338,8 @@ def load(self, path=None, format=None, schema=None, **options): def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, - mode=None, columnNameOfCorruptRecord=None): + mode=None, columnNameOfCorruptRecord=None, dateFormat=None, + timestampFormat=None): """ Loads a JSON file stream (one object per line) and returns a :class`DataFrame`. 
@@ -381,6 +382,14 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. + :param dateFormat: sets the string that indicates a date format. Custom date formats + follow the formats at ``java.text.SimpleDateFormat``. This + applies to date type. If None is set, it uses the + default value value, ``yyyy-MM-dd``. + :param timestampFormat: sets the string that indicates a timestamp format. Custom date + formats follow the formats at ``java.text.SimpleDateFormat``. + This applies to timestamp type. If None is set, it uses the + default value value, ``yyyy-MM-dd'T'HH:mm:ss.SSSZZ``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming @@ -393,7 +402,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, - mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord) + mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, + timestampFormat=timestampFormat) if isinstance(path, basestring): return self._df(self._jreader.json(path)) else: @@ -450,8 +460,8 @@ def text(self, path): def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, - negativeInf=None, dateFormat=None, maxColumns=None, maxCharsPerColumn=None, - maxMalformedLogPerPartition=None, mode=None): + negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, + maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None): """Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if @@ -494,9 +504,12 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This - applies to both date type and timestamp type. By default, it is None - which means trying to parse times and date by - ``java.sql.Timestamp.valueOf()`` and ``java.sql.Date.valueOf()``. + applies to date type. If None is set, it uses the + default value value, ``yyyy-MM-dd``. + :param timestampFormat: sets the string that indicates a timestamp format. Custom date + formats follow the formats at ``java.text.SimpleDateFormat``. + This applies to timestamp type. If None is set, it uses the + default value value, ``yyyy-MM-dd'T'HH:mm:ss.SSSZZ``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. 
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given @@ -521,7 +534,8 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, - dateFormat=dateFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, + dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, + maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode) if isinstance(path, basestring): return self._df(self._jreader.csv(path)) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala index e23dacc7a1c05..c060091c7fc38 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala @@ -280,7 +280,14 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging { *
  • `columnNameOfCorruptRecord` (default is the value specified in * `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string * created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.
  • + *
  • `dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format. + * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to + * date type.
  • + *
  • `timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that + * indicates a timestamp format. Custom date formats follow the formats at + * `java.text.SimpleDateFormat`. This applies to timestamp type.
  • * + * * @since 2.0.0 */ @scala.annotation.varargs @@ -374,10 +381,13 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging { * value. *
  • `negativeInf` (default `-Inf`): sets the string representation of a negative infinity * value.
  • - *
  • `dateFormat` (default `null`): sets the string that indicates a date format. Custom date - * formats follow the formats at `java.text.SimpleDateFormat`. This applies to both date type - * and timestamp type. By default, it is `null` which means trying to parse times and date by - * `java.sql.Timestamp.valueOf()` and `java.sql.Date.valueOf()`.
  • + *
  • `dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format. + * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to + * date type.
  • + *
  • `timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that + * indicates a timestamp format. Custom date formats follow the formats at + * `java.text.SimpleDateFormat`. This applies to timestamp type.
  • + * `java.sql.Timestamp.valueOf()` and `java.sql.Date.valueOf()` or ISO 8601 format. *
  • `maxColumns` (default `20480`): defines a hard limit of how many columns * a record can have.
  • *
  • `maxCharsPerColumn` (default `1000000`): defines the maximum number of characters allowed diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala index 44a9f312bd76c..a9049a60f25e5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala @@ -483,6 +483,12 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) { *
  • `compression` (default `null`): compression codec to use when saving to file. This can be * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`, * `snappy` and `deflate`).
  • + *
  • `dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format. + * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to + * date type.
  • + *
  • `timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that + * indicates a timestamp format. Custom date formats follow the formats at + * `java.text.SimpleDateFormat`. This applies to timestamp type.
  • * * @since 1.4.0 */ @@ -575,6 +581,12 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) { *
  • `compression` (default `null`): compression codec to use when saving to file. This can be * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`, * `snappy` and `deflate`).
  • + *
  • `dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format. + * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to + * date type.
  • + *
  • `timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that + * indicates a timestamp format. Custom date formats follow the formats at + * `java.text.SimpleDateFormat`. This applies to timestamp type.
  • * * @since 2.0.0 */ diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala index de3d889621b7d..f1b4c11878a94 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala @@ -139,20 +139,14 @@ private[csv] object CSVInferSchema { } private def tryParseTimestamp(field: String, options: CSVOptions): DataType = { - if (options.dateFormat != null) { - // This case infers a custom `dataFormat` is set. - if ((allCatch opt options.dateFormat.parse(field)).isDefined) { - TimestampType - } else { - tryParseBoolean(field, options) - } - } else { + // This case infers a custom `dataFormat` is set. + if ((allCatch opt options.timestampFormat.parse(field)).isDefined) { + TimestampType + } else if ((allCatch opt DateTimeUtils.stringToTime(field)).isDefined) { // We keep this for backwords competibility. - if ((allCatch opt DateTimeUtils.stringToTime(field)).isDefined) { - TimestampType - } else { - tryParseBoolean(field, options) - } + TimestampType + } else { + tryParseBoolean(field, options) } } @@ -277,18 +271,24 @@ private[csv] object CSVTypeCast { val value = new BigDecimal(datum.replaceAll(",", "")) Decimal(value, dt.precision, dt.scale) } - case _: TimestampType if options.dateFormat != null => - // This one will lose microseconds parts. - // See https://issues.apache.org/jira/browse/SPARK-10681. - options.dateFormat.parse(datum).getTime * 1000L case _: TimestampType => // This one will lose microseconds parts. // See https://issues.apache.org/jira/browse/SPARK-10681. - DateTimeUtils.stringToTime(datum).getTime * 1000L - case _: DateType if options.dateFormat != null => - DateTimeUtils.millisToDays(options.dateFormat.parse(datum).getTime) + Try(options.timestampFormat.parse(datum).getTime * 1000L) + .getOrElse { + // If it fails to parse, then tries the way used in 2.0 and 1.x for backwards + // compatibility. + DateTimeUtils.stringToTime(datum).getTime * 1000L + } case _: DateType => - DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(datum).getTime) + // This one will lose microseconds parts. + // See https://issues.apache.org/jira/browse/SPARK-10681.x + Try(DateTimeUtils.millisToDays(options.dateFormat.parse(datum).getTime)) + .getOrElse { + // If it fails to parse, then tries the way used in 2.0 and 1.x for backwards + // compatibility. 
+ DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(datum).getTime) + } case _: StringType => UTF8String.fromString(datum) case _ => throw new RuntimeException(s"Unsupported type: ${castType.typeName}") } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala index 10fe541a2c575..364d7c831eb44 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala @@ -18,7 +18,8 @@ package org.apache.spark.sql.execution.datasources.csv import java.nio.charset.StandardCharsets -import java.text.SimpleDateFormat + +import org.apache.commons.lang3.time.FastDateFormat import org.apache.spark.internal.Logging import org.apache.spark.sql.execution.datasources.{CompressionCodecs, ParseModes} @@ -101,11 +102,13 @@ private[csv] class CSVOptions(@transient private val parameters: Map[String, Str name.map(CompressionCodecs.getCodecClassName) } - // Share date format object as it is expensive to parse date pattern. - val dateFormat: SimpleDateFormat = { - val dateFormat = parameters.get("dateFormat") - dateFormat.map(new SimpleDateFormat(_)).orNull - } + // Uses `FastDateFormat` which can be direct replacement for `SimpleDateFormat` and thread-safe. + val dateFormat: FastDateFormat = + FastDateFormat.getInstance(parameters.getOrElse("dateFormat", "yyyy-MM-dd")) + + val timestampFormat: FastDateFormat = + FastDateFormat.getInstance( + parameters.getOrElse("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSZZ")) val maxColumns = getInt("maxColumns", 20480) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala index de2d633c0bcf4..33b170bc31f62 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVRelation.scala @@ -30,6 +30,7 @@ import org.apache.spark.rdd.RDD import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.GenericMutableRow +import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.datasources.{OutputWriter, OutputWriterFactory, PartitionedFile, WriterContainer} import org.apache.spark.sql.types._ @@ -187,6 +188,14 @@ private[csv] class CsvOutputWriter( // create the Generator without separator inserted between 2 records private[this] val text = new Text() + // A `ValueConverter` is responsible for converting a value of an `InternalRow` to `String`. + // When the value is null, this converter should not be called. 
+ private type ValueConverter = (InternalRow, Int) => String + + // `ValueConverter`s for all values in the fields of the schema + private val valueConverters: Array[ValueConverter] = + dataSchema.map(_.dataType).map(makeConverter).toArray + private val recordWriter: RecordWriter[NullWritable, Text] = { new TextOutputFormat[NullWritable, Text]() { override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = { @@ -203,18 +212,40 @@ private[csv] class CsvOutputWriter( private var records: Long = 0L private val csvWriter = new LineCsvWriter(params, dataSchema.fieldNames.toSeq) - private def rowToString(row: Seq[Any]): Seq[String] = row.map { field => - if (field != null) { - field.toString - } else { - params.nullValue + private def rowToString(row: InternalRow): Seq[String] = { + var i = 0 + val values = new Array[String](row.numFields) + while (i < row.numFields) { + if (!row.isNullAt(i)) { + values(i) = valueConverters(i).apply(row, i) + } else { + values(i) = params.nullValue + } + i += 1 } + values + } + + private def makeConverter(dataType: DataType): ValueConverter = dataType match { + case DateType => + (row: InternalRow, ordinal: Int) => + params.dateFormat.format(DateTimeUtils.toJavaDate(row.getInt(ordinal))) + + case TimestampType => + (row: InternalRow, ordinal: Int) => + params.timestampFormat.format(DateTimeUtils.toJavaTimestamp(row.getLong(ordinal))) + + case udt: UserDefinedType[_] => makeConverter(udt.sqlType) + + case dt: DataType => + (row: InternalRow, ordinal: Int) => + row.get(ordinal, dt).toString } override def write(row: Row): Unit = throw new UnsupportedOperationException("call writeInternal") override protected[sql] def writeInternal(row: InternalRow): Unit = { - csvWriter.writeRow(rowToString(row.toSeq(dataSchema)), records == 0L && params.headerFlag) + csvWriter.writeRow(rowToString(row), records == 0L && params.headerFlag) records += 1 if (records % FLUSH_BATCH_SIZE == 0) { flush() diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala index 66f1126fb9ae6..02d211d04265e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala @@ -18,6 +18,7 @@ package org.apache.spark.sql.execution.datasources.json import com.fasterxml.jackson.core.{JsonFactory, JsonParser} +import org.apache.commons.lang3.time.FastDateFormat import org.apache.spark.internal.Logging import org.apache.spark.sql.execution.datasources.{CompressionCodecs, ParseModes} @@ -53,6 +54,14 @@ private[sql] class JSONOptions( private val parseMode = parameters.getOrElse("mode", "PERMISSIVE") val columnNameOfCorruptRecord = parameters.get("columnNameOfCorruptRecord") + // Uses `FastDateFormat` which can be direct replacement for `SimpleDateFormat` and thread-safe. + val dateFormat: FastDateFormat = + FastDateFormat.getInstance(parameters.getOrElse("dateFormat", "yyyy-MM-dd")) + + val timestampFormat: FastDateFormat = + FastDateFormat.getInstance( + parameters.getOrElse("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSZZ")) + // Parse mode flags if (!ParseModes.isValidMode(parseMode)) { logWarning(s"$parseMode is not a valid parse mode. 
Using ${ParseModes.DEFAULT}.") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonGenerator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonGenerator.scala index 23f4a55491d28..270e7fbd3c137 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonGenerator.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonGenerator.scala @@ -26,7 +26,10 @@ import org.apache.spark.sql.catalyst.expressions.SpecializedGetters import org.apache.spark.sql.catalyst.util.{ArrayData, DateTimeUtils, MapData} import org.apache.spark.sql.types._ -private[sql] class JacksonGenerator(schema: StructType, writer: Writer) { +private[sql] class JacksonGenerator( + schema: StructType, + writer: Writer, + options: JSONOptions = new JSONOptions(Map.empty[String, String])) { // A `ValueWriter` is responsible for writing a field of an `InternalRow` to appropriate // JSON data. Here we are using `SpecializedGetters` rather than `InternalRow` so that // we can directly access data in `ArrayData` without the help of `SpecificMutableRow`. @@ -76,11 +79,15 @@ private[sql] class JacksonGenerator(schema: StructType, writer: Writer) { case TimestampType => (row: SpecializedGetters, ordinal: Int) => - gen.writeString(DateTimeUtils.toJavaTimestamp(row.getLong(ordinal)).toString) + val timestampString = + options.timestampFormat.format(DateTimeUtils.toJavaTimestamp(row.getLong(ordinal))) + gen.writeString(timestampString) case DateType => (row: SpecializedGetters, ordinal: Int) => - gen.writeString(DateTimeUtils.toJavaDate(row.getInt(ordinal)).toString) + val dateString = + options.dateFormat.format(DateTimeUtils.toJavaDate(row.getInt(ordinal))) + gen.writeString(dateString) case BinaryType => (row: SpecializedGetters, ordinal: Int) => diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala index 4ae9376b5a504..359a3e2aa8ad2 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala @@ -20,6 +20,7 @@ package org.apache.spark.sql.execution.datasources.json import java.io.ByteArrayOutputStream import scala.collection.mutable.ArrayBuffer +import scala.util.Try import com.fasterxml.jackson.core._ @@ -204,7 +205,12 @@ class JacksonParser( case VALUE_STRING => // This one will lose microseconds parts. // See https://issues.apache.org/jira/browse/SPARK-10681. - DateTimeUtils.stringToTime(parser.getText).getTime * 1000L + Try(options.timestampFormat.parse(parser.getText).getTime * 1000L) + .getOrElse { + // If it fails to parse, then tries the way used in 2.0 and 1.x for backwards + // compatibility. + DateTimeUtils.stringToTime(parser.getText).getTime * 1000L + } case VALUE_NUMBER_INT => parser.getLongValue * 1000000L @@ -214,13 +220,18 @@ class JacksonParser( (parser: JsonParser) => parseJsonToken(parser, dataType) { case VALUE_STRING => val stringValue = parser.getText - if (stringValue.contains("-")) { - // The format of this string will probably be "yyyy-mm-dd". - DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(parser.getText).getTime) - } else { - // In Spark 1.5.0, we store the data as number of days since epoch in string. - // So, we just convert it to Int. 
- stringValue.toInt + // This one will lose microseconds parts. + // See https://issues.apache.org/jira/browse/SPARK-10681.x + Try(DateTimeUtils.millisToDays(options.dateFormat.parse(parser.getText).getTime)) + .getOrElse { + // If it fails to parse, then tries the way used in 2.0 and 1.x for backwards + // compatibility. + Try(DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(parser.getText).getTime)) + .getOrElse { + // In Spark 1.5.0, we store the data as number of days since epoch in string. + // So, we just convert it to Int. + stringValue.toInt + } } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala index 16150b91d6452..7421314df7aa5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala @@ -83,7 +83,7 @@ class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister { bucketId: Option[Int], dataSchema: StructType, context: TaskAttemptContext): OutputWriter = { - new JsonOutputWriter(path, bucketId, dataSchema, context) + new JsonOutputWriter(path, parsedOptions, bucketId, dataSchema, context) } } } @@ -149,6 +149,7 @@ class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister { private[json] class JsonOutputWriter( path: String, + options: JSONOptions, bucketId: Option[Int], dataSchema: StructType, context: TaskAttemptContext) @@ -156,7 +157,7 @@ private[json] class JsonOutputWriter( private[this] val writer = new CharArrayWriter() // create the Generator without separator inserted between 2 records - private[this] val gen = new JacksonGenerator(dataSchema, writer) + private[this] val gen = new JacksonGenerator(dataSchema, writer, options) private[this] val result = new Text() private val recordWriter: RecordWriter[NullWritable, Text] = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala index 2e606b21bdf30..3ad1125229c97 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala @@ -186,6 +186,12 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo *
   * <li>`columnNameOfCorruptRecord` (default is the value specified in
   * `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
   * created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
+   * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
+   * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
+   * date type.</li>
+   * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
+   * indicates a timestamp format. Custom date formats follow the formats at
+   * `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
   *
   * @since 2.0.0
   */
@@ -228,10 +234,12 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
   * value.</li>
   * <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
   * value.</li>
-   * <li>`dateFormat` (default `null`): sets the string that indicates a date format. Custom date
-   * formats follow the formats at `java.text.SimpleDateFormat`. This applies to both date type
-   * and timestamp type. By default, it is `null` which means trying to parse times and date by
-   * `java.sql.Timestamp.valueOf()` and `java.sql.Date.valueOf()`.</li>
+   * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
+   * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
+   * date type.</li>
+   * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
+   * indicates a timestamp format. Custom date formats follow the formats at
+   * `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
   * <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
   * a record can have.</li>
   * <li>`maxCharsPerColumn` (default `1000000`): defines the maximum number of characters allowed
@@ -258,7 +266,8 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
   *
   * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be
   * considered in every trigger.</li>
   * <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
-   * whether we should merge schemas collected from all Parquet part-files. This will override
+   * whether we should merge schemas collected from all
+   * Parquet part-files. This will override
   * `spark.sql.parquet.mergeSchema`.</li>
  • * * @since 2.0.0 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala index dbe3af49c90c3..5e00f669b8593 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala @@ -60,9 +60,9 @@ class CSVInferSchemaSuite extends SparkFunSuite { } test("Timestamp field types are inferred correctly via custom data format") { - var options = new CSVOptions(Map("dateFormat" -> "yyyy-mm")) + var options = new CSVOptions(Map("timestampFormat" -> "yyyy-mm")) assert(CSVInferSchema.inferField(TimestampType, "2015-08", options) == TimestampType) - options = new CSVOptions(Map("dateFormat" -> "yyyy")) + options = new CSVOptions(Map("timestampFormat" -> "yyyy")) assert(CSVInferSchema.inferField(TimestampType, "2015", options) == TimestampType) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala index 8cd76ddf20f04..2befad6d72eca 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala @@ -22,11 +22,13 @@ import java.nio.charset.UnsupportedCharsetException import java.sql.{Date, Timestamp} import java.text.SimpleDateFormat +import org.apache.commons.lang3.time.FastDateFormat import org.apache.hadoop.io.SequenceFile.CompressionType import org.apache.hadoop.io.compress.GzipCodec import org.apache.spark.SparkException import org.apache.spark.sql.{DataFrame, QueryTest, Row} +import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils} import org.apache.spark.sql.types._ @@ -477,7 +479,7 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils { val options = Map( "header" -> "true", "inferSchema" -> "true", - "dateFormat" -> "dd/MM/yyyy hh:mm") + "timestampFormat" -> "dd/MM/yyyy HH:mm") val results = spark.read .format("csv") .options(options) @@ -485,7 +487,7 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils { .select("date") .collect() - val dateFormat = new SimpleDateFormat("dd/MM/yyyy hh:mm") + val dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm") val expected = Seq(Seq(new Timestamp(dateFormat.parse("26/08/2015 18:00").getTime)), Seq(new Timestamp(dateFormat.parse("27/10/2014 18:30").getTime)), @@ -691,4 +693,155 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils { verifyCars(cars, withHeader = true, checkValues = false) } + + test("Write timestamps correctly in ISO8601 format by default") { + withTempDir { dir => + val iso8601timestampsPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv" + val timestamps = spark.read + .format("csv") + .option("inferSchema", "true") + .option("header", "true") + .option("timestampFormat", "dd/MM/yyyy HH:mm") + .load(testFile(datesFile)) + timestamps.write + .format("csv") + .option("header", "true") + .save(iso8601timestampsPath) + + // This will load back the timestamps as string. 
+ val iso8601Timestamps = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "false") + .load(iso8601timestampsPath) + + val iso8501 = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSSZZ") + val expectedTimestamps = timestamps.collect().map { r => + // This should be ISO8601 formatted string. + Row(iso8501.format(r.toSeq.head)) + } + + checkAnswer(iso8601Timestamps, expectedTimestamps) + } + } + + test("Write dates correctly in ISO8601 format by default") { + withTempDir { dir => + val customSchema = new StructType(Array(StructField("date", DateType, true))) + val iso8601datesPath = s"${dir.getCanonicalPath}/iso8601dates.csv" + val dates = spark.read + .format("csv") + .schema(customSchema) + .option("header", "true") + .option("inferSchema", "false") + .option("dateFormat", "dd/MM/yyyy HH:mm") + .load(testFile(datesFile)) + dates.write + .format("csv") + .option("header", "true") + .save(iso8601datesPath) + + // This will load back the dates as string. + val iso8601dates = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "false") + .load(iso8601datesPath) + + val iso8501 = FastDateFormat.getInstance("yyyy-MM-dd") + val expectedDates = dates.collect().map { r => + // This should be ISO8601 formatted string. + Row(iso8501.format(r.toSeq.head)) + } + + checkAnswer(iso8601dates, expectedDates) + } + } + + test("Roundtrip in reading and writing timestamps") { + withTempDir { dir => + val iso8601timestampsPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv" + val timestamps = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "true") + .load(testFile(datesFile)) + + timestamps.write + .format("csv") + .option("header", "true") + .save(iso8601timestampsPath) + + val iso8601timestamps = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "true") + .load(iso8601timestampsPath) + + checkAnswer(iso8601timestamps, timestamps) + } + } + + test("Write dates correctly with dateFormat option") { + val customSchema = new StructType(Array(StructField("date", DateType, true))) + withTempDir { dir => + // With dateFormat option. + val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.csv" + val datesWithFormat = spark.read + .format("csv") + .schema(customSchema) + .option("header", "true") + .option("dateFormat", "dd/MM/yyyy HH:mm") + .load(testFile(datesFile)) + datesWithFormat.write + .format("csv") + .option("header", "true") + .option("dateFormat", "yyyy/MM/dd") + .save(datesWithFormatPath) + + // This will load back the dates as string. + val stringDatesWithFormat = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "false") + .load(datesWithFormatPath) + val expectedStringDatesWithFormat = Seq( + Row("2015/08/26"), + Row("2014/10/27"), + Row("2016/01/28")) + + checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat) + } + } + + test("Write timestamps correctly with dateFormat option") { + withTempDir { dir => + // With dateFormat option. + val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv" + val timestampsWithFormat = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "true") + .option("timestampFormat", "dd/MM/yyyy HH:mm") + .load(testFile(datesFile)) + timestampsWithFormat.write + .format("csv") + .option("header", "true") + .option("timestampFormat", "yyyy/MM/dd HH:mm") + .save(timestampsWithFormatPath) + + // This will load back the timestamps as string. 
+ val stringTimestampsWithFormat = spark.read + .format("csv") + .option("header", "true") + .option("inferSchema", "false") + .load(timestampsWithFormatPath) + val expectedStringTimestampsWithFormat = Seq( + Row("2015/08/26 18:00"), + Row("2014/10/27 18:30"), + Row("2016/01/28 20:00")) + + checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat) + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVTypeCastSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVTypeCastSuite.scala index 26b33b24efc3d..3ce643e667ce4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVTypeCastSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVTypeCastSuite.scala @@ -96,13 +96,18 @@ class CSVTypeCastSuite extends SparkFunSuite { assert(CSVTypeCast.castTo("1.00", DoubleType) == 1.0) assert(CSVTypeCast.castTo("true", BooleanType) == true) - val options = CSVOptions("dateFormat", "dd/MM/yyyy hh:mm") + val timestampsOptions = CSVOptions("timestampFormat", "dd/MM/yyyy hh:mm") val customTimestamp = "31/01/2015 00:00" - val expectedTime = options.dateFormat.parse("31/01/2015 00:00").getTime - assert(CSVTypeCast.castTo(customTimestamp, TimestampType, nullable = true, options) == - expectedTime * 1000L) - assert(CSVTypeCast.castTo(customTimestamp, DateType, nullable = true, options) == - DateTimeUtils.millisToDays(expectedTime)) + val expectedTime = timestampsOptions.timestampFormat.parse(customTimestamp).getTime + val castedTimestamp = + CSVTypeCast.castTo(customTimestamp, TimestampType, nullable = true, timestampsOptions) + assert(castedTimestamp == expectedTime * 1000L) + + val customDate = "31/01/2015" + val dateOptions = CSVOptions("dateFormat", "dd/MM/yyyy") + val expectedDate = dateOptions.dateFormat.parse(customDate).getTime + val castedDate = CSVTypeCast.castTo(customTimestamp, DateType, nullable = true, dateOptions) + assert(castedDate == DateTimeUtils.millisToDays(expectedDate)) val timestamp = "2015-01-01 00:00:00" assert(CSVTypeCast.castTo(timestamp, TimestampType) == diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala index 342fd3e82ee06..63a9061210ca6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala @@ -101,15 +101,15 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData { DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType)) val ISO8601Time1 = "1970-01-01T01:00:01.0Z" + val ISO8601Time2 = "1970-01-01T02:00:01-01:00" checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)), enforceCorrectType(ISO8601Time1, TimestampType)) - checkTypePromotion(DateTimeUtils.millisToDays(3601000), - enforceCorrectType(ISO8601Time1, DateType)) - val ISO8601Time2 = "1970-01-01T02:00:01-01:00" checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)), enforceCorrectType(ISO8601Time2, TimestampType)) - checkTypePromotion(DateTimeUtils.millisToDays(10801000), - enforceCorrectType(ISO8601Time2, DateType)) + + val ISO8601Date = "1970-01-01" + checkTypePromotion(DateTimeUtils.millisToDays(32400000), + enforceCorrectType(ISO8601Date, DateType)) } test("Get compatible type") { @@ -1664,4 
+1664,61 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData { assert(df.schema.size === 2) df.collect() } + + test("Write dates correctly with dateFormat option") { + val customSchema = new StructType(Array(StructField("date", DateType, true))) + withTempDir { dir => + // With dateFormat option. + val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json" + val datesWithFormat = spark.read + .schema(customSchema) + .option("dateFormat", "dd/MM/yyyy HH:mm") + .json(datesRecords) + + datesWithFormat.write + .format("json") + .option("dateFormat", "yyyy/MM/dd") + .save(datesWithFormatPath) + + // This will load back the dates as string. + val stringSchema = StructType(StructField("date", StringType, true) :: Nil) + val stringDatesWithFormat = spark.read + .schema(stringSchema) + .json(datesWithFormatPath) + val expectedStringDatesWithFormat = Seq( + Row("2015/08/26"), + Row("2014/10/27"), + Row("2016/01/28")) + + checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat) + } + } + + test("Write timestamps correctly with dateFormat option") { + val customSchema = new StructType(Array(StructField("date", TimestampType, true))) + withTempDir { dir => + // With dateFormat option. + val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json" + val timestampsWithFormat = spark.read + .schema(customSchema) + .option("timestampFormat", "dd/MM/yyyy HH:mm") + .json(datesRecords) + timestampsWithFormat.write + .format("json") + .option("timestampFormat", "yyyy/MM/dd HH:mm") + .save(timestampsWithFormatPath) + + // This will load back the timestamps as string. + val stringSchema = StructType(StructField("date", StringType, true) :: Nil) + val stringTimestampsWithFormat = spark.read + .schema(stringSchema) + .json(timestampsWithFormatPath) + val expectedStringDatesWithFormat = Seq( + Row("2015/08/26 18:00"), + Row("2014/10/27 18:30"), + Row("2016/01/28 20:00")) + + checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat) + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala index f4a3336643869..a400940db924a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala @@ -222,6 +222,12 @@ private[json] trait TestJsonData { spark.sparkContext.parallelize( s"""{"a": 1${"0" * 38}, "b": 92233720368547758070}""" :: Nil) + def datesRecords: RDD[String] = + spark.sparkContext.parallelize( + """{"date": "26/08/2015 18:00"}""" :: + """{"date": "27/10/2014 18:30"}""" :: + """{"date": "28/01/2016 20:00"}""" :: Nil) + lazy val singleRow: RDD[String] = spark.sparkContext.parallelize("""{"a":123}""" :: Nil) def empty: RDD[String] = spark.sparkContext.parallelize(Seq[String]()) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala index d79edee5b1a4c..52486b122a93f 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala @@ -32,6 +32,10 @@ class JsonHadoopFsRelationSuite extends HadoopFsRelationTest { override protected def supportsDataType(dataType: DataType): Boolean = dataType match { 
case _: NullType => false case _: BinaryType => false + // `TimestampType` is disabled because `DatatypeConverter.parseDateTime()` + // in `DateTimeUtils` parses the formatted string wrongly when the date is + // too early. (e.g. "1600-07-13T08:36:32.847"). + case _: TimestampType => false case _: CalendarIntervalType => false case _ => true } From 945c04bcd439e0624232c040df529f12bcc05e13 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Wed, 24 Aug 2016 15:59:09 -0700 Subject: [PATCH 086/270] [MINOR][SPARKR] fix R MLlib parameter documentation ## What changes were proposed in this pull request? Fixed several misplaced param tag - they should be on the spark.* method generics ## How was this patch tested? run knitr junyangq Author: Felix Cheung Closes #14792 from felixcheung/rdocmllib. --- R/pkg/R/mllib.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index a670600ca6938..dfc5a1c7dfdc8 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -444,6 +444,7 @@ setMethod("write.ml", signature(object = "LDAModel", path = "character"), #' @param featureIndex The index of the feature if \code{featuresCol} is a vector column #' (default: 0), no effect otherwise #' @param weightCol The weight column name. +#' @param ... additional arguments passed to the method. #' @return \code{spark.isoreg} returns a fitted Isotonic Regression model #' @rdname spark.isoreg #' @aliases spark.isoreg,SparkDataFrame,formula-method @@ -504,7 +505,6 @@ setMethod("predict", signature(object = "IsotonicRegressionModel"), # Get the summary of an IsotonicRegressionModel model -#' @param ... Other optional arguments to summary of an IsotonicRegressionModel #' @return \code{summary} returns the model's boundaries and prediction as lists #' @rdname spark.isoreg #' @aliases summary,IsotonicRegressionModel-method @@ -1074,6 +1074,7 @@ setMethod("predict", signature(object = "AFTSurvivalRegressionModel"), #' @param k number of independent Gaussians in the mixture model. #' @param maxIter maximum iteration number. #' @param tol the convergence tolerance. +#' @param ... additional arguments passed to the method. #' @aliases spark.gaussianMixture,SparkDataFrame,formula-method #' @return \code{spark.gaussianMixture} returns a fitted multivariate gaussian mixture model. #' @rdname spark.gaussianMixture @@ -1117,7 +1118,6 @@ setMethod("spark.gaussianMixture", signature(data = "SparkDataFrame", formula = # Get the summary of a multivariate gaussian mixture model #' @param object a fitted gaussian mixture model. -#' @param ... currently not used argument(s) passed to the method. #' @return \code{summary} returns the model's lambda, mu, sigma and posterior. #' @aliases spark.gaussianMixture,SparkDataFrame,formula-method #' @rdname spark.gaussianMixture From 18708f76c366c6e01b5865981666e40d8642ac20 Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Wed, 24 Aug 2016 16:00:04 -0700 Subject: [PATCH 087/270] [SPARKR][MINOR] Add more examples to window function docs ## What changes were proposed in this pull request? This PR adds more examples to window function docs to make them more accessible to the users. It also fixes default value issues for `lag` and `lead`. ## How was this patch tested? Manual test, R unit test. Author: Junyang Qian Closes #14779 from junyangq/SPARKR-FixWindowFunctionDocs. 
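For illustration, a minimal sketch of the usage pattern the new examples document (drawn from the @examples blocks added in this patch; `mtcars` and the `am`/`hp`/`mpg` columns are just the sample data used there, not part of the API change):

    df <- createDataFrame(mtcars)
    # Partition by am (transmission) and order by hp (horsepower)
    ws <- orderBy(windowPartitionBy("am"), "hp")
    # With the fixed default, lag(df$mpg) is shorthand for lag(df$mpg, offset = 1)
    out <- select(df, over(lag(df$mpg), ws), df$mpg, df$hp, df$am)
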
--- R/pkg/R/WindowSpec.R | 12 +++++++ R/pkg/R/functions.R | 78 ++++++++++++++++++++++++++++++++++---------- 2 files changed, 72 insertions(+), 18 deletions(-) diff --git a/R/pkg/R/WindowSpec.R b/R/pkg/R/WindowSpec.R index ddd2ef2fcdee5..4ac83c29c6f7e 100644 --- a/R/pkg/R/WindowSpec.R +++ b/R/pkg/R/WindowSpec.R @@ -203,6 +203,18 @@ setMethod("rangeBetween", #' @aliases over,Column,WindowSpec-method #' @family colum_func #' @export +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' +#' # Partition by am (transmission) and order by hp (horsepower) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' +#' # Rank on hp within each partition +#' out <- select(df, over(rank(), ws), df$hp, df$am) +#' +#' # Lag mpg values by 1 row on the partition-and-ordered table +#' out <- select(df, over(lead(df$mpg), ws), df$mpg, df$hp, df$am) +#' } #' @note over since 2.0.0 setMethod("over", signature(x = "Column", window = "WindowSpec"), diff --git a/R/pkg/R/functions.R b/R/pkg/R/functions.R index f042adddef91f..dbf8dd89a204e 100644 --- a/R/pkg/R/functions.R +++ b/R/pkg/R/functions.R @@ -3121,9 +3121,9 @@ setMethod("ifelse", #' @aliases cume_dist,missing-method #' @export #' @examples \dontrun{ -#' df <- createDataFrame(iris) -#' ws <- orderBy(windowPartitionBy("Species"), "Sepal_Length") -#' out <- select(df, over(cume_dist(), ws), df$Sepal_Length, df$Species) +#' df <- createDataFrame(mtcars) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' out <- select(df, over(cume_dist(), ws), df$hp, df$am) #' } #' @note cume_dist since 1.6.0 setMethod("cume_dist", @@ -3148,7 +3148,11 @@ setMethod("cume_dist", #' @family window_funcs #' @aliases dense_rank,missing-method #' @export -#' @examples \dontrun{dense_rank()} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' out <- select(df, over(dense_rank(), ws), df$hp, df$am) +#' } #' @note dense_rank since 1.6.0 setMethod("dense_rank", signature("missing"), @@ -3168,18 +3172,26 @@ setMethod("dense_rank", #' @param x the column as a character string or a Column to compute on. #' @param offset the number of rows back from the current row from which to obtain a value. #' If not specified, the default is 1. -#' @param defaultValue default to use when the offset row does not exist. +#' @param defaultValue (optional) default to use when the offset row does not exist. #' @param ... further arguments to be passed to or from other methods. #' @rdname lag #' @name lag #' @aliases lag,characterOrColumn-method #' @family window_funcs #' @export -#' @examples \dontrun{lag(df$c)} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' +#' # Partition by am (transmission) and order by hp (horsepower) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' +#' # Lag mpg values by 1 row on the partition-and-ordered table +#' out <- select(df, over(lag(df$mpg), ws), df$mpg, df$hp, df$am) +#' } #' @note lag since 1.6.0 setMethod("lag", signature(x = "characterOrColumn"), - function(x, offset, defaultValue = NULL) { + function(x, offset = 1, defaultValue = NULL) { col <- if (class(x) == "Column") { x@jc } else { @@ -3194,25 +3206,35 @@ setMethod("lag", #' lead #' #' Window function: returns the value that is \code{offset} rows after the current row, and -#' NULL if there is less than \code{offset} rows after the current row. For example, -#' an \code{offset} of one will return the next row at any given point in the window partition. 
+#' \code{defaultValue} if there is less than \code{offset} rows after the current row. +#' For example, an \code{offset} of one will return the next row at any given point +#' in the window partition. #' #' This is equivalent to the \code{LEAD} function in SQL. #' -#' @param x Column to compute on -#' @param offset Number of rows to offset -#' @param defaultValue (Optional) default value to use +#' @param x the column as a character string or a Column to compute on. +#' @param offset the number of rows after the current row from which to obtain a value. +#' If not specified, the default is 1. +#' @param defaultValue (optional) default to use when the offset row does not exist. #' #' @rdname lead #' @name lead #' @family window_funcs #' @aliases lead,characterOrColumn,numeric-method #' @export -#' @examples \dontrun{lead(df$c)} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' +#' # Partition by am (transmission) and order by hp (horsepower) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' +#' # Lead mpg values by 1 row on the partition-and-ordered table +#' out <- select(df, over(lead(df$mpg), ws), df$mpg, df$hp, df$am) +#' } #' @note lead since 1.6.0 setMethod("lead", signature(x = "characterOrColumn", offset = "numeric", defaultValue = "ANY"), - function(x, offset, defaultValue = NULL) { + function(x, offset = 1, defaultValue = NULL) { col <- if (class(x) == "Column") { x@jc } else { @@ -3239,7 +3261,15 @@ setMethod("lead", #' @aliases ntile,numeric-method #' @family window_funcs #' @export -#' @examples \dontrun{ntile(1)} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' +#' # Partition by am (transmission) and order by hp (horsepower) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' +#' # Get ntile group id (1-4) for hp +#' out <- select(df, over(ntile(4), ws), df$hp, df$am) +#' } #' @note ntile since 1.6.0 setMethod("ntile", signature(x = "numeric"), @@ -3263,7 +3293,11 @@ setMethod("ntile", #' @family window_funcs #' @aliases percent_rank,missing-method #' @export -#' @examples \dontrun{percent_rank()} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' out <- select(df, over(percent_rank(), ws), df$hp, df$am) +#' } #' @note percent_rank since 1.6.0 setMethod("percent_rank", signature("missing"), @@ -3288,7 +3322,11 @@ setMethod("percent_rank", #' @family window_funcs #' @aliases rank,missing-method #' @export -#' @examples \dontrun{rank()} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' out <- select(df, over(rank(), ws), df$hp, df$am) +#' } #' @note rank since 1.6.0 setMethod("rank", signature(x = "missing"), @@ -3321,7 +3359,11 @@ setMethod("rank", #' @aliases row_number,missing-method #' @family window_funcs #' @export -#' @examples \dontrun{row_number()} +#' @examples \dontrun{ +#' df <- createDataFrame(mtcars) +#' ws <- orderBy(windowPartitionBy("am"), "hp") +#' out <- select(df, over(row_number(), ws), df$hp, df$am) +#' } #' @note row_number since 1.6.0 setMethod("row_number", signature("missing"), From 3a60be4b15a5ab9b6e0c4839df99dac7738aa7fe Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Wed, 24 Aug 2016 16:04:14 -0700 Subject: [PATCH 088/270] [SPARKR][MINOR] Add installation message for remote master mode and improve other messages ## What changes were proposed in this pull request? 
This PR gives informative message to users when they try to connect to a remote master but don't have Spark package in their local machine. As a clarification, for now, automatic installation will only happen if they start SparkR in R console (rather than from sparkr-shell) and connect to local master. In the remote master mode, local Spark package is still needed, but we will not trigger the install.spark function because the versions have to match those on the cluster, which involves more user input. Instead, we here try to provide detailed message that may help the users. Some of the other messages have also been slightly changed. ## How was this patch tested? Manual test. Author: Junyang Qian Closes #14761 from junyangq/SPARK-16579-V1. --- R/pkg/R/install.R | 64 ++++++++++++++++++++++++++++++++--------------- R/pkg/R/sparkR.R | 51 ++++++++++++++++++++++++------------- R/pkg/R/utils.R | 4 +-- 3 files changed, 80 insertions(+), 39 deletions(-) diff --git a/R/pkg/R/install.R b/R/pkg/R/install.R index c6ed88e032a71..69b0a523b84e4 100644 --- a/R/pkg/R/install.R +++ b/R/pkg/R/install.R @@ -70,9 +70,9 @@ install.spark <- function(hadoopVersion = "2.7", mirrorUrl = NULL, localDir = NULL, overwrite = FALSE) { version <- paste0("spark-", packageVersion("SparkR")) hadoopVersion <- tolower(hadoopVersion) - hadoopVersionName <- hadoop_version_name(hadoopVersion) + hadoopVersionName <- hadoopVersionName(hadoopVersion) packageName <- paste(version, "bin", hadoopVersionName, sep = "-") - localDir <- ifelse(is.null(localDir), spark_cache_path(), + localDir <- ifelse(is.null(localDir), sparkCachePath(), normalizePath(localDir, mustWork = FALSE)) if (is.na(file.info(localDir)$isdir)) { @@ -88,12 +88,14 @@ install.spark <- function(hadoopVersion = "2.7", mirrorUrl = NULL, # can use dir.exists(packageLocalDir) under R 3.2.0 or later if (!is.na(file.info(packageLocalDir)$isdir) && !overwrite) { - fmt <- "Spark %s for Hadoop %s is found, and SPARK_HOME set to %s" + fmt <- "%s for Hadoop %s found, with SPARK_HOME set to %s" msg <- sprintf(fmt, version, ifelse(hadoopVersion == "without", "Free build", hadoopVersion), packageLocalDir) message(msg) Sys.setenv(SPARK_HOME = packageLocalDir) return(invisible(packageLocalDir)) + } else { + message("Spark not found in the cache directory. 
Installation will start.") } packageLocalPath <- paste0(packageLocalDir, ".tgz") @@ -102,7 +104,7 @@ install.spark <- function(hadoopVersion = "2.7", mirrorUrl = NULL, if (tarExists && !overwrite) { message("tar file found.") } else { - robust_download_tar(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) + robustDownloadTar(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) } message(sprintf("Installing to %s", localDir)) @@ -116,33 +118,37 @@ install.spark <- function(hadoopVersion = "2.7", mirrorUrl = NULL, invisible(packageLocalDir) } -robust_download_tar <- function(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) { +robustDownloadTar <- function(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) { # step 1: use user-provided url if (!is.null(mirrorUrl)) { msg <- sprintf("Use user-provided mirror site: %s.", mirrorUrl) message(msg) - success <- direct_download_tar(mirrorUrl, version, hadoopVersion, + success <- directDownloadTar(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) - if (success) return() + if (success) { + return() + } else { + message(paste0("Unable to download from mirrorUrl: ", mirrorUrl)) + } } else { - message("Mirror site not provided.") + message("MirrorUrl not provided.") } # step 2: use url suggested from apache website - message("Looking for site suggested from apache website...") - mirrorUrl <- get_preferred_mirror(version, packageName) + message("Looking for preferred site from apache website...") + mirrorUrl <- getPreferredMirror(version, packageName) if (!is.null(mirrorUrl)) { - success <- direct_download_tar(mirrorUrl, version, hadoopVersion, + success <- directDownloadTar(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) if (success) return() } else { - message("Unable to find suggested mirror site.") + message("Unable to find preferred mirror site.") } # step 3: use backup option message("To use backup site...") - mirrorUrl <- default_mirror_url() - success <- direct_download_tar(mirrorUrl, version, hadoopVersion, + mirrorUrl <- defaultMirrorUrl() + success <- directDownloadTar(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) if (success) { return(packageLocalPath) @@ -155,7 +161,7 @@ robust_download_tar <- function(mirrorUrl, version, hadoopVersion, packageName, } } -get_preferred_mirror <- function(version, packageName) { +getPreferredMirror <- function(version, packageName) { jsonUrl <- paste0("http://www.apache.org/dyn/closer.cgi?path=", file.path("spark", version, packageName), ".tgz&as_json=1") @@ -175,10 +181,10 @@ get_preferred_mirror <- function(version, packageName) { mirrorPreferred } -direct_download_tar <- function(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) { +directDownloadTar <- function(mirrorUrl, version, hadoopVersion, packageName, packageLocalPath) { packageRemotePath <- paste0( file.path(mirrorUrl, version, packageName), ".tgz") - fmt <- paste("Downloading Spark %s for Hadoop %s from:\n- %s") + fmt <- "Downloading %s for Hadoop %s from:\n- %s" msg <- sprintf(fmt, version, ifelse(hadoopVersion == "without", "Free build", hadoopVersion), packageRemotePath) message(msg) @@ -192,11 +198,11 @@ direct_download_tar <- function(mirrorUrl, version, hadoopVersion, packageName, !isFail } -default_mirror_url <- function() { +defaultMirrorUrl <- function() { "http://www-us.apache.org/dist/spark" } -hadoop_version_name <- function(hadoopVersion) { +hadoopVersionName <- function(hadoopVersion) { if (hadoopVersion == 
"without") { "without-hadoop" } else if (grepl("^[0-9]+\\.[0-9]+$", hadoopVersion, perl = TRUE)) { @@ -208,7 +214,7 @@ hadoop_version_name <- function(hadoopVersion) { # The implementation refers to appdirs package: https://pypi.python.org/pypi/appdirs and # adapt to Spark context -spark_cache_path <- function() { +sparkCachePath <- function() { if (.Platform$OS.type == "windows") { winAppPath <- Sys.getenv("LOCALAPPDATA", unset = NA) if (is.na(winAppPath)) { @@ -231,3 +237,21 @@ spark_cache_path <- function() { } normalizePath(path, mustWork = FALSE) } + + +installInstruction <- function(mode) { + if (mode == "remote") { + paste0("Connecting to a remote Spark master. ", + "Please make sure Spark package is also installed in this machine.\n", + "- If there is one, set the path in sparkHome parameter or ", + "environment variable SPARK_HOME.\n", + "- If not, you may run install.spark function to do the job. ", + "Please make sure the Spark and the Hadoop versions ", + "match the versions on the cluster. ", + "SparkR package is compatible with Spark ", packageVersion("SparkR"), ".", + "If you need further help, ", + "contact the administrators of the cluster.") + } else { + stop(paste0("No instruction found for ", mode, " mode.")) + } +} diff --git a/R/pkg/R/sparkR.R b/R/pkg/R/sparkR.R index 85815af1f3639..de53b0bf79b58 100644 --- a/R/pkg/R/sparkR.R +++ b/R/pkg/R/sparkR.R @@ -366,25 +366,10 @@ sparkR.session <- function( } overrideEnvs(sparkConfigMap, paramMap) } - # do not download if it is run in the sparkR shell - if (!nzchar(master) || is_master_local(master)) { - if (!is_sparkR_shell()) { - if (is.na(file.info(sparkHome)$isdir)) { - msg <- paste0("Spark not found in SPARK_HOME: ", - sparkHome, - " .\nTo search in the cache directory. ", - "Installation will start if not found.") - message(msg) - packageLocalDir <- install.spark() - sparkHome <- packageLocalDir - } else { - msg <- paste0("Spark package is found in SPARK_HOME: ", sparkHome) - message(msg) - } - } - } if (!exists(".sparkRjsc", envir = .sparkREnv)) { + retHome <- sparkCheckInstall(sparkHome, master) + if (!is.null(retHome)) sparkHome <- retHome sparkExecutorEnvMap <- new.env() sparkR.sparkContext(master, appName, sparkHome, sparkConfigMap, sparkExecutorEnvMap, sparkJars, sparkPackages) @@ -547,3 +532,35 @@ processSparkPackages <- function(packages) { } splittedPackages } + +# Utility function that checks and install Spark to local folder if not found +# +# Installation will not be triggered if it's called from sparkR shell +# or if the master url is not local +# +# @param sparkHome directory to find Spark package. +# @param master the Spark master URL, used to check local or remote mode. +# @return NULL if no need to update sparkHome, and new sparkHome otherwise. 
+sparkCheckInstall <- function(sparkHome, master) { + if (!isSparkRShell()) { + if (!is.na(file.info(sparkHome)$isdir)) { + msg <- paste0("Spark package found in SPARK_HOME: ", sparkHome) + message(msg) + NULL + } else { + if (!nzchar(master) || isMasterLocal(master)) { + msg <- paste0("Spark not found in SPARK_HOME: ", + sparkHome) + message(msg) + packageLocalDir <- install.spark() + packageLocalDir + } else { + msg <- paste0("Spark not found in SPARK_HOME: ", + sparkHome, "\n", installInstruction("remote")) + stop(msg) + } + } + } else { + NULL + } +} diff --git a/R/pkg/R/utils.R b/R/pkg/R/utils.R index d78c0a7a539a8..2809ce5d376a5 100644 --- a/R/pkg/R/utils.R +++ b/R/pkg/R/utils.R @@ -690,10 +690,10 @@ getSparkContext <- function() { sc } -is_master_local <- function(master) { +isMasterLocal <- function(master) { grepl("^local(\\[([0-9]+|\\*)\\])?$", master, perl = TRUE) } -is_sparkR_shell <- function() { +isSparkRShell <- function() { grepl(".*shell\\.R$", Sys.getenv("R_PROFILE_USER"), perl = TRUE) } From ac27557eb622a257abeb3e8551f06ebc72f87133 Mon Sep 17 00:00:00 2001 From: Sameer Agarwal Date: Wed, 24 Aug 2016 21:24:24 -0700 Subject: [PATCH 089/270] [SPARK-17228][SQL] Not infer/propagate non-deterministic constraints ## What changes were proposed in this pull request? Given that filters based on non-deterministic constraints shouldn't be pushed down in the query plan, unnecessarily inferring them is confusing and a source of potential bugs. This patch simplifies the inferring logic by simply ignoring them. ## How was this patch tested? Added a new test in `ConstraintPropagationSuite`. Author: Sameer Agarwal Closes #14795 from sameeragarwal/deterministic-constraints. --- .../spark/sql/catalyst/plans/QueryPlan.scala | 3 ++- .../plans/ConstraintPropagationSuite.scala | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala index 8ee31f42ad88e..0fb6e7d2e795a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala @@ -35,7 +35,8 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanT .union(inferAdditionalConstraints(constraints)) .union(constructIsNotNullConstraints(constraints)) .filter(constraint => - constraint.references.nonEmpty && constraint.references.subsetOf(outputSet)) + constraint.references.nonEmpty && constraint.references.subsetOf(outputSet) && + constraint.deterministic) } /** diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/ConstraintPropagationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/ConstraintPropagationSuite.scala index 5a76969235acd..8d6a49a8a37b4 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/ConstraintPropagationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/ConstraintPropagationSuite.scala @@ -352,4 +352,21 @@ class ConstraintPropagationSuite extends SparkFunSuite { verifyConstraints(tr.analyze.constraints, ExpressionSet(Seq(IsNotNull(resolveColumn(tr, "b")), IsNotNull(resolveColumn(tr, "c"))))) } + + test("not infer non-deterministic constraints") { + val tr = LocalRelation('a.int, 'b.string, 'c.int) + + verifyConstraints(tr + .where('a.attr === Rand(0)) + .analyze.constraints, + 
ExpressionSet(Seq(IsNotNull(resolveColumn(tr, "a"))))) + + verifyConstraints(tr + .where('a.attr === InputFileName()) + .where('a.attr =!= 'c.attr) + .analyze.constraints, + ExpressionSet(Seq(resolveColumn(tr, "a") =!= resolveColumn(tr, "c"), + IsNotNull(resolveColumn(tr, "a")), + IsNotNull(resolveColumn(tr, "c"))))) + } } From 4d0706d616176dc29ff3562e40cb00dd4eb9c302 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Thu, 25 Aug 2016 12:50:03 +0800 Subject: [PATCH 090/270] [SPARK-17190][SQL] Removal of HiveSharedState ### What changes were proposed in this pull request? Since `HiveClient` is used to interact with the Hive metastore, it should be hidden in `HiveExternalCatalog`. After moving `HiveClient` into `HiveExternalCatalog`, `HiveSharedState` becomes a wrapper of `HiveExternalCatalog`. Thus, removal of `HiveSharedState` becomes straightforward. After removal of `HiveSharedState`, the reflection logic is directly applied on the choice of `ExternalCatalog` types, based on the configuration of `CATALOG_IMPLEMENTATION`. ~~`HiveClient` is also used/invoked by the other entities besides HiveExternalCatalog, we defines the following two APIs: getClient and getNewClient~~ ### How was this patch tested? The existing test cases Author: gatorsmile Closes #14757 from gatorsmile/removeHiveClient. --- .../catalyst/catalog/InMemoryCatalog.scala | 8 +++- .../org/apache/spark/sql/SparkSession.scala | 14 +----- .../spark/sql/internal/SharedState.scala | 47 ++++++++++++++++++- .../hive/thriftserver/HiveThriftServer2.scala | 2 +- .../apache/spark/sql/hive/HiveContext.scala | 4 -- .../spark/sql/hive/HiveExternalCatalog.scala | 10 +++- .../spark/sql/hive/HiveMetastoreCatalog.scala | 3 +- .../spark/sql/hive/HiveSessionState.scala | 9 ++-- .../spark/sql/hive/HiveSharedState.scala | 47 ------------------- .../apache/spark/sql/hive/test/TestHive.scala | 15 +++--- .../spark/sql/hive/HiveDataFrameSuite.scala | 2 +- .../sql/hive/HiveExternalCatalogSuite.scala | 16 +++---- .../spark/sql/hive/HiveSparkSubmitSuite.scala | 5 +- .../sql/hive/MetastoreDataSourcesSuite.scala | 3 +- .../spark/sql/hive/ShowCreateTableSuite.scala | 2 +- 15 files changed, 88 insertions(+), 99 deletions(-) delete mode 100644 sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala index 9ebf7de1a5680..b55ddcb54b457 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala @@ -24,7 +24,7 @@ import scala.collection.mutable import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path -import org.apache.spark.SparkException +import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} import org.apache.spark.sql.catalyst.analysis._ @@ -39,7 +39,11 @@ import org.apache.spark.sql.catalyst.util.StringUtils * * All public methods should be synchronized for thread-safety. 
*/ -class InMemoryCatalog(hadoopConfig: Configuration = new Configuration) extends ExternalCatalog { +class InMemoryCatalog( + conf: SparkConf = new SparkConf, + hadoopConfig: Configuration = new Configuration) + extends ExternalCatalog { + import CatalogTypes.TablePartitionSpec private class TableDesc(var table: CatalogTable) { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala index 362bf45d0356d..0f6292db62172 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala @@ -96,10 +96,7 @@ class SparkSession private( */ @transient private[sql] lazy val sharedState: SharedState = { - existingSharedState.getOrElse( - SparkSession.reflect[SharedState, SparkContext]( - SparkSession.sharedStateClassName(sparkContext.conf), - sparkContext)) + existingSharedState.getOrElse(new SharedState(sparkContext)) } /** @@ -913,16 +910,8 @@ object SparkSession { /** Reference to the root SparkSession. */ private val defaultSession = new AtomicReference[SparkSession] - private val HIVE_SHARED_STATE_CLASS_NAME = "org.apache.spark.sql.hive.HiveSharedState" private val HIVE_SESSION_STATE_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionState" - private def sharedStateClassName(conf: SparkConf): String = { - conf.get(CATALOG_IMPLEMENTATION) match { - case "hive" => HIVE_SHARED_STATE_CLASS_NAME - case "in-memory" => classOf[SharedState].getCanonicalName - } - } - private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_CLASS_NAME @@ -953,7 +942,6 @@ object SparkSession { private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_CLASS_NAME) - Utils.classForName(HIVE_SHARED_STATE_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala index 54aee5e02bb9c..6387f0150631c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala @@ -17,7 +17,13 @@ package org.apache.spark.sql.internal -import org.apache.spark.SparkContext +import scala.reflect.ClassTag +import scala.util.control.NonFatal + +import org.apache.hadoop.conf.Configuration + +import org.apache.spark.{SparkConf, SparkContext} +import org.apache.spark.internal.config._ import org.apache.spark.internal.Logging import org.apache.spark.sql.{SparkSession, SQLContext} import org.apache.spark.sql.catalyst.catalog.{ExternalCatalog, InMemoryCatalog} @@ -51,7 +57,11 @@ private[sql] class SharedState(val sparkContext: SparkContext) extends Logging { /** * A catalog that interacts with external systems. */ - lazy val externalCatalog: ExternalCatalog = new InMemoryCatalog(sparkContext.hadoopConfiguration) + lazy val externalCatalog: ExternalCatalog = + SharedState.reflect[ExternalCatalog, SparkConf, Configuration]( + SharedState.externalCatalogClassName(sparkContext.conf), + sparkContext.conf, + sparkContext.hadoopConfiguration) /** * A classloader used to load all user-added jar. 
@@ -98,6 +108,39 @@ private[sql] class SharedState(val sparkContext: SparkContext) extends Logging { } } +object SharedState { + + private val HIVE_EXTERNAL_CATALOG_CLASS_NAME = "org.apache.spark.sql.hive.HiveExternalCatalog" + + private def externalCatalogClassName(conf: SparkConf): String = { + conf.get(CATALOG_IMPLEMENTATION) match { + case "hive" => HIVE_EXTERNAL_CATALOG_CLASS_NAME + case "in-memory" => classOf[InMemoryCatalog].getCanonicalName + } + } + + /** + * Helper method to create an instance of [[T]] using a single-arg constructor that + * accepts an [[Arg1]] and an [[Arg2]]. + */ + private def reflect[T, Arg1 <: AnyRef, Arg2 <: AnyRef]( + className: String, + ctorArg1: Arg1, + ctorArg2: Arg2)( + implicit ctorArgTag1: ClassTag[Arg1], + ctorArgTag2: ClassTag[Arg2]): T = { + try { + val clazz = Utils.classForName(className) + val ctor = clazz.getDeclaredConstructor(ctorArgTag1.runtimeClass, ctorArgTag2.runtimeClass) + val args = Array[AnyRef](ctorArg1, ctorArg2) + ctor.newInstance(args: _*).asInstanceOf[T] + } catch { + case NonFatal(e) => + throw new IllegalArgumentException(s"Error while instantiating '$className':", e) + } + } +} + /** * URL class loader that exposes the `addURL` and `getURLs` methods in URLClassLoader. diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala index e3258d858f1cc..13c6f11f461c6 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala @@ -34,7 +34,7 @@ import org.apache.spark.annotation.DeveloperApi import org.apache.spark.internal.Logging import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd, SparkListenerJobStart} import org.apache.spark.sql.SQLContext -import org.apache.spark.sql.hive.{HiveSharedState, HiveUtils} +import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._ import org.apache.spark.sql.hive.thriftserver.ui.ThriftServerTab import org.apache.spark.sql.internal.SQLConf diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala index 3cfe93234f24b..5393c57c9a28f 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala @@ -52,10 +52,6 @@ class HiveContext private[hive](_sparkSession: SparkSession) sparkSession.sessionState.asInstanceOf[HiveSessionState] } - protected[sql] override def sharedState: HiveSharedState = { - sparkSession.sharedState.asInstanceOf[HiveSharedState] - } - /** * Invalidate and refresh all the cached the metadata of the given table. 
For performance reasons, * Spark SQL or the external data source library it uses might cache certain metadata about a diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index de3e60a44d920..2586d11a6c1ff 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.hive.ql.metadata.HiveException import org.apache.thrift.TException +import org.apache.spark.SparkConf import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier @@ -41,13 +42,20 @@ import org.apache.spark.sql.types.{DataType, StructType} * A persistent implementation of the system catalog using Hive. * All public methods must be synchronized for thread-safety. */ -private[spark] class HiveExternalCatalog(client: HiveClient, hadoopConf: Configuration) +private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configuration) extends ExternalCatalog with Logging { import CatalogTypes.TablePartitionSpec import HiveExternalCatalog._ import CatalogTableType._ + /** + * A Hive client used to interact with the metastore. + */ + val client: HiveClient = { + HiveUtils.newClientForMetadata(conf, hadoopConf) + } + // Exceptions thrown by the hive client that we would like to wrap private val clientExceptions = Set( classOf[HiveException].getCanonicalName, diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index 181f470b2a100..701b73a4aa39b 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -44,7 +44,8 @@ import org.apache.spark.sql.types._ */ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Logging { private val sessionState = sparkSession.sessionState.asInstanceOf[HiveSessionState] - private val client = sparkSession.sharedState.asInstanceOf[HiveSharedState].metadataHive + private val client = + sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client /** A fully qualified identifier for a table (i.e., database.tableName) */ case class QualifiedTableName(database: String, name: String) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala index e01c053ab5a76..a7cc7cc142e4e 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala @@ -33,21 +33,18 @@ private[hive] class HiveSessionState(sparkSession: SparkSession) self => - private lazy val sharedState: HiveSharedState = { - sparkSession.sharedState.asInstanceOf[HiveSharedState] - } - /** * A Hive client used for interacting with the metastore. */ - lazy val metadataHive: HiveClient = sharedState.metadataHive.newSession() + lazy val metadataHive: HiveClient = + sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client.newSession() /** * Internal catalog for managing table and database states. 
*/ override lazy val catalog = { new HiveSessionCatalog( - sharedState.externalCatalog, + sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog], metadataHive, sparkSession, functionResourceLoader, diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala deleted file mode 100644 index 12b4962fba178..0000000000000 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.hive - -import org.apache.spark.SparkContext -import org.apache.spark.sql.hive.client.HiveClient -import org.apache.spark.sql.internal.SharedState - - -/** - * A class that holds all state shared across sessions in a given - * [[org.apache.spark.sql.SparkSession]] backed by Hive. - */ -private[hive] class HiveSharedState(override val sparkContext: SparkContext) - extends SharedState(sparkContext) { - - // TODO: just share the IsolatedClientLoader instead of the client instance itself - - /** - * A Hive client used to interact with the metastore. - */ - // This needs to be a lazy val at here because TestHiveSharedState is overriding it. - lazy val metadataHive: HiveClient = { - HiveUtils.newClientForMetadata(sparkContext.conf, sparkContext.hadoopConfiguration) - } - - /** - * A catalog that interacts with the Hive metastore. - */ - override lazy val externalCatalog = - new HiveExternalCatalog(metadataHive, sparkContext.hadoopConfiguration) -} diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala index cdc8d610d378f..163f210802b53 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala @@ -39,7 +39,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.execution.QueryExecution import org.apache.spark.sql.execution.command.CacheTableCommand import org.apache.spark.sql.hive._ -import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.internal.{SharedState, SQLConf} import org.apache.spark.util.{ShutdownHookManager, Utils} // SPARK-3729: Test key required to check for initialization errors with config. @@ -108,13 +108,13 @@ class TestHiveContext( * A [[SparkSession]] used in [[TestHiveContext]]. * * @param sc SparkContext - * @param existingSharedState optional [[HiveSharedState]] + * @param existingSharedState optional [[SharedState]] * @param loadTestTables if true, load the test tables. 
They can only be loaded when running * in the JVM, i.e when calling from Python this flag has to be false. */ private[hive] class TestHiveSparkSession( @transient private val sc: SparkContext, - @transient private val existingSharedState: Option[HiveSharedState], + @transient private val existingSharedState: Option[SharedState], private val loadTestTables: Boolean) extends SparkSession(sc) with Logging { self => @@ -139,14 +139,13 @@ private[hive] class TestHiveSparkSession( assume(sc.conf.get(CATALOG_IMPLEMENTATION) == "hive") - // TODO: Let's remove HiveSharedState and TestHiveSessionState. Otherwise, - // we are not really testing the reflection logic based on the setting of - // CATALOG_IMPLEMENTATION. @transient - override lazy val sharedState: HiveSharedState = { - existingSharedState.getOrElse(new HiveSharedState(sc)) + override lazy val sharedState: SharedState = { + existingSharedState.getOrElse(new SharedState(sc)) } + // TODO: Let's remove TestHiveSessionState. Otherwise, we are not really testing the reflection + // logic based on the setting of CATALOG_IMPLEMENTATION. @transient override lazy val sessionState: TestHiveSessionState = new TestHiveSessionState(self) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameSuite.scala index 23798431e697f..96e9054cd4876 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameSuite.scala @@ -31,7 +31,7 @@ class HiveDataFrameSuite extends QueryTest with TestHiveSingleton { } test("SPARK-15887: hive-site.xml should be loaded") { - val hiveClient = spark.sharedState.asInstanceOf[HiveSharedState].metadataHive + val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client assert(hiveClient.getConf("hive.in.test", "") == "true") } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala index 175889b08b49f..26c2549820de6 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala @@ -21,26 +21,26 @@ import org.apache.hadoop.conf.Configuration import org.apache.spark.SparkConf import org.apache.spark.sql.catalyst.catalog._ -import org.apache.spark.sql.hive.client.HiveClient /** * Test suite for the [[HiveExternalCatalog]]. */ class HiveExternalCatalogSuite extends ExternalCatalogSuite { - private val client: HiveClient = { - // We create a metastore at a temp location to avoid any potential - // conflict of having multiple connections to a single derby instance. 
- HiveUtils.newClientForExecution(new SparkConf, new Configuration) + private val externalCatalog: HiveExternalCatalog = { + val catalog = new HiveExternalCatalog(new SparkConf, new Configuration) + catalog.client.reset() + catalog } protected override val utils: CatalogTestUtils = new CatalogTestUtils { override val tableInputFormat: String = "org.apache.hadoop.mapred.SequenceFileInputFormat" override val tableOutputFormat: String = "org.apache.hadoop.mapred.SequenceFileOutputFormat" - override def newEmptyCatalog(): ExternalCatalog = - new HiveExternalCatalog(client, new Configuration()) + override def newEmptyCatalog(): ExternalCatalog = externalCatalog } - protected override def resetState(): Unit = client.reset() + protected override def resetState(): Unit = { + externalCatalog.client.reset() + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala index dd8fec0c15ffa..af282866669b8 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala @@ -378,10 +378,9 @@ object SetMetastoreURLTest extends Logging { s"spark.sql.test.expectedMetastoreURL should be set.") } - // HiveSharedState is used when Hive support is enabled. + // HiveExternalCatalog is used when Hive support is enabled. val actualMetastoreURL = - spark.sharedState.asInstanceOf[HiveSharedState] - .metadataHive + spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client .getConf("javax.jdo.option.ConnectionURL", "this_is_a_wrong_URL") logInfo(s"javax.jdo.option.ConnectionURL is $actualMetastoreURL") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala index 571ba49d115f8..d77bb5cf95f69 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala @@ -51,7 +51,8 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv // To test `HiveExternalCatalog`, we need to read the raw table metadata(schema, partition // columns and bucket specification are still in table properties) from hive client. 
- private def hiveClient: HiveClient = sharedState.asInstanceOf[HiveSharedState].metadataHive + private def hiveClient: HiveClient = + sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client test("persistent JSON table") { withTable("jsonTable") { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ShowCreateTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ShowCreateTableSuite.scala index 68f1bb60f66b7..e925921165d6a 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ShowCreateTableSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ShowCreateTableSuite.scala @@ -266,7 +266,7 @@ class ShowCreateTableSuite extends QueryTest with SQLTestUtils with TestHiveSing } private def createRawHiveTable(ddl: String): Unit = { - hiveContext.sharedState.asInstanceOf[HiveSharedState].metadataHive.runSqlHive(ddl) + hiveContext.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client.runSqlHive(ddl) } private def checkCreateTable(table: String): Unit = { From 5f02d2e5b4d37f554629cbd0e488e856fffd7b6b Mon Sep 17 00:00:00 2001 From: jiangxingbo Date: Wed, 24 Aug 2016 23:36:04 -0700 Subject: [PATCH 091/270] [SPARK-17215][SQL] Method `SQLContext.parseDataType(dataTypeString: String)` could be removed. ## What changes were proposed in this pull request? Method `SQLContext.parseDataType(dataTypeString: String)` could be removed, we should use `SparkSession.parseDataType(dataTypeString: String)` instead. This require updating PySpark. ## How was this patch tested? Existing test cases. Author: jiangxingbo Closes #14790 from jiangxb1987/parseDataType. --- python/pyspark/sql/column.py | 7 +++---- python/pyspark/sql/functions.py | 6 +++--- python/pyspark/sql/readwriter.py | 4 +++- python/pyspark/sql/streaming.py | 4 +++- python/pyspark/sql/tests.py | 2 +- python/pyspark/sql/types.py | 6 +++--- .../main/scala/org/apache/spark/sql/SQLContext.scala | 10 ---------- 7 files changed, 16 insertions(+), 23 deletions(-) diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py index 4b99f3058b75c..8d5adc8ffd6d4 100644 --- a/python/pyspark/sql/column.py +++ b/python/pyspark/sql/column.py @@ -328,10 +328,9 @@ def cast(self, dataType): if isinstance(dataType, basestring): jc = self._jc.cast(dataType) elif isinstance(dataType, DataType): - from pyspark.sql import SQLContext - sc = SparkContext.getOrCreate() - ctx = SQLContext.getOrCreate(sc) - jdt = ctx._ssql_ctx.parseDataType(dataType.json()) + from pyspark.sql import SparkSession + spark = SparkSession.builder.getOrCreate() + jdt = spark._jsparkSession.parseDataType(dataType.json()) jc = self._jc.cast(jdt) else: raise TypeError("unexpected type: %s" % type(dataType)) diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py index 4ea83e24bbc9a..89b3c07c0740f 100644 --- a/python/pyspark/sql/functions.py +++ b/python/pyspark/sql/functions.py @@ -1760,11 +1760,11 @@ def __init__(self, func, returnType, name=None): self._judf = self._create_judf(name) def _create_judf(self, name): - from pyspark.sql import SQLContext + from pyspark.sql import SparkSession sc = SparkContext.getOrCreate() wrapped_func = _wrap_function(sc, self.func, self.returnType) - ctx = SQLContext.getOrCreate(sc) - jdt = ctx._ssql_ctx.parseDataType(self.returnType.json()) + spark = SparkSession.builder.getOrCreate() + jdt = spark._jsparkSession.parseDataType(self.returnType.json()) if name is None: f = self.func name = f.__name__ if hasattr(f, '__name__') else f.__class__.__name__ diff --git 
a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py index 3da6f497e9f12..3d79e0ccccee4 100644 --- a/python/pyspark/sql/readwriter.py +++ b/python/pyspark/sql/readwriter.py @@ -98,9 +98,11 @@ def schema(self, schema): :param schema: a :class:`pyspark.sql.types.StructType` object """ + from pyspark.sql import SparkSession if not isinstance(schema, StructType): raise TypeError("schema should be StructType") - jschema = self._spark._ssql_ctx.parseDataType(schema.json()) + spark = SparkSession.builder.getOrCreate() + jschema = spark._jsparkSession.parseDataType(schema.json()) self._jreader = self._jreader.schema(jschema) return self diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py index 3761d2b1994f0..a0ba5825f35c6 100644 --- a/python/pyspark/sql/streaming.py +++ b/python/pyspark/sql/streaming.py @@ -273,9 +273,11 @@ def schema(self, schema): >>> s = spark.readStream.schema(sdf_schema) """ + from pyspark.sql import SparkSession if not isinstance(schema, StructType): raise TypeError("schema should be StructType") - jschema = self._spark._ssql_ctx.parseDataType(schema.json()) + spark = SparkSession.builder.getOrCreate() + jschema = spark._jsparkSession.parseDataType(schema.json()) self._jreader = self._jreader.schema(jschema) return self diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py index fc41701b5922d..fd8e9cec3e0bd 100644 --- a/python/pyspark/sql/tests.py +++ b/python/pyspark/sql/tests.py @@ -574,7 +574,7 @@ def test_udt(self): def check_datatype(datatype): pickled = pickle.loads(pickle.dumps(datatype)) assert datatype == pickled - scala_datatype = self.spark._wrapped._ssql_ctx.parseDataType(datatype.json()) + scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json()) python_datatype = _parse_datatype_json_string(scala_datatype.json()) assert datatype == python_datatype diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py index 11b1e60ee74f4..4a023123b6eca 100644 --- a/python/pyspark/sql/types.py +++ b/python/pyspark/sql/types.py @@ -851,7 +851,7 @@ def _parse_datatype_json_string(json_string): >>> def check_datatype(datatype): ... pickled = pickle.loads(pickle.dumps(datatype)) ... assert datatype == pickled - ... scala_datatype = sqlContext._ssql_ctx.parseDataType(datatype.json()) + ... scala_datatype = spark._jsparkSession.parseDataType(datatype.json()) ... python_datatype = _parse_datatype_json_string(scala_datatype.json()) ... 
assert datatype == python_datatype >>> for cls in _all_atomic_types.values(): @@ -1551,11 +1551,11 @@ def convert(self, obj, gateway_client): def _test(): import doctest from pyspark.context import SparkContext - from pyspark.sql import SQLContext + from pyspark.sql import SparkSession globs = globals() sc = SparkContext('local[4]', 'PythonTest') globs['sc'] = sc - globs['sqlContext'] = SQLContext(sc) + globs['spark'] = SparkSession.builder.getOrCreate() (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) globs['sc'].stop() if failure_count: diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index e7627ac2c95ab..fbf22197a1a11 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -743,16 +743,6 @@ class SQLContext private[sql](val sparkSession: SparkSession) sparkSession.catalog.listTables(databaseName).collect().map(_.name) } - /** - * Parses the data type in our internal string representation. The data type string should - * have the same format as the one generated by `toString` in scala. - * It is only used by PySpark. - */ - // TODO: Remove this function (would require updating PySpark). - private[sql] def parseDataType(dataTypeString: String): DataType = { - DataType.fromJson(dataTypeString) - } - //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// // Deprecated methods From 2bcd5d5ce3eaf0eb1600a12a2b55ddb40927533b Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Thu, 25 Aug 2016 09:45:49 +0100 Subject: [PATCH 092/270] [SPARK-17193][CORE] HadoopRDD NPE at DEBUG log level when getLocationInfo == null ## What changes were proposed in this pull request? Handle null from Hadoop getLocationInfo directly instead of catching (and logging) exception ## How was this patch tested? Jenkins tests Author: Sean Owen Closes #14760 from srowen/SPARK-17193. --- .../org/apache/spark/rdd/HadoopRDD.scala | 26 +++++++++---------- .../org/apache/spark/rdd/NewHadoopRDD.scala | 2 +- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala index fd3a14bd48850..4640b5dc2f654 100644 --- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala @@ -22,7 +22,6 @@ import java.text.SimpleDateFormat import java.util.Date import scala.collection.immutable.Map -import scala.collection.mutable.ListBuffer import scala.reflect.ClassTag import org.apache.hadoop.conf.{Configurable, Configuration} @@ -317,7 +316,7 @@ class HadoopRDD[K, V]( try { val lsplit = c.inputSplitWithLocationInfo.cast(hsplit) val infos = c.getLocationInfo.invoke(lsplit).asInstanceOf[Array[AnyRef]] - Some(HadoopRDD.convertSplitLocationInfo(infos)) + HadoopRDD.convertSplitLocationInfo(infos) } catch { case e: Exception => logDebug("Failed to use InputSplitWithLocations.", e) @@ -419,21 +418,20 @@ private[spark] object HadoopRDD extends Logging { None } - private[spark] def convertSplitLocationInfo(infos: Array[AnyRef]): Seq[String] = { - val out = ListBuffer[String]() - infos.foreach { loc => - val locationStr = HadoopRDD.SPLIT_INFO_REFLECTIONS.get. 
- getLocation.invoke(loc).asInstanceOf[String] + private[spark] def convertSplitLocationInfo(infos: Array[AnyRef]): Option[Seq[String]] = { + Option(infos).map(_.flatMap { loc => + val reflections = HadoopRDD.SPLIT_INFO_REFLECTIONS.get + val locationStr = reflections.getLocation.invoke(loc).asInstanceOf[String] if (locationStr != "localhost") { - if (HadoopRDD.SPLIT_INFO_REFLECTIONS.get.isInMemory. - invoke(loc).asInstanceOf[Boolean]) { - logDebug("Partition " + locationStr + " is cached by Hadoop.") - out += new HDFSCacheTaskLocation(locationStr).toString + if (reflections.isInMemory.invoke(loc).asInstanceOf[Boolean]) { + logDebug(s"Partition $locationStr is cached by Hadoop.") + Some(HDFSCacheTaskLocation(locationStr).toString) } else { - out += new HostTaskLocation(locationStr).toString + Some(HostTaskLocation(locationStr).toString) } + } else { + None } - } - out.seq + }) } } diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala index be919e65870a9..1c7aec919bdc4 100644 --- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala @@ -255,7 +255,7 @@ class NewHadoopRDD[K, V]( case Some(c) => try { val infos = c.newGetLocationInfo.invoke(split).asInstanceOf[Array[AnyRef]] - Some(HadoopRDD.convertSplitLocationInfo(infos)) + HadoopRDD.convertSplitLocationInfo(infos) } catch { case e : Exception => logDebug("Failed to use InputSplit#getLocationInfo.", e) From e0b20f9f24d5c3304bf517a4dcfb0da93be5bc75 Mon Sep 17 00:00:00 2001 From: Liwei Lin Date: Thu, 25 Aug 2016 11:24:40 +0200 Subject: [PATCH 093/270] [SPARK-17061][SPARK-17093][SQL] MapObjects` should make copies of unsafe-backed data ## What changes were proposed in this pull request? Currently `MapObjects` does not make copies of unsafe-backed data, leading to problems like [SPARK-17061](https://issues.apache.org/jira/browse/SPARK-17061) [SPARK-17093](https://issues.apache.org/jira/browse/SPARK-17093). This patch makes `MapObjects` make copies of unsafe-backed data. Generated code - prior to this patch: ```java ... /* 295 */ if (isNull12) { /* 296 */ convertedArray1[loopIndex1] = null; /* 297 */ } else { /* 298 */ convertedArray1[loopIndex1] = value12; /* 299 */ } ... ``` Generated code - after this patch: ```java ... /* 295 */ if (isNull12) { /* 296 */ convertedArray1[loopIndex1] = null; /* 297 */ } else { /* 298 */ convertedArray1[loopIndex1] = value12 instanceof UnsafeRow? value12.copy() : value12; /* 299 */ } ... ``` ## How was this patch tested? Add a new test case which would fail without this patch. Author: Liwei Lin Closes #14698 from lw-lin/mapobjects-copy. --- .../expressions/objects/objects.scala | 12 ++++++- .../expressions/ExpressionEvalHelper.scala | 2 +- .../expressions/ObjectExpressionsSuite.scala | 34 +++++++++++++++++++ 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 31ed485317487..4da74a0a272d8 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -494,6 +494,16 @@ case class MapObjects private( s"$seq == null ? 
$array[$loopIndex] : $seq.apply($loopIndex)" } + // Make a copy of the data if it's unsafe-backed + def makeCopyIfInstanceOf(clazz: Class[_ <: Any], value: String) = + s"$value instanceof ${clazz.getSimpleName}? ${value}.copy() : $value" + val genFunctionValue = lambdaFunction.dataType match { + case StructType(_) => makeCopyIfInstanceOf(classOf[UnsafeRow], genFunction.value) + case ArrayType(_, _) => makeCopyIfInstanceOf(classOf[UnsafeArrayData], genFunction.value) + case MapType(_, _, _) => makeCopyIfInstanceOf(classOf[UnsafeMapData], genFunction.value) + case _ => genFunction.value + } + val loopNullCheck = inputDataType match { case _: ArrayType => s"$loopIsNull = ${genInputData.value}.isNullAt($loopIndex);" // The element of primitive array will never be null. @@ -521,7 +531,7 @@ case class MapObjects private( if (${genFunction.isNull}) { $convertedArray[$loopIndex] = null; } else { - $convertedArray[$loopIndex] = ${genFunction.value}; + $convertedArray[$loopIndex] = $genFunctionValue; } $loopIndex += 1; diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala index d6a9672d1f186..668543a28bd30 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala @@ -136,7 +136,7 @@ trait ExpressionEvalHelper extends GeneratorDrivenPropertyChecks { // some expression is reusing variable names across different instances. // This behavior is tested in ExpressionEvalHelperSuite. val plan = generateProject( - GenerateUnsafeProjection.generate( + UnsafeProjection.create( Alias(expression, s"Optimized($expression)1")() :: Alias(expression, s"Optimized($expression)2")() :: Nil), expression) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala index ee65826cd57f7..3edcc02f15264 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala @@ -19,7 +19,9 @@ package org.apache.spark.sql.catalyst.expressions import org.apache.spark.SparkFunSuite import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.catalyst.expressions.objects.Invoke +import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData} import org.apache.spark.sql.types.{IntegerType, ObjectType} @@ -32,4 +34,36 @@ class ObjectExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper { val invoke = Invoke(inputObject, "_2", IntegerType) checkEvaluationWithGeneratedMutableProjection(invoke, null, inputRow) } + + test("MapObjects should make copies of unsafe-backed data") { + // test UnsafeRow-backed data + val structEncoder = ExpressionEncoder[Array[Tuple2[java.lang.Integer, java.lang.Integer]]] + val structInputRow = InternalRow.fromSeq(Seq(Array((1, 2), (3, 4)))) + val structExpected = new GenericArrayData( + Array(InternalRow.fromSeq(Seq(1, 2)), InternalRow.fromSeq(Seq(3, 4)))) + checkEvalutionWithUnsafeProjection( + structEncoder.serializer.head, structExpected, structInputRow) + + // test UnsafeArray-backed data + val 
arrayEncoder = ExpressionEncoder[Array[Array[Int]]] + val arrayInputRow = InternalRow.fromSeq(Seq(Array(Array(1, 2), Array(3, 4)))) + val arrayExpected = new GenericArrayData( + Array(new GenericArrayData(Array(1, 2)), new GenericArrayData(Array(3, 4)))) + checkEvalutionWithUnsafeProjection( + arrayEncoder.serializer.head, arrayExpected, arrayInputRow) + + // test UnsafeMap-backed data + val mapEncoder = ExpressionEncoder[Array[Map[Int, Int]]] + val mapInputRow = InternalRow.fromSeq(Seq(Array( + Map(1 -> 100, 2 -> 200), Map(3 -> 300, 4 -> 400)))) + val mapExpected = new GenericArrayData(Seq( + new ArrayBasedMapData( + new GenericArrayData(Array(1, 2)), + new GenericArrayData(Array(100, 200))), + new ArrayBasedMapData( + new GenericArrayData(Array(3, 4)), + new GenericArrayData(Array(300, 400))))) + checkEvalutionWithUnsafeProjection( + mapEncoder.serializer.head, mapExpected, mapInputRow) + } } From 6b8cb1fe52e2c8b4b87b0c7d820f3a1824287328 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Thu, 25 Aug 2016 02:26:33 -0700 Subject: [PATCH 094/270] [SPARK-17197][ML][PYSPARK] PySpark LiR/LoR supports tree aggregation level configurable. ## What changes were proposed in this pull request? [SPARK-17090](https://issues.apache.org/jira/browse/SPARK-17090) makes tree aggregation level in LiR/LoR configurable, this PR makes PySpark support this function. ## How was this patch tested? Since ```aggregationDepth``` is an expert param, I'm not prefer to test it in doctest which is also used for example. Here is the offline test result: ![image](https://cloud.githubusercontent.com/assets/1962026/17879457/f83d7760-68a6-11e6-9936-d0a884d5d6ec.png) Author: Yanbo Liang Closes #14766 from yanboliang/spark-17197. --- python/pyspark/ml/classification.py | 14 +++++++---- .../ml/param/_shared_params_code_gen.py | 4 +++- python/pyspark/ml/param/shared.py | 24 +++++++++++++++++++ python/pyspark/ml/regression.py | 11 +++++---- 4 files changed, 42 insertions(+), 11 deletions(-) diff --git a/python/pyspark/ml/classification.py b/python/pyspark/ml/classification.py index 33ada27454b72..d1522d78faa0b 100644 --- a/python/pyspark/ml/classification.py +++ b/python/pyspark/ml/classification.py @@ -64,7 +64,7 @@ def numClasses(self): class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol, HasElasticNetParam, HasFitIntercept, HasStandardization, HasThresholds, - HasWeightCol, JavaMLWritable, JavaMLReadable): + HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable): """ Logistic regression. Currently, this class only supports binary classification. 
@@ -121,12 +121,14 @@ class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredicti def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, threshold=0.5, thresholds=None, probabilityCol="probability", - rawPredictionCol="rawPrediction", standardization=True, weightCol=None): + rawPredictionCol="rawPrediction", standardization=True, weightCol=None, + aggregationDepth=2): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ threshold=0.5, thresholds=None, probabilityCol="probability", \ - rawPredictionCol="rawPrediction", standardization=True, weightCol=None) + rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \ + aggregationDepth=2) If the threshold and thresholds Params are both set, they must be equivalent. """ super(LogisticRegression, self).__init__() @@ -142,12 +144,14 @@ def __init__(self, featuresCol="features", labelCol="label", predictionCol="pred def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, threshold=0.5, thresholds=None, probabilityCol="probability", - rawPredictionCol="rawPrediction", standardization=True, weightCol=None): + rawPredictionCol="rawPrediction", standardization=True, weightCol=None, + aggregationDepth=2): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ threshold=0.5, thresholds=None, probabilityCol="probability", \ - rawPredictionCol="rawPrediction", standardization=True, weightCol=None) + rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \ + aggregationDepth=2) Sets params for logistic regression. If the threshold and thresholds Params are both set, they must be equivalent. """ diff --git a/python/pyspark/ml/param/_shared_params_code_gen.py b/python/pyspark/ml/param/_shared_params_code_gen.py index c32dcc467d492..4f4328bcadc6f 100644 --- a/python/pyspark/ml/param/_shared_params_code_gen.py +++ b/python/pyspark/ml/param/_shared_params_code_gen.py @@ -147,7 +147,9 @@ def get$Name(self): ("solver", "the solver algorithm for optimization. If this is not set or empty, " + "default value is 'auto'.", "'auto'", "TypeConverters.toString"), ("varianceCol", "column name for the biased sample variance of prediction.", - None, "TypeConverters.toString")] + None, "TypeConverters.toString"), + ("aggregationDepth", "suggested depth for treeAggregate (>= 2).", "2", + "TypeConverters.toInt")] code = [] for name, doc, defaultValueStr, typeConverter in shared: diff --git a/python/pyspark/ml/param/shared.py b/python/pyspark/ml/param/shared.py index c5ccf81540d58..24af07afc7d5c 100644 --- a/python/pyspark/ml/param/shared.py +++ b/python/pyspark/ml/param/shared.py @@ -560,6 +560,30 @@ def getVarianceCol(self): return self.getOrDefault(self.varianceCol) +class HasAggregationDepth(Params): + """ + Mixin for param aggregationDepth: suggested depth for treeAggregate (>= 2). 
+ """ + + aggregationDepth = Param(Params._dummy(), "aggregationDepth", "suggested depth for treeAggregate (>= 2).", typeConverter=TypeConverters.toInt) + + def __init__(self): + super(HasAggregationDepth, self).__init__() + self._setDefault(aggregationDepth=2) + + def setAggregationDepth(self, value): + """ + Sets the value of :py:attr:`aggregationDepth`. + """ + return self._set(aggregationDepth=value) + + def getAggregationDepth(self): + """ + Gets the value of aggregationDepth or its default value. + """ + return self.getOrDefault(self.aggregationDepth) + + class DecisionTreeParams(Params): """ Mixin for Decision Tree parameters. diff --git a/python/pyspark/ml/regression.py b/python/pyspark/ml/regression.py index 56312f672f71d..19afc723bb784 100644 --- a/python/pyspark/ml/regression.py +++ b/python/pyspark/ml/regression.py @@ -39,7 +39,8 @@ @inherit_doc class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, HasRegParam, HasTol, HasElasticNetParam, HasFitIntercept, - HasStandardization, HasSolver, HasWeightCol, JavaMLWritable, JavaMLReadable): + HasStandardization, HasSolver, HasWeightCol, HasAggregationDepth, + JavaMLWritable, JavaMLReadable): """ Linear regression. @@ -97,11 +98,11 @@ class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPrediction @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, - standardization=True, solver="auto", weightCol=None): + standardization=True, solver="auto", weightCol=None, aggregationDepth=2): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ - standardization=True, solver="auto", weightCol=None) + standardization=True, solver="auto", weightCol=None, aggregationDepth=2) """ super(LinearRegression, self).__init__() self._java_obj = self._new_java_obj( @@ -114,11 +115,11 @@ def __init__(self, featuresCol="features", labelCol="label", predictionCol="pred @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, - standardization=True, solver="auto", weightCol=None): + standardization=True, solver="auto", weightCol=None, aggregationDepth=2): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ - standardization=True, solver="auto", weightCol=None) + standardization=True, solver="auto", weightCol=None, aggregationDepth=2) Sets params for linear regression. 
""" kwargs = self.setParams._input_kwargs From 2b0cc4e0dfa4ffb9f21ff4a303015bc9c962d42b Mon Sep 17 00:00:00 2001 From: Takeshi YAMAMURO Date: Thu, 25 Aug 2016 12:39:58 +0200 Subject: [PATCH 095/270] [SPARK-12978][SQL] Skip unnecessary final group-by when input data already clustered with group-by keys This ticket targets the optimization to skip an unnecessary group-by operation below; Without opt.: ``` == Physical Plan == TungstenAggregate(key=[col0#159], functions=[(sum(col1#160),mode=Final,isDistinct=false),(avg(col2#161),mode=Final,isDistinct=false)], output=[col0#159,sum(col1)#177,avg(col2)#178]) +- TungstenAggregate(key=[col0#159], functions=[(sum(col1#160),mode=Partial,isDistinct=false),(avg(col2#161),mode=Partial,isDistinct=false)], output=[col0#159,sum#200,sum#201,count#202L]) +- TungstenExchange hashpartitioning(col0#159,200), None +- InMemoryColumnarTableScan [col0#159,col1#160,col2#161], InMemoryRelation [col0#159,col1#160,col2#161], true, 10000, StorageLevel(true, true, false, true, 1), ConvertToUnsafe, None ``` With opt.: ``` == Physical Plan == TungstenAggregate(key=[col0#159], functions=[(sum(col1#160),mode=Complete,isDistinct=false),(avg(col2#161),mode=Final,isDistinct=false)], output=[col0#159,sum(col1)#177,avg(col2)#178]) +- TungstenExchange hashpartitioning(col0#159,200), None +- InMemoryColumnarTableScan [col0#159,col1#160,col2#161], InMemoryRelation [col0#159,col1#160,col2#161], true, 10000, StorageLevel(true, true, false, true, 1), ConvertToUnsafe, None ``` Author: Takeshi YAMAMURO Closes #10896 from maropu/SkipGroupbySpike. --- .../spark/sql/execution/SparkStrategies.scala | 17 +- .../sql/execution/aggregate/AggUtils.scala | 250 ++++++++---------- .../execution/aggregate/AggregateExec.scala | 56 ++++ .../aggregate/HashAggregateExec.scala | 22 +- .../aggregate/SortAggregateExec.scala | 24 +- .../exchange/EnsureRequirements.scala | 38 ++- .../org/apache/spark/sql/DataFrameSuite.scala | 15 +- .../spark/sql/execution/PlannerSuite.scala | 59 +++-- 8 files changed, 257 insertions(+), 224 deletions(-) create mode 100644 sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index 4aaf454285f4f..cda3b2b75e6b4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -259,24 +259,17 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { } val aggregateOperator = - if (aggregateExpressions.map(_.aggregateFunction).exists(!_.supportsPartial)) { - if (functionsWithDistinct.nonEmpty) { - sys.error("Distinct columns cannot exist in Aggregate operator containing " + - "aggregate functions which don't support partial aggregation.") - } else { - aggregate.AggUtils.planAggregateWithoutPartial( - groupingExpressions, - aggregateExpressions, - resultExpressions, - planLater(child)) - } - } else if (functionsWithDistinct.isEmpty) { + if (functionsWithDistinct.isEmpty) { aggregate.AggUtils.planAggregateWithoutDistinct( groupingExpressions, aggregateExpressions, resultExpressions, planLater(child)) } else { + if (aggregateExpressions.map(_.aggregateFunction).exists(!_.supportsPartial)) { + sys.error("Distinct columns cannot exist in Aggregate operator containing " + + "aggregate functions which don't support partial aggregation.") + } 
aggregate.AggUtils.planAggregateWithOneDistinct( groupingExpressions, functionsWithDistinct, diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala index 4fbb9d554c9bf..fe75ecea177a7 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala @@ -19,34 +19,97 @@ package org.apache.spark.sql.execution.aggregate import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ +import org.apache.spark.sql.catalyst.plans.physical.Distribution import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.execution.streaming.{StateStoreRestoreExec, StateStoreSaveExec} +/** + * A pattern that finds aggregate operators to support partial aggregations. + */ +object PartialAggregate { + + def unapply(plan: SparkPlan): Option[Distribution] = plan match { + case agg: AggregateExec if AggUtils.supportPartialAggregate(agg.aggregateExpressions) => + Some(agg.requiredChildDistribution.head) + case _ => + None + } +} + /** * Utility functions used by the query planner to convert our plan to new aggregation code path. */ object AggUtils { - def planAggregateWithoutPartial( + def supportPartialAggregate(aggregateExpressions: Seq[AggregateExpression]): Boolean = { + aggregateExpressions.map(_.aggregateFunction).forall(_.supportsPartial) + } + + private def createPartialAggregateExec( groupingExpressions: Seq[NamedExpression], aggregateExpressions: Seq[AggregateExpression], - resultExpressions: Seq[NamedExpression], - child: SparkPlan): Seq[SparkPlan] = { + child: SparkPlan): SparkPlan = { + val groupingAttributes = groupingExpressions.map(_.toAttribute) + val functionsWithDistinct = aggregateExpressions.filter(_.isDistinct) + val partialAggregateExpressions = aggregateExpressions.map { + case agg @ AggregateExpression(_, _, false, _) if functionsWithDistinct.length > 0 => + agg.copy(mode = PartialMerge) + case agg => + agg.copy(mode = Partial) + } + val partialAggregateAttributes = + partialAggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) + val partialResultExpressions = + groupingAttributes ++ + partialAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) - val completeAggregateExpressions = aggregateExpressions.map(_.copy(mode = Complete)) - val completeAggregateAttributes = completeAggregateExpressions.map(_.resultAttribute) - SortAggregateExec( - requiredChildDistributionExpressions = Some(groupingExpressions), + createAggregateExec( + requiredChildDistributionExpressions = None, groupingExpressions = groupingExpressions, - aggregateExpressions = completeAggregateExpressions, - aggregateAttributes = completeAggregateAttributes, - initialInputBufferOffset = 0, - resultExpressions = resultExpressions, - child = child - ) :: Nil + aggregateExpressions = partialAggregateExpressions, + aggregateAttributes = partialAggregateAttributes, + initialInputBufferOffset = if (functionsWithDistinct.length > 0) { + groupingExpressions.length + functionsWithDistinct.head.aggregateFunction.children.length + } else { + 0 + }, + resultExpressions = partialResultExpressions, + child = child) } - private def createAggregate( + private def updateMergeAggregateMode(aggregateExpressions: Seq[AggregateExpression]) = { + def updateMode(mode: AggregateMode) = mode match { + case Partial => 
PartialMerge + case Complete => Final + case mode => mode + } + aggregateExpressions.map(e => e.copy(mode = updateMode(e.mode))) + } + + /** + * Builds new merge and map-side [[AggregateExec]]s from an input aggregate operator. + * If an aggregation needs a shuffle for satisfying its own distribution and supports partial + * aggregations, a map-side aggregation is appended before the shuffle in + * [[org.apache.spark.sql.execution.exchange.EnsureRequirements]]. + */ + def createMapMergeAggregatePair(operator: SparkPlan): (SparkPlan, SparkPlan) = operator match { + case agg: AggregateExec => + val mapSideAgg = createPartialAggregateExec( + agg.groupingExpressions, agg.aggregateExpressions, agg.child) + val mergeAgg = createAggregateExec( + requiredChildDistributionExpressions = agg.requiredChildDistributionExpressions, + groupingExpressions = agg.groupingExpressions.map(_.toAttribute), + aggregateExpressions = updateMergeAggregateMode(agg.aggregateExpressions), + aggregateAttributes = agg.aggregateAttributes, + initialInputBufferOffset = agg.groupingExpressions.length, + resultExpressions = agg.resultExpressions, + child = mapSideAgg + ) + + (mergeAgg, mapSideAgg) + } + + private def createAggregateExec( requiredChildDistributionExpressions: Option[Seq[Expression]] = None, groupingExpressions: Seq[NamedExpression] = Nil, aggregateExpressions: Seq[AggregateExpression] = Nil, @@ -55,7 +118,8 @@ object AggUtils { resultExpressions: Seq[NamedExpression] = Nil, child: SparkPlan): SparkPlan = { val useHash = HashAggregateExec.supportsAggregate( - aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)) + aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)) && + supportPartialAggregate(aggregateExpressions) if (useHash) { HashAggregateExec( requiredChildDistributionExpressions = requiredChildDistributionExpressions, @@ -82,43 +146,21 @@ object AggUtils { aggregateExpressions: Seq[AggregateExpression], resultExpressions: Seq[NamedExpression], child: SparkPlan): Seq[SparkPlan] = { - // Check if we can use HashAggregate. - - // 1. Create an Aggregate Operator for partial aggregations. - val groupingAttributes = groupingExpressions.map(_.toAttribute) - val partialAggregateExpressions = aggregateExpressions.map(_.copy(mode = Partial)) - val partialAggregateAttributes = - partialAggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) - val partialResultExpressions = - groupingAttributes ++ - partialAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) - - val partialAggregate = createAggregate( - requiredChildDistributionExpressions = None, - groupingExpressions = groupingExpressions, - aggregateExpressions = partialAggregateExpressions, - aggregateAttributes = partialAggregateAttributes, - initialInputBufferOffset = 0, - resultExpressions = partialResultExpressions, - child = child) - - // 2. Create an Aggregate Operator for final aggregations. 
- val finalAggregateExpressions = aggregateExpressions.map(_.copy(mode = Final)) - // The attributes of the final aggregation buffer, which is presented as input to the result - // projection: - val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute) - - val finalAggregate = createAggregate( - requiredChildDistributionExpressions = Some(groupingAttributes), - groupingExpressions = groupingAttributes, - aggregateExpressions = finalAggregateExpressions, - aggregateAttributes = finalAggregateAttributes, - initialInputBufferOffset = groupingExpressions.length, - resultExpressions = resultExpressions, - child = partialAggregate) + val completeAggregateExpressions = aggregateExpressions.map(_.copy(mode = Complete)) + val completeAggregateAttributes = completeAggregateExpressions.map(_.resultAttribute) + val supportPartial = supportPartialAggregate(aggregateExpressions) - finalAggregate :: Nil + createAggregateExec( + requiredChildDistributionExpressions = + Some(if (supportPartial) groupingAttributes else groupingExpressions), + groupingExpressions = groupingExpressions, + aggregateExpressions = completeAggregateExpressions, + aggregateAttributes = completeAggregateAttributes, + initialInputBufferOffset = 0, + resultExpressions = resultExpressions, + child = child + ) :: Nil } def planAggregateWithOneDistinct( @@ -141,39 +183,23 @@ object AggUtils { val distinctAttributes = namedDistinctExpressions.map(_.toAttribute) val groupingAttributes = groupingExpressions.map(_.toAttribute) - // 1. Create an Aggregate Operator for partial aggregations. + // 1. Create an Aggregate Operator for non-distinct aggregations. val partialAggregate: SparkPlan = { val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial)) val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - // We will group by the original grouping expression, plus an additional expression for the - // DISTINCT column. For example, for AVG(DISTINCT value) GROUP BY key, the grouping - // expressions will be [key, value]. - createAggregate( - groupingExpressions = groupingExpressions ++ namedDistinctExpressions, - aggregateExpressions = aggregateExpressions, - aggregateAttributes = aggregateAttributes, - resultExpressions = groupingAttributes ++ distinctAttributes ++ - aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), - child = child) - } - - // 2. Create an Aggregate Operator for partial merge aggregations. - val partialMergeAggregate: SparkPlan = { - val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) - val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - createAggregate( + createAggregateExec( requiredChildDistributionExpressions = Some(groupingAttributes ++ distinctAttributes), - groupingExpressions = groupingAttributes ++ distinctAttributes, + groupingExpressions = groupingExpressions ++ namedDistinctExpressions, aggregateExpressions = aggregateExpressions, aggregateAttributes = aggregateAttributes, initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length, resultExpressions = groupingAttributes ++ distinctAttributes ++ aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), - child = partialAggregate) + child = child) } - // 3. Create an Aggregate operator for partial aggregation (for distinct) + // 2. Create an Aggregate Operator for the final aggregation. 
val distinctColumnAttributeLookup = distinctExpressions.zip(distinctAttributes).toMap val rewrittenDistinctFunctions = functionsWithDistinct.map { // Children of an AggregateFunction with DISTINCT keyword has already @@ -183,38 +209,6 @@ object AggUtils { aggregateFunction.transformDown(distinctColumnAttributeLookup) .asInstanceOf[AggregateFunction] } - - val partialDistinctAggregate: SparkPlan = { - val mergeAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) - // The attributes of the final aggregation buffer, which is presented as input to the result - // projection: - val mergeAggregateAttributes = mergeAggregateExpressions.map(_.resultAttribute) - val (distinctAggregateExpressions, distinctAggregateAttributes) = - rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) => - // We rewrite the aggregate function to a non-distinct aggregation because - // its input will have distinct arguments. - // We just keep the isDistinct setting to true, so when users look at the query plan, - // they still can see distinct aggregations. - val expr = AggregateExpression(func, Partial, isDistinct = true) - // Use original AggregationFunction to lookup attributes, which is used to build - // aggregateFunctionToAttribute - val attr = functionsWithDistinct(i).resultAttribute - (expr, attr) - }.unzip - - val partialAggregateResult = groupingAttributes ++ - mergeAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) ++ - distinctAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) - createAggregate( - groupingExpressions = groupingAttributes, - aggregateExpressions = mergeAggregateExpressions ++ distinctAggregateExpressions, - aggregateAttributes = mergeAggregateAttributes ++ distinctAggregateAttributes, - initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length, - resultExpressions = partialAggregateResult, - child = partialMergeAggregate) - } - - // 4. Create an Aggregate Operator for the final aggregation. val finalAndCompleteAggregate: SparkPlan = { val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final)) // The attributes of the final aggregation buffer, which is presented as input to the result @@ -225,23 +219,23 @@ object AggUtils { rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) => // We rewrite the aggregate function to a non-distinct aggregation because // its input will have distinct arguments. - // We just keep the isDistinct setting to true, so when users look at the query plan, - // they still can see distinct aggregations. - val expr = AggregateExpression(func, Final, isDistinct = true) + // We keep the isDistinct setting to true because this flag is used to generate partial + // aggregations and it is easy to see aggregation types in the query plan. 
+ val expr = AggregateExpression(func, Complete, isDistinct = true) // Use original AggregationFunction to lookup attributes, which is used to build // aggregateFunctionToAttribute val attr = functionsWithDistinct(i).resultAttribute (expr, attr) - }.unzip + }.unzip - createAggregate( + createAggregateExec( requiredChildDistributionExpressions = Some(groupingAttributes), groupingExpressions = groupingAttributes, aggregateExpressions = finalAggregateExpressions ++ distinctAggregateExpressions, aggregateAttributes = finalAggregateAttributes ++ distinctAggregateAttributes, initialInputBufferOffset = groupingAttributes.length, resultExpressions = resultExpressions, - child = partialDistinctAggregate) + child = partialAggregate) } finalAndCompleteAggregate :: Nil @@ -249,13 +243,14 @@ object AggUtils { /** * Plans a streaming aggregation using the following progression: - * - Partial Aggregation - * - Shuffle - * - Partial Merge (now there is at most 1 tuple per group) + * - Partial Aggregation (now there is at most 1 tuple per group) * - StateStoreRestore (now there is 1 tuple from this batch + optionally one from the previous) * - PartialMerge (now there is at most 1 tuple per group) * - StateStoreSave (saves the tuple for the next batch) * - Complete (output the current result of the aggregation) + * + * If the first aggregation needs a shuffle to satisfy its distribution, a map-side partial + * an aggregation and a shuffle are added in `EnsureRequirements`. */ def planStreamingAggregation( groupingExpressions: Seq[NamedExpression], @@ -268,39 +263,24 @@ object AggUtils { val partialAggregate: SparkPlan = { val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial)) val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - // We will group by the original grouping expression, plus an additional expression for the - // DISTINCT column. For example, for AVG(DISTINCT value) GROUP BY key, the grouping - // expressions will be [key, value]. 
- createAggregate( - groupingExpressions = groupingExpressions, - aggregateExpressions = aggregateExpressions, - aggregateAttributes = aggregateAttributes, - resultExpressions = groupingAttributes ++ - aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), - child = child) - } - - val partialMerged1: SparkPlan = { - val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) - val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - createAggregate( + createAggregateExec( requiredChildDistributionExpressions = Some(groupingAttributes), - groupingExpressions = groupingAttributes, + groupingExpressions = groupingExpressions, aggregateExpressions = aggregateExpressions, aggregateAttributes = aggregateAttributes, initialInputBufferOffset = groupingAttributes.length, resultExpressions = groupingAttributes ++ aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), - child = partialAggregate) + child = child) } - val restored = StateStoreRestoreExec(groupingAttributes, None, partialMerged1) + val restored = StateStoreRestoreExec(groupingAttributes, None, partialAggregate) - val partialMerged2: SparkPlan = { + val partialMerged: SparkPlan = { val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - createAggregate( + createAggregateExec( requiredChildDistributionExpressions = Some(groupingAttributes), groupingExpressions = groupingAttributes, @@ -314,7 +294,7 @@ object AggUtils { // Note: stateId and returnAllStates are filled in later with preparation rules // in IncrementalExecution. val saved = StateStoreSaveExec( - groupingAttributes, stateId = None, returnAllStates = None, partialMerged2) + groupingAttributes, stateId = None, returnAllStates = None, partialMerged) val finalAndCompleteAggregate: SparkPlan = { val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final)) @@ -322,7 +302,7 @@ object AggUtils { // projection: val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute) - createAggregate( + createAggregateExec( requiredChildDistributionExpressions = Some(groupingAttributes), groupingExpressions = groupingAttributes, aggregateExpressions = finalAggregateExpressions, diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala new file mode 100644 index 0000000000000..b88a8aa3daecd --- /dev/null +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.aggregate + +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression +import org.apache.spark.sql.catalyst.plans.physical._ +import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.execution.UnaryExecNode + +/** + * A base class for aggregate implementation. + */ +abstract class AggregateExec extends UnaryExecNode { + + def requiredChildDistributionExpressions: Option[Seq[Expression]] + def groupingExpressions: Seq[NamedExpression] + def aggregateExpressions: Seq[AggregateExpression] + def aggregateAttributes: Seq[Attribute] + def initialInputBufferOffset: Int + def resultExpressions: Seq[NamedExpression] + + protected[this] val aggregateBufferAttributes = { + aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) + } + + override def producedAttributes: AttributeSet = + AttributeSet(aggregateAttributes) ++ + AttributeSet(resultExpressions.diff(groupingExpressions).map(_.toAttribute)) ++ + AttributeSet(aggregateBufferAttributes) + + override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute) + + override def requiredChildDistribution: List[Distribution] = { + requiredChildDistributionExpressions match { + case Some(exprs) if exprs.isEmpty => AllTuples :: Nil + case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil + case None => UnspecifiedDistribution :: Nil + } + } +} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala index bd7efa606e0ce..525c7e301add4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala @@ -24,7 +24,6 @@ import org.apache.spark.sql.catalyst.errors._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ import org.apache.spark.sql.catalyst.expressions.codegen._ -import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics} import org.apache.spark.sql.types.{DecimalType, StringType, StructType} @@ -42,11 +41,7 @@ case class HashAggregateExec( initialInputBufferOffset: Int, resultExpressions: Seq[NamedExpression], child: SparkPlan) - extends UnaryExecNode with CodegenSupport { - - private[this] val aggregateBufferAttributes = { - aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) - } + extends AggregateExec with CodegenSupport { require(HashAggregateExec.supportsAggregate(aggregateBufferAttributes)) @@ -60,21 +55,6 @@ case class HashAggregateExec( "spillSize" -> SQLMetrics.createSizeMetric(sparkContext, "spill size"), "aggTime" -> SQLMetrics.createTimingMetric(sparkContext, "aggregate time")) - override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute) - - override def producedAttributes: AttributeSet = - AttributeSet(aggregateAttributes) ++ - AttributeSet(resultExpressions.diff(groupingExpressions).map(_.toAttribute)) ++ - AttributeSet(aggregateBufferAttributes) - - override def requiredChildDistribution: List[Distribution] = { - requiredChildDistributionExpressions match { - case Some(exprs) if exprs.isEmpty => AllTuples :: Nil - case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil - case 
None => UnspecifiedDistribution :: Nil - } - } - // This is for testing. We force TungstenAggregationIterator to fall back to the unsafe row hash // map and/or the sort-based aggregation once it has processed a given number of input rows. private val testFallbackStartsAt: Option[(Int, Int)] = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala index 2a81a823c44b3..68f86fca80937 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala @@ -22,8 +22,7 @@ import org.apache.spark.sql.catalyst.errors._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ -import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, UnspecifiedDistribution} -import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode} +import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.execution.metric.SQLMetrics import org.apache.spark.util.Utils @@ -38,30 +37,11 @@ case class SortAggregateExec( initialInputBufferOffset: Int, resultExpressions: Seq[NamedExpression], child: SparkPlan) - extends UnaryExecNode { - - private[this] val aggregateBufferAttributes = { - aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) - } - - override def producedAttributes: AttributeSet = - AttributeSet(aggregateAttributes) ++ - AttributeSet(resultExpressions.diff(groupingExpressions).map(_.toAttribute)) ++ - AttributeSet(aggregateBufferAttributes) + extends AggregateExec { override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) - override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute) - - override def requiredChildDistribution: List[Distribution] = { - requiredChildDistributionExpressions match { - case Some(exprs) if exprs.isEmpty => AllTuples :: Nil - case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil - case None => UnspecifiedDistribution :: Nil - } - } - override def requiredChildOrdering: Seq[Seq[SortOrder]] = { groupingExpressions.map(SortOrder(_, Ascending)) :: Nil } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala index 446571aa8409f..951051c4df2f7 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala @@ -21,6 +21,8 @@ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.execution._ +import org.apache.spark.sql.execution.aggregate.AggUtils +import org.apache.spark.sql.execution.aggregate.PartialAggregate import org.apache.spark.sql.internal.SQLConf /** @@ -151,18 +153,30 @@ case class EnsureRequirements(conf: SQLConf) extends Rule[SparkPlan] { private def ensureDistributionAndOrdering(operator: SparkPlan): SparkPlan = { val requiredChildDistributions: Seq[Distribution] = operator.requiredChildDistribution val requiredChildOrderings: Seq[Seq[SortOrder]] = 
operator.requiredChildOrdering - var children: Seq[SparkPlan] = operator.children - assert(requiredChildDistributions.length == children.length) - assert(requiredChildOrderings.length == children.length) + assert(requiredChildDistributions.length == operator.children.length) + assert(requiredChildOrderings.length == operator.children.length) - // Ensure that the operator's children satisfy their output distribution requirements: - children = children.zip(requiredChildDistributions).map { - case (child, distribution) if child.outputPartitioning.satisfies(distribution) => - child - case (child, BroadcastDistribution(mode)) => - BroadcastExchangeExec(mode, child) - case (child, distribution) => - ShuffleExchange(createPartitioning(distribution, defaultNumPreShufflePartitions), child) + def createShuffleExchange(dist: Distribution, child: SparkPlan) = + ShuffleExchange(createPartitioning(dist, defaultNumPreShufflePartitions), child) + + var (parent, children) = operator match { + case PartialAggregate(childDist) if !operator.outputPartitioning.satisfies(childDist) => + // If an aggregation needs a shuffle and support partial aggregations, a map-side partial + // aggregation and a shuffle are added as children. + val (mergeAgg, mapSideAgg) = AggUtils.createMapMergeAggregatePair(operator) + (mergeAgg, createShuffleExchange(requiredChildDistributions.head, mapSideAgg) :: Nil) + case _ => + // Ensure that the operator's children satisfy their output distribution requirements: + val childrenWithDist = operator.children.zip(requiredChildDistributions) + val newChildren = childrenWithDist.map { + case (child, distribution) if child.outputPartitioning.satisfies(distribution) => + child + case (child, BroadcastDistribution(mode)) => + BroadcastExchangeExec(mode, child) + case (child, distribution) => + createShuffleExchange(distribution, child) + } + (operator, newChildren) } // If the operator has multiple children and specifies child output distributions (e.g. join), @@ -246,7 +260,7 @@ case class EnsureRequirements(conf: SQLConf) extends Rule[SparkPlan] { } } - operator.withNewChildren(children) + parent.withNewChildren(children) } def apply(plan: SparkPlan): SparkPlan = plan.transformUp { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala index 499f3180379c2..cd485770d269c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala @@ -1248,17 +1248,17 @@ class DataFrameSuite extends QueryTest with SharedSQLContext { } /** - * Verifies that there is no Exchange between the Aggregations for `df` + * Verifies that there is a single Aggregation for `df` */ - private def verifyNonExchangingAgg(df: DataFrame) = { + private def verifyNonExchangingSingleAgg(df: DataFrame) = { var atFirstAgg: Boolean = false df.queryExecution.executedPlan.foreach { case agg: HashAggregateExec => - atFirstAgg = !atFirstAgg - case _ => if (atFirstAgg) { - fail("Should not have operators between the two aggregations") + fail("Should not have back to back Aggregates") } + atFirstAgg = true + case _ => } } @@ -1292,9 +1292,10 @@ class DataFrameSuite extends QueryTest with SharedSQLContext { // Group by the column we are distributed by. 
This should generate a plan with no exchange // between the aggregates val df3 = testData.repartition($"key").groupBy("key").count() - verifyNonExchangingAgg(df3) - verifyNonExchangingAgg(testData.repartition($"key", $"value") + verifyNonExchangingSingleAgg(df3) + verifyNonExchangingSingleAgg(testData.repartition($"key", $"value") .groupBy("key", "value").count()) + verifyNonExchangingSingleAgg(testData.repartition($"key").groupBy("key", "value").count()) // Grouping by just the first distributeBy expr, need to exchange. verifyExchangingAgg(testData.repartition($"key", $"value") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala index 13490c35679a2..436ff59c4d3f7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala @@ -18,7 +18,7 @@ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{execution, Row} +import org.apache.spark.sql.{execution, DataFrame, Row} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, Literal, SortOrder} import org.apache.spark.sql.catalyst.plans.Inner @@ -37,36 +37,65 @@ class PlannerSuite extends SharedSQLContext { setupTestData() - private def testPartialAggregationPlan(query: LogicalPlan): Unit = { + private def testPartialAggregationPlan(query: LogicalPlan): Seq[SparkPlan] = { val planner = spark.sessionState.planner import planner._ - val plannedOption = Aggregation(query).headOption - val planned = - plannedOption.getOrElse( - fail(s"Could query play aggregation query $query. Is it an aggregation query?")) - val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n } - - // For the new aggregation code path, there will be four aggregate operator for - // distinct aggregations. - assert( - aggregations.size == 2 || aggregations.size == 4, - s"The plan of query $query does not have partial aggregations.") + val ensureRequirements = EnsureRequirements(spark.sessionState.conf) + val planned = Aggregation(query).headOption.map(ensureRequirements(_)) + .getOrElse(fail(s"Could query play aggregation query $query. Is it an aggregation query?")) + planned.collect { case n if n.nodeName contains "Aggregate" => n } } test("count is partially aggregated") { val query = testData.groupBy('value).agg(count('key)).queryExecution.analyzed - testPartialAggregationPlan(query) + assert(testPartialAggregationPlan(query).size == 2, + s"The plan of query $query does not have partial aggregations.") } test("count distinct is partially aggregated") { val query = testData.groupBy('value).agg(countDistinct('key)).queryExecution.analyzed testPartialAggregationPlan(query) + // For the new aggregation code path, there will be four aggregate operator for distinct + // aggregations. + assert(testPartialAggregationPlan(query).size == 4, + s"The plan of query $query does not have partial aggregations.") } test("mixed aggregates are partially aggregated") { val query = testData.groupBy('value).agg(count('value), countDistinct('key)).queryExecution.analyzed - testPartialAggregationPlan(query) + // For the new aggregation code path, there will be four aggregate operator for distinct + // aggregations. 
+ assert(testPartialAggregationPlan(query).size == 4, + s"The plan of query $query does not have partial aggregations.") + } + + test("non-partial aggregation for aggregates") { + withTempView("testNonPartialAggregation") { + val schema = StructType(StructField(s"value", IntegerType, true) :: Nil) + val row = Row.fromSeq(Seq.fill(1)(null)) + val rowRDD = sparkContext.parallelize(row :: Nil) + spark.createDataFrame(rowRDD, schema).repartition($"value") + .createOrReplaceTempView("testNonPartialAggregation") + + val planned1 = sql("SELECT SUM(value) FROM testNonPartialAggregation GROUP BY value") + .queryExecution.executedPlan + + // If input data are already partitioned and the same columns are used in grouping keys and + // aggregation values, no partial aggregation exist in query plans. + val aggOps1 = planned1.collect { case n if n.nodeName contains "Aggregate" => n } + assert(aggOps1.size == 1, s"The plan $planned1 has partial aggregations.") + + val planned2 = sql( + """ + |SELECT t.value, SUM(DISTINCT t.value) + |FROM (SELECT * FROM testNonPartialAggregation ORDER BY value) t + |GROUP BY t.value + """.stripMargin).queryExecution.executedPlan + + val aggOps2 = planned1.collect { case n if n.nodeName contains "Aggregate" => n } + assert(aggOps2.size == 1, s"The plan $planned2 has partial aggregations.") + } } test("sizeInBytes estimation of limit operator for broadcast hash join optimization") { From d2ae6399ee2f0524b88262735adbbcb2035de8fd Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Thu, 25 Aug 2016 14:18:58 +0200 Subject: [PATCH 096/270] [SPARK-16991][SPARK-17099][SPARK-17120][SQL] Fix Outer Join Elimination when Filter's isNotNull Constraints Unable to Filter Out All Null-supplying Rows ### What changes were proposed in this pull request? This PR is to fix an incorrect outer join elimination when filter's `isNotNull` constraints is unable to filter out all null-supplying rows. For example, `isnotnull(coalesce(b#227, c#238))`. Users can hit this error when they try to use `using/natural outer join`, which is converted to a normal outer join with a `coalesce` expression on the `using columns`. For example, ```Scala val a = Seq((1, 2), (2, 3)).toDF("a", "b") val b = Seq((2, 5), (3, 4)).toDF("a", "c") val c = Seq((3, 1)).toDF("a", "d") val ab = a.join(b, Seq("a"), "fullouter") ab.join(c, "a").explain(true) ``` The dataframe `ab` is doing `using full-outer join`, which is converted to a normal outer join with a `coalesce` expression. Constraints inference generates a `Filter` with constraints `isnotnull(coalesce(b#227, c#238))`. Then, it triggers a wrong outer join elimination and generates a wrong result. ``` Project [a#251, b#227, c#237, d#247] +- Join Inner, (a#251 = a#246) :- Project [coalesce(a#226, a#236) AS a#251, b#227, c#237] : +- Join FullOuter, (a#226 = a#236) : :- Project [_1#223 AS a#226, _2#224 AS b#227] : : +- LocalRelation [_1#223, _2#224] : +- Project [_1#233 AS a#236, _2#234 AS c#237] : +- LocalRelation [_1#233, _2#234] +- Project [_1#243 AS a#246, _2#244 AS d#247] +- LocalRelation [_1#243, _2#244] == Optimized Logical Plan == Project [a#251, b#227, c#237, d#247] +- Join Inner, (a#251 = a#246) :- Project [coalesce(a#226, a#236) AS a#251, b#227, c#237] : +- Filter isnotnull(coalesce(a#226, a#236)) : +- Join FullOuter, (a#226 = a#236) : :- LocalRelation [a#226, b#227] : +- LocalRelation [a#236, c#237] +- LocalRelation [a#246, d#247] ``` **A note to the `Committer`**, please also give the credit to dongjoon-hyun who submitted another PR for fixing this issue. 
https://github.com/apache/spark/pull/14580 ### How was this patch tested? Added test cases Author: gatorsmile Closes #14661 from gatorsmile/fixOuterJoinElimination. --- .../sql/catalyst/optimizer/Optimizer.scala | 18 ++--- .../optimizer/OuterJoinEliminationSuite.scala | 39 ++++++++++ .../resources/sql-tests/inputs/outer-join.sql | 36 ++++++++++ .../sql-tests/results/outer-join.sql.out | 72 +++++++++++++++++++ .../apache/spark/sql/DataFrameJoinSuite.scala | 8 +++ 5 files changed, 161 insertions(+), 12 deletions(-) create mode 100644 sql/core/src/test/resources/sql-tests/inputs/outer-join.sql create mode 100644 sql/core/src/test/resources/sql-tests/results/outer-join.sql.out diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 9a0ff8a9b3211..82ad0fb5eeea0 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -1343,18 +1343,12 @@ object EliminateOuterJoin extends Rule[LogicalPlan] with PredicateHelper { } private def buildNewJoinType(filter: Filter, join: Join): JoinType = { - val splitConjunctiveConditions: Seq[Expression] = splitConjunctivePredicates(filter.condition) - val leftConditions = splitConjunctiveConditions - .filter(_.references.subsetOf(join.left.outputSet)) - val rightConditions = splitConjunctiveConditions - .filter(_.references.subsetOf(join.right.outputSet)) - - val leftHasNonNullPredicate = leftConditions.exists(canFilterOutNull) || - filter.constraints.filter(_.isInstanceOf[IsNotNull]) - .exists(expr => join.left.outputSet.intersect(expr.references).nonEmpty) - val rightHasNonNullPredicate = rightConditions.exists(canFilterOutNull) || - filter.constraints.filter(_.isInstanceOf[IsNotNull]) - .exists(expr => join.right.outputSet.intersect(expr.references).nonEmpty) + val conditions = splitConjunctivePredicates(filter.condition) ++ filter.constraints + val leftConditions = conditions.filter(_.references.subsetOf(join.left.outputSet)) + val rightConditions = conditions.filter(_.references.subsetOf(join.right.outputSet)) + + val leftHasNonNullPredicate = leftConditions.exists(canFilterOutNull) + val rightHasNonNullPredicate = rightConditions.exists(canFilterOutNull) join.joinType match { case RightOuter if leftHasNonNullPredicate => Inner diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OuterJoinEliminationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OuterJoinEliminationSuite.scala index 41754adef4216..c168a55e40c54 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OuterJoinEliminationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OuterJoinEliminationSuite.scala @@ -20,6 +20,7 @@ package org.apache.spark.sql.catalyst.optimizer import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ +import org.apache.spark.sql.catalyst.expressions.{Coalesce, IsNotNull} import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules._ @@ -192,4 +193,42 @@ class OuterJoinEliminationSuite extends PlanTest { comparePlans(optimized, correctAnswer) } + + test("joins: no outer join elimination if 
the filter is not NULL eliminated") { + val x = testRelation.subquery('x) + val y = testRelation1.subquery('y) + + val originalQuery = + x.join(y, FullOuter, Option("x.a".attr === "y.d".attr)) + .where(Coalesce("y.e".attr :: "x.a".attr :: Nil)) + + val optimized = Optimize.execute(originalQuery.analyze) + + val left = testRelation + val right = testRelation1 + val correctAnswer = + left.join(right, FullOuter, Option("a".attr === "d".attr)) + .where(Coalesce("e".attr :: "a".attr :: Nil)).analyze + + comparePlans(optimized, correctAnswer) + } + + test("joins: no outer join elimination if the filter's constraints are not NULL eliminated") { + val x = testRelation.subquery('x) + val y = testRelation1.subquery('y) + + val originalQuery = + x.join(y, FullOuter, Option("x.a".attr === "y.d".attr)) + .where(IsNotNull(Coalesce("y.e".attr :: "x.a".attr :: Nil))) + + val optimized = Optimize.execute(originalQuery.analyze) + + val left = testRelation + val right = testRelation1 + val correctAnswer = + left.join(right, FullOuter, Option("a".attr === "d".attr)) + .where(IsNotNull(Coalesce("e".attr :: "a".attr :: Nil))).analyze + + comparePlans(optimized, correctAnswer) + } } diff --git a/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql b/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql new file mode 100644 index 0000000000000..f50f1ebad970e --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql @@ -0,0 +1,36 @@ +-- SPARK-17099: Incorrect result when HAVING clause is added to group by query +CREATE OR REPLACE TEMPORARY VIEW t1 AS SELECT * FROM VALUES +(-234), (145), (367), (975), (298) +as t1(int_col1); + +CREATE OR REPLACE TEMPORARY VIEW t2 AS SELECT * FROM VALUES +(-769, -244), (-800, -409), (940, 86), (-507, 304), (-367, 158) +as t2(int_col0, int_col1); + +SELECT + (SUM(COALESCE(t1.int_col1, t2.int_col0))), + ((COALESCE(t1.int_col1, t2.int_col0)) * 2) +FROM t1 +RIGHT JOIN t2 + ON (t2.int_col0) = (t1.int_col1) +GROUP BY GREATEST(COALESCE(t2.int_col1, 109), COALESCE(t1.int_col1, -449)), + COALESCE(t1.int_col1, t2.int_col0) +HAVING (SUM(COALESCE(t1.int_col1, t2.int_col0))) + > ((COALESCE(t1.int_col1, t2.int_col0)) * 2); + + +-- SPARK-17120: Analyzer incorrectly optimizes plan to empty LocalRelation +CREATE OR REPLACE TEMPORARY VIEW t1 AS SELECT * FROM VALUES (97) as t1(int_col1); + +CREATE OR REPLACE TEMPORARY VIEW t2 AS SELECT * FROM VALUES (0) as t2(int_col1); + +SELECT * +FROM ( +SELECT + COALESCE(t2.int_col1, t1.int_col1) AS int_col + FROM t1 + LEFT JOIN t2 ON false +) t where (t.int_col) is not null; + + + diff --git a/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out b/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out new file mode 100644 index 0000000000000..b39fdb0e58720 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out @@ -0,0 +1,72 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 6 + + +-- !query 0 +CREATE OR REPLACE TEMPORARY VIEW t1 AS SELECT * FROM VALUES +(-234), (145), (367), (975), (298) +as t1(int_col1) +-- !query 0 schema +struct<> +-- !query 0 output + + + +-- !query 1 +CREATE OR REPLACE TEMPORARY VIEW t2 AS SELECT * FROM VALUES +(-769, -244), (-800, -409), (940, 86), (-507, 304), (-367, 158) +as t2(int_col0, int_col1) +-- !query 1 schema +struct<> +-- !query 1 output + + + +-- !query 2 +SELECT + (SUM(COALESCE(t1.int_col1, t2.int_col0))), + ((COALESCE(t1.int_col1, t2.int_col0)) * 2) +FROM t1 +RIGHT JOIN t2 + ON (t2.int_col0) = (t1.int_col1) +GROUP BY 
GREATEST(COALESCE(t2.int_col1, 109), COALESCE(t1.int_col1, -449)), + COALESCE(t1.int_col1, t2.int_col0) +HAVING (SUM(COALESCE(t1.int_col1, t2.int_col0))) + > ((COALESCE(t1.int_col1, t2.int_col0)) * 2) +-- !query 2 schema +struct +-- !query 2 output +-367 -734 +-507 -1014 +-769 -1538 +-800 -1600 + + +-- !query 3 +CREATE OR REPLACE TEMPORARY VIEW t1 AS SELECT * FROM VALUES (97) as t1(int_col1) +-- !query 3 schema +struct<> +-- !query 3 output + + + +-- !query 4 +CREATE OR REPLACE TEMPORARY VIEW t2 AS SELECT * FROM VALUES (0) as t2(int_col1) +-- !query 4 schema +struct<> +-- !query 4 output + + + +-- !query 5 +SELECT * +FROM ( +SELECT + COALESCE(t2.int_col1, t1.int_col1) AS int_col + FROM t1 + LEFT JOIN t2 ON false +) t where (t.int_col) is not null +-- !query 5 schema +struct +-- !query 5 output +97 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala index 4342c039aefc8..4abf5e42b9c34 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala @@ -225,4 +225,12 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext { Row(1, null) :: Row(null, 2) :: Nil ) } + + test("SPARK-16991: Full outer join followed by inner join produces wrong results") { + val a = Seq((1, 2), (2, 3)).toDF("a", "b") + val b = Seq((2, 5), (3, 4)).toDF("a", "c") + val c = Seq((3, 1)).toDF("a", "d") + val ab = a.join(b, Seq("a"), "fullouter") + checkAnswer(ab.join(c, "a"), Row(3, null, 4, 1) :: Nil) + } } From f2093107196b9af62908ecf15bac043f3b1e64c4 Mon Sep 17 00:00:00 2001 From: Michael Allman Date: Thu, 25 Aug 2016 11:57:38 -0700 Subject: [PATCH 097/270] [SPARK-17231][CORE] Avoid building debug or trace log messages unless the respective log level is enabled (This PR addresses https://issues.apache.org/jira/browse/SPARK-17231) ## What changes were proposed in this pull request? While debugging the performance of a large GraphX connected components computation, we found several places in the `network-common` and `network-shuffle` code bases where trace or debug log messages are constructed even if the respective log level is disabled. According to YourKit, these constructions were creating substantial churn in the eden region. Refactoring the respective code to avoid these unnecessary constructions except where necessary led to a modest but measurable reduction in our job's task time, GC time and the ratio thereof. ## How was this patch tested? We computed the connected components of a graph with about 2.6 billion vertices and 1.7 billion edges four times. We used four different EC2 clusters each with 8 r3.8xl worker nodes. Two test runs used Spark master. Two used Spark master + this PR. The results from the first test run, master and master+PR: ![master](https://cloud.githubusercontent.com/assets/833693/17951634/7471cbca-6a18-11e6-9c26-78afe9319685.jpg) ![logging_perf_improvements](https://cloud.githubusercontent.com/assets/833693/17951632/7467844e-6a18-11e6-9a0e-053dc7650413.jpg) The results from the second test run, master and master+PR: ![master 2](https://cloud.githubusercontent.com/assets/833693/17951633/746dd6aa-6a18-11e6-8e27-606680b3f105.jpg) ![logging_perf_improvements 2](https://cloud.githubusercontent.com/assets/833693/17951631/74488710-6a18-11e6-8a32-08692f373386.jpg) Though modest, I believe these results are significant. 
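For reference, the change follows a standard guard pattern: trace/debug calls whose arguments are non-trivial to compute are wrapped in an `isTraceEnabled`/`isDebugEnabled` check, so the arguments are never built when the level is disabled. Below is a minimal Scala-flavoured sketch of the idea; the actual changes are to the Java `network-common`/`network-shuffle` classes, and the names in the sketch are illustrative only.

```scala
import org.slf4j.LoggerFactory

// A minimal sketch of the guard pattern, assuming slf4j; class and method
// names are illustrative, not the actual network-common code.
object TraceGuardSketch {
  private val logger = LoggerFactory.getLogger(getClass)

  // Stand-in for an argument that is non-trivial to compute, such as
  // formatting a channel's remote address.
  private def remoteAddress(): String = "host.example.com:7337"

  def onRequestSent(requestId: Long): Unit = {
    // Even with {} placeholders, the argument expressions themselves are
    // evaluated before the call. The guard skips that work entirely when
    // TRACE is disabled, avoiding needless allocation churn on hot paths.
    if (logger.isTraceEnabled) {
      logger.trace("Sending request {} to {}", requestId.toString, remoteAddress())
    }
  }
}
```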
Author: Michael Allman Closes #14798 from mallman/spark-17231-logging_perf_improvements. --- .../spark/network/client/TransportClient.java | 39 ++++++++++++------- .../client/TransportClientFactory.java | 2 +- .../client/TransportResponseHandler.java | 15 ++++--- .../network/protocol/MessageDecoder.java | 2 +- .../server/TransportChannelHandler.java | 6 +-- .../server/TransportRequestHandler.java | 18 ++++----- .../spark/network/server/TransportServer.java | 2 +- .../shuffle/ExternalShuffleBlockHandler.java | 14 ++++--- .../shuffle/ExternalShuffleBlockResolver.java | 2 +- 9 files changed, 55 insertions(+), 45 deletions(-) diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java index 64a83171e9e90..a67683b892218 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java @@ -43,7 +43,7 @@ import org.apache.spark.network.protocol.RpcRequest; import org.apache.spark.network.protocol.StreamChunkId; import org.apache.spark.network.protocol.StreamRequest; -import org.apache.spark.network.util.NettyUtils; +import static org.apache.spark.network.util.NettyUtils.getRemoteAddress; /** * Client for fetching consecutive chunks of a pre-negotiated stream. This API is intended to allow @@ -135,9 +135,10 @@ public void fetchChunk( long streamId, final int chunkIndex, final ChunkReceivedCallback callback) { - final String serverAddr = NettyUtils.getRemoteAddress(channel); final long startTime = System.currentTimeMillis(); - logger.debug("Sending fetch chunk request {} to {}", chunkIndex, serverAddr); + if (logger.isDebugEnabled()) { + logger.debug("Sending fetch chunk request {} to {}", chunkIndex, getRemoteAddress(channel)); + } final StreamChunkId streamChunkId = new StreamChunkId(streamId, chunkIndex); handler.addFetchRequest(streamChunkId, callback); @@ -148,11 +149,13 @@ public void fetchChunk( public void operationComplete(ChannelFuture future) throws Exception { if (future.isSuccess()) { long timeTaken = System.currentTimeMillis() - startTime; - logger.trace("Sending request {} to {} took {} ms", streamChunkId, serverAddr, - timeTaken); + if (logger.isTraceEnabled()) { + logger.trace("Sending request {} to {} took {} ms", streamChunkId, getRemoteAddress(channel), + timeTaken); + } } else { String errorMsg = String.format("Failed to send request %s to %s: %s", streamChunkId, - serverAddr, future.cause()); + getRemoteAddress(channel), future.cause()); logger.error(errorMsg, future.cause()); handler.removeFetchRequest(streamChunkId); channel.close(); @@ -173,9 +176,10 @@ public void operationComplete(ChannelFuture future) throws Exception { * @param callback Object to call with the stream data. 
*/ public void stream(final String streamId, final StreamCallback callback) { - final String serverAddr = NettyUtils.getRemoteAddress(channel); final long startTime = System.currentTimeMillis(); - logger.debug("Sending stream request for {} to {}", streamId, serverAddr); + if (logger.isDebugEnabled()) { + logger.debug("Sending stream request for {} to {}", streamId, getRemoteAddress(channel)); + } // Need to synchronize here so that the callback is added to the queue and the RPC is // written to the socket atomically, so that callbacks are called in the right order @@ -188,11 +192,13 @@ public void stream(final String streamId, final StreamCallback callback) { public void operationComplete(ChannelFuture future) throws Exception { if (future.isSuccess()) { long timeTaken = System.currentTimeMillis() - startTime; - logger.trace("Sending request for {} to {} took {} ms", streamId, serverAddr, - timeTaken); + if (logger.isTraceEnabled()) { + logger.trace("Sending request for {} to {} took {} ms", streamId, getRemoteAddress(channel), + timeTaken); + } } else { String errorMsg = String.format("Failed to send request for %s to %s: %s", streamId, - serverAddr, future.cause()); + getRemoteAddress(channel), future.cause()); logger.error(errorMsg, future.cause()); channel.close(); try { @@ -215,9 +221,10 @@ public void operationComplete(ChannelFuture future) throws Exception { * @return The RPC's id. */ public long sendRpc(ByteBuffer message, final RpcResponseCallback callback) { - final String serverAddr = NettyUtils.getRemoteAddress(channel); final long startTime = System.currentTimeMillis(); - logger.trace("Sending RPC to {}", serverAddr); + if (logger.isTraceEnabled()) { + logger.trace("Sending RPC to {}", getRemoteAddress(channel)); + } final long requestId = Math.abs(UUID.randomUUID().getLeastSignificantBits()); handler.addRpcRequest(requestId, callback); @@ -228,10 +235,12 @@ public long sendRpc(ByteBuffer message, final RpcResponseCallback callback) { public void operationComplete(ChannelFuture future) throws Exception { if (future.isSuccess()) { long timeTaken = System.currentTimeMillis() - startTime; - logger.trace("Sending request {} to {} took {} ms", requestId, serverAddr, timeTaken); + if (logger.isTraceEnabled()) { + logger.trace("Sending request {} to {} took {} ms", requestId, getRemoteAddress(channel), timeTaken); + } } else { String errorMsg = String.format("Failed to send RPC %s to %s: %s", requestId, - serverAddr, future.cause()); + getRemoteAddress(channel), future.cause()); logger.error(errorMsg, future.cause()); handler.removeRpcRequest(requestId); channel.close(); diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java index a27aaf2b277f7..1c9916baee07c 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java @@ -195,7 +195,7 @@ public TransportClient createUnmanagedClient(String remoteHost, int remotePort) /** Create a completely new {@link TransportClient} to the remote address. 
*/ private TransportClient createClient(InetSocketAddress address) throws IOException { - logger.debug("Creating new connection to " + address); + logger.debug("Creating new connection to {}", address); Bootstrap bootstrap = new Bootstrap(); bootstrap.group(workerGroup) diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java index 8a69223c88ee4..179667296ec7d 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java @@ -38,7 +38,7 @@ import org.apache.spark.network.protocol.StreamFailure; import org.apache.spark.network.protocol.StreamResponse; import org.apache.spark.network.server.MessageHandler; -import org.apache.spark.network.util.NettyUtils; +import static org.apache.spark.network.util.NettyUtils.getRemoteAddress; import org.apache.spark.network.util.TransportFrameDecoder; /** @@ -122,7 +122,7 @@ public void channelActive() { @Override public void channelInactive() { if (numOutstandingRequests() > 0) { - String remoteAddress = NettyUtils.getRemoteAddress(channel); + String remoteAddress = getRemoteAddress(channel); logger.error("Still have {} requests outstanding when connection from {} is closed", numOutstandingRequests(), remoteAddress); failOutstandingRequests(new IOException("Connection from " + remoteAddress + " closed")); @@ -132,7 +132,7 @@ public void channelInactive() { @Override public void exceptionCaught(Throwable cause) { if (numOutstandingRequests() > 0) { - String remoteAddress = NettyUtils.getRemoteAddress(channel); + String remoteAddress = getRemoteAddress(channel); logger.error("Still have {} requests outstanding when connection from {} is closed", numOutstandingRequests(), remoteAddress); failOutstandingRequests(cause); @@ -141,13 +141,12 @@ public void exceptionCaught(Throwable cause) { @Override public void handle(ResponseMessage message) throws Exception { - String remoteAddress = NettyUtils.getRemoteAddress(channel); if (message instanceof ChunkFetchSuccess) { ChunkFetchSuccess resp = (ChunkFetchSuccess) message; ChunkReceivedCallback listener = outstandingFetches.get(resp.streamChunkId); if (listener == null) { logger.warn("Ignoring response for block {} from {} since it is not outstanding", - resp.streamChunkId, remoteAddress); + resp.streamChunkId, getRemoteAddress(channel)); resp.body().release(); } else { outstandingFetches.remove(resp.streamChunkId); @@ -159,7 +158,7 @@ public void handle(ResponseMessage message) throws Exception { ChunkReceivedCallback listener = outstandingFetches.get(resp.streamChunkId); if (listener == null) { logger.warn("Ignoring response for block {} from {} ({}) since it is not outstanding", - resp.streamChunkId, remoteAddress, resp.errorString); + resp.streamChunkId, getRemoteAddress(channel), resp.errorString); } else { outstandingFetches.remove(resp.streamChunkId); listener.onFailure(resp.streamChunkId.chunkIndex, new ChunkFetchFailureException( @@ -170,7 +169,7 @@ public void handle(ResponseMessage message) throws Exception { RpcResponseCallback listener = outstandingRpcs.get(resp.requestId); if (listener == null) { logger.warn("Ignoring response for RPC {} from {} ({} bytes) since it is not outstanding", - resp.requestId, remoteAddress, resp.body().size()); + resp.requestId, getRemoteAddress(channel), 
resp.body().size()); } else { outstandingRpcs.remove(resp.requestId); try { @@ -184,7 +183,7 @@ public void handle(ResponseMessage message) throws Exception { RpcResponseCallback listener = outstandingRpcs.get(resp.requestId); if (listener == null) { logger.warn("Ignoring response for RPC {} from {} ({}) since it is not outstanding", - resp.requestId, remoteAddress, resp.errorString); + resp.requestId, getRemoteAddress(channel), resp.errorString); } else { outstandingRpcs.remove(resp.requestId); listener.onFailure(new RuntimeException(resp.errorString)); diff --git a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java index 074780f2b95ce..f0453186185e1 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java +++ b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java @@ -39,7 +39,7 @@ public void decode(ChannelHandlerContext ctx, ByteBuf in, List out) { Message.Type msgType = Message.Type.decode(in); Message decoded = decode(msgType, in); assert decoded.type() == msgType; - logger.trace("Received message " + msgType + ": " + decoded); + logger.trace("Received message {}: {}", msgType, decoded); out.add(decoded); } diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java index f2223379a9d24..884ea7d1152a5 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java @@ -29,7 +29,7 @@ import org.apache.spark.network.protocol.Message; import org.apache.spark.network.protocol.RequestMessage; import org.apache.spark.network.protocol.ResponseMessage; -import org.apache.spark.network.util.NettyUtils; +import static org.apache.spark.network.util.NettyUtils.getRemoteAddress; /** * The single Transport-level Channel handler which is used for delegating requests to the @@ -76,7 +76,7 @@ public TransportClient getClient() { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - logger.warn("Exception in connection from " + NettyUtils.getRemoteAddress(ctx.channel()), + logger.warn("Exception in connection from " + getRemoteAddress(ctx.channel()), cause); requestHandler.exceptionCaught(cause); responseHandler.exceptionCaught(cause); @@ -139,7 +139,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc System.nanoTime() - responseHandler.getTimeOfLastRequestNs() > requestTimeoutNs; if (e.state() == IdleState.ALL_IDLE && isActuallyOverdue) { if (responseHandler.numOutstandingRequests() > 0) { - String address = NettyUtils.getRemoteAddress(ctx.channel()); + String address = getRemoteAddress(ctx.channel()); logger.error("Connection to {} has been quiet for {} ms while there are outstanding " + "requests. 
Assuming connection is dead; please adjust spark.network.timeout if " + "this is wrong.", address, requestTimeoutNs / 1000 / 1000); diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java index bebe88ec5d503..e67a034cb8e58 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java @@ -17,6 +17,7 @@ package org.apache.spark.network.server; +import java.net.SocketAddress; import java.nio.ByteBuffer; import com.google.common.base.Throwables; @@ -42,7 +43,7 @@ import org.apache.spark.network.protocol.StreamFailure; import org.apache.spark.network.protocol.StreamRequest; import org.apache.spark.network.protocol.StreamResponse; -import org.apache.spark.network.util.NettyUtils; +import static org.apache.spark.network.util.NettyUtils.getRemoteAddress; /** * A handler that processes requests from clients and writes chunk data back. Each handler is @@ -114,9 +115,9 @@ public void handle(RequestMessage request) { } private void processFetchRequest(final ChunkFetchRequest req) { - final String client = NettyUtils.getRemoteAddress(channel); - - logger.trace("Received req from {} to fetch block {}", client, req.streamChunkId); + if (logger.isTraceEnabled()) { + logger.trace("Received req from {} to fetch block {}", getRemoteAddress(channel), req.streamChunkId); + } ManagedBuffer buf; try { @@ -125,7 +126,7 @@ private void processFetchRequest(final ChunkFetchRequest req) { buf = streamManager.getChunk(req.streamChunkId.streamId, req.streamChunkId.chunkIndex); } catch (Exception e) { logger.error(String.format( - "Error opening block %s for request from %s", req.streamChunkId, client), e); + "Error opening block %s for request from %s", req.streamChunkId, getRemoteAddress(channel)), e); respond(new ChunkFetchFailure(req.streamChunkId, Throwables.getStackTraceAsString(e))); return; } @@ -134,13 +135,12 @@ private void processFetchRequest(final ChunkFetchRequest req) { } private void processStreamRequest(final StreamRequest req) { - final String client = NettyUtils.getRemoteAddress(channel); ManagedBuffer buf; try { buf = streamManager.openStream(req.streamId); } catch (Exception e) { logger.error(String.format( - "Error opening stream %s for request from %s", req.streamId, client), e); + "Error opening stream %s for request from %s", req.streamId, getRemoteAddress(channel)), e); respond(new StreamFailure(req.streamId, Throwables.getStackTraceAsString(e))); return; } @@ -189,13 +189,13 @@ private void processOneWayMessage(OneWayMessage req) { * it will be logged and the channel closed. 
*/ private void respond(final Encodable result) { - final String remoteAddress = channel.remoteAddress().toString(); + final SocketAddress remoteAddress = channel.remoteAddress(); channel.writeAndFlush(result).addListener( new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { if (future.isSuccess()) { - logger.trace(String.format("Sent result %s to client %s", result, remoteAddress)); + logger.trace("Sent result {} to client {}", result, remoteAddress); } else { logger.error(String.format("Error sending result %s to %s; closing connection", result, remoteAddress), future.cause()); diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java index baae235e02205..a67db4f69f086 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java @@ -130,7 +130,7 @@ protected void initChannel(SocketChannel ch) throws Exception { channelFuture.syncUninterruptibly(); port = ((InetSocketAddress) channelFuture.channel().localAddress()).getPort(); - logger.debug("Shuffle server started on port :" + port); + logger.debug("Shuffle server started on port: {}", port); } @Override diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java index 1270cef621b7d..d05d0ac4d246e 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java @@ -42,7 +42,7 @@ import org.apache.spark.network.server.StreamManager; import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId; import org.apache.spark.network.shuffle.protocol.*; -import org.apache.spark.network.util.NettyUtils; +import static org.apache.spark.network.util.NettyUtils.getRemoteAddress; import org.apache.spark.network.util.TransportConf; @@ -101,11 +101,13 @@ protected void handleMessage( blocks.add(block); } long streamId = streamManager.registerStream(client.getClientId(), blocks.iterator()); - logger.trace("Registered streamId {} with {} buffers for client {} from host {}", - streamId, - msg.blockIds.length, - client.getClientId(), - NettyUtils.getRemoteAddress(client.getChannel())); + if (logger.isTraceEnabled()) { + logger.trace("Registered streamId {} with {} buffers for client {} from host {}", + streamId, + msg.blockIds.length, + client.getClientId(), + getRemoteAddress(client.getChannel())); + } callback.onSuccess(new StreamHandle(streamId, msg.blockIds.length).toByteBuffer()); metrics.blockTransferRateBytes.mark(totalBlockSize); } finally { diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java index 56cf1e2e3eb9e..d436711692e3d 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java @@ -267,7 +267,7 @@ private void deleteExecutorDirs(String[] dirs) { for (String 
localDir : dirs) { try { JavaUtils.deleteRecursively(new File(localDir)); - logger.debug("Successfully cleaned up directory: " + localDir); + logger.debug("Successfully cleaned up directory: {}", localDir); } catch (Exception e) { logger.error("Failed to delete directory: " + localDir, e); } From 9958ac0ce2b9e451d400604767bef2fe12a3399d Mon Sep 17 00:00:00 2001 From: "wm624@hotmail.com" Date: Thu, 25 Aug 2016 12:11:27 -0700 Subject: [PATCH 098/270] [SPARKR][BUILD] ignore cran-check.out under R folder ## What changes were proposed in this pull request? (Please fill in changes proposed in this fix) R add cran check which will generate the cran-check.out. This file should be ignored in git. ## How was this patch tested? (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) Manual test it. Run clean test and git status to make sure the file is not included in git. Author: wm624@hotmail.com Closes #14774 from wangmiao1981/ignore. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0991976abfb8b..cfa8ad05f7da1 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ /lib/ R-unit-tests.log R/unit-tests.out +R/cran-check.out build/*.jar build/apache-maven* build/scala* From a133057ce5817f834babe9f25023092aec3c321d Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Thu, 25 Aug 2016 23:22:40 +0200 Subject: [PATCH 099/270] [SPARK-17229][SQL] PostgresDialect shouldn't widen float and short types during reads ## What changes were proposed in this pull request? When reading float4 and smallint columns from PostgreSQL, Spark's `PostgresDialect` widens these types to Decimal and Integer rather than using the narrower Float and Short types. According to https://www.postgresql.org/docs/7.1/static/datatype.html#DATATYPE-TABLE, Postgres maps the `smallint` type to a signed two-byte integer and the `real` / `float4` types to single precision floating point numbers. This patch fixes this by adding more special-cases to `getCatalystType`, similar to what was done for the Derby JDBC dialect. I also fixed a similar problem in the write path which causes Spark to create integer columns in Postgres for what should have been ShortType columns. ## How was this patch tested? New test cases in `PostgresIntegrationSuite` (which I ran manually because Jenkins can't run it right now). Author: Josh Rosen Closes #14796 from JoshRosen/postgres-jdbc-type-fixes. 
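As a rough usage sketch (assuming a reachable PostgreSQL instance and an illustrative `shortfloat(c0 real, c1 smallint)` table, neither of which is part of this patch), the narrower mapping is observable directly in the inferred schema:

```scala
import java.util.Properties

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{FloatType, ShortType}

// Assumed connection details; adjust for your environment.
val jdbcUrl = "jdbc:postgresql://localhost:5432/testdb?user=test&password=test"
val spark = SparkSession.builder().appName("pg-type-mapping").getOrCreate()

val df = spark.read.jdbc(jdbcUrl, "shortfloat", new Properties())
// With the fix, real/float4 maps to FloatType and smallint to ShortType
// instead of being widened to DecimalType/IntegerType.
assert(df.schema(0).dataType == FloatType)
assert(df.schema(1).dataType == ShortType)
```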
--- .../sql/jdbc/PostgresIntegrationSuite.scala | 22 +++++++++++++++---- .../execution/datasources/jdbc/JDBCRDD.scala | 4 ++++ .../spark/sql/jdbc/PostgresDialect.scala | 7 +++++- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/PostgresIntegrationSuite.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/PostgresIntegrationSuite.scala index 79dd70116ecb8..c9325dea0bb04 100644 --- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/PostgresIntegrationSuite.scala +++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/PostgresIntegrationSuite.scala @@ -22,7 +22,7 @@ import java.util.Properties import org.apache.spark.sql.Column import org.apache.spark.sql.catalyst.expressions.Literal -import org.apache.spark.sql.types.{ArrayType, DecimalType} +import org.apache.spark.sql.types.{ArrayType, DecimalType, FloatType, ShortType} import org.apache.spark.tags.DockerTest @DockerTest @@ -45,10 +45,12 @@ class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite { conn.prepareStatement("CREATE TYPE enum_type AS ENUM ('d1', 'd2')").executeUpdate() conn.prepareStatement("CREATE TABLE bar (c0 text, c1 integer, c2 double precision, c3 bigint, " + "c4 bit(1), c5 bit(10), c6 bytea, c7 boolean, c8 inet, c9 cidr, " - + "c10 integer[], c11 text[], c12 real[], c13 numeric(2,2)[], c14 enum_type)").executeUpdate() + + "c10 integer[], c11 text[], c12 real[], c13 numeric(2,2)[], c14 enum_type, " + + "c15 float4, c16 smallint)").executeUpdate() conn.prepareStatement("INSERT INTO bar VALUES ('hello', 42, 1.25, 123456789012345, B'0', " + "B'1000100101', E'\\\\xDEADBEEF', true, '172.16.0.42', '192.168.0.0/16', " - + """'{1, 2}', '{"a", null, "b"}', '{0.11, 0.22}', '{0.11, 0.22}', 'd1')""").executeUpdate() + + """'{1, 2}', '{"a", null, "b"}', '{0.11, 0.22}', '{0.11, 0.22}', 'd1', 1.01, 1)""" + ).executeUpdate() } test("Type mapping for various types") { @@ -56,7 +58,7 @@ class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite { val rows = df.collect() assert(rows.length == 1) val types = rows(0).toSeq.map(x => x.getClass) - assert(types.length == 15) + assert(types.length == 17) assert(classOf[String].isAssignableFrom(types(0))) assert(classOf[java.lang.Integer].isAssignableFrom(types(1))) assert(classOf[java.lang.Double].isAssignableFrom(types(2))) @@ -72,6 +74,8 @@ class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite { assert(classOf[Seq[Double]].isAssignableFrom(types(12))) assert(classOf[Seq[BigDecimal]].isAssignableFrom(types(13))) assert(classOf[String].isAssignableFrom(types(14))) + assert(classOf[java.lang.Float].isAssignableFrom(types(15))) + assert(classOf[java.lang.Short].isAssignableFrom(types(16))) assert(rows(0).getString(0).equals("hello")) assert(rows(0).getInt(1) == 42) assert(rows(0).getDouble(2) == 1.25) @@ -90,6 +94,8 @@ class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite { assert(rows(0).getSeq(12).toSeq == Seq(0.11f, 0.22f)) assert(rows(0).getSeq(13) == Seq("0.11", "0.22").map(BigDecimal(_).bigDecimal)) assert(rows(0).getString(14) == "d1") + assert(rows(0).getFloat(15) == 1.01f) + assert(rows(0).getShort(16) == 1) } test("Basic write test") { @@ -104,4 +110,12 @@ class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite { Column(Literal.create(null, a.dataType)).as(a.name) }: _*).write.jdbc(jdbcUrl, "public.barcopy2", new Properties) } + + test("Creating a table with shorts and 
floats") { + sqlContext.createDataFrame(Seq((1.0f, 1.toShort))) + .write.jdbc(jdbcUrl, "shortfloat", new Properties) + val schema = sqlContext.read.jdbc(jdbcUrl, "shortfloat", new Properties).schema + assert(schema(0).dataType == FloatType) + assert(schema(1).dataType == ShortType) + } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala index 6dad8cbef7200..8d9048ab82ac6 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala @@ -390,6 +390,10 @@ private[jdbc] class JDBCRDD( (rs: ResultSet, row: MutableRow, pos: Int) => row.setLong(pos, rs.getLong(pos + 1)) + case ShortType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setShort(pos, rs.getShort(pos + 1)) + case StringType => (rs: ResultSet, row: MutableRow, pos: Int) => // TODO(davies): use getBytes for better performance, if the encoding is UTF-8 diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala index fb959d881e9dd..3f540d6258a0d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala @@ -29,7 +29,11 @@ private object PostgresDialect extends JdbcDialect { override def getCatalystType( sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = { - if (sqlType == Types.BIT && typeName.equals("bit") && size != 1) { + if (sqlType == Types.REAL) { + Some(FloatType) + } else if (sqlType == Types.SMALLINT) { + Some(ShortType) + } else if (sqlType == Types.BIT && typeName.equals("bit") && size != 1) { Some(BinaryType) } else if (sqlType == Types.OTHER) { Some(StringType) @@ -66,6 +70,7 @@ private object PostgresDialect extends JdbcDialect { case BooleanType => Some(JdbcType("BOOLEAN", Types.BOOLEAN)) case FloatType => Some(JdbcType("FLOAT4", Types.FLOAT)) case DoubleType => Some(JdbcType("FLOAT8", Types.DOUBLE)) + case ShortType => Some(JdbcType("SMALLINT", Types.SMALLINT)) case t: DecimalType => Some( JdbcType(s"NUMERIC(${t.precision},${t.scale})", java.sql.Types.NUMERIC)) case ArrayType(et, _) if et.isInstanceOf[AtomicType] => From 3e4c7db4d11c474457e7886a5501108ebab0cf6d Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Fri, 26 Aug 2016 00:15:01 +0200 Subject: [PATCH 100/270] [SPARK-17205] Literal.sql should handle Infinity and NaN This patch updates `Literal.sql` to properly generate SQL for `NaN` and `Infinity` float and double literals: these special values need to be handled differently from regular values, since simply appending a suffix to the value's `toString()` representation will not work for these values. Author: Josh Rosen Closes #14777 from JoshRosen/SPARK-17205. 
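As a rough illustration grounded in the test expectations added below: plain suffixing would emit invalid SQL such as `NaND` or `InfinityD`, so the special values are instead rendered as casts of string literals, while regular values keep the suffix form.

```scala
import org.apache.spark.sql.catalyst.expressions.Literal

println(Literal(2.5d).sql)                    // 2.5D  (regular values keep the suffix)
println(Literal(Double.NaN).sql)              // CAST('NaN' AS DOUBLE)
println(Literal(Double.PositiveInfinity).sql) // CAST('Infinity' AS DOUBLE)
println(Literal(Float.NegativeInfinity).sql)  // CAST('-Infinity' AS FLOAT)
```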
--- .../sql/catalyst/expressions/literals.scala | 17 +++++++++++++++-- .../catalyst/ExpressionSQLBuilderSuite.scala | 6 ++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala index 55fd9c0834fcc..730a7f62e04c7 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala @@ -251,8 +251,21 @@ case class Literal (value: Any, dataType: DataType) extends LeafExpression with case (v: Short, ShortType) => v + "S" case (v: Long, LongType) => v + "L" // Float type doesn't have a suffix - case (v: Float, FloatType) => s"CAST($v AS ${FloatType.sql})" - case (v: Double, DoubleType) => v + "D" + case (v: Float, FloatType) => + val castedValue = v match { + case _ if v.isNaN => "'NaN'" + case Float.PositiveInfinity => "'Infinity'" + case Float.NegativeInfinity => "'-Infinity'" + case _ => v + } + s"CAST($castedValue AS ${FloatType.sql})" + case (v: Double, DoubleType) => + v match { + case _ if v.isNaN => s"CAST('NaN' AS ${DoubleType.sql})" + case Double.PositiveInfinity => s"CAST('Infinity' AS ${DoubleType.sql})" + case Double.NegativeInfinity => s"CAST('-Infinity' AS ${DoubleType.sql})" + case _ => v + "D" + } case (v: Decimal, t: DecimalType) => s"CAST($v AS ${t.sql})" case (v: Int, DateType) => s"DATE '${DateTimeUtils.toJavaDate(v)}'" case (v: Long, TimestampType) => s"TIMESTAMP('${DateTimeUtils.toJavaTimestamp(v)}')" diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala index 93dc0f493eb7b..86724cbb676c4 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala @@ -32,7 +32,13 @@ class ExpressionSQLBuilderSuite extends SQLBuilderTest { checkSQL(Literal(4: Int), "4") checkSQL(Literal(8: Long), "8L") checkSQL(Literal(1.5F), "CAST(1.5 AS FLOAT)") + checkSQL(Literal(Float.PositiveInfinity), "CAST('Infinity' AS FLOAT)") + checkSQL(Literal(Float.NegativeInfinity), "CAST('-Infinity' AS FLOAT)") + checkSQL(Literal(Float.NaN), "CAST('NaN' AS FLOAT)") checkSQL(Literal(2.5D), "2.5D") + checkSQL(Literal(Double.PositiveInfinity), "CAST('Infinity' AS DOUBLE)") + checkSQL(Literal(Double.NegativeInfinity), "CAST('-Infinity' AS DOUBLE)") + checkSQL(Literal(Double.NaN), "CAST('NaN' AS DOUBLE)") checkSQL( Literal(Timestamp.valueOf("2016-01-01 00:00:00")), "TIMESTAMP('2016-01-01 00:00:00.0')") // TODO tests for decimals From 9b5a1d1d53bc4412de3cbc86dc819b0c213229a8 Mon Sep 17 00:00:00 2001 From: Marcelo Vanzin Date: Thu, 25 Aug 2016 16:11:42 -0700 Subject: [PATCH 101/270] [SPARK-17240][CORE] Make SparkConf serializable again. Make the config reader transient, and initialize it lazily so that serialization works with both java and kryo (and hopefully any other custom serializer). Added unit test to make sure SparkConf remains serializable and the reader works with both built-in serializers. Author: Marcelo Vanzin Closes #14813 from vanzin/SPARK-17240. 
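A minimal, self-contained sketch of the pattern, with illustrative names rather than the actual `SparkConf` internals: the settings map is the state that gets serialized, while the derived reader is `@transient` and lazy, so it is simply rebuilt on first use after deserialization with either Java or Kryo serialization.

```scala
import java.util.concurrent.ConcurrentHashMap

class ConfSketch extends Serializable {
  // The serializable state.
  private val settings = new ConcurrentHashMap[String, String]()

  // Stands in for the config reader: derived from `settings`, possibly not
  // serializable itself, and cheap to recreate on demand after deserialization.
  @transient private lazy val reader: String => Option[String] =
    key => Option(settings.get(key))

  def set(key: String, value: String): this.type = {
    settings.put(key, value)
    this
  }

  def get(key: String): Option[String] = reader(key)
}
```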
--- .../scala/org/apache/spark/SparkConf.scala | 11 ++++++---- .../org/apache/spark/SparkConfSuite.scala | 22 ++++++++++++++++++- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala index 31b41d95248f3..e85e5aa237384 100644 --- a/core/src/main/scala/org/apache/spark/SparkConf.scala +++ b/core/src/main/scala/org/apache/spark/SparkConf.scala @@ -56,10 +56,13 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria private val settings = new ConcurrentHashMap[String, String]() - private val reader = new ConfigReader(new SparkConfigProvider(settings)) - reader.bindEnv(new ConfigProvider { - override def get(key: String): Option[String] = Option(getenv(key)) - }) + @transient private lazy val reader: ConfigReader = { + val _reader = new ConfigReader(new SparkConfigProvider(settings)) + _reader.bindEnv(new ConfigProvider { + override def get(key: String): Option[String] = Option(getenv(key)) + }) + _reader + } if (loadDefaults) { loadFromSystemProperties(false) diff --git a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala index 1f0f655a15b4a..83906cff123bf 100644 --- a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala +++ b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala @@ -26,8 +26,9 @@ import scala.util.{Random, Try} import com.esotericsoftware.kryo.Kryo +import org.apache.spark.internal.config._ import org.apache.spark.network.util.ByteUnit -import org.apache.spark.serializer.{KryoRegistrator, KryoSerializer} +import org.apache.spark.serializer.{JavaSerializer, KryoRegistrator, KryoSerializer} import org.apache.spark.util.{ResetSystemProperties, RpcUtils} class SparkConfSuite extends SparkFunSuite with LocalSparkContext with ResetSystemProperties { @@ -283,6 +284,25 @@ class SparkConfSuite extends SparkFunSuite with LocalSparkContext with ResetSyst assert(conf.contains("spark.io.compression.lz4.blockSize")) assert(conf.contains("spark.io.unknown") === false) } + + val serializers = Map( + "java" -> new JavaSerializer(new SparkConf()), + "kryo" -> new KryoSerializer(new SparkConf())) + + serializers.foreach { case (name, ser) => + test(s"SPARK-17240: SparkConf should be serializable ($name)") { + val conf = new SparkConf() + conf.set(DRIVER_CLASS_PATH, "${" + DRIVER_JAVA_OPTIONS.key + "}") + conf.set(DRIVER_JAVA_OPTIONS, "test") + + val serializer = ser.newInstance() + val bytes = serializer.serialize(conf) + val deser = serializer.deserialize[SparkConf](bytes) + + assert(conf.get(DRIVER_CLASS_PATH) === deser.get(DRIVER_CLASS_PATH)) + } + } + } class Class1 {} From d96d1515638da20b594f7bfe3cfdb50088f25a04 Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Thu, 25 Aug 2016 16:36:16 -0700 Subject: [PATCH 102/270] [SPARK-17187][SQL] Supports using arbitrary Java object as internal aggregation buffer object ## What changes were proposed in this pull request? This PR introduces an abstract class `TypedImperativeAggregate` so that an aggregation function of TypedImperativeAggregate can use **arbitrary** user-defined Java object as intermediate aggregation buffer object. **This has advantages like:** 1. It now can support larger category of aggregation functions. For example, it will be much easier to implement aggregation function `percentile_approx`, which has a complex aggregation buffer definition. 2. 
It can be used to avoid doing serialization/de-serialization for every call of `update` or `merge` when converting domain specific aggregation object to internal Spark-Sql storage format. 3. It is easier to integrate with other existing monoid libraries like algebird, and supports more aggregation functions with high performance. Please see `org.apache.spark.sql.TypedImperativeAggregateSuite.TypedMaxAggregate` to find an example of how to defined a `TypedImperativeAggregate` aggregation function. Please see Java doc of `TypedImperativeAggregate` and Jira ticket SPARK-17187 for more information. ## How was this patch tested? Unit tests. Author: Sean Zhong Author: Yin Huai Closes #14753 from clockfly/object_aggregation_buffer_try_2. --- .../expressions/aggregate/interfaces.scala | 141 ++++++++ .../aggregate/AggregationIterator.scala | 15 + .../sql/TypedImperativeAggregateSuite.scala | 300 ++++++++++++++++++ 3 files changed, 456 insertions(+) create mode 100644 sql/core/src/test/scala/org/apache/spark/sql/TypedImperativeAggregateSuite.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala index 7a39e568fa289..ecbaa2f4669b9 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala @@ -389,3 +389,144 @@ abstract class DeclarativeAggregate def right: AttributeReference = inputAggBufferAttributes(aggBufferAttributes.indexOf(a)) } } + +/** + * Aggregation function which allows **arbitrary** user-defined java object to be used as internal + * aggregation buffer object. + * + * {{{ + * aggregation buffer for normal aggregation function `avg` + * | + * v + * +--------------+---------------+-----------------------------------+ + * | sum1 (Long) | count1 (Long) | generic user-defined java objects | + * +--------------+---------------+-----------------------------------+ + * ^ + * | + * Aggregation buffer object for `TypedImperativeAggregate` aggregation function + * }}} + * + * Work flow (Partial mode aggregate at Mapper side, and Final mode aggregate at Reducer side): + * + * Stage 1: Partial aggregate at Mapper side: + * + * 1. The framework calls `createAggregationBuffer(): T` to create an empty internal aggregation + * buffer object. + * 2. Upon each input row, the framework calls + * `update(buffer: T, input: InternalRow): Unit` to update the aggregation buffer object T. + * 3. After processing all rows of current group (group by key), the framework will serialize + * aggregation buffer object T to storage format (Array[Byte]) and persist the Array[Byte] + * to disk if needed. + * 4. The framework moves on to next group, until all groups have been processed. + * + * Shuffling exchange data to Reducer tasks... + * + * Stage 2: Final mode aggregate at Reducer side: + * + * 1. The framework calls `createAggregationBuffer(): T` to create an empty internal aggregation + * buffer object (type T) for merging. + * 2. For each aggregation output of Stage 1, The framework de-serializes the storage + * format (Array[Byte]) and produces one input aggregation object (type T). + * 3. For each input aggregation object, the framework calls `merge(buffer: T, input: T): Unit` + * to merge the input aggregation object into aggregation buffer object. + * 4. 
After processing all input aggregation objects of current group (group by key), the framework + * calls method `eval(buffer: T)` to generate the final output for this group. + * 5. The framework moves on to next group, until all groups have been processed. + * + * NOTE: SQL with TypedImperativeAggregate functions is planned in sort based aggregation, + * instead of hash based aggregation, as TypedImperativeAggregate use BinaryType as aggregation + * buffer's storage format, which is not supported by hash based aggregation. Hash based + * aggregation only support aggregation buffer of mutable types (like LongType, IntType that have + * fixed length and can be mutated in place in UnsafeRow) + */ +abstract class TypedImperativeAggregate[T] extends ImperativeAggregate { + + /** + * Creates an empty aggregation buffer object. This is called before processing each key group + * (group by key). + * + * @return an aggregation buffer object + */ + def createAggregationBuffer(): T + + /** + * In-place updates the aggregation buffer object with an input row. buffer = buffer + input. + * This is typically called when doing Partial or Complete mode aggregation. + * + * @param buffer The aggregation buffer object. + * @param input an input row + */ + def update(buffer: T, input: InternalRow): Unit + + /** + * Merges an input aggregation object into aggregation buffer object. buffer = buffer + input. + * This is typically called when doing PartialMerge or Final mode aggregation. + * + * @param buffer the aggregation buffer object used to store the aggregation result. + * @param input an input aggregation object. Input aggregation object can be produced by + * de-serializing the partial aggregate's output from Mapper side. + */ + def merge(buffer: T, input: T): Unit + + /** + * Generates the final aggregation result value for current key group with the aggregation buffer + * object. + * + * @param buffer aggregation buffer object. 
+ * @return The aggregation result of current key group + */ + def eval(buffer: T): Any + + /** Serializes the aggregation buffer object T to Array[Byte] */ + def serialize(buffer: T): Array[Byte] + + /** De-serializes the serialized format Array[Byte], and produces aggregation buffer object T */ + def deserialize(storageFormat: Array[Byte]): T + + final override def initialize(buffer: MutableRow): Unit = { + val bufferObject = createAggregationBuffer() + buffer.update(mutableAggBufferOffset, bufferObject) + } + + final override def update(buffer: MutableRow, input: InternalRow): Unit = { + val bufferObject = getField[T](buffer, mutableAggBufferOffset) + update(bufferObject, input) + } + + final override def merge(buffer: MutableRow, inputBuffer: InternalRow): Unit = { + val bufferObject = getField[T](buffer, mutableAggBufferOffset) + // The inputBuffer stores serialized aggregation buffer object produced by partial aggregate + val inputObject = deserialize(inputBuffer.getBinary(inputAggBufferOffset)) + merge(bufferObject, inputObject) + } + + final override def eval(buffer: InternalRow): Any = { + val bufferObject = getField[T](buffer, mutableAggBufferOffset) + eval(bufferObject) + } + + private[this] val anyObjectType = ObjectType(classOf[AnyRef]) + private def getField[U](input: InternalRow, fieldIndex: Int): U = { + input.get(fieldIndex, anyObjectType).asInstanceOf[U] + } + + final override lazy val aggBufferAttributes: Seq[AttributeReference] = { + // Underlying storage type for the aggregation buffer object + Seq(AttributeReference("buf", BinaryType)()) + } + + final override lazy val inputAggBufferAttributes: Seq[AttributeReference] = + aggBufferAttributes.map(_.newInstance()) + + final override def aggBufferSchema: StructType = StructType.fromAttributes(aggBufferAttributes) + + /** + * In-place replaces the aggregation buffer object stored at buffer's index + * `mutableAggBufferOffset`, with SparkSQL internally supported underlying storage format + * (BinaryType). + */ + final def serializeAggregateBufferInPlace(buffer: MutableRow): Unit = { + val bufferObject = getField[T](buffer, mutableAggBufferOffset) + buffer(mutableAggBufferOffset) = serialize(bufferObject) + } +} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala index 34de76dd4ab4e..dfed084fe64a2 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala @@ -234,7 +234,22 @@ abstract class AggregationIterator( val resultProjection = UnsafeProjection.create( groupingAttributes ++ bufferAttributes, groupingAttributes ++ bufferAttributes) + + // TypedImperativeAggregate stores generic object in aggregation buffer, and requires + // calling serialization before shuffling. See [[TypedImperativeAggregate]] for more info. 
+ val typedImperativeAggregates: Array[TypedImperativeAggregate[_]] = { + aggregateFunctions.collect { + case (ag: TypedImperativeAggregate[_]) => ag + } + } + (currentGroupingKey: UnsafeRow, currentBuffer: MutableRow) => { + // Serializes the generic object stored in aggregation buffer + var i = 0 + while (i < typedImperativeAggregates.length) { + typedImperativeAggregates(i).serializeAggregateBufferInPlace(currentBuffer) + i += 1 + } resultProjection(joinedRow(currentGroupingKey, currentBuffer)) } } else { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/TypedImperativeAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/TypedImperativeAggregateSuite.scala new file mode 100644 index 0000000000000..b5eb16b6f650b --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/TypedImperativeAggregateSuite.scala @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream} + +import org.apache.spark.sql.TypedImperativeAggregateSuite.TypedMax +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.expressions.{BoundReference, Expression, GenericMutableRow, SpecificMutableRow} +import org.apache.spark.sql.catalyst.expressions.aggregate.TypedImperativeAggregate +import org.apache.spark.sql.execution.aggregate.SortAggregateExec +import org.apache.spark.sql.expressions.Window +import org.apache.spark.sql.functions._ +import org.apache.spark.sql.test.SharedSQLContext +import org.apache.spark.sql.types.{AbstractDataType, BinaryType, DataType, IntegerType, LongType} + +class TypedImperativeAggregateSuite extends QueryTest with SharedSQLContext { + + import testImplicits._ + + private val random = new java.util.Random() + + private val data = (0 until 1000).map { _ => + (random.nextInt(10), random.nextInt(100)) + } + + test("aggregate with object aggregate buffer") { + val agg = new TypedMax(BoundReference(0, IntegerType, nullable = false)) + + val group1 = (0 until data.length / 2) + val group1Buffer = agg.createAggregationBuffer() + group1.foreach { index => + val input = InternalRow(data(index)._1, data(index)._2) + agg.update(group1Buffer, input) + } + + val group2 = (data.length / 2 until data.length) + val group2Buffer = agg.createAggregationBuffer() + group2.foreach { index => + val input = InternalRow(data(index)._1, data(index)._2) + agg.update(group2Buffer, input) + } + + val mergeBuffer = agg.createAggregationBuffer() + agg.merge(mergeBuffer, group1Buffer) + agg.merge(mergeBuffer, group2Buffer) + + assert(mergeBuffer.value == data.map(_._1).max) + assert(agg.eval(mergeBuffer) == data.map(_._1).max) + + // Tests low level eval(row: InternalRow) API. 
+ val row = new GenericMutableRow(Array(mergeBuffer): Array[Any]) + + // Evaluates directly on row consist of aggregation buffer object. + assert(agg.eval(row) == data.map(_._1).max) + } + + test("supports SpecificMutableRow as mutable row") { + val aggregationBufferSchema = Seq(IntegerType, LongType, BinaryType, IntegerType) + val aggBufferOffset = 2 + val buffer = new SpecificMutableRow(aggregationBufferSchema) + val agg = new TypedMax(BoundReference(ordinal = 1, dataType = IntegerType, nullable = false)) + .withNewMutableAggBufferOffset(aggBufferOffset) + + agg.initialize(buffer) + data.foreach { kv => + val input = InternalRow(kv._1, kv._2) + agg.update(buffer, input) + } + assert(agg.eval(buffer) == data.map(_._2).max) + } + + test("dataframe aggregate with object aggregate buffer, should not use HashAggregate") { + val df = data.toDF("a", "b") + val max = new TypedMax($"a".expr) + + // Always uses SortAggregateExec + val sparkPlan = df.select(Column(max.toAggregateExpression())).queryExecution.sparkPlan + assert(sparkPlan.isInstanceOf[SortAggregateExec]) + } + + test("dataframe aggregate with object aggregate buffer, no group by") { + val df = data.toDF("key", "value").coalesce(2) + val query = df.select(typedMax($"key"), count($"key"), typedMax($"value"), count($"value")) + val maxKey = data.map(_._1).max + val countKey = data.size + val maxValue = data.map(_._2).max + val countValue = data.size + val expected = Seq(Row(maxKey, countKey, maxValue, countValue)) + checkAnswer(query, expected) + } + + test("dataframe aggregate with object aggregate buffer, non-nullable aggregator") { + val df = data.toDF("key", "value").coalesce(2) + + // Test non-nullable typedMax + val query = df.select(typedMax(lit(null)), count($"key"), typedMax(lit(null)), + count($"value")) + + // typedMax is not nullable + val maxNull = Int.MinValue + val countKey = data.size + val countValue = data.size + val expected = Seq(Row(maxNull, countKey, maxNull, countValue)) + checkAnswer(query, expected) + } + + test("dataframe aggregate with object aggregate buffer, nullable aggregator") { + val df = data.toDF("key", "value").coalesce(2) + + // Test nullable nullableTypedMax + val query = df.select(nullableTypedMax(lit(null)), count($"key"), nullableTypedMax(lit(null)), + count($"value")) + + // nullableTypedMax is nullable + val maxNull = null + val countKey = data.size + val countValue = data.size + val expected = Seq(Row(maxNull, countKey, maxNull, countValue)) + checkAnswer(query, expected) + } + + test("dataframe aggregation with object aggregate buffer, input row contains null") { + + val nullableData = (0 until 1000).map {id => + val nullableKey: Integer = if (random.nextBoolean()) null else random.nextInt(100) + val nullableValue: Integer = if (random.nextBoolean()) null else random.nextInt(100) + (nullableKey, nullableValue) + } + + val df = nullableData.toDF("key", "value").coalesce(2) + val query = df.select(typedMax($"key"), count($"key"), typedMax($"value"), + count($"value")) + val maxKey = nullableData.map(_._1).filter(_ != null).max + val countKey = nullableData.map(_._1).filter(_ != null).size + val maxValue = nullableData.map(_._2).filter(_ != null).max + val countValue = nullableData.map(_._2).filter(_ != null).size + val expected = Seq(Row(maxKey, countKey, maxValue, countValue)) + checkAnswer(query, expected) + } + + test("dataframe aggregate with object aggregate buffer, with group by") { + val df = data.toDF("value", "key").coalesce(2) + val query = df.groupBy($"key").agg(typedMax($"value"), 
count($"value"), typedMax($"value")) + val expected = data.groupBy(_._2).toSeq.map { group => + val (key, values) = group + val valueMax = values.map(_._1).max + val countValue = values.size + Row(key, valueMax, countValue, valueMax) + } + checkAnswer(query, expected) + } + + test("dataframe aggregate with object aggregate buffer, empty inputs, no group by") { + val empty = Seq.empty[(Int, Int)].toDF("a", "b") + checkAnswer( + empty.select(typedMax($"a"), count($"a"), typedMax($"b"), count($"b")), + Seq(Row(Int.MinValue, 0, Int.MinValue, 0))) + } + + test("dataframe aggregate with object aggregate buffer, empty inputs, with group by") { + val empty = Seq.empty[(Int, Int)].toDF("a", "b") + checkAnswer( + empty.groupBy($"b").agg(typedMax($"a"), count($"a"), typedMax($"a")), + Seq.empty[Row]) + } + + test("TypedImperativeAggregate should not break Window function") { + val df = data.toDF("key", "value") + // OVER (PARTITION BY a ORDER BY b ROW BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) + val w = Window.orderBy("value").partitionBy("key").rowsBetween(Long.MinValue, 0) + + val query = df.select(sum($"key").over(w), typedMax($"key").over(w), sum($"value").over(w), + typedMax($"value").over(w)) + + val expected = data.groupBy(_._1).toSeq.flatMap { group => + val (key, values) = group + val sortedValues = values.map(_._2).sorted + + var outputRows = Seq.empty[Row] + var i = 0 + while (i < sortedValues.size) { + val unboundedPrecedingAndCurrent = sortedValues.slice(0, i + 1) + val sumKey = key * unboundedPrecedingAndCurrent.size + val maxKey = key + val sumValue = unboundedPrecedingAndCurrent.sum + val maxValue = unboundedPrecedingAndCurrent.max + + outputRows :+= Row(sumKey, maxKey, sumValue, maxValue) + i += 1 + } + + outputRows + } + checkAnswer(query, expected) + } + + private def typedMax(column: Column): Column = { + val max = TypedMax(column.expr, nullable = false) + Column(max.toAggregateExpression()) + } + + private def nullableTypedMax(column: Column): Column = { + val max = TypedMax(column.expr, nullable = true) + Column(max.toAggregateExpression()) + } +} + +object TypedImperativeAggregateSuite { + + /** + * Calculate the max value with object aggregation buffer. This stores class MaxValue + * in aggregation buffer. 
+ */ + private case class TypedMax( + child: Expression, + nullable: Boolean = false, + mutableAggBufferOffset: Int = 0, + inputAggBufferOffset: Int = 0) extends TypedImperativeAggregate[MaxValue] { + + + override def createAggregationBuffer(): MaxValue = { + // Returns Int.MinValue if all inputs are null + new MaxValue(Int.MinValue) + } + + override def update(buffer: MaxValue, input: InternalRow): Unit = { + child.eval(input) match { + case inputValue: Int => + if (inputValue > buffer.value) { + buffer.value = inputValue + buffer.isValueSet = true + } + case null => // skip + } + } + + override def merge(bufferMax: MaxValue, inputMax: MaxValue): Unit = { + if (inputMax.value > bufferMax.value) { + bufferMax.value = inputMax.value + bufferMax.isValueSet = bufferMax.isValueSet || inputMax.isValueSet + } + } + + override def eval(bufferMax: MaxValue): Any = { + if (nullable && bufferMax.isValueSet == false) { + null + } else { + bufferMax.value + } + } + + override def deterministic: Boolean = true + + override def children: Seq[Expression] = Seq(child) + + override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType) + + override def dataType: DataType = IntegerType + + override def withNewMutableAggBufferOffset(newOffset: Int): TypedImperativeAggregate[MaxValue] = + copy(mutableAggBufferOffset = newOffset) + + override def withNewInputAggBufferOffset(newOffset: Int): TypedImperativeAggregate[MaxValue] = + copy(inputAggBufferOffset = newOffset) + + override def serialize(buffer: MaxValue): Array[Byte] = { + val out = new ByteArrayOutputStream() + val stream = new DataOutputStream(out) + stream.writeBoolean(buffer.isValueSet) + stream.writeInt(buffer.value) + out.toByteArray + } + + override def deserialize(storageFormat: Array[Byte]): MaxValue = { + val in = new ByteArrayInputStream(storageFormat) + val stream = new DataInputStream(in) + val isValueSet = stream.readBoolean() + val value = stream.readInt() + new MaxValue(value, isValueSet) + } + } + + private class MaxValue(var value: Int, var isValueSet: Boolean = false) +} From b964a172a8c075486189cc9be09a51b8446f0da4 Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Fri, 26 Aug 2016 08:58:43 +0800 Subject: [PATCH 103/270] [SPARK-17212][SQL] TypeCoercion supports widening conversion between DateType and TimestampType ## What changes were proposed in this pull request? Currently, type-widening does not work between `TimestampType` and `DateType`. This applies to `SetOperation`, `Union`, `In`, `CaseWhen`, `Greatest`, `Leatest`, `CreateArray`, `CreateMap`, `Coalesce`, `NullIf`, `IfNull`, `Nvl` and `Nvl2`, . This PR adds the support for widening `DateType` to `TimestampType` for them. For a simple example, **Before** ```scala Seq(Tuple2(new Timestamp(0), new Date(0))).toDF("a", "b").selectExpr("greatest(a, b)").show() ``` shows below: ``` cannot resolve 'greatest(`a`, `b`)' due to data type mismatch: The expressions should all have the same type, got GREATEST(timestamp, date) ``` or union as below: ```scala val a = Seq(Tuple1(new Timestamp(0))).toDF() val b = Seq(Tuple1(new Date(0))).toDF() a.union(b).show() ``` shows below: ``` Union can only be performed on tables with the compatible column types. 
DateType <> TimestampType at the first column of the second table; ``` **After** ```scala Seq(Tuple2(new Timestamp(0), new Date(0))).toDF("a", "b").selectExpr("greatest(a, b)").show() ``` shows below: ``` +----------------------------------------------------+ |greatest(CAST(a AS TIMESTAMP), CAST(b AS TIMESTAMP))| +----------------------------------------------------+ | 1969-12-31 16:00:...| +----------------------------------------------------+ ``` or union as below: ```scala val a = Seq(Tuple1(new Timestamp(0))).toDF() val b = Seq(Tuple1(new Date(0))).toDF() a.union(b).show() ``` shows below: ``` +--------------------+ | _1| +--------------------+ |1969-12-31 16:00:...| |1969-12-31 00:00:...| +--------------------+ ``` ## How was this patch tested? Unit tests in `TypeCoercionSuite`. Author: hyukjinkwon Author: HyukjinKwon Closes #14786 from HyukjinKwon/SPARK-17212. --- .../org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala | 3 +++ .../apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala | 1 + 2 files changed, 4 insertions(+) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala index 193c3ec4e585a..01b04c036d150 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala @@ -96,6 +96,9 @@ object TypeCoercion { val index = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2) Some(numericPrecedence(index)) + case (_: TimestampType, _: DateType) | (_: DateType, _: TimestampType) => + Some(TimestampType) + case _ => None } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala index 9560563a8ca56..6f69613f85315 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala @@ -188,6 +188,7 @@ class TypeCoercionSuite extends PlanTest { // TimestampType widenTest(NullType, TimestampType, Some(TimestampType)) widenTest(TimestampType, TimestampType, Some(TimestampType)) + widenTest(DateType, TimestampType, Some(TimestampType)) widenTest(IntegerType, TimestampType, None) widenTest(StringType, TimestampType, None) From 341e0e778dff8c404b47d34ee7661b658bb91880 Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Thu, 25 Aug 2016 21:08:42 -0700 Subject: [PATCH 104/270] [SPARK-17242][DOCUMENT] Update links of external dstream projects ## What changes were proposed in this pull request? Updated links of external dstream projects. ## How was this patch tested? Just document changes. Author: Shixiong Zhu Closes #14814 from zsxwing/dstream-link. --- docs/streaming-programming-guide.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index df94e9533e994..82d36474ff4bf 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -656,7 +656,7 @@ methods for creating DStreams from files as input sources. Python API `fileStream` is not available in the Python API, only `textFileStream` is available. - **Streams based on Custom Receivers:** DStreams can be created with data streams received through custom receivers. 
See the [Custom Receiver - Guide](streaming-custom-receivers.html) and [DStream Akka](https://github.com/spark-packages/dstream-akka) for more details. + Guide](streaming-custom-receivers.html) for more details. - **Queue of RDDs as a Stream:** For testing a Spark Streaming application with test data, one can also create a DStream based on a queue of RDDs, using `streamingContext.queueStream(queueOfRDDs)`. Each RDD pushed into the queue will be treated as a batch of data in the DStream, and processed like a stream. @@ -2383,11 +2383,7 @@ additional effort may be necessary to achieve exactly-once semantics. There are - [Kafka Integration Guide](streaming-kafka-integration.html) - [Kinesis Integration Guide](streaming-kinesis-integration.html) - [Custom Receiver Guide](streaming-custom-receivers.html) -* External DStream data sources: - - [DStream MQTT](https://github.com/spark-packages/dstream-mqtt) - - [DStream Twitter](https://github.com/spark-packages/dstream-twitter) - - [DStream Akka](https://github.com/spark-packages/dstream-akka) - - [DStream ZeroMQ](https://github.com/spark-packages/dstream-zeromq) +* Third-party DStream data sources can be found in [Spark Packages](https://spark-packages.org/) * API documentation - Scala docs * [StreamingContext](api/scala/index.html#org.apache.spark.streaming.StreamingContext) and From 6063d5963fcf01768570c1a9b542be6175a3bcbc Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Fri, 26 Aug 2016 17:29:37 +0200 Subject: [PATCH 105/270] [SPARK-16216][SQL][FOLLOWUP] Enable timestamp type tests for JSON and verify all unsupported types in CSV ## What changes were proposed in this pull request? This PR enables the tests for `TimestampType` for JSON and unifies the logics for verifying schema when writing in CSV. In more details, this PR, - Enables the tests for `TimestampType` for JSON and This was disabled due to an issue in `DatatypeConverter.parseDateTime` which parses dates incorrectly, for example as below: ```scala val d = javax.xml.bind.DatatypeConverter.parseDateTime("0900-01-01T00:00:00.000").getTime println(d.toString) ``` ``` Fri Dec 28 00:00:00 KST 899 ``` However, since we use `FastDateFormat`, it seems we are safe now. ```scala val d = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSS").parse("0900-01-01T00:00:00.000") println(d) ``` ``` Tue Jan 01 00:00:00 PST 900 ``` - Verifies all unsupported types in CSV There is a separate logics to verify the schemas in `CSVFileFormat`. This is actually not quite correct enough because we don't support `NullType` and `CalanderIntervalType` as well `StructType`, `ArrayType`, `MapType`. So, this PR adds both types. ## How was this patch tested? Tests in `JsonHadoopFsRelation` and `CSVSuite` Author: hyukjinkwon Closes #14829 from HyukjinKwon/SPARK-16216-followup. 
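As a usage-level sketch of what the verification means for callers (not part of the patch; it assumes a local `SparkSession` and placeholder `/tmp` output paths): atomic types such as timestamps write to CSV as before, while unsupported complex types such as arrays fail fast with an `UnsupportedOperationException`, matching the behaviour exercised by the new tests.

```scala
import org.apache.spark.sql.SparkSession

object CsvTypeCheckSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("csv-type-check").getOrCreate()
    import spark.implicits._

    // Atomic types (including TimestampType) are supported by the CSV writer.
    Seq((1, java.sql.Timestamp.valueOf("2016-01-01 00:00:00"))).toDF("id", "ts")
      .write.mode("overwrite").csv("/tmp/csv-ok")

    // Complex types such as arrays are rejected up front by the schema verification.
    try {
      Seq((1, Array("Tesla", "Chevy"))).toDF("id", "brands")
        .write.mode("overwrite").csv("/tmp/csv-bad")
    } catch {
      case e: UnsupportedOperationException =>
        println(e.getMessage) // e.g. "CSV data source does not support array data type."
    }

    spark.stop()
  }
}
```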
--- .../datasources/csv/CSVFileFormat.scala | 17 +++++++++++------ .../datasources/csv/CSVInferSchema.scala | 1 + .../execution/datasources/csv/CSVSuite.scala | 16 ++++++++++++++-- .../sql/sources/JsonHadoopFsRelationSuite.scala | 4 ---- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala index 1bf57882ce023..9a118fe5a273d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala @@ -186,13 +186,18 @@ class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister { } private def verifySchema(schema: StructType): Unit = { - schema.foreach { field => - field.dataType match { - case _: ArrayType | _: MapType | _: StructType => - throw new UnsupportedOperationException( - s"CSV data source does not support ${field.dataType.simpleString} data type.") + def verifyType(dataType: DataType): Unit = dataType match { + case ByteType | ShortType | IntegerType | LongType | FloatType | + DoubleType | BooleanType | _: DecimalType | TimestampType | + DateType | StringType => + + case udt: UserDefinedType[_] => verifyType(udt.sqlType) + case _ => - } + throw new UnsupportedOperationException( + s"CSV data source does not support ${dataType.simpleString} data type.") } + + schema.foreach(field => verifyType(field.dataType)) } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala index f1b4c11878a94..1ca6eff1b8c2e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchema.scala @@ -290,6 +290,7 @@ private[csv] object CSVTypeCast { DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(datum).getTime) } case _: StringType => UTF8String.fromString(datum) + case udt: UserDefinedType[_] => castTo(datum, udt.sqlType, nullable, options) case _ => throw new RuntimeException(s"Unsupported type: ${castType.typeName}") } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala index 2befad6d72eca..1930862118e9b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala @@ -27,8 +27,7 @@ import org.apache.hadoop.io.SequenceFile.CompressionType import org.apache.hadoop.io.compress.GzipCodec import org.apache.spark.SparkException -import org.apache.spark.sql.{DataFrame, QueryTest, Row} -import org.apache.spark.sql.catalyst.util.DateTimeUtils +import org.apache.spark.sql.{DataFrame, QueryTest, Row, UDT} import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils} import org.apache.spark.sql.types._ @@ -681,6 +680,19 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils { Seq((1, Array("Tesla", "Chevy", "Ford"))).toDF("id", "brands").write.csv(csvDir) }.getMessage assert(msg.contains("CSV data source does not support array data type")) + + msg = intercept[UnsupportedOperationException] { + Seq((1, new 
UDT.MyDenseVector(Array(0.25, 2.25, 4.25)))).toDF("id", "vectors") + .write.csv(csvDir) + }.getMessage + assert(msg.contains("CSV data source does not support array data type")) + + msg = intercept[SparkException] { + val schema = StructType(StructField("a", new UDT.MyDenseVectorUDT(), true) :: Nil) + spark.range(1).write.csv(csvDir) + spark.read.schema(schema).csv(csvDir).collect() + }.getCause.getMessage + assert(msg.contains("Unsupported type: array")) } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala index 52486b122a93f..d79edee5b1a4c 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala @@ -32,10 +32,6 @@ class JsonHadoopFsRelationSuite extends HadoopFsRelationTest { override protected def supportsDataType(dataType: DataType): Boolean = dataType match { case _: NullType => false case _: BinaryType => false - // `TimestampType` is disabled because `DatatypeConverter.parseDateTime()` - // in `DateTimeUtils` parses the formatted string wrongly when the date is - // too early. (e.g. "1600-07-13T08:36:32.847"). - case _: TimestampType => false case _: CalendarIntervalType => false case _ => true } From 28ab17922a227e8d93654d3478c0d493bfb599d5 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Fri, 26 Aug 2016 08:52:10 -0700 Subject: [PATCH 106/270] [SPARK-17260][MINOR] move CreateTables to HiveStrategies ## What changes were proposed in this pull request? `CreateTables` rule turns a general `CreateTable` plan to `CreateHiveTableAsSelectCommand` for hive serde table. However, this rule is logically a planner strategy, we should move it to `HiveStrategies`, to be consistent with other DDL commands. ## How was this patch tested? existing tests. Author: Wenchen Fan Closes #14825 from cloud-fan/ctas. --- .../spark/sql/hive/HiveMetastoreCatalog.scala | 35 ------------------- .../spark/sql/hive/HiveSessionCatalog.scala | 1 - .../spark/sql/hive/HiveSessionState.scala | 1 - .../spark/sql/hive/HiveStrategies.scala | 27 ++++++++++++++ 4 files changed, 27 insertions(+), 37 deletions(-) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index 701b73a4aa39b..ff82c7f7af6f8 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -376,41 +376,6 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log } } } - - /** - * Creates any tables required for query execution. - * For example, because of a CREATE TABLE X AS statement. - */ - object CreateTables extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - // Wait until children are resolved. 
- case p: LogicalPlan if !p.childrenResolved => p - - case CreateTable(tableDesc, mode, Some(query)) if tableDesc.provider.get == "hive" => - val newTableDesc = if (tableDesc.storage.serde.isEmpty) { - // add default serde - tableDesc.withNewStorage( - serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - } else { - tableDesc - } - - val QualifiedTableName(dbName, tblName) = getQualifiedTableName(tableDesc) - - // Currently we will never hit this branch, as SQL string API can only use `Ignore` or - // `ErrorIfExists` mode, and `DataFrameWriter.saveAsTable` doesn't support hive serde - // tables yet. - if (mode == SaveMode.Append || mode == SaveMode.Overwrite) { - throw new AnalysisException("" + - "CTAS for hive serde tables does not support append or overwrite semantics.") - } - - execution.CreateHiveTableAsSelectCommand( - newTableDesc.copy(identifier = TableIdentifier(tblName, Some(dbName))), - query, - mode == SaveMode.Ignore) - } - } } /** diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala index ca8c7347f23e9..86d3b6de0dbfd 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala @@ -87,7 +87,6 @@ private[sql] class HiveSessionCatalog( val ParquetConversions: Rule[LogicalPlan] = metastoreCatalog.ParquetConversions val OrcConversions: Rule[LogicalPlan] = metastoreCatalog.OrcConversions - val CreateTables: Rule[LogicalPlan] = metastoreCatalog.CreateTables override def refreshTable(name: TableIdentifier): Unit = { super.refreshTable(name) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala index a7cc7cc142e4e..f3c4135da6552 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala @@ -61,7 +61,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession) override val extendedResolutionRules = catalog.ParquetConversions :: catalog.OrcConversions :: - catalog.CreateTables :: PreprocessDDL(conf) :: PreprocessTableInsertion(conf) :: DataSourceAnalysis(conf) :: diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala index 17956ded1796d..fb11c849edd94 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala @@ -23,6 +23,8 @@ import org.apache.spark.sql.catalyst.planning._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.execution._ +import org.apache.spark.sql.execution.command.ExecutedCommandExec +import org.apache.spark.sql.execution.datasources.CreateTable import org.apache.spark.sql.hive.execution._ private[hive] trait HiveStrategies { @@ -45,6 +47,31 @@ private[hive] trait HiveStrategies { case logical.InsertIntoTable( table: MetastoreRelation, partition, child, overwrite, ifNotExists) => InsertIntoHiveTable(table, partition, planLater(child), overwrite, ifNotExists) :: Nil + + case CreateTable(tableDesc, mode, Some(query)) if tableDesc.provider.get == "hive" => + val newTableDesc = if (tableDesc.storage.serde.isEmpty) { + // add default serde + 
tableDesc.withNewStorage( + serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) + } else { + tableDesc + } + + // Currently we will never hit this branch, as SQL string API can only use `Ignore` or + // `ErrorIfExists` mode, and `DataFrameWriter.saveAsTable` doesn't support hive serde + // tables yet. + if (mode == SaveMode.Append || mode == SaveMode.Overwrite) { + throw new AnalysisException("" + + "CTAS for hive serde tables does not support append or overwrite semantics.") + } + + val dbName = tableDesc.identifier.database.getOrElse(sparkSession.catalog.currentDatabase) + val cmd = CreateHiveTableAsSelectCommand( + newTableDesc.copy(identifier = tableDesc.identifier.copy(database = Some(dbName))), + query, + mode == SaveMode.Ignore) + ExecutedCommandExec(cmd) :: Nil + case _ => Nil } } From 970ab8f6ddc66401ad1cf4b2d1050dd0c8876224 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Fri, 26 Aug 2016 10:56:57 -0700 Subject: [PATCH 107/270] [SPARK-17187][SQL][FOLLOW-UP] improve document of TypedImperativeAggregate ## What changes were proposed in this pull request? improve the document to make it easier to understand and also mention window operator. ## How was this patch tested? N/A Author: Wenchen Fan Closes #14822 from cloud-fan/object-agg. --- .../expressions/aggregate/interfaces.scala | 101 +++++++++++------- 1 file changed, 61 insertions(+), 40 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala index ecbaa2f4669b9..b5c0844fbf310 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala @@ -390,48 +390,69 @@ abstract class DeclarativeAggregate } } + /** * Aggregation function which allows **arbitrary** user-defined java object to be used as internal - * aggregation buffer object. + * aggregation buffer. * * {{{ - * aggregation buffer for normal aggregation function `avg` - * | - * v - * +--------------+---------------+-----------------------------------+ - * | sum1 (Long) | count1 (Long) | generic user-defined java objects | - * +--------------+---------------+-----------------------------------+ - * ^ - * | - * Aggregation buffer object for `TypedImperativeAggregate` aggregation function + * aggregation buffer for normal aggregation function `avg` aggregate buffer for `sum` + * | | + * v v + * +--------------+---------------+-----------------------------------+-------------+ + * | sum1 (Long) | count1 (Long) | generic user-defined java objects | sum2 (Long) | + * +--------------+---------------+-----------------------------------+-------------+ + * ^ + * | + * aggregation buffer object for `TypedImperativeAggregate` aggregation function * }}} * - * Work flow (Partial mode aggregate at Mapper side, and Final mode aggregate at Reducer side): + * General work flow: + * + * Stage 1: initialize aggregate buffer object. + * + * 1. The framework calls `initialize(buffer: MutableRow)` to set up the empty aggregate buffer. + * 2. In `initialize`, we call `createAggregationBuffer(): T` to get the initial buffer object, + * and set it to the global buffer row. + * + * + * Stage 2: process input rows. * - * Stage 1: Partial aggregate at Mapper side: + * If the aggregate mode is `Partial` or `Complete`: + * 1. 
The framework calls `update(buffer: MutableRow, input: InternalRow)` to process the input + * row. + * 2. In `update`, we get the buffer object from the global buffer row and call + * `update(buffer: T, input: InternalRow): Unit`. * - * 1. The framework calls `createAggregationBuffer(): T` to create an empty internal aggregation - * buffer object. - * 2. Upon each input row, the framework calls - * `update(buffer: T, input: InternalRow): Unit` to update the aggregation buffer object T. - * 3. After processing all rows of current group (group by key), the framework will serialize - * aggregation buffer object T to storage format (Array[Byte]) and persist the Array[Byte] - * to disk if needed. - * 4. The framework moves on to next group, until all groups have been processed. + * If the aggregate mode is `PartialMerge` or `Final`: + * 1. The framework call `merge(buffer: MutableRow, inputBuffer: InternalRow)` to process the + * input row, which are serialized buffer objects shuffled from other nodes. + * 2. In `merge`, we get the buffer object from the global buffer row, and get the binary data + * from input row and deserialize it to buffer object, then we call + * `merge(buffer: T, input: T): Unit` to merge these 2 buffer objects. * - * Shuffling exchange data to Reducer tasks... * - * Stage 2: Final mode aggregate at Reducer side: + * Stage 3: output results. + * + * If the aggregate mode is `Partial` or `PartialMerge`: + * 1. The framework calls `serializeAggregateBufferInPlace` to replace the buffer object in the + * global buffer row with binary data. + * 2. In `serializeAggregateBufferInPlace`, we get the buffer object from the global buffer row + * and call `serialize(buffer: T): Array[Byte]` to serialize the buffer object to binary. + * 3. The framework outputs buffer attributes and shuffle them to other nodes. + * + * If the aggregate mode is `Final` or `Complete`: + * 1. The framework calls `eval(buffer: InternalRow)` to calculate the final result. + * 2. In `eval`, we get the buffer object from the global buffer row and call + * `eval(buffer: T): Any` to get the final result. + * 3. The framework outputs these final results. + * + * + * Window function work flow: + * The framework calls `update(buffer: MutableRow, input: InternalRow)` several times and then + * call `eval(buffer: InternalRow)`, so there is no need for window operator to call + * `serializeAggregateBufferInPlace`. * - * 1. The framework calls `createAggregationBuffer(): T` to create an empty internal aggregation - * buffer object (type T) for merging. - * 2. For each aggregation output of Stage 1, The framework de-serializes the storage - * format (Array[Byte]) and produces one input aggregation object (type T). - * 3. For each input aggregation object, the framework calls `merge(buffer: T, input: T): Unit` - * to merge the input aggregation object into aggregation buffer object. - * 4. After processing all input aggregation objects of current group (group by key), the framework - * calls method `eval(buffer: T)` to generate the final output for this group. - * 5. The framework moves on to next group, until all groups have been processed. 
* * NOTE: SQL with TypedImperativeAggregate functions is planned in sort based aggregation, * instead of hash based aggregation, as TypedImperativeAggregate use BinaryType as aggregation @@ -489,25 +510,23 @@ abstract class TypedImperativeAggregate[T] extends ImperativeAggregate { } final override def update(buffer: MutableRow, input: InternalRow): Unit = { - val bufferObject = getField[T](buffer, mutableAggBufferOffset) - update(bufferObject, input) + update(getBufferObject(buffer), input) } final override def merge(buffer: MutableRow, inputBuffer: InternalRow): Unit = { - val bufferObject = getField[T](buffer, mutableAggBufferOffset) + val bufferObject = getBufferObject(buffer) // The inputBuffer stores serialized aggregation buffer object produced by partial aggregate val inputObject = deserialize(inputBuffer.getBinary(inputAggBufferOffset)) merge(bufferObject, inputObject) } final override def eval(buffer: InternalRow): Any = { - val bufferObject = getField[T](buffer, mutableAggBufferOffset) - eval(bufferObject) + eval(getBufferObject(buffer)) } private[this] val anyObjectType = ObjectType(classOf[AnyRef]) - private def getField[U](input: InternalRow, fieldIndex: Int): U = { - input.get(fieldIndex, anyObjectType).asInstanceOf[U] + private def getBufferObject(bufferRow: InternalRow): T = { + bufferRow.get(mutableAggBufferOffset, anyObjectType).asInstanceOf[T] } final override lazy val aggBufferAttributes: Seq[AttributeReference] = { @@ -524,9 +543,11 @@ abstract class TypedImperativeAggregate[T] extends ImperativeAggregate { * In-place replaces the aggregation buffer object stored at buffer's index * `mutableAggBufferOffset`, with SparkSQL internally supported underlying storage format * (BinaryType). + * + * This is only called when doing Partial or PartialMerge mode aggregation, before the framework + * shuffle out aggregate buffers. */ final def serializeAggregateBufferInPlace(buffer: MutableRow): Unit = { - val bufferObject = getField[T](buffer, mutableAggBufferOffset) - buffer(mutableAggBufferOffset) = serialize(bufferObject) + buffer(mutableAggBufferOffset) = serialize(getBufferObject(buffer)) } } From 18832162357282ec81515b5b2ba93747be3ad18b Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Fri, 26 Aug 2016 11:01:48 -0700 Subject: [PATCH 108/270] [SPARKR][MINOR] Fix example of spark.naiveBayes ## What changes were proposed in this pull request? The original example doesn't work because the features are not categorical. This PR fixes this by changing to another dataset. ## How was this patch tested? Manual test. Author: Junyang Qian Closes #14820 from junyangq/SPARK-FixNaiveBayes. 
--- R/pkg/R/mllib.R | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index dfc5a1c7dfdc8..6808aaea8cac3 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -747,10 +747,11 @@ setMethod("summary", signature(object = "MultilayerPerceptronClassificationModel #' @export #' @examples #' \dontrun{ -#' df <- createDataFrame(infert) +#' data <- as.data.frame(UCBAdmissions) +#' df <- createDataFrame(data) #' #' # fit a Bernoulli naive Bayes model -#' model <- spark.naiveBayes(df, education ~ ., smoothing = 0) +#' model <- spark.naiveBayes(df, Admit ~ Gender + Dept, smoothing = 0) #' #' # get the summary of the model #' summary(model) From fd4ba3f626f49d7d616a2a334d45b1c736e1db1c Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Fri, 26 Aug 2016 11:13:38 -0700 Subject: [PATCH 109/270] [SPARK-17192][SQL] Issue Exception when Users Specify the Partitioning Columns without a Given Schema ### What changes were proposed in this pull request? Address the comments by yhuai in the original PR: https://github.com/apache/spark/pull/14207 First, issue an exception instead of logging a warning when users specify the partitioning columns without a given schema. Second, refactor the codes a little. ### How was this patch tested? Fixed the test cases. Author: gatorsmile Closes #14572 from gatorsmile/followup16552. --- .../sql/execution/datasources/rules.scala | 25 ++++++------------- .../sql/execution/command/DDLSuite.scala | 17 +++++++++---- .../spark/sql/hive/HiveExternalCatalog.scala | 16 ++++++------ 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala index 5eb2f0a9ff034..f14c63c19f905 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala @@ -72,29 +72,20 @@ case class PreprocessDDL(conf: SQLConf) extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { // When we CREATE TABLE without specifying the table schema, we should fail the query if - // bucketing information is specified, as we can't infer bucketing from data files currently, - // and we should ignore the partition columns if it's specified, as we will infer it later, at - // runtime. + // bucketing information is specified, as we can't infer bucketing from data files currently. + // Since the runtime inferred partition columns could be different from what user specified, + // we fail the query if the partitioning information is specified. case c @ CreateTable(tableDesc, _, None) if tableDesc.schema.isEmpty => if (tableDesc.bucketSpec.isDefined) { failAnalysis("Cannot specify bucketing information if the table schema is not specified " + "when creating and will be inferred at runtime") } - - val partitionColumnNames = tableDesc.partitionColumnNames - if (partitionColumnNames.nonEmpty) { - // The table does not have a specified schema, which means that the schema will be inferred - // at runtime. So, we are not expecting partition columns and we will discover partitions - // at runtime. However, if there are specified partition columns, we simply ignore them and - // provide a warning message. - logWarning( - s"Specified partition columns (${partitionColumnNames.mkString(",")}) will be " + - s"ignored. 
The schema and partition columns of table ${tableDesc.identifier} will " + - "be inferred.") - c.copy(tableDesc = tableDesc.copy(partitionColumnNames = Nil)) - } else { - c + if (tableDesc.partitionColumnNames.nonEmpty) { + failAnalysis("It is not allowed to specify partition columns when the table schema is " + + "not defined. When the table schema is not provided, schema and partition columns " + + "will be inferred.") } + c // Here we normalize partition, bucket and sort column names, w.r.t. the case sensitivity // config, and do various checks: diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index e6ae42258d4c5..b343454b12d86 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -265,7 +265,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { userSpecifiedPartitionCols.map(p => s"PARTITIONED BY ($p)").getOrElse("") val schemaClause = userSpecifiedSchema.map(s => s"($s)").getOrElse("") val uri = path.toURI - sql( + val sqlCreateTable = s""" |CREATE TABLE $tabName $schemaClause |USING parquet @@ -273,11 +273,18 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { | path '$uri' |) |$partitionClause - """.stripMargin) - val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName)) + """.stripMargin + if (userSpecifiedSchema.isEmpty && userSpecifiedPartitionCols.nonEmpty) { + val e = intercept[AnalysisException](sql(sqlCreateTable)).getMessage + assert(e.contains( + "not allowed to specify partition columns when the table schema is not defined")) + } else { + sql(sqlCreateTable) + val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName)) - assert(expectedSchema == tableMetadata.schema) - assert(expectedPartitionCols == tableMetadata.partitionColumnNames) + assert(expectedSchema == tableMetadata.schema) + assert(expectedPartitionCols == tableMetadata.partitionColumnNames) + } } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index 2586d11a6c1ff..7f50e38d30c9a 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -622,24 +622,26 @@ object HiveExternalCatalog { def getSchemaFromTableProperties(metadata: CatalogTable): StructType = { val errorMessage = "Could not read schema from the hive metastore because it is corrupted." val props = metadata.properties - props.get(DATASOURCE_SCHEMA).map { schema => + val schema = props.get(DATASOURCE_SCHEMA) + if (schema.isDefined) { // Originally, we used `spark.sql.sources.schema` to store the schema of a data source table. // After SPARK-6024, we removed this flag. // Although we are not using `spark.sql.sources.schema` any more, we need to still support. 
- DataType.fromJson(schema).asInstanceOf[StructType] - } getOrElse { - props.get(DATASOURCE_SCHEMA_NUMPARTS).map { numParts => - val parts = (0 until numParts.toInt).map { index => + DataType.fromJson(schema.get).asInstanceOf[StructType] + } else { + val numSchemaParts = props.get(DATASOURCE_SCHEMA_NUMPARTS) + if (numSchemaParts.isDefined) { + val parts = (0 until numSchemaParts.get.toInt).map { index => val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull if (part == null) { throw new AnalysisException(errorMessage + - s" (missing part $index of the schema, $numParts parts are expected).") + s" (missing part $index of the schema, ${numSchemaParts.get} parts are expected).") } part } // Stick all parts back to a single schema string. DataType.fromJson(parts.mkString).asInstanceOf[StructType] - } getOrElse { + } else { throw new AnalysisException(errorMessage) } } From 261c55dd8808502fb7f3384eb537d26a4a8123d7 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Fri, 26 Aug 2016 11:19:03 -0700 Subject: [PATCH 110/270] [SPARK-17250][SQL] Remove HiveClient and setCurrentDatabase from HiveSessionCatalog ### What changes were proposed in this pull request? This is the first step to remove `HiveClient` from `HiveSessionState`. In the metastore interaction, we always use the fully qualified table name when accessing/operating a table. That means, we always specify the database. Thus, it is not necessary to use `HiveClient` to change the active database in Hive metastore. In `HiveSessionCatalog `, `setCurrentDatabase` is the only function that uses `HiveClient`. Thus, we can remove it after removing `setCurrentDatabase` ### How was this patch tested? The existing test cases. Author: gatorsmile Closes #14821 from gatorsmile/setCurrentDB. 
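A minimal illustration of the reasoning above — the `Catalog` and `TableIdent` types below are hypothetical, not Spark's: once every metastore call receives a fully qualified `db.table` name, switching the current database only touches session-local state, so no call into the Hive client is needed.

```scala
// Hypothetical types, for illustration only.
case class TableIdent(table: String, database: Option[String] = None)

class Catalog(defaultDb: String) {
  private var currentDb = defaultDb

  // Only session-local state changes; nothing is pushed to an external client.
  def setCurrentDatabase(db: String): Unit = { currentDb = db }

  // Every lookup resolves the database eagerly and hands a fully qualified name downstream,
  // so the metastore never depends on its own notion of a "current" database.
  def qualifiedName(ident: TableIdent): String =
    s"${ident.database.getOrElse(currentDb)}.${ident.table}"
}

object QualifiedNameDemo {
  def main(args: Array[String]): Unit = {
    val catalog = new Catalog("default")
    assert(catalog.qualifiedName(TableIdent("t1")) == "default.t1")
    catalog.setCurrentDatabase("sales")
    assert(catalog.qualifiedName(TableIdent("t1")) == "sales.t1")
    assert(catalog.qualifiedName(TableIdent("t2", Some("hr"))) == "hr.t2")
  }
}
```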
--- .../org/apache/spark/sql/hive/HiveSessionCatalog.scala | 7 ------- .../scala/org/apache/spark/sql/hive/HiveSessionState.scala | 1 - 2 files changed, 8 deletions(-) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala index 86d3b6de0dbfd..bfa5899fafdbe 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala @@ -34,7 +34,6 @@ import org.apache.spark.sql.catalyst.expressions.{Cast, Expression, ExpressionIn import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.hive.HiveShim.HiveFunctionWrapper -import org.apache.spark.sql.hive.client.HiveClient import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{DecimalType, DoubleType} import org.apache.spark.util.Utils @@ -42,7 +41,6 @@ import org.apache.spark.util.Utils private[sql] class HiveSessionCatalog( externalCatalog: HiveExternalCatalog, - client: HiveClient, sparkSession: SparkSession, functionResourceLoader: FunctionResourceLoader, functionRegistry: FunctionRegistry, @@ -55,11 +53,6 @@ private[sql] class HiveSessionCatalog( conf, hadoopConf) { - override def setCurrentDatabase(db: String): Unit = { - super.setCurrentDatabase(db) - client.setCurrentDatabase(db) - } - override def lookupRelation(name: TableIdentifier, alias: Option[String]): LogicalPlan = { val table = formatTableName(name.table) if (name.database.isDefined || !tempTables.contains(table)) { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala index f3c4135da6552..15e1255653f88 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala @@ -45,7 +45,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession) override lazy val catalog = { new HiveSessionCatalog( sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog], - metadataHive, sparkSession, functionResourceLoader, functionRegistry, From 9812f7d5381f7cd8112fd30c7e45ae4f0eab6e88 Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Fri, 26 Aug 2016 11:30:23 -0700 Subject: [PATCH 111/270] [SPARK-17165][SQL] FileStreamSource should not track the list of seen files indefinitely ## What changes were proposed in this pull request? Before this change, FileStreamSource uses an in-memory hash set to track the list of files processed by the engine. The list can grow indefinitely, leading to OOM or overflow of the hash set. This patch introduces a new user-defined option called "maxFileAge", default to 24 hours. If a file is older than this age, FileStreamSource will purge it from the in-memory map that was used to track the list of files that have been processed. ## How was this patch tested? Added unit tests for the underlying utility, and also added an end-to-end test to validate the purge in FileStreamSourceSuite. Also verified the new test cases would fail when the timeout was set to a very large number. Author: petermaxlee Closes #14728 from petermaxlee/SPARK-17165. 
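The bookkeeping idea can be sketched as follows — a simplified stand-in, not the patch's actual `SeenFilesMap`: each processed file is tracked with its modification timestamp, and entries older than the newest timestamp minus the configured maximum age are periodically dropped, so the map stays bounded.

```scala
import scala.collection.mutable

// Simplified sketch of an age-bounded "seen files" map.
class SeenFilesSketch(maxAgeMs: Long) {
  private val seen = mutable.HashMap.empty[String, Long] // path -> last-modified timestamp
  private var latestTimestamp = 0L

  def add(path: String, timestamp: Long): Unit = {
    seen(path) = timestamp
    if (timestamp > latestTimestamp) latestTimestamp = timestamp
  }

  /** A file is worth processing if it is recent enough and has not been seen before. */
  def isNewFile(path: String, timestamp: Long): Boolean =
    timestamp >= latestTimestamp - maxAgeMs && !seen.contains(path)

  /** Drop entries that have aged out, relative to the newest file seen so far. */
  def purge(): Unit = {
    val threshold = latestTimestamp - maxAgeMs
    val stale = seen.filter { case (_, ts) => ts < threshold }.keys.toList
    stale.foreach(seen.remove)
  }

  def size: Int = seen.size
}
```

On the user side the cut-off is set through the new source option, e.g. `.option("maxFileAge", "2d")` on a streaming read, which `FileStreamOptions` parses with `Utils.timeStringAsMs`.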
--- .../streaming/FileStreamOptions.scala | 54 +++++++ .../streaming/FileStreamSource.scala | 149 ++++++++++++++---- .../execution/streaming/HDFSMetadataLog.scala | 2 +- .../streaming/FileStreamSourceSuite.scala | 76 +++++++++ .../sql/streaming/FileStreamSourceSuite.scala | 40 ++++- 5 files changed, 285 insertions(+), 36 deletions(-) create mode 100644 sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamOptions.scala create mode 100644 sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceSuite.scala diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamOptions.scala new file mode 100644 index 0000000000000..3efc20c1d662d --- /dev/null +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamOptions.scala @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.streaming + +import scala.util.Try + +import org.apache.spark.internal.Logging +import org.apache.spark.sql.execution.datasources.CaseInsensitiveMap +import org.apache.spark.util.Utils + +/** + * User specified options for file streams. + */ +class FileStreamOptions(parameters: Map[String, String]) extends Logging { + + val maxFilesPerTrigger: Option[Int] = parameters.get("maxFilesPerTrigger").map { str => + Try(str.toInt).toOption.filter(_ > 0).getOrElse { + throw new IllegalArgumentException( + s"Invalid value '$str' for option 'maxFilesPerTrigger', must be a positive integer") + } + } + + /** + * Maximum age of a file that can be found in this directory, before it is deleted. + * + * The max age is specified with respect to the timestamp of the latest file, and not the + * timestamp of the current system. That this means if the last file has timestamp 1000, and the + * current system time is 2000, and max age is 200, the system will purge files older than + * 800 (rather than 1800) from the internal state. + * + * Default to a week. + */ + val maxFileAgeMs: Long = + Utils.timeStringAsMs(parameters.getOrElse("maxFileAge", "7d")) + + /** Options as specified by the user, in a case-insensitive map, without "path" set. 
*/ + val optionMapWithoutPath: Map[String, String] = + new CaseInsensitiveMap(parameters).filterKeys(_ != "path") +} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala index 0cfad659dc92c..e8b969b5e0a55 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala @@ -17,21 +17,20 @@ package org.apache.spark.sql.execution.streaming -import scala.util.Try +import scala.collection.JavaConverters._ import org.apache.hadoop.fs.Path import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging import org.apache.spark.sql.{DataFrame, Dataset, SparkSession} -import org.apache.spark.sql.execution.datasources.{CaseInsensitiveMap, DataSource, ListingFileCatalog, LogicalRelation} +import org.apache.spark.sql.execution.datasources.{DataSource, ListingFileCatalog, LogicalRelation} import org.apache.spark.sql.types.StructType -import org.apache.spark.util.collection.OpenHashSet /** - * A very simple source that reads text files from the given directory as they appear. + * A very simple source that reads files from the given directory as they appear. * - * TODO Clean up the metadata files periodically + * TODO: Clean up the metadata log files periodically. */ class FileStreamSource( sparkSession: SparkSession, @@ -41,19 +40,34 @@ class FileStreamSource( metadataPath: String, options: Map[String, String]) extends Source with Logging { - private val fs = new Path(path).getFileSystem(sparkSession.sessionState.newHadoopConf()) - private val qualifiedBasePath = fs.makeQualified(new Path(path)) // can contains glob patterns - private val metadataLog = new HDFSMetadataLog[Seq[String]](sparkSession, metadataPath) + import FileStreamSource._ + + private val sourceOptions = new FileStreamOptions(options) + + private val qualifiedBasePath: Path = { + val fs = new Path(path).getFileSystem(sparkSession.sessionState.newHadoopConf()) + fs.makeQualified(new Path(path)) // can contains glob patterns + } + + private val metadataLog = new HDFSMetadataLog[Seq[FileEntry]](sparkSession, metadataPath) + private var maxBatchId = metadataLog.getLatest().map(_._1).getOrElse(-1L) /** Maximum number of new files to be considered in each batch */ - private val maxFilesPerBatch = getMaxFilesPerBatch() + private val maxFilesPerBatch = sourceOptions.maxFilesPerTrigger + + /** A mapping from a file that we have processed to some timestamp it was last modified. */ + // Visible for testing and debugging in production. + val seenFiles = new SeenFilesMap(sourceOptions.maxFileAgeMs) - private val seenFiles = new OpenHashSet[String] - metadataLog.get(None, Some(maxBatchId)).foreach { case (batchId, files) => - files.foreach(seenFiles.add) + metadataLog.get(None, Some(maxBatchId)).foreach { case (batchId, entry) => + entry.foreach(seenFiles.add) + // TODO: move purge call out of the loop once we truncate logs. + seenFiles.purge() } + logInfo(s"maxFilesPerBatch = $maxFilesPerBatch, maxFileAge = ${sourceOptions.maxFileAgeMs}") + /** * Returns the maximum offset that can be retrieved from the source. * @@ -61,16 +75,27 @@ class FileStreamSource( * there is no race here, so the cost of `synchronized` should be rare. 
*/ private def fetchMaxOffset(): LongOffset = synchronized { - val newFiles = fetchAllFiles().filter(!seenFiles.contains(_)) + // All the new files found - ignore aged files and files that we have seen. + val newFiles = fetchAllFiles().filter(seenFiles.isNewFile) + + // Obey user's setting to limit the number of files in this batch trigger. val batchFiles = if (maxFilesPerBatch.nonEmpty) newFiles.take(maxFilesPerBatch.get) else newFiles + batchFiles.foreach { file => seenFiles.add(file) logDebug(s"New file: $file") } - logTrace(s"Number of new files = ${newFiles.size})") - logTrace(s"Number of files selected for batch = ${batchFiles.size}") - logTrace(s"Number of seen files = ${seenFiles.size}") + val numPurged = seenFiles.purge() + + logTrace( + s""" + |Number of new files = ${newFiles.size} + |Number of files selected for batch = ${batchFiles.size} + |Number of seen files = ${seenFiles.size} + |Number of files purged from tracking map = $numPurged + """.stripMargin) + if (batchFiles.nonEmpty) { maxBatchId += 1 metadataLog.add(maxBatchId, batchFiles) @@ -104,22 +129,26 @@ class FileStreamSource( val files = metadataLog.get(Some(startId + 1), Some(endId)).flatMap(_._2) logInfo(s"Processing ${files.length} files from ${startId + 1}:$endId") logTrace(s"Files are:\n\t" + files.mkString("\n\t")) - val newOptions = new CaseInsensitiveMap(options).filterKeys(_ != "path") val newDataSource = DataSource( sparkSession, - paths = files, + paths = files.map(_.path), userSpecifiedSchema = Some(schema), className = fileFormatClassName, - options = newOptions) + options = sourceOptions.optionMapWithoutPath) Dataset.ofRows(sparkSession, LogicalRelation(newDataSource.resolveRelation())) } - private def fetchAllFiles(): Seq[String] = { + /** + * Returns a list of files found, sorted by their timestamp. + */ + private def fetchAllFiles(): Seq[FileEntry] = { val startTime = System.nanoTime val globbedPaths = SparkHadoopUtil.get.globPathIfNecessary(qualifiedBasePath) val catalog = new ListingFileCatalog(sparkSession, globbedPaths, options, Some(new StructType)) - val files = catalog.allFiles().sortBy(_.getModificationTime).map(_.getPath.toUri.toString) + val files = catalog.allFiles().sortBy(_.getModificationTime).map { status => + FileEntry(status.getPath.toUri.toString, status.getModificationTime) + } val endTime = System.nanoTime val listingTimeMs = (endTime.toDouble - startTime) / 1000000 if (listingTimeMs > 2000) { @@ -132,20 +161,76 @@ class FileStreamSource( files } - private def getMaxFilesPerBatch(): Option[Int] = { - new CaseInsensitiveMap(options) - .get("maxFilesPerTrigger") - .map { str => - Try(str.toInt).toOption.filter(_ > 0).getOrElse { - throw new IllegalArgumentException( - s"Invalid value '$str' for option 'maxFilesPerTrigger', must be a positive integer") - } - } - } - override def getOffset: Option[Offset] = Some(fetchMaxOffset()).filterNot(_.offset == -1) override def toString: String = s"FileStreamSource[$qualifiedBasePath]" override def stop() {} } + + +object FileStreamSource { + + /** Timestamp for file modification time, in ms since January 1, 1970 UTC. */ + type Timestamp = Long + + case class FileEntry(path: String, timestamp: Timestamp) extends Serializable + + /** + * A custom hash map used to track the list of files seen. This map is not thread-safe. + * + * To prevent the hash map from growing indefinitely, a purge function is available to + * remove files "maxAgeMs" older than the latest file. 
+ */ + class SeenFilesMap(maxAgeMs: Long) { + require(maxAgeMs >= 0) + + /** Mapping from file to its timestamp. */ + private val map = new java.util.HashMap[String, Timestamp] + + /** Timestamp of the latest file. */ + private var latestTimestamp: Timestamp = 0L + + /** Timestamp for the last purge operation. */ + private var lastPurgeTimestamp: Timestamp = 0L + + /** Add a new file to the map. */ + def add(file: FileEntry): Unit = { + map.put(file.path, file.timestamp) + if (file.timestamp > latestTimestamp) { + latestTimestamp = file.timestamp + } + } + + /** + * Returns true if we should consider this file a new file. The file is only considered "new" + * if it is new enough that we are still tracking, and we have not seen it before. + */ + def isNewFile(file: FileEntry): Boolean = { + // Note that we are testing against lastPurgeTimestamp here so we'd never miss a file that + // is older than (latestTimestamp - maxAgeMs) but has not been purged yet. + file.timestamp >= lastPurgeTimestamp && !map.containsKey(file.path) + } + + /** Removes aged entries and returns the number of files removed. */ + def purge(): Int = { + lastPurgeTimestamp = latestTimestamp - maxAgeMs + val iter = map.entrySet().iterator() + var count = 0 + while (iter.hasNext) { + val entry = iter.next() + if (entry.getValue < lastPurgeTimestamp) { + count += 1 + iter.remove() + } + } + count + } + + def size: Int = map.size() + + def allEntries: Seq[FileEntry] = { + map.entrySet().asScala.map(entry => FileEntry(entry.getKey, entry.getValue)).toSeq + } + } +} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala index 698f07b0a187f..2b6f76ca28e21 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala @@ -180,7 +180,7 @@ class HDFSMetadataLog[T: ClassTag](sparkSession: SparkSession, path: String) private def isFileAlreadyExistsException(e: IOException): Boolean = { e.isInstanceOf[FileAlreadyExistsException] || // Old Hadoop versions don't throw FileAlreadyExistsException. Although it's fixed in - // HADOOP-9361, we still need to support old Hadoop versions. + // HADOOP-9361 in Hadoop 2.5, we still need to support old Hadoop versions. (e.getMessage != null && e.getMessage.startsWith("File already exists: ")) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceSuite.scala new file mode 100644 index 0000000000000..c6db2fd3f908e --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceSuite.scala @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.streaming + +import org.apache.spark.SparkFunSuite + +class FileStreamSourceSuite extends SparkFunSuite { + + import FileStreamSource._ + + test("SeenFilesMap") { + val map = new SeenFilesMap(maxAgeMs = 10) + + map.add(FileEntry("a", 5)) + assert(map.size == 1) + map.purge() + assert(map.size == 1) + + // Add a new entry and purge should be no-op, since the gap is exactly 10 ms. + map.add(FileEntry("b", 15)) + assert(map.size == 2) + map.purge() + assert(map.size == 2) + + // Add a new entry that's more than 10 ms than the first entry. We should be able to purge now. + map.add(FileEntry("c", 16)) + assert(map.size == 3) + map.purge() + assert(map.size == 2) + + // Override existing entry shouldn't change the size + map.add(FileEntry("c", 25)) + assert(map.size == 2) + + // Not a new file because we have seen c before + assert(!map.isNewFile(FileEntry("c", 20))) + + // Not a new file because timestamp is too old + assert(!map.isNewFile(FileEntry("d", 5))) + + // Finally a new file: never seen and not too old + assert(map.isNewFile(FileEntry("e", 20))) + } + + test("SeenFilesMap should only consider a file old if it is earlier than last purge time") { + val map = new SeenFilesMap(maxAgeMs = 10) + + map.add(FileEntry("a", 20)) + assert(map.size == 1) + + // Timestamp 5 should still considered a new file because purge time should be 0 + assert(map.isNewFile(FileEntry("b", 9))) + assert(map.isNewFile(FileEntry("b", 10))) + + // Once purge, purge time should be 10 and then b would be a old file if it is less than 10. + map.purge() + assert(!map.isNewFile(FileEntry("b", 9))) + assert(map.isNewFile(FileEntry("b", 10))) + } + +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala index 47260a23c7ee3..03222b4a49c6c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala @@ -104,12 +104,13 @@ class FileStreamSourceTest extends StreamTest with SharedSQLContext { def createFileStream( format: String, path: String, - schema: Option[StructType] = None): DataFrame = { + schema: Option[StructType] = None, + options: Map[String, String] = Map.empty): DataFrame = { val reader = if (schema.isDefined) { - spark.readStream.format(format).schema(schema.get) + spark.readStream.format(format).schema(schema.get).options(options) } else { - spark.readStream.format(format) + spark.readStream.format(format).options(options) } reader.load(path) } @@ -331,6 +332,39 @@ class FileStreamSourceSuite extends FileStreamSourceTest { } } + test("SPARK-17165 should not track the list of seen files indefinitely") { + // This test works by: + // 1. Create a file + // 2. Get it processed + // 3. Sleeps for a very short amount of time (larger than maxFileAge + // 4. Add another file (at this point the original file should have been purged + // 5. 
Test the size of the seenFiles internal data structure + + // Note that if we change maxFileAge to a very large number, the last step should fail. + withTempDirs { case (src, tmp) => + val textStream: DataFrame = + createFileStream("text", src.getCanonicalPath, options = Map("maxFileAge" -> "5ms")) + + testStream(textStream)( + AddTextFileData("a\nb", src, tmp), + CheckAnswer("a", "b"), + + // SLeeps longer than 5ms (maxFileAge) + AssertOnQuery { _ => Thread.sleep(10); true }, + + AddTextFileData("c\nd", src, tmp), + CheckAnswer("a", "b", "c", "d"), + + AssertOnQuery("seen files should contain only one entry") { streamExecution => + val source = streamExecution.logicalPlan.collect { case e: StreamingExecutionRelation => + e.source.asInstanceOf[FileStreamSource] + }.head + source.seenFiles.size == 1 + } + ) + } + } + // =============== JSON file stream tests ================ test("read from json files") { From c0949dc944b7e2fc8a4465acc68a8f2713b3fa13 Mon Sep 17 00:00:00 2001 From: "Peng, Meng" Date: Fri, 26 Aug 2016 11:54:10 -0700 Subject: [PATCH 112/270] [SPARK-17207][MLLIB] fix comparing Vector bug in TestingUtils ## What changes were proposed in this pull request? fix comparing Vector bug in TestingUtils. There is the same bug for Matrix comparing. How to check the length of Matrix should be discussed first. ## How was this patch tested? (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) (If this patch involves UI changes, please attach a screenshot; otherwise, remove this) Author: Peng, Meng Closes #14785 from mpjlu/testUtils. --- .../apache/spark/ml/util/TestingUtils.scala | 10 +- .../spark/ml/util/TestingUtilsSuite.scala | 277 +++++++++++++++++- .../ml/feature/CountVectorizerSuite.scala | 8 +- .../spark/mllib/util/TestingUtils.scala | 10 +- .../spark/mllib/util/TestingUtilsSuite.scala | 277 +++++++++++++++++- 5 files changed, 566 insertions(+), 16 deletions(-) diff --git a/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtils.scala b/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtils.scala index 2bebaa35ba15e..2327917e2cad7 100644 --- a/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtils.scala +++ b/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtils.scala @@ -154,7 +154,7 @@ object TestingUtils { */ def absTol(eps: Double): CompareVectorRightSide = CompareVectorRightSide( (x: Vector, y: Vector, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) + x.size == y.size && x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) }, x, eps, ABS_TOL_MSG) /** @@ -164,7 +164,7 @@ object TestingUtils { */ def relTol(eps: Double): CompareVectorRightSide = CompareVectorRightSide( (x: Vector, y: Vector, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) + x.size == y.size && x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) }, x, eps, REL_TOL_MSG) override def toString: String = x.toString @@ -217,7 +217,8 @@ object TestingUtils { */ def absTol(eps: Double): CompareMatrixRightSide = CompareMatrixRightSide( (x: Matrix, y: Matrix, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) + x.numRows == y.numRows && x.numCols == y.numCols && + x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) }, x, eps, ABS_TOL_MSG) /** @@ -227,7 +228,8 @@ object TestingUtils { */ def relTol(eps: Double): CompareMatrixRightSide = CompareMatrixRightSide( (x: Matrix, y: Matrix, eps: Double) => { - 
x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) + x.numRows == y.numRows && x.numCols == y.numCols && + x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) }, x, eps, REL_TOL_MSG) override def toString: String = x.toString diff --git a/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtilsSuite.scala b/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtilsSuite.scala index e374165f75e6f..5cbf2f04e6269 100644 --- a/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtilsSuite.scala +++ b/mllib-local/src/test/scala/org/apache/spark/ml/util/TestingUtilsSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.ml.util import org.scalatest.exceptions.TestFailedException import org.apache.spark.ml.SparkMLFunSuite -import org.apache.spark.ml.linalg.Vectors +import org.apache.spark.ml.linalg.{Matrices, Vectors} import org.apache.spark.ml.util.TestingUtils._ class TestingUtilsSuite extends SparkMLFunSuite { @@ -109,6 +109,10 @@ class TestingUtilsSuite extends SparkMLFunSuite { assert(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01) assert(!(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)) assert(!(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01)) + assert(Vectors.dense(Array(3.1)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01) + assert(Vectors.dense(Array[Double]()) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01) + assert(Vectors.dense(Array(3.1)) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01) + assert(Vectors.dense(Array[Double]()) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01) // Should throw exception with message when test fails. intercept[TestFailedException]( @@ -117,6 +121,12 @@ class TestingUtilsSuite extends SparkMLFunSuite { intercept[TestFailedException]( Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01) + intercept[TestFailedException]( + Vectors.dense(Array(3.1)) ~== Vectors.dense(Array(3.535, 3.534)) relTol 0.01) + + intercept[TestFailedException]( + Vectors.dense(Array[Double]()) ~== Vectors.dense(Array(3.135)) relTol 0.01) + // Comparing against zero should fail the test and throw exception with message // saying that the relative error is meaningless in this situation. 
intercept[TestFailedException]( @@ -125,12 +135,18 @@ class TestingUtilsSuite extends SparkMLFunSuite { intercept[TestFailedException]( Vectors.dense(Array(3.1, 0.01)) ~== Vectors.sparse(2, Array(0), Array(3.13)) relTol 0.01) - // Comparisons of two sparse vectors + // Comparisons of a sparse vector and a dense vector assert(Vectors.dense(Array(3.1, 3.5)) ~== Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) assert(Vectors.dense(Array(3.1, 3.5)) !~== Vectors.sparse(2, Array(0, 1), Array(3.135, 3.534)) relTol 0.01) + + assert(Vectors.dense(Array(3.1)) !~== + Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) + + assert(Vectors.dense(Array[Double]()) !~== + Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) } test("Comparing vectors using absolute error.") { @@ -154,6 +170,21 @@ class TestingUtilsSuite extends SparkMLFunSuite { assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) ~= Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)) + assert(Vectors.dense(Array(3.1)) !~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5) + + assert(!(Vectors.dense(Array(3.1)) ~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5)) + + assert(Vectors.dense(Array[Double]()) !~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5) + + assert(!(Vectors.dense(Array[Double]()) ~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5)) + + assert(Vectors.dense(Array[Double]()) ~= + Vectors.dense(Array[Double]()) absTol 1E-5) + // Should throw exception with message when test fails. intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) !~== Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6) @@ -161,6 +192,12 @@ class TestingUtilsSuite extends SparkMLFunSuite { intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) ~== Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6) + intercept[TestFailedException](Vectors.dense(Array(3.1)) ~== + Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7)) absTol 1E-6) + + intercept[TestFailedException](Vectors.dense(Array[Double]()) ~== + Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7)) absTol 1E-6) + // Comparisons of two sparse vectors assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~== Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) absTol 1E-6) @@ -174,6 +211,12 @@ class TestingUtilsSuite extends SparkMLFunSuite { assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) !~== Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6) + assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-6, 2.4)) !~== + Vectors.sparse(1, Array(0), Array(3.1)) absTol 1E-3) + + assert(Vectors.sparse(0, Array[Int](), Array[Double]()) !~== + Vectors.sparse(1, Array(0), Array(3.1)) absTol 1E-3) + // Comparisons of a dense vector and a sparse vector assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~== Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) absTol 1E-6) @@ -183,5 +226,235 @@ class TestingUtilsSuite extends SparkMLFunSuite { assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~== Vectors.dense(Array(3.1, 1E-3, 2.4)) absTol 1E-6) + + assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~== + Vectors.dense(Array(3.1)) absTol 1E-6) + + assert(Vectors.dense(Array[Double]()) !~== + Vectors.sparse(3, Array(0, 2), Array(0, 2.4)) absTol 1E-6) + + assert(Vectors.sparse(1, Array(0), Array(3.1)) !~== + Vectors.dense(Array(3.1, 3.2)) absTol 1E-6) + + assert(Vectors.dense(Array(3.1)) !~== + Vectors.sparse(0, Array[Int](), Array[Double]()) absTol 
1E-6) + } + + test("Comparing Matrices using absolute error.") { + + // Comparisons of two dense Matrices + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(0, 0, Array()) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(0, 0, Array()) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + // Should throw exception with message when test fails. + intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + intercept[TestFailedException](Matrices.dense(2, 1, Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-5) + + intercept[TestFailedException](Matrices.dense(0, 0, Array()) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-5) + + // Comparisons of two sparse Matrices + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5)) absTol 1E-9)) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5)) absTol 1E-6)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 
2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + // Comparisons of a dense Matrix and a sparse Matrix + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9)) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 1, Array(3.1 + 1E-8, 0)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 1, Array(3.1 + 1E-8, 0)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(0, 0, Array()) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(0, 0, Array()) absTol 1E-6) + } + + test("Comparing Matrices using relative error.") { + + // Comparisons of two dense Matrices + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.134, 3.535, 3.134, 3.535)) relTol 0.01)) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01)) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + assert(Matrices.dense(0, 0, Array()) !~= + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + assert(Matrices.dense(0, 0, Array()) !~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + // Should throw exception with message when test fails. 
+ intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01) + + intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01) + + intercept[TestFailedException](Matrices.dense(2, 1, Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + intercept[TestFailedException](Matrices.dense(0, 0, Array()) ~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + // Comparisons of two sparse Matrices + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01)) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + // Comparisons of a dense Matrix and a sparse Matrix + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01)) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 1, Array(3.1, 0)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 1, Array(3.1, 0)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), 
Array(3.1, 3.5)) !~== + Matrices.dense(0, 0, Array()) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(0, 0, Array()) relTol 0.01) } } diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala index a59203c33d814..863b66bf497fe 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala @@ -77,10 +77,10 @@ class CountVectorizerSuite extends SparkFunSuite with MLlibTestSparkContext test("CountVectorizer vocabSize and minDF") { val df = spark.createDataFrame(Seq( - (0, split("a b c d"), Vectors.sparse(3, Seq((0, 1.0), (1, 1.0)))), - (1, split("a b c"), Vectors.sparse(3, Seq((0, 1.0), (1, 1.0)))), - (2, split("a b"), Vectors.sparse(3, Seq((0, 1.0), (1, 1.0)))), - (3, split("a"), Vectors.sparse(3, Seq((0, 1.0))))) + (0, split("a b c d"), Vectors.sparse(2, Seq((0, 1.0), (1, 1.0)))), + (1, split("a b c"), Vectors.sparse(2, Seq((0, 1.0), (1, 1.0)))), + (2, split("a b"), Vectors.sparse(2, Seq((0, 1.0), (1, 1.0)))), + (3, split("a"), Vectors.sparse(2, Seq((0, 1.0))))) ).toDF("id", "words", "expected") val cvModel = new CountVectorizer() .setInputCol("words") diff --git a/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala b/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala index 6de9aaf94f1b2..39a6bc37d9638 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala @@ -154,7 +154,7 @@ object TestingUtils { */ def absTol(eps: Double): CompareVectorRightSide = CompareVectorRightSide( (x: Vector, y: Vector, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) + x.size == y.size && x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) }, x, eps, ABS_TOL_MSG) /** @@ -164,7 +164,7 @@ object TestingUtils { */ def relTol(eps: Double): CompareVectorRightSide = CompareVectorRightSide( (x: Vector, y: Vector, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) + x.size == y.size && x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) }, x, eps, REL_TOL_MSG) override def toString: String = x.toString @@ -217,7 +217,8 @@ object TestingUtils { */ def absTol(eps: Double): CompareMatrixRightSide = CompareMatrixRightSide( (x: Matrix, y: Matrix, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) + x.numRows == y.numRows && x.numCols == y.numCols && + x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps) }, x, eps, ABS_TOL_MSG) /** @@ -227,7 +228,8 @@ object TestingUtils { */ def relTol(eps: Double): CompareMatrixRightSide = CompareMatrixRightSide( (x: Matrix, y: Matrix, eps: Double) => { - x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) + x.numRows == y.numRows && x.numCols == y.numCols && + x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps) }, x, eps, REL_TOL_MSG) override def toString: String = x.toString diff --git a/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala index 44c39704e5b92..1aff44480aac9 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala @@ -20,7 
+20,7 @@ package org.apache.spark.mllib.util import org.scalatest.exceptions.TestFailedException import org.apache.spark.SparkFunSuite -import org.apache.spark.mllib.linalg.Vectors +import org.apache.spark.mllib.linalg.{Matrices, Vectors} import org.apache.spark.mllib.util.TestingUtils._ class TestingUtilsSuite extends SparkFunSuite { @@ -109,6 +109,10 @@ class TestingUtilsSuite extends SparkFunSuite { assert(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01) assert(!(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)) assert(!(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01)) + assert(Vectors.dense(Array(3.1)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01) + assert(Vectors.dense(Array[Double]()) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01) + assert(Vectors.dense(Array(3.1)) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01) + assert(Vectors.dense(Array[Double]()) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01) // Should throw exception with message when test fails. intercept[TestFailedException]( @@ -117,6 +121,12 @@ class TestingUtilsSuite extends SparkFunSuite { intercept[TestFailedException]( Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01) + intercept[TestFailedException]( + Vectors.dense(Array(3.1)) ~== Vectors.dense(Array(3.535, 3.534)) relTol 0.01) + + intercept[TestFailedException]( + Vectors.dense(Array[Double]()) ~== Vectors.dense(Array(3.135)) relTol 0.01) + // Comparing against zero should fail the test and throw exception with message // saying that the relative error is meaningless in this situation. intercept[TestFailedException]( @@ -125,12 +135,18 @@ class TestingUtilsSuite extends SparkFunSuite { intercept[TestFailedException]( Vectors.dense(Array(3.1, 0.01)) ~== Vectors.sparse(2, Array(0), Array(3.13)) relTol 0.01) - // Comparisons of two sparse vectors + // Comparisons of a sparse vector and a dense vector assert(Vectors.dense(Array(3.1, 3.5)) ~== Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) assert(Vectors.dense(Array(3.1, 3.5)) !~== Vectors.sparse(2, Array(0, 1), Array(3.135, 3.534)) relTol 0.01) + + assert(Vectors.dense(Array(3.1)) !~== + Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) + + assert(Vectors.dense(Array[Double]()) !~== + Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) } test("Comparing vectors using absolute error.") { @@ -154,6 +170,21 @@ class TestingUtilsSuite extends SparkFunSuite { assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) ~= Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)) + assert(Vectors.dense(Array(3.1)) !~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5) + + assert(!(Vectors.dense(Array(3.1)) ~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5)) + + assert(Vectors.dense(Array[Double]()) !~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5) + + assert(!(Vectors.dense(Array[Double]()) ~= + Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5)) + + assert(Vectors.dense(Array[Double]()) ~= + Vectors.dense(Array[Double]()) absTol 1E-5) + // Should throw exception with message when test fails. 
intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) !~== Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6) @@ -161,6 +192,12 @@ class TestingUtilsSuite extends SparkFunSuite { intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) ~== Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6) + intercept[TestFailedException](Vectors.dense(Array(3.1)) ~== + Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7)) absTol 1E-6) + + intercept[TestFailedException](Vectors.dense(Array[Double]()) ~== + Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7)) absTol 1E-6) + // Comparisons of two sparse vectors assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~== Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) absTol 1E-6) @@ -174,6 +211,12 @@ class TestingUtilsSuite extends SparkFunSuite { assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) !~== Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6) + assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-6, 2.4)) !~== + Vectors.sparse(1, Array(0), Array(3.1)) absTol 1E-3) + + assert(Vectors.sparse(0, Array[Int](), Array[Double]()) !~== + Vectors.sparse(1, Array(0), Array(3.1)) absTol 1E-3) + // Comparisons of a dense vector and a sparse vector assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~== Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) absTol 1E-6) @@ -183,5 +226,235 @@ class TestingUtilsSuite extends SparkFunSuite { assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~== Vectors.dense(Array(3.1, 1E-3, 2.4)) absTol 1E-6) + + assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~== + Vectors.dense(Array(3.1)) absTol 1E-6) + + assert(Vectors.dense(Array[Double]()) !~== + Vectors.sparse(3, Array(0, 2), Array(0, 2.4)) absTol 1E-6) + + assert(Vectors.sparse(1, Array(0), Array(3.1)) !~== + Vectors.dense(Array(3.1, 3.2)) absTol 1E-6) + + assert(Vectors.dense(Array(3.1)) !~== + Vectors.sparse(0, Array[Int](), Array[Double]()) absTol 1E-6) + } + + test("Comparing Matrices using absolute error.") { + + // Comparisons of two dense Matrices + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(0, 0, Array()) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.dense(0, 0, Array()) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 
+ 1E-7)) absTol 1E-6) + + // Should throw exception with message when test fails. + intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + intercept[TestFailedException](Matrices.dense(2, 1, Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-5) + + intercept[TestFailedException](Matrices.dense(0, 0, Array()) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-5) + + // Comparisons of two sparse Matrices + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5)) absTol 1E-9)) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5)) absTol 1E-6)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6) + + // Comparisons of a dense Matrix and a sparse Matrix + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9)) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) 
absTol 1E-6)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 1, Array(3.1 + 1E-8, 0)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 1, Array(3.1 + 1E-8, 0)) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(0, 0, Array()) absTol 1E-6) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(0, 0, Array()) absTol 1E-6) + } + + test("Comparing Matrices using relative error.") { + + // Comparisons of two dense Matrices + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01) + + assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.134, 3.535, 3.134, 3.535)) relTol 0.01)) + + assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01)) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + assert(Matrices.dense(0, 0, Array()) !~= + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + assert(Matrices.dense(0, 0, Array()) !~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + // Should throw exception with message when test fails. 
+ intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01) + + intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01) + + intercept[TestFailedException](Matrices.dense(2, 1, Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + intercept[TestFailedException](Matrices.dense(0, 0, Array()) ~== + Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01) + + // Comparisons of two sparse Matrices + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01) + + assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01)) + + assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~== + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~= + Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01) + + // Comparisons of a dense Matrix and a sparse Matrix + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~== + Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~= + Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01)) + + assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01)) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(2, 1, Array(3.1, 0)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~== + Matrices.dense(2, 1, Array(3.1, 0)) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), 
Array(3.1, 3.5)) !~== + Matrices.dense(0, 0, Array()) relTol 0.01) + + assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~= + Matrices.dense(0, 0, Array()) relTol 0.01) } } From 8e5475be3c9a620f18f6712631b093464a7d0ee7 Mon Sep 17 00:00:00 2001 From: Michael Gummelt Date: Fri, 26 Aug 2016 12:25:22 -0700 Subject: [PATCH 113/270] [SPARK-16967] move mesos to module ## What changes were proposed in this pull request? Move Mesos code into a mvn module ## How was this patch tested? unit tests manually submitting a client mode and cluster mode job spark/mesos integration test suite Author: Michael Gummelt Closes #14637 from mgummelt/mesos-module. --- .travis.yml | 2 +- assembly/pom.xml | 10 ++ core/pom.xml | 5 - .../scala/org/apache/spark/SparkContext.scala | 18 +-- .../scala/org/apache/spark/TaskState.scala | 20 ---- .../SparkContextSchedulerCreationSuite.scala | 28 ----- dev/create-release/release-build.sh | 15 +-- dev/lint-java | 2 +- dev/mima | 2 +- dev/scalastyle | 1 + dev/sparktestsupport/modules.py | 7 ++ dev/test-dependencies.sh | 2 +- docs/building-spark.md | 24 ++-- mesos/pom.xml | 109 ++++++++++++++++++ ...che.spark.scheduler.ExternalClusterManager | 1 + .../deploy/mesos/MesosClusterDispatcher.scala | 0 .../MesosClusterDispatcherArguments.scala | 0 .../deploy/mesos/MesosDriverDescription.scala | 0 .../mesos/MesosExternalShuffleService.scala | 0 .../spark/deploy/mesos/ui/DriverPage.scala | 0 .../deploy/mesos/ui/MesosClusterPage.scala | 0 .../deploy/mesos/ui/MesosClusterUI.scala | 0 .../deploy/rest/mesos/MesosRestServer.scala | 0 .../spark/executor/MesosExecutorBackend.scala | 9 +- .../cluster/mesos/MesosClusterManager.scala | 60 ++++++++++ .../mesos/MesosClusterPersistenceEngine.scala | 0 .../cluster/mesos/MesosClusterScheduler.scala | 2 +- .../mesos/MesosClusterSchedulerSource.scala | 0 .../MesosCoarseGrainedSchedulerBackend.scala | 2 +- .../MesosFineGrainedSchedulerBackend.scala | 4 +- .../mesos/MesosSchedulerBackendUtil.scala | 0 .../cluster/mesos/MesosSchedulerUtils.scala | 24 +++- .../cluster/mesos/MesosTaskLaunchData.scala | 0 .../mesos/MesosClusterManagerSuite.scala | 47 ++++++++ .../mesos/MesosClusterSchedulerSuite.scala | 0 ...osCoarseGrainedSchedulerBackendSuite.scala | 0 ...esosFineGrainedSchedulerBackendSuite.scala | 0 .../mesos/MesosSchedulerUtilsSuite.scala | 0 .../mesos/MesosTaskLaunchDataSuite.scala | 0 .../spark/scheduler/cluster/mesos/Utils.scala | 0 pom.xml | 21 ++-- project/MimaExcludes.scala | 4 +- project/SparkBuild.scala | 4 +- 43 files changed, 305 insertions(+), 118 deletions(-) create mode 100644 mesos/pom.xml create mode 100644 mesos/src/main/resources/META-INF/services/org.apache.spark.scheduler.ExternalClusterManager rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/MesosDriverDescription.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/MesosExternalShuffleService.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/ui/DriverPage.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterPage.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterUI.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala (100%) 
rename {core => mesos}/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala (93%) create mode 100644 mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManager.scala rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterPersistenceEngine.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala (99%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSource.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala (99%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala (99%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala (100%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala (94%) rename {core => mesos}/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchData.scala (100%) create mode 100644 mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManagerSuite.scala rename {core => mesos}/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala (100%) rename {core => mesos}/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala (100%) rename {core => mesos}/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala (100%) rename {core => mesos}/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtilsSuite.scala (100%) rename {core => mesos}/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchDataSuite.scala (100%) rename {core => mesos}/src/test/scala/org/apache/spark/scheduler/cluster/mesos/Utils.scala (100%) diff --git a/.travis.yml b/.travis.yml index c16f76399ccd2..8739849a20798 100644 --- a/.travis.yml +++ b/.travis.yml @@ -44,7 +44,7 @@ notifications: # 5. Run maven install before running lint-java. install: - export MAVEN_SKIP_RC=1 - - build/mvn -T 4 -q -DskipTests -Pyarn -Phadoop-2.3 -Pkinesis-asl -Phive -Phive-thriftserver install + - build/mvn -T 4 -q -DskipTests -Pmesos -Pyarn -Phadoop-2.3 -Pkinesis-asl -Phive -Phive-thriftserver install # 6. Run lint-java. 
script: diff --git a/assembly/pom.xml b/assembly/pom.xml index 971a62f87a218..ec243eaebaea7 100644 --- a/assembly/pom.xml +++ b/assembly/pom.xml @@ -138,6 +138,16 @@ + + mesos + + + org.apache.spark + spark-mesos_${scala.binary.version} + ${project.version} + + + hive diff --git a/core/pom.xml b/core/pom.xml index ab6c3ce805275..c04cf7e5255f2 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -215,11 +215,6 @@ org.glassfish.jersey.containers jersey-container-servlet-core - - org.apache.mesos - mesos - ${mesos.classifier} - io.netty netty-all diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 2eaeab1d807b4..08d6343d623cf 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -42,7 +42,6 @@ import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, Sequence TextInputFormat} import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob} import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat} -import org.apache.mesos.MesosNativeLibrary import org.apache.spark.annotation.DeveloperApi import org.apache.spark.broadcast.Broadcast @@ -56,7 +55,6 @@ import org.apache.spark.rdd._ import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, StandaloneSchedulerBackend} -import org.apache.spark.scheduler.cluster.mesos.{MesosCoarseGrainedSchedulerBackend, MesosFineGrainedSchedulerBackend} import org.apache.spark.scheduler.local.LocalSchedulerBackend import org.apache.spark.storage._ import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump @@ -2512,18 +2510,6 @@ object SparkContext extends Logging { } (backend, scheduler) - case MESOS_REGEX(mesosUrl) => - MesosNativeLibrary.load() - val scheduler = new TaskSchedulerImpl(sc) - val coarseGrained = sc.conf.getBoolean("spark.mesos.coarse", defaultValue = true) - val backend = if (coarseGrained) { - new MesosCoarseGrainedSchedulerBackend(scheduler, sc, mesosUrl, sc.env.securityManager) - } else { - new MesosFineGrainedSchedulerBackend(scheduler, sc, mesosUrl) - } - scheduler.initialize(backend) - (backend, scheduler) - case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr @@ -2545,7 +2531,7 @@ object SparkContext extends Logging { private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = - ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) + ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException(s"Multiple Cluster Managers ($serviceLoaders) registered " + s"for the url $url:") @@ -2566,8 +2552,6 @@ private object SparkMasterRegex { val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r - // Regular expression for connection to Mesos cluster by mesos:// or mesos://zk:// url - val MESOS_REGEX = """mesos://(.*)""".r } /** diff --git a/core/src/main/scala/org/apache/spark/TaskState.scala b/core/src/main/scala/org/apache/spark/TaskState.scala index d232fae6b15b3..cbace7b5f9f37 100644 --- 
a/core/src/main/scala/org/apache/spark/TaskState.scala +++ b/core/src/main/scala/org/apache/spark/TaskState.scala @@ -17,8 +17,6 @@ package org.apache.spark -import org.apache.mesos.Protos.{TaskState => MesosTaskState} - private[spark] object TaskState extends Enumeration { val LAUNCHING, RUNNING, FINISHED, FAILED, KILLED, LOST = Value @@ -30,22 +28,4 @@ private[spark] object TaskState extends Enumeration { def isFailed(state: TaskState): Boolean = (LOST == state) || (FAILED == state) def isFinished(state: TaskState): Boolean = FINISHED_STATES.contains(state) - - def toMesos(state: TaskState): MesosTaskState = state match { - case LAUNCHING => MesosTaskState.TASK_STARTING - case RUNNING => MesosTaskState.TASK_RUNNING - case FINISHED => MesosTaskState.TASK_FINISHED - case FAILED => MesosTaskState.TASK_FAILED - case KILLED => MesosTaskState.TASK_KILLED - case LOST => MesosTaskState.TASK_LOST - } - - def fromMesos(mesosState: MesosTaskState): TaskState = mesosState match { - case MesosTaskState.TASK_STAGING | MesosTaskState.TASK_STARTING => LAUNCHING - case MesosTaskState.TASK_RUNNING | MesosTaskState.TASK_KILLING => RUNNING - case MesosTaskState.TASK_FINISHED => FINISHED - case MesosTaskState.TASK_FAILED => FAILED - case MesosTaskState.TASK_KILLED => KILLED - case MesosTaskState.TASK_LOST | MesosTaskState.TASK_ERROR => LOST - } } diff --git a/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala index 7d75a93ff6839..f8938dfedee5b 100644 --- a/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala +++ b/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala @@ -22,7 +22,6 @@ import org.scalatest.PrivateMethodTester import org.apache.spark.internal.Logging import org.apache.spark.scheduler.{SchedulerBackend, TaskScheduler, TaskSchedulerImpl} import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend -import org.apache.spark.scheduler.cluster.mesos.{MesosCoarseGrainedSchedulerBackend, MesosFineGrainedSchedulerBackend} import org.apache.spark.scheduler.local.LocalSchedulerBackend @@ -130,31 +129,4 @@ class SparkContextSchedulerCreationSuite case _ => fail() } } - - def testMesos(master: String, expectedClass: Class[_], coarse: Boolean) { - val conf = new SparkConf().set("spark.mesos.coarse", coarse.toString) - try { - val sched = createTaskScheduler(master, "client", conf) - assert(sched.backend.getClass === expectedClass) - } catch { - case e: UnsatisfiedLinkError => - assert(e.getMessage.contains("mesos")) - logWarning("Mesos not available, could not test actual Mesos scheduler creation") - case e: Throwable => fail(e) - } - } - - test("mesos fine-grained") { - testMesos("mesos://localhost:1234", classOf[MesosFineGrainedSchedulerBackend], coarse = false) - } - - test("mesos coarse-grained") { - testMesos("mesos://localhost:1234", classOf[MesosCoarseGrainedSchedulerBackend], coarse = true) - } - - test("mesos with zookeeper") { - testMesos("mesos://zk://localhost:1234,localhost:2345", - classOf[MesosFineGrainedSchedulerBackend], coarse = false) - } - } diff --git a/dev/create-release/release-build.sh b/dev/create-release/release-build.sh index 2833dc7651117..96f9b5714ebb8 100755 --- a/dev/create-release/release-build.sh +++ b/dev/create-release/release-build.sh @@ -80,7 +80,7 @@ NEXUS_PROFILE=d63f592e7eac0 # Profile for Spark staging uploads BASE_DIR=$(pwd) MVN="build/mvn --force" -PUBLISH_PROFILES="-Pyarn -Phive -Phive-thriftserver 
-Phadoop-2.2" +PUBLISH_PROFILES="-Pmesos -Pyarn -Phive -Phive-thriftserver -Phadoop-2.2" PUBLISH_PROFILES="$PUBLISH_PROFILES -Pspark-ganglia-lgpl -Pkinesis-asl" rm -rf spark @@ -186,12 +186,13 @@ if [[ "$1" == "package" ]]; then # We increment the Zinc port each time to avoid OOM's and other craziness if multiple builds # share the same Zinc server. - make_binary_release "hadoop2.3" "-Psparkr -Phadoop-2.3 -Phive -Phive-thriftserver -Pyarn" "3033" & - make_binary_release "hadoop2.4" "-Psparkr -Phadoop-2.4 -Phive -Phive-thriftserver -Pyarn" "3034" & - make_binary_release "hadoop2.6" "-Psparkr -Phadoop-2.6 -Phive -Phive-thriftserver -Pyarn" "3035" & - make_binary_release "hadoop2.7" "-Psparkr -Phadoop-2.7 -Phive -Phive-thriftserver -Pyarn" "3036" & - make_binary_release "hadoop2.4-without-hive" "-Psparkr -Phadoop-2.4 -Pyarn" "3037" & - make_binary_release "without-hadoop" "-Psparkr -Phadoop-provided -Pyarn" "3038" & + FLAGS="-Psparkr -Phive -Phive-thriftserver -Pyarn -Pmesos" + make_binary_release "hadoop2.3" "-Phadoop2.3 $FLAGS" "3033" & + make_binary_release "hadoop2.4" "-Phadoop2.4 $FLAGS" "3034" & + make_binary_release "hadoop2.6" "-Phadoop2.6 $FLAGS" "3035" & + make_binary_release "hadoop2.7" "-Phadoop2.7 $FLAGS" "3036" & + make_binary_release "hadoop2.4-without-hive" "-Psparkr -Phadoop-2.4 -Pyarn -Pmesos" "3037" & + make_binary_release "without-hadoop" "-Psparkr -Phadoop-provided -Pyarn -Pmesos" "3038" & wait rm -rf spark-$SPARK_VERSION-bin-*/ diff --git a/dev/lint-java b/dev/lint-java index fe8ab83d562d1..c2e80538ef2a5 100755 --- a/dev/lint-java +++ b/dev/lint-java @@ -20,7 +20,7 @@ SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )" SPARK_ROOT_DIR="$(dirname $SCRIPT_DIR)" -ERRORS=$($SCRIPT_DIR/../build/mvn -Pkinesis-asl -Pyarn -Phive -Phive-thriftserver checkstyle:check | grep ERROR) +ERRORS=$($SCRIPT_DIR/../build/mvn -Pkinesis-asl -Pmesos -Pyarn -Phive -Phive-thriftserver checkstyle:check | grep ERROR) if test ! -z "$ERRORS"; then echo -e "Checkstyle checks failed at following occurrences:\n$ERRORS" diff --git a/dev/mima b/dev/mima index c3553490451c8..11c4af29808a8 100755 --- a/dev/mima +++ b/dev/mima @@ -24,7 +24,7 @@ set -e FWDIR="$(cd "`dirname "$0"`"/..; pwd)" cd "$FWDIR" -SPARK_PROFILES="-Pyarn -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive" +SPARK_PROFILES="-Pmesos -Pyarn -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive" TOOLS_CLASSPATH="$(build/sbt -DcopyDependencies=false "export tools/fullClasspath" | tail -n1)" OLD_DEPS_CLASSPATH="$(build/sbt -DcopyDependencies=false $SPARK_PROFILES "export oldDeps/fullClasspath" | tail -n1)" diff --git a/dev/scalastyle b/dev/scalastyle index 8fd3604b9f451..f3dec833636c6 100755 --- a/dev/scalastyle +++ b/dev/scalastyle @@ -22,6 +22,7 @@ ERRORS=$(echo -e "q\n" \ | build/sbt \ -Pkinesis-asl \ + -Pmesos \ -Pyarn \ -Phive \ -Phive-thriftserver \ diff --git a/dev/sparktestsupport/modules.py b/dev/sparktestsupport/modules.py index ce5725764be6d..f2aa241a4b8ff 100644 --- a/dev/sparktestsupport/modules.py +++ b/dev/sparktestsupport/modules.py @@ -458,6 +458,13 @@ def __hash__(self): ] ) +mesos = Module( + name="mesos", + dependencies=[], + source_file_regexes=["mesos/"], + sbt_test_goals=["mesos/test"] +) + # The root module is a dummy module which is used to run all of the tests. # No other modules should directly depend on this module. 
root = Module( diff --git a/dev/test-dependencies.sh b/dev/test-dependencies.sh index 28e3d4d8d4f00..4014f42e1983c 100755 --- a/dev/test-dependencies.sh +++ b/dev/test-dependencies.sh @@ -29,7 +29,7 @@ export LC_ALL=C # TODO: This would be much nicer to do in SBT, once SBT supports Maven-style resolution. # NOTE: These should match those in the release publishing script -HADOOP2_MODULE_PROFILES="-Phive-thriftserver -Pyarn -Phive" +HADOOP2_MODULE_PROFILES="-Phive-thriftserver -Pmesos -Pyarn -Phive" MVN="build/mvn" HADOOP_PROFILES=( hadoop-2.2 diff --git a/docs/building-spark.md b/docs/building-spark.md index 2c987cf8346ef..6908fc1ba74d0 100644 --- a/docs/building-spark.md +++ b/docs/building-spark.md @@ -50,7 +50,7 @@ To create a Spark distribution like those distributed by the to be runnable, use `./dev/make-distribution.sh` in the project root directory. It can be configured with Maven profile settings and so on like the direct Maven build. Example: - ./dev/make-distribution.sh --name custom-spark --tgz -Psparkr -Phadoop-2.4 -Phive -Phive-thriftserver -Pyarn + ./dev/make-distribution.sh --name custom-spark --tgz -Psparkr -Phadoop-2.4 -Phive -Phive-thriftserver -Pmesos -Pyarn For more information on usage, run `./dev/make-distribution.sh --help` @@ -105,13 +105,17 @@ By default Spark will build with Hive 1.2.1 bindings. ## Packaging without Hadoop Dependencies for YARN -The assembly directory produced by `mvn package` will, by default, include all of Spark's -dependencies, including Hadoop and some of its ecosystem projects. On YARN deployments, this -causes multiple versions of these to appear on executor classpaths: the version packaged in +The assembly directory produced by `mvn package` will, by default, include all of Spark's +dependencies, including Hadoop and some of its ecosystem projects. On YARN deployments, this +causes multiple versions of these to appear on executor classpaths: the version packaged in the Spark assembly and the version on each node, included with `yarn.application.classpath`. -The `hadoop-provided` profile builds the assembly without including Hadoop-ecosystem projects, +The `hadoop-provided` profile builds the assembly without including Hadoop-ecosystem projects, like ZooKeeper and Hadoop itself. +## Building with Mesos support + + ./build/mvn -Pmesos -DskipTests clean package + ## Building for Scala 2.10 To produce a Spark package compiled with Scala 2.10, use the `-Dscala-2.10` property: @@ -263,17 +267,17 @@ The run-tests script also can be limited to a specific Python version or a speci ## Running R Tests -To run the SparkR tests you will need to install the R package `testthat` -(run `install.packages(testthat)` from R shell). You can run just the SparkR tests using +To run the SparkR tests you will need to install the R package `testthat` +(run `install.packages(testthat)` from R shell). You can run just the SparkR tests using the command: ./R/run-tests.sh ## Running Docker-based Integration Test Suites -In order to run Docker integration tests, you have to install the `docker` engine on your box. -The instructions for installation can be found at [the Docker site](https://docs.docker.com/engine/installation/). -Once installed, the `docker` service needs to be started, if not already running. +In order to run Docker integration tests, you have to install the `docker` engine on your box. +The instructions for installation can be found at [the Docker site](https://docs.docker.com/engine/installation/). 
+Once installed, the `docker` service needs to be started, if not already running. On Linux, this can be done by `sudo service docker start`. ./build/mvn install -DskipTests diff --git a/mesos/pom.xml b/mesos/pom.xml new file mode 100644 index 0000000000000..57cc26a4ccef9 --- /dev/null +++ b/mesos/pom.xml @@ -0,0 +1,109 @@ + + + + 4.0.0 + + org.apache.spark + spark-parent_2.11 + 2.1.0-SNAPSHOT + ../pom.xml + + + spark-mesos_2.11 + jar + Spark Project Mesos + + mesos + 1.0.0 + shaded-protobuf + + + + + org.apache.spark + spark-core_${scala.binary.version} + ${project.version} + + + + org.apache.spark + spark-core_${scala.binary.version} + ${project.version} + test-jar + test + + + + org.apache.mesos + mesos + ${mesos.version} + ${mesos.classifier} + + + com.google.protobuf + protobuf-java + + + + + + org.mockito + mockito-core + test + + + + + com.google.guava + guava + + + org.eclipse.jetty + jetty-server + + + org.eclipse.jetty + jetty-plus + + + org.eclipse.jetty + jetty-util + + + org.eclipse.jetty + jetty-http + + + org.eclipse.jetty + jetty-servlet + + + org.eclipse.jetty + jetty-servlets + + + + + + + + target/scala-${scala.binary.version}/classes + target/scala-${scala.binary.version}/test-classes + + + diff --git a/mesos/src/main/resources/META-INF/services/org.apache.spark.scheduler.ExternalClusterManager b/mesos/src/main/resources/META-INF/services/org.apache.spark.scheduler.ExternalClusterManager new file mode 100644 index 0000000000000..12b6d5b64d68c --- /dev/null +++ b/mesos/src/main/resources/META-INF/services/org.apache.spark.scheduler.ExternalClusterManager @@ -0,0 +1 @@ +org.apache.spark.scheduler.cluster.mesos.MesosClusterManager diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala rename to mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcher.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala rename to mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosDriverDescription.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosDriverDescription.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/MesosDriverDescription.scala rename to mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosDriverDescription.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosExternalShuffleService.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosExternalShuffleService.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/MesosExternalShuffleService.scala rename to mesos/src/main/scala/org/apache/spark/deploy/mesos/MesosExternalShuffleService.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/ui/DriverPage.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/DriverPage.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/ui/DriverPage.scala rename to 
mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/DriverPage.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterPage.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterPage.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterPage.scala rename to mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterPage.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterUI.scala b/mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterUI.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterUI.scala rename to mesos/src/main/scala/org/apache/spark/deploy/mesos/ui/MesosClusterUI.scala diff --git a/core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala b/mesos/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala rename to mesos/src/main/scala/org/apache/spark/deploy/rest/mesos/MesosRestServer.scala diff --git a/core/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala b/mesos/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala similarity index 93% rename from core/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala rename to mesos/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala index 680cfb733e9e6..1937bd30bac51 100644 --- a/core/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala +++ b/mesos/src/main/scala/org/apache/spark/executor/MesosExecutorBackend.scala @@ -26,25 +26,26 @@ import org.apache.mesos.Protos.{TaskStatus => MesosTaskStatus, _} import org.apache.mesos.protobuf.ByteString import org.apache.spark.{SparkConf, SparkEnv, TaskState} -import org.apache.spark.TaskState.TaskState +import org.apache.spark.TaskState import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging -import org.apache.spark.scheduler.cluster.mesos.MesosTaskLaunchData +import org.apache.spark.scheduler.cluster.mesos.{MesosSchedulerUtils, MesosTaskLaunchData} import org.apache.spark.util.Utils private[spark] class MesosExecutorBackend extends MesosExecutor + with MesosSchedulerUtils // TODO: fix with ExecutorBackend with Logging { var executor: Executor = null var driver: ExecutorDriver = null - override def statusUpdate(taskId: Long, state: TaskState, data: ByteBuffer) { + override def statusUpdate(taskId: Long, state: TaskState.TaskState, data: ByteBuffer) { val mesosTaskId = TaskID.newBuilder().setValue(taskId.toString).build() driver.sendStatusUpdate(MesosTaskStatus.newBuilder() .setTaskId(mesosTaskId) - .setState(TaskState.toMesos(state)) + .setState(taskStateToMesos(state)) .setData(ByteString.copyFrom(data)) .build()) } diff --git a/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManager.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManager.scala new file mode 100644 index 0000000000000..a849c4afa24f5 --- /dev/null +++ b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManager.scala @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.scheduler.cluster.mesos + +import org.apache.spark.{SparkContext, SparkException} +import org.apache.spark.scheduler.{ExternalClusterManager, SchedulerBackend, TaskScheduler, TaskSchedulerImpl} + +/** + * Cluster Manager for creation of Yarn scheduler and backend + */ +private[spark] class MesosClusterManager extends ExternalClusterManager { + private val MESOS_REGEX = """mesos://(.*)""".r + + override def canCreate(masterURL: String): Boolean = { + masterURL.startsWith("mesos") + } + + override def createTaskScheduler(sc: SparkContext, masterURL: String): TaskScheduler = { + new TaskSchedulerImpl(sc) + } + + override def createSchedulerBackend(sc: SparkContext, + masterURL: String, + scheduler: TaskScheduler): SchedulerBackend = { + val mesosUrl = MESOS_REGEX.findFirstMatchIn(masterURL).get.group(1) + val coarse = sc.conf.getBoolean("spark.mesos.coarse", defaultValue = true) + if (coarse) { + new MesosCoarseGrainedSchedulerBackend( + scheduler.asInstanceOf[TaskSchedulerImpl], + sc, + mesosUrl, + sc.env.securityManager) + } else { + new MesosFineGrainedSchedulerBackend( + scheduler.asInstanceOf[TaskSchedulerImpl], + sc, + mesosUrl) + } + } + + override def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = { + scheduler.asInstanceOf[TaskSchedulerImpl].initialize(backend) + } +} + diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterPersistenceEngine.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterPersistenceEngine.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterPersistenceEngine.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterPersistenceEngine.scala diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala similarity index 99% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala index bb6f6b3e3ffd5..0b454997772d8 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala +++ b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala @@ -680,7 +680,7 @@ private[spark] class MesosClusterScheduler( retryState = Some(new MesosClusterRetryState(status, retries, nextRetry, waitTimeSec))) pendingRetryDrivers += newDriverDescription pendingRetryDriversState.persist(taskId, newDriverDescription) - } else if (TaskState.isFinished(TaskState.fromMesos(status.getState))) { + } else if (TaskState.isFinished(mesosToTaskState(status.getState))) { removeFromLaunchedDrivers(taskId) state.finishDate = Some(new Date()) if (finishedDrivers.size >= 
retainedDrivers) { diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSource.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSource.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSource.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSource.scala diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala similarity index 99% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala index 6b9313e5edb97..fde1fb3228020 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala +++ b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala @@ -473,7 +473,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend( override def statusUpdate(d: org.apache.mesos.SchedulerDriver, status: TaskStatus) { val taskId = status.getTaskId.getValue val slaveId = status.getSlaveId.getValue - val state = TaskState.fromMesos(status.getState) + val state = mesosToTaskState(status.getState) logInfo(s"Mesos task $taskId is now ${status.getState}") diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala similarity index 99% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala index f1e48fa7c52e1..eb3b235949501 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala +++ b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackend.scala @@ -366,9 +366,9 @@ private[spark] class MesosFineGrainedSchedulerBackend( override def statusUpdate(d: org.apache.mesos.SchedulerDriver, status: TaskStatus) { inClassLoader() { val tid = status.getTaskId.getValue.toLong - val state = TaskState.fromMesos(status.getState) + val state = mesosToTaskState(status.getState) synchronized { - if (TaskState.isFailed(TaskState.fromMesos(status.getState)) + if (TaskState.isFailed(mesosToTaskState(status.getState)) && taskIdToSlaveId.contains(tid)) { // We lost the executor on this slave, so remember that it's gone removeExecutor(taskIdToSlaveId(tid), "Lost executor") diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala similarity index 
94% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala index 1bbede18533e0..e19d445137207 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala +++ b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala @@ -26,19 +26,21 @@ import scala.util.control.NonFatal import com.google.common.base.Splitter import org.apache.mesos.{MesosSchedulerDriver, Protos, Scheduler, SchedulerDriver} -import org.apache.mesos.Protos._ +import org.apache.mesos.Protos.{TaskState => MesosTaskState, _} import org.apache.mesos.protobuf.{ByteString, GeneratedMessage} import org.apache.spark.{SparkConf, SparkContext, SparkException} +import org.apache.spark.TaskState import org.apache.spark.internal.Logging import org.apache.spark.util.Utils + /** * Shared trait for implementing a Mesos Scheduler. This holds common state and helper * methods and Mesos scheduler will use. */ -private[mesos] trait MesosSchedulerUtils extends Logging { +trait MesosSchedulerUtils extends Logging { // Lock used to wait for scheduler to be registered private final val registerLatch = new CountDownLatch(1) @@ -491,4 +493,22 @@ private[mesos] trait MesosSchedulerUtils extends Logging { sc.conf.remove("spark.mesos.driver.frameworkId") System.clearProperty("spark.mesos.driver.frameworkId") } + + def mesosToTaskState(state: MesosTaskState): TaskState.TaskState = state match { + case MesosTaskState.TASK_STAGING | MesosTaskState.TASK_STARTING => TaskState.LAUNCHING + case MesosTaskState.TASK_RUNNING | MesosTaskState.TASK_KILLING => TaskState.RUNNING + case MesosTaskState.TASK_FINISHED => TaskState.FINISHED + case MesosTaskState.TASK_FAILED => TaskState.FAILED + case MesosTaskState.TASK_KILLED => TaskState.KILLED + case MesosTaskState.TASK_LOST | MesosTaskState.TASK_ERROR => TaskState.LOST + } + + def taskStateToMesos(state: TaskState.TaskState): MesosTaskState = state match { + case TaskState.LAUNCHING => MesosTaskState.TASK_STARTING + case TaskState.RUNNING => MesosTaskState.TASK_RUNNING + case TaskState.FINISHED => MesosTaskState.TASK_FINISHED + case TaskState.FAILED => MesosTaskState.TASK_FAILED + case TaskState.KILLED => MesosTaskState.TASK_KILLED + case TaskState.LOST => MesosTaskState.TASK_LOST + } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchData.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchData.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchData.scala rename to mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchData.scala diff --git a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManagerSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManagerSuite.scala new file mode 100644 index 0000000000000..6fce06632c57e --- /dev/null +++ b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterManagerSuite.scala @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.scheduler.cluster.mesos + +import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite} + +class MesosClusterManagerSuite extends SparkFunSuite with LocalSparkContext { + def testURL(masterURL: String, expectedClass: Class[_], coarse: Boolean) { + val conf = new SparkConf().set("spark.mesos.coarse", coarse.toString) + sc = new SparkContext("local", "test", conf) + val clusterManager = new MesosClusterManager() + + assert(clusterManager.canCreate(masterURL)) + val taskScheduler = clusterManager.createTaskScheduler(sc, masterURL) + val sched = clusterManager.createSchedulerBackend(sc, masterURL, taskScheduler) + assert(sched.getClass === expectedClass) + } + + test("mesos fine-grained") { + testURL("mesos://localhost:1234", classOf[MesosFineGrainedSchedulerBackend], coarse = false) + } + + test("mesos coarse-grained") { + testURL("mesos://localhost:1234", classOf[MesosCoarseGrainedSchedulerBackend], coarse = true) + } + + test("mesos with zookeeper") { + testURL("mesos://zk://localhost:1234,localhost:2345", + classOf[MesosFineGrainedSchedulerBackend], + coarse = false) + } +} diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala similarity index 100% rename from core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala rename to mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala similarity index 100% rename from core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala rename to mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala similarity index 100% rename from core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala rename to mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtilsSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtilsSuite.scala similarity index 100% rename from core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtilsSuite.scala rename to 
mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtilsSuite.scala diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchDataSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchDataSuite.scala similarity index 100% rename from core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchDataSuite.scala rename to mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosTaskLaunchDataSuite.scala diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/Utils.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/Utils.scala similarity index 100% rename from core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/Utils.scala rename to mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/Utils.scala diff --git a/pom.xml b/pom.xml index 989658216e5fd..74238db59ed8f 100644 --- a/pom.xml +++ b/pom.xml @@ -119,8 +119,6 @@ 1.7 3.3.9 spark - 1.0.0 - shaded-protobuf 1.7.16 1.2.17 2.2.0 @@ -527,18 +525,6 @@ ${protobuf.version} ${hadoop.deps.scope} - - org.apache.mesos - mesos - ${mesos.version} - ${mesos.classifier} - - - com.google.protobuf - protobuf-java - - - org.roaringbitmap RoaringBitmap @@ -2527,6 +2513,13 @@ + + mesos + + mesos + + + hive-thriftserver diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala index 688218f6f43af..16f26e7d283b4 100644 --- a/project/MimaExcludes.scala +++ b/project/MimaExcludes.scala @@ -40,7 +40,9 @@ object MimaExcludes { // [SPARK-16199][SQL] Add a method to list the referenced columns in data source Filter ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.Filter.references"), // [SPARK-16853][SQL] Fixes encoder error in DataSet typed select - ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.Dataset.select") + ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.Dataset.select"), + // [SPARK-16967] Move Mesos to Module + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkMasterRegex.MESOS_REGEX") ) } diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index c769ba300e5e6..83a7c0864f76d 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -56,9 +56,9 @@ object BuildCommons { "tags", "sketch" ).map(ProjectRef(buildLocation, _)) ++ sqlProjects ++ streamingProjects - val optionallyEnabledProjects@Seq(yarn, java8Tests, sparkGangliaLgpl, + val optionallyEnabledProjects@Seq(mesos, yarn, java8Tests, sparkGangliaLgpl, streamingKinesisAsl, dockerIntegrationTests) = - Seq("yarn", "java8-tests", "ganglia-lgpl", "streaming-kinesis-asl", + Seq("mesos", "yarn", "java8-tests", "ganglia-lgpl", "streaming-kinesis-asl", "docker-integration-tests").map(ProjectRef(buildLocation, _)) val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKafka010Assembly, streamingKinesisAslAssembly) = From a11d10f1826b578ff721c4738224eef2b3c3b9f3 Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Fri, 26 Aug 2016 13:29:22 -0700 Subject: [PATCH 114/270] [SPARK-17246][SQL] Add BigDecimal literal ## What changes were proposed in this pull request? This PR adds parser support for `BigDecimal` literals. If you append the suffix `BD` to a valid number then this will be interpreted as a `BigDecimal`, for example `12.0E10BD` will interpreted into a BigDecimal with scale -9 and precision 3. This is useful in situations where you need exact values. 
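For quick reference, here is a condensed view of the parser cases this patch adds to `ExpressionParserSuite` (the snippet relies on that suite's `assertEqual`/`intercept` helpers, so it is a sketch rather than a standalone program):

```scala
// The BD suffix parses to an exact decimal Literal rather than a DOUBLE.
assertEqual("123.08BD", Literal(BigDecimal("123.08").underlying()))
assertEqual("123.0E-28BD", Literal(BigDecimal("123.0E-28").underlying()))

// Values that would need more than 38 digits of precision are rejected at parse time.
intercept("1.20E-38BD", "DecimalType can only support precision up to 38")
```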
## How was this patch tested? Added tests to `ExpressionParserSuite`, `ExpressionSQLBuilderSuite` and `SQLQueryTestSuite`. Author: Herman van Hovell Closes #14819 from hvanhovell/SPARK-17246. --- .../spark/sql/catalyst/parser/SqlBase.g4 | 6 +++++ .../sql/catalyst/expressions/literals.scala | 2 +- .../sql/catalyst/parser/AstBuilder.scala | 16 ++++++++++++- .../parser/ExpressionParserSuite.scala | 7 ++++++ .../resources/sql-tests/inputs/literals.sql | 6 +++++ .../sql-tests/results/literals.sql.out | 24 ++++++++++++++++++- .../catalyst/ExpressionSQLBuilderSuite.scala | 1 + 7 files changed, 59 insertions(+), 3 deletions(-) diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 index cab7c3ff5a8f7..a8af840c1e2a2 100644 --- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 +++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 @@ -633,6 +633,7 @@ number | MINUS? SMALLINT_LITERAL #smallIntLiteral | MINUS? TINYINT_LITERAL #tinyIntLiteral | MINUS? DOUBLE_LITERAL #doubleLiteral + | MINUS? BIGDECIMAL_LITERAL #bigDecimalLiteral ; nonReserved @@ -928,6 +929,11 @@ DOUBLE_LITERAL (INTEGER_VALUE | DECIMAL_VALUE | SCIENTIFIC_DECIMAL_VALUE) 'D' ; +BIGDECIMAL_LITERAL + : + (INTEGER_VALUE | DECIMAL_VALUE | SCIENTIFIC_DECIMAL_VALUE) 'BD' + ; + IDENTIFIER : (LETTER | DIGIT | '_')+ ; diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala index 730a7f62e04c7..41e3952f0e253 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala @@ -266,7 +266,7 @@ case class Literal (value: Any, dataType: DataType) extends LeafExpression with case Double.NegativeInfinity => s"CAST('-Infinity' AS ${DoubleType.sql})" case _ => v + "D" } - case (v: Decimal, t: DecimalType) => s"CAST($v AS ${t.sql})" + case (v: Decimal, t: DecimalType) => v + "BD" case (v: Int, DateType) => s"DATE '${DateTimeUtils.toJavaDate(v)}'" case (v: Long, TimestampType) => s"TIMESTAMP('${DateTimeUtils.toJavaTimestamp(v)}')" case _ => value.toString diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 8b98efcbf33c8..893db93368457 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -26,7 +26,8 @@ import org.antlr.v4.runtime.{ParserRuleContext, Token} import org.antlr.v4.runtime.tree.{ParseTree, RuleNode, TerminalNode} import org.apache.spark.internal.Logging -import org.apache.spark.sql.catalyst.{FunctionIdentifier, InternalRow, TableIdentifier} +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.parser.SqlBaseParser._ @@ -1323,6 +1324,19 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { numericLiteral(ctx, Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble) } + /** + * Create a BigDecimal Literal expression. 
+ */ + override def visitBigDecimalLiteral(ctx: BigDecimalLiteralContext): Literal = { + val raw = ctx.getText.substring(0, ctx.getText.length - 2) + try { + Literal(BigDecimal(raw).underlying()) + } catch { + case e: AnalysisException => + throw new ParseException(e.message, ctx) + } + } + /** * Create a String literal expression. */ diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala index 401d9cd9d288c..dbc5db39aed98 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala @@ -392,6 +392,13 @@ class ExpressionParserSuite extends PlanTest { intercept("1.8E308D", s"does not fit in range") // TODO we need to figure out if we should throw an exception here! assertEqual("1E309", Literal(Double.PositiveInfinity)) + + // BigDecimal Literal + assertEqual("90912830918230182310293801923652346786BD", + Literal(BigDecimal("90912830918230182310293801923652346786").underlying())) + assertEqual("123.0E-28BD", Literal(BigDecimal("123.0E-28").underlying())) + assertEqual("123.08BD", Literal(BigDecimal("123.08").underlying())) + intercept("1.20E-38BD", "DecimalType can only support precision up to 38") } test("strings") { diff --git a/sql/core/src/test/resources/sql-tests/inputs/literals.sql b/sql/core/src/test/resources/sql-tests/inputs/literals.sql index 62f0d3d0599c6..a532a598c6bf9 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/literals.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/literals.sql @@ -90,3 +90,9 @@ select interval 10 nanoseconds; -- unsupported data type select GEO '(10,-6)'; + +-- big decimal parsing +select 90912830918230182310293801923652346786BD, 123.0E-28BD, 123.08BD; + +-- out of range big decimal +select 1.20E-38BD; diff --git a/sql/core/src/test/resources/sql-tests/results/literals.sql.out b/sql/core/src/test/resources/sql-tests/results/literals.sql.out index 67e6d78dfbf24..85629f7ba813a 100644 --- a/sql/core/src/test/resources/sql-tests/results/literals.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/literals.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 38 +-- Number of queries: 40 -- !query 0 @@ -354,3 +354,25 @@ Literals of type 'GEO' are currently not supported.(line 1, pos 7) == SQL == select GEO '(10,-6)' -------^^^ + + +-- !query 38 +select 90912830918230182310293801923652346786BD, 123.0E-28BD, 123.08BD +-- !query 38 schema +struct<90912830918230182310293801923652346786:decimal(38,0),1.230E-26:decimal(29,29),123.08:decimal(5,2)> +-- !query 38 output +90912830918230182310293801923652346786 0.0000000000000000000000000123 123.08 + + +-- !query 39 +select 1.20E-38BD +-- !query 39 schema +struct<> +-- !query 39 output +org.apache.spark.sql.catalyst.parser.ParseException + +DecimalType can only support precision up to 38(line 1, pos 7) + +== SQL == +select 1.20E-38BD +-------^^^ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala index 86724cbb676c4..43a218b4d14b3 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala @@ -39,6 +39,7 @@ class 
ExpressionSQLBuilderSuite extends SQLBuilderTest { checkSQL(Literal(Double.PositiveInfinity), "CAST('Infinity' AS DOUBLE)") checkSQL(Literal(Double.NegativeInfinity), "CAST('-Infinity' AS DOUBLE)") checkSQL(Literal(Double.NaN), "CAST('NaN' AS DOUBLE)") + checkSQL(Literal(BigDecimal("10.0000000").underlying), "10.0000000BD") checkSQL( Literal(Timestamp.valueOf("2016-01-01 00:00:00")), "TIMESTAMP('2016-01-01 00:00:00.0')") // TODO tests for decimals From f64a1ddd09a34d5d867ccbaba46204d75fad038d Mon Sep 17 00:00:00 2001 From: petermaxlee Date: Fri, 26 Aug 2016 16:05:34 -0700 Subject: [PATCH 115/270] [SPARK-17235][SQL] Support purging of old logs in MetadataLog ## What changes were proposed in this pull request? This patch adds a purge interface to MetadataLog, and an implementation in HDFSMetadataLog. The purge function is currently unused, but I will use it to purge old execution and file source logs in follow-up patches. These changes are required in a production structured streaming job that runs for a long period of time. ## How was this patch tested? Added a unit test case in HDFSMetadataLogSuite. Author: petermaxlee Closes #14802 from petermaxlee/SPARK-17235. --- .../execution/streaming/HDFSMetadataLog.scala | 14 ++++++++++ .../sql/execution/streaming/MetadataLog.scala | 6 +++++ .../streaming/HDFSMetadataLogSuite.scala | 27 ++++++++++++++++--- 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala index 2b6f76ca28e21..127ece9ab0e56 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala @@ -227,6 +227,20 @@ class HDFSMetadataLog[T: ClassTag](sparkSession: SparkSession, path: String) None } + /** + * Removes all the log entry earlier than thresholdBatchId (exclusive). + */ + override def purge(thresholdBatchId: Long): Unit = { + val batchIds = fileManager.list(metadataPath, batchFilesFilter) + .map(f => pathToBatchId(f.getPath)) + + for (batchId <- batchIds if batchId < thresholdBatchId) { + val path = batchIdToPath(batchId) + fileManager.delete(path) + logTrace(s"Removed metadata log file: $path") + } + } + private def createFileManager(): FileManager = { val hadoopConf = sparkSession.sessionState.newHadoopConf() try { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MetadataLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MetadataLog.scala index cc70e1d314d1d..78d6be17df05a 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MetadataLog.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MetadataLog.scala @@ -48,4 +48,10 @@ trait MetadataLog[T] { * Return the latest batch Id and its metadata if exist. */ def getLatest(): Option[(Long, T)] + + /** + * Removes all the log entry earlier than thresholdBatchId (exclusive). + * This operation should be idempotent. 
+ */ + def purge(thresholdBatchId: Long): Unit } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala index ab5a2d253b94a..4259384f0bc61 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala @@ -46,14 +46,14 @@ class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext { test("FileManager: FileContextManager") { withTempDir { temp => val path = new Path(temp.getAbsolutePath) - testManager(path, new FileContextManager(path, new Configuration)) + testFileManager(path, new FileContextManager(path, new Configuration)) } } test("FileManager: FileSystemManager") { withTempDir { temp => val path = new Path(temp.getAbsolutePath) - testManager(path, new FileSystemManager(path, new Configuration)) + testFileManager(path, new FileSystemManager(path, new Configuration)) } } @@ -103,6 +103,25 @@ class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext { } } + testWithUninterruptibleThread("HDFSMetadataLog: purge") { + withTempDir { temp => + val metadataLog = new HDFSMetadataLog[String](spark, temp.getAbsolutePath) + assert(metadataLog.add(0, "batch0")) + assert(metadataLog.add(1, "batch1")) + assert(metadataLog.add(2, "batch2")) + assert(metadataLog.get(0).isDefined) + assert(metadataLog.get(1).isDefined) + assert(metadataLog.get(2).isDefined) + assert(metadataLog.getLatest().get._1 == 2) + + metadataLog.purge(2) + assert(metadataLog.get(0).isEmpty) + assert(metadataLog.get(1).isEmpty) + assert(metadataLog.get(2).isDefined) + assert(metadataLog.getLatest().get._1 == 2) + } + } + testWithUninterruptibleThread("HDFSMetadataLog: restart") { withTempDir { temp => val metadataLog = new HDFSMetadataLog[String](spark, temp.getAbsolutePath) @@ -155,8 +174,8 @@ class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext { } } - - def testManager(basePath: Path, fm: FileManager): Unit = { + /** Basic test case for [[FileManager]] implementation. */ + private def testFileManager(basePath: Path, fm: FileManager): Unit = { // Mkdirs val dir = new Path(s"$basePath/dir/subdir/subsubdir") assert(!fm.exists(dir)) From 540e91280147a61727f99592a66c0cbb12328fac Mon Sep 17 00:00:00 2001 From: Sameer Agarwal Date: Fri, 26 Aug 2016 16:40:59 -0700 Subject: [PATCH 116/270] [SPARK-17244] Catalyst should not pushdown non-deterministic join conditions ## What changes were proposed in this pull request? Given that non-deterministic expressions can be stateful, pushing them down the query plan during the optimization phase can cause incorrect behavior. This patch fixes that issue by explicitly disabling that. ## How was this patch tested? A new test in `FilterPushdownSuite` that checks catalyst behavior for both deterministic and non-deterministic join conditions. Author: Sameer Agarwal Closes #14815 from sameeragarwal/constraint-inputfile. 
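For illustration, this is the shape of the join condition exercised by the new regression test (condensed from the `FilterPushdownSuite` case below; it uses the Catalyst test DSL, so `testRelation`, `attr` and `Rand` come from the suite's imports):

```scala
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)

// Only the deterministic conditions that precede the first non-deterministic one
// ("x.a" === 5 and "y.a" === 5) may be pushed into the join's children; the
// Rand(10) condition and everything after it must stay in the join condition.
val originalQuery = x.join(y, condition = Some("x.a".attr === 5 && "y.a".attr === 5 &&
  "x.a".attr === Rand(10) && "y.b".attr === 5))
val correctAnswer = x.where("x.a".attr === 5).join(y.where("y.a".attr === 5),
  condition = Some("x.a".attr === Rand(10) && "y.b".attr === 5))
```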
--- .../sql/catalyst/optimizer/Optimizer.scala | 21 ++++++++++++------- .../optimizer/FilterPushdownSuite.scala | 14 +++++++++++++ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 82ad0fb5eeea0..5c8316189d7b4 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -1379,18 +1379,25 @@ object EliminateOuterJoin extends Rule[LogicalPlan] with PredicateHelper { */ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { /** - * Splits join condition expressions into three categories based on the attributes required - * to evaluate them. + * Splits join condition expressions or filter predicates (on a given join's output) into three + * categories based on the attributes required to evaluate them. Note that we explicitly exclude + * on-deterministic (i.e., stateful) condition expressions in canEvaluateInLeft or + * canEvaluateInRight to prevent pushing these predicates on either side of the join. * * @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth) */ private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = { + // Note: In order to ensure correctness, it's important to not change the relative ordering of + // any deterministic expression that follows a non-deterministic expression. To achieve this, + // we only consider pushing down those expressions that precede the first non-deterministic + // expression in the condition. + val (pushDownCandidates, containingNonDeterministic) = condition.span(_.deterministic) val (leftEvaluateCondition, rest) = - condition.partition(_.references subsetOf left.outputSet) + pushDownCandidates.partition(_.references.subsetOf(left.outputSet)) val (rightEvaluateCondition, commonCondition) = - rest.partition(_.references subsetOf right.outputSet) + rest.partition(expr => expr.references.subsetOf(right.outputSet)) - (leftEvaluateCondition, rightEvaluateCondition, commonCondition) + (leftEvaluateCondition, rightEvaluateCondition, commonCondition ++ containingNonDeterministic) } def apply(plan: LogicalPlan): LogicalPlan = plan transform { @@ -1441,7 +1448,7 @@ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { } // push down the join filter into sub query scanning if applicable - case f @ Join(left, right, joinType, joinCondition) => + case j @ Join(left, right, joinType, joinCondition) => val (leftJoinConditions, rightJoinConditions, commonJoinCondition) = split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right) @@ -1471,7 +1478,7 @@ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And) Join(newLeft, newRight, LeftOuter, newJoinCond) - case FullOuter => f + case FullOuter => j case NaturalJoin(_) => sys.error("Untransformed NaturalJoin node") case UsingJoin(_, _) => sys.error("Untransformed Using join node") } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala index 9f25e9d8e9ac8..55836f96f7e0e 100644 --- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala @@ -987,4 +987,18 @@ class FilterPushdownSuite extends PlanTest { comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer) } + + test("join condition pushdown: deterministic and non-deterministic") { + val x = testRelation.subquery('x) + val y = testRelation.subquery('y) + + // Verify that all conditions preceding the first non-deterministic condition are pushed down + // by the optimizer and others are not. + val originalQuery = x.join(y, condition = Some("x.a".attr === 5 && "y.a".attr === 5 && + "x.a".attr === Rand(10) && "y.b".attr === 5)) + val correctAnswer = x.where("x.a".attr === 5).join(y.where("y.a".attr === 5), + condition = Some("x.a".attr === Rand(10) && "y.b".attr === 5)) + + comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer.analyze) + } } From a6bca3ad02bd896e7637dec37ed8ba1a7306b58c Mon Sep 17 00:00:00 2001 From: Yin Huai Date: Fri, 26 Aug 2016 19:38:52 -0700 Subject: [PATCH 117/270] [SPARK-17266][TEST] Add empty strings to the regressionTests of PrefixComparatorsSuite ## What changes were proposed in this pull request? This PR adds a regression test to PrefixComparatorsSuite's "String prefix comparator" because this test failed on jenkins once (https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-2.4/1620/testReport/junit/org.apache.spark.util.collection.unsafe.sort/PrefixComparatorsSuite/String_prefix_comparator/). I could not reproduce it locally. But, let's this test case in the regressionTests. Author: Yin Huai Closes #14837 from yhuai/SPARK-17266. --- .../util/collection/unsafe/sort/PrefixComparatorsSuite.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala index b4083230b4ac5..5180c58a566cb 100644 --- a/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala @@ -50,7 +50,8 @@ class PrefixComparatorsSuite extends SparkFunSuite with PropertyChecks { ("s1", "s2"), ("abc", "世界"), ("你好", "世界"), - ("你好123", "你好122") + ("你好123", "你好122"), + ("", "") ) // scalastyle:on From cc0caa690b32246b076c699ea3f8d8a84797fb94 Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Fri, 26 Aug 2016 21:41:58 -0700 Subject: [PATCH 118/270] [SPARK-17270][SQL] Move object optimization rules into its own file ## What changes were proposed in this pull request? As part of breaking Optimizer.scala apart, this patch moves various Dataset object optimization rules into a single file. I'm submitting separate pull requests so we can more easily merge this in branch-2.0 to simplify optimizer backports. ## How was this patch tested? This should be covered by existing tests. Author: Reynold Xin Closes #14839 from rxin/SPARK-17270. 
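For context only (not part of the patch), the relocated rules target typed Dataset pipelines like the hypothetical one below: `EliminateSerialization` removes the redundant serialize/deserialize round trip between the object-level `map` and the typed filters, and `CombineTypedFilters` merges the two adjacent filter functions into one conjunctive predicate. Names are illustrative.

```scala
import org.apache.spark.sql.SparkSession

object ObjectOptimizationExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("object-optimization-rules")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    val ds = Seq(1, 2, 3, 4).toDS()

    // Back-to-back object-level operations: map produces objects, the two typed
    // filters consume them. Without the rules being moved here, every step would
    // serialize to InternalRow and deserialize back again.
    val result = ds.map(_ + 1).filter(_ % 2 == 0).filter(_ > 2)

    // The optimized plan should show the filters combined and applied directly to
    // the mapped objects, without an extra serialize/deserialize pair in between.
    result.explain(true)
    result.show()

    spark.stop()
  }
}
```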
--- .../sql/catalyst/optimizer/Optimizer.scala | 71 -------------- .../sql/catalyst/optimizer/objects.scala | 98 +++++++++++++++++++ 2 files changed, 98 insertions(+), 71 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/objects.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 5c8316189d7b4..7bbcd742b587b 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -201,43 +201,6 @@ object RemoveAliasOnlyProject extends Rule[LogicalPlan] { } } -/** - * Removes cases where we are unnecessarily going between the object and serialized (InternalRow) - * representation of data item. For example back to back map operations. - */ -object EliminateSerialization extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case d @ DeserializeToObject(_, _, s: SerializeFromObject) - if d.outputObjAttr.dataType == s.inputObjAttr.dataType => - // Adds an extra Project here, to preserve the output expr id of `DeserializeToObject`. - // We will remove it later in RemoveAliasOnlyProject rule. - val objAttr = Alias(s.inputObjAttr, s.inputObjAttr.name)(exprId = d.outputObjAttr.exprId) - Project(objAttr :: Nil, s.child) - - case a @ AppendColumns(_, _, _, _, _, s: SerializeFromObject) - if a.deserializer.dataType == s.inputObjAttr.dataType => - AppendColumnsWithObject(a.func, s.serializer, a.serializer, s.child) - - // If there is a `SerializeFromObject` under typed filter and its input object type is same with - // the typed filter's deserializer, we can convert typed filter to normal filter without - // deserialization in condition, and push it down through `SerializeFromObject`. - // e.g. `ds.map(...).filter(...)` can be optimized by this rule to save extra deserialization, - // but `ds.map(...).as[AnotherType].filter(...)` can not be optimized. - case f @ TypedFilter(_, _, _, _, s: SerializeFromObject) - if f.deserializer.dataType == s.inputObjAttr.dataType => - s.copy(child = f.withObjectProducerChild(s.child)) - - // If there is a `DeserializeToObject` upon typed filter and its output object type is same with - // the typed filter's deserializer, we can convert typed filter to normal filter without - // deserialization in condition, and pull it up through `DeserializeToObject`. - // e.g. `ds.filter(...).map(...)` can be optimized by this rule to save extra deserialization, - // but `ds.filter(...).as[AnotherType].map(...)` can not be optimized. - case d @ DeserializeToObject(_, _, f: TypedFilter) - if d.outputObjAttr.dataType == f.deserializer.dataType => - f.withObjectProducerChild(d.copy(child = f.child)) - } -} - /** * Pushes down [[LocalLimit]] beneath UNION ALL and beneath the streamed inputs of outer joins. */ @@ -1713,40 +1676,6 @@ case class GetCurrentDatabase(sessionCatalog: SessionCatalog) extends Rule[Logic } } -/** - * Combines two adjacent [[TypedFilter]]s, which operate on same type object in condition, into one, - * mering the filter functions into one conjunctive function. 
- */ -object CombineTypedFilters extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case t1 @ TypedFilter(_, _, _, _, t2 @ TypedFilter(_, _, _, _, child)) - if t1.deserializer.dataType == t2.deserializer.dataType => - TypedFilter( - combineFilterFunction(t2.func, t1.func), - t1.argumentClass, - t1.argumentSchema, - t1.deserializer, - child) - } - - private def combineFilterFunction(func1: AnyRef, func2: AnyRef): Any => Boolean = { - (func1, func2) match { - case (f1: FilterFunction[_], f2: FilterFunction[_]) => - input => f1.asInstanceOf[FilterFunction[Any]].call(input) && - f2.asInstanceOf[FilterFunction[Any]].call(input) - case (f1: FilterFunction[_], f2) => - input => f1.asInstanceOf[FilterFunction[Any]].call(input) && - f2.asInstanceOf[Any => Boolean](input) - case (f1, f2: FilterFunction[_]) => - input => f1.asInstanceOf[Any => Boolean].apply(input) && - f2.asInstanceOf[FilterFunction[Any]].call(input) - case (f1, f2) => - input => f1.asInstanceOf[Any => Boolean].apply(input) && - f2.asInstanceOf[Any => Boolean].apply(input) - } - } -} - /** * This rule rewrites predicate sub-queries into left semi/anti joins. The following predicates * are supported: diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/objects.scala new file mode 100644 index 0000000000000..174d546e22809 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/objects.scala @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import org.apache.spark.api.java.function.FilterFunction +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules._ + +/* + * This file defines optimization rules related to object manipulation (for the Dataset API). + */ + +/** + * Removes cases where we are unnecessarily going between the object and serialized (InternalRow) + * representation of data item. For example back to back map operations. + */ +object EliminateSerialization extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case d @ DeserializeToObject(_, _, s: SerializeFromObject) + if d.outputObjAttr.dataType == s.inputObjAttr.dataType => + // Adds an extra Project here, to preserve the output expr id of `DeserializeToObject`. + // We will remove it later in RemoveAliasOnlyProject rule. 
+ val objAttr = Alias(s.inputObjAttr, s.inputObjAttr.name)(exprId = d.outputObjAttr.exprId) + Project(objAttr :: Nil, s.child) + + case a @ AppendColumns(_, _, _, _, _, s: SerializeFromObject) + if a.deserializer.dataType == s.inputObjAttr.dataType => + AppendColumnsWithObject(a.func, s.serializer, a.serializer, s.child) + + // If there is a `SerializeFromObject` under typed filter and its input object type is same with + // the typed filter's deserializer, we can convert typed filter to normal filter without + // deserialization in condition, and push it down through `SerializeFromObject`. + // e.g. `ds.map(...).filter(...)` can be optimized by this rule to save extra deserialization, + // but `ds.map(...).as[AnotherType].filter(...)` can not be optimized. + case f @ TypedFilter(_, _, _, _, s: SerializeFromObject) + if f.deserializer.dataType == s.inputObjAttr.dataType => + s.copy(child = f.withObjectProducerChild(s.child)) + + // If there is a `DeserializeToObject` upon typed filter and its output object type is same with + // the typed filter's deserializer, we can convert typed filter to normal filter without + // deserialization in condition, and pull it up through `DeserializeToObject`. + // e.g. `ds.filter(...).map(...)` can be optimized by this rule to save extra deserialization, + // but `ds.filter(...).as[AnotherType].map(...)` can not be optimized. + case d @ DeserializeToObject(_, _, f: TypedFilter) + if d.outputObjAttr.dataType == f.deserializer.dataType => + f.withObjectProducerChild(d.copy(child = f.child)) + } +} + +/** + * Combines two adjacent [[TypedFilter]]s, which operate on same type object in condition, into one, + * mering the filter functions into one conjunctive function. + */ +object CombineTypedFilters extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case t1 @ TypedFilter(_, _, _, _, t2 @ TypedFilter(_, _, _, _, child)) + if t1.deserializer.dataType == t2.deserializer.dataType => + TypedFilter( + combineFilterFunction(t2.func, t1.func), + t1.argumentClass, + t1.argumentSchema, + t1.deserializer, + child) + } + + private def combineFilterFunction(func1: AnyRef, func2: AnyRef): Any => Boolean = { + (func1, func2) match { + case (f1: FilterFunction[_], f2: FilterFunction[_]) => + input => f1.asInstanceOf[FilterFunction[Any]].call(input) && + f2.asInstanceOf[FilterFunction[Any]].call(input) + case (f1: FilterFunction[_], f2) => + input => f1.asInstanceOf[FilterFunction[Any]].call(input) && + f2.asInstanceOf[Any => Boolean](input) + case (f1, f2: FilterFunction[_]) => + input => f1.asInstanceOf[Any => Boolean].apply(input) && + f2.asInstanceOf[FilterFunction[Any]].call(input) + case (f1, f2) => + input => f1.asInstanceOf[Any => Boolean].apply(input) && + f2.asInstanceOf[Any => Boolean].apply(input) + } + } +} From dcefac438788c51d84641bfbc505efe095731a39 Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Fri, 26 Aug 2016 22:10:28 -0700 Subject: [PATCH 119/270] [SPARK-17269][SQL] Move finish analysis optimization stage into its own file ## What changes were proposed in this pull request? As part of breaking Optimizer.scala apart, this patch moves various finish analysis optimization stage rules into a single file. I'm submitting separate pull requests so we can more easily merge this in branch-2.0 to simplify optimizer backports. ## How was this patch tested? This should be covered by existing tests. Author: Reynold Xin Closes #14838 from rxin/SPARK-17269. 
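For context only (again not part of the patch), a hypothetical query exercising the relocated rules: `ReplaceExpressions` rewrites `nvl` to `coalesce` (as the rule's doc comment notes), `ComputeCurrentTime` pins both `current_timestamp()` calls to a single value for the query, and `GetCurrentDatabase` folds `current_database()` into a string literal. The table and column names are invented for this sketch.

```scala
import org.apache.spark.sql.SparkSession

object FinishAnalysisExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("finish-analysis-rules")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    Seq((1, "alice"), (2, null)).toDF("id", "name").createOrReplaceTempView("people")

    // nvl is replaced by coalesce, the two timestamps share one computed value,
    // and current_database() becomes a literal in the optimized plan.
    val df = spark.sql(
      """SELECT id,
        |       nvl(name, 'unknown')  AS name,
        |       current_timestamp()   AS started_at,
        |       current_timestamp()   AS also_started_at,
        |       current_database()    AS db
        |FROM people""".stripMargin)

    df.explain(true)
    df.show(truncate = false)

    spark.stop()
  }
}
```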
--- .../sql/catalyst/optimizer/Optimizer.scala | 38 ----------- .../RewriteDistinctAggregates.scala | 2 +- .../catalyst/optimizer/finishAnalysis.scala | 65 +++++++++++++++++++ 3 files changed, 66 insertions(+), 39 deletions(-) rename sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/{analysis => optimizer}/RewriteDistinctAggregates.scala (99%) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/finishAnalysis.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 7bbcd742b587b..d055bc3d9b25f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -1638,44 +1638,6 @@ object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] { } } -/** - * Finds all [[RuntimeReplaceable]] expressions and replace them with the expressions that can - * be evaluated. This is mainly used to provide compatibility with other databases. - * For example, we use this to support "nvl" by replacing it with "coalesce". - */ -object ReplaceExpressions extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { - case e: RuntimeReplaceable => e.replaced - } -} - -/** - * Computes the current date and time to make sure we return the same result in a single query. - */ -object ComputeCurrentTime extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = { - val dateExpr = CurrentDate() - val timeExpr = CurrentTimestamp() - val currentDate = Literal.create(dateExpr.eval(EmptyRow), dateExpr.dataType) - val currentTime = Literal.create(timeExpr.eval(EmptyRow), timeExpr.dataType) - - plan transformAllExpressions { - case CurrentDate() => currentDate - case CurrentTimestamp() => currentTime - } - } -} - -/** Replaces the expression of CurrentDatabase with the current database name. */ -case class GetCurrentDatabase(sessionCatalog: SessionCatalog) extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = { - plan transformAllExpressions { - case CurrentDatabase() => - Literal.create(sessionCatalog.getCurrentDatabase, StringType) - } - } -} - /** * This rule rewrites predicate sub-queries into left semi/anti joins. The following predicates * are supported: diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/RewriteDistinctAggregates.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteDistinctAggregates.scala similarity index 99% rename from sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/RewriteDistinctAggregates.scala rename to sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteDistinctAggregates.scala index 8afd28dbba5c2..0f43e7bb88733 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/RewriteDistinctAggregates.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteDistinctAggregates.scala @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.spark.sql.catalyst.analysis +package org.apache.spark.sql.catalyst.optimizer import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, AggregateFunction, Complete} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/finishAnalysis.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/finishAnalysis.scala new file mode 100644 index 0000000000000..7c667315870f5 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/finishAnalysis.scala @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import org.apache.spark.sql.catalyst.catalog.SessionCatalog +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules._ +import org.apache.spark.sql.types._ + + +/** + * Finds all [[RuntimeReplaceable]] expressions and replace them with the expressions that can + * be evaluated. This is mainly used to provide compatibility with other databases. + * For example, we use this to support "nvl" by replacing it with "coalesce". + */ +object ReplaceExpressions extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { + case e: RuntimeReplaceable => e.replaced + } +} + + +/** + * Computes the current date and time to make sure we return the same result in a single query. + */ +object ComputeCurrentTime extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = { + val dateExpr = CurrentDate() + val timeExpr = CurrentTimestamp() + val currentDate = Literal.create(dateExpr.eval(EmptyRow), dateExpr.dataType) + val currentTime = Literal.create(timeExpr.eval(EmptyRow), timeExpr.dataType) + + plan transformAllExpressions { + case CurrentDate() => currentDate + case CurrentTimestamp() => currentTime + } + } +} + + +/** Replaces the expression of CurrentDatabase with the current database name. */ +case class GetCurrentDatabase(sessionCatalog: SessionCatalog) extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = { + plan transformAllExpressions { + case CurrentDatabase() => + Literal.create(sessionCatalog.getCurrentDatabase, StringType) + } + } +} From 0243b328736f83faea5f83d18c4d331890ed8e81 Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Sat, 27 Aug 2016 00:32:57 -0700 Subject: [PATCH 120/270] [SPARK-17272][SQL] Move subquery optimizer rules into its own file ## What changes were proposed in this pull request? As part of breaking Optimizer.scala apart, this patch moves various subquery rules into a single file. ## How was this patch tested? 
This should be covered by existing tests. Author: Reynold Xin Closes #14844 from rxin/SPARK-17272. --- .../sql/catalyst/optimizer/Optimizer.scala | 323 ---------------- .../sql/catalyst/optimizer/subquery.scala | 356 ++++++++++++++++++ 2 files changed, 356 insertions(+), 323 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index d055bc3d9b25f..8a503689801b1 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -1637,326 +1637,3 @@ object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] { a.copy(groupingExpressions = newGrouping) } } - -/** - * This rule rewrites predicate sub-queries into left semi/anti joins. The following predicates - * are supported: - * a. EXISTS/NOT EXISTS will be rewritten as semi/anti join, unresolved conditions in Filter - * will be pulled out as the join conditions. - * b. IN/NOT IN will be rewritten as semi/anti join, unresolved conditions in the Filter will - * be pulled out as join conditions, value = selected column will also be used as join - * condition. - */ -object RewritePredicateSubquery extends Rule[LogicalPlan] with PredicateHelper { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case Filter(condition, child) => - val (withSubquery, withoutSubquery) = - splitConjunctivePredicates(condition).partition(PredicateSubquery.hasPredicateSubquery) - - // Construct the pruned filter condition. - val newFilter: LogicalPlan = withoutSubquery match { - case Nil => child - case conditions => Filter(conditions.reduce(And), child) - } - - // Filter the plan by applying left semi and left anti joins. - withSubquery.foldLeft(newFilter) { - case (p, PredicateSubquery(sub, conditions, _, _)) => - val (joinCond, outerPlan) = rewriteExistentialExpr(conditions, p) - Join(outerPlan, sub, LeftSemi, joinCond) - case (p, Not(PredicateSubquery(sub, conditions, false, _))) => - val (joinCond, outerPlan) = rewriteExistentialExpr(conditions, p) - Join(outerPlan, sub, LeftAnti, joinCond) - case (p, Not(PredicateSubquery(sub, conditions, true, _))) => - // This is a NULL-aware (left) anti join (NAAJ) e.g. col NOT IN expr - // Construct the condition. A NULL in one of the conditions is regarded as a positive - // result; such a row will be filtered out by the Anti-Join operator. - - // Note that will almost certainly be planned as a Broadcast Nested Loop join. - // Use EXISTS if performance matters to you. - val (joinCond, outerPlan) = rewriteExistentialExpr(conditions, p) - val anyNull = splitConjunctivePredicates(joinCond.get).map(IsNull).reduceLeft(Or) - Join(outerPlan, sub, LeftAnti, Option(Or(anyNull, joinCond.get))) - case (p, predicate) => - val (newCond, inputPlan) = rewriteExistentialExpr(Seq(predicate), p) - Project(p.output, Filter(newCond.get, inputPlan)) - } - } - - /** - * Given a predicate expression and an input plan, it rewrites - * any embedded existential sub-query into an existential join. - * It returns the rewritten expression together with the updated plan. - * Currently, it does not support null-aware joins. Embedded NOT IN predicates - * are blocked in the Analyzer. 
- */ - private def rewriteExistentialExpr( - exprs: Seq[Expression], - plan: LogicalPlan): (Option[Expression], LogicalPlan) = { - var newPlan = plan - val newExprs = exprs.map { e => - e transformUp { - case PredicateSubquery(sub, conditions, nullAware, _) => - // TODO: support null-aware join - val exists = AttributeReference("exists", BooleanType, nullable = false)() - newPlan = Join(newPlan, sub, ExistenceJoin(exists), conditions.reduceLeftOption(And)) - exists - } - } - (newExprs.reduceOption(And), newPlan) - } -} - -/** - * This rule rewrites correlated [[ScalarSubquery]] expressions into LEFT OUTER joins. - */ -object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { - /** - * Extract all correlated scalar subqueries from an expression. The subqueries are collected using - * the given collector. The expression is rewritten and returned. - */ - private def extractCorrelatedScalarSubqueries[E <: Expression]( - expression: E, - subqueries: ArrayBuffer[ScalarSubquery]): E = { - val newExpression = expression transform { - case s: ScalarSubquery if s.children.nonEmpty => - subqueries += s - s.plan.output.head - } - newExpression.asInstanceOf[E] - } - - /** - * Statically evaluate an expression containing zero or more placeholders, given a set - * of bindings for placeholder values. - */ - private def evalExpr(expr: Expression, bindings: Map[ExprId, Option[Any]]) : Option[Any] = { - val rewrittenExpr = expr transform { - case r: AttributeReference => - bindings(r.exprId) match { - case Some(v) => Literal.create(v, r.dataType) - case None => Literal.default(NullType) - } - } - Option(rewrittenExpr.eval()) - } - - /** - * Statically evaluate an expression containing one or more aggregates on an empty input. - */ - private def evalAggOnZeroTups(expr: Expression) : Option[Any] = { - // AggregateExpressions are Unevaluable, so we need to replace all aggregates - // in the expression with the value they would return for zero input tuples. - // Also replace attribute refs (for example, for grouping columns) with NULL. - val rewrittenExpr = expr transform { - case a @ AggregateExpression(aggFunc, _, _, resultId) => - aggFunc.defaultResult.getOrElse(Literal.default(NullType)) - - case _: AttributeReference => Literal.default(NullType) - } - Option(rewrittenExpr.eval()) - } - - /** - * Statically evaluate a scalar subquery on an empty input. - * - * WARNING: This method only covers subqueries that pass the checks under - * [[org.apache.spark.sql.catalyst.analysis.CheckAnalysis]]. If the checks in - * CheckAnalysis become less restrictive, this method will need to change. - */ - private def evalSubqueryOnZeroTups(plan: LogicalPlan) : Option[Any] = { - // Inputs to this method will start with a chain of zero or more SubqueryAlias - // and Project operators, followed by an optional Filter, followed by an - // Aggregate. Traverse the operators recursively. 
- def evalPlan(lp : LogicalPlan) : Map[ExprId, Option[Any]] = lp match { - case SubqueryAlias(_, child, _) => evalPlan(child) - case Filter(condition, child) => - val bindings = evalPlan(child) - if (bindings.isEmpty) bindings - else { - val exprResult = evalExpr(condition, bindings).getOrElse(false) - .asInstanceOf[Boolean] - if (exprResult) bindings else Map.empty - } - - case Project(projectList, child) => - val bindings = evalPlan(child) - if (bindings.isEmpty) { - bindings - } else { - projectList.map(ne => (ne.exprId, evalExpr(ne, bindings))).toMap - } - - case Aggregate(_, aggExprs, _) => - // Some of the expressions under the Aggregate node are the join columns - // for joining with the outer query block. Fill those expressions in with - // nulls and statically evaluate the remainder. - aggExprs.map { - case ref: AttributeReference => (ref.exprId, None) - case alias @ Alias(_: AttributeReference, _) => (alias.exprId, None) - case ne => (ne.exprId, evalAggOnZeroTups(ne)) - }.toMap - - case _ => sys.error(s"Unexpected operator in scalar subquery: $lp") - } - - val resultMap = evalPlan(plan) - - // By convention, the scalar subquery result is the leftmost field. - resultMap(plan.output.head.exprId) - } - - /** - * Split the plan for a scalar subquery into the parts above the innermost query block - * (first part of returned value), the HAVING clause of the innermost query block - * (optional second part) and the parts below the HAVING CLAUSE (third part). - */ - private def splitSubquery(plan: LogicalPlan) : (Seq[LogicalPlan], Option[Filter], Aggregate) = { - val topPart = ArrayBuffer.empty[LogicalPlan] - var bottomPart: LogicalPlan = plan - while (true) { - bottomPart match { - case havingPart @ Filter(_, aggPart: Aggregate) => - return (topPart, Option(havingPart), aggPart) - - case aggPart: Aggregate => - // No HAVING clause - return (topPart, None, aggPart) - - case p @ Project(_, child) => - topPart += p - bottomPart = child - - case s @ SubqueryAlias(_, child, _) => - topPart += s - bottomPart = child - - case Filter(_, op) => - sys.error(s"Correlated subquery has unexpected operator $op below filter") - - case op @ _ => sys.error(s"Unexpected operator $op in correlated subquery") - } - } - - sys.error("This line should be unreachable") - } - - // Name of generated column used in rewrite below - val ALWAYS_TRUE_COLNAME = "alwaysTrue" - - /** - * Construct a new child plan by left joining the given subqueries to a base plan. - */ - private def constructLeftJoins( - child: LogicalPlan, - subqueries: ArrayBuffer[ScalarSubquery]): LogicalPlan = { - subqueries.foldLeft(child) { - case (currentChild, ScalarSubquery(query, conditions, _)) => - val origOutput = query.output.head - - val resultWithZeroTups = evalSubqueryOnZeroTups(query) - if (resultWithZeroTups.isEmpty) { - // CASE 1: Subquery guaranteed not to have the COUNT bug - Project( - currentChild.output :+ origOutput, - Join(currentChild, query, LeftOuter, conditions.reduceOption(And))) - } else { - // Subquery might have the COUNT bug. Add appropriate corrections. - val (topPart, havingNode, aggNode) = splitSubquery(query) - - // The next two cases add a leading column to the outer join input to make it - // possible to distinguish between the case when no tuples join and the case - // when the tuple that joins contains null values. - // The leading column always has the value TRUE. 
- val alwaysTrueExprId = NamedExpression.newExprId - val alwaysTrueExpr = Alias(Literal.TrueLiteral, - ALWAYS_TRUE_COLNAME)(exprId = alwaysTrueExprId) - val alwaysTrueRef = AttributeReference(ALWAYS_TRUE_COLNAME, - BooleanType)(exprId = alwaysTrueExprId) - - val aggValRef = query.output.head - - if (havingNode.isEmpty) { - // CASE 2: Subquery with no HAVING clause - Project( - currentChild.output :+ - Alias( - If(IsNull(alwaysTrueRef), - Literal.create(resultWithZeroTups.get, origOutput.dataType), - aggValRef), origOutput.name)(exprId = origOutput.exprId), - Join(currentChild, - Project(query.output :+ alwaysTrueExpr, query), - LeftOuter, conditions.reduceOption(And))) - - } else { - // CASE 3: Subquery with HAVING clause. Pull the HAVING clause above the join. - // Need to modify any operators below the join to pass through all columns - // referenced in the HAVING clause. - var subqueryRoot: UnaryNode = aggNode - val havingInputs: Seq[NamedExpression] = aggNode.output - - topPart.reverse.foreach { - case Project(projList, _) => - subqueryRoot = Project(projList ++ havingInputs, subqueryRoot) - case s @ SubqueryAlias(alias, _, None) => - subqueryRoot = SubqueryAlias(alias, subqueryRoot, None) - case op => sys.error(s"Unexpected operator $op in corelated subquery") - } - - // CASE WHEN alwayTrue IS NULL THEN resultOnZeroTups - // WHEN NOT (original HAVING clause expr) THEN CAST(null AS ) - // ELSE (aggregate value) END AS (original column name) - val caseExpr = Alias(CaseWhen(Seq( - (IsNull(alwaysTrueRef), Literal.create(resultWithZeroTups.get, origOutput.dataType)), - (Not(havingNode.get.condition), Literal.create(null, aggValRef.dataType))), - aggValRef), - origOutput.name)(exprId = origOutput.exprId) - - Project( - currentChild.output :+ caseExpr, - Join(currentChild, - Project(subqueryRoot.output :+ alwaysTrueExpr, subqueryRoot), - LeftOuter, conditions.reduceOption(And))) - - } - } - } - } - - /** - * Rewrite [[Filter]], [[Project]] and [[Aggregate]] plans containing correlated scalar - * subqueries. - */ - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case a @ Aggregate(grouping, expressions, child) => - val subqueries = ArrayBuffer.empty[ScalarSubquery] - val newExpressions = expressions.map(extractCorrelatedScalarSubqueries(_, subqueries)) - if (subqueries.nonEmpty) { - // We currently only allow correlated subqueries in an aggregate if they are part of the - // grouping expressions. As a result we need to replace all the scalar subqueries in the - // grouping expressions by their result. 
- val newGrouping = grouping.map { e => - subqueries.find(_.semanticEquals(e)).map(_.plan.output.head).getOrElse(e) - } - Aggregate(newGrouping, newExpressions, constructLeftJoins(child, subqueries)) - } else { - a - } - case p @ Project(expressions, child) => - val subqueries = ArrayBuffer.empty[ScalarSubquery] - val newExpressions = expressions.map(extractCorrelatedScalarSubqueries(_, subqueries)) - if (subqueries.nonEmpty) { - Project(newExpressions, constructLeftJoins(child, subqueries)) - } else { - p - } - case f @ Filter(condition, child) => - val subqueries = ArrayBuffer.empty[ScalarSubquery] - val newCondition = extractCorrelatedScalarSubqueries(condition, subqueries) - if (subqueries.nonEmpty) { - Project(f.output, Filter(newCondition, constructLeftJoins(child, subqueries))) - } else { - f - } - } -} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala new file mode 100644 index 0000000000000..f14aaab72a98f --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import scala.collection.mutable.ArrayBuffer + +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.expressions.aggregate._ +import org.apache.spark.sql.catalyst.plans._ +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules._ +import org.apache.spark.sql.types._ + +/* + * This file defines optimization rules related to subqueries. + */ + + +/** + * This rule rewrites predicate sub-queries into left semi/anti joins. The following predicates + * are supported: + * a. EXISTS/NOT EXISTS will be rewritten as semi/anti join, unresolved conditions in Filter + * will be pulled out as the join conditions. + * b. IN/NOT IN will be rewritten as semi/anti join, unresolved conditions in the Filter will + * be pulled out as join conditions, value = selected column will also be used as join + * condition. + */ +object RewritePredicateSubquery extends Rule[LogicalPlan] with PredicateHelper { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case Filter(condition, child) => + val (withSubquery, withoutSubquery) = + splitConjunctivePredicates(condition).partition(PredicateSubquery.hasPredicateSubquery) + + // Construct the pruned filter condition. + val newFilter: LogicalPlan = withoutSubquery match { + case Nil => child + case conditions => Filter(conditions.reduce(And), child) + } + + // Filter the plan by applying left semi and left anti joins. 
+ withSubquery.foldLeft(newFilter) { + case (p, PredicateSubquery(sub, conditions, _, _)) => + val (joinCond, outerPlan) = rewriteExistentialExpr(conditions, p) + Join(outerPlan, sub, LeftSemi, joinCond) + case (p, Not(PredicateSubquery(sub, conditions, false, _))) => + val (joinCond, outerPlan) = rewriteExistentialExpr(conditions, p) + Join(outerPlan, sub, LeftAnti, joinCond) + case (p, Not(PredicateSubquery(sub, conditions, true, _))) => + // This is a NULL-aware (left) anti join (NAAJ) e.g. col NOT IN expr + // Construct the condition. A NULL in one of the conditions is regarded as a positive + // result; such a row will be filtered out by the Anti-Join operator. + + // Note that will almost certainly be planned as a Broadcast Nested Loop join. + // Use EXISTS if performance matters to you. + val (joinCond, outerPlan) = rewriteExistentialExpr(conditions, p) + val anyNull = splitConjunctivePredicates(joinCond.get).map(IsNull).reduceLeft(Or) + Join(outerPlan, sub, LeftAnti, Option(Or(anyNull, joinCond.get))) + case (p, predicate) => + val (newCond, inputPlan) = rewriteExistentialExpr(Seq(predicate), p) + Project(p.output, Filter(newCond.get, inputPlan)) + } + } + + /** + * Given a predicate expression and an input plan, it rewrites + * any embedded existential sub-query into an existential join. + * It returns the rewritten expression together with the updated plan. + * Currently, it does not support null-aware joins. Embedded NOT IN predicates + * are blocked in the Analyzer. + */ + private def rewriteExistentialExpr( + exprs: Seq[Expression], + plan: LogicalPlan): (Option[Expression], LogicalPlan) = { + var newPlan = plan + val newExprs = exprs.map { e => + e transformUp { + case PredicateSubquery(sub, conditions, nullAware, _) => + // TODO: support null-aware join + val exists = AttributeReference("exists", BooleanType, nullable = false)() + newPlan = Join(newPlan, sub, ExistenceJoin(exists), conditions.reduceLeftOption(And)) + exists + } + } + (newExprs.reduceOption(And), newPlan) + } +} + + +/** + * This rule rewrites correlated [[ScalarSubquery]] expressions into LEFT OUTER joins. + */ +object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] { + /** + * Extract all correlated scalar subqueries from an expression. The subqueries are collected using + * the given collector. The expression is rewritten and returned. + */ + private def extractCorrelatedScalarSubqueries[E <: Expression]( + expression: E, + subqueries: ArrayBuffer[ScalarSubquery]): E = { + val newExpression = expression transform { + case s: ScalarSubquery if s.children.nonEmpty => + subqueries += s + s.plan.output.head + } + newExpression.asInstanceOf[E] + } + + /** + * Statically evaluate an expression containing zero or more placeholders, given a set + * of bindings for placeholder values. + */ + private def evalExpr(expr: Expression, bindings: Map[ExprId, Option[Any]]) : Option[Any] = { + val rewrittenExpr = expr transform { + case r: AttributeReference => + bindings(r.exprId) match { + case Some(v) => Literal.create(v, r.dataType) + case None => Literal.default(NullType) + } + } + Option(rewrittenExpr.eval()) + } + + /** + * Statically evaluate an expression containing one or more aggregates on an empty input. + */ + private def evalAggOnZeroTups(expr: Expression) : Option[Any] = { + // AggregateExpressions are Unevaluable, so we need to replace all aggregates + // in the expression with the value they would return for zero input tuples. 
+ // Also replace attribute refs (for example, for grouping columns) with NULL. + val rewrittenExpr = expr transform { + case a @ AggregateExpression(aggFunc, _, _, resultId) => + aggFunc.defaultResult.getOrElse(Literal.default(NullType)) + + case _: AttributeReference => Literal.default(NullType) + } + Option(rewrittenExpr.eval()) + } + + /** + * Statically evaluate a scalar subquery on an empty input. + * + * WARNING: This method only covers subqueries that pass the checks under + * [[org.apache.spark.sql.catalyst.analysis.CheckAnalysis]]. If the checks in + * CheckAnalysis become less restrictive, this method will need to change. + */ + private def evalSubqueryOnZeroTups(plan: LogicalPlan) : Option[Any] = { + // Inputs to this method will start with a chain of zero or more SubqueryAlias + // and Project operators, followed by an optional Filter, followed by an + // Aggregate. Traverse the operators recursively. + def evalPlan(lp : LogicalPlan) : Map[ExprId, Option[Any]] = lp match { + case SubqueryAlias(_, child, _) => evalPlan(child) + case Filter(condition, child) => + val bindings = evalPlan(child) + if (bindings.isEmpty) bindings + else { + val exprResult = evalExpr(condition, bindings).getOrElse(false) + .asInstanceOf[Boolean] + if (exprResult) bindings else Map.empty + } + + case Project(projectList, child) => + val bindings = evalPlan(child) + if (bindings.isEmpty) { + bindings + } else { + projectList.map(ne => (ne.exprId, evalExpr(ne, bindings))).toMap + } + + case Aggregate(_, aggExprs, _) => + // Some of the expressions under the Aggregate node are the join columns + // for joining with the outer query block. Fill those expressions in with + // nulls and statically evaluate the remainder. + aggExprs.map { + case ref: AttributeReference => (ref.exprId, None) + case alias @ Alias(_: AttributeReference, _) => (alias.exprId, None) + case ne => (ne.exprId, evalAggOnZeroTups(ne)) + }.toMap + + case _ => sys.error(s"Unexpected operator in scalar subquery: $lp") + } + + val resultMap = evalPlan(plan) + + // By convention, the scalar subquery result is the leftmost field. + resultMap(plan.output.head.exprId) + } + + /** + * Split the plan for a scalar subquery into the parts above the innermost query block + * (first part of returned value), the HAVING clause of the innermost query block + * (optional second part) and the parts below the HAVING CLAUSE (third part). + */ + private def splitSubquery(plan: LogicalPlan) : (Seq[LogicalPlan], Option[Filter], Aggregate) = { + val topPart = ArrayBuffer.empty[LogicalPlan] + var bottomPart: LogicalPlan = plan + while (true) { + bottomPart match { + case havingPart @ Filter(_, aggPart: Aggregate) => + return (topPart, Option(havingPart), aggPart) + + case aggPart: Aggregate => + // No HAVING clause + return (topPart, None, aggPart) + + case p @ Project(_, child) => + topPart += p + bottomPart = child + + case s @ SubqueryAlias(_, child, _) => + topPart += s + bottomPart = child + + case Filter(_, op) => + sys.error(s"Correlated subquery has unexpected operator $op below filter") + + case op @ _ => sys.error(s"Unexpected operator $op in correlated subquery") + } + } + + sys.error("This line should be unreachable") + } + + // Name of generated column used in rewrite below + val ALWAYS_TRUE_COLNAME = "alwaysTrue" + + /** + * Construct a new child plan by left joining the given subqueries to a base plan. 
+ */ + private def constructLeftJoins( + child: LogicalPlan, + subqueries: ArrayBuffer[ScalarSubquery]): LogicalPlan = { + subqueries.foldLeft(child) { + case (currentChild, ScalarSubquery(query, conditions, _)) => + val origOutput = query.output.head + + val resultWithZeroTups = evalSubqueryOnZeroTups(query) + if (resultWithZeroTups.isEmpty) { + // CASE 1: Subquery guaranteed not to have the COUNT bug + Project( + currentChild.output :+ origOutput, + Join(currentChild, query, LeftOuter, conditions.reduceOption(And))) + } else { + // Subquery might have the COUNT bug. Add appropriate corrections. + val (topPart, havingNode, aggNode) = splitSubquery(query) + + // The next two cases add a leading column to the outer join input to make it + // possible to distinguish between the case when no tuples join and the case + // when the tuple that joins contains null values. + // The leading column always has the value TRUE. + val alwaysTrueExprId = NamedExpression.newExprId + val alwaysTrueExpr = Alias(Literal.TrueLiteral, + ALWAYS_TRUE_COLNAME)(exprId = alwaysTrueExprId) + val alwaysTrueRef = AttributeReference(ALWAYS_TRUE_COLNAME, + BooleanType)(exprId = alwaysTrueExprId) + + val aggValRef = query.output.head + + if (havingNode.isEmpty) { + // CASE 2: Subquery with no HAVING clause + Project( + currentChild.output :+ + Alias( + If(IsNull(alwaysTrueRef), + Literal.create(resultWithZeroTups.get, origOutput.dataType), + aggValRef), origOutput.name)(exprId = origOutput.exprId), + Join(currentChild, + Project(query.output :+ alwaysTrueExpr, query), + LeftOuter, conditions.reduceOption(And))) + + } else { + // CASE 3: Subquery with HAVING clause. Pull the HAVING clause above the join. + // Need to modify any operators below the join to pass through all columns + // referenced in the HAVING clause. + var subqueryRoot: UnaryNode = aggNode + val havingInputs: Seq[NamedExpression] = aggNode.output + + topPart.reverse.foreach { + case Project(projList, _) => + subqueryRoot = Project(projList ++ havingInputs, subqueryRoot) + case s @ SubqueryAlias(alias, _, None) => + subqueryRoot = SubqueryAlias(alias, subqueryRoot, None) + case op => sys.error(s"Unexpected operator $op in corelated subquery") + } + + // CASE WHEN alwayTrue IS NULL THEN resultOnZeroTups + // WHEN NOT (original HAVING clause expr) THEN CAST(null AS ) + // ELSE (aggregate value) END AS (original column name) + val caseExpr = Alias(CaseWhen(Seq( + (IsNull(alwaysTrueRef), Literal.create(resultWithZeroTups.get, origOutput.dataType)), + (Not(havingNode.get.condition), Literal.create(null, aggValRef.dataType))), + aggValRef), + origOutput.name)(exprId = origOutput.exprId) + + Project( + currentChild.output :+ caseExpr, + Join(currentChild, + Project(subqueryRoot.output :+ alwaysTrueExpr, subqueryRoot), + LeftOuter, conditions.reduceOption(And))) + + } + } + } + } + + /** + * Rewrite [[Filter]], [[Project]] and [[Aggregate]] plans containing correlated scalar + * subqueries. + */ + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case a @ Aggregate(grouping, expressions, child) => + val subqueries = ArrayBuffer.empty[ScalarSubquery] + val newExpressions = expressions.map(extractCorrelatedScalarSubqueries(_, subqueries)) + if (subqueries.nonEmpty) { + // We currently only allow correlated subqueries in an aggregate if they are part of the + // grouping expressions. As a result we need to replace all the scalar subqueries in the + // grouping expressions by their result. 
+ val newGrouping = grouping.map { e => + subqueries.find(_.semanticEquals(e)).map(_.plan.output.head).getOrElse(e) + } + Aggregate(newGrouping, newExpressions, constructLeftJoins(child, subqueries)) + } else { + a + } + case p @ Project(expressions, child) => + val subqueries = ArrayBuffer.empty[ScalarSubquery] + val newExpressions = expressions.map(extractCorrelatedScalarSubqueries(_, subqueries)) + if (subqueries.nonEmpty) { + Project(newExpressions, constructLeftJoins(child, subqueries)) + } else { + p + } + case f @ Filter(condition, child) => + val subqueries = ArrayBuffer.empty[ScalarSubquery] + val newCondition = extractCorrelatedScalarSubqueries(condition, subqueries) + if (subqueries.nonEmpty) { + Project(f.output, Filter(newCondition, constructLeftJoins(child, subqueries))) + } else { + f + } + } +} From 5aad4509c15e131948d387157ecf56af1a705e19 Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Sat, 27 Aug 2016 00:34:35 -0700 Subject: [PATCH 121/270] [SPARK-17273][SQL] Move expression optimizer rules into a separate file ## What changes were proposed in this pull request? As part of breaking Optimizer.scala apart, this patch moves various expression optimization rules into a single file. ## How was this patch tested? This should be covered by existing tests. Author: Reynold Xin Closes #14845 from rxin/SPARK-17273. --- .../sql/catalyst/optimizer/Optimizer.scala | 461 +--------------- .../sql/catalyst/optimizer/expressions.scala | 506 ++++++++++++++++++ 2 files changed, 507 insertions(+), 460 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 8a503689801b1..17cab18ff8e9e 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -533,176 +533,6 @@ object CollapseRepartition extends Rule[LogicalPlan] { } } -/** - * Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition. - * For example, when the expression is just checking to see if a string starts with a given - * pattern. - */ -object LikeSimplification extends Rule[LogicalPlan] { - // if guards below protect from escapes on trailing %. - // Cases like "something\%" are not optimized, but this does not affect correctness. - private val startsWith = "([^_%]+)%".r - private val endsWith = "%([^_%]+)".r - private val startsAndEndsWith = "([^_%]+)%([^_%]+)".r - private val contains = "%([^_%]+)%".r - private val equalTo = "([^_%]*)".r - - def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { - case Like(input, Literal(pattern, StringType)) => - pattern.toString match { - case startsWith(prefix) if !prefix.endsWith("\\") => - StartsWith(input, Literal(prefix)) - case endsWith(postfix) => - EndsWith(input, Literal(postfix)) - // 'a%a' pattern is basically same with 'a%' && '%a'. - // However, the additional `Length` condition is required to prevent 'a' match 'a%a'. 
- case startsAndEndsWith(prefix, postfix) if !prefix.endsWith("\\") => - And(GreaterThanOrEqual(Length(input), Literal(prefix.size + postfix.size)), - And(StartsWith(input, Literal(prefix)), EndsWith(input, Literal(postfix)))) - case contains(infix) if !infix.endsWith("\\") => - Contains(input, Literal(infix)) - case equalTo(str) => - EqualTo(input, Literal(str)) - case _ => - Like(input, Literal.create(pattern, StringType)) - } - } -} - -/** - * Replaces [[Expression Expressions]] that can be statically evaluated with - * equivalent [[Literal]] values. This rule is more specific with - * Null value propagation from bottom to top of the expression tree. - */ -object NullPropagation extends Rule[LogicalPlan] { - private def nonNullLiteral(e: Expression): Boolean = e match { - case Literal(null, _) => false - case _ => true - } - - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsUp { - case e @ WindowExpression(Cast(Literal(0L, _), _), _) => - Cast(Literal(0L), e.dataType) - case e @ AggregateExpression(Count(exprs), _, _, _) if !exprs.exists(nonNullLiteral) => - Cast(Literal(0L), e.dataType) - case e @ IsNull(c) if !c.nullable => Literal.create(false, BooleanType) - case e @ IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType) - case e @ GetArrayItem(Literal(null, _), _) => Literal.create(null, e.dataType) - case e @ GetArrayItem(_, Literal(null, _)) => Literal.create(null, e.dataType) - case e @ GetMapValue(Literal(null, _), _) => Literal.create(null, e.dataType) - case e @ GetMapValue(_, Literal(null, _)) => Literal.create(null, e.dataType) - case e @ GetStructField(Literal(null, _), _, _) => Literal.create(null, e.dataType) - case e @ GetArrayStructFields(Literal(null, _), _, _, _, _) => - Literal.create(null, e.dataType) - case e @ EqualNullSafe(Literal(null, _), r) => IsNull(r) - case e @ EqualNullSafe(l, Literal(null, _)) => IsNull(l) - case ae @ AggregateExpression(Count(exprs), _, false, _) if !exprs.exists(_.nullable) => - // This rule should be only triggered when isDistinct field is false. - ae.copy(aggregateFunction = Count(Literal(1))) - - // For Coalesce, remove null literals. 
- case e @ Coalesce(children) => - val newChildren = children.filter(nonNullLiteral) - if (newChildren.isEmpty) { - Literal.create(null, e.dataType) - } else if (newChildren.length == 1) { - newChildren.head - } else { - Coalesce(newChildren) - } - - case e @ Substring(Literal(null, _), _, _) => Literal.create(null, e.dataType) - case e @ Substring(_, Literal(null, _), _) => Literal.create(null, e.dataType) - case e @ Substring(_, _, Literal(null, _)) => Literal.create(null, e.dataType) - - // Put exceptional cases above if any - case e @ BinaryArithmetic(Literal(null, _), _) => Literal.create(null, e.dataType) - case e @ BinaryArithmetic(_, Literal(null, _)) => Literal.create(null, e.dataType) - - case e @ BinaryComparison(Literal(null, _), _) => Literal.create(null, e.dataType) - case e @ BinaryComparison(_, Literal(null, _)) => Literal.create(null, e.dataType) - - case e: StringRegexExpression => e.children match { - case Literal(null, _) :: right :: Nil => Literal.create(null, e.dataType) - case left :: Literal(null, _) :: Nil => Literal.create(null, e.dataType) - case _ => e - } - - case e: StringPredicate => e.children match { - case Literal(null, _) :: right :: Nil => Literal.create(null, e.dataType) - case left :: Literal(null, _) :: Nil => Literal.create(null, e.dataType) - case _ => e - } - - // If the value expression is NULL then transform the In expression to - // Literal(null) - case In(Literal(null, _), list) => Literal.create(null, BooleanType) - - } - } -} - -/** - * Propagate foldable expressions: - * Replace attributes with aliases of the original foldable expressions if possible. - * Other optimizations will take advantage of the propagated foldable expressions. - * - * {{{ - * SELECT 1.0 x, 'abc' y, Now() z ORDER BY x, y, 3 - * ==> SELECT 1.0 x, 'abc' y, Now() z ORDER BY 1.0, 'abc', Now() - * }}} - */ -object FoldablePropagation extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = { - val foldableMap = AttributeMap(plan.flatMap { - case Project(projectList, _) => projectList.collect { - case a: Alias if a.child.foldable => (a.toAttribute, a) - } - case _ => Nil - }) - - if (foldableMap.isEmpty) { - plan - } else { - var stop = false - CleanupAliases(plan.transformUp { - case u: Union => - stop = true - u - case c: Command => - stop = true - c - // For outer join, although its output attributes are derived from its children, they are - // actually different attributes: the output of outer join is not always picked from its - // children, but can also be null. - // TODO(cloud-fan): It seems more reasonable to use new attributes as the output attributes - // of outer join. - case j @ Join(_, _, LeftOuter | RightOuter | FullOuter, _) => - stop = true - j - - // These 3 operators take attributes as constructor parameters, and these attributes - // can't be replaced by alias. - case m: MapGroups => - stop = true - m - case f: FlatMapGroupsInR => - stop = true - f - case c: CoGroup => - stop = true - c - - case p: LogicalPlan if !stop => p.transformExpressions { - case a: AttributeReference if foldableMap.contains(a) => - foldableMap(a) - } - }) - } - } -} - /** * Generate a list of additional filters from an operator's existing constraint but remove those * that are either already part of the operator's condition or are part of the operator's child @@ -742,261 +572,6 @@ object InferFiltersFromConstraints extends Rule[LogicalPlan] with PredicateHelpe } } -/** - * Reorder associative integral-type operators and fold all constants into one. 
- */ -object ReorderAssociativeOperator extends Rule[LogicalPlan] { - private def flattenAdd(e: Expression): Seq[Expression] = e match { - case Add(l, r) => flattenAdd(l) ++ flattenAdd(r) - case other => other :: Nil - } - - private def flattenMultiply(e: Expression): Seq[Expression] = e match { - case Multiply(l, r) => flattenMultiply(l) ++ flattenMultiply(r) - case other => other :: Nil - } - - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsDown { - case a: Add if a.deterministic && a.dataType.isInstanceOf[IntegralType] => - val (foldables, others) = flattenAdd(a).partition(_.foldable) - if (foldables.size > 1) { - val foldableExpr = foldables.reduce((x, y) => Add(x, y)) - val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType) - if (others.isEmpty) c else Add(others.reduce((x, y) => Add(x, y)), c) - } else { - a - } - case m: Multiply if m.deterministic && m.dataType.isInstanceOf[IntegralType] => - val (foldables, others) = flattenMultiply(m).partition(_.foldable) - if (foldables.size > 1) { - val foldableExpr = foldables.reduce((x, y) => Multiply(x, y)) - val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType) - if (others.isEmpty) c else Multiply(others.reduce((x, y) => Multiply(x, y)), c) - } else { - m - } - } - } -} - -/** - * Replaces [[Expression Expressions]] that can be statically evaluated with - * equivalent [[Literal]] values. - */ -object ConstantFolding extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsDown { - // Skip redundant folding of literals. This rule is technically not necessary. Placing this - // here avoids running the next rule for Literal values, which would create a new Literal - // object and running eval unnecessarily. - case l: Literal => l - - // Fold expressions that are foldable. - case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType) - } - } -} - -/** - * Optimize IN predicates: - * 1. Removes literal repetitions. - * 2. Replaces [[In (value, seq[Literal])]] with optimized version - * [[InSet (value, HashSet[Literal])]] which is much faster. - */ -case class OptimizeIn(conf: CatalystConf) extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsDown { - case expr @ In(v, list) if expr.inSetConvertible => - val newList = ExpressionSet(list).toSeq - if (newList.size > conf.optimizerInSetConversionThreshold) { - val hSet = newList.map(e => e.eval(EmptyRow)) - InSet(v, HashSet() ++ hSet) - } else if (newList.size < list.size) { - expr.copy(list = newList) - } else { // newList.length == list.length - expr - } - } - } -} - -/** - * Simplifies boolean expressions: - * 1. Simplifies expressions whose answer can be determined without evaluating both sides. - * 2. Eliminates / extracts common factors. - * 3. Merge same expressions - * 4. Removes `Not` operator. 
- */ -object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsUp { - case TrueLiteral And e => e - case e And TrueLiteral => e - case FalseLiteral Or e => e - case e Or FalseLiteral => e - - case FalseLiteral And _ => FalseLiteral - case _ And FalseLiteral => FalseLiteral - case TrueLiteral Or _ => TrueLiteral - case _ Or TrueLiteral => TrueLiteral - - case a And b if a.semanticEquals(b) => a - case a Or b if a.semanticEquals(b) => a - - case a And (b Or c) if Not(a).semanticEquals(b) => And(a, c) - case a And (b Or c) if Not(a).semanticEquals(c) => And(a, b) - case (a Or b) And c if a.semanticEquals(Not(c)) => And(b, c) - case (a Or b) And c if b.semanticEquals(Not(c)) => And(a, c) - - case a Or (b And c) if Not(a).semanticEquals(b) => Or(a, c) - case a Or (b And c) if Not(a).semanticEquals(c) => Or(a, b) - case (a And b) Or c if a.semanticEquals(Not(c)) => Or(b, c) - case (a And b) Or c if b.semanticEquals(Not(c)) => Or(a, c) - - // Common factor elimination for conjunction - case and @ (left And right) => - // 1. Split left and right to get the disjunctive predicates, - // i.e. lhs = (a, b), rhs = (a, c) - // 2. Find the common predict between lhsSet and rhsSet, i.e. common = (a) - // 3. Remove common predict from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c) - // 4. Apply the formula, get the optimized predicate: common || (ldiff && rdiff) - val lhs = splitDisjunctivePredicates(left) - val rhs = splitDisjunctivePredicates(right) - val common = lhs.filter(e => rhs.exists(e.semanticEquals)) - if (common.isEmpty) { - // No common factors, return the original predicate - and - } else { - val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals)) - val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals)) - if (ldiff.isEmpty || rdiff.isEmpty) { - // (a || b || c || ...) && (a || b) => (a || b) - common.reduce(Or) - } else { - // (a || b || c || ...) && (a || b || d || ...) => - // ((c || ...) && (d || ...)) || a || b - (common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or) - } - } - - // Common factor elimination for disjunction - case or @ (left Or right) => - // 1. Split left and right to get the conjunctive predicates, - // i.e. lhs = (a, b), rhs = (a, c) - // 2. Find the common predict between lhsSet and rhsSet, i.e. common = (a) - // 3. Remove common predict from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c) - // 4. Apply the formula, get the optimized predicate: common && (ldiff || rdiff) - val lhs = splitConjunctivePredicates(left) - val rhs = splitConjunctivePredicates(right) - val common = lhs.filter(e => rhs.exists(e.semanticEquals)) - if (common.isEmpty) { - // No common factors, return the original predicate - or - } else { - val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals)) - val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals)) - if (ldiff.isEmpty || rdiff.isEmpty) { - // (a && b) || (a && b && c && ...) => a && b - common.reduce(And) - } else { - // (a && b && c && ...) || (a && b && d && ...) => - // ((c && ...) 
|| (d && ...)) && a && b - (common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And) - } - } - - case Not(TrueLiteral) => FalseLiteral - case Not(FalseLiteral) => TrueLiteral - - case Not(a GreaterThan b) => LessThanOrEqual(a, b) - case Not(a GreaterThanOrEqual b) => LessThan(a, b) - - case Not(a LessThan b) => GreaterThanOrEqual(a, b) - case Not(a LessThanOrEqual b) => GreaterThan(a, b) - - case Not(a Or b) => And(Not(a), Not(b)) - case Not(a And b) => Or(Not(a), Not(b)) - - case Not(Not(e)) => e - } - } -} - -/** - * Simplifies binary comparisons with semantically-equal expressions: - * 1) Replace '<=>' with 'true' literal. - * 2) Replace '=', '<=', and '>=' with 'true' literal if both operands are non-nullable. - * 3) Replace '<' and '>' with 'false' literal if both operands are non-nullable. - */ -object SimplifyBinaryComparison extends Rule[LogicalPlan] with PredicateHelper { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsUp { - // True with equality - case a EqualNullSafe b if a.semanticEquals(b) => TrueLiteral - case a EqualTo b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral - case a GreaterThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) => - TrueLiteral - case a LessThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral - - // False with inequality - case a GreaterThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral - case a LessThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral - } - } -} - -/** - * Simplifies conditional expressions (if / case). - */ -object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper { - private def falseOrNullLiteral(e: Expression): Boolean = e match { - case FalseLiteral => true - case Literal(null, _) => true - case _ => false - } - - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsUp { - case If(TrueLiteral, trueValue, _) => trueValue - case If(FalseLiteral, _, falseValue) => falseValue - case If(Literal(null, _), _, falseValue) => falseValue - - case e @ CaseWhen(branches, elseValue) if branches.exists(x => falseOrNullLiteral(x._1)) => - // If there are branches that are always false, remove them. - // If there are no more branches left, just use the else value. - // Note that these two are handled together here in a single case statement because - // otherwise we cannot determine the data type for the elseValue if it is None (i.e. null). - val newBranches = branches.filter(x => !falseOrNullLiteral(x._1)) - if (newBranches.isEmpty) { - elseValue.getOrElse(Literal.create(null, e.dataType)) - } else { - e.copy(branches = newBranches) - } - - case e @ CaseWhen(branches, _) if branches.headOption.map(_._1) == Some(TrueLiteral) => - // If the first branch is a true literal, remove the entire CaseWhen and use the value - // from that. Note that CaseWhen.branches should never be empty, and as a result the - // headOption (rather than head) added above is just an extra (and unnecessary) safeguard. - branches.head._2 - } - } -} - -/** - * Optimizes expressions by replacing according to CodeGen configuration. 
- */ -case class OptimizeCodegen(conf: CatalystConf) extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { - case e: CaseWhen if canCodegen(e) => e.toCodegen() - } - - private def canCodegen(e: CaseWhen): Boolean = { - val numBranches = e.branches.size + e.elseValue.size - numBranches <= conf.maxCaseBranchesForCodegen - } -} - /** * Combines all adjacent [[Union]] operators into a single [[Union]]. */ @@ -1026,7 +601,7 @@ object CombineFilters extends Rule[LogicalPlan] with PredicateHelper { /** * Removes no-op SortOrder from Sort */ -object EliminateSorts extends Rule[LogicalPlan] { +object EliminateSorts extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case s @ Sort(orders, _, child) if orders.isEmpty || orders.exists(_.child.foldable) => val newOrders = orders.filterNot(_.child.foldable) @@ -1448,25 +1023,6 @@ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { } } -/** - * Removes [[Cast Casts]] that are unnecessary because the input is already the correct type. - */ -object SimplifyCasts extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { - case Cast(e, dataType) if e.dataType == dataType => e - } -} - -/** - * Removes nodes that are not necessary. - */ -object RemoveDispensableExpressions extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { - case UnaryPositive(child) => child - case PromotePrecision(child) => child - } -} - /** * Combines two adjacent [[Limit]] operators into one, merging the * expressions into one single expression. @@ -1482,21 +1038,6 @@ object CombineLimits extends Rule[LogicalPlan] { } } -/** - * Removes the inner case conversion expressions that are unnecessary because - * the inner conversion is overwritten by the outer one. - */ -object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] { - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsUp { - case Upper(Upper(child)) => Upper(child) - case Upper(Lower(child)) => Upper(child) - case Lower(Upper(child)) => Lower(child) - case Lower(Lower(child)) => Lower(child) - } - } -} - /** * Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values. * diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala new file mode 100644 index 0000000000000..74dfd10189d81 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala @@ -0,0 +1,506 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import scala.collection.immutable.HashSet + +import org.apache.spark.sql.catalyst.CatalystConf +import org.apache.spark.sql.catalyst.analysis._ +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.expressions.aggregate._ +import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral} +import org.apache.spark.sql.catalyst.plans._ +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules._ +import org.apache.spark.sql.types._ + +/* + * Optimization rules defined in this file should not affect the structure of the logical plan. + */ + + +/** + * Replaces [[Expression Expressions]] that can be statically evaluated with + * equivalent [[Literal]] values. + */ +object ConstantFolding extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsDown { + // Skip redundant folding of literals. This rule is technically not necessary. Placing this + // here avoids running the next rule for Literal values, which would create a new Literal + // object and running eval unnecessarily. + case l: Literal => l + + // Fold expressions that are foldable. + case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType) + } + } +} + + +/** + * Reorder associative integral-type operators and fold all constants into one. + */ +object ReorderAssociativeOperator extends Rule[LogicalPlan] { + private def flattenAdd(e: Expression): Seq[Expression] = e match { + case Add(l, r) => flattenAdd(l) ++ flattenAdd(r) + case other => other :: Nil + } + + private def flattenMultiply(e: Expression): Seq[Expression] = e match { + case Multiply(l, r) => flattenMultiply(l) ++ flattenMultiply(r) + case other => other :: Nil + } + + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsDown { + case a: Add if a.deterministic && a.dataType.isInstanceOf[IntegralType] => + val (foldables, others) = flattenAdd(a).partition(_.foldable) + if (foldables.size > 1) { + val foldableExpr = foldables.reduce((x, y) => Add(x, y)) + val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType) + if (others.isEmpty) c else Add(others.reduce((x, y) => Add(x, y)), c) + } else { + a + } + case m: Multiply if m.deterministic && m.dataType.isInstanceOf[IntegralType] => + val (foldables, others) = flattenMultiply(m).partition(_.foldable) + if (foldables.size > 1) { + val foldableExpr = foldables.reduce((x, y) => Multiply(x, y)) + val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType) + if (others.isEmpty) c else Multiply(others.reduce((x, y) => Multiply(x, y)), c) + } else { + m + } + } + } +} + + +/** + * Optimize IN predicates: + * 1. Removes literal repetitions. + * 2. Replaces [[In (value, seq[Literal])]] with optimized version + * [[InSet (value, HashSet[Literal])]] which is much faster. 
+ */ +case class OptimizeIn(conf: CatalystConf) extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsDown { + case expr @ In(v, list) if expr.inSetConvertible => + val newList = ExpressionSet(list).toSeq + if (newList.size > conf.optimizerInSetConversionThreshold) { + val hSet = newList.map(e => e.eval(EmptyRow)) + InSet(v, HashSet() ++ hSet) + } else if (newList.size < list.size) { + expr.copy(list = newList) + } else { // newList.length == list.length + expr + } + } + } +} + + +/** + * Simplifies boolean expressions: + * 1. Simplifies expressions whose answer can be determined without evaluating both sides. + * 2. Eliminates / extracts common factors. + * 3. Merge same expressions + * 4. Removes `Not` operator. + */ +object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsUp { + case TrueLiteral And e => e + case e And TrueLiteral => e + case FalseLiteral Or e => e + case e Or FalseLiteral => e + + case FalseLiteral And _ => FalseLiteral + case _ And FalseLiteral => FalseLiteral + case TrueLiteral Or _ => TrueLiteral + case _ Or TrueLiteral => TrueLiteral + + case a And b if a.semanticEquals(b) => a + case a Or b if a.semanticEquals(b) => a + + case a And (b Or c) if Not(a).semanticEquals(b) => And(a, c) + case a And (b Or c) if Not(a).semanticEquals(c) => And(a, b) + case (a Or b) And c if a.semanticEquals(Not(c)) => And(b, c) + case (a Or b) And c if b.semanticEquals(Not(c)) => And(a, c) + + case a Or (b And c) if Not(a).semanticEquals(b) => Or(a, c) + case a Or (b And c) if Not(a).semanticEquals(c) => Or(a, b) + case (a And b) Or c if a.semanticEquals(Not(c)) => Or(b, c) + case (a And b) Or c if b.semanticEquals(Not(c)) => Or(a, c) + + // Common factor elimination for conjunction + case and @ (left And right) => + // 1. Split left and right to get the disjunctive predicates, + // i.e. lhs = (a, b), rhs = (a, c) + // 2. Find the common predict between lhsSet and rhsSet, i.e. common = (a) + // 3. Remove common predict from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c) + // 4. Apply the formula, get the optimized predicate: common || (ldiff && rdiff) + val lhs = splitDisjunctivePredicates(left) + val rhs = splitDisjunctivePredicates(right) + val common = lhs.filter(e => rhs.exists(e.semanticEquals)) + if (common.isEmpty) { + // No common factors, return the original predicate + and + } else { + val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals)) + val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals)) + if (ldiff.isEmpty || rdiff.isEmpty) { + // (a || b || c || ...) && (a || b) => (a || b) + common.reduce(Or) + } else { + // (a || b || c || ...) && (a || b || d || ...) => + // ((c || ...) && (d || ...)) || a || b + (common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or) + } + } + + // Common factor elimination for disjunction + case or @ (left Or right) => + // 1. Split left and right to get the conjunctive predicates, + // i.e. lhs = (a, b), rhs = (a, c) + // 2. Find the common predict between lhsSet and rhsSet, i.e. common = (a) + // 3. Remove common predict from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c) + // 4. 
Apply the formula, get the optimized predicate: common && (ldiff || rdiff) + val lhs = splitConjunctivePredicates(left) + val rhs = splitConjunctivePredicates(right) + val common = lhs.filter(e => rhs.exists(e.semanticEquals)) + if (common.isEmpty) { + // No common factors, return the original predicate + or + } else { + val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals)) + val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals)) + if (ldiff.isEmpty || rdiff.isEmpty) { + // (a && b) || (a && b && c && ...) => a && b + common.reduce(And) + } else { + // (a && b && c && ...) || (a && b && d && ...) => + // ((c && ...) || (d && ...)) && a && b + (common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And) + } + } + + case Not(TrueLiteral) => FalseLiteral + case Not(FalseLiteral) => TrueLiteral + + case Not(a GreaterThan b) => LessThanOrEqual(a, b) + case Not(a GreaterThanOrEqual b) => LessThan(a, b) + + case Not(a LessThan b) => GreaterThanOrEqual(a, b) + case Not(a LessThanOrEqual b) => GreaterThan(a, b) + + case Not(a Or b) => And(Not(a), Not(b)) + case Not(a And b) => Or(Not(a), Not(b)) + + case Not(Not(e)) => e + } + } +} + + +/** + * Simplifies binary comparisons with semantically-equal expressions: + * 1) Replace '<=>' with 'true' literal. + * 2) Replace '=', '<=', and '>=' with 'true' literal if both operands are non-nullable. + * 3) Replace '<' and '>' with 'false' literal if both operands are non-nullable. + */ +object SimplifyBinaryComparison extends Rule[LogicalPlan] with PredicateHelper { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsUp { + // True with equality + case a EqualNullSafe b if a.semanticEquals(b) => TrueLiteral + case a EqualTo b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral + case a GreaterThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) => + TrueLiteral + case a LessThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral + + // False with inequality + case a GreaterThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral + case a LessThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral + } + } +} + + +/** + * Simplifies conditional expressions (if / case). + */ +object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper { + private def falseOrNullLiteral(e: Expression): Boolean = e match { + case FalseLiteral => true + case Literal(null, _) => true + case _ => false + } + + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsUp { + case If(TrueLiteral, trueValue, _) => trueValue + case If(FalseLiteral, _, falseValue) => falseValue + case If(Literal(null, _), _, falseValue) => falseValue + + case e @ CaseWhen(branches, elseValue) if branches.exists(x => falseOrNullLiteral(x._1)) => + // If there are branches that are always false, remove them. + // If there are no more branches left, just use the else value. + // Note that these two are handled together here in a single case statement because + // otherwise we cannot determine the data type for the elseValue if it is None (i.e. null). 
+ val newBranches = branches.filter(x => !falseOrNullLiteral(x._1)) + if (newBranches.isEmpty) { + elseValue.getOrElse(Literal.create(null, e.dataType)) + } else { + e.copy(branches = newBranches) + } + + case e @ CaseWhen(branches, _) if branches.headOption.map(_._1) == Some(TrueLiteral) => + // If the first branch is a true literal, remove the entire CaseWhen and use the value + // from that. Note that CaseWhen.branches should never be empty, and as a result the + // headOption (rather than head) added above is just an extra (and unnecessary) safeguard. + branches.head._2 + } + } +} + + +/** + * Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition. + * For example, when the expression is just checking to see if a string starts with a given + * pattern. + */ +object LikeSimplification extends Rule[LogicalPlan] { + // if guards below protect from escapes on trailing %. + // Cases like "something\%" are not optimized, but this does not affect correctness. + private val startsWith = "([^_%]+)%".r + private val endsWith = "%([^_%]+)".r + private val startsAndEndsWith = "([^_%]+)%([^_%]+)".r + private val contains = "%([^_%]+)%".r + private val equalTo = "([^_%]*)".r + + def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { + case Like(input, Literal(pattern, StringType)) => + pattern.toString match { + case startsWith(prefix) if !prefix.endsWith("\\") => + StartsWith(input, Literal(prefix)) + case endsWith(postfix) => + EndsWith(input, Literal(postfix)) + // 'a%a' pattern is basically same with 'a%' && '%a'. + // However, the additional `Length` condition is required to prevent 'a' match 'a%a'. + case startsAndEndsWith(prefix, postfix) if !prefix.endsWith("\\") => + And(GreaterThanOrEqual(Length(input), Literal(prefix.size + postfix.size)), + And(StartsWith(input, Literal(prefix)), EndsWith(input, Literal(postfix)))) + case contains(infix) if !infix.endsWith("\\") => + Contains(input, Literal(infix)) + case equalTo(str) => + EqualTo(input, Literal(str)) + case _ => + Like(input, Literal.create(pattern, StringType)) + } + } +} + + +/** + * Replaces [[Expression Expressions]] that can be statically evaluated with + * equivalent [[Literal]] values. This rule is more specific with + * Null value propagation from bottom to top of the expression tree. 
+ */ +object NullPropagation extends Rule[LogicalPlan] { + private def nonNullLiteral(e: Expression): Boolean = e match { + case Literal(null, _) => false + case _ => true + } + + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsUp { + case e @ WindowExpression(Cast(Literal(0L, _), _), _) => + Cast(Literal(0L), e.dataType) + case e @ AggregateExpression(Count(exprs), _, _, _) if !exprs.exists(nonNullLiteral) => + Cast(Literal(0L), e.dataType) + case e @ IsNull(c) if !c.nullable => Literal.create(false, BooleanType) + case e @ IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType) + case e @ GetArrayItem(Literal(null, _), _) => Literal.create(null, e.dataType) + case e @ GetArrayItem(_, Literal(null, _)) => Literal.create(null, e.dataType) + case e @ GetMapValue(Literal(null, _), _) => Literal.create(null, e.dataType) + case e @ GetMapValue(_, Literal(null, _)) => Literal.create(null, e.dataType) + case e @ GetStructField(Literal(null, _), _, _) => Literal.create(null, e.dataType) + case e @ GetArrayStructFields(Literal(null, _), _, _, _, _) => + Literal.create(null, e.dataType) + case e @ EqualNullSafe(Literal(null, _), r) => IsNull(r) + case e @ EqualNullSafe(l, Literal(null, _)) => IsNull(l) + case ae @ AggregateExpression(Count(exprs), _, false, _) if !exprs.exists(_.nullable) => + // This rule should be only triggered when isDistinct field is false. + ae.copy(aggregateFunction = Count(Literal(1))) + + // For Coalesce, remove null literals. + case e @ Coalesce(children) => + val newChildren = children.filter(nonNullLiteral) + if (newChildren.isEmpty) { + Literal.create(null, e.dataType) + } else if (newChildren.length == 1) { + newChildren.head + } else { + Coalesce(newChildren) + } + + case e @ Substring(Literal(null, _), _, _) => Literal.create(null, e.dataType) + case e @ Substring(_, Literal(null, _), _) => Literal.create(null, e.dataType) + case e @ Substring(_, _, Literal(null, _)) => Literal.create(null, e.dataType) + + // Put exceptional cases above if any + case e @ BinaryArithmetic(Literal(null, _), _) => Literal.create(null, e.dataType) + case e @ BinaryArithmetic(_, Literal(null, _)) => Literal.create(null, e.dataType) + + case e @ BinaryComparison(Literal(null, _), _) => Literal.create(null, e.dataType) + case e @ BinaryComparison(_, Literal(null, _)) => Literal.create(null, e.dataType) + + case e: StringRegexExpression => e.children match { + case Literal(null, _) :: right :: Nil => Literal.create(null, e.dataType) + case left :: Literal(null, _) :: Nil => Literal.create(null, e.dataType) + case _ => e + } + + case e: StringPredicate => e.children match { + case Literal(null, _) :: right :: Nil => Literal.create(null, e.dataType) + case left :: Literal(null, _) :: Nil => Literal.create(null, e.dataType) + case _ => e + } + + // If the value expression is NULL then transform the In expression to + // Literal(null) + case In(Literal(null, _), list) => Literal.create(null, BooleanType) + + } + } +} + + +/** + * Propagate foldable expressions: + * Replace attributes with aliases of the original foldable expressions if possible. + * Other optimizations will take advantage of the propagated foldable expressions. 
+ * + * {{{ + * SELECT 1.0 x, 'abc' y, Now() z ORDER BY x, y, 3 + * ==> SELECT 1.0 x, 'abc' y, Now() z ORDER BY 1.0, 'abc', Now() + * }}} + */ +object FoldablePropagation extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = { + val foldableMap = AttributeMap(plan.flatMap { + case Project(projectList, _) => projectList.collect { + case a: Alias if a.child.foldable => (a.toAttribute, a) + } + case _ => Nil + }) + + if (foldableMap.isEmpty) { + plan + } else { + var stop = false + CleanupAliases(plan.transformUp { + case u: Union => + stop = true + u + case c: Command => + stop = true + c + // For outer join, although its output attributes are derived from its children, they are + // actually different attributes: the output of outer join is not always picked from its + // children, but can also be null. + // TODO(cloud-fan): It seems more reasonable to use new attributes as the output attributes + // of outer join. + case j @ Join(_, _, LeftOuter | RightOuter | FullOuter, _) => + stop = true + j + + // These 3 operators take attributes as constructor parameters, and these attributes + // can't be replaced by alias. + case m: MapGroups => + stop = true + m + case f: FlatMapGroupsInR => + stop = true + f + case c: CoGroup => + stop = true + c + + case p: LogicalPlan if !stop => p.transformExpressions { + case a: AttributeReference if foldableMap.contains(a) => + foldableMap(a) + } + }) + } + } +} + + +/** + * Optimizes expressions by replacing according to CodeGen configuration. + */ +case class OptimizeCodegen(conf: CatalystConf) extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { + case e: CaseWhen if canCodegen(e) => e.toCodegen() + } + + private def canCodegen(e: CaseWhen): Boolean = { + val numBranches = e.branches.size + e.elseValue.size + numBranches <= conf.maxCaseBranchesForCodegen + } +} + + +/** + * Removes [[Cast Casts]] that are unnecessary because the input is already the correct type. + */ +object SimplifyCasts extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { + case Cast(e, dataType) if e.dataType == dataType => e + } +} + + +/** + * Removes nodes that are not necessary. + */ +object RemoveDispensableExpressions extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { + case UnaryPositive(child) => child + case PromotePrecision(child) => child + } +} + + +/** + * Removes the inner case conversion expressions that are unnecessary because + * the inner conversion is overwritten by the outer one. + */ +object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] { + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case q: LogicalPlan => q transformExpressionsUp { + case Upper(Upper(child)) => Upper(child) + case Upper(Lower(child)) => Upper(child) + case Lower(Upper(child)) => Lower(child) + case Lower(Lower(child)) => Lower(child) + } + } +} From 718b6bad2d698b76be6906d51da13626e9f3890e Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Sat, 27 Aug 2016 00:36:18 -0700 Subject: [PATCH 122/270] [SPARK-17274][SQL] Move join optimizer rules into a separate file ## What changes were proposed in this pull request? As part of breaking Optimizer.scala apart, this patch moves various join rules into a single file. ## How was this patch tested? This should be covered by existing tests. Author: Reynold Xin Closes #14846 from rxin/SPARK-17274. 
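This is a pure code move: `ReorderJoin` and `EliminateOuterJoin` keep their existing behavior and only change files. For readers skimming the moved rules, the sketch below (not part of this patch; the table and column names are invented) illustrates the kind of query `EliminateOuterJoin` targets: a null-intolerant predicate on the null-supplying side lets the optimizer rewrite an outer join as an inner join.

```
// Sketch only: assumes a local SparkSession; data and names are hypothetical.
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("outer-join-sketch").getOrCreate()
import spark.implicits._

val left = Seq((1, "a"), (2, "b")).toDF("k", "lv")
val right = Seq((1, 10)).toDF("k", "rv")

// rv > 0 can never hold on the null rows a LEFT OUTER join would supply for
// unmatched keys, so the rule may downgrade the join to INNER. The optimized
// plan printed by explain(true) shows the rewritten join type.
left.join(right, Seq("k"), "left_outer")
  .where($"rv" > 0)
  .explain(true)
```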
--- .../sql/catalyst/optimizer/Optimizer.scala | 106 -------------- .../spark/sql/catalyst/optimizer/joins.scala | 134 ++++++++++++++++++ 2 files changed, 134 insertions(+), 106 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 17cab18ff8e9e..7617d34261807 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -799,112 +799,6 @@ object PushDownPredicate extends Rule[LogicalPlan] with PredicateHelper { } } -/** - * Reorder the joins and push all the conditions into join, so that the bottom ones have at least - * one condition. - * - * The order of joins will not be changed if all of them already have at least one condition. - */ -object ReorderJoin extends Rule[LogicalPlan] with PredicateHelper { - - /** - * Join a list of plans together and push down the conditions into them. - * - * The joined plan are picked from left to right, prefer those has at least one join condition. - * - * @param input a list of LogicalPlans to join. - * @param conditions a list of condition for join. - */ - @tailrec - def createOrderedJoin(input: Seq[LogicalPlan], conditions: Seq[Expression]): LogicalPlan = { - assert(input.size >= 2) - if (input.size == 2) { - val (joinConditions, others) = conditions.partition( - e => !SubqueryExpression.hasCorrelatedSubquery(e)) - val join = Join(input(0), input(1), Inner, joinConditions.reduceLeftOption(And)) - if (others.nonEmpty) { - Filter(others.reduceLeft(And), join) - } else { - join - } - } else { - val left :: rest = input.toList - // find out the first join that have at least one join condition - val conditionalJoin = rest.find { plan => - val refs = left.outputSet ++ plan.outputSet - conditions.filterNot(canEvaluate(_, left)).filterNot(canEvaluate(_, plan)) - .exists(_.references.subsetOf(refs)) - } - // pick the next one if no condition left - val right = conditionalJoin.getOrElse(rest.head) - - val joinedRefs = left.outputSet ++ right.outputSet - val (joinConditions, others) = conditions.partition( - e => e.references.subsetOf(joinedRefs) && !SubqueryExpression.hasCorrelatedSubquery(e)) - val joined = Join(left, right, Inner, joinConditions.reduceLeftOption(And)) - - // should not have reference to same logical plan - createOrderedJoin(Seq(joined) ++ rest.filterNot(_ eq right), others) - } - } - - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case j @ ExtractFiltersAndInnerJoins(input, conditions) - if input.size > 2 && conditions.nonEmpty => - createOrderedJoin(input, conditions) - } -} - -/** - * Elimination of outer joins, if the predicates can restrict the result sets so that - * all null-supplying rows are eliminated - * - * - full outer -> inner if both sides have such predicates - * - left outer -> inner if the right side has such predicates - * - right outer -> inner if the left side has such predicates - * - full outer -> left outer if only the left side has such predicates - * - full outer -> right outer if only the right side has such predicates - * - * This rule should be executed before pushing down the Filter - */ -object EliminateOuterJoin extends Rule[LogicalPlan] with PredicateHelper { - - /** - * Returns whether the expression returns null or false 
when all inputs are nulls. - */ - private def canFilterOutNull(e: Expression): Boolean = { - if (!e.deterministic || SubqueryExpression.hasCorrelatedSubquery(e)) return false - val attributes = e.references.toSeq - val emptyRow = new GenericInternalRow(attributes.length) - val v = BindReferences.bindReference(e, attributes).eval(emptyRow) - v == null || v == false - } - - private def buildNewJoinType(filter: Filter, join: Join): JoinType = { - val conditions = splitConjunctivePredicates(filter.condition) ++ filter.constraints - val leftConditions = conditions.filter(_.references.subsetOf(join.left.outputSet)) - val rightConditions = conditions.filter(_.references.subsetOf(join.right.outputSet)) - - val leftHasNonNullPredicate = leftConditions.exists(canFilterOutNull) - val rightHasNonNullPredicate = rightConditions.exists(canFilterOutNull) - - join.joinType match { - case RightOuter if leftHasNonNullPredicate => Inner - case LeftOuter if rightHasNonNullPredicate => Inner - case FullOuter if leftHasNonNullPredicate && rightHasNonNullPredicate => Inner - case FullOuter if leftHasNonNullPredicate => LeftOuter - case FullOuter if rightHasNonNullPredicate => RightOuter - case o => o - } - } - - def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case f @ Filter(condition, j @ Join(_, _, RightOuter | LeftOuter | FullOuter, _)) => - val newJoinType = buildNewJoinType(f, j) - if (j.joinType == newJoinType) f else Filter(condition, j.copy(joinType = newJoinType)) - } -} - /** * Pushes down [[Filter]] operators where the `condition` can be * evaluated using only the attributes of the left or right side of a join. Other diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala new file mode 100644 index 0000000000000..158ad3d91fbad --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import scala.annotation.tailrec + +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.planning.ExtractFiltersAndInnerJoins +import org.apache.spark.sql.catalyst.plans._ +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules._ + + +/** + * Reorder the joins and push all the conditions into join, so that the bottom ones have at least + * one condition. + * + * The order of joins will not be changed if all of them already have at least one condition. + */ +object ReorderJoin extends Rule[LogicalPlan] with PredicateHelper { + + /** + * Join a list of plans together and push down the conditions into them. 
+ * + * The joined plan are picked from left to right, prefer those has at least one join condition. + * + * @param input a list of LogicalPlans to join. + * @param conditions a list of condition for join. + */ + @tailrec + def createOrderedJoin(input: Seq[LogicalPlan], conditions: Seq[Expression]): LogicalPlan = { + assert(input.size >= 2) + if (input.size == 2) { + val (joinConditions, others) = conditions.partition( + e => !SubqueryExpression.hasCorrelatedSubquery(e)) + val join = Join(input(0), input(1), Inner, joinConditions.reduceLeftOption(And)) + if (others.nonEmpty) { + Filter(others.reduceLeft(And), join) + } else { + join + } + } else { + val left :: rest = input.toList + // find out the first join that have at least one join condition + val conditionalJoin = rest.find { plan => + val refs = left.outputSet ++ plan.outputSet + conditions.filterNot(canEvaluate(_, left)).filterNot(canEvaluate(_, plan)) + .exists(_.references.subsetOf(refs)) + } + // pick the next one if no condition left + val right = conditionalJoin.getOrElse(rest.head) + + val joinedRefs = left.outputSet ++ right.outputSet + val (joinConditions, others) = conditions.partition( + e => e.references.subsetOf(joinedRefs) && !SubqueryExpression.hasCorrelatedSubquery(e)) + val joined = Join(left, right, Inner, joinConditions.reduceLeftOption(And)) + + // should not have reference to same logical plan + createOrderedJoin(Seq(joined) ++ rest.filterNot(_ eq right), others) + } + } + + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case j @ ExtractFiltersAndInnerJoins(input, conditions) + if input.size > 2 && conditions.nonEmpty => + createOrderedJoin(input, conditions) + } +} + + +/** + * Elimination of outer joins, if the predicates can restrict the result sets so that + * all null-supplying rows are eliminated + * + * - full outer -> inner if both sides have such predicates + * - left outer -> inner if the right side has such predicates + * - right outer -> inner if the left side has such predicates + * - full outer -> left outer if only the left side has such predicates + * - full outer -> right outer if only the right side has such predicates + * + * This rule should be executed before pushing down the Filter + */ +object EliminateOuterJoin extends Rule[LogicalPlan] with PredicateHelper { + + /** + * Returns whether the expression returns null or false when all inputs are nulls. 
+ */ + private def canFilterOutNull(e: Expression): Boolean = { + if (!e.deterministic || SubqueryExpression.hasCorrelatedSubquery(e)) return false + val attributes = e.references.toSeq + val emptyRow = new GenericInternalRow(attributes.length) + val v = BindReferences.bindReference(e, attributes).eval(emptyRow) + v == null || v == false + } + + private def buildNewJoinType(filter: Filter, join: Join): JoinType = { + val conditions = splitConjunctivePredicates(filter.condition) ++ filter.constraints + val leftConditions = conditions.filter(_.references.subsetOf(join.left.outputSet)) + val rightConditions = conditions.filter(_.references.subsetOf(join.right.outputSet)) + + val leftHasNonNullPredicate = leftConditions.exists(canFilterOutNull) + val rightHasNonNullPredicate = rightConditions.exists(canFilterOutNull) + + join.joinType match { + case RightOuter if leftHasNonNullPredicate => Inner + case LeftOuter if rightHasNonNullPredicate => Inner + case FullOuter if leftHasNonNullPredicate && rightHasNonNullPredicate => Inner + case FullOuter if leftHasNonNullPredicate => LeftOuter + case FullOuter if rightHasNonNullPredicate => RightOuter + case o => o + } + } + + def apply(plan: LogicalPlan): LogicalPlan = plan transform { + case f @ Filter(condition, j @ Join(_, _, RightOuter | LeftOuter | FullOuter, _)) => + val newJoinType = buildNewJoinType(f, j) + if (j.joinType == newJoinType) f else Filter(condition, j.copy(joinType = newJoinType)) + } +} From cd0ed31ea9965563a9b1ea3e8bfbeaf8347cacd9 Mon Sep 17 00:00:00 2001 From: Takeshi YAMAMURO Date: Sat, 27 Aug 2016 08:42:41 +0100 Subject: [PATCH 123/270] [SPARK-15382][SQL] Fix a bug in sampling with replacement ## What changes were proposed in this pull request? This pr to fix a bug below in sampling with replacement ``` val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b") df.sample(true, 2.0).withColumn("c", monotonically_increasing_id).select($"c").show +---+ | c| +---+ | 0| | 1| | 1| | 1| | 2| +---+ ``` ## How was this patch tested? Added a test in `DataFrameSuite`. Author: Takeshi YAMAMURO Closes #14800 from maropu/FixSampleBug. 
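A quick interactive check of the fix (a sketch only, assuming a `SparkSession` in scope as `spark`; it mirrors the regression test added to `DataFrameSuite` below): once the sampler copies its result rows, every sampled row gets its own monotonically increasing id, even when a source row is drawn more than once.

```
// Sketch only; assumes an existing SparkSession named `spark`.
import spark.implicits._
import org.apache.spark.sql.functions.monotonically_increasing_id

val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b")
val ids = df.sample(withReplacement = true, fraction = 2.0)
  .withColumn("c", monotonically_increasing_id())
  .select("c")
  .collect()

// Before the fix, repeated ids such as the 0, 1, 1, 1, 2 output above could appear;
// with the copy in place the collected ids are all distinct.
assert(ids.length == ids.distinct.length)
```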
--- .../spark/sql/execution/basicPhysicalOperators.scala | 1 + .../test/scala/org/apache/spark/sql/DataFrameSuite.scala | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala index 3562083b06740..dd78a784915d2 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala @@ -266,6 +266,7 @@ case class SampleExec( if (withReplacement) { val samplerClass = classOf[PoissonSampler[UnsafeRow]].getName val initSampler = ctx.freshName("initSampler") + ctx.copyResult = true ctx.addMutableState(s"$samplerClass", sampler, s"$initSampler();") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala index cd485770d269c..ce0b92a461c39 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala @@ -1579,4 +1579,11 @@ class DataFrameSuite extends QueryTest with SharedSQLContext { val df = spark.createDataFrame(rdd, StructType(schemas), false) assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100) } + + test("copy results for sampling with replacement") { + val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b") + val sampleDf = df.sample(true, 2.00) + val d = sampleDf.withColumn("c", monotonically_increasing_id).select($"c").collect + assert(d.size == d.distinct.size) + } } From 40168dbe771ae662ed61851a1f3c677dd14fe344 Mon Sep 17 00:00:00 2001 From: "Peng, Meng" Date: Sat, 27 Aug 2016 08:46:01 +0100 Subject: [PATCH 124/270] [ML][MLLIB] The require condition and message doesn't match in SparseMatrix. ## What changes were proposed in this pull request? The require condition and message doesn't match, and the condition also should be optimized. Small change. Please kindly let me know if JIRA required. ## How was this patch tested? No additional test required. Author: Peng, Meng Closes #14824 from mpjlu/smallChangeForMatrixRequire. --- .../scala/org/apache/spark/ml/linalg/Matrices.scala | 11 +++++++---- .../org/apache/spark/mllib/linalg/Matrices.scala | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala index 0ea687bbccc54..f1ecc65af1105 100644 --- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala +++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala @@ -454,10 +454,13 @@ class SparseMatrix @Since("2.0.0") ( require(values.length == rowIndices.length, "The number of row indices and values don't match! " + s"values.length: ${values.length}, rowIndices.length: ${rowIndices.length}") - // The Or statement is for the case when the matrix is transposed - require(colPtrs.length == numCols + 1 || colPtrs.length == numRows + 1, "The length of the " + - "column indices should be the number of columns + 1. 
Currently, colPointers.length: " + - s"${colPtrs.length}, numCols: $numCols") + if (isTransposed) { + require(colPtrs.length == numRows + 1, + s"Expecting ${numRows + 1} colPtrs when numRows = $numRows but got ${colPtrs.length}") + } else { + require(colPtrs.length == numCols + 1, + s"Expecting ${numCols + 1} colPtrs when numCols = $numCols but got ${colPtrs.length}") + } require(values.length == colPtrs.last, "The last value of colPtrs must equal the number of " + s"elements. values.length: ${values.length}, colPtrs.last: ${colPtrs.last}") diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala index e8f34388cd9fe..4c39cf17f4271 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala @@ -572,10 +572,13 @@ class SparseMatrix @Since("1.3.0") ( require(values.length == rowIndices.length, "The number of row indices and values don't match! " + s"values.length: ${values.length}, rowIndices.length: ${rowIndices.length}") - // The Or statement is for the case when the matrix is transposed - require(colPtrs.length == numCols + 1 || colPtrs.length == numRows + 1, "The length of the " + - "column indices should be the number of columns + 1. Currently, colPointers.length: " + - s"${colPtrs.length}, numCols: $numCols") + if (isTransposed) { + require(colPtrs.length == numRows + 1, + s"Expecting ${numRows + 1} colPtrs when numRows = $numRows but got ${colPtrs.length}") + } else { + require(colPtrs.length == numCols + 1, + s"Expecting ${numCols + 1} colPtrs when numCols = $numCols but got ${colPtrs.length}") + } require(values.length == colPtrs.last, "The last value of colPtrs must equal the number of " + s"elements. values.length: ${values.length}, colPtrs.last: ${colPtrs.last}") From 9fbced5b25c2f24d50c50516b4b7737f7e3eaf86 Mon Sep 17 00:00:00 2001 From: Robert Kruszewski Date: Sat, 27 Aug 2016 08:47:15 +0100 Subject: [PATCH 125/270] [SPARK-17216][UI] fix event timeline bars length ## What changes were proposed in this pull request? Make event timeline bar expand to full length of the bar (which is total time) This issue occurs only on chrome, firefox looks fine. Haven't tested other browsers. ## How was this patch tested? Inspection in browsers Before ![screen shot 2016-08-24 at 3 38 24 pm](https://cloud.githubusercontent.com/assets/512084/17935104/0d6cda74-6a12-11e6-9c66-e00cfa855606.png) After ![screen shot 2016-08-24 at 3 36 39 pm](https://cloud.githubusercontent.com/assets/512084/17935114/15740ea4-6a12-11e6-83a1-7c06eef6abb8.png) Author: Robert Kruszewski Closes #14791 from robert3005/robertk/event-timeline. 
--- .../resources/org/apache/spark/ui/static/timeline-view.css | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css index 0f400461c5293..f9ad9f8378803 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css +++ b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css @@ -83,6 +83,10 @@ rect.getting-result-time-proportion { stroke: #75B0A6; } +.vis-item .vis-item-content { + width: 100% +} + .vis.timeline { line-height: 14px; } From e07baf14120bc94b783649dabf5fffea58bff0de Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Sat, 27 Aug 2016 08:48:56 +0100 Subject: [PATCH 126/270] [SPARK-17001][ML] Enable standardScaler to standardize sparse vectors when withMean=True ## What changes were proposed in this pull request? Allow centering / mean scaling of sparse vectors in StandardScaler, if requested. This is for compatibility with `VectorAssembler` in common usages. ## How was this patch tested? Jenkins tests, including new caes to reflect the new behavior. Author: Sean Owen Closes #14663 from srowen/SPARK-17001. --- docs/ml-features.md | 2 +- docs/mllib-feature-extraction.md | 2 +- .../python/mllib/standard_scaler_example.py | 2 - .../mllib/StandardScalerExample.scala | 2 - .../spark/ml/feature/StandardScaler.scala | 3 +- .../spark/mllib/feature/StandardScaler.scala | 41 +++++------ .../ml/feature/StandardScalerSuite.scala | 16 +++++ .../mllib/feature/StandardScalerSuite.scala | 69 ++++++++++--------- python/pyspark/mllib/feature.py | 5 +- 9 files changed, 80 insertions(+), 62 deletions(-) diff --git a/docs/ml-features.md b/docs/ml-features.md index e41bf78521b6e..746593fb9e234 100644 --- a/docs/ml-features.md +++ b/docs/ml-features.md @@ -768,7 +768,7 @@ for more details on the API. `StandardScaler` transforms a dataset of `Vector` rows, normalizing each feature to have unit standard deviation and/or zero mean. It takes parameters: * `withStd`: True by default. Scales the data to unit standard deviation. -* `withMean`: False by default. Centers the data with mean before scaling. It will build a dense output, so this does not work on sparse input and will raise an exception. +* `withMean`: False by default. Centers the data with mean before scaling. It will build a dense output, so take care when applying to sparse input. `StandardScaler` is an `Estimator` which can be `fit` on a dataset to produce a `StandardScalerModel`; this amounts to computing summary statistics. The model can then transform a `Vector` column in a dataset to have unit standard deviation and/or zero mean features. diff --git a/docs/mllib-feature-extraction.md b/docs/mllib-feature-extraction.md index 867be7f2932ed..353d391249973 100644 --- a/docs/mllib-feature-extraction.md +++ b/docs/mllib-feature-extraction.md @@ -148,7 +148,7 @@ against features with very large variances exerting an overly large influence du following parameters in the constructor: * `withMean` False by default. Centers the data with mean before scaling. It will build a dense -output, so this does not work on sparse input and will raise an exception. +output, so take care when applying to sparse input. * `withStd` True by default. Scales the data to unit standard deviation. 
We provide a [`fit`](api/scala/index.html#org.apache.spark.mllib.feature.StandardScaler) method in diff --git a/examples/src/main/python/mllib/standard_scaler_example.py b/examples/src/main/python/mllib/standard_scaler_example.py index 20a77a470850f..442094e1bf366 100644 --- a/examples/src/main/python/mllib/standard_scaler_example.py +++ b/examples/src/main/python/mllib/standard_scaler_example.py @@ -38,8 +38,6 @@ # data1 will be unit variance. data1 = label.zip(scaler1.transform(features)) - # Without converting the features into dense vectors, transformation with zero mean will raise - # exception on sparse vector. # data2 will be unit variance and zero mean. data2 = label.zip(scaler2.transform(features.map(lambda x: Vectors.dense(x.toArray())))) # $example off$ diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/StandardScalerExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/StandardScalerExample.scala index fc0aa1b7f0915..769fc17b3dc65 100644 --- a/examples/src/main/scala/org/apache/spark/examples/mllib/StandardScalerExample.scala +++ b/examples/src/main/scala/org/apache/spark/examples/mllib/StandardScalerExample.scala @@ -44,8 +44,6 @@ object StandardScalerExample { // data1 will be unit variance. val data1 = data.map(x => (x.label, scaler1.transform(x.features))) - // Without converting the features into dense vectors, transformation with zero mean will raise - // exception on sparse vector. // data2 will be unit variance and zero mean. val data2 = data.map(x => (x.label, scaler2.transform(Vectors.dense(x.features.toArray)))) // $example off$ diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala index 2494cf51a2bd6..d76d556280e96 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala @@ -41,8 +41,7 @@ private[feature] trait StandardScalerParams extends Params with HasInputCol with /** * Whether to center the data with mean before scaling. - * It will build a dense output, so this does not work on sparse input - * and will raise an exception. + * It will build a dense output, so take care when applying to sparse input. * Default: false * @group param */ diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala index 3e86c6c59c953..7667936a3f85f 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala @@ -32,7 +32,7 @@ import org.apache.spark.rdd.RDD * which is computed as the square root of the unbiased sample variance. * * @param withMean False by default. Centers the data with mean before scaling. It will build a - * dense output, so this does not work on sparse input and will raise an exception. + * dense output, so take care when applying to sparse input. * @param withStd True by default. Scales the data to unit standard deviation. */ @Since("1.1.0") @@ -139,26 +139,27 @@ class StandardScalerModel @Since("1.3.0") ( // the member variables are accessed, `invokespecial` will be called which is expensive. // This can be avoid by having a local reference of `shift`. 
val localShift = shift - vector match { - case DenseVector(vs) => - val values = vs.clone() - val size = values.length - if (withStd) { - var i = 0 - while (i < size) { - values(i) = if (std(i) != 0.0) (values(i) - localShift(i)) * (1.0 / std(i)) else 0.0 - i += 1 - } - } else { - var i = 0 - while (i < size) { - values(i) -= localShift(i) - i += 1 - } - } - Vectors.dense(values) - case v => throw new IllegalArgumentException("Do not support vector type " + v.getClass) + // Must have a copy of the values since it will be modified in place + val values = vector match { + // specially handle DenseVector because its toArray does not clone already + case d: DenseVector => d.values.clone() + case v: Vector => v.toArray + } + val size = values.length + if (withStd) { + var i = 0 + while (i < size) { + values(i) = if (std(i) != 0.0) (values(i) - localShift(i)) * (1.0 / std(i)) else 0.0 + i += 1 + } + } else { + var i = 0 + while (i < size) { + values(i) -= localShift(i) + i += 1 + } } + Vectors.dense(values) } else if (withStd) { vector match { case DenseVector(vs) => diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/StandardScalerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/StandardScalerSuite.scala index 2243a0f972d32..827ecb0fadbee 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/feature/StandardScalerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/feature/StandardScalerSuite.scala @@ -114,6 +114,22 @@ class StandardScalerSuite extends SparkFunSuite with MLlibTestSparkContext assertResult(standardScaler3.transform(df3)) } + test("sparse data and withMean") { + val someSparseData = Array( + Vectors.sparse(3, Array(0, 1), Array(-2.0, 2.3)), + Vectors.sparse(3, Array(1, 2), Array(-5.1, 1.0)), + Vectors.dense(1.7, -0.6, 3.3) + ) + val df = spark.createDataFrame(someSparseData.zip(resWithMean)).toDF("features", "expected") + val standardScaler = new StandardScaler() + .setInputCol("features") + .setOutputCol("standardized_features") + .setWithMean(true) + .setWithStd(false) + .fit(df) + assertResult(standardScaler.transform(df)) + } + test("StandardScaler read/write") { val t = new StandardScaler() .setInputCol("myInputCol") diff --git a/mllib/src/test/scala/org/apache/spark/mllib/feature/StandardScalerSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/feature/StandardScalerSuite.scala index b4e26b2aeb3cf..a5769631e510d 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/feature/StandardScalerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/feature/StandardScalerSuite.scala @@ -207,23 +207,17 @@ class StandardScalerSuite extends SparkFunSuite with MLlibTestSparkContext { val equivalentModel2 = new StandardScalerModel(model2.std, model2.mean, true, false) val equivalentModel3 = new StandardScalerModel(model3.std, model3.mean, false, true) + val data1 = sparseData.map(equivalentModel1.transform) val data2 = sparseData.map(equivalentModel2.transform) + val data3 = sparseData.map(equivalentModel3.transform) - withClue("Standardization with mean can not be applied on sparse input.") { - intercept[IllegalArgumentException] { - sparseData.map(equivalentModel1.transform) - } - } - - withClue("Standardization with mean can not be applied on sparse input.") { - intercept[IllegalArgumentException] { - sparseData.map(equivalentModel3.transform) - } - } - + val data1RDD = equivalentModel1.transform(dataRDD) val data2RDD = equivalentModel2.transform(dataRDD) + val data3RDD = equivalentModel3.transform(dataRDD) - val summary = 
computeSummary(data2RDD) + val summary1 = computeSummary(data1RDD) + val summary2 = computeSummary(data2RDD) + val summary3 = computeSummary(data3RDD) assert((sparseData, data2, data2RDD.collect()).zipped.forall { case (v1: DenseVector, v2: DenseVector, v3: DenseVector) => true @@ -231,13 +225,23 @@ class StandardScalerSuite extends SparkFunSuite with MLlibTestSparkContext { case _ => false }, "The vector type should be preserved after standardization.") + assert((data1, data1RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5)) assert((data2, data2RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5)) + assert((data3, data3RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5)) - assert(summary.mean !~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5) - assert(summary.variance ~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5) + assert(summary1.mean ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5) + assert(summary1.variance ~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5) + assert(summary2.mean !~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5) + assert(summary2.variance ~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5) + assert(summary3.mean ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5) + assert(summary3.variance !~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5) + assert(data1(4) ~== Vectors.dense(0.56854, -0.069068, 0.116377) absTol 1E-5) + assert(data1(5) ~== Vectors.dense(-0.296998, 0.872775, 0.116377) absTol 1E-5) assert(data2(4) ~== Vectors.sparse(3, Seq((0, 0.865538862), (1, -0.22604255))) absTol 1E-5) assert(data2(5) ~== Vectors.sparse(3, Seq((1, 0.71580142))) absTol 1E-5) + assert(data3(4) ~== Vectors.dense(1.116666, -0.183333, 0.183333) absTol 1E-5) + assert(data3(5) ~== Vectors.dense(-0.583333, 2.316666, 0.183333) absTol 1E-5) } test("Standardization with sparse input") { @@ -252,24 +256,17 @@ class StandardScalerSuite extends SparkFunSuite with MLlibTestSparkContext { val model2 = standardizer2.fit(dataRDD) val model3 = standardizer3.fit(dataRDD) + val data1 = sparseData.map(model1.transform) val data2 = sparseData.map(model2.transform) + val data3 = sparseData.map(model3.transform) - withClue("Standardization with mean can not be applied on sparse input.") { - intercept[IllegalArgumentException] { - sparseData.map(model1.transform) - } - } - - withClue("Standardization with mean can not be applied on sparse input.") { - intercept[IllegalArgumentException] { - sparseData.map(model3.transform) - } - } - + val data1RDD = model1.transform(dataRDD) val data2RDD = model2.transform(dataRDD) + val data3RDD = model3.transform(dataRDD) - - val summary = computeSummary(data2RDD) + val summary1 = computeSummary(data1RDD) + val summary2 = computeSummary(data2RDD) + val summary3 = computeSummary(data3RDD) assert((sparseData, data2, data2RDD.collect()).zipped.forall { case (v1: DenseVector, v2: DenseVector, v3: DenseVector) => true @@ -277,13 +274,23 @@ class StandardScalerSuite extends SparkFunSuite with MLlibTestSparkContext { case _ => false }, "The vector type should be preserved after standardization.") + assert((data1, data1RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5)) assert((data2, data2RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5)) + assert((data3, data3RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5)) - assert(summary.mean !~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5) - assert(summary.variance ~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5) + assert(summary1.mean ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5) + 
assert(summary1.variance ~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5)
+ assert(summary2.mean !~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5)
+ assert(summary2.variance ~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5)
+ assert(summary3.mean ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5)
+ assert(summary3.variance !~== Vectors.dense(1.0, 1.0, 1.0) absTol 1E-5)
+ assert(data1(4) ~== Vectors.dense(0.56854, -0.069068, 0.116377) absTol 1E-5)
+ assert(data1(5) ~== Vectors.dense(-0.296998, 0.872775, 0.116377) absTol 1E-5)
assert(data2(4) ~== Vectors.sparse(3, Seq((0, 0.865538862), (1, -0.22604255))) absTol 1E-5)
assert(data2(5) ~== Vectors.sparse(3, Seq((1, 0.71580142))) absTol 1E-5)
+ assert(data3(4) ~== Vectors.dense(1.116666, -0.183333, 0.183333) absTol 1E-5)
+ assert(data3(5) ~== Vectors.dense(-0.583333, 2.316666, 0.183333) absTol 1E-5)
}

test("Standardization with constant input when means and stds are provided") {
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index c8a6e33f4d9a4..324ba9758e446 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -208,9 +208,8 @@ class StandardScaler(object):
training set.
:param withMean: False by default. Centers the data with mean
- before scaling. It will build a dense output, so this
- does not work on sparse input and will raise an
- exception.
+ before scaling. It will build a dense output, so take
+ care when applying to sparse input.
:param withStd: True by default. Scales the data to unit
standard deviation.

From 095862a3cff73fd88db9ed37a63e7629e664ff64 Mon Sep 17 00:00:00 2001
From: Tejas Patil
Date: Sun, 28 Aug 2016 19:14:58 +0200
Subject: [PATCH 127/270] [SPARK-17271][SQL] Planner adds un-necessary Sort even if child ordering is semantically same as required ordering

## What changes were proposed in this pull request?

Jira: https://issues.apache.org/jira/browse/SPARK-17271

The planner is adding an unneeded SORT operation due to a bug in the way the comparison for `SortOrder` is done at https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala#L253

`SortOrder` needs to be compared semantically because the `Expression`s within two `SortOrder`s can be "semantically equal" without being literally equal objects.

For example, in the case of `sql("SELECT * FROM table1 a JOIN table2 b ON a.col1=b.col1")`:

Expression in required SortOrder:
```
AttributeReference(
  name = "col1",
  dataType = LongType,
  nullable = false
) (exprId = exprId,
  qualifier = Some("a")
)
```

Expression in child SortOrder:
```
AttributeReference(
  name = "col1",
  dataType = LongType,
  nullable = false
) (exprId = exprId)
```

Notice that the output column has a qualifier while the child attribute does not, but the underlying expression is the same; hence the child satisfies the required sort order.

This PR includes the following changes:
- Added a `semanticEquals` method to `SortOrder` so that it can compare the underlying child expressions semantically (instead of using the default `Object.equals`)
- Fixed `EnsureRequirements` to use semantic comparison of `SortOrder`

## How was this patch tested?

- Added a test case to `PlannerSuite`. Ran the remaining tests in `PlannerSuite`.

Author: Tejas Patil

Closes #14841 from tejasapatil/SPARK-17271_sort_order_equals_bug.
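For illustration, a minimal sketch of the comparison this change enables, mirroring the test added in the diff below; the qualifier value "a" and the standalone setup are only illustrative:

```scala
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.LongType

// Two references to the same underlying column: same exprId, but only one
// carries a table qualifier, so plain object equality treats them as different.
val exprId = NamedExpression.newExprId
val withQualifier = AttributeReference("col1", LongType, nullable = false)(
  exprId = exprId, qualifier = Some("a"))
val withoutQualifier = AttributeReference("col1", LongType, nullable = false)(
  exprId = exprId)

val required = SortOrder(withQualifier, Ascending)
val existing = SortOrder(withoutQualifier, Ascending)

required == existing              // false: equality sees the differing qualifier
required.semanticEquals(existing) // true with this patch: same column, same direction
```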
--- .../sql/catalyst/expressions/SortOrder.scala | 3 ++ .../exchange/EnsureRequirements.scala | 11 ++++- .../spark/sql/execution/PlannerSuite.scala | 40 ++++++++++++++++++- 3 files changed, 52 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala index de779ed3702d3..f498f357924d0 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala @@ -61,6 +61,9 @@ case class SortOrder(child: Expression, direction: SortDirection) override def sql: String = child.sql + " " + direction.sql def isAscending: Boolean = direction == Ascending + + def semanticEquals(other: SortOrder): Boolean = + (direction == other.direction) && child.semanticEquals(other.child) } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala index 951051c4df2f7..fee7010e8e033 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala @@ -250,7 +250,16 @@ case class EnsureRequirements(conf: SQLConf) extends Rule[SparkPlan] { children = children.zip(requiredChildOrderings).map { case (child, requiredOrdering) => if (requiredOrdering.nonEmpty) { // If child.outputOrdering is [a, b] and requiredOrdering is [a], we do not need to sort. - if (requiredOrdering != child.outputOrdering.take(requiredOrdering.length)) { + val orderingMatched = if (requiredOrdering.length > child.outputOrdering.length) { + false + } else { + requiredOrdering.zip(child.outputOrdering).forall { + case (requiredOrder, childOutputOrder) => + requiredOrder.semanticEquals(childOutputOrder) + } + } + + if (!orderingMatched) { SortExec(requiredOrdering, global = false, child = child) } else { child diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala index 436ff59c4d3f7..07efc72bf6296 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD import org.apache.spark.sql.{execution, DataFrame, Row} import org.apache.spark.sql.catalyst.InternalRow -import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, Literal, SortOrder} +import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.Inner import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Repartition} import org.apache.spark.sql.catalyst.plans.physical._ @@ -444,6 +444,44 @@ class PlannerSuite extends SharedSQLContext { } } + test("EnsureRequirements skips sort when required ordering is semantically equal to " + + "existing ordering") { + val exprId: ExprId = NamedExpression.newExprId + val attribute1 = + AttributeReference( + name = "col1", + dataType = LongType, + nullable = false + ) (exprId = exprId, + qualifier = Some("col1_qualifier") + ) + + val attribute2 = + AttributeReference( + name = "col1", + dataType = LongType, + nullable = false + ) (exprId = exprId) + + val orderingA1 = 
SortOrder(attribute1, Ascending) + val orderingA2 = SortOrder(attribute2, Ascending) + + assert(orderingA1 != orderingA2, s"$orderingA1 should NOT equal to $orderingA2") + assert(orderingA1.semanticEquals(orderingA2), + s"$orderingA1 should be semantically equal to $orderingA2") + + val inputPlan = DummySparkPlan( + children = DummySparkPlan(outputOrdering = Seq(orderingA1)) :: Nil, + requiredChildOrdering = Seq(Seq(orderingA2)), + requiredChildDistribution = Seq(UnspecifiedDistribution) + ) + val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + assertDistributionRequirementsAreSatisfied(outputPlan) + if (outputPlan.collect { case s: SortExec => true }.nonEmpty) { + fail(s"No sorts should have been added:\n$outputPlan") + } + } + // This is a regression test for SPARK-11135 test("EnsureRequirements adds sort when required ordering isn't a prefix of existing ordering") { val orderingA = SortOrder(Literal(1), Ascending) From 1a48c0047bbdb6328c3ac5ec617a5e35e244d66d Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Mon, 29 Aug 2016 10:46:26 +0100 Subject: [PATCH 128/270] [BUILD] Closes some stale PRs. ## What changes were proposed in this pull request? Closes #10995 Closes #13658 Closes #14505 Closes #14536 Closes #12753 Closes #14449 Closes #12694 Closes #12695 Closes #14810 Closes #10572 ## How was this patch tested? N/A Author: Sean Owen Closes #14849 from srowen/CloseStalePRs. From 08913ce0002a80a989489a31b7353f5ec4a5849f Mon Sep 17 00:00:00 2001 From: "Seigneurin, Alexis (CONT)" Date: Mon, 29 Aug 2016 13:12:10 +0100 Subject: [PATCH 129/270] fixed a typo idempotant -> idempotent Author: Seigneurin, Alexis (CONT) Closes #14833 from aseigneurin/fix-typo. --- docs/structured-streaming-programming-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/structured-streaming-programming-guide.md b/docs/structured-streaming-programming-guide.md index 090b14f4ce2bc..8a88e06ebde56 100644 --- a/docs/structured-streaming-programming-guide.md +++ b/docs/structured-streaming-programming-guide.md @@ -406,7 +406,7 @@ Furthermore, this model naturally handles data that has arrived later than expec ## Fault Tolerance Semantics Delivering end-to-end exactly-once semantics was one of key goals behind the design of Structured Streaming. To achieve that, we have designed the Structured Streaming sources, the sinks and the execution engine to reliably track the exact progress of the processing so that it can handle any kind of failure by restarting and/or reprocessing. Every streaming source is assumed to have offsets (similar to Kafka offsets, or Kinesis sequence numbers) -to track the read position in the stream. The engine uses checkpointing and write ahead logs to record the offset range of the data being processed in each trigger. The streaming sinks are designed to be idempotent for handling reprocessing. Together, using replayable sources and idempotant sinks, Structured Streaming can ensure **end-to-end exactly-once semantics** under any failure. +to track the read position in the stream. The engine uses checkpointing and write ahead logs to record the offset range of the data being processed in each trigger. The streaming sinks are designed to be idempotent for handling reprocessing. Together, using replayable sources and idempotent sinks, Structured Streaming can ensure **end-to-end exactly-once semantics** under any failure. 
# API using Datasets and DataFrames
Since Spark 2.0, DataFrames and Datasets can represent static, bounded data, as well as streaming, unbounded data. Similar to static Datasets/DataFrames, you can use the common entry point `SparkSession` ([Scala](api/scala/index.html#org.apache.spark.sql.SparkSession)/

From 6a0fda2c0590b455e8713da79cd5f2413e5d0f28 Mon Sep 17 00:00:00 2001
From: Junyang Qian
Date: Mon, 29 Aug 2016 10:23:10 -0700
Subject: [PATCH 130/270] [SPARKR][MINOR] Fix LDA doc

## What changes were proposed in this pull request?

This PR tries to fix the name of the `SparkDataFrame` used in the example. Also, it gives a reference URL for an example data file so that users can play with it.

## How was this patch tested?

Manual test.

Author: Junyang Qian

Closes #14853 from junyangq/SPARKR-FixLDADoc.
---
R/pkg/R/mllib.R | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R
index 6808aaea8cac3..64d19fab7ec8f 100644
--- a/R/pkg/R/mllib.R
+++ b/R/pkg/R/mllib.R
@@ -994,18 +994,22 @@ setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = "formula
#' @export
#' @examples
#' \dontrun{
-#' text <- read.df("path/to/data", source = "libsvm")
+#' # nolint start
+#' # An example "path/to/file" can be
+#' # paste0(Sys.getenv("SPARK_HOME"), "/data/mllib/sample_lda_libsvm_data.txt")
+#' # nolint end
+#' text <- read.df("path/to/file", source = "libsvm")
#' model <- spark.lda(data = text, optimizer = "em")
#'
#' # get a summary of the model
#' summary(model)
#'
#' # compute posterior probabilities
-#' posterior <- spark.posterior(model, df)
+#' posterior <- spark.posterior(model, text)
#' showDF(posterior)
#'
#' # compute perplexity
-#' perplexity <- spark.perplexity(model, df)
+#' perplexity <- spark.perplexity(model, text)
#'
#' # save and load the model
#' path <- "path/to/model"

From 48caec2516ef35bfa1a3de2dc0a80d0dc819e6bd Mon Sep 17 00:00:00 2001
From: Davies Liu
Date: Mon, 29 Aug 2016 11:23:53 -0700
Subject: [PATCH 131/270] [SPARK-17063] [SQL] Improve performance of MSCK REPAIR TABLE with Hive metastore

## What changes were proposed in this pull request?

This PR splits the single `createPartitions()` call into smaller batches, which could prevent the Hive metastore from OOMing (caused by millions of partitions). It will also try to gather all the fast stats (number of files and total size of all files) in parallel, to avoid the bottleneck of listing the files in the metastore sequentially; the parallel gathering is controlled by spark.sql.hive.gatherFastStats (enabled by default).

## How was this patch tested?

Tested locally with 10000 partitions and 100 files with an embedded metastore. Without gathering fast stats in parallel, adding the partitions took 153 seconds. After enabling it, gathering the fast stats took about 34 seconds and adding these partitions took 25 seconds (most of the time spent in the object store), 59 seconds in total, i.e. 2.5X faster (with a larger cluster, gathering will be much faster).

Author: Davies Liu

Closes #14607 from davies/repair_batch.
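To make the batching idea concrete, here is a minimal, self-contained sketch of the pattern described above; the `Partition` case class, the batch size, and the `createPartitions` stub are illustrative stand-ins for the catalog types and the metastore call shown in the diff below:

```scala
// Stand-ins for the real catalog types and the Hive metastore RPC.
case class Partition(spec: Map[String, String], location: String)
def createPartitions(parts: Seq[Partition]): Unit = ()  // one metastore round trip

val discovered: Seq[Partition] =
  Seq.tabulate(10000)(i => Partition(Map("a" -> i.toString), s"/warehouse/tab1/a=$i"))

// Rather than one createPartitions() call carrying every partition (which can
// overwhelm the Hive metastore), send fixed-size batches. The batches are sent
// sequentially because the Hive client is not thread safe.
val batchSize = 100
discovered.grouped(batchSize).foreach(batch => createPartitions(batch))
```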
--- .../sql/catalyst/catalog/interface.scala | 4 +- .../spark/sql/execution/command/ddl.scala | 156 +++++++++++++++--- .../apache/spark/sql/internal/SQLConf.scala | 10 ++ .../sql/execution/command/DDLSuite.scala | 13 +- .../sql/hive/client/HiveClientImpl.scala | 4 +- .../spark/sql/hive/client/HiveShim.scala | 8 +- .../sql/hive/execution/HiveDDLSuite.scala | 38 +++++ 7 files changed, 200 insertions(+), 33 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala index 83e01f95c06af..8408d765d4918 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala @@ -81,10 +81,12 @@ object CatalogStorageFormat { * * @param spec partition spec values indexed by column name * @param storage storage format of the partition + * @param parameters some parameters for the partition, for example, stats. */ case class CatalogTablePartition( spec: CatalogTypes.TablePartitionSpec, - storage: CatalogStorageFormat) + storage: CatalogStorageFormat, + parameters: Map[String, String] = Map.empty) /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala index 3817f919f3a5a..53fb684eb5ce3 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala @@ -17,12 +17,13 @@ package org.apache.spark.sql.execution.command -import scala.collection.GenSeq +import scala.collection.{GenMap, GenSeq} import scala.collection.parallel.ForkJoinTaskSupport import scala.concurrent.forkjoin.ForkJoinPool import scala.util.control.NonFatal -import org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter} +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.fs._ import org.apache.hadoop.mapred.{FileInputFormat, JobConf} import org.apache.spark.sql.{AnalysisException, Row, SparkSession} @@ -32,6 +33,7 @@ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference} import org.apache.spark.sql.execution.datasources.PartitioningUtils import org.apache.spark.sql.types._ +import org.apache.spark.util.SerializableConfiguration // Note: The definition of these commands are based on the ones described in // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL @@ -422,6 +424,9 @@ case class AlterTableDropPartitionCommand( } + +case class PartitionStatistics(numFiles: Int, totalSize: Long) + /** * Recover Partitions in ALTER TABLE: recover all the partition in the directory of a table and * update the catalog. 
@@ -435,6 +440,31 @@ case class AlterTableDropPartitionCommand( case class AlterTableRecoverPartitionsCommand( tableName: TableIdentifier, cmd: String = "ALTER TABLE RECOVER PARTITIONS") extends RunnableCommand { + + // These are list of statistics that can be collected quickly without requiring a scan of the data + // see https://github.com/apache/hive/blob/master/ + // common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java + val NUM_FILES = "numFiles" + val TOTAL_SIZE = "totalSize" + val DDL_TIME = "transient_lastDdlTime" + + private def getPathFilter(hadoopConf: Configuration): PathFilter = { + // Dummy jobconf to get to the pathFilter defined in configuration + // It's very expensive to create a JobConf(ClassUtil.findContainingJar() is slow) + val jobConf = new JobConf(hadoopConf, this.getClass) + val pathFilter = FileInputFormat.getInputPathFilter(jobConf) + new PathFilter { + override def accept(path: Path): Boolean = { + val name = path.getName + if (name != "_SUCCESS" && name != "_temporary" && !name.startsWith(".")) { + pathFilter == null || pathFilter.accept(path) + } else { + false + } + } + } + } + override def run(spark: SparkSession): Seq[Row] = { val catalog = spark.sessionState.catalog if (!catalog.tableExists(tableName)) { @@ -449,10 +479,6 @@ case class AlterTableRecoverPartitionsCommand( throw new AnalysisException( s"Operation not allowed: $cmd on datasource tables: $tableName") } - if (table.tableType != CatalogTableType.EXTERNAL) { - throw new AnalysisException( - s"Operation not allowed: $cmd only works on external tables: $tableName") - } if (table.partitionColumnNames.isEmpty) { throw new AnalysisException( s"Operation not allowed: $cmd only works on partitioned tables: $tableName") @@ -463,19 +489,26 @@ case class AlterTableRecoverPartitionsCommand( } val root = new Path(table.storage.locationUri.get) + logInfo(s"Recover all the partitions in $root") val fs = root.getFileSystem(spark.sparkContext.hadoopConfiguration) - // Dummy jobconf to get to the pathFilter defined in configuration - // It's very expensive to create a JobConf(ClassUtil.findContainingJar() is slow) - val jobConf = new JobConf(spark.sparkContext.hadoopConfiguration, this.getClass) - val pathFilter = FileInputFormat.getInputPathFilter(jobConf) + + val threshold = spark.conf.get("spark.rdd.parallelListingThreshold", "10").toInt + val hadoopConf = spark.sparkContext.hadoopConfiguration + val pathFilter = getPathFilter(hadoopConf) val partitionSpecsAndLocs = scanPartitions( - spark, fs, pathFilter, root, Map(), table.partitionColumnNames.map(_.toLowerCase)) - val parts = partitionSpecsAndLocs.map { case (spec, location) => - // inherit table storage format (possibly except for location) - CatalogTablePartition(spec, table.storage.copy(locationUri = Some(location.toUri.toString))) + spark, fs, pathFilter, root, Map(), table.partitionColumnNames.map(_.toLowerCase), threshold) + val total = partitionSpecsAndLocs.length + logInfo(s"Found $total partitions in $root") + + val partitionStats = if (spark.sqlContext.conf.gatherFastStats) { + gatherPartitionStats(spark, partitionSpecsAndLocs, fs, pathFilter, threshold) + } else { + GenMap.empty[String, PartitionStatistics] } - spark.sessionState.catalog.createPartitions(tableName, - parts.toArray[CatalogTablePartition], ignoreIfExists = true) + logInfo(s"Finished to gather the fast stats for all $total partitions.") + + addPartitions(spark, table, partitionSpecsAndLocs, partitionStats) + logInfo(s"Recovered all partitions ($total).") 
Seq.empty[Row] } @@ -487,15 +520,16 @@ case class AlterTableRecoverPartitionsCommand( filter: PathFilter, path: Path, spec: TablePartitionSpec, - partitionNames: Seq[String]): GenSeq[(TablePartitionSpec, Path)] = { - if (partitionNames.length == 0) { + partitionNames: Seq[String], + threshold: Int): GenSeq[(TablePartitionSpec, Path)] = { + if (partitionNames.isEmpty) { return Seq(spec -> path) } - val statuses = fs.listStatus(path) - val threshold = spark.conf.get("spark.rdd.parallelListingThreshold", "10").toInt + val statuses = fs.listStatus(path, filter) val statusPar: GenSeq[FileStatus] = if (partitionNames.length > 1 && statuses.length > threshold || partitionNames.length > 2) { + // parallelize the list of partitions here, then we can have better parallelism later. val parArray = statuses.par parArray.tasksupport = evalTaskSupport parArray @@ -510,21 +544,89 @@ case class AlterTableRecoverPartitionsCommand( // TODO: Validate the value val value = PartitioningUtils.unescapePathName(ps(1)) // comparing with case-insensitive, but preserve the case - if (columnName == partitionNames(0)) { - scanPartitions( - spark, fs, filter, st.getPath, spec ++ Map(columnName -> value), partitionNames.drop(1)) + if (columnName == partitionNames.head) { + scanPartitions(spark, fs, filter, st.getPath, spec ++ Map(columnName -> value), + partitionNames.drop(1), threshold) } else { - logWarning(s"expect partition column ${partitionNames(0)}, but got ${ps(0)}, ignore it") + logWarning(s"expect partition column ${partitionNames.head}, but got ${ps(0)}, ignore it") Seq() } } else { - if (name != "_SUCCESS" && name != "_temporary" && !name.startsWith(".")) { - logWarning(s"ignore ${new Path(path, name)}") - } + logWarning(s"ignore ${new Path(path, name)}") Seq() } } } + + private def gatherPartitionStats( + spark: SparkSession, + partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)], + fs: FileSystem, + pathFilter: PathFilter, + threshold: Int): GenMap[String, PartitionStatistics] = { + if (partitionSpecsAndLocs.length > threshold) { + val hadoopConf = spark.sparkContext.hadoopConfiguration + val serializableConfiguration = new SerializableConfiguration(hadoopConf) + val serializedPaths = partitionSpecsAndLocs.map(_._2.toString).toArray + + // Set the number of parallelism to prevent following file listing from generating many tasks + // in case of large #defaultParallelism. + val numParallelism = Math.min(serializedPaths.length, + Math.min(spark.sparkContext.defaultParallelism, 10000)) + // gather the fast stats for all the partitions otherwise Hive metastore will list all the + // files for all the new partitions in sequential way, which is super slow. 
+ logInfo(s"Gather the fast stats in parallel using $numParallelism tasks.") + spark.sparkContext.parallelize(serializedPaths, numParallelism) + .mapPartitions { paths => + val pathFilter = getPathFilter(serializableConfiguration.value) + paths.map(new Path(_)).map{ path => + val fs = path.getFileSystem(serializableConfiguration.value) + val statuses = fs.listStatus(path, pathFilter) + (path.toString, PartitionStatistics(statuses.length, statuses.map(_.getLen).sum)) + } + }.collectAsMap() + } else { + partitionSpecsAndLocs.map { case (_, location) => + val statuses = fs.listStatus(location, pathFilter) + (location.toString, PartitionStatistics(statuses.length, statuses.map(_.getLen).sum)) + }.toMap + } + } + + private def addPartitions( + spark: SparkSession, + table: CatalogTable, + partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)], + partitionStats: GenMap[String, PartitionStatistics]): Unit = { + val total = partitionSpecsAndLocs.length + var done = 0L + // Hive metastore may not have enough memory to handle millions of partitions in single RPC, + // we should split them into smaller batches. Since Hive client is not thread safe, we cannot + // do this in parallel. + val batchSize = 100 + partitionSpecsAndLocs.toIterator.grouped(batchSize).foreach { batch => + val now = System.currentTimeMillis() / 1000 + val parts = batch.map { case (spec, location) => + val params = partitionStats.get(location.toString).map { + case PartitionStatistics(numFiles, totalSize) => + // This two fast stat could prevent Hive metastore to list the files again. + Map(NUM_FILES -> numFiles.toString, + TOTAL_SIZE -> totalSize.toString, + // Workaround a bug in HiveMetastore that try to mutate a read-only parameters. + // see metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java + DDL_TIME -> now.toString) + }.getOrElse(Map.empty) + // inherit table storage format (possibly except for location) + CatalogTablePartition( + spec, + table.storage.copy(locationUri = Some(location.toUri.toString)), + params) + } + spark.sessionState.catalog.createPartitions(tableName, parts, ignoreIfExists = true) + done += parts.length + logDebug(s"Recovered ${parts.length} partitions ($done/$total so far)") + } + } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index f2b1afd71adc6..91988270ada8d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -310,6 +310,14 @@ object SQLConf { .booleanConf .createWithDefault(false) + val GATHER_FASTSTAT = SQLConfigBuilder("spark.sql.hive.gatherFastStats") + .internal() + .doc("When true, fast stats (number of files and total size of all files) will be gathered" + + " in parallel while repairing table partitions to avoid the sequential listing in Hive" + + " metastore.") + .booleanConf + .createWithDefault(true) + // This is used to control the when we will split a schema's JSON string to multiple pieces // in order to fit the JSON string in metastore's table property (by default, the value has // a length restriction of 4000 characters). 
We will split the JSON string of a schema @@ -608,6 +616,8 @@ private[sql] class SQLConf extends Serializable with CatalystConf with Logging { def metastorePartitionPruning: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING) + def gatherFastStats: Boolean = getConf(GATHER_FASTSTAT) + def optimizerMetadataOnly: Boolean = getConf(OPTIMIZER_METADATA_ONLY) def wholeStageEnabled: Boolean = getConf(WHOLESTAGE_CODEGEN_ENABLED) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index b343454b12d86..0073659a31541 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -824,13 +824,13 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { } test("alter table: recover partitions (sequential)") { - withSQLConf("spark.rdd.parallelListingThreshold" -> "1") { + withSQLConf("spark.rdd.parallelListingThreshold" -> "10") { testRecoverPartitions() } } test("alter table: recover partition (parallel)") { - withSQLConf("spark.rdd.parallelListingThreshold" -> "10") { + withSQLConf("spark.rdd.parallelListingThreshold" -> "1") { testRecoverPartitions() } } @@ -853,7 +853,14 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { val fs = root.getFileSystem(spark.sparkContext.hadoopConfiguration) // valid fs.mkdirs(new Path(new Path(root, "a=1"), "b=5")) + fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv")) // file + fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS")) // file fs.mkdirs(new Path(new Path(root, "A=2"), "B=6")) + fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv")) // file + fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv")) // file + fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile")) // file + fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary")) + // invalid fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order @@ -867,6 +874,8 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { sql("ALTER TABLE tab1 RECOVER PARTITIONS") assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2)) + assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1") + assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2") } finally { fs.delete(root, true) } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index 81d5a124e9d4a..b45ad30dcae41 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -829,6 +829,8 @@ private[hive] class HiveClientImpl( serde = Option(apiPartition.getSd.getSerdeInfo.getSerializationLib), compressed = apiPartition.getSd.isCompressed, properties = Option(apiPartition.getSd.getSerdeInfo.getParameters) - .map(_.asScala.toMap).orNull)) + .map(_.asScala.toMap).orNull), + parameters = + if (hp.getParameters() != null) hp.getParameters().asScala.toMap else Map.empty) } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala index 
41527fcd05154..32387707612f4 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala @@ -267,6 +267,7 @@ private[client] class Shim_v0_12 extends Shim with Logging { val table = hive.getTable(database, tableName) parts.foreach { s => val location = s.storage.locationUri.map(new Path(table.getPath, _)).orNull + val params = if (s.parameters.nonEmpty) s.parameters.asJava else null val spec = s.spec.asJava if (hive.getPartition(table, spec, false) != null && ignoreIfExists) { // Ignore this partition since it already exists and ignoreIfExists == true @@ -280,7 +281,7 @@ private[client] class Shim_v0_12 extends Shim with Logging { table, spec, location, - null, // partParams + params, // partParams null, // inputFormat null, // outputFormat -1: JInteger, // numBuckets @@ -459,8 +460,11 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { parts: Seq[CatalogTablePartition], ignoreIfExists: Boolean): Unit = { val addPartitionDesc = new AddPartitionDesc(db, table, ignoreIfExists) - parts.foreach { s => + parts.zipWithIndex.foreach { case (s, i) => addPartitionDesc.addPartition(s.spec.asJava, s.storage.locationUri.orNull) + if (s.parameters.nonEmpty) { + addPartitionDesc.getPartition(i).setPartParams(s.parameters.asJava) + } } hive.createPartitions(addPartitionDesc) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index f00a99b6d0b3d..9019333d76869 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -378,6 +378,44 @@ class HiveDDLSuite expectedSerdeProps) } + test("MSCK REPAIR RABLE") { + val catalog = spark.sessionState.catalog + val tableIdent = TableIdentifier("tab1") + sql("CREATE TABLE tab1 (height INT, length INT) PARTITIONED BY (a INT, b INT)") + val part1 = Map("a" -> "1", "b" -> "5") + val part2 = Map("a" -> "2", "b" -> "6") + val root = new Path(catalog.getTableMetadata(tableIdent).storage.locationUri.get) + val fs = root.getFileSystem(spark.sparkContext.hadoopConfiguration) + // valid + fs.mkdirs(new Path(new Path(root, "a=1"), "b=5")) + fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv")) // file + fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS")) // file + fs.mkdirs(new Path(new Path(root, "A=2"), "B=6")) + fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv")) // file + fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv")) // file + fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile")) // file + fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary")) + + // invalid + fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name + fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order + fs.mkdirs(new Path(root, "a=4")) // not enough columns + fs.createNewFile(new Path(new Path(root, "a=1"), "b=4")) // file + fs.createNewFile(new Path(new Path(root, "a=1"), "_SUCCESS")) // _SUCCESS + fs.mkdirs(new Path(new Path(root, "a=1"), "_temporary")) // _temporary + fs.mkdirs(new Path(new Path(root, "a=1"), ".b=4")) // start with . 
+ + try { + sql("MSCK REPAIR TABLE tab1") + assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == + Set(part1, part2)) + assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1") + assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2") + } finally { + fs.delete(root, true) + } + } + test("drop table using drop view") { withTable("tab1") { sql("CREATE TABLE tab1(c1 int)") From 736a7911cb0335cdb2b2f6c87f9e3c32047b5bbb Mon Sep 17 00:00:00 2001 From: Shivaram Venkataraman Date: Mon, 29 Aug 2016 12:55:32 -0700 Subject: [PATCH 132/270] [SPARK-16581][SPARKR] Make JVM backend calling functions public ## What changes were proposed in this pull request? This change exposes a public API in SparkR to create objects, call methods on the Spark driver JVM ## How was this patch tested? (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) Unit tests, CRAN checks Author: Shivaram Venkataraman Closes #14775 from shivaram/sparkr-java-api. --- R/pkg/DESCRIPTION | 5 +- R/pkg/NAMESPACE | 4 + R/pkg/R/jvm.R | 117 +++++++++++++++++++++++ R/pkg/inst/tests/testthat/test_jvm_api.R | 43 +++++++++ 4 files changed, 167 insertions(+), 2 deletions(-) create mode 100644 R/pkg/R/jvm.R create mode 100644 R/pkg/inst/tests/testthat/test_jvm_api.R diff --git a/R/pkg/DESCRIPTION b/R/pkg/DESCRIPTION index e5afed2d0a93e..5a83883089e0e 100644 --- a/R/pkg/DESCRIPTION +++ b/R/pkg/DESCRIPTION @@ -2,7 +2,7 @@ Package: SparkR Type: Package Title: R Frontend for Apache Spark Version: 2.0.0 -Date: 2016-07-07 +Date: 2016-08-27 Authors@R: c(person("Shivaram", "Venkataraman", role = c("aut", "cre"), email = "shivaram@cs.berkeley.edu"), person("Xiangrui", "Meng", role = "aut", @@ -11,7 +11,7 @@ Authors@R: c(person("Shivaram", "Venkataraman", role = c("aut", "cre"), email = "felixcheung@apache.org"), person(family = "The Apache Software Foundation", role = c("aut", "cph"))) URL: http://www.apache.org/ http://spark.apache.org/ -BugReports: https://issues.apache.org/jira/secure/CreateIssueDetails!init.jspa?pid=12315420&components=12325400&issuetype=4 +BugReports: https://cwiki.apache.org/confluence/display/SPARK/Contributing+to+Spark#ContributingtoSpark-ContributingBugReports Depends: R (>= 3.0), methods @@ -39,6 +39,7 @@ Collate: 'deserialize.R' 'functions.R' 'install.R' + 'jvm.R' 'mllib.R' 'serialize.R' 'sparkR.R' diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index ad587a6b7d03a..5e625b2d8dbb4 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -364,4 +364,8 @@ S3method(structField, jobj) S3method(structType, jobj) S3method(structType, structField) +export("sparkR.newJObject") +export("sparkR.callJMethod") +export("sparkR.callJStatic") + export("install.spark") diff --git a/R/pkg/R/jvm.R b/R/pkg/R/jvm.R new file mode 100644 index 0000000000000..bb5c77544a3da --- /dev/null +++ b/R/pkg/R/jvm.R @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Methods to directly access the JVM running the SparkR backend. + +#' Call Java Methods +#' +#' Call a Java method in the JVM running the Spark driver. The return +#' values are automatically converted to R objects for simple objects. Other +#' values are returned as "jobj" which are references to objects on JVM. +#' +#' @details +#' This is a low level function to access the JVM directly and should only be used +#' for advanced use cases. The arguments and return values that are primitive R +#' types (like integer, numeric, character, lists) are automatically translated to/from +#' Java types (like Integer, Double, String, Array). A full list can be found in +#' serialize.R and deserialize.R in the Apache Spark code base. +#' +#' @param x object to invoke the method on. Should be a "jobj" created by newJObject. +#' @param methodName method name to call. +#' @param ... parameters to pass to the Java method. +#' @return the return value of the Java method. Either returned as a R object +#' if it can be deserialized or returned as a "jobj". See details section for more. +#' @export +#' @seealso \link{sparkR.callJStatic}, \link{sparkR.newJObject} +#' @rdname sparkR.callJMethod +#' @examples +#' \dontrun{ +#' sparkR.session() # Need to have a Spark JVM running before calling newJObject +#' # Create a Java ArrayList and populate it +#' jarray <- sparkR.newJObject("java.util.ArrayList") +#' sparkR.callJMethod(jarray, "add", 42L) +#' sparkR.callJMethod(jarray, "get", 0L) # Will print 42 +#' } +#' @note sparkR.callJMethod since 2.0.1 +sparkR.callJMethod <- function(x, methodName, ...) { + callJMethod(x, methodName, ...) +} + +#' Call Static Java Methods +#' +#' Call a static method in the JVM running the Spark driver. The return +#' value is automatically converted to R objects for simple objects. Other +#' values are returned as "jobj" which are references to objects on JVM. +#' +#' @details +#' This is a low level function to access the JVM directly and should only be used +#' for advanced use cases. The arguments and return values that are primitive R +#' types (like integer, numeric, character, lists) are automatically translated to/from +#' Java types (like Integer, Double, String, Array). A full list can be found in +#' serialize.R and deserialize.R in the Apache Spark code base. +#' +#' @param x fully qualified Java class name that contains the static method to invoke. +#' @param methodName name of static method to invoke. +#' @param ... parameters to pass to the Java method. +#' @return the return value of the Java method. Either returned as a R object +#' if it can be deserialized or returned as a "jobj". See details section for more. 
+#' @export +#' @seealso \link{sparkR.callJMethod}, \link{sparkR.newJObject} +#' @rdname sparkR.callJStatic +#' @examples +#' \dontrun{ +#' sparkR.session() # Need to have a Spark JVM running before calling callJStatic +#' sparkR.callJStatic("java.lang.System", "currentTimeMillis") +#' sparkR.callJStatic("java.lang.System", "getProperty", "java.home") +#' } +#' @note sparkR.callJStatic since 2.0.1 +sparkR.callJStatic <- function(x, methodName, ...) { + callJStatic(x, methodName, ...) +} + +#' Create Java Objects +#' +#' Create a new Java object in the JVM running the Spark driver. The return +#' value is automatically converted to an R object for simple objects. Other +#' values are returned as a "jobj" which is a reference to an object on JVM. +#' +#' @details +#' This is a low level function to access the JVM directly and should only be used +#' for advanced use cases. The arguments and return values that are primitive R +#' types (like integer, numeric, character, lists) are automatically translated to/from +#' Java types (like Integer, Double, String, Array). A full list can be found in +#' serialize.R and deserialize.R in the Apache Spark code base. +#' +#' @param x fully qualified Java class name. +#' @param ... arguments to be passed to the constructor. +#' @return the object created. Either returned as a R object +#' if it can be deserialized or returned as a "jobj". See details section for more. +#' @export +#' @seealso \link{sparkR.callJMethod}, \link{sparkR.callJStatic} +#' @rdname sparkR.newJObject +#' @examples +#' \dontrun{ +#' sparkR.session() # Need to have a Spark JVM running before calling newJObject +#' # Create a Java ArrayList and populate it +#' jarray <- sparkR.newJObject("java.util.ArrayList") +#' sparkR.callJMethod(jarray, "add", 42L) +#' sparkR.callJMethod(jarray, "get", 0L) # Will print 42 +#' } +#' @note sparkR.newJObject since 2.0.1 +sparkR.newJObject <- function(x, ...) { + newJObject(x, ...) +} diff --git a/R/pkg/inst/tests/testthat/test_jvm_api.R b/R/pkg/inst/tests/testthat/test_jvm_api.R new file mode 100644 index 0000000000000..151c52906bdb8 --- /dev/null +++ b/R/pkg/inst/tests/testthat/test_jvm_api.R @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+context("JVM API")
+
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
+
+test_that("Create and call methods on object", {
+ jarr <- newJObject("java.util.ArrayList")
+ # Add an element to the array
+ callJMethod(jarr, "add", 1L)
+ # Check if get returns the same element
+ expect_equal(callJMethod(jarr, "get", 0L), 1L)
+})
+
+test_that("Call static methods", {
+ # Convert a boolean to a string
+ strTrue <- callJStatic("java.lang.String", "valueOf", TRUE)
+ expect_equal(strTrue, "true")
+})
+
+test_that("Manually garbage collect objects", {
+ jarr <- newJObject("java.util.ArrayList")
+ cleanup.jobj(jarr)
+ # Using a jobj after GC should throw an error
+ expect_error(print(jarr), "Error in invokeJava.*")
+})
+
+sparkR.session.stop()

From 48b459ddd58affd5519856cb6e204398b7739a2a Mon Sep 17 00:00:00 2001
From: Josh Rosen
Date: Tue, 30 Aug 2016 09:58:00 +0800
Subject: [PATCH 133/270] [SPARK-17301][SQL] Remove unused classTag field from AtomicType base class

There's an unused `classTag` val in the AtomicType base class which is causing unnecessary slowness in deserialization because it needs to grab ScalaReflectionLock and create a new runtime reflection mirror. Removing this unused code gives a small but measurable performance boost in SQL task deserialization.

Author: Josh Rosen

Closes #14869 from JoshRosen/remove-unused-classtag.
---
.../org/apache/spark/sql/types/AbstractDataType.scala | 10 +---------
1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/AbstractDataType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/AbstractDataType.scala
index 65eae869d40d1..1981fd8f0a1b5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/AbstractDataType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/AbstractDataType.scala
@@ -17,13 +17,10 @@
package org.apache.spark.sql.types
-import scala.reflect.ClassTag
-import scala.reflect.runtime.universe.{runtimeMirror, TypeTag}
+import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.annotation.DeveloperApi
-import org.apache.spark.sql.catalyst.ScalaReflectionLock
import org.apache.spark.sql.catalyst.expressions.Expression
-import org.apache.spark.util.Utils
/**
* A non-concrete data type, reserved for internal uses.
@@ -130,11 +127,6 @@ protected[sql] abstract class AtomicType extends DataType {
private[sql] type InternalType
private[sql] val tag: TypeTag[InternalType]
private[sql] val ordering: Ordering[InternalType]
-
- @transient private[sql] val classTag = ScalaReflectionLock.synchronized {
- val mirror = runtimeMirror(Utils.getSparkClassLoader)
- ClassTag[InternalType](mirror.runtimeClass(tag.tpe))
- }
}

From 8fb445d9bdead6f0ff2bd9879145fe688b3bdc80 Mon Sep 17 00:00:00 2001
From: frreiss
Date: Mon, 29 Aug 2016 23:33:00 -0700
Subject: [PATCH 134/270] [SPARK-17303] Added spark-warehouse to dev/.rat-excludes

## What changes were proposed in this pull request?

Excludes the `spark-warehouse` directory from the Apache RAT checks that dev/run-tests performs. `spark-warehouse` is created by some of the Spark SQL tests, as well as by `bin/spark-sql`.

## How was this patch tested?

Ran dev/run-tests twice. The second time, the script failed the RAT checks because the first iteration had created the `spark-warehouse` directory. Made the change in this PR. Ran dev/run-tests a third time; the RAT checks succeeded.

Author: frreiss

Closes #14870 from frreiss/fred-17303.
--- dev/.rat-excludes | 1 + 1 file changed, 1 insertion(+) diff --git a/dev/.rat-excludes b/dev/.rat-excludes index 9171f3806e42d..a3efddeaa515a 100644 --- a/dev/.rat-excludes +++ b/dev/.rat-excludes @@ -101,3 +101,4 @@ org.apache.spark.scheduler.ExternalClusterManager .*\.sql .Rbuildignore org.apache.spark.deploy.yarn.security.ServiceCredentialProvider +spark-warehouse From 94922d79e9f90fac3777db0974ccf7566b8ac3b3 Mon Sep 17 00:00:00 2001 From: Takeshi YAMAMURO Date: Tue, 30 Aug 2016 16:43:47 +0800 Subject: [PATCH 135/270] [SPARK-17289][SQL] Fix a bug to satisfy sort requirements in partial aggregations ## What changes were proposed in this pull request? Partial aggregations are generated in `EnsureRequirements`, but the planner fails to check if partial aggregation satisfies sort requirements. For the following query: ``` val df2 = (0 to 1000).map(x => (x % 2, x.toString)).toDF("a", "b").createOrReplaceTempView("t2") spark.sql("select max(b) from t2 group by a").explain(true) ``` Now, the SortAggregator won't insert Sort operator before partial aggregation, this will break sort-based partial aggregation. ``` == Physical Plan == SortAggregate(key=[a#5], functions=[max(b#6)], output=[max(b)#17]) +- *Sort [a#5 ASC], false, 0 +- Exchange hashpartitioning(a#5, 200) +- SortAggregate(key=[a#5], functions=[partial_max(b#6)], output=[a#5, max#19]) +- LocalTableScan [a#5, b#6] ``` Actually, a correct plan is: ``` == Physical Plan == SortAggregate(key=[a#5], functions=[max(b#6)], output=[max(b)#17]) +- *Sort [a#5 ASC], false, 0 +- Exchange hashpartitioning(a#5, 200) +- SortAggregate(key=[a#5], functions=[partial_max(b#6)], output=[a#5, max#19]) +- *Sort [a#5 ASC], false, 0 +- LocalTableScan [a#5, b#6] ``` ## How was this patch tested? Added tests in `PlannerSuite`. Author: Takeshi YAMAMURO Closes #14865 from maropu/SPARK-17289. --- .../exchange/EnsureRequirements.scala | 3 ++- .../spark/sql/execution/PlannerSuite.scala | 22 ++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala index fee7010e8e033..66e99ded24886 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala @@ -164,7 +164,8 @@ case class EnsureRequirements(conf: SQLConf) extends Rule[SparkPlan] { // If an aggregation needs a shuffle and support partial aggregations, a map-side partial // aggregation and a shuffle are added as children. 
val (mergeAgg, mapSideAgg) = AggUtils.createMapMergeAggregatePair(operator) - (mergeAgg, createShuffleExchange(requiredChildDistributions.head, mapSideAgg) :: Nil) + (mergeAgg, createShuffleExchange( + requiredChildDistributions.head, ensureDistributionAndOrdering(mapSideAgg)) :: Nil) case _ => // Ensure that the operator's children satisfy their output distribution requirements: val childrenWithDist = operator.children.zip(requiredChildDistributions) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala index 07efc72bf6296..b0aa3378e5f66 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala @@ -18,12 +18,13 @@ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{execution, DataFrame, Row} +import org.apache.spark.sql.{execution, Row} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.Inner import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Repartition} import org.apache.spark.sql.catalyst.plans.physical._ +import org.apache.spark.sql.execution.aggregate.SortAggregateExec import org.apache.spark.sql.execution.columnar.InMemoryRelation import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReusedExchangeExec, ReuseExchange, ShuffleExchange} import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec} @@ -70,6 +71,25 @@ class PlannerSuite extends SharedSQLContext { s"The plan of query $query does not have partial aggregations.") } + test("SPARK-17289 sort-based partial aggregation needs a sort operator as a child") { + withTempView("testSortBasedPartialAggregation") { + val schema = StructType( + StructField(s"key", IntegerType, true) :: StructField(s"value", StringType, true) :: Nil) + val rowRDD = sparkContext.parallelize((0 until 1000).map(d => Row(d % 2, d.toString))) + spark.createDataFrame(rowRDD, schema) + .createOrReplaceTempView("testSortBasedPartialAggregation") + + // This test assumes a query below uses sort-based aggregations + val planned = sql("SELECT MAX(value) FROM testSortBasedPartialAggregation GROUP BY key") + .queryExecution.executedPlan + // This line extracts both SortAggregate and Sort operators + val extractedOps = planned.collect { case n if n.nodeName contains "Sort" => n } + val aggOps = extractedOps.collect { case n if n.nodeName contains "SortAggregate" => n } + assert(extractedOps.size == 4 && aggOps.size == 2, + s"The plan $planned does not have correct sort-based partial aggregate pairs.") + } + } + test("non-partial aggregation for aggregates") { withTempView("testNonPartialAggregation") { val schema = StructType(StructField(s"value", IntegerType, true) :: Nil) From bca79c823024c41731ec89f96a3722d7b1c99639 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Tue, 30 Aug 2016 17:27:00 +0800 Subject: [PATCH 136/270] [SPARK-17234][SQL] Table Existence Checking when Index Table with the Same Name Exists ### What changes were proposed in this pull request? Hive Index tables are not supported by Spark SQL. Thus, we issue an exception when users try to access Hive Index tables. When the internal function `tableExists` tries to access Hive Index tables, it always gets the same error message: ```Hive index table is not supported```. 
This message could be confusing to users, since their SQL operations could be completely unrelated to Hive Index tables. For example, when users try to alter a table to a new name and there exists an index table with the same name, the expected exception should be a `TableAlreadyExistsException`. This PR made the following changes: - Introduced a new `AnalysisException` type: `SQLFeatureNotSupportedException`. When users try to access an `Index Table`, we will issue a `SQLFeatureNotSupportedException`. - `tableExists` returns `true` when hitting a `SQLFeatureNotSupportedException` and the feature is `Hive index table`. - Add a checking `requireTableNotExists` for `SessionCatalog`'s `createTable` API; otherwise, the current implementation relies on the Hive's internal checking. ### How was this patch tested? Added a test case Author: gatorsmile Closes #14801 from gatorsmile/tableExists. --- .../catalog/ExternalCatalogSuite.scala | 10 ++++++ .../spark/sql/hive/HiveExternalCatalog.scala | 7 +++- .../spark/sql/hive/client/HiveClient.scala | 3 ++ .../sql/hive/client/HiveClientImpl.scala | 4 +++ .../spark/sql/hive/client/VersionsSuite.scala | 6 ++++ .../sql/hive/execution/HiveDDLSuite.scala | 33 +++++++++++++++++++ 6 files changed, 62 insertions(+), 1 deletion(-) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala index 54365fd978ab9..19f8665383315 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala @@ -25,6 +25,7 @@ import org.scalatest.BeforeAndAfterEach import org.apache.spark.SparkFunSuite import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} +import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.types.StructType import org.apache.spark.util.Utils @@ -162,6 +163,15 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac assert(actual.tableType === CatalogTableType.EXTERNAL) } + test("create table when the table already exists") { + val catalog = newBasicCatalog() + assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2")) + val table = newTable("tbl1", "db2") + intercept[TableAlreadyExistsException] { + catalog.createTable(table, ignoreIfExists = false) + } + } + test("drop table") { val catalog = newBasicCatalog() assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2")) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index 7f50e38d30c9a..ed87ac3c3e634 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -30,6 +30,7 @@ import org.apache.spark.SparkConf import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.CaseInsensitiveMap @@ -171,9 +172,13 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, 
hadoopConf: Configurat ignoreIfExists: Boolean): Unit = withClient { assert(tableDefinition.identifier.database.isDefined) val db = tableDefinition.identifier.database.get + val table = tableDefinition.identifier.table requireDbExists(db) verifyTableProperties(tableDefinition) + if (tableExists(db, table) && !ignoreIfExists) { + throw new TableAlreadyExistsException(db = db, table = table) + } // Before saving data source table metadata into Hive metastore, we should: // 1. Put table schema, partition column names and bucket specification in table properties. // 2. Check if this table is hive compatible @@ -450,7 +455,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat } override def tableExists(db: String, table: String): Boolean = withClient { - client.getTableOption(db, table).isDefined + client.tableExists(db, table) } override def listTables(db: String): Seq[String] = withClient { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala index 6f009d714bef4..dc74fa257aa4d 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala @@ -68,6 +68,9 @@ private[hive] trait HiveClient { /** List the names of all the databases that match the specified pattern. */ def listDatabases(pattern: String): Seq[String] + /** Return whether a table/view with the specified name exists. */ + def tableExists(dbName: String, tableName: String): Boolean + /** Returns the specified table, or throws [[NoSuchTableException]]. */ final def getTable(dbName: String, tableName: String): CatalogTable = { getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName)) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index b45ad30dcae41..dd982192a3837 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -347,6 +347,10 @@ private[hive] class HiveClientImpl( client.getDatabasesByPattern(pattern).asScala } + override def tableExists(dbName: String, tableName: String): Boolean = withHiveState { + Option(client.getTable(dbName, tableName, false /* do not throw exception */)).nonEmpty + } + override def getTableOption( dbName: String, tableName: String): Option[CatalogTable] = withHiveState { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala index a2509f2a75f47..10b6cd102416c 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala @@ -218,6 +218,12 @@ class VersionsSuite extends SparkFunSuite with Logging { holdDDLTime = false) } + test(s"$version: tableExists") { + // No exception should be thrown + assert(client.tableExists("default", "src")) + assert(!client.tableExists("default", "nonexistent")) + } + test(s"$version: getTable") { // No exception should be thrown client.getTable("default", "src") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index 
9019333d76869..58c43ebcae6fc 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -24,8 +24,10 @@ import org.scalatest.BeforeAndAfterEach import org.apache.spark.internal.config._ import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} +import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTableType} import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.hive.HiveExternalCatalog import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SQLTestUtils @@ -675,6 +677,37 @@ class HiveDDLSuite } } + test("create table with the same name as an index table") { + val tabName = "tab1" + val indexName = tabName + "_index" + withTable(tabName) { + // Spark SQL does not support creating index. Thus, we have to use Hive client. + val client = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client + sql(s"CREATE TABLE $tabName(a int)") + + try { + client.runSqlHive( + s"CREATE INDEX $indexName ON TABLE $tabName (a) AS 'COMPACT' WITH DEFERRED REBUILD") + val indexTabName = + spark.sessionState.catalog.listTables("default", s"*$indexName*").head.table + intercept[TableAlreadyExistsException] { + sql(s"CREATE TABLE $indexTabName(b int)") + } + intercept[TableAlreadyExistsException] { + sql(s"ALTER TABLE $tabName RENAME TO $indexTabName") + } + + // When tableExists is not invoked, we still can get an AnalysisException + val e = intercept[AnalysisException] { + sql(s"DESCRIBE $indexTabName") + }.getMessage + assert(e.contains("Hive index table is not supported.")) + } finally { + client.runSqlHive(s"DROP INDEX IF EXISTS $indexName ON $tabName") + } + } + } + test("desc table for data source table - no user-defined schema") { Seq("parquet", "json", "orc").foreach { fileFormat => withTable("t1") { From 2d76cb11f51b20a57af7bdefb7a74fdfe41060aa Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Tue, 30 Aug 2016 11:18:29 +0100 Subject: [PATCH 137/270] [SPARK-17276][CORE][TEST] Stop env params output on Jenkins job page https://issues.apache.org/jira/browse/SPARK-17276 ## What changes were proposed in this pull request? When trying to find error msg in a failed Jenkins build job, I'm annoyed by the huge env output. The env parameter output should be muted. ![screen shot 2016-08-26 at 10 52 07 pm](https://cloud.githubusercontent.com/assets/3925641/18025581/b8d567ba-6be2-11e6-9eeb-6aec223f1730.png) ## How was this patch tested? Tested manually on local laptop. Author: Xin Ren Closes #14848 from keypointt/SPARK-17276. 
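For reference, a minimal sketch of the muting pattern this patch applies in `testCommandAvailable` (assuming only the standard `scala.sys.process` API): `ProcessBuilder.run()` with no arguments echoes the child process's stdout/stderr to the console, which is how environment-printing probe commands ended up dumping their full output into the Jenkins log, while passing a no-op `ProcessLogger` silently drops both streams and keeps only the exit code.

```scala
import scala.sys.process.{Process, ProcessLogger}
import scala.util.Try

// Probe whether an external command exists without echoing its output:
// the ProcessLogger receives every stdout/stderr line and discards it,
// so only the exit code reaches the test.
def testCommandAvailable(command: String): Boolean = {
  val attempt = Try(Process(command).run(ProcessLogger(_ => ())).exitValue())
  attempt.isSuccess && attempt.get == 0
}
```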
--- core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala | 2 +- .../org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala index 387f3e2502c51..7293aa9a2584f 100644 --- a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala +++ b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala @@ -214,7 +214,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext { } def testCommandAvailable(command: String): Boolean = { - val attempt = Try(Process(command).run().exitValue()) + val attempt = Try(Process(command).run(ProcessLogger(_ => ())).exitValue()) attempt.isSuccess && attempt.get == 0 } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala index 4ca882f840a58..05d0687fb7e48 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.execution import java.sql.{Date, Timestamp} -import scala.sys.process.Process +import scala.sys.process.{Process, ProcessLogger} import scala.util.Try import org.apache.hadoop.fs.Path @@ -1788,7 +1788,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton { } def testCommandAvailable(command: String): Boolean = { - val attempt = Try(Process(command).run().exitValue()) + val attempt = Try(Process(command).run(ProcessLogger(_ => ())).exitValue()) attempt.isSuccess && attempt.get == 0 } } From befab9c1c6b59ad90f63a7d10e12b186be897f15 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Tue, 30 Aug 2016 11:19:45 +0100 Subject: [PATCH 138/270] [SPARK-17264][SQL] DataStreamWriter should document that it only supports Parquet for now ## What changes were proposed in this pull request? Clarify that only parquet files are supported by DataStreamWriter now ## How was this patch tested? (Doc build -- no functional changes to test) Author: Sean Owen Closes #14860 from srowen/SPARK-17264. --- python/pyspark/sql/streaming.py | 2 +- .../scala/org/apache/spark/sql/streaming/DataStreamWriter.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py index a0ba5825f35c6..67375f6b5f942 100644 --- a/python/pyspark/sql/streaming.py +++ b/python/pyspark/sql/streaming.py @@ -591,7 +591,7 @@ def format(self, source): .. note:: Experimental. - :param source: string, name of the data source, e.g. 'json', 'parquet'. + :param source: string, name of the data source, which for now can be 'parquet'. >>> writer = sdf.writeStream.format('json') """ diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala index d38e3e58125d9..f70c7d08a691c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala @@ -122,7 +122,7 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) { /** * :: Experimental :: - * Specifies the underlying output data source. Built-in options include "parquet", "json", etc. + * Specifies the underlying output data source. 
Built-in options include "parquet" for now. * * @since 2.0.0 */ From d4eee9932edf1a489d7fe9120a0f003150834df6 Mon Sep 17 00:00:00 2001 From: Dmitriy Sokolov Date: Tue, 30 Aug 2016 11:23:37 +0100 Subject: [PATCH 139/270] [MINOR][DOCS] Fix minor typos in python example code ## What changes were proposed in this pull request? Fix minor typos python example code in streaming programming guide ## How was this patch tested? N/A Author: Dmitriy Sokolov Closes #14805 from silentsokolov/fix-typos. --- docs/mllib-data-types.md | 16 ++-- docs/programming-guide.md | 16 ++-- docs/quick-start.md | 6 +- docs/streaming-kafka-0-8-integration.md | 4 +- docs/streaming-programming-guide.md | 33 ++++---- .../structured-streaming-programming-guide.md | 79 +++++++++---------- 6 files changed, 77 insertions(+), 77 deletions(-) diff --git a/docs/mllib-data-types.md b/docs/mllib-data-types.md index 7dd3c97a83e4d..35cee3275e3b5 100644 --- a/docs/mllib-data-types.md +++ b/docs/mllib-data-types.md @@ -104,7 +104,7 @@ dv2 = [1.0, 0.0, 3.0] # Create a SparseVector. sv1 = Vectors.sparse(3, [0, 2], [1.0, 3.0]) # Use a single-column SciPy csc_matrix as a sparse vector. -sv2 = sps.csc_matrix((np.array([1.0, 3.0]), np.array([0, 2]), np.array([0, 2])), shape = (3, 1)) +sv2 = sps.csc_matrix((np.array([1.0, 3.0]), np.array([0, 2]), np.array([0, 2])), shape=(3, 1)) {% endhighlight %} @@ -517,12 +517,12 @@ from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix # Create an RDD of indexed rows. # - This can be done explicitly with the IndexedRow class: -indexedRows = sc.parallelize([IndexedRow(0, [1, 2, 3]), - IndexedRow(1, [4, 5, 6]), - IndexedRow(2, [7, 8, 9]), +indexedRows = sc.parallelize([IndexedRow(0, [1, 2, 3]), + IndexedRow(1, [4, 5, 6]), + IndexedRow(2, [7, 8, 9]), IndexedRow(3, [10, 11, 12])]) # - or by using (long, vector) tuples: -indexedRows = sc.parallelize([(0, [1, 2, 3]), (1, [4, 5, 6]), +indexedRows = sc.parallelize([(0, [1, 2, 3]), (1, [4, 5, 6]), (2, [7, 8, 9]), (3, [10, 11, 12])]) # Create an IndexedRowMatrix from an RDD of IndexedRows. @@ -731,15 +731,15 @@ from pyspark.mllib.linalg import Matrices from pyspark.mllib.linalg.distributed import BlockMatrix # Create an RDD of sub-matrix blocks. -blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), +blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]) # Create a BlockMatrix from an RDD of sub-matrix blocks. mat = BlockMatrix(blocks, 3, 2) # Get its size. -m = mat.numRows() # 6 -n = mat.numCols() # 2 +m = mat.numRows() # 6 +n = mat.numCols() # 2 # Get the blocks as an RDD of sub-matrix blocks. blocksRDD = mat.blocks diff --git a/docs/programming-guide.md b/docs/programming-guide.md index 40287d7702bd5..74d5ee1ca6b3f 100644 --- a/docs/programming-guide.md +++ b/docs/programming-guide.md @@ -445,7 +445,7 @@ Similarly to text files, SequenceFiles can be saved and loaded by specifying the classes can be specified, but for standard Writables this is not required. 
{% highlight python %} ->>> rdd = sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x )) +>>> rdd = sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x)) >>> rdd.saveAsSequenceFile("path/to/file") >>> sorted(sc.sequenceFile("path/to/file").collect()) [(1, u'a'), (2, u'aa'), (3, u'aaa')] @@ -459,10 +459,12 @@ Elasticsearch ESInputFormat: {% highlight python %} $ SPARK_CLASSPATH=/path/to/elasticsearch-hadoop.jar ./bin/pyspark ->>> conf = {"es.resource" : "index/type"} # assume Elasticsearch is running on localhost defaults ->>> rdd = sc.newAPIHadoopRDD("org.elasticsearch.hadoop.mr.EsInputFormat",\ - "org.apache.hadoop.io.NullWritable", "org.elasticsearch.hadoop.mr.LinkedMapWritable", conf=conf) ->>> rdd.first() # the result is a MapWritable that is converted to a Python dict +>>> conf = {"es.resource" : "index/type"} # assume Elasticsearch is running on localhost defaults +>>> rdd = sc.newAPIHadoopRDD("org.elasticsearch.hadoop.mr.EsInputFormat", + "org.apache.hadoop.io.NullWritable", + "org.elasticsearch.hadoop.mr.LinkedMapWritable", + conf=conf) +>>> rdd.first() # the result is a MapWritable that is converted to a Python dict (u'Elasticsearch ID', {u'field1': True, u'field2': u'Some Text', @@ -797,7 +799,6 @@ def increment_counter(x): rdd.foreach(increment_counter) print("Counter value: ", counter) - {% endhighlight %} @@ -1455,13 +1456,14 @@ The code below shows an accumulator being used to add up the elements of an arra {% highlight python %} >>> accum = sc.accumulator(0) +>>> accum Accumulator >>> sc.parallelize([1, 2, 3, 4]).foreach(lambda x: accum.add(x)) ... 10/09/29 18:41:08 INFO SparkContext: Tasks finished in 0.317106 s -scala> accum.value +>>> accum.value 10 {% endhighlight %} diff --git a/docs/quick-start.md b/docs/quick-start.md index a29e28faf242d..2eab8d19aa4c6 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -74,10 +74,10 @@ Spark's primary abstraction is a distributed collection of items called a Resili RDDs have _[actions](programming-guide.html#actions)_, which return values, and _[transformations](programming-guide.html#transformations)_, which return pointers to new RDDs. Let's start with a few actions: {% highlight python %} ->>> textFile.count() # Number of items in this RDD +>>> textFile.count() # Number of items in this RDD 126 ->>> textFile.first() # First item in this RDD +>>> textFile.first() # First item in this RDD u'# Apache Spark' {% endhighlight %} @@ -90,7 +90,7 @@ Now let's use a transformation. We will use the [`filter`](programming-guide.htm We can chain together transformations and actions: {% highlight python %} ->>> textFile.filter(lambda line: "Spark" in line).count() # How many lines contain "Spark"? +>>> textFile.filter(lambda line: "Spark" in line).count() # How many lines contain "Spark"? 15 {% endhighlight %} diff --git a/docs/streaming-kafka-0-8-integration.md b/docs/streaming-kafka-0-8-integration.md index f8f7b95cf7458..d3fc9adfcf3ce 100644 --- a/docs/streaming-kafka-0-8-integration.md +++ b/docs/streaming-kafka-0-8-integration.md @@ -195,8 +195,8 @@ Next, we discuss how to use this approach in your streaming application. 
for o in offsetRanges: print "%s %s %s %s" % (o.topic, o.partition, o.fromOffset, o.untilOffset) - directKafkaStream\ - .transform(storeOffsetRanges)\ + directKafkaStream \ + .transform(storeOffsetRanges) \ .foreachRDD(printOffsetRanges) diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index 82d36474ff4bf..c0e4f3b35afa5 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -930,7 +930,7 @@ JavaPairDStream cleanedDStream = wordCounts.transform(
    {% highlight python %} -spamInfoRDD = sc.pickleFile(...) # RDD containing spam information +spamInfoRDD = sc.pickleFile(...) # RDD containing spam information # join data stream with spam information to do data cleaning cleanedDStream = wordCounts.transform(lambda rdd: rdd.join(spamInfoRDD).filter(...)) @@ -1495,16 +1495,15 @@ See the full [source code]({{site.SPARK_GITHUB_URL}}/blob/v{{site.SPARK_VERSION_
    {% highlight python %} - def getWordBlacklist(sparkContext): - if ('wordBlacklist' not in globals()): - globals()['wordBlacklist'] = sparkContext.broadcast(["a", "b", "c"]) - return globals()['wordBlacklist'] + if ("wordBlacklist" not in globals()): + globals()["wordBlacklist"] = sparkContext.broadcast(["a", "b", "c"]) + return globals()["wordBlacklist"] def getDroppedWordsCounter(sparkContext): - if ('droppedWordsCounter' not in globals()): - globals()['droppedWordsCounter'] = sparkContext.accumulator(0) - return globals()['droppedWordsCounter'] + if ("droppedWordsCounter" not in globals()): + globals()["droppedWordsCounter"] = sparkContext.accumulator(0) + return globals()["droppedWordsCounter"] def echo(time, rdd): # Get or register the blacklist Broadcast @@ -1626,12 +1625,12 @@ See the full [source code]({{site.SPARK_GITHUB_URL}}/blob/v{{site.SPARK_VERSION_ # Lazily instantiated global instance of SparkSession def getSparkSessionInstance(sparkConf): - if ('sparkSessionSingletonInstance' not in globals()): - globals()['sparkSessionSingletonInstance'] = SparkSession\ - .builder\ - .config(conf=sparkConf)\ + if ("sparkSessionSingletonInstance" not in globals()): + globals()["sparkSessionSingletonInstance"] = SparkSession \ + .builder \ + .config(conf=sparkConf) \ .getOrCreate() - return globals()['sparkSessionSingletonInstance'] + return globals()["sparkSessionSingletonInstance"] ... @@ -1829,11 +1828,11 @@ This behavior is made simple by using `StreamingContext.getOrCreate`. This is us {% highlight python %} # Function to create and setup a new StreamingContext def functionToCreateContext(): - sc = SparkContext(...) # new context - ssc = new StreamingContext(...) - lines = ssc.socketTextStream(...) # create DStreams + sc = SparkContext(...) # new context + ssc = StreamingContext(...) + lines = ssc.socketTextStream(...) # create DStreams ... - ssc.checkpoint(checkpointDirectory) # set checkpoint directory + ssc.checkpoint(checkpointDirectory) # set checkpoint directory return ssc # Get StreamingContext from checkpoint data or create a new one diff --git a/docs/structured-streaming-programming-guide.md b/docs/structured-streaming-programming-guide.md index 8a88e06ebde56..cdc3975d7cb7c 100644 --- a/docs/structured-streaming-programming-guide.md +++ b/docs/structured-streaming-programming-guide.md @@ -59,9 +59,9 @@ from pyspark.sql import SparkSession from pyspark.sql.functions import explode from pyspark.sql.functions import split -spark = SparkSession\ - .builder()\ - .appName("StructuredNetworkWordCount")\ +spark = SparkSession \ + .builder() \ + .appName("StructuredNetworkWordCount") \ .getOrCreate() {% endhighlight %} @@ -124,22 +124,22 @@ This `lines` DataFrame represents an unbounded table containing the streaming te {% highlight python %} # Create DataFrame representing the stream of input lines from connection to localhost:9999 -lines = spark\ - .readStream\ - .format('socket')\ - .option('host', 'localhost')\ - .option('port', 9999)\ +lines = spark \ + .readStream \ + .format("socket") \ + .option("host", "localhost") \ + .option("port", 9999) \ .load() # Split the lines into words words = lines.select( explode( - split(lines.value, ' ') - ).alias('word') + split(lines.value, " ") + ).alias("word") ) # Generate running word count -wordCounts = words.groupBy('word').count() +wordCounts = words.groupBy("word").count() {% endhighlight %} This `lines` DataFrame represents an unbounded table containing the streaming text data. 
This table contains one column of strings named "value", and each line in the streaming text data becomes a row in the table. Note, that this is not currently receiving any data as we are just setting up the transformation, and have not yet started it. Next, we have used two built-in SQL functions - split and explode, to split each line into multiple rows with a word each. In addition, we use the function `alias` to name the new column as "word". Finally, we have defined the `wordCounts` DataFrame by grouping by the unique values in the Dataset and counting them. Note that this is a streaming DataFrame which represents the running word counts of the stream. @@ -180,10 +180,10 @@ query.awaitTermination(); {% highlight python %} # Start running the query that prints the running counts to the console -query = wordCounts\ - .writeStream\ - .outputMode('complete')\ - .format('console')\ +query = wordCounts \ + .writeStream \ + .outputMode("complete") \ + .format("console") \ .start() query.awaitTermination() @@ -488,7 +488,7 @@ spark = SparkSession. ... # Read text from socket socketDF = spark \ - .readStream() \ + .readStream() \ .format("socket") \ .option("host", "localhost") \ .option("port", 9999) \ @@ -504,7 +504,7 @@ csvDF = spark \ .readStream() \ .option("sep", ";") \ .schema(userSchema) \ - .csv("/path/to/directory") # Equivalent to format("csv").load("/path/to/directory") + .csv("/path/to/directory") # Equivalent to format("csv").load("/path/to/directory") {% endhighlight %}
    @@ -596,8 +596,7 @@ ds.groupByKey(new MapFunction() { // using typed API
    {% highlight python %} - -df = ... # streaming DataFrame with IOT device data with schema { device: string, type: string, signal: double, time: DateType } +df = ... # streaming DataFrame with IOT device data with schema { device: string, type: string, signal: double, time: DateType } # Select the devices which have signal more than 10 df.select("device").where("signal > 10") @@ -653,11 +652,11 @@ Dataset windowedCounts = words.groupBy(
    {% highlight python %} -words = ... # streaming DataFrame of schema { timestamp: Timestamp, word: String } +words = ... # streaming DataFrame of schema { timestamp: Timestamp, word: String } # Group the data by window and word and compute the count of each group windowedCounts = words.groupBy( - window(words.timestamp, '10 minutes', '5 minutes'), + window(words.timestamp, "10 minutes", "5 minutes"), words.word ).count() {% endhighlight %} @@ -704,7 +703,7 @@ streamingDf.join(staticDf, "type", "right_join"); // right outer join with a st {% highlight python %} staticDf = spark.read. ... streamingDf = spark.readStream. ... -streamingDf.join(staticDf, "type") # inner equi-join with a static DF +streamingDf.join(staticDf, "type") # inner equi-join with a static DF streamingDf.join(staticDf, "type", "right_join") # right outer join with a static DF {% endhighlight %} @@ -907,25 +906,25 @@ spark.sql("select * from aggregates").show(); // interactively query in-memory noAggDF = deviceDataDf.select("device").where("signal > 10") # Print new data to console -noAggDF\ - .writeStream()\ - .format("console")\ +noAggDF \ + .writeStream() \ + .format("console") \ .start() # Write new data to Parquet files -noAggDF\ - .writeStream()\ - .parquet("path/to/destination/directory")\ +noAggDF \ + .writeStream() \ + .parquet("path/to/destination/directory") \ .start() # ========== DF with aggregation ========== aggDF = df.groupBy("device").count() # Print updated aggregations to console -aggDF\ - .writeStream()\ - .outputMode("complete")\ - .format("console")\ +aggDF \ + .writeStream() \ + .outputMode("complete") \ + .format("console") \ .start() # Have all the aggregates in an in memory table. The query name will be the table name @@ -1072,11 +1071,11 @@ spark.streams().awaitAnyTermination(); // block until any one of them terminat {% highlight python %} spark = ... # spark session -spark.streams().active # get the list of currently active streaming queries +spark.streams().active # get the list of currently active streaming queries -spark.streams().get(id) # get a query object by its unique id +spark.streams().get(id) # get a query object by its unique id -spark.streams().awaitAnyTermination() # block until any one of them terminates +spark.streams().awaitAnyTermination() # block until any one of them terminates {% endhighlight %}
    @@ -1116,11 +1115,11 @@ aggDF
    {% highlight python %} -aggDF\ - .writeStream()\ - .outputMode("complete")\ - .option("checkpointLocation", "path/to/HDFS/dir")\ - .format("memory")\ +aggDF \ + .writeStream() \ + .outputMode("complete") \ + .option("checkpointLocation", "path/to/HDFS/dir") \ + .format("memory") \ .start() {% endhighlight %} From 27209252f09ff73c58e60c6df8aaba73b308088c Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Tue, 30 Aug 2016 11:24:55 +0100 Subject: [PATCH 140/270] [MINOR][MLLIB][SQL] Clean up unused variables and unused import ## What changes were proposed in this pull request? Clean up unused variables and unused import statements, unnecessary `return` and `toArray`, and some more style improvement, when I walk through the code examples. ## How was this patch tested? Testet manually on local laptop. Author: Xin Ren Closes #14836 from keypointt/codeWalkThroughML. --- .../test/scala/org/apache/spark/AccumulatorSuite.scala | 6 ++++-- .../scala/org/apache/spark/ml/feature/Interaction.scala | 2 +- .../org/apache/spark/ml/r/IsotonicRegressionWrapper.scala | 2 +- .../main/scala/org/apache/spark/ml/util/stopwatches.scala | 2 +- .../org/apache/spark/mllib/feature/ChiSqSelector.scala | 2 +- .../scala/org/apache/spark/mllib/random/RandomRDDs.scala | 8 ++++---- .../main/scala/org/apache/spark/sql/DataFrameReader.scala | 2 +- .../src/main/scala/org/apache/spark/sql/Dataset.scala | 2 +- .../src/main/scala/org/apache/spark/sql/SQLContext.scala | 2 +- .../spark/sql/execution/datasources/DataSource.scala | 2 +- 10 files changed, 16 insertions(+), 14 deletions(-) diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala index 6cbd5ae5d428a..6d03ee091e4ed 100644 --- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala +++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala @@ -100,7 +100,9 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex val acc: Accumulator[Int] = sc.accumulator(0) val d = sc.parallelize(1 to 20) - an [Exception] should be thrownBy {d.foreach{x => acc.value = x}} + intercept[SparkException] { + d.foreach(x => acc.value = x) + } } test ("add value to collection accumulators") { @@ -171,7 +173,7 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex d.foreach { x => acc.localValue ++= x } - acc.value should be ( (0 to maxI).toSet) + acc.value should be ((0 to maxI).toSet) resetSparkContext() } } diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala index 96d0bdee9e2b9..902f84f862c17 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala @@ -136,7 +136,7 @@ class Interaction @Since("1.6.0") (@Since("1.6.0") override val uid: String) ext case _: VectorUDT => val attrs = AttributeGroup.fromStructField(f).attributes.getOrElse( throw new SparkException("Vector attributes must be defined for interaction.")) - attrs.map(getNumFeatures).toArray + attrs.map(getNumFeatures) } new FeatureEncoder(numFeatures) }.toArray diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala index 1ea80cb46ab7b..a7992debe684a 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala +++ 
b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala @@ -23,7 +23,7 @@ import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ import org.apache.spark.ml.{Pipeline, PipelineModel} -import org.apache.spark.ml.attribute.{AttributeGroup} +import org.apache.spark.ml.attribute.AttributeGroup import org.apache.spark.ml.feature.RFormula import org.apache.spark.ml.regression.{IsotonicRegression, IsotonicRegressionModel} import org.apache.spark.ml.util._ diff --git a/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala b/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala index e79b1f31643d0..e539deca4b036 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala @@ -20,7 +20,7 @@ package org.apache.spark.ml.util import scala.collection.mutable import org.apache.spark.SparkContext -import org.apache.spark.util.LongAccumulator; +import org.apache.spark.util.LongAccumulator /** * Abstract class for stopwatches. diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala index 56fb2d33c2ca0..33a1f18bccca5 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala @@ -164,7 +164,7 @@ object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] { case Row(feature: Int) => (feature) }.collect() - return new ChiSqSelectorModel(features) + new ChiSqSelectorModel(features) } } } diff --git a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala index c2bc1f17ccd58..6d60136ddc38f 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala @@ -438,10 +438,10 @@ object RandomRDDs { @DeveloperApi @Since("1.6.0") def randomJavaRDD[T]( - jsc: JavaSparkContext, - generator: RandomDataGenerator[T], - size: Long): JavaRDD[T] = { - randomJavaRDD(jsc, generator, size, 0); + jsc: JavaSparkContext, + generator: RandomDataGenerator[T], + size: Long): JavaRDD[T] = { + randomJavaRDD(jsc, generator, size, 0) } // TODO Generate RDD[Vector] from multivariate distributions. 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala index c060091c7fc38..93bf74d06b71d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala @@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaRDD import org.apache.spark.internal.Logging import org.apache.spark.rdd.RDD import org.apache.spark.sql.execution.LogicalRDD -import org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation} +import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation} import org.apache.spark.sql.execution.datasources.json.{InferSchema, JacksonParser, JSONOptions} import org.apache.spark.sql.types.StructType diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala index 6da99ce0dd683..e7dcf0f51f4a5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala @@ -43,7 +43,7 @@ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.util.usePrettyExpression import org.apache.spark.sql.execution.{FileRelation, LogicalRDD, QueryExecution, SQLExecution} import org.apache.spark.sql.execution.command.{CreateViewCommand, ExplainCommand} -import org.apache.spark.sql.execution.datasources.{CreateTable, LogicalRelation} +import org.apache.spark.sql.execution.datasources.LogicalRelation import org.apache.spark.sql.execution.datasources.json.JacksonGenerator import org.apache.spark.sql.execution.python.EvaluatePython import org.apache.spark.sql.streaming.{DataStreamWriter, StreamingQuery} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index fbf22197a1a11..2edf2e1972053 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -1093,7 +1093,7 @@ object SQLContext { } data.map{ element => new GenericInternalRow( - methodsToConverts.map { case (e, convert) => convert(e.invoke(element)) }.toArray[Any] + methodsToConverts.map { case (e, convert) => convert(e.invoke(element)) } ): InternalRow } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala index b783d699745b1..348530888de3a 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala @@ -61,7 +61,7 @@ import org.apache.spark.util.Utils * qualified. This option only works when reading from a [[FileFormat]]. * @param userSpecifiedSchema An optional specification of the schema of the data. When present * we skip attempting to infer the schema. - * @param partitionColumns A list of column names that the relation is partitioned by. When this + * @param partitionColumns A list of column names that the relation is partitioned by. When this * list is empty, the relation is unpartitioned. * @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data. 
*/ From 4b4e329e49f8af28fa6301bd06c48d7097eaf9e6 Mon Sep 17 00:00:00 2001 From: Ferdinand Xu Date: Tue, 30 Aug 2016 09:15:31 -0700 Subject: [PATCH 141/270] [SPARK-5682][CORE] Add encrypted shuffle in spark This patch is using Apache Commons Crypto library to enable shuffle encryption support. Author: Ferdinand Xu Author: kellyzly Closes #8880 from winningsix/SPARK-10771. --- core/pom.xml | 4 + .../unsafe/sort/UnsafeSorterSpillReader.java | 2 +- .../org/apache/spark/SecurityManager.scala | 20 ++++ .../scala/org/apache/spark/SparkContext.scala | 5 + .../spark/internal/config/package.scala | 20 ++++ .../spark/security/CryptoStreamUtils.scala | 109 ++++++++++++++++++ .../spark/serializer/SerializerManager.scala | 47 ++++++-- .../shuffle/BlockStoreShuffleReader.scala | 4 +- .../apache/spark/storage/BlockManager.scala | 5 +- .../spark/storage/DiskBlockObjectWriter.scala | 5 +- .../spark/storage/memory/MemoryStore.scala | 2 +- .../collection/ExternalAppendOnlyMap.scala | 4 +- .../util/collection/ExternalSorter.scala | 6 +- .../sort/UnsafeShuffleWriterSuite.java | 4 +- .../map/AbstractBytesToBytesMapSuite.java | 4 +- .../sort/UnsafeExternalSorterSuite.java | 4 +- .../security/CryptoStreamUtilsSuite.scala | 107 +++++++++++++++++ .../BypassMergeSortShuffleWriterSuite.scala | 2 +- dev/deps/spark-deps-hadoop-2.2 | 1 + dev/deps/spark-deps-hadoop-2.3 | 1 + dev/deps/spark-deps-hadoop-2.4 | 1 + dev/deps/spark-deps-hadoop-2.6 | 1 + dev/deps/spark-deps-hadoop-2.7 | 1 + docs/configuration.md | 23 ++++ pom.xml | 12 ++ .../org/apache/spark/deploy/yarn/Client.scala | 4 + .../spark/deploy/yarn/IOEncryptionSuite.scala | 108 +++++++++++++++++ 27 files changed, 478 insertions(+), 28 deletions(-) create mode 100644 core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala create mode 100644 core/src/test/scala/org/apache/spark/security/CryptoStreamUtilsSuite.scala create mode 100644 yarn/src/test/scala/org/apache/spark/deploy/yarn/IOEncryptionSuite.scala diff --git a/core/pom.xml b/core/pom.xml index c04cf7e5255f2..69a0b0ff27c39 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -327,6 +327,10 @@ org.apache.spark spark-tags_${scala.binary.version} + + org.apache.commons + commons-crypto + target/scala-${scala.binary.version}/classes diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java index d048cf7aeb5f1..2875b0d69def6 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java @@ -72,7 +72,7 @@ public UnsafeSorterSpillReader( final BufferedInputStream bs = new BufferedInputStream(new FileInputStream(file), (int) bufferSizeBytes); try { - this.in = serializerManager.wrapForCompression(blockId, bs); + this.in = serializerManager.wrapStream(blockId, bs); this.din = new DataInputStream(this.in); numRecords = numRecordsRemaining = din.readInt(); } catch (IOException e) { diff --git a/core/src/main/scala/org/apache/spark/SecurityManager.scala b/core/src/main/scala/org/apache/spark/SecurityManager.scala index a6550b6ca8c94..199365ad925a3 100644 --- a/core/src/main/scala/org/apache/spark/SecurityManager.scala +++ b/core/src/main/scala/org/apache/spark/SecurityManager.scala @@ -21,15 +21,19 @@ import java.lang.{Byte => JByte} import java.net.{Authenticator, PasswordAuthentication} import java.security.{KeyStore, SecureRandom} 
import java.security.cert.X509Certificate +import javax.crypto.KeyGenerator import javax.net.ssl._ import com.google.common.hash.HashCodes import com.google.common.io.Files import org.apache.hadoop.io.Text +import org.apache.hadoop.security.Credentials import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging +import org.apache.spark.internal.config._ import org.apache.spark.network.sasl.SecretKeyHolder +import org.apache.spark.security.CryptoStreamUtils._ import org.apache.spark.util.Utils /** @@ -554,4 +558,20 @@ private[spark] object SecurityManager { // key used to store the spark secret in the Hadoop UGI val SECRET_LOOKUP_KEY = "sparkCookie" + + /** + * Setup the cryptographic key used by IO encryption in credentials. The key is generated using + * [[KeyGenerator]]. The algorithm and key length is specified by the [[SparkConf]]. + */ + def initIOEncryptionKey(conf: SparkConf, credentials: Credentials): Unit = { + if (credentials.getSecretKey(SPARK_IO_TOKEN) == null) { + val keyLen = conf.get(IO_ENCRYPTION_KEY_SIZE_BITS) + val ioKeyGenAlgorithm = conf.get(IO_ENCRYPTION_KEYGEN_ALGORITHM) + val keyGen = KeyGenerator.getInstance(ioKeyGenAlgorithm) + keyGen.init(keyLen) + + val ioKey = keyGen.generateKey() + credentials.addSecretKey(SPARK_IO_TOKEN, ioKey.getEncoded) + } + } } diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 08d6343d623cf..744d5d0f7aa8e 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -49,6 +49,7 @@ import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil} import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat} import org.apache.spark.internal.Logging +import org.apache.spark.internal.config._ import org.apache.spark.io.CompressionCodec import org.apache.spark.partial.{ApproximateEvaluator, PartialResult} import org.apache.spark.rdd._ @@ -411,6 +412,10 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli } if (master == "yarn" && deployMode == "client") System.setProperty("SPARK_YARN_MODE", "true") + if (_conf.get(IO_ENCRYPTION_ENABLED) && !SparkHadoopUtil.get.isYarnMode()) { + throw new SparkException("IO encryption is only supported in YARN mode, please disable it " + + s"by setting ${IO_ENCRYPTION_ENABLED.key} to false") + } // "_jobProgressListener" should be set up before creating SparkEnv because when creating // "SparkEnv", some messages will be posted to "listenerBus" and we should not miss them. 
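As a usage illustration only (a sketch, not part of this patch): the feature is driven entirely by the new configuration keys introduced below in `internal/config/package.scala`, and any setting under the `spark.io.encryption.commons.config.` prefix is forwarded to Apache Commons Crypto with that prefix rewritten to `commons.crypto.` (see `CryptoStreamUtils.toCryptoConf`). The final Commons Crypto key in the sketch is illustrative, not something this patch requires.

```scala
import org.apache.spark.SparkConf

// Hypothetical driver-side configuration enabling encrypted shuffle on YARN.
val conf = new SparkConf()
  .set("spark.io.encryption.enabled", "true")        // default: false
  .set("spark.io.encryption.keySizeBits", "256")     // allowed: 128, 192, 256
  .set("spark.io.encryption.keygen.algorithm", "HmacSHA1")
  // Forwarded to Commons Crypto as "commons.crypto.cipher.classes";
  // the concrete key and value here are only an example.
  .set("spark.io.encryption.commons.config.cipher.classes",
    "org.apache.commons.crypto.cipher.JceCipher")
```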
diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala index 47174e4efee81..ebce07c1e3b3e 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/package.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala @@ -119,4 +119,24 @@ package object config { private[spark] val UI_RETAINED_TASKS = ConfigBuilder("spark.ui.retainedTasks") .intConf .createWithDefault(100000) + + private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled") + .booleanConf + .createWithDefault(false) + + private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM = + ConfigBuilder("spark.io.encryption.keygen.algorithm") + .stringConf + .createWithDefault("HmacSHA1") + + private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits") + .intConf + .checkValues(Set(128, 192, 256)) + .createWithDefault(128) + + private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION = + ConfigBuilder("spark.io.crypto.cipher.transformation") + .internal() + .stringConf + .createWithDefaultString("AES/CTR/NoPadding") } diff --git a/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala b/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala new file mode 100644 index 0000000000000..8f15f50bee814 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.spark.security + +import java.io.{InputStream, OutputStream} +import java.util.Properties +import javax.crypto.spec.{IvParameterSpec, SecretKeySpec} + +import org.apache.commons.crypto.random._ +import org.apache.commons.crypto.stream._ +import org.apache.hadoop.io.Text + +import org.apache.spark.SparkConf +import org.apache.spark.deploy.SparkHadoopUtil +import org.apache.spark.internal.Logging +import org.apache.spark.internal.config._ + +/** + * A util class for manipulating IO encryption and decryption streams. + */ +private[spark] object CryptoStreamUtils extends Logging { + /** + * Constants and variables for spark IO encryption + */ + val SPARK_IO_TOKEN = new Text("SPARK_IO_TOKEN") + + // The initialization vector length in bytes. + val IV_LENGTH_IN_BYTES = 16 + // The prefix of IO encryption related configurations in Spark configuration. + val SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX = "spark.io.encryption.commons.config." + // The prefix for the configurations passing to Apache Commons Crypto library. + val COMMONS_CRYPTO_CONF_PREFIX = "commons.crypto." + + /** + * Helper method to wrap [[OutputStream]] with [[CryptoOutputStream]] for encryption. 
+ */ + def createCryptoOutputStream( + os: OutputStream, + sparkConf: SparkConf): OutputStream = { + val properties = toCryptoConf(sparkConf) + val iv = createInitializationVector(properties) + os.write(iv) + val credentials = SparkHadoopUtil.get.getCurrentUserCredentials() + val key = credentials.getSecretKey(SPARK_IO_TOKEN) + val transformationStr = sparkConf.get(IO_CRYPTO_CIPHER_TRANSFORMATION) + new CryptoOutputStream(transformationStr, properties, os, + new SecretKeySpec(key, "AES"), new IvParameterSpec(iv)) + } + + /** + * Helper method to wrap [[InputStream]] with [[CryptoInputStream]] for decryption. + */ + def createCryptoInputStream( + is: InputStream, + sparkConf: SparkConf): InputStream = { + val properties = toCryptoConf(sparkConf) + val iv = new Array[Byte](IV_LENGTH_IN_BYTES) + is.read(iv, 0, iv.length) + val credentials = SparkHadoopUtil.get.getCurrentUserCredentials() + val key = credentials.getSecretKey(SPARK_IO_TOKEN) + val transformationStr = sparkConf.get(IO_CRYPTO_CIPHER_TRANSFORMATION) + new CryptoInputStream(transformationStr, properties, is, + new SecretKeySpec(key, "AES"), new IvParameterSpec(iv)) + } + + /** + * Get Commons-crypto configurations from Spark configurations identified by prefix. + */ + def toCryptoConf(conf: SparkConf): Properties = { + val props = new Properties() + conf.getAll.foreach { case (k, v) => + if (k.startsWith(SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX)) { + props.put(COMMONS_CRYPTO_CONF_PREFIX + k.substring( + SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX.length()), v) + } + } + props + } + + /** + * This method to generate an IV (Initialization Vector) using secure random. + */ + private[this] def createInitializationVector(properties: Properties): Array[Byte] = { + val iv = new Array[Byte](IV_LENGTH_IN_BYTES) + val initialIVStart = System.currentTimeMillis() + CryptoRandomFactory.getCryptoRandom(properties).nextBytes(iv) + val initialIVFinish = System.currentTimeMillis() + val initialIVTime = initialIVFinish - initialIVStart + if (initialIVTime > 2000) { + logWarning(s"It costs ${initialIVTime} milliseconds to create the Initialization Vector " + + s"used by CryptoStream") + } + iv + } +} diff --git a/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala b/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala index 07caadbe40438..7b1ec6fcbbbf6 100644 --- a/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala +++ b/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala @@ -23,13 +23,15 @@ import java.nio.ByteBuffer import scala.reflect.ClassTag import org.apache.spark.SparkConf +import org.apache.spark.internal.config._ import org.apache.spark.io.CompressionCodec +import org.apache.spark.security.CryptoStreamUtils import org.apache.spark.storage._ import org.apache.spark.util.io.{ChunkedByteBuffer, ChunkedByteBufferOutputStream} /** - * Component which configures serialization and compression for various Spark components, including - * automatic selection of which [[Serializer]] to use for shuffles. + * Component which configures serialization, compression and encryption for various Spark + * components, including automatic selection of which [[Serializer]] to use for shuffles. 
*/ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: SparkConf) { @@ -61,6 +63,9 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar // Whether to compress shuffle output temporarily spilled to disk private[this] val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", true) + // Whether to enable IO encryption + private[this] val enableIOEncryption = conf.get(IO_ENCRYPTION_ENABLED) + /* The compression codec to use. Note that the "lazy" val is necessary because we want to delay * the initialization of the compression codec until it is first used. The reason is that a Spark * program could be using a user-defined codec in a third party jar, which is loaded in @@ -102,17 +107,45 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar } } + /** + * Wrap an input stream for encryption and compression + */ + def wrapStream(blockId: BlockId, s: InputStream): InputStream = { + wrapForCompression(blockId, wrapForEncryption(s)) + } + + /** + * Wrap an output stream for encryption and compression + */ + def wrapStream(blockId: BlockId, s: OutputStream): OutputStream = { + wrapForCompression(blockId, wrapForEncryption(s)) + } + + /** + * Wrap an input stream for encryption if shuffle encryption is enabled + */ + private[this] def wrapForEncryption(s: InputStream): InputStream = { + if (enableIOEncryption) CryptoStreamUtils.createCryptoInputStream(s, conf) else s + } + + /** + * Wrap an output stream for encryption if shuffle encryption is enabled + */ + private[this] def wrapForEncryption(s: OutputStream): OutputStream = { + if (enableIOEncryption) CryptoStreamUtils.createCryptoOutputStream(s, conf) else s + } + /** * Wrap an output stream for compression if block compression is enabled for its block type */ - def wrapForCompression(blockId: BlockId, s: OutputStream): OutputStream = { + private[this] def wrapForCompression(blockId: BlockId, s: OutputStream): OutputStream = { if (shouldCompress(blockId)) compressionCodec.compressedOutputStream(s) else s } /** * Wrap an input stream for compression if block compression is enabled for its block type */ - def wrapForCompression(blockId: BlockId, s: InputStream): InputStream = { + private[this] def wrapForCompression(blockId: BlockId, s: InputStream): InputStream = { if (shouldCompress(blockId)) compressionCodec.compressedInputStream(s) else s } @@ -123,7 +156,7 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar values: Iterator[T]): Unit = { val byteStream = new BufferedOutputStream(outputStream) val ser = getSerializer(implicitly[ClassTag[T]]).newInstance() - ser.serializeStream(wrapForCompression(blockId, byteStream)).writeAll(values).close() + ser.serializeStream(wrapStream(blockId, byteStream)).writeAll(values).close() } /** Serializes into a chunked byte buffer. 
*/ @@ -139,7 +172,7 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar val bbos = new ChunkedByteBufferOutputStream(1024 * 1024 * 4, ByteBuffer.allocate) val byteStream = new BufferedOutputStream(bbos) val ser = getSerializer(classTag).newInstance() - ser.serializeStream(wrapForCompression(blockId, byteStream)).writeAll(values).close() + ser.serializeStream(wrapStream(blockId, byteStream)).writeAll(values).close() bbos.toChunkedByteBuffer } @@ -153,7 +186,7 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar val stream = new BufferedInputStream(inputStream) getSerializer(implicitly[ClassTag[T]]) .newInstance() - .deserializeStream(wrapForCompression(blockId, stream)) + .deserializeStream(wrapStream(blockId, stream)) .asIterator.asInstanceOf[Iterator[T]] } } diff --git a/core/src/main/scala/org/apache/spark/shuffle/BlockStoreShuffleReader.scala b/core/src/main/scala/org/apache/spark/shuffle/BlockStoreShuffleReader.scala index 5794f542b7564..b9d83495d29b6 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/BlockStoreShuffleReader.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/BlockStoreShuffleReader.scala @@ -51,9 +51,9 @@ private[spark] class BlockStoreShuffleReader[K, C]( SparkEnv.get.conf.getSizeAsMb("spark.reducer.maxSizeInFlight", "48m") * 1024 * 1024, SparkEnv.get.conf.getInt("spark.reducer.maxReqsInFlight", Int.MaxValue)) - // Wrap the streams for compression based on configuration + // Wrap the streams for compression and encryption based on configuration val wrappedStreams = blockFetcherItr.map { case (blockId, inputStream) => - serializerManager.wrapForCompression(blockId, inputStream) + serializerManager.wrapStream(blockId, inputStream) } val serializerInstance = dep.serializer.newInstance() diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index fe8465279860d..c72f28e00cdbc 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -721,10 +721,9 @@ private[spark] class BlockManager( serializerInstance: SerializerInstance, bufferSize: Int, writeMetrics: ShuffleWriteMetrics): DiskBlockObjectWriter = { - val compressStream: OutputStream => OutputStream = - serializerManager.wrapForCompression(blockId, _) + val wrapStream: OutputStream => OutputStream = serializerManager.wrapStream(blockId, _) val syncWrites = conf.getBoolean("spark.shuffle.sync", false) - new DiskBlockObjectWriter(file, serializerInstance, bufferSize, compressStream, + new DiskBlockObjectWriter(file, serializerInstance, bufferSize, wrapStream, syncWrites, writeMetrics, blockId) } diff --git a/core/src/main/scala/org/apache/spark/storage/DiskBlockObjectWriter.scala b/core/src/main/scala/org/apache/spark/storage/DiskBlockObjectWriter.scala index e5b1bf2f4b434..a499827ae1598 100644 --- a/core/src/main/scala/org/apache/spark/storage/DiskBlockObjectWriter.scala +++ b/core/src/main/scala/org/apache/spark/storage/DiskBlockObjectWriter.scala @@ -39,7 +39,7 @@ private[spark] class DiskBlockObjectWriter( val file: File, serializerInstance: SerializerInstance, bufferSize: Int, - compressStream: OutputStream => OutputStream, + wrapStream: OutputStream => OutputStream, syncWrites: Boolean, // These write metrics concurrently shared with other active DiskBlockObjectWriters who // are themselves performing writes. All updates must be relative. 
@@ -115,7 +115,8 @@ private[spark] class DiskBlockObjectWriter( initialize() initialized = true } - bs = compressStream(mcs) + + bs = wrapStream(mcs) objOut = serializerInstance.serializeStream(bs) streamOpen = true this diff --git a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala index 586339a58d236..d220ab51d115b 100644 --- a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala +++ b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala @@ -330,7 +330,7 @@ private[spark] class MemoryStore( redirectableStream.setOutputStream(bbos) val serializationStream: SerializationStream = { val ser = serializerManager.getSerializer(classTag).newInstance() - ser.serializeStream(serializerManager.wrapForCompression(blockId, redirectableStream)) + ser.serializeStream(serializerManager.wrapStream(blockId, redirectableStream)) } // Request enough memory to begin unrolling diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala index 8c8860bb37a40..09435281194b5 100644 --- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala +++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala @@ -486,8 +486,8 @@ class ExternalAppendOnlyMap[K, V, C]( ", batchOffsets = " + batchOffsets.mkString("[", ", ", "]")) val bufferedStream = new BufferedInputStream(ByteStreams.limit(fileStream, end - start)) - val compressedStream = serializerManager.wrapForCompression(blockId, bufferedStream) - ser.deserializeStream(compressedStream) + val wrappedStream = serializerManager.wrapStream(blockId, bufferedStream) + ser.deserializeStream(wrappedStream) } else { // No more batches left cleanup() diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala index 7c98e8cabb229..3579918fac45f 100644 --- a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala +++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala @@ -28,7 +28,6 @@ import com.google.common.io.ByteStreams import org.apache.spark._ import org.apache.spark.executor.ShuffleWriteMetrics import org.apache.spark.internal.Logging -import org.apache.spark.memory.TaskMemoryManager import org.apache.spark.serializer._ import org.apache.spark.storage.{BlockId, DiskBlockObjectWriter} @@ -522,8 +521,9 @@ private[spark] class ExternalSorter[K, V, C]( ", batchOffsets = " + batchOffsets.mkString("[", ", ", "]")) val bufferedStream = new BufferedInputStream(ByteStreams.limit(fileStream, end - start)) - val compressedStream = serializerManager.wrapForCompression(spill.blockId, bufferedStream) - serInstance.deserializeStream(compressedStream) + + val wrappedStream = serializerManager.wrapStream(spill.blockId, bufferedStream) + serInstance.deserializeStream(wrappedStream) } else { // No more batches left cleanup() diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java index daeb4675ea5f5..a96cd82382e2c 100644 --- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java +++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java @@ -86,7 +86,7 @@ public class UnsafeShuffleWriterSuite { 
@Mock(answer = RETURNS_SMART_NULLS) TaskContext taskContext; @Mock(answer = RETURNS_SMART_NULLS) ShuffleDependency shuffleDep; - private final class CompressStream extends AbstractFunction1 { + private final class WrapStream extends AbstractFunction1 { @Override public OutputStream apply(OutputStream stream) { if (conf.getBoolean("spark.shuffle.compress", true)) { @@ -136,7 +136,7 @@ public DiskBlockObjectWriter answer(InvocationOnMock invocationOnMock) throws Th (File) args[1], (SerializerInstance) args[2], (Integer) args[3], - new CompressStream(), + new WrapStream(), false, (ShuffleWriteMetrics) args[4], (BlockId) args[0] diff --git a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java index fc127f07c8d69..33709b454c4c9 100644 --- a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java +++ b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java @@ -75,7 +75,7 @@ public abstract class AbstractBytesToBytesMapSuite { @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager; @Mock(answer = RETURNS_SMART_NULLS) DiskBlockManager diskBlockManager; - private static final class CompressStream extends AbstractFunction1 { + private static final class WrapStream extends AbstractFunction1 { @Override public OutputStream apply(OutputStream stream) { return stream; @@ -122,7 +122,7 @@ public DiskBlockObjectWriter answer(InvocationOnMock invocationOnMock) throws Th (File) args[1], (SerializerInstance) args[2], (Integer) args[3], - new CompressStream(), + new WrapStream(), false, (ShuffleWriteMetrics) args[4], (BlockId) args[0] diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java index 3ea99233fe171..a9cf8ff520ed4 100644 --- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java +++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java @@ -88,7 +88,7 @@ public int compare( private final long pageSizeBytes = new SparkConf().getSizeAsBytes("spark.buffer.pageSize", "4m"); - private static final class CompressStream extends AbstractFunction1 { + private static final class WrapStream extends AbstractFunction1 { @Override public OutputStream apply(OutputStream stream) { return stream; @@ -128,7 +128,7 @@ public DiskBlockObjectWriter answer(InvocationOnMock invocationOnMock) throws Th (File) args[1], (SerializerInstance) args[2], (Integer) args[3], - new CompressStream(), + new WrapStream(), false, (ShuffleWriteMetrics) args[4], (BlockId) args[0] diff --git a/core/src/test/scala/org/apache/spark/security/CryptoStreamUtilsSuite.scala b/core/src/test/scala/org/apache/spark/security/CryptoStreamUtilsSuite.scala new file mode 100644 index 0000000000000..81eb907ac7ba6 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/security/CryptoStreamUtilsSuite.scala @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.spark.security + +import java.security.PrivilegedExceptionAction + +import org.apache.hadoop.security.{Credentials, UserGroupInformation} + +import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite} +import org.apache.spark.internal.config._ +import org.apache.spark.security.CryptoStreamUtils._ + +class CryptoStreamUtilsSuite extends SparkFunSuite { + val ugi = UserGroupInformation.createUserForTesting("testuser", Array("testgroup")) + + test("Crypto configuration conversion") { + val sparkKey1 = s"${SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX}a.b.c" + val sparkVal1 = "val1" + val cryptoKey1 = s"${COMMONS_CRYPTO_CONF_PREFIX}a.b.c" + + val sparkKey2 = SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX.stripSuffix(".") + "A.b.c" + val sparkVal2 = "val2" + val cryptoKey2 = s"${COMMONS_CRYPTO_CONF_PREFIX}A.b.c" + val conf = new SparkConf() + conf.set(sparkKey1, sparkVal1) + conf.set(sparkKey2, sparkVal2) + val props = CryptoStreamUtils.toCryptoConf(conf) + assert(props.getProperty(cryptoKey1) === sparkVal1) + assert(!props.containsKey(cryptoKey2)) + } + + test("Shuffle encryption is disabled by default") { + ugi.doAs(new PrivilegedExceptionAction[Unit]() { + override def run(): Unit = { + val credentials = UserGroupInformation.getCurrentUser.getCredentials() + val conf = new SparkConf() + initCredentials(conf, credentials) + assert(credentials.getSecretKey(SPARK_IO_TOKEN) === null) + } + }) + } + + test("Shuffle encryption key length should be 128 by default") { + ugi.doAs(new PrivilegedExceptionAction[Unit]() { + override def run(): Unit = { + val credentials = UserGroupInformation.getCurrentUser.getCredentials() + val conf = new SparkConf() + conf.set(IO_ENCRYPTION_ENABLED, true) + initCredentials(conf, credentials) + var key = credentials.getSecretKey(SPARK_IO_TOKEN) + assert(key !== null) + val actual = key.length * (java.lang.Byte.SIZE) + assert(actual === 128) + } + }) + } + + test("Initial credentials with key length in 256") { + ugi.doAs(new PrivilegedExceptionAction[Unit]() { + override def run(): Unit = { + val credentials = UserGroupInformation.getCurrentUser.getCredentials() + val conf = new SparkConf() + conf.set(IO_ENCRYPTION_KEY_SIZE_BITS, 256) + conf.set(IO_ENCRYPTION_ENABLED, true) + initCredentials(conf, credentials) + var key = credentials.getSecretKey(SPARK_IO_TOKEN) + assert(key !== null) + val actual = key.length * (java.lang.Byte.SIZE) + assert(actual === 256) + } + }) + } + + test("Initial credentials with invalid key length") { + ugi.doAs(new PrivilegedExceptionAction[Unit]() { + override def run(): Unit = { + val credentials = UserGroupInformation.getCurrentUser.getCredentials() + val conf = new SparkConf() + conf.set(IO_ENCRYPTION_KEY_SIZE_BITS, 328) + conf.set(IO_ENCRYPTION_ENABLED, true) + val thrown = intercept[IllegalArgumentException] { + initCredentials(conf, credentials) + } + } + }) + } + + private[this] def initCredentials(conf: SparkConf, credentials: Credentials): Unit = { + if (conf.get(IO_ENCRYPTION_ENABLED)) { + SecurityManager.initIOEncryptionKey(conf, credentials) + } + } +} diff --git 
a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala index 5132384a5ed7d..ed9428820ff6c 100644 --- a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala +++ b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala @@ -94,7 +94,7 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte args(1).asInstanceOf[File], args(2).asInstanceOf[SerializerInstance], args(3).asInstanceOf[Int], - compressStream = identity, + wrapStream = identity, syncWrites = false, args(4).asInstanceOf[ShuffleWriteMetrics], blockId = args(0).asInstanceOf[BlockId] diff --git a/dev/deps/spark-deps-hadoop-2.2 b/dev/deps/spark-deps-hadoop-2.2 index 326271a7e2b23..eaed0889ac36f 100644 --- a/dev/deps/spark-deps-hadoop-2.2 +++ b/dev/deps/spark-deps-hadoop-2.2 @@ -27,6 +27,7 @@ commons-collections-3.2.2.jar commons-compiler-2.7.6.jar commons-compress-1.4.1.jar commons-configuration-1.6.jar +commons-crypto-1.0.0.jar commons-dbcp-1.4.jar commons-digester-1.8.jar commons-httpclient-3.1.jar diff --git a/dev/deps/spark-deps-hadoop-2.3 b/dev/deps/spark-deps-hadoop-2.3 index 1ff6ecb7342bb..d68a7f462ba7f 100644 --- a/dev/deps/spark-deps-hadoop-2.3 +++ b/dev/deps/spark-deps-hadoop-2.3 @@ -30,6 +30,7 @@ commons-collections-3.2.2.jar commons-compiler-2.7.6.jar commons-compress-1.4.1.jar commons-configuration-1.6.jar +commons-crypto-1.0.0.jar commons-dbcp-1.4.jar commons-digester-1.8.jar commons-httpclient-3.1.jar diff --git a/dev/deps/spark-deps-hadoop-2.4 b/dev/deps/spark-deps-hadoop-2.4 index 68333849cf4c9..346f19767d367 100644 --- a/dev/deps/spark-deps-hadoop-2.4 +++ b/dev/deps/spark-deps-hadoop-2.4 @@ -30,6 +30,7 @@ commons-collections-3.2.2.jar commons-compiler-2.7.6.jar commons-compress-1.4.1.jar commons-configuration-1.6.jar +commons-crypto-1.0.0.jar commons-dbcp-1.4.jar commons-digester-1.8.jar commons-httpclient-3.1.jar diff --git a/dev/deps/spark-deps-hadoop-2.6 b/dev/deps/spark-deps-hadoop-2.6 index 787d06c3512db..6f4695f345a48 100644 --- a/dev/deps/spark-deps-hadoop-2.6 +++ b/dev/deps/spark-deps-hadoop-2.6 @@ -34,6 +34,7 @@ commons-collections-3.2.2.jar commons-compiler-2.7.6.jar commons-compress-1.4.1.jar commons-configuration-1.6.jar +commons-crypto-1.0.0.jar commons-dbcp-1.4.jar commons-digester-1.8.jar commons-httpclient-3.1.jar diff --git a/dev/deps/spark-deps-hadoop-2.7 b/dev/deps/spark-deps-hadoop-2.7 index 386495bf1bbb1..7a86a8bd88846 100644 --- a/dev/deps/spark-deps-hadoop-2.7 +++ b/dev/deps/spark-deps-hadoop-2.7 @@ -34,6 +34,7 @@ commons-collections-3.2.2.jar commons-compiler-2.7.6.jar commons-compress-1.4.1.jar commons-configuration-1.6.jar +commons-crypto-1.0.0.jar commons-dbcp-1.4.jar commons-digester-1.8.jar commons-httpclient-3.1.jar diff --git a/docs/configuration.md b/docs/configuration.md index 2f801961050e1..d0c76aaad0b35 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -559,6 +559,29 @@ Apart from these, the following properties are also available, and may be useful spark.io.compression.codec. + + spark.io.encryption.enabled + false + + Enable IO encryption. Only supported in YARN mode. + + + + spark.io.encryption.keySizeBits + 128 + + IO encryption key size in bits. Supported values are 128, 192 and 256. + + + + spark.io.encryption.keygen.algorithm + HmacSHA1 + + The algorithm to use when generating the IO encryption key. 
The supported algorithms are + described in the KeyGenerator section of the Java Cryptography Architecture Standard Algorithm + Name Documentation. + + #### Spark UI diff --git a/pom.xml b/pom.xml index 74238db59ed8f..2c265c1fa325e 100644 --- a/pom.xml +++ b/pom.xml @@ -180,6 +180,7 @@ 2.52.0 2.8 1.8 + 1.0.0 ${java.home} @@ -1825,6 +1826,17 @@ jline ${jline.version} + + org.apache.commons + commons-crypto + ${commons-crypto.version} + + + net.java.dev.jna + jna + + + diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala index 7fbbe91de94e5..2398f0aea316a 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala @@ -1003,6 +1003,10 @@ private[spark] class Client( val securityManager = new SecurityManager(sparkConf) amContainer.setApplicationACLs( YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava) + + if (sparkConf.get(IO_ENCRYPTION_ENABLED)) { + SecurityManager.initIOEncryptionKey(sparkConf, credentials) + } setupSecurityToken(amContainer) amContainer diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/IOEncryptionSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/IOEncryptionSuite.scala new file mode 100644 index 0000000000000..1c60315b21ae8 --- /dev/null +++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/IOEncryptionSuite.scala @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.spark.deploy.yarn + +import java.io._ +import java.nio.charset.StandardCharsets +import java.security.PrivilegedExceptionAction +import java.util.UUID + +import org.apache.hadoop.security.{Credentials, UserGroupInformation} +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers} + +import org.apache.spark._ +import org.apache.spark.deploy.SparkHadoopUtil +import org.apache.spark.internal.config._ +import org.apache.spark.serializer._ +import org.apache.spark.storage._ + +class IOEncryptionSuite extends SparkFunSuite with Matchers with BeforeAndAfterAll + with BeforeAndAfterEach { + private[this] val blockId = new TempShuffleBlockId(UUID.randomUUID()) + private[this] val conf = new SparkConf() + private[this] val ugi = UserGroupInformation.createUserForTesting("testuser", Array("testgroup")) + private[this] val serializer = new KryoSerializer(conf) + + override def beforeAll(): Unit = { + System.setProperty("SPARK_YARN_MODE", "true") + ugi.doAs(new PrivilegedExceptionAction[Unit]() { + override def run(): Unit = { + conf.set(IO_ENCRYPTION_ENABLED, true) + val creds = new Credentials() + SecurityManager.initIOEncryptionKey(conf, creds) + SparkHadoopUtil.get.addCurrentUserCredentials(creds) + } + }) + } + + override def afterAll(): Unit = { + SparkEnv.set(null) + System.clearProperty("SPARK_YARN_MODE") + } + + override def beforeEach(): Unit = { + super.beforeEach() + } + + override def afterEach(): Unit = { + super.afterEach() + conf.set("spark.shuffle.compress", false.toString) + conf.set("spark.shuffle.spill.compress", false.toString) + } + + test("IO encryption read and write") { + ugi.doAs(new PrivilegedExceptionAction[Unit] { + override def run(): Unit = { + conf.set(IO_ENCRYPTION_ENABLED, true) + conf.set("spark.shuffle.compress", false.toString) + conf.set("spark.shuffle.spill.compress", false.toString) + testYarnIOEncryptionWriteRead() + } + }) + } + + test("IO encryption read and write with shuffle compression enabled") { + ugi.doAs(new PrivilegedExceptionAction[Unit] { + override def run(): Unit = { + conf.set(IO_ENCRYPTION_ENABLED, true) + conf.set("spark.shuffle.compress", true.toString) + conf.set("spark.shuffle.spill.compress", true.toString) + testYarnIOEncryptionWriteRead() + } + }) + } + + private[this] def testYarnIOEncryptionWriteRead(): Unit = { + val plainStr = "hello world" + val outputStream = new ByteArrayOutputStream() + val serializerManager = new SerializerManager(serializer, conf) + val wrappedOutputStream = serializerManager.wrapStream(blockId, outputStream) + wrappedOutputStream.write(plainStr.getBytes(StandardCharsets.UTF_8)) + wrappedOutputStream.close() + + val encryptedBytes = outputStream.toByteArray + val encryptedStr = new String(encryptedBytes) + assert(plainStr !== encryptedStr) + + val inputStream = new ByteArrayInputStream(encryptedBytes) + val wrappedInputStream = serializerManager.wrapStream(blockId, inputStream) + val decryptedBytes = new Array[Byte](1024) + val len = wrappedInputStream.read(decryptedBytes) + val decryptedStr = new String(decryptedBytes, 0, len, StandardCharsets.UTF_8) + assert(decryptedStr === plainStr) + } +} From fb20084313470593d8507a43fcb2cde2a4c854d9 Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Tue, 30 Aug 2016 13:15:21 -0700 Subject: [PATCH 142/270] [SPARK-17304] Fix perf. issue caused by TaskSetManager.abortIfCompletelyBlacklisted This patch addresses a minor scheduler performance issue that was introduced in #13603. 
If you run ``` sc.parallelize(1 to 100000, 100000).map(identity).count() ``` then most of the time ends up being spent in `TaskSetManager.abortIfCompletelyBlacklisted()`: ![image](https://cloud.githubusercontent.com/assets/50748/18071032/428732b0-6e07-11e6-88b2-c9423cd61f53.png) When processing resource offers, the scheduler uses a nested loop which considers every task set at multiple locality levels: ```scala for (taskSet <- sortedTaskSets; maxLocality <- taskSet.myLocalityLevels) { do { launchedTask = resourceOfferSingleTaskSet( taskSet, maxLocality, shuffledOffers, availableCpus, tasks) } while (launchedTask) } ``` In order to prevent jobs with globally blacklisted tasks from hanging, #13603 added a `taskSet.abortIfCompletelyBlacklisted` call inside of `resourceOfferSingleTaskSet`; if a call to `resourceOfferSingleTaskSet` fails to schedule any tasks, then `abortIfCompletelyBlacklisted` checks whether the tasks are completely blacklisted in order to figure out whether they will ever be schedulable. The problem with this placement of the call is that the last call to `resourceOfferSingleTaskSet` in the `while` loop will return `false`, implying that `resourceOfferSingleTaskSet` will call `abortIfCompletelyBlacklisted`, so almost every call to `resourceOffers` will trigger the `abortIfCompletelyBlacklisted` check for every task set. Instead, I think that this call should be moved out of the innermost loop and should be called _at most_ once per task set in case none of the task set's tasks can be scheduled at any locality level. Before this patch's changes, the microbenchmark example that I posted above took 35 seconds to run, but it now only takes 15 seconds after this change. /cc squito and kayousterhout for review. Author: Josh Rosen Closes #14871 from JoshRosen/bail-early-if-no-cpus. --- .../spark/scheduler/TaskSchedulerImpl.scala | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala index dc05e764c3951..7d905538c66a7 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala @@ -278,9 +278,6 @@ private[spark] class TaskSchedulerImpl( } } } - if (!launchedTask) { - taskSet.abortIfCompletelyBlacklisted(executorIdToHost.keys) - } return launchedTask } @@ -326,12 +323,19 @@ private[spark] class TaskSchedulerImpl( // Take each TaskSet in our scheduling order, and then offer it each node in increasing order // of locality levels so that it gets a chance to launch local tasks on all of them. 
// NOTE: the preferredLocality order: PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY - var launchedTask = false - for (taskSet <- sortedTaskSets; maxLocality <- taskSet.myLocalityLevels) { - do { - launchedTask = resourceOfferSingleTaskSet( - taskSet, maxLocality, shuffledOffers, availableCpus, tasks) - } while (launchedTask) + for (taskSet <- sortedTaskSets) { + var launchedAnyTask = false + var launchedTaskAtCurrentMaxLocality = false + for (currentMaxLocality <- taskSet.myLocalityLevels) { + do { + launchedTaskAtCurrentMaxLocality = resourceOfferSingleTaskSet( + taskSet, currentMaxLocality, shuffledOffers, availableCpus, tasks) + launchedAnyTask |= launchedTaskAtCurrentMaxLocality + } while (launchedTaskAtCurrentMaxLocality) + } + if (!launchedAnyTask) { + taskSet.abortIfCompletelyBlacklisted(executorIdToHost.keys) + } } if (tasks.size > 0) { From 02ac379e8645ce5d32e033f6683136da16fbe584 Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Tue, 30 Aug 2016 13:22:21 -0700 Subject: [PATCH 143/270] [SPARK-17314][CORE] Use Netty's DefaultThreadFactory to enable its fast ThreadLocal impl ## What changes were proposed in this pull request? When a thread is a Netty's FastThreadLocalThread, Netty will use its fast ThreadLocal implementation. It has a better performance than JDK's (See the benchmark results in https://github.com/netty/netty/pull/4417, note: it's not a fix to Netty's FastThreadLocal. It just fixed an issue in Netty's benchmark codes) This PR just changed the ThreadFactory to Netty's DefaultThreadFactory which will use FastThreadLocalThread. There is also a minor change to the thread names. See https://github.com/netty/netty/blob/netty-4.0.22.Final/common/src/main/java/io/netty/util/concurrent/DefaultThreadFactory.java#L94 ## How was this patch tested? Author: Shixiong Zhu Closes #14879 from zsxwing/netty-thread. --- .../java/org/apache/spark/network/util/NettyUtils.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/NettyUtils.java b/common/network-common/src/main/java/org/apache/spark/network/util/NettyUtils.java index 10de9d3a5caf6..5e85180bd6f9f 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/util/NettyUtils.java +++ b/common/network-common/src/main/java/org/apache/spark/network/util/NettyUtils.java @@ -20,7 +20,6 @@ import java.lang.reflect.Field; import java.util.concurrent.ThreadFactory; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; @@ -31,6 +30,7 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.util.concurrent.DefaultThreadFactory; import io.netty.util.internal.PlatformDependent; /** @@ -39,10 +39,7 @@ public class NettyUtils { /** Creates a new ThreadFactory which prefixes each thread with the given name. */ public static ThreadFactory createThreadFactory(String threadPoolPrefix) { - return new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat(threadPoolPrefix + "-%d") - .build(); + return new DefaultThreadFactory(threadPoolPrefix, true); } /** Creates a Netty EventLoopGroup based on the IOMode. 
*/ From f7beae6da02e6b70a34c714e93136becbde7679b Mon Sep 17 00:00:00 2001 From: Alex Bozarth Date: Tue, 30 Aug 2016 16:33:54 -0500 Subject: [PATCH 144/270] [SPARK-17243][WEB UI] Spark 2.0 History Server won't load with very large application history ## What changes were proposed in this pull request? With the new History Server the summary page loads the application list via the the REST API, this makes it very slow to impossible to load with large (10K+) application history. This pr fixes this by adding the `spark.history.ui.maxApplications` conf to limit the number of applications the History Server displays. This is accomplished using a new optional `limit` param for the `applications` api. (Note this only applies to what the summary page displays, all the Application UI's are still accessible if the user knows the App ID and goes to the Application UI directly.) I've also added a new test for the `limit` param in `HistoryServerSuite.scala` ## How was this patch tested? Manual testing and dev/run-tests Author: Alex Bozarth Closes #14835 from ajbozarth/spark17243. --- .../org/apache/spark/ui/static/historypage.js | 8 ++- .../spark/deploy/history/HistoryPage.scala | 3 +- .../spark/deploy/history/HistoryServer.scala | 4 ++ .../spark/internal/config/package.scala | 4 ++ .../api/v1/ApplicationListResource.scala | 10 ++- .../limit_app_list_json_expectation.json | 67 +++++++++++++++++++ .../deploy/history/HistoryServerSuite.scala | 1 + docs/monitoring.md | 16 ++++- 8 files changed, 106 insertions(+), 7 deletions(-) create mode 100644 core/src/test/resources/HistoryServerExpectations/limit_app_list_json_expectation.json diff --git a/core/src/main/resources/org/apache/spark/ui/static/historypage.js b/core/src/main/resources/org/apache/spark/ui/static/historypage.js index 5b9afb59ef8e2..c8094005c65dd 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/historypage.js +++ b/core/src/main/resources/org/apache/spark/ui/static/historypage.js @@ -15,6 +15,12 @@ * limitations under the License. */ +var appLimit = -1; + +function setAppLimit(val) { + appLimit = val; +} + function makeIdNumeric(id) { var strs = id.split("_"); if (strs.length < 3) { @@ -89,7 +95,7 @@ $(document).ready(function() { requestedIncomplete = getParameterByName("showIncomplete", searchString); requestedIncomplete = (requestedIncomplete == "true" ? true : false); - $.getJSON("api/v1/applications", function(response,status,jqXHR) { + $.getJSON("api/v1/applications?limit=" + appLimit, function(response,status,jqXHR) { var array = []; var hasMultipleAttempts = false; for (i in response) { diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryPage.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryPage.scala index 74f78021ed6cd..b4f5a6114f3de 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryPage.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryPage.scala @@ -45,7 +45,8 @@ private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("") ++ ++ ++ - + ++ + } else if (requestedIncomplete) {
    <h4>No incomplete applications found!</h4>
    } else { diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala index d821474bdb590..c178917d8da3b 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala @@ -28,6 +28,7 @@ import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder} import org.apache.spark.{SecurityManager, SparkConf} import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging +import org.apache.spark.internal.config._ import org.apache.spark.status.api.v1.{ApiRootResource, ApplicationInfo, ApplicationsListResource, UIRoot} import org.apache.spark.ui.{SparkUI, UIUtils, WebUI} import org.apache.spark.ui.JettyUtils._ @@ -55,6 +56,9 @@ class HistoryServer( // How many applications to retain private val retainedApplications = conf.getInt("spark.history.retainedApplications", 50) + // How many applications the summary ui displays + private[history] val maxApplications = conf.get(HISTORY_UI_MAX_APPS); + // application private val appCache = new ApplicationCache(this, retainedApplications, new SystemClock()) diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala index ebce07c1e3b3e..02d7d182a48c2 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/package.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala @@ -120,6 +120,10 @@ package object config { .intConf .createWithDefault(100000) + // To limit how many applications are shown in the History Server summary ui + private[spark] val HISTORY_UI_MAX_APPS = + ConfigBuilder("spark.history.ui.maxApplications").intConf.createWithDefault(Integer.MAX_VALUE) + private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled") .booleanConf .createWithDefault(false) diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/ApplicationListResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/ApplicationListResource.scala index 02fd2985fa20d..075b9ba37dc84 100644 --- a/core/src/main/scala/org/apache/spark/status/api/v1/ApplicationListResource.scala +++ b/core/src/main/scala/org/apache/spark/status/api/v1/ApplicationListResource.scala @@ -29,7 +29,8 @@ private[v1] class ApplicationListResource(uiRoot: UIRoot) { def appList( @QueryParam("status") status: JList[ApplicationStatus], @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam, - @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam) + @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam, + @QueryParam("limit") limit: Integer) : Iterator[ApplicationInfo] = { val allApps = uiRoot.getApplicationInfoList val adjStatus = { @@ -41,7 +42,7 @@ private[v1] class ApplicationListResource(uiRoot: UIRoot) { } val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED) val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING) - allApps.filter { app => + val appList = allApps.filter { app => val anyRunning = app.attempts.exists(!_.completed) // if any attempt is still running, we consider the app to also still be running val statusOk = (!anyRunning && includeCompleted) || @@ -53,6 +54,11 @@ private[v1] class ApplicationListResource(uiRoot: UIRoot) { } statusOk && dateOk } + if (limit != null) { + appList.take(limit) + } else { + appList + } } 
} diff --git a/core/src/test/resources/HistoryServerExpectations/limit_app_list_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/limit_app_list_json_expectation.json new file mode 100644 index 0000000000000..9165f549d7d25 --- /dev/null +++ b/core/src/test/resources/HistoryServerExpectations/limit_app_list_json_expectation.json @@ -0,0 +1,67 @@ +[ { + "id" : "local-1430917381534", + "name" : "Spark shell", + "attempts" : [ { + "startTime" : "2015-05-06T13:03:00.893GMT", + "endTime" : "2015-05-06T13:03:11.398GMT", + "lastUpdated" : "", + "duration" : 10505, + "sparkUser" : "irashid", + "completed" : true, + "startTimeEpoch" : 1430917380893, + "endTimeEpoch" : 1430917391398, + "lastUpdatedEpoch" : 0 + } ] +}, { + "id" : "local-1430917381535", + "name" : "Spark shell", + "attempts" : [ { + "attemptId" : "2", + "startTime" : "2015-05-06T13:03:00.893GMT", + "endTime" : "2015-05-06T13:03:00.950GMT", + "lastUpdated" : "", + "duration" : 57, + "sparkUser" : "irashid", + "completed" : true, + "startTimeEpoch" : 1430917380893, + "endTimeEpoch" : 1430917380950, + "lastUpdatedEpoch" : 0 + }, { + "attemptId" : "1", + "startTime" : "2015-05-06T13:03:00.880GMT", + "endTime" : "2015-05-06T13:03:00.890GMT", + "lastUpdated" : "", + "duration" : 10, + "sparkUser" : "irashid", + "completed" : true, + "startTimeEpoch" : 1430917380880, + "endTimeEpoch" : 1430917380890, + "lastUpdatedEpoch" : 0 + } ] +}, { + "id" : "local-1426533911241", + "name" : "Spark shell", + "attempts" : [ { + "attemptId" : "2", + "startTime" : "2015-03-17T23:11:50.242GMT", + "endTime" : "2015-03-17T23:12:25.177GMT", + "lastUpdated" : "", + "duration" : 34935, + "sparkUser" : "irashid", + "completed" : true, + "startTimeEpoch" : 1426633910242, + "endTimeEpoch" : 1426633945177, + "lastUpdatedEpoch" : 0 + }, { + "attemptId" : "1", + "startTime" : "2015-03-16T19:25:10.242GMT", + "endTime" : "2015-03-16T19:25:45.177GMT", + "lastUpdated" : "", + "duration" : 34935, + "sparkUser" : "irashid", + "completed" : true, + "startTimeEpoch" : 1426533910242, + "endTimeEpoch" : 1426533945177, + "lastUpdatedEpoch" : 0 + } ] +} ] diff --git a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala index 631a7cd9d5d7a..ae3f5d9c012ea 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala @@ -100,6 +100,7 @@ class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with Matchers "minDate app list json" -> "applications?minDate=2015-02-10", "maxDate app list json" -> "applications?maxDate=2015-02-10", "maxDate2 app list json" -> "applications?maxDate=2015-02-03T16:42:40.000GMT", + "limit app list json" -> "applications?limit=3", "one app json" -> "applications/local-1422981780767", "one app multi-attempt json" -> "applications/local-1426533911241", "job list json" -> "applications/local-1422981780767/jobs", diff --git a/docs/monitoring.md b/docs/monitoring.md index 6fdf87b4be57c..5804e4f26cd93 100644 --- a/docs/monitoring.md +++ b/docs/monitoring.md @@ -114,8 +114,17 @@ The history server can be configured as follows: spark.history.retainedApplications 50 - The number of application UIs to retain. If this cap is exceeded, then the oldest - applications will be removed. + The number of applications to retain UI data for in the cache. 
If this cap is exceeded, then + the oldest applications will be removed from the cache. If an application is not in the cache, + it will have to be loaded from disk if its accessed from the UI. + + + + spark.history.ui.maxApplications + Int.MaxValue + + The number of applications to display on the history summary page. Application UIs are still + available by accessing their URLs directly even if they are not displayed on the history summary page. @@ -242,7 +251,8 @@ can be identified by their `[attempt-id]`. In the API listed below, when running
    Examples:
    ?minDate=2015-02-10
    ?minDate=2015-02-03T16:42:40.000GMT -
    ?maxDate=[date] latest date/time to list; uses same format as minDate. +
    ?maxDate=[date] latest date/time to list; uses same format as minDate. +
    ?limit=[limit] limits the number of applications listed. /applications/[app-id]/jobs From 231f973295129dca976f2e4a8222a63318d4aafe Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Tue, 30 Aug 2016 20:04:52 -0700 Subject: [PATCH 145/270] [SPARK-17318][TESTS] Fix ReplSuite replicating blocks of object with class defined in repl ## What changes were proposed in this pull request? There are a lot of failures recently: http://spark-tests.appspot.com/tests/org.apache.spark.repl.ReplSuite/replicating%20blocks%20of%20object%20with%20class%20defined%20in%20repl This PR just changed the persist level to `MEMORY_AND_DISK_2` to avoid blocks being evicted from memory. ## How was this patch tested? Jenkins unit tests. Author: Shixiong Zhu Closes #14884 from zsxwing/SPARK-17318. --- .../src/test/scala/org/apache/spark/repl/ReplSuite.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala index 06b09f3158d77..f1284b1df31b4 100644 --- a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala +++ b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala @@ -401,7 +401,7 @@ class ReplSuite extends SparkFunSuite { """ |import org.apache.spark.storage.StorageLevel._ |case class Foo(i: Int) - |val ret = sc.parallelize((1 to 100).map(Foo), 10).persist(MEMORY_ONLY_2) + |val ret = sc.parallelize((1 to 100).map(Foo), 10).persist(MEMORY_AND_DISK_2) |ret.count() |sc.getExecutorStorageStatus.map(s => s.rddBlocksById(ret.id).size).sum """.stripMargin) From d92cd227cf245be9ab8f9bce714386f8283a97cb Mon Sep 17 00:00:00 2001 From: Kazuaki Ishizaki Date: Wed, 31 Aug 2016 12:40:53 +0800 Subject: [PATCH 146/270] [SPARK-15985][SQL] Eliminate redundant cast from an array without null or a map without null ## What changes were proposed in this pull request? This PR eliminates redundant cast from an `ArrayType` with `containsNull = false` or a `MapType` with `containsNull = false`. For example, in `ArrayType` case, current implementation leaves a cast `cast(value#63 as array).toDoubleArray`. However, we can eliminate `cast(value#63 as array)` if we know `value#63` does not include `null`. This PR apply this elimination for `ArrayType` and `MapType` in `SimplifyCasts` at a plan optimization phase. In summary, we got 1.2-1.3x performance improvements over the code before applying this PR. 
Here are performance results of benchmark programs: ``` test("Read array in Dataset") { import sparkSession.implicits._ val iters = 5 val n = 1024 * 1024 val rows = 15 val benchmark = new Benchmark("Read primnitive array", n) val rand = new Random(511) val intDS = sparkSession.sparkContext.parallelize(0 until rows, 1) .map(i => Array.tabulate(n)(i => i)).toDS() intDS.count() // force to create ds val lastElement = n - 1 val randElement = rand.nextInt(lastElement) benchmark.addCase(s"Read int array in Dataset", numIters = iters)(iter => { val idx0 = randElement val idx1 = lastElement intDS.map(a => a(0) + a(idx0) + a(idx1)).collect }) val doubleDS = sparkSession.sparkContext.parallelize(0 until rows, 1) .map(i => Array.tabulate(n)(i => i.toDouble)).toDS() doubleDS.count() // force to create ds benchmark.addCase(s"Read double array in Dataset", numIters = iters)(iter => { val idx0 = randElement val idx1 = lastElement doubleDS.map(a => a(0) + a(idx0) + a(idx1)).collect }) benchmark.run() } Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.10.4 Intel(R) Core(TM) i5-5257U CPU 2.70GHz without this PR Read primnitive array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ Read int array in Dataset 525 / 690 2.0 500.9 1.0X Read double array in Dataset 947 / 1209 1.1 902.7 0.6X with this PR Read primnitive array: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ Read int array in Dataset 400 / 492 2.6 381.5 1.0X Read double array in Dataset 788 / 870 1.3 751.4 0.5X ``` An example program that originally caused this performance issue. ``` val ds = Seq(Array(1.0, 2.0, 3.0), Array(4.0, 5.0, 6.0)).toDS() val ds2 = ds.map(p => { var s = 0.0 for (i <- 0 to 2) { s += p(i) } s }) ds2.show ds2.explain(true) ``` Plans before this PR ``` == Parsed Logical Plan == 'SerializeFromObject [input[0, double, true] AS value#68] +- 'MapElements , obj#67: double +- 'DeserializeToObject unresolveddeserializer(upcast(getcolumnbyordinal(0, ArrayType(DoubleType,false)), ArrayType(DoubleType,false), - root class: "scala.Array").toDoubleArray), obj#66: [D +- LocalRelation [value#63] == Analyzed Logical Plan == value: double SerializeFromObject [input[0, double, true] AS value#68] +- MapElements , obj#67: double +- DeserializeToObject cast(value#63 as array).toDoubleArray, obj#66: [D +- LocalRelation [value#63] == Optimized Logical Plan == SerializeFromObject [input[0, double, true] AS value#68] +- MapElements , obj#67: double +- DeserializeToObject cast(value#63 as array).toDoubleArray, obj#66: [D +- LocalRelation [value#63] == Physical Plan == *SerializeFromObject [input[0, double, true] AS value#68] +- *MapElements , obj#67: double +- *DeserializeToObject cast(value#63 as array).toDoubleArray, obj#66: [D +- LocalTableScan [value#63] ``` Plans after this PR ``` == Parsed Logical Plan == 'SerializeFromObject [input[0, double, true] AS value#6] +- 'MapElements , obj#5: double +- 'DeserializeToObject unresolveddeserializer(upcast(getcolumnbyordinal(0, ArrayType(DoubleType,false)), ArrayType(DoubleType,false), - root class: "scala.Array").toDoubleArray), obj#4: [D +- LocalRelation [value#1] == Analyzed Logical Plan == value: double SerializeFromObject [input[0, double, true] AS value#6] +- MapElements , obj#5: double +- DeserializeToObject cast(value#1 as array).toDoubleArray, obj#4: [D +- LocalRelation [value#1] == Optimized 
Logical Plan == SerializeFromObject [input[0, double, true] AS value#6] +- MapElements , obj#5: double +- DeserializeToObject value#1.toDoubleArray, obj#4: [D +- LocalRelation [value#1] == Physical Plan == *SerializeFromObject [input[0, double, true] AS value#6] +- *MapElements , obj#5: double +- *DeserializeToObject value#1.toDoubleArray, obj#4: [D +- LocalTableScan [value#1] ``` ## How was this patch tested? Tested by new test cases in `SimplifyCastsSuite` Author: Kazuaki Ishizaki Closes #13704 from kiszk/SPARK-15985. --- .../spark/sql/catalyst/dsl/package.scala | 3 + .../sql/catalyst/optimizer/expressions.scala | 6 ++ .../optimizer/SimplifyCastsSuite.scala | 67 +++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100644 sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyCastsSuite.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala index 9f54d709a022d..8549187a66369 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala @@ -242,6 +242,9 @@ package object dsl { def array(dataType: DataType): AttributeReference = AttributeReference(s, ArrayType(dataType), nullable = true)() + def array(arrayType: ArrayType): AttributeReference = + AttributeReference(s, arrayType)() + /** Creates a new AttributeReference of type map */ def map(keyType: DataType, valueType: DataType): AttributeReference = map(MapType(keyType, valueType)) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala index 74dfd10189d81..82ab111aa2259 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala @@ -475,6 +475,12 @@ case class OptimizeCodegen(conf: CatalystConf) extends Rule[LogicalPlan] { object SimplifyCasts extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions { case Cast(e, dataType) if e.dataType == dataType => e + case c @ Cast(e, dataType) => (e.dataType, dataType) match { + case (ArrayType(from, false), ArrayType(to, true)) if from == to => e + case (MapType(fromKey, fromValue, false), MapType(toKey, toValue, true)) + if fromKey == toKey && fromValue == toValue => e + case _ => c + } } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyCastsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyCastsSuite.scala new file mode 100644 index 0000000000000..e84f11272d214 --- /dev/null +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyCastsSuite.scala @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import org.apache.spark.sql.catalyst.dsl._ +import org.apache.spark.sql.catalyst.dsl.expressions._ +import org.apache.spark.sql.catalyst.dsl.plans._ +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.plans.PlanTest +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules.RuleExecutor +import org.apache.spark.sql.types._ + +class SimplifyCastsSuite extends PlanTest { + + object Optimize extends RuleExecutor[LogicalPlan] { + val batches = Batch("SimplifyCasts", FixedPoint(50), SimplifyCasts) :: Nil + } + + test("non-nullable element array to nullable element array cast") { + val input = LocalRelation('a.array(ArrayType(IntegerType, false))) + val plan = input.select('a.cast(ArrayType(IntegerType, true)).as("casted")).analyze + val optimized = Optimize.execute(plan) + val expected = input.select('a.as("casted")).analyze + comparePlans(optimized, expected) + } + + test("nullable element to non-nullable element array cast") { + val input = LocalRelation('a.array(ArrayType(IntegerType, true))) + val plan = input.select('a.cast(ArrayType(IntegerType, false)).as("casted")).analyze + val optimized = Optimize.execute(plan) + comparePlans(optimized, plan) + } + + test("non-nullable value map to nullable value map cast") { + val input = LocalRelation('m.map(MapType(StringType, StringType, false))) + val plan = input.select('m.cast(MapType(StringType, StringType, true)) + .as("casted")).analyze + val optimized = Optimize.execute(plan) + val expected = input.select('m.as("casted")).analyze + comparePlans(optimized, expected) + } + + test("nullable value map to non-nullable value map cast") { + val input = LocalRelation('m.map(MapType(StringType, StringType, true))) + val plan = input.select('m.cast(MapType(StringType, StringType, false)) + .as("casted")).analyze + val optimized = Optimize.execute(plan) + comparePlans(optimized, plan) + } +} + From fa6347938fc1c72ddc03a5f3cd2e929b5694f0a6 Mon Sep 17 00:00:00 2001 From: Jeff Zhang Date: Wed, 31 Aug 2016 00:20:41 -0700 Subject: [PATCH 147/270] [SPARK-17178][SPARKR][SPARKSUBMIT] Allow to set sparkr shell command through --conf ## What changes were proposed in this pull request? Allow user to set sparkr shell command through --conf spark.r.shell.command ## How was this patch tested? Unit test is added and also verify it manually through ``` bin/sparkr --master yarn-client --conf spark.r.shell.command=/usr/local/bin/R ``` Author: Jeff Zhang Closes #14744 from zjffdu/SPARK-17178. 
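The precedence being added is: `spark.r.shell.command` first, then the `SPARKR_DRIVER_R` environment variable, then plain `R`. A minimal Scala sketch of that resolution order follows; the helper name `resolveRShell` is invented here, and the actual change is the Java `firstNonEmpty` call in `SparkSubmitCommandBuilder` shown in the diff below.

```scala
// Illustrative sketch only (not code from this patch): mirrors the precedence that
// SparkSubmitCommandBuilder.buildSparkRCommand applies via firstNonEmpty in Java.
// `resolveRShell` is a hypothetical helper name.
def resolveRShell(conf: Map[String, String], env: Map[String, String]): String = {
  Seq(conf.get("spark.r.shell.command"),   // 1. --conf spark.r.shell.command=... wins
      env.get("SPARKR_DRIVER_R"))          // 2. otherwise the environment variable
    .flatten
    .find(_.nonEmpty)
    .getOrElse("R")                        // 3. otherwise plain "R" from the PATH
}

// The --conf value takes precedence even when SPARKR_DRIVER_R is set:
assert(resolveRShell(
  conf = Map("spark.r.shell.command" -> "/usr/local/bin/R"),
  env  = Map("SPARKR_DRIVER_R" -> "/opt/R/bin/R")) == "/usr/local/bin/R")
```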
--- docs/configuration.md | 11 ++++++++++- .../apache/spark/launcher/SparkLauncher.java | 2 ++ .../launcher/SparkSubmitCommandBuilder.java | 3 ++- .../SparkSubmitCommandBuilderSuite.java | 18 ++++++++++++++++++ 4 files changed, 32 insertions(+), 2 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index d0c76aaad0b35..6e98f67b73751 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1786,6 +1786,14 @@ showDF(properties, numRows = 200, truncate = FALSE) Executable for executing R scripts in client modes for driver. Ignored in cluster modes. + + spark.r.shell.command + R + + Executable for executing sparkR shell in client modes for driver. Ignored in cluster modes. It is the same as environment variable SPARKR_DRIVER_R, but take precedence over it. + spark.r.shell.command is used for sparkR shell while spark.r.driver.command is used for running R script. + + #### Deploy @@ -1852,7 +1860,8 @@ The following variables can be set in `spark-env.sh`: SPARKR_DRIVER_R - R binary executable to use for SparkR shell (default is R). + R binary executable to use for SparkR shell (default is R). + Property spark.r.shell.command take precedence if it is set SPARK_LOCAL_IP diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java index 7b7a7bf57b110..ea56214d2390c 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java +++ b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java @@ -68,6 +68,8 @@ public class SparkLauncher { static final String PYSPARK_PYTHON = "spark.pyspark.python"; + static final String SPARKR_R_SHELL = "spark.r.shell.command"; + /** Logger name to use when launching a child process. 
*/ public static final String CHILD_PROCESS_LOGGER_NAME = "spark.launcher.childProcLoggerName"; diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java index f6da644e4c376..29c6d82cdbf19 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java +++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java @@ -336,7 +336,8 @@ private List buildSparkRCommand(Map env) throws IOExcept join(File.separator, sparkHome, "R", "lib", "SparkR", "profile", "shell.R")); List args = new ArrayList<>(); - args.add(firstNonEmpty(System.getenv("SPARKR_DRIVER_R"), "R")); + args.add(firstNonEmpty(conf.get(SparkLauncher.SPARKR_R_SHELL), + System.getenv("SPARKR_DRIVER_R"), "R")); return args; } diff --git a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java index 16e5a22401ca8..ad2e7a70c4eae 100644 --- a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java +++ b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java @@ -172,6 +172,24 @@ public void testPySparkFallback() throws Exception { assertEquals("arg1", cmd.get(cmd.size() - 1)); } + @Test + public void testSparkRShell() throws Exception { + List sparkSubmitArgs = Arrays.asList( + SparkSubmitCommandBuilder.SPARKR_SHELL, + "--master=foo", + "--deploy-mode=bar", + "--conf", "spark.r.shell.command=/usr/bin/R"); + + Map env = new HashMap<>(); + List cmd = buildCommand(sparkSubmitArgs, env); + assertEquals("/usr/bin/R", cmd.get(cmd.size() - 1)); + assertEquals( + String.format( + "\"%s\" \"foo\" \"%s\" \"bar\" \"--conf\" \"spark.r.shell.command=/usr/bin/R\" \"%s\"", + parser.MASTER, parser.DEPLOY_MODE, SparkSubmitCommandBuilder.SPARKR_SHELL_RESOURCE), + env.get("SPARKR_SUBMIT_ARGS")); + } + @Test public void testExamplesRunner() throws Exception { List sparkSubmitArgs = Arrays.asList( From 12fd0cd615683cd4c3e9094ce71a1e6fc33b8d6a Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Wed, 31 Aug 2016 17:08:08 +0800 Subject: [PATCH 148/270] [SPARK-17180][SPARK-17309][SPARK-17323][SQL] create AlterViewAsCommand to handle ALTER VIEW AS ## What changes were proposed in this pull request? Currently we use `CreateViewCommand` to implement ALTER VIEW AS, which has 3 bugs: 1. SPARK-17180: ALTER VIEW AS should alter temp view if view name has no database part and temp view exists 2. SPARK-17309: ALTER VIEW AS should issue exception if view does not exist. 3. SPARK-17323: ALTER VIEW AS should keep the previous table properties, comment, create_time, etc. The root cause is, ALTER VIEW AS is quite different from CREATE VIEW, we need different code path to handle them. However, in `CreateViewCommand`, there is no way to distinguish ALTER VIEW AS and CREATE VIEW, we have to introduce extra flag. But instead of doing this, I think a more natural way is to separate the ALTER VIEW AS logic into a new command. ## How was this patch tested? new tests in SQLViewSuite Author: Wenchen Fan Closes #14874 from cloud-fan/minor4. 
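To make the intended behavior concrete before the diff, here is a hedged usage sketch. The view names are invented, and it assumes a `spark-shell` built with Hive support, since the new tests live in `SQLViewSuite` under sql/hive.

```scala
// Illustrative usage only; `spark` is the SparkSession provided by spark-shell,
// and the view names here are made up.
spark.sql("CREATE VIEW v AS SELECT 1 AS a")          // permanent view `default`.`v`
spark.sql("CREATE TEMP VIEW v AS SELECT 10 AS a")    // temporary view with the same name

spark.sql("ALTER VIEW v AS SELECT 20 AS a")          // no database part: the temp view is altered
spark.sql("ALTER VIEW default.v AS SELECT 2 AS a")   // qualified name: the permanent view is altered

spark.table("v").show()                              // 20 (the temp view shadows the permanent one)
spark.table("default.v").show()                      // 2

// Altering a view that does not exist now fails instead of silently creating one:
// spark.sql("ALTER VIEW no_such_view AS SELECT 1")  // throws NoSuchTableException
```

This mirrors the new test cases added to `SQLViewSuite` in the diff below.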
--- .../spark/sql/execution/SparkSqlParser.scala | 63 +++++---------- .../spark/sql/execution/command/views.scala | 77 ++++++++++++++++--- .../sql/hive/execution/SQLViewSuite.scala | 77 ++++++++++++++++++- 3 files changed, 157 insertions(+), 60 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala index e32d30178eeb1..656494d97dbd5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala @@ -1254,60 +1254,33 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { ic.identifier.getText -> Option(ic.STRING).map(string) } } - createView( - ctx, - ctx.tableIdentifier, + + CreateViewCommand( + name = visitTableIdentifier(ctx.tableIdentifier), + userSpecifiedColumns = userSpecifiedColumns, comment = Option(ctx.STRING).map(string), - userSpecifiedColumns, - ctx.query, - Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty), + properties = Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty), + originalText = Option(source(ctx.query)), + child = plan(ctx.query), allowExisting = ctx.EXISTS != null, replace = ctx.REPLACE != null, - isTemporary = ctx.TEMPORARY != null - ) + isTemporary = ctx.TEMPORARY != null) } } /** - * Alter the query of a view. This creates a [[CreateViewCommand]] command. + * Alter the query of a view. This creates a [[AlterViewAsCommand]] command. + * + * For example: + * {{{ + * ALTER VIEW [db_name.]view_name AS SELECT ...; + * }}} */ override def visitAlterViewQuery(ctx: AlterViewQueryContext): LogicalPlan = withOrigin(ctx) { - createView( - ctx, - name = ctx.tableIdentifier, - comment = None, - userSpecifiedColumns = Seq.empty, - query = ctx.query, - properties = Map.empty, - allowExisting = false, - replace = true, - isTemporary = false) - } - - /** - * Create a [[CreateViewCommand]] command. 
- */ - private def createView( - ctx: ParserRuleContext, - name: TableIdentifierContext, - comment: Option[String], - userSpecifiedColumns: Seq[(String, Option[String])], - query: QueryContext, - properties: Map[String, String], - allowExisting: Boolean, - replace: Boolean, - isTemporary: Boolean): LogicalPlan = { - val originalText = source(query) - CreateViewCommand( - visitTableIdentifier(name), - userSpecifiedColumns, - comment, - properties, - Some(originalText), - plan(query), - allowExisting = allowExisting, - replace = replace, - isTemporary = isTemporary) + AlterViewAsCommand( + name = visitTableIdentifier(ctx.tableIdentifier), + originalText = source(ctx.query), + query = plan(ctx.query)) } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala index f0d7b64c3c160..15340ee921f68 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala @@ -22,15 +22,16 @@ import scala.util.control.NonFatal import org.apache.spark.sql.{AnalysisException, Row, SparkSession} import org.apache.spark.sql.catalyst.{SQLBuilder, TableIdentifier} import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} -import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute} +import org.apache.spark.sql.catalyst.expressions.Alias import org.apache.spark.sql.catalyst.plans.QueryPlan import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project} import org.apache.spark.sql.types.StructType /** - * Create Hive view on non-hive-compatible tables by specifying schema ourselves instead of - * depending on Hive meta-store. + * Create or replace a view with given query plan. This command will convert the query plan to + * canonicalized SQL string, and store it as view text in metastore, if we need to create a + * permanent view. * * @param name the name of this view. * @param userSpecifiedColumns the output column names and optional comments specified by users, @@ -64,11 +65,6 @@ case class CreateViewCommand( override protected def innerChildren: Seq[QueryPlan[_]] = Seq(child) - // TODO: Note that this class can NOT canonicalize the view SQL string entirely, which is - // different from Hive and may not work for some cases like create view on self join. - - override def output: Seq[Attribute] = Seq.empty[Attribute] - if (!isTemporary) { require(originalText.isDefined, "The table to created with CREATE VIEW must have 'originalText'.") @@ -119,9 +115,7 @@ case class CreateViewCommand( // Handles `CREATE VIEW IF NOT EXISTS v0 AS SELECT ...`. Does nothing when the target view // already exists. } else if (tableMetadata.tableType != CatalogTableType.VIEW) { - throw new AnalysisException( - "Existing table is not a view. 
The following is an existing table, " + - s"not a view: $qualifiedName") + throw new AnalysisException(s"$qualifiedName is not a view") } else if (replace) { // Handles `CREATE OR REPLACE VIEW v0 AS SELECT ...` sessionState.catalog.alterTable(prepareTable(sparkSession, analyzedPlan)) @@ -179,7 +173,7 @@ case class CreateViewCommand( sparkSession.sql(viewSQL).queryExecution.assertAnalyzed() } catch { case NonFatal(e) => - throw new RuntimeException(s"Failed to analyze the canonicalized SQL: ${viewSQL}", e) + throw new RuntimeException(s"Failed to analyze the canonicalized SQL: $viewSQL", e) } val viewSchema = if (userSpecifiedColumns.isEmpty) { @@ -202,3 +196,62 @@ case class CreateViewCommand( ) } } + +/** + * Alter a view with given query plan. If the view name contains database prefix, this command will + * alter a permanent view matching the given name, or throw an exception if view not exist. Else, + * this command will try to alter a temporary view first, if view not exist, try permanent view + * next, if still not exist, throw an exception. + * + * @param name the name of this view. + * @param originalText the original SQL text of this view. Note that we can only alter a view by + * SQL API, which means we always have originalText. + * @param query the logical plan that represents the view; this is used to generate a canonicalized + * version of the SQL that can be saved in the catalog. + */ +case class AlterViewAsCommand( + name: TableIdentifier, + originalText: String, + query: LogicalPlan) extends RunnableCommand { + + override protected def innerChildren: Seq[QueryPlan[_]] = Seq(query) + + override def run(session: SparkSession): Seq[Row] = { + // If the plan cannot be analyzed, throw an exception and don't proceed. + val qe = session.sessionState.executePlan(query) + qe.assertAnalyzed() + val analyzedPlan = qe.analyzed + + if (session.sessionState.catalog.isTemporaryTable(name)) { + session.sessionState.catalog.createTempView(name.table, analyzedPlan, overrideIfExists = true) + } else { + alterPermanentView(session, analyzedPlan) + } + + Seq.empty[Row] + } + + private def alterPermanentView(session: SparkSession, analyzedPlan: LogicalPlan): Unit = { + val viewMeta = session.sessionState.catalog.getTableMetadata(name) + if (viewMeta.tableType != CatalogTableType.VIEW) { + throw new AnalysisException(s"${viewMeta.identifier} is not a view.") + } + + val viewSQL: String = new SQLBuilder(analyzedPlan).toSQL + // Validate the view SQL - make sure we can parse it and analyze it. + // If we cannot analyze the generated query, there is probably a bug in SQL generation. 
+ try { + session.sql(viewSQL).queryExecution.assertAnalyzed() + } catch { + case NonFatal(e) => + throw new RuntimeException(s"Failed to analyze the canonicalized SQL: $viewSQL", e) + } + + val updatedViewMeta = viewMeta.copy( + schema = analyzedPlan.schema, + viewOriginalText = Some(originalText), + viewText = Some(viewSQL)) + + session.sessionState.catalog.alterTable(updatedViewMeta) + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala index 6a80664417911..bc999d4724062 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala @@ -18,6 +18,8 @@ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} +import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.test.SQLTestUtils @@ -60,15 +62,15 @@ class SQLViewSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { var e = intercept[AnalysisException] { sql("CREATE OR REPLACE VIEW tab1 AS SELECT * FROM jt") }.getMessage - assert(e.contains("The following is an existing table, not a view: `default`.`tab1`")) + assert(e.contains("`default`.`tab1` is not a view")) e = intercept[AnalysisException] { sql("CREATE VIEW tab1 AS SELECT * FROM jt") }.getMessage - assert(e.contains("The following is an existing table, not a view: `default`.`tab1`")) + assert(e.contains("`default`.`tab1` is not a view")) e = intercept[AnalysisException] { sql("ALTER VIEW tab1 AS SELECT * FROM jt") }.getMessage - assert(e.contains("The following is an existing table, not a view: `default`.`tab1`")) + assert(e.contains("`default`.`tab1` is not a view")) } } @@ -274,6 +276,75 @@ class SQLViewSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { } } + test("should not allow ALTER VIEW AS when the view does not exist") { + intercept[NoSuchTableException]( + sql("ALTER VIEW testView AS SELECT 1, 2") + ) + + intercept[NoSuchTableException]( + sql("ALTER VIEW default.testView AS SELECT 1, 2") + ) + } + + test("ALTER VIEW AS should try to alter temp view first if view name has no database part") { + withView("test_view") { + withTempView("test_view") { + sql("CREATE VIEW test_view AS SELECT 1 AS a, 2 AS b") + sql("CREATE TEMP VIEW test_view AS SELECT 1 AS a, 2 AS b") + + sql("ALTER VIEW test_view AS SELECT 3 AS i, 4 AS j") + + // The temporary view should be updated. + checkAnswer(spark.table("test_view"), Row(3, 4)) + + // The permanent view should stay same. + checkAnswer(spark.table("default.test_view"), Row(1, 2)) + } + } + } + + test("ALTER VIEW AS should alter permanent view if view name has database part") { + withView("test_view") { + withTempView("test_view") { + sql("CREATE VIEW test_view AS SELECT 1 AS a, 2 AS b") + sql("CREATE TEMP VIEW test_view AS SELECT 1 AS a, 2 AS b") + + sql("ALTER VIEW default.test_view AS SELECT 3 AS i, 4 AS j") + + // The temporary view should stay same. + checkAnswer(spark.table("test_view"), Row(1, 2)) + + // The permanent view should be updated. 
+ checkAnswer(spark.table("default.test_view"), Row(3, 4)) + } + } + } + + test("ALTER VIEW AS should keep the previous table properties, comment, create_time, etc.") { + withView("test_view") { + sql( + """ + |CREATE VIEW test_view + |COMMENT 'test' + |TBLPROPERTIES ('key' = 'a') + |AS SELECT 1 AS a, 2 AS b + """.stripMargin) + + val catalog = spark.sessionState.catalog + val viewMeta = catalog.getTableMetadata(TableIdentifier("test_view")) + assert(viewMeta.comment == Some("test")) + assert(viewMeta.properties("key") == "a") + + sql("ALTER VIEW test_view AS SELECT 3 AS i, 4 AS j") + val updatedViewMeta = catalog.getTableMetadata(TableIdentifier("test_view")) + assert(updatedViewMeta.comment == Some("test")) + assert(updatedViewMeta.properties("key") == "a") + assert(updatedViewMeta.createTime == viewMeta.createTime) + // The view should be updated. + checkAnswer(spark.table("test_view"), Row(3, 4)) + } + } + test("create hive view for json table") { // json table is not hive-compatible, make sure the new flag fix it. withView("testView") { From 9953442aca5a1528a6b85fa8713a56d36c9a199f Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Wed, 31 Aug 2016 09:06:23 -0700 Subject: [PATCH 149/270] [MINOR][SPARKR] Verbose build comment in WINDOWS.md rather than promoting default build without Hive ## What changes were proposed in this pull request? This PR fixes `WINDOWS.md` to imply referring other profiles in http://spark.apache.org/docs/latest/building-spark.html#building-with-buildmvn rather than directly pointing to run `mvn -DskipTests -Psparkr package` without Hive supports. ## How was this patch tested? Manually, 2016-08-31 6 01 08 Author: hyukjinkwon Closes #14890 from HyukjinKwon/minor-build-r. --- R/WINDOWS.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/R/WINDOWS.md b/R/WINDOWS.md index f67a1c51d1785..1afcbfcabe85f 100644 --- a/R/WINDOWS.md +++ b/R/WINDOWS.md @@ -4,13 +4,23 @@ To build SparkR on Windows, the following steps are required 1. Install R (>= 3.1) and [Rtools](http://cran.r-project.org/bin/windows/Rtools/). Make sure to include Rtools and R in `PATH`. + 2. Install [JDK7](http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html) and set `JAVA_HOME` in the system environment variables. + 3. Download and install [Maven](http://maven.apache.org/download.html). Also include the `bin` directory in Maven in `PATH`. + 4. Set `MAVEN_OPTS` as described in [Building Spark](http://spark.apache.org/docs/latest/building-spark.html). -5. Open a command shell (`cmd`) in the Spark directory and run `mvn -DskipTests -Psparkr package` + +5. Open a command shell (`cmd`) in the Spark directory and build Spark with [Maven](http://spark.apache.org/docs/latest/building-spark.html#building-with-buildmvn) and include the `-Psparkr` profile to build the R package. For example to use the default Hadoop versions you can run + + ```bash + mvn.cmd -DskipTests -Psparkr package + ``` + + `.\build\mvn` is a shell script so `mvn.cmd` should be used directly on Windows. ## Unit tests From 0611b3a2bf6d73ab62ee133fbb70430839bea7bc Mon Sep 17 00:00:00 2001 From: Michael Gummelt Date: Wed, 31 Aug 2016 10:17:05 -0700 Subject: [PATCH 150/270] [SPARK-17320] add build_profile_flags entry to mesos build module ## What changes were proposed in this pull request? add build_profile_flags entry to mesos build module ## How was this patch tested? unit tests Author: Michael Gummelt Closes #14885 from mgummelt/mesos-profile. 
--- dev/sparktestsupport/modules.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dev/sparktestsupport/modules.py b/dev/sparktestsupport/modules.py index f2aa241a4b8ff..d8e3989ec2853 100644 --- a/dev/sparktestsupport/modules.py +++ b/dev/sparktestsupport/modules.py @@ -462,6 +462,7 @@ def __hash__(self): name="mesos", dependencies=[], source_file_regexes=["mesos/"], + build_profile_flags=["-Pmesos"], sbt_test_goals=["mesos/test"] ) From 9bcb33c54117cebc9e087017bf4e4163edaeff17 Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Wed, 31 Aug 2016 10:56:02 -0700 Subject: [PATCH 151/270] [SPARK-17316][CORE] Make CoarseGrainedSchedulerBackend.removeExecutor non-blocking ## What changes were proposed in this pull request? StandaloneSchedulerBackend.executorRemoved is a blocking call right now. It may cause some deadlock since it's called inside StandaloneAppClient.ClientEndpoint. This PR just changed CoarseGrainedSchedulerBackend.removeExecutor to be non-blocking. It's safe since the only two usages (StandaloneSchedulerBackend and YarnSchedulerEndpoint) don't need the return value). ## How was this patch tested? Jenkins unit tests. Author: Shixiong Zhu Closes #14882 from zsxwing/SPARK-17316. --- .../cluster/CoarseGrainedSchedulerBackend.scala | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala index 8259923ce31c3..2db3a3bb81f61 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala @@ -406,14 +406,15 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp conf.getInt("spark.default.parallelism", math.max(totalCoreCount.get(), 2)) } - // Called by subclasses when notified of a lost worker - def removeExecutor(executorId: String, reason: ExecutorLossReason) { - try { - driverEndpoint.askWithRetry[Boolean](RemoveExecutor(executorId, reason)) - } catch { - case e: Exception => - throw new SparkException("Error notifying standalone scheduler's driver endpoint", e) - } + /** + * Called by subclasses when notified of a lost worker. It just fires the message and returns + * at once. + */ + protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = { + // Only log the failure since we don't care about the result. + driverEndpoint.ask(RemoveExecutor(executorId, reason)).onFailure { case t => + logError(t.getMessage, t) + }(ThreadUtils.sameThread) } def sufficientResourcesRegistered(): Boolean = true From 5d84c7fd83502aeb551d46a740502db4862508fe Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Wed, 31 Aug 2016 11:09:14 -0700 Subject: [PATCH 152/270] [SPARK-17332][CORE] Make Java Loggers static members ## What changes were proposed in this pull request? Make all Java Loggers static members ## How was this patch tested? Jenkins Author: Sean Owen Closes #14896 from srowen/SPARK-17332. 
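The change below applies one idiom throughout the network and shuffle modules: each `Logger` field moves from a per-instance member to a single class-level (`static final`) member, so the logger is created once per class instead of once per object. A rough Scala analogue of the same idiom, with a hypothetical class name used purely for illustration, is:

```scala
import org.slf4j.{Logger, LoggerFactory}

class ChunkFetchHandler {
  def handle(chunkId: Long): Unit = {
    // Every instance shares the single logger held by the companion object.
    ChunkFetchHandler.logger.debug(s"Fetching chunk $chunkId")
  }
}

object ChunkFetchHandler {
  // Scala counterpart of Java's `private static final Logger`: created once per
  // class, not once per instance.
  private val logger: Logger = LoggerFactory.getLogger(classOf[ChunkFetchHandler])
}
```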
--- .../main/java/org/apache/spark/network/TransportContext.java | 2 +- .../java/org/apache/spark/network/client/TransportClient.java | 2 +- .../apache/spark/network/client/TransportClientFactory.java | 2 +- .../apache/spark/network/client/TransportResponseHandler.java | 2 +- .../java/org/apache/spark/network/protocol/MessageDecoder.java | 3 ++- .../java/org/apache/spark/network/protocol/MessageEncoder.java | 2 +- .../org/apache/spark/network/sasl/SaslClientBootstrap.java | 2 +- .../java/org/apache/spark/network/sasl/SparkSaslClient.java | 2 +- .../java/org/apache/spark/network/sasl/SparkSaslServer.java | 2 +- .../apache/spark/network/server/OneForOneStreamManager.java | 2 +- .../main/java/org/apache/spark/network/server/RpcHandler.java | 2 +- .../apache/spark/network/server/TransportChannelHandler.java | 2 +- .../apache/spark/network/server/TransportRequestHandler.java | 2 +- .../java/org/apache/spark/network/server/TransportServer.java | 2 +- .../org/apache/spark/network/sasl/ShuffleSecretManager.java | 3 ++- .../spark/network/shuffle/ExternalShuffleBlockHandler.java | 2 +- .../apache/spark/network/shuffle/ExternalShuffleClient.java | 2 +- .../apache/spark/network/shuffle/OneForOneBlockFetcher.java | 2 +- .../org/apache/spark/network/shuffle/RetryingBlockFetcher.java | 2 +- .../network/shuffle/mesos/MesosExternalShuffleClient.java | 2 +- .../java/org/apache/spark/network/yarn/YarnShuffleService.java | 2 +- .../main/java/org/apache/spark/memory/TaskMemoryManager.java | 2 +- .../spark/shuffle/sort/BypassMergeSortShuffleWriter.java | 2 +- .../org/apache/spark/shuffle/sort/ShuffleExternalSorter.java | 2 +- .../org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java | 2 +- .../main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java | 2 +- .../util/collection/unsafe/sort/UnsafeExternalSorter.java | 2 +- 27 files changed, 29 insertions(+), 27 deletions(-) diff --git a/common/network-common/src/main/java/org/apache/spark/network/TransportContext.java b/common/network-common/src/main/java/org/apache/spark/network/TransportContext.java index 5320b28bc054c..5b69e2bb03546 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/TransportContext.java +++ b/common/network-common/src/main/java/org/apache/spark/network/TransportContext.java @@ -56,7 +56,7 @@ * processes to send messages back to the client on an existing channel. */ public class TransportContext { - private final Logger logger = LoggerFactory.getLogger(TransportContext.class); + private static final Logger logger = LoggerFactory.getLogger(TransportContext.class); private final TransportConf conf; private final RpcHandler rpcHandler; diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java index a67683b892218..600b80e2c5bd5 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java @@ -72,7 +72,7 @@ * Concurrency: thread safe and can be called from multiple threads. 
*/ public class TransportClient implements Closeable { - private final Logger logger = LoggerFactory.getLogger(TransportClient.class); + private static final Logger logger = LoggerFactory.getLogger(TransportClient.class); private final Channel channel; private final TransportResponseHandler handler; diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java index 1c9916baee07c..e895f13f45458 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java @@ -73,7 +73,7 @@ private static class ClientPool { } } - private final Logger logger = LoggerFactory.getLogger(TransportClientFactory.class); + private static final Logger logger = LoggerFactory.getLogger(TransportClientFactory.class); private final TransportContext context; private final TransportConf conf; diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java index 179667296ec7d..41bead546cad6 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java @@ -48,7 +48,7 @@ * Concurrency: thread safe and can be called from multiple threads. */ public class TransportResponseHandler extends MessageHandler { - private final Logger logger = LoggerFactory.getLogger(TransportResponseHandler.class); + private static final Logger logger = LoggerFactory.getLogger(TransportResponseHandler.class); private final Channel channel; diff --git a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java index f0453186185e1..f0956438ade24 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java +++ b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java @@ -33,7 +33,8 @@ @ChannelHandler.Sharable public final class MessageDecoder extends MessageToMessageDecoder { - private final Logger logger = LoggerFactory.getLogger(MessageDecoder.class); + private static final Logger logger = LoggerFactory.getLogger(MessageDecoder.class); + @Override public void decode(ChannelHandlerContext ctx, ByteBuf in, List out) { Message.Type msgType = Message.Type.decode(in); diff --git a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageEncoder.java b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageEncoder.java index 664df57feca4f..276f16637efc9 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageEncoder.java +++ b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageEncoder.java @@ -33,7 +33,7 @@ @ChannelHandler.Sharable public final class MessageEncoder extends MessageToMessageEncoder { - private final Logger logger = LoggerFactory.getLogger(MessageEncoder.class); + private static final Logger logger = LoggerFactory.getLogger(MessageEncoder.class); /*** * Encodes a Message by invoking its encode() method. 
For non-data messages, we will add one diff --git a/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslClientBootstrap.java b/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslClientBootstrap.java index 68381037d6891..9e5c616ee5a1f 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslClientBootstrap.java +++ b/common/network-common/src/main/java/org/apache/spark/network/sasl/SaslClientBootstrap.java @@ -38,7 +38,7 @@ * server should be setup with a {@link SaslRpcHandler} with matching keys for the given appId. */ public class SaslClientBootstrap implements TransportClientBootstrap { - private final Logger logger = LoggerFactory.getLogger(SaslClientBootstrap.class); + private static final Logger logger = LoggerFactory.getLogger(SaslClientBootstrap.class); private final boolean encrypt; private final TransportConf conf; diff --git a/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslClient.java b/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslClient.java index 94685e91b862e..b6256debb8e3e 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslClient.java +++ b/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslClient.java @@ -43,7 +43,7 @@ * firstToken, which is then followed by a set of challenges and responses. */ public class SparkSaslClient implements SaslEncryptionBackend { - private final Logger logger = LoggerFactory.getLogger(SparkSaslClient.class); + private static final Logger logger = LoggerFactory.getLogger(SparkSaslClient.class); private final String secretKeyId; private final SecretKeyHolder secretKeyHolder; diff --git a/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslServer.java b/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslServer.java index b802a5af63c94..e24fdf0c74de3 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslServer.java +++ b/common/network-common/src/main/java/org/apache/spark/network/sasl/SparkSaslServer.java @@ -45,7 +45,7 @@ * connections on some socket.) */ public class SparkSaslServer implements SaslEncryptionBackend { - private final Logger logger = LoggerFactory.getLogger(SparkSaslServer.class); + private static final Logger logger = LoggerFactory.getLogger(SparkSaslServer.class); /** * This is passed as the server name when creating the sasl client/server. diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java b/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java index ae7e520b2f709..ee367f9998dbf 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java @@ -36,7 +36,7 @@ * individually fetched as chunks by the client. Each registered buffer is one chunk. 
*/ public class OneForOneStreamManager extends StreamManager { - private final Logger logger = LoggerFactory.getLogger(OneForOneStreamManager.class); + private static final Logger logger = LoggerFactory.getLogger(OneForOneStreamManager.class); private final AtomicLong nextStreamId; private final ConcurrentHashMap streams; diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/RpcHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/RpcHandler.java index a99c3015b0e05..8f7554e2e07d5 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/RpcHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/RpcHandler.java @@ -83,7 +83,7 @@ public void exceptionCaught(Throwable cause, TransportClient client) { } private static class OneWayRpcCallback implements RpcResponseCallback { - private final Logger logger = LoggerFactory.getLogger(OneWayRpcCallback.class); + private static final Logger logger = LoggerFactory.getLogger(OneWayRpcCallback.class); @Override public void onSuccess(ByteBuffer response) { diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java index 884ea7d1152a5..c33848c8406c1 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportChannelHandler.java @@ -49,7 +49,7 @@ * timeout if the client is continuously sending but getting no responses, for simplicity. */ public class TransportChannelHandler extends SimpleChannelInboundHandler { - private final Logger logger = LoggerFactory.getLogger(TransportChannelHandler.class); + private static final Logger logger = LoggerFactory.getLogger(TransportChannelHandler.class); private final TransportClient client; private final TransportResponseHandler responseHandler; diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java index e67a034cb8e58..0373ed950e3f0 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java @@ -53,7 +53,7 @@ * The messages should have been processed by the pipeline setup by {@link TransportServer}. */ public class TransportRequestHandler extends MessageHandler { - private final Logger logger = LoggerFactory.getLogger(TransportRequestHandler.class); + private static final Logger logger = LoggerFactory.getLogger(TransportRequestHandler.class); /** The Netty channel that this handler is associated with. */ private final Channel channel; diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java index a67db4f69f086..0d7a677820d35 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java @@ -44,7 +44,7 @@ * Server for the efficient, low-level streaming service. 
*/ public class TransportServer implements Closeable { - private final Logger logger = LoggerFactory.getLogger(TransportServer.class); + private static final Logger logger = LoggerFactory.getLogger(TransportServer.class); private final TransportContext context; private final TransportConf conf; diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java b/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java index 56a025c4d95d8..426a604f4f157 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java @@ -29,7 +29,8 @@ * A class that manages shuffle secret used by the external shuffle service. */ public class ShuffleSecretManager implements SecretKeyHolder { - private final Logger logger = LoggerFactory.getLogger(ShuffleSecretManager.class); + private static final Logger logger = LoggerFactory.getLogger(ShuffleSecretManager.class); + private final ConcurrentHashMap shuffleSecretMap; // Spark user used for authenticating SASL connections diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java index d05d0ac4d246e..6e02430a8edb8 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockHandler.java @@ -54,7 +54,7 @@ * level shuffle block. */ public class ExternalShuffleBlockHandler extends RpcHandler { - private final Logger logger = LoggerFactory.getLogger(ExternalShuffleBlockHandler.class); + private static final Logger logger = LoggerFactory.getLogger(ExternalShuffleBlockHandler.class); @VisibleForTesting final ExternalShuffleBlockResolver blockManager; diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleClient.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleClient.java index 58ca87d9d3b13..772fb88325b35 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleClient.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleClient.java @@ -44,7 +44,7 @@ * executors. */ public class ExternalShuffleClient extends ShuffleClient { - private final Logger logger = LoggerFactory.getLogger(ExternalShuffleClient.class); + private static final Logger logger = LoggerFactory.getLogger(ExternalShuffleClient.class); private final TransportConf conf; private final boolean saslEnabled; diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java index 1b2ddbf1ed917..35f69fe35c94b 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java @@ -41,7 +41,7 @@ * {@link org.apache.spark.network.server.OneForOneStreamManager} on the server side. 
*/ public class OneForOneBlockFetcher { - private final Logger logger = LoggerFactory.getLogger(OneForOneBlockFetcher.class); + private static final Logger logger = LoggerFactory.getLogger(OneForOneBlockFetcher.class); private final TransportClient client; private final OpenBlocks openMessage; diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/RetryingBlockFetcher.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/RetryingBlockFetcher.java index d81cf869ddb9e..72bd0f803da33 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/RetryingBlockFetcher.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/RetryingBlockFetcher.java @@ -64,7 +64,7 @@ public interface BlockFetchStarter { private static final ExecutorService executorService = Executors.newCachedThreadPool( NettyUtils.createThreadFactory("Block Fetch Retry")); - private final Logger logger = LoggerFactory.getLogger(RetryingBlockFetcher.class); + private static final Logger logger = LoggerFactory.getLogger(RetryingBlockFetcher.class); /** Used to initiate new Block Fetches on our remaining blocks. */ private final BlockFetchStarter fetchStarter; diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/mesos/MesosExternalShuffleClient.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/mesos/MesosExternalShuffleClient.java index 2add9c83a73d2..42cedd9943150 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/mesos/MesosExternalShuffleClient.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/mesos/MesosExternalShuffleClient.java @@ -44,7 +44,7 @@ * has to detect this itself. */ public class MesosExternalShuffleClient extends ExternalShuffleClient { - private final Logger logger = LoggerFactory.getLogger(MesosExternalShuffleClient.class); + private static final Logger logger = LoggerFactory.getLogger(MesosExternalShuffleClient.class); private final ScheduledExecutorService heartbeaterThread = Executors.newSingleThreadScheduledExecutor( diff --git a/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java b/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java index 22e47ac1b7923..2cf3f53e6dfc1 100644 --- a/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java +++ b/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java @@ -58,7 +58,7 @@ * the service's. 
*/ public class YarnShuffleService extends AuxiliaryService { - private final Logger logger = LoggerFactory.getLogger(YarnShuffleService.class); + private static final Logger logger = LoggerFactory.getLogger(YarnShuffleService.class); // Port on which the shuffle server listens for fetch requests private static final String SPARK_SHUFFLE_SERVICE_PORT_KEY = "spark.shuffle.service.port"; diff --git a/core/src/main/java/org/apache/spark/memory/TaskMemoryManager.java b/core/src/main/java/org/apache/spark/memory/TaskMemoryManager.java index 867c4a1050677..1a700aa37554e 100644 --- a/core/src/main/java/org/apache/spark/memory/TaskMemoryManager.java +++ b/core/src/main/java/org/apache/spark/memory/TaskMemoryManager.java @@ -53,7 +53,7 @@ */ public class TaskMemoryManager { - private final Logger logger = LoggerFactory.getLogger(TaskMemoryManager.class); + private static final Logger logger = LoggerFactory.getLogger(TaskMemoryManager.class); /** The number of bits used to address the page table. */ private static final int PAGE_NUMBER_BITS = 13; diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java index 83dc61c5e5e59..cd6d64a1ee4af 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java @@ -73,7 +73,7 @@ */ final class BypassMergeSortShuffleWriter extends ShuffleWriter { - private final Logger logger = LoggerFactory.getLogger(BypassMergeSortShuffleWriter.class); + private static final Logger logger = LoggerFactory.getLogger(BypassMergeSortShuffleWriter.class); private final int fileBufferSize; private final boolean transferToEnabled; diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java index cfec724fe9e81..c33d1e33f030f 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java @@ -62,7 +62,7 @@ */ final class ShuffleExternalSorter extends MemoryConsumer { - private final Logger logger = LoggerFactory.getLogger(ShuffleExternalSorter.class); + private static final Logger logger = LoggerFactory.getLogger(ShuffleExternalSorter.class); @VisibleForTesting static final int DISK_WRITE_BUFFER_SIZE = 1024 * 1024; diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java index 08fb887bbd095..876cb7f7d86d5 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java @@ -56,7 +56,7 @@ @Private public class UnsafeShuffleWriter extends ShuffleWriter { - private final Logger logger = LoggerFactory.getLogger(UnsafeShuffleWriter.class); + private static final Logger logger = LoggerFactory.getLogger(UnsafeShuffleWriter.class); private static final ClassTag OBJECT_CLASS_TAG = ClassTag$.MODULE$.Object(); diff --git a/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java b/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java index dc04025692909..e4289818f1e75 100644 --- a/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java +++ 
b/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java @@ -64,7 +64,7 @@ */ public final class BytesToBytesMap extends MemoryConsumer { - private final Logger logger = LoggerFactory.getLogger(BytesToBytesMap.class); + private static final Logger logger = LoggerFactory.getLogger(BytesToBytesMap.class); private static final HashMapGrowthStrategy growthStrategy = HashMapGrowthStrategy.DOUBLING; diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java index 196e67d8b29b6..8ca29a58f8f64 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java @@ -44,7 +44,7 @@ */ public final class UnsafeExternalSorter extends MemoryConsumer { - private final Logger logger = LoggerFactory.getLogger(UnsafeExternalSorter.class); + private static final Logger logger = LoggerFactory.getLogger(UnsafeExternalSorter.class); @Nullable private final PrefixComparator prefixComparator; From 50bb142332d1147861def692bf63f0055ecb8576 Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Wed, 31 Aug 2016 14:02:21 -0700 Subject: [PATCH 153/270] [SPARK-17326][SPARKR] Fix tests with HiveContext in SparkR not to be skipped always ## What changes were proposed in this pull request? Currently, `HiveContext` in SparkR is not being tested and always skipped. This is because the initiation of `TestHiveContext` is being failed due to trying to load non-existing data paths (test tables). This is introduced from https://github.com/apache/spark/pull/14005 This enables the tests with SparkR. ## How was this patch tested? Manually, **Before** (on Mac OS) ``` ... Skipped ------------------------------------------------------------------------ 1. create DataFrame from RDD (test_sparkSQL.R#200) - Hive is not build with SparkSQL, skipped 2. test HiveContext (test_sparkSQL.R#1041) - Hive is not build with SparkSQL, skipped 3. read/write ORC files (test_sparkSQL.R#1748) - Hive is not build with SparkSQL, skipped 4. enableHiveSupport on SparkSession (test_sparkSQL.R#2480) - Hive is not build with SparkSQL, skipped 5. sparkJars tag in SparkContext (test_Windows.R#21) - This test is only for Windows, skipped ... ``` **After** (on Mac OS) ``` ... Skipped ------------------------------------------------------------------------ 1. sparkJars tag in SparkContext (test_Windows.R#21) - This test is only for Windows, skipped ... ``` Please refer the tests below (on Windows) - Before: https://ci.appveyor.com/project/HyukjinKwon/spark/build/45-test123 - After: https://ci.appveyor.com/project/HyukjinKwon/spark/build/46-test123 Author: hyukjinkwon Closes #14889 from HyukjinKwon/SPARK-17326. 
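The one-line fix below hinges on the boolean constructor argument of `TestHiveContext`: passing `FALSE` creates the context without loading the bundled Hive test tables, whose missing data paths are what previously made initialization fail in a SparkR-only checkout. A minimal Scala sketch of the same call, assuming an existing `SparkContext` named `sc` (the helper name is invented for illustration):

```scala
import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.test.TestHiveContext

// `false` asks TestHiveContext to skip loading its predefined test tables,
// mirroring the FALSE that the R test below now passes to the constructor.
def lightweightTestHiveContext(sc: SparkContext): TestHiveContext =
  new TestHiveContext(sc, false)
```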
--- R/pkg/inst/tests/testthat/test_sparkSQL.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R index 3ccb8b6d77bf6..8ff56eba1f7bf 100644 --- a/R/pkg/inst/tests/testthat/test_sparkSQL.R +++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R @@ -39,7 +39,7 @@ setHiveContext <- function(sc) { # initialize once and reuse ssc <- callJMethod(sc, "sc") hiveCtx <- tryCatch({ - newJObject("org.apache.spark.sql.hive.test.TestHiveContext", ssc) + newJObject("org.apache.spark.sql.hive.test.TestHiveContext", ssc, FALSE) }, error = function(err) { skip("Hive is not build with SparkSQL, skipped") From d375c8a3de1d253c485078f55eb9c5b928ab96d5 Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Wed, 31 Aug 2016 15:25:13 -0700 Subject: [PATCH 154/270] [SPARK-17316][TESTS] Fix MesosCoarseGrainedSchedulerBackendSuite ## What changes were proposed in this pull request? The master is broken because #14882 didn't run mesos tests. ## How was this patch tested? Jenkins unit tests. Author: Shixiong Zhu Closes #14902 from zsxwing/hotfix. --- .../cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala index c06379707a69a..d98ddb2700fe0 100644 --- a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala +++ b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala @@ -19,6 +19,7 @@ package org.apache.spark.scheduler.cluster.mesos import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer +import scala.concurrent.Promise import scala.reflect.ClassTag import org.apache.mesos.{Protos, Scheduler, SchedulerDriver} @@ -511,6 +512,7 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite when(taskScheduler.sc).thenReturn(sc) externalShuffleClient = mock[MesosExternalShuffleClient] driverEndpoint = mock[RpcEndpointRef] + when(driverEndpoint.ask(any())(any())).thenReturn(Promise().future) backend = createSchedulerBackend(taskScheduler, driver, externalShuffleClient, driverEndpoint) } From 2f9c27364ea00473933213700edb93b63b55b313 Mon Sep 17 00:00:00 2001 From: Shivaram Venkataraman Date: Wed, 31 Aug 2016 16:56:41 -0700 Subject: [PATCH 155/270] [SPARK-16581][SPARKR] Fix JVM API tests in SparkR ## What changes were proposed in this pull request? Remove cleanup.jobj test. Use JVM wrapper API for other test cases. ## How was this patch tested? Run R unit tests with testthat 1.0 Author: Shivaram Venkataraman Closes #14904 from shivaram/sparkr-jvm-tests-fix. 
--- R/pkg/inst/tests/testthat/test_jvm_api.R | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/R/pkg/inst/tests/testthat/test_jvm_api.R b/R/pkg/inst/tests/testthat/test_jvm_api.R index 151c52906bdb8..7348c893d0af3 100644 --- a/R/pkg/inst/tests/testthat/test_jvm_api.R +++ b/R/pkg/inst/tests/testthat/test_jvm_api.R @@ -20,24 +20,17 @@ context("JVM API") sparkSession <- sparkR.session(enableHiveSupport = FALSE) test_that("Create and call methods on object", { - jarr <- newJObject("java.util.ArrayList") + jarr <- sparkR.newJObject("java.util.ArrayList") # Add an element to the array - callJMethod(jarr, "add", 1L) + sparkR.callJMethod(jarr, "add", 1L) # Check if get returns the same element - expect_equal(callJMethod(jarr, "get", 0L), 1L) + expect_equal(sparkR.callJMethod(jarr, "get", 0L), 1L) }) test_that("Call static methods", { # Convert a boolean to a string - strTrue <- callJStatic("java.lang.String", "valueOf", TRUE) + strTrue <- sparkR.callJStatic("java.lang.String", "valueOf", TRUE) expect_equal(strTrue, "true") }) -test_that("Manually garbage collect objects", { - jarr <- newJObject("java.util.ArrayList") - cleanup.jobj(jarr) - # Using a jobj after GC should throw an error - expect_error(print(jarr), "Error in invokeJava.*") -}) - sparkR.session.stop() From d008638fbedc857c1adc1dff399d427b8bae848e Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Wed, 31 Aug 2016 21:28:53 -0700 Subject: [PATCH 156/270] [SPARKR][MINOR] Fix windowPartitionBy example ## What changes were proposed in this pull request? The usage in the original example is incorrect. This PR fixes it. ## How was this patch tested? Manual test. Author: Junyang Qian Closes #14903 from junyangq/SPARKR-FixWindowPartitionByDoc. --- R/pkg/R/window.R | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/R/pkg/R/window.R b/R/pkg/R/window.R index 215d0e7b5cfbe..0799d841e5dc9 100644 --- a/R/pkg/R/window.R +++ b/R/pkg/R/window.R @@ -21,9 +21,9 @@ #' #' Creates a WindowSpec with the partitioning defined. #' -#' @param col A column name or Column by which rows are partitioned to +#' @param col A column name or Column by which rows are partitioned to #' windows. -#' @param ... Optional column names or Columns in addition to col, by +#' @param ... Optional column names or Columns in addition to col, by #' which rows are partitioned to windows. #' #' @rdname windowPartitionBy @@ -32,10 +32,10 @@ #' @export #' @examples #' \dontrun{ -#' ws <- windowPartitionBy("key1", "key2") +#' ws <- orderBy(windowPartitionBy("key1", "key2"), "key3") #' df1 <- select(df, over(lead("value", 1), ws)) #' -#' ws <- windowPartitionBy(df$key1, df$key2) +#' ws <- orderBy(windowPartitionBy(df$key1, df$key2), df$key3) #' df1 <- select(df, over(lead("value", 1), ws)) #' } #' @note windowPartitionBy(character) since 2.0.0 @@ -70,9 +70,9 @@ setMethod("windowPartitionBy", #' #' Creates a WindowSpec with the ordering defined. #' -#' @param col A column name or Column by which rows are ordered within +#' @param col A column name or Column by which rows are ordered within #' windows. -#' @param ... Optional column names or Columns in addition to col, by +#' @param ... Optional column names or Columns in addition to col, by #' which rows are ordered within windows. 
#' #' @rdname windowOrderBy From 7a5000f39ef4f195696836f8a4e8ab4ff5c14dd2 Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Wed, 31 Aug 2016 21:39:31 -0700 Subject: [PATCH 157/270] [SPARK-17241][SPARKR][MLLIB] SparkR spark.glm should have configurable regularization parameter https://issues.apache.org/jira/browse/SPARK-17241 ## What changes were proposed in this pull request? Spark has configurable L2 regularization parameter for generalized linear regression. It is very important to have them in SparkR so that users can run ridge regression. ## How was this patch tested? Test manually on local laptop. Author: Xin Ren Closes #14856 from keypointt/SPARK-17241. --- R/pkg/R/mllib.R | 10 +++-- R/pkg/inst/tests/testthat/test_mllib.R | 6 +++ .../GeneralizedLinearRegressionWrapper.scala | 4 +- .../GeneralizedLinearRegressionSuite.scala | 40 +++++++++++++++++++ 4 files changed, 55 insertions(+), 5 deletions(-) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index 64d19fab7ec8f..9a53f757b4d44 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -138,10 +138,11 @@ predict_internal <- function(object, newData) { #' This can be a character string naming a family function, a family function or #' the result of a call to a family function. Refer R family at #' \url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/family.html}. -#' @param weightCol the weight column name. If this is not set or \code{NULL}, we treat all instance -#' weights as 1.0. #' @param tol positive convergence tolerance of iterations. #' @param maxIter integer giving the maximal number of IRLS iterations. +#' @param weightCol the weight column name. If this is not set or \code{NULL}, we treat all instance +#' weights as 1.0. +#' @param regParam regularization parameter for L2 regularization. #' @param ... additional arguments passed to the method. 
#' @aliases spark.glm,SparkDataFrame,formula-method #' @return \code{spark.glm} returns a fitted generalized linear model @@ -171,7 +172,8 @@ predict_internal <- function(object, newData) { #' @note spark.glm since 2.0.0 #' @seealso \link{glm}, \link{read.ml} setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"), - function(data, formula, family = gaussian, tol = 1e-6, maxIter = 25, weightCol = NULL) { + function(data, formula, family = gaussian, tol = 1e-6, maxIter = 25, weightCol = NULL, + regParam = 0.0) { if (is.character(family)) { family <- get(family, mode = "function", envir = parent.frame()) } @@ -190,7 +192,7 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"), jobj <- callJStatic("org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper", "fit", formula, data@sdf, family$family, family$link, - tol, as.integer(maxIter), as.character(weightCol)) + tol, as.integer(maxIter), as.character(weightCol), regParam) new("GeneralizedLinearRegressionModel", jobj = jobj) }) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index 1e6da650d1bb8..825a24073b75c 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -148,6 +148,12 @@ test_that("spark.glm summary", { baseModel <- stats::glm(Sepal.Width ~ Sepal.Length + Species, data = iris) baseSummary <- summary(baseModel) expect_true(abs(baseSummary$deviance - 12.19313) < 1e-4) + + # Test spark.glm works with regularization parameter + data <- as.data.frame(cbind(a1, a2, b)) + df <- suppressWarnings(createDataFrame(data)) + regStats <- summary(spark.glm(df, b ~ a1 + a2, regParam = 1.0)) + expect_equal(regStats$aic, 13.32836, tolerance = 1e-4) # 13.32836 is from summary() result }) test_that("spark.glm save/load", { diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala index 0d3181d0acb48..7a6ab618a1f2b 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala @@ -69,7 +69,8 @@ private[r] object GeneralizedLinearRegressionWrapper link: String, tol: Double, maxIter: Int, - weightCol: String): GeneralizedLinearRegressionWrapper = { + weightCol: String, + regParam: Double): GeneralizedLinearRegressionWrapper = { val rFormula = new RFormula() .setFormula(formula) val rFormulaModel = rFormula.fit(data) @@ -86,6 +87,7 @@ private[r] object GeneralizedLinearRegressionWrapper .setTol(tol) .setMaxIter(maxIter) .setWeightCol(weightCol) + .setRegParam(regParam) val pipeline = new Pipeline() .setStages(Array(rFormulaModel, glr)) .fit(data) diff --git a/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala index a4568e83faca5..d8032c4e1705b 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala @@ -1034,6 +1034,46 @@ class GeneralizedLinearRegressionSuite .setFamily("gaussian") .fit(datasetGaussianIdentity.as[LabeledPoint]) } + + test("generalized linear regression: regularization parameter") { + /* + R code: + + a1 <- c(0, 1, 2, 3) + a2 <- c(5, 2, 1, 3) + b <- c(1, 0, 1, 0) + data <- 
as.data.frame(cbind(a1, a2, b)) + df <- suppressWarnings(createDataFrame(data)) + + for (regParam in c(0.0, 0.1, 1.0)) { + model <- spark.glm(df, b ~ a1 + a2, regParam = regParam) + print(as.vector(summary(model)$aic)) + } + + [1] 12.88188 + [1] 12.92681 + [1] 13.32836 + */ + val dataset = spark.createDataFrame(Seq( + LabeledPoint(1, Vectors.dense(5, 0)), + LabeledPoint(0, Vectors.dense(2, 1)), + LabeledPoint(1, Vectors.dense(1, 2)), + LabeledPoint(0, Vectors.dense(3, 3)) + )) + val expected = Seq(12.88188, 12.92681, 13.32836) + + var idx = 0 + for (regParam <- Seq(0.0, 0.1, 1.0)) { + val trainer = new GeneralizedLinearRegression() + .setRegParam(regParam) + .setLabelCol("label") + .setFeaturesCol("features") + val model = trainer.fit(dataset) + val actual = model.summary.aic + assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with regParam = $regParam.") + idx += 1 + } + } } object GeneralizedLinearRegressionSuite { From aaf632b2132750c697dddd0469b902d9308dbf36 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Thu, 1 Sep 2016 13:19:15 +0800 Subject: [PATCH 158/270] revert PR#10896 and PR#14865 ## What changes were proposed in this pull request? according to the discussion in the original PR #10896 and the new approach PR #14876 , we decided to revert these 2 PRs and go with the new approach. ## How was this patch tested? N/A Author: Wenchen Fan Closes #14909 from cloud-fan/revert. --- .../spark/sql/execution/SparkStrategies.scala | 17 +- .../sql/execution/aggregate/AggUtils.scala | 250 ++++++++++-------- .../execution/aggregate/AggregateExec.scala | 56 ---- .../aggregate/HashAggregateExec.scala | 22 +- .../aggregate/SortAggregateExec.scala | 24 +- .../exchange/EnsureRequirements.scala | 39 +-- .../org/apache/spark/sql/DataFrameSuite.scala | 15 +- .../spark/sql/execution/PlannerSuite.scala | 77 +----- 8 files changed, 223 insertions(+), 277 deletions(-) delete mode 100644 sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index cda3b2b75e6b4..4aaf454285f4f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -259,17 +259,24 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { } val aggregateOperator = - if (functionsWithDistinct.isEmpty) { + if (aggregateExpressions.map(_.aggregateFunction).exists(!_.supportsPartial)) { + if (functionsWithDistinct.nonEmpty) { + sys.error("Distinct columns cannot exist in Aggregate operator containing " + + "aggregate functions which don't support partial aggregation.") + } else { + aggregate.AggUtils.planAggregateWithoutPartial( + groupingExpressions, + aggregateExpressions, + resultExpressions, + planLater(child)) + } + } else if (functionsWithDistinct.isEmpty) { aggregate.AggUtils.planAggregateWithoutDistinct( groupingExpressions, aggregateExpressions, resultExpressions, planLater(child)) } else { - if (aggregateExpressions.map(_.aggregateFunction).exists(!_.supportsPartial)) { - sys.error("Distinct columns cannot exist in Aggregate operator containing " + - "aggregate functions which don't support partial aggregation.") - } aggregate.AggUtils.planAggregateWithOneDistinct( groupingExpressions, functionsWithDistinct, diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala index fe75ecea177a7..4fbb9d554c9bf 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala @@ -19,97 +19,34 @@ package org.apache.spark.sql.execution.aggregate import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ -import org.apache.spark.sql.catalyst.plans.physical.Distribution import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.execution.streaming.{StateStoreRestoreExec, StateStoreSaveExec} -/** - * A pattern that finds aggregate operators to support partial aggregations. - */ -object PartialAggregate { - - def unapply(plan: SparkPlan): Option[Distribution] = plan match { - case agg: AggregateExec if AggUtils.supportPartialAggregate(agg.aggregateExpressions) => - Some(agg.requiredChildDistribution.head) - case _ => - None - } -} - /** * Utility functions used by the query planner to convert our plan to new aggregation code path. */ object AggUtils { - def supportPartialAggregate(aggregateExpressions: Seq[AggregateExpression]): Boolean = { - aggregateExpressions.map(_.aggregateFunction).forall(_.supportsPartial) - } - - private def createPartialAggregateExec( + def planAggregateWithoutPartial( groupingExpressions: Seq[NamedExpression], aggregateExpressions: Seq[AggregateExpression], - child: SparkPlan): SparkPlan = { - val groupingAttributes = groupingExpressions.map(_.toAttribute) - val functionsWithDistinct = aggregateExpressions.filter(_.isDistinct) - val partialAggregateExpressions = aggregateExpressions.map { - case agg @ AggregateExpression(_, _, false, _) if functionsWithDistinct.length > 0 => - agg.copy(mode = PartialMerge) - case agg => - agg.copy(mode = Partial) - } - val partialAggregateAttributes = - partialAggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) - val partialResultExpressions = - groupingAttributes ++ - partialAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) + resultExpressions: Seq[NamedExpression], + child: SparkPlan): Seq[SparkPlan] = { - createAggregateExec( - requiredChildDistributionExpressions = None, + val completeAggregateExpressions = aggregateExpressions.map(_.copy(mode = Complete)) + val completeAggregateAttributes = completeAggregateExpressions.map(_.resultAttribute) + SortAggregateExec( + requiredChildDistributionExpressions = Some(groupingExpressions), groupingExpressions = groupingExpressions, - aggregateExpressions = partialAggregateExpressions, - aggregateAttributes = partialAggregateAttributes, - initialInputBufferOffset = if (functionsWithDistinct.length > 0) { - groupingExpressions.length + functionsWithDistinct.head.aggregateFunction.children.length - } else { - 0 - }, - resultExpressions = partialResultExpressions, - child = child) - } - - private def updateMergeAggregateMode(aggregateExpressions: Seq[AggregateExpression]) = { - def updateMode(mode: AggregateMode) = mode match { - case Partial => PartialMerge - case Complete => Final - case mode => mode - } - aggregateExpressions.map(e => e.copy(mode = updateMode(e.mode))) - } - - /** - * Builds new merge and map-side [[AggregateExec]]s from an input aggregate operator. 
- * If an aggregation needs a shuffle for satisfying its own distribution and supports partial - * aggregations, a map-side aggregation is appended before the shuffle in - * [[org.apache.spark.sql.execution.exchange.EnsureRequirements]]. - */ - def createMapMergeAggregatePair(operator: SparkPlan): (SparkPlan, SparkPlan) = operator match { - case agg: AggregateExec => - val mapSideAgg = createPartialAggregateExec( - agg.groupingExpressions, agg.aggregateExpressions, agg.child) - val mergeAgg = createAggregateExec( - requiredChildDistributionExpressions = agg.requiredChildDistributionExpressions, - groupingExpressions = agg.groupingExpressions.map(_.toAttribute), - aggregateExpressions = updateMergeAggregateMode(agg.aggregateExpressions), - aggregateAttributes = agg.aggregateAttributes, - initialInputBufferOffset = agg.groupingExpressions.length, - resultExpressions = agg.resultExpressions, - child = mapSideAgg - ) - - (mergeAgg, mapSideAgg) + aggregateExpressions = completeAggregateExpressions, + aggregateAttributes = completeAggregateAttributes, + initialInputBufferOffset = 0, + resultExpressions = resultExpressions, + child = child + ) :: Nil } - private def createAggregateExec( + private def createAggregate( requiredChildDistributionExpressions: Option[Seq[Expression]] = None, groupingExpressions: Seq[NamedExpression] = Nil, aggregateExpressions: Seq[AggregateExpression] = Nil, @@ -118,8 +55,7 @@ object AggUtils { resultExpressions: Seq[NamedExpression] = Nil, child: SparkPlan): SparkPlan = { val useHash = HashAggregateExec.supportsAggregate( - aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)) && - supportPartialAggregate(aggregateExpressions) + aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)) if (useHash) { HashAggregateExec( requiredChildDistributionExpressions = requiredChildDistributionExpressions, @@ -146,21 +82,43 @@ object AggUtils { aggregateExpressions: Seq[AggregateExpression], resultExpressions: Seq[NamedExpression], child: SparkPlan): Seq[SparkPlan] = { + // Check if we can use HashAggregate. + + // 1. Create an Aggregate Operator for partial aggregations. + val groupingAttributes = groupingExpressions.map(_.toAttribute) - val completeAggregateExpressions = aggregateExpressions.map(_.copy(mode = Complete)) - val completeAggregateAttributes = completeAggregateExpressions.map(_.resultAttribute) - val supportPartial = supportPartialAggregate(aggregateExpressions) + val partialAggregateExpressions = aggregateExpressions.map(_.copy(mode = Partial)) + val partialAggregateAttributes = + partialAggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) + val partialResultExpressions = + groupingAttributes ++ + partialAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) - createAggregateExec( - requiredChildDistributionExpressions = - Some(if (supportPartial) groupingAttributes else groupingExpressions), - groupingExpressions = groupingExpressions, - aggregateExpressions = completeAggregateExpressions, - aggregateAttributes = completeAggregateAttributes, - initialInputBufferOffset = 0, - resultExpressions = resultExpressions, - child = child - ) :: Nil + val partialAggregate = createAggregate( + requiredChildDistributionExpressions = None, + groupingExpressions = groupingExpressions, + aggregateExpressions = partialAggregateExpressions, + aggregateAttributes = partialAggregateAttributes, + initialInputBufferOffset = 0, + resultExpressions = partialResultExpressions, + child = child) + + // 2. 
Create an Aggregate Operator for final aggregations. + val finalAggregateExpressions = aggregateExpressions.map(_.copy(mode = Final)) + // The attributes of the final aggregation buffer, which is presented as input to the result + // projection: + val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute) + + val finalAggregate = createAggregate( + requiredChildDistributionExpressions = Some(groupingAttributes), + groupingExpressions = groupingAttributes, + aggregateExpressions = finalAggregateExpressions, + aggregateAttributes = finalAggregateAttributes, + initialInputBufferOffset = groupingExpressions.length, + resultExpressions = resultExpressions, + child = partialAggregate) + + finalAggregate :: Nil } def planAggregateWithOneDistinct( @@ -183,23 +141,39 @@ object AggUtils { val distinctAttributes = namedDistinctExpressions.map(_.toAttribute) val groupingAttributes = groupingExpressions.map(_.toAttribute) - // 1. Create an Aggregate Operator for non-distinct aggregations. + // 1. Create an Aggregate Operator for partial aggregations. val partialAggregate: SparkPlan = { val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial)) val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - createAggregateExec( + // We will group by the original grouping expression, plus an additional expression for the + // DISTINCT column. For example, for AVG(DISTINCT value) GROUP BY key, the grouping + // expressions will be [key, value]. + createAggregate( + groupingExpressions = groupingExpressions ++ namedDistinctExpressions, + aggregateExpressions = aggregateExpressions, + aggregateAttributes = aggregateAttributes, + resultExpressions = groupingAttributes ++ distinctAttributes ++ + aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), + child = child) + } + + // 2. Create an Aggregate Operator for partial merge aggregations. + val partialMergeAggregate: SparkPlan = { + val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) + val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) + createAggregate( requiredChildDistributionExpressions = Some(groupingAttributes ++ distinctAttributes), - groupingExpressions = groupingExpressions ++ namedDistinctExpressions, + groupingExpressions = groupingAttributes ++ distinctAttributes, aggregateExpressions = aggregateExpressions, aggregateAttributes = aggregateAttributes, initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length, resultExpressions = groupingAttributes ++ distinctAttributes ++ aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), - child = child) + child = partialAggregate) } - // 2. Create an Aggregate Operator for the final aggregation. + // 3. 
Create an Aggregate operator for partial aggregation (for distinct) val distinctColumnAttributeLookup = distinctExpressions.zip(distinctAttributes).toMap val rewrittenDistinctFunctions = functionsWithDistinct.map { // Children of an AggregateFunction with DISTINCT keyword has already @@ -209,6 +183,38 @@ object AggUtils { aggregateFunction.transformDown(distinctColumnAttributeLookup) .asInstanceOf[AggregateFunction] } + + val partialDistinctAggregate: SparkPlan = { + val mergeAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) + // The attributes of the final aggregation buffer, which is presented as input to the result + // projection: + val mergeAggregateAttributes = mergeAggregateExpressions.map(_.resultAttribute) + val (distinctAggregateExpressions, distinctAggregateAttributes) = + rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) => + // We rewrite the aggregate function to a non-distinct aggregation because + // its input will have distinct arguments. + // We just keep the isDistinct setting to true, so when users look at the query plan, + // they still can see distinct aggregations. + val expr = AggregateExpression(func, Partial, isDistinct = true) + // Use original AggregationFunction to lookup attributes, which is used to build + // aggregateFunctionToAttribute + val attr = functionsWithDistinct(i).resultAttribute + (expr, attr) + }.unzip + + val partialAggregateResult = groupingAttributes ++ + mergeAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) ++ + distinctAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) + createAggregate( + groupingExpressions = groupingAttributes, + aggregateExpressions = mergeAggregateExpressions ++ distinctAggregateExpressions, + aggregateAttributes = mergeAggregateAttributes ++ distinctAggregateAttributes, + initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length, + resultExpressions = partialAggregateResult, + child = partialMergeAggregate) + } + + // 4. Create an Aggregate Operator for the final aggregation. val finalAndCompleteAggregate: SparkPlan = { val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final)) // The attributes of the final aggregation buffer, which is presented as input to the result @@ -219,23 +225,23 @@ object AggUtils { rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) => // We rewrite the aggregate function to a non-distinct aggregation because // its input will have distinct arguments. - // We keep the isDistinct setting to true because this flag is used to generate partial - // aggregations and it is easy to see aggregation types in the query plan. - val expr = AggregateExpression(func, Complete, isDistinct = true) + // We just keep the isDistinct setting to true, so when users look at the query plan, + // they still can see distinct aggregations. 
+ val expr = AggregateExpression(func, Final, isDistinct = true) // Use original AggregationFunction to lookup attributes, which is used to build // aggregateFunctionToAttribute val attr = functionsWithDistinct(i).resultAttribute (expr, attr) - }.unzip + }.unzip - createAggregateExec( + createAggregate( requiredChildDistributionExpressions = Some(groupingAttributes), groupingExpressions = groupingAttributes, aggregateExpressions = finalAggregateExpressions ++ distinctAggregateExpressions, aggregateAttributes = finalAggregateAttributes ++ distinctAggregateAttributes, initialInputBufferOffset = groupingAttributes.length, resultExpressions = resultExpressions, - child = partialAggregate) + child = partialDistinctAggregate) } finalAndCompleteAggregate :: Nil @@ -243,14 +249,13 @@ object AggUtils { /** * Plans a streaming aggregation using the following progression: - * - Partial Aggregation (now there is at most 1 tuple per group) + * - Partial Aggregation + * - Shuffle + * - Partial Merge (now there is at most 1 tuple per group) * - StateStoreRestore (now there is 1 tuple from this batch + optionally one from the previous) * - PartialMerge (now there is at most 1 tuple per group) * - StateStoreSave (saves the tuple for the next batch) * - Complete (output the current result of the aggregation) - * - * If the first aggregation needs a shuffle to satisfy its distribution, a map-side partial - * an aggregation and a shuffle are added in `EnsureRequirements`. */ def planStreamingAggregation( groupingExpressions: Seq[NamedExpression], @@ -263,24 +268,39 @@ object AggUtils { val partialAggregate: SparkPlan = { val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial)) val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - createAggregateExec( + // We will group by the original grouping expression, plus an additional expression for the + // DISTINCT column. For example, for AVG(DISTINCT value) GROUP BY key, the grouping + // expressions will be [key, value]. 
+ createAggregate( + groupingExpressions = groupingExpressions, + aggregateExpressions = aggregateExpressions, + aggregateAttributes = aggregateAttributes, + resultExpressions = groupingAttributes ++ + aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), + child = child) + } + + val partialMerged1: SparkPlan = { + val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) + val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) + createAggregate( requiredChildDistributionExpressions = Some(groupingAttributes), - groupingExpressions = groupingExpressions, + groupingExpressions = groupingAttributes, aggregateExpressions = aggregateExpressions, aggregateAttributes = aggregateAttributes, initialInputBufferOffset = groupingAttributes.length, resultExpressions = groupingAttributes ++ aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes), - child = child) + child = partialAggregate) } - val restored = StateStoreRestoreExec(groupingAttributes, None, partialAggregate) + val restored = StateStoreRestoreExec(groupingAttributes, None, partialMerged1) - val partialMerged: SparkPlan = { + val partialMerged2: SparkPlan = { val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge)) val aggregateAttributes = aggregateExpressions.map(_.resultAttribute) - createAggregateExec( + createAggregate( requiredChildDistributionExpressions = Some(groupingAttributes), groupingExpressions = groupingAttributes, @@ -294,7 +314,7 @@ object AggUtils { // Note: stateId and returnAllStates are filled in later with preparation rules // in IncrementalExecution. val saved = StateStoreSaveExec( - groupingAttributes, stateId = None, returnAllStates = None, partialMerged) + groupingAttributes, stateId = None, returnAllStates = None, partialMerged2) val finalAndCompleteAggregate: SparkPlan = { val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final)) @@ -302,7 +322,7 @@ object AggUtils { // projection: val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute) - createAggregateExec( + createAggregate( requiredChildDistributionExpressions = Some(groupingAttributes), groupingExpressions = groupingAttributes, aggregateExpressions = finalAggregateExpressions, diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala deleted file mode 100644 index b88a8aa3daecd..0000000000000 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregateExec.scala +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.sql.execution.aggregate - -import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression -import org.apache.spark.sql.catalyst.plans.physical._ -import org.apache.spark.sql.execution.SparkPlan -import org.apache.spark.sql.execution.UnaryExecNode - -/** - * A base class for aggregate implementation. - */ -abstract class AggregateExec extends UnaryExecNode { - - def requiredChildDistributionExpressions: Option[Seq[Expression]] - def groupingExpressions: Seq[NamedExpression] - def aggregateExpressions: Seq[AggregateExpression] - def aggregateAttributes: Seq[Attribute] - def initialInputBufferOffset: Int - def resultExpressions: Seq[NamedExpression] - - protected[this] val aggregateBufferAttributes = { - aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) - } - - override def producedAttributes: AttributeSet = - AttributeSet(aggregateAttributes) ++ - AttributeSet(resultExpressions.diff(groupingExpressions).map(_.toAttribute)) ++ - AttributeSet(aggregateBufferAttributes) - - override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute) - - override def requiredChildDistribution: List[Distribution] = { - requiredChildDistributionExpressions match { - case Some(exprs) if exprs.isEmpty => AllTuples :: Nil - case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil - case None => UnspecifiedDistribution :: Nil - } - } -} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala index 525c7e301add4..bd7efa606e0ce 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala @@ -24,6 +24,7 @@ import org.apache.spark.sql.catalyst.errors._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ import org.apache.spark.sql.catalyst.expressions.codegen._ +import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics} import org.apache.spark.sql.types.{DecimalType, StringType, StructType} @@ -41,7 +42,11 @@ case class HashAggregateExec( initialInputBufferOffset: Int, resultExpressions: Seq[NamedExpression], child: SparkPlan) - extends AggregateExec with CodegenSupport { + extends UnaryExecNode with CodegenSupport { + + private[this] val aggregateBufferAttributes = { + aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) + } require(HashAggregateExec.supportsAggregate(aggregateBufferAttributes)) @@ -55,6 +60,21 @@ case class HashAggregateExec( "spillSize" -> SQLMetrics.createSizeMetric(sparkContext, "spill size"), "aggTime" -> SQLMetrics.createTimingMetric(sparkContext, "aggregate time")) + override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute) + + override def producedAttributes: AttributeSet = + AttributeSet(aggregateAttributes) ++ + AttributeSet(resultExpressions.diff(groupingExpressions).map(_.toAttribute)) ++ + AttributeSet(aggregateBufferAttributes) + + override def requiredChildDistribution: List[Distribution] = { + requiredChildDistributionExpressions match { + case Some(exprs) if exprs.isEmpty => AllTuples :: Nil + case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil + case 
None => UnspecifiedDistribution :: Nil + } + } + // This is for testing. We force TungstenAggregationIterator to fall back to the unsafe row hash // map and/or the sort-based aggregation once it has processed a given number of input rows. private val testFallbackStartsAt: Option[(Int, Int)] = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala index 68f86fca80937..2a81a823c44b3 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala @@ -22,7 +22,8 @@ import org.apache.spark.sql.catalyst.errors._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ -import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, UnspecifiedDistribution} +import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode} import org.apache.spark.sql.execution.metric.SQLMetrics import org.apache.spark.util.Utils @@ -37,11 +38,30 @@ case class SortAggregateExec( initialInputBufferOffset: Int, resultExpressions: Seq[NamedExpression], child: SparkPlan) - extends AggregateExec { + extends UnaryExecNode { + + private[this] val aggregateBufferAttributes = { + aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes) + } + + override def producedAttributes: AttributeSet = + AttributeSet(aggregateAttributes) ++ + AttributeSet(resultExpressions.diff(groupingExpressions).map(_.toAttribute)) ++ + AttributeSet(aggregateBufferAttributes) override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) + override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute) + + override def requiredChildDistribution: List[Distribution] = { + requiredChildDistributionExpressions match { + case Some(exprs) if exprs.isEmpty => AllTuples :: Nil + case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil + case None => UnspecifiedDistribution :: Nil + } + } + override def requiredChildOrdering: Seq[Seq[SortOrder]] = { groupingExpressions.map(SortOrder(_, Ascending)) :: Nil } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala index 66e99ded24886..f17049949aa47 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala @@ -21,8 +21,6 @@ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.execution._ -import org.apache.spark.sql.execution.aggregate.AggUtils -import org.apache.spark.sql.execution.aggregate.PartialAggregate import org.apache.spark.sql.internal.SQLConf /** @@ -153,31 +151,18 @@ case class EnsureRequirements(conf: SQLConf) extends Rule[SparkPlan] { private def ensureDistributionAndOrdering(operator: SparkPlan): SparkPlan = { val requiredChildDistributions: Seq[Distribution] = operator.requiredChildDistribution val requiredChildOrderings: Seq[Seq[SortOrder]] = 
operator.requiredChildOrdering - assert(requiredChildDistributions.length == operator.children.length) - assert(requiredChildOrderings.length == operator.children.length) + var children: Seq[SparkPlan] = operator.children + assert(requiredChildDistributions.length == children.length) + assert(requiredChildOrderings.length == children.length) - def createShuffleExchange(dist: Distribution, child: SparkPlan) = - ShuffleExchange(createPartitioning(dist, defaultNumPreShufflePartitions), child) - - var (parent, children) = operator match { - case PartialAggregate(childDist) if !operator.outputPartitioning.satisfies(childDist) => - // If an aggregation needs a shuffle and support partial aggregations, a map-side partial - // aggregation and a shuffle are added as children. - val (mergeAgg, mapSideAgg) = AggUtils.createMapMergeAggregatePair(operator) - (mergeAgg, createShuffleExchange( - requiredChildDistributions.head, ensureDistributionAndOrdering(mapSideAgg)) :: Nil) - case _ => - // Ensure that the operator's children satisfy their output distribution requirements: - val childrenWithDist = operator.children.zip(requiredChildDistributions) - val newChildren = childrenWithDist.map { - case (child, distribution) if child.outputPartitioning.satisfies(distribution) => - child - case (child, BroadcastDistribution(mode)) => - BroadcastExchangeExec(mode, child) - case (child, distribution) => - createShuffleExchange(distribution, child) - } - (operator, newChildren) + // Ensure that the operator's children satisfy their output distribution requirements: + children = children.zip(requiredChildDistributions).map { + case (child, distribution) if child.outputPartitioning.satisfies(distribution) => + child + case (child, BroadcastDistribution(mode)) => + BroadcastExchangeExec(mode, child) + case (child, distribution) => + ShuffleExchange(createPartitioning(distribution, defaultNumPreShufflePartitions), child) } // If the operator has multiple children and specifies child output distributions (e.g. join), @@ -270,7 +255,7 @@ case class EnsureRequirements(conf: SQLConf) extends Rule[SparkPlan] { } } - parent.withNewChildren(children) + operator.withNewChildren(children) } def apply(plan: SparkPlan): SparkPlan = plan.transformUp { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala index ce0b92a461c39..f89951760f7d2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala @@ -1248,17 +1248,17 @@ class DataFrameSuite extends QueryTest with SharedSQLContext { } /** - * Verifies that there is a single Aggregation for `df` + * Verifies that there is no Exchange between the Aggregations for `df` */ - private def verifyNonExchangingSingleAgg(df: DataFrame) = { + private def verifyNonExchangingAgg(df: DataFrame) = { var atFirstAgg: Boolean = false df.queryExecution.executedPlan.foreach { case agg: HashAggregateExec => + atFirstAgg = !atFirstAgg + case _ => if (atFirstAgg) { - fail("Should not have back to back Aggregates") + fail("Should not have operators between the two aggregations") } - atFirstAgg = true - case _ => } } @@ -1292,10 +1292,9 @@ class DataFrameSuite extends QueryTest with SharedSQLContext { // Group by the column we are distributed by. 
This should generate a plan with no exchange // between the aggregates val df3 = testData.repartition($"key").groupBy("key").count() - verifyNonExchangingSingleAgg(df3) - verifyNonExchangingSingleAgg(testData.repartition($"key", $"value") + verifyNonExchangingAgg(df3) + verifyNonExchangingAgg(testData.repartition($"key", $"value") .groupBy("key", "value").count()) - verifyNonExchangingSingleAgg(testData.repartition($"key").groupBy("key", "value").count()) // Grouping by just the first distributeBy expr, need to exchange. verifyExchangingAgg(testData.repartition($"key", $"value") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala index b0aa3378e5f66..375da224aaa7f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala @@ -24,7 +24,6 @@ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.Inner import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Repartition} import org.apache.spark.sql.catalyst.plans.physical._ -import org.apache.spark.sql.execution.aggregate.SortAggregateExec import org.apache.spark.sql.execution.columnar.InMemoryRelation import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReusedExchangeExec, ReuseExchange, ShuffleExchange} import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec} @@ -38,84 +37,36 @@ class PlannerSuite extends SharedSQLContext { setupTestData() - private def testPartialAggregationPlan(query: LogicalPlan): Seq[SparkPlan] = { + private def testPartialAggregationPlan(query: LogicalPlan): Unit = { val planner = spark.sessionState.planner import planner._ - val ensureRequirements = EnsureRequirements(spark.sessionState.conf) - val planned = Aggregation(query).headOption.map(ensureRequirements(_)) - .getOrElse(fail(s"Could query play aggregation query $query. Is it an aggregation query?")) - planned.collect { case n if n.nodeName contains "Aggregate" => n } + val plannedOption = Aggregation(query).headOption + val planned = + plannedOption.getOrElse( + fail(s"Could query play aggregation query $query. Is it an aggregation query?")) + val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n } + + // For the new aggregation code path, there will be four aggregate operator for + // distinct aggregations. + assert( + aggregations.size == 2 || aggregations.size == 4, + s"The plan of query $query does not have partial aggregations.") } test("count is partially aggregated") { val query = testData.groupBy('value).agg(count('key)).queryExecution.analyzed - assert(testPartialAggregationPlan(query).size == 2, - s"The plan of query $query does not have partial aggregations.") + testPartialAggregationPlan(query) } test("count distinct is partially aggregated") { val query = testData.groupBy('value).agg(countDistinct('key)).queryExecution.analyzed testPartialAggregationPlan(query) - // For the new aggregation code path, there will be four aggregate operator for distinct - // aggregations. 
- assert(testPartialAggregationPlan(query).size == 4, - s"The plan of query $query does not have partial aggregations.") } test("mixed aggregates are partially aggregated") { val query = testData.groupBy('value).agg(count('value), countDistinct('key)).queryExecution.analyzed - // For the new aggregation code path, there will be four aggregate operator for distinct - // aggregations. - assert(testPartialAggregationPlan(query).size == 4, - s"The plan of query $query does not have partial aggregations.") - } - - test("SPARK-17289 sort-based partial aggregation needs a sort operator as a child") { - withTempView("testSortBasedPartialAggregation") { - val schema = StructType( - StructField(s"key", IntegerType, true) :: StructField(s"value", StringType, true) :: Nil) - val rowRDD = sparkContext.parallelize((0 until 1000).map(d => Row(d % 2, d.toString))) - spark.createDataFrame(rowRDD, schema) - .createOrReplaceTempView("testSortBasedPartialAggregation") - - // This test assumes a query below uses sort-based aggregations - val planned = sql("SELECT MAX(value) FROM testSortBasedPartialAggregation GROUP BY key") - .queryExecution.executedPlan - // This line extracts both SortAggregate and Sort operators - val extractedOps = planned.collect { case n if n.nodeName contains "Sort" => n } - val aggOps = extractedOps.collect { case n if n.nodeName contains "SortAggregate" => n } - assert(extractedOps.size == 4 && aggOps.size == 2, - s"The plan $planned does not have correct sort-based partial aggregate pairs.") - } - } - - test("non-partial aggregation for aggregates") { - withTempView("testNonPartialAggregation") { - val schema = StructType(StructField(s"value", IntegerType, true) :: Nil) - val row = Row.fromSeq(Seq.fill(1)(null)) - val rowRDD = sparkContext.parallelize(row :: Nil) - spark.createDataFrame(rowRDD, schema).repartition($"value") - .createOrReplaceTempView("testNonPartialAggregation") - - val planned1 = sql("SELECT SUM(value) FROM testNonPartialAggregation GROUP BY value") - .queryExecution.executedPlan - - // If input data are already partitioned and the same columns are used in grouping keys and - // aggregation values, no partial aggregation exist in query plans. - val aggOps1 = planned1.collect { case n if n.nodeName contains "Aggregate" => n } - assert(aggOps1.size == 1, s"The plan $planned1 has partial aggregations.") - - val planned2 = sql( - """ - |SELECT t.value, SUM(DISTINCT t.value) - |FROM (SELECT * FROM testNonPartialAggregation ORDER BY value) t - |GROUP BY t.value - """.stripMargin).queryExecution.executedPlan - - val aggOps2 = planned1.collect { case n if n.nodeName contains "Aggregate" => n } - assert(aggOps2.size == 1, s"The plan $planned2 has partial aggregations.") - } + testPartialAggregationPlan(query) } test("sizeInBytes estimation of limit operator for broadcast hash join optimization") { From 21c0a4fe9d8e21819ba96e7dc2b1f2999d3299ae Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Wed, 31 Aug 2016 23:25:20 -0700 Subject: [PATCH 159/270] [SPARK-17318][TESTS] Fix ReplSuite replicating blocks of object with class defined in repl again ## What changes were proposed in this pull request? After digging into the logs, I noticed the failure is because in this test, it starts a local cluster with 2 executors. However, when SparkContext is created, executors may be still not up. When one of the executor is not up during running the job, the blocks won't be replicated. This PR just adds a wait loop before running the job to fix the flaky test. ## How was this patch tested? 
Jenkins Author: Shixiong Zhu Closes #14905 from zsxwing/SPARK-17318-2. --- .../src/test/scala/org/apache/spark/repl/ReplSuite.scala | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala index f1284b1df31b4..f7d7a4f041315 100644 --- a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala +++ b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala @@ -399,6 +399,15 @@ class ReplSuite extends SparkFunSuite { test("replicating blocks of object with class defined in repl") { val output = runInterpreter("local-cluster[2,1,1024]", """ + |val timeout = 60000 // 60 seconds + |val start = System.currentTimeMillis + |while(sc.getExecutorStorageStatus.size != 3 && + | (System.currentTimeMillis - start) < timeout) { + | Thread.sleep(10) + |} + |if (System.currentTimeMillis - start >= timeout) { + | throw new java.util.concurrent.TimeoutException("Executors were not up in 60 seconds") + |} |import org.apache.spark.storage.StorageLevel._ |case class Foo(i: Int) |val ret = sc.parallelize((1 to 100).map(Foo), 10).persist(MEMORY_AND_DISK_2) From 536fa911c181958d84f14156f7d57ef5fd68df48 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Thu, 1 Sep 2016 09:10:01 +0100 Subject: [PATCH 160/270] [SPARK-17329][BUILD] Don't build PRs with -Pyarn unless YARN code changed ## What changes were proposed in this pull request? Only build PRs with -Pyarn if YARN code was modified. ## How was this patch tested? Jenkins tests (will look to verify whether -Pyarn was included in the PR builder for this one.) Author: Sean Owen Closes #14892 from srowen/SPARK-17329. --- dev/run-tests.py | 10 +++++----- dev/sparktestsupport/modules.py | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/dev/run-tests.py b/dev/run-tests.py index 930d7f8bd9459..ae4b5306fc5cf 100755 --- a/dev/run-tests.py +++ b/dev/run-tests.py @@ -305,11 +305,11 @@ def get_hadoop_profiles(hadoop_version): """ sbt_maven_hadoop_profiles = { - "hadoop2.2": ["-Pyarn", "-Phadoop-2.2"], - "hadoop2.3": ["-Pyarn", "-Phadoop-2.3"], - "hadoop2.4": ["-Pyarn", "-Phadoop-2.4"], - "hadoop2.6": ["-Pyarn", "-Phadoop-2.6"], - "hadoop2.7": ["-Pyarn", "-Phadoop-2.7"], + "hadoop2.2": ["-Phadoop-2.2"], + "hadoop2.3": ["-Phadoop-2.3"], + "hadoop2.4": ["-Phadoop-2.4"], + "hadoop2.6": ["-Phadoop-2.6"], + "hadoop2.7": ["-Phadoop-2.7"], } if hadoop_version in sbt_maven_hadoop_profiles: diff --git a/dev/sparktestsupport/modules.py b/dev/sparktestsupport/modules.py index d8e3989ec2853..050cdf043757f 100644 --- a/dev/sparktestsupport/modules.py +++ b/dev/sparktestsupport/modules.py @@ -449,6 +449,7 @@ def __hash__(self): "yarn/", "common/network-yarn/", ], + build_profile_flags=["-Pyarn"], sbt_test_goals=[ "yarn/test", "network-yarn/test", From a18c169fd050e71fdb07b153ae0fa5c410d8de27 Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Thu, 1 Sep 2016 16:31:13 +0800 Subject: [PATCH 161/270] [SPARK-16283][SQL] Implements percentile_approx aggregation function which supports partial aggregation. ## What changes were proposed in this pull request? This PR implements aggregation function `percentile_approx`. Function `percentile_approx` returns the approximate percentile(s) of a column at the given percentage(s). A percentile is a watermark value below which a given percentage of the column values fall. For example, the percentile of column `col` at percentage 50% is the median value of column `col`. 
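To make the definition concrete, here is a small illustrative Scala helper (not part of this patch; the name is hypothetical) that computes the exact percentile by sorting. `percentile_approx` approximates this watermark value while keeping memory bounded:

```scala
// Illustrative only (not part of this patch): the exact percentile that
// percentile_approx approximates, computed by fully sorting the values.
def exactPercentile(values: Seq[Double], percentage: Double): Double = {
  require(values.nonEmpty, "values must not be empty")
  require(percentage >= 0.0 && percentage <= 1.0, "percentage must be between 0.0 and 1.0")
  val sorted = values.sorted
  // Rank of the watermark value: the smallest value such that at least
  // `percentage` of all values fall at or below it (clamped for percentage = 0.0).
  val rank = math.max(math.ceil(percentage * sorted.length).toInt, 1)
  sorted(rank - 1)
}

// exactPercentile((1 to 1000).map(_.toDouble), 0.5) returns 500.0, which is
// consistent with the percentile_approx test expectations in this patch.
```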
### Syntax:
```
# Returns the percentile at a given percentage value. The approximation error can be reduced by
# increasing the accuracy parameter, at the cost of memory.
percentile_approx(col, percentage [, accuracy])

# Returns a percentile value array for a given percentage value array.
percentile_approx(col, array(percentage1 [, percentage2]...) [, accuracy])
```

### Features:
1. This function supports partial aggregation.
2. The memory consumption is bounded. The larger the `accuracy` parameter we choose, the smaller the error we get. The default accuracy value is 10000, matching Hive's default setting. Choose a smaller value for a smaller memory footprint.
3. This function supports window function aggregation.

### Example usages:
```
## Returns the 25th percentile value, with default accuracy
SELECT percentile_approx(col, 0.25) FROM table

## Returns an array of percentile values (25th, 50th, 75th), with default accuracy
SELECT percentile_approx(col, array(0.25, 0.5, 0.75)) FROM table

## Returns the 25th percentile value with custom accuracy 100; a larger accuracy parameter yields a smaller approximation error
SELECT percentile_approx(col, 0.25, 100) FROM table

## Returns the 25th and 50th percentile values with custom accuracy 100
SELECT percentile_approx(col, array(0.25, 0.5), 100) FROM table
```

### NOTE:
1. The `percentile_approx` implementation differs from Hive's, so the result returned for the same query may be slightly different from Hive's. This implementation uses `QuantileSummaries` as the underlying probabilistic data structure, and mainly follows the paper `Space-efficient Online Computation of Quantile Summaries` by Greenwald, Michael and Khanna, Sanjeev (http://dx.doi.org/10.1145/375663.375670).
2. The current implementation of `QuantileSummaries` doesn't support automatic compression, so this PR triggers compression explicitly from the caller side; the heuristic it uses may not be optimal (see the sketch after this message).

## How was this patch tested?

Unit tests and SQL query tests.

## Acknowledgement
1. This PR's work is based on lw-lin's PR https://github.com/apache/spark/pull/14298, with improvements such as supporting partial aggregation and fixing an out-of-memory issue.

Author: Sean Zhong

Closes #14868 from clockfly/appro_percentile_try_2.
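As a companion to NOTE 2 above, the following is a minimal illustrative sketch (not part of this patch; the helper name is hypothetical) of the caller-driven compression pattern that `PercentileDigest` applies around `QuantileSummaries`, using only the API calls the patch itself relies on:

```scala
import org.apache.spark.sql.catalyst.util.QuantileSummaries

// Illustrative sketch of the caller-side compression described in NOTE 2.
// Assumes a non-empty input; the patch itself returns null for empty input.
def approxPercentile(values: Iterator[Double], percentage: Double, relativeError: Double): Double = {
  var summaries = new QuantileSummaries(QuantileSummaries.defaultCompressThreshold, relativeError)
  // Compress whenever the sampled buffer grows past a safe bound, because
  // QuantileSummaries.insert does not compress on its own in this version.
  // The bound mirrors the one PercentileDigest derives from 1 / relativeError.
  val compressThreshold = (1 / relativeError).toInt * 4
  values.foreach { v =>
    summaries = summaries.insert(v) // insert returns an updated, uncompressed summary
    if (summaries.sampled.length >= compressThreshold) {
      summaries = summaries.compress()
    }
  }
  // Compress once more before querying, just as PercentileDigest.getPercentiles does.
  summaries.compress().query(percentage)
}
```

With `relativeError = 0.01` and the values 1 to 1000, this would return a value whose rank is within roughly `relativeError * n` (about 10 positions) of the exact 50th percentile, 500.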
--- .../catalyst/analysis/FunctionRegistry.scala | 1 + .../aggregate/ApproximatePercentile.scala | 321 +++++++++++++++++ .../ApproximatePercentileSuite.scala | 339 ++++++++++++++++++ .../sql/ApproximatePercentileQuerySuite.scala | 226 ++++++++++++ .../spark/sql/hive/HiveSessionCatalog.scala | 3 +- .../sql/catalyst/ExpressionToSQLSuite.scala | 5 + 6 files changed, 893 insertions(+), 2 deletions(-) create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala create mode 100644 sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentileSuite.scala create mode 100644 sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala index 35fd800df4a4f..b05f4f61f6a3e 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala @@ -250,6 +250,7 @@ object FunctionRegistry { expression[Average]("mean"), expression[Min]("min"), expression[Skewness]("skewness"), + expression[ApproximatePercentile]("percentile_approx"), expression[StddevSamp]("std"), expression[StddevSamp]("stddev"), expression[StddevPop]("stddev_pop"), diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala new file mode 100644 index 0000000000000..f91ff87fc1c01 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.catalyst.expressions.aggregate + +import java.nio.ByteBuffer + +import com.google.common.primitives.{Doubles, Ints, Longs} + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.catalyst.{InternalRow} +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess} +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile.{PercentileDigest} +import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData} +import org.apache.spark.sql.catalyst.util.QuantileSummaries +import org.apache.spark.sql.catalyst.util.QuantileSummaries.{defaultCompressThreshold, Stats} +import org.apache.spark.sql.types._ + +/** + * The ApproximatePercentile function returns the approximate percentile(s) of a column at the given + * percentage(s). A percentile is a watermark value below which a given percentage of the column + * values fall. For example, the percentile of column `col` at percentage 50% is the median of + * column `col`. + * + * This function supports partial aggregation. + * + * @param child child expression that can produce column value with `child.eval(inputRow)` + * @param percentageExpression Expression that represents a single percentage value or + * an array of percentage values. Each percentage value must be between + * 0.0 and 1.0. + * @param accuracyExpression Integer literal expression of approximation accuracy. Higher value + * yields better accuracy, the default value is + * DEFAULT_PERCENTILE_ACCURACY. + */ +@ExpressionDescription( + usage = + """ + _FUNC_(col, percentage [, accuracy]) - Returns the approximate percentile value of numeric + column `col` at the given percentage. The value of percentage must be between 0.0 + and 1.0. The `accuracy` parameter (default: 10000) is a positive integer literal which + controls approximation accuracy at the cost of memory. Higher value of `accuracy` yields + better accuracy, `1.0/accuracy` is the relative error of the approximation. + + _FUNC_(col, array(percentage1 [, percentage2]...) [, accuracy]) - Returns the approximate + percentile array of column `col` at the given percentage array. Each value of the + percentage array must be between 0.0 and 1.0. The `accuracy` parameter (default: 10000) is + a positive integer literal which controls approximation accuracy at the cost of memory. + Higher value of `accuracy` yields better accuracy, `1.0/accuracy` is the relative error of + the approximation. + """) +case class ApproximatePercentile( + child: Expression, + percentageExpression: Expression, + accuracyExpression: Expression, + override val mutableAggBufferOffset: Int, + override val inputAggBufferOffset: Int) extends TypedImperativeAggregate[PercentileDigest] { + + def this(child: Expression, percentageExpression: Expression, accuracyExpression: Expression) = { + this(child, percentageExpression, accuracyExpression, 0, 0) + } + + def this(child: Expression, percentageExpression: Expression) = { + this(child, percentageExpression, Literal(ApproximatePercentile.DEFAULT_PERCENTILE_ACCURACY)) + } + + // Mark as lazy so that accuracyExpression is not evaluated during tree transformation. 
+ private lazy val accuracy: Int = accuracyExpression.eval().asInstanceOf[Int] + + override def inputTypes: Seq[AbstractDataType] = { + Seq(DoubleType, TypeCollection(DoubleType, ArrayType), IntegerType) + } + + // Mark as lazy so that percentageExpression is not evaluated during tree transformation. + private lazy val (returnPercentileArray: Boolean, percentages: Array[Double]) = { + (percentageExpression.dataType, percentageExpression.eval()) match { + // Rule ImplicitTypeCasts can cast other numeric types to double + case (_, num: Double) => (false, Array(num)) + case (ArrayType(baseType: NumericType, _), arrayData: ArrayData) => + val numericArray = arrayData.toObjectArray(baseType) + (true, numericArray.map { x => + baseType.numeric.toDouble(x.asInstanceOf[baseType.InternalType]) + }) + case other => + throw new AnalysisException(s"Invalid data type ${other._1} for parameter percentage") + } + } + + override def checkInputDataTypes(): TypeCheckResult = { + val defaultCheck = super.checkInputDataTypes() + if (defaultCheck.isFailure) { + defaultCheck + } else if (!percentageExpression.foldable || !accuracyExpression.foldable) { + TypeCheckFailure(s"The accuracy or percentage provided must be a constant literal") + } else if (accuracy <= 0) { + TypeCheckFailure( + s"The accuracy provided must be a positive integer literal (current value = $accuracy)") + } else if (percentages.exists(percentage => percentage < 0.0D || percentage > 1.0D)) { + TypeCheckFailure( + s"All percentage values must be between 0.0 and 1.0 " + + s"(current = ${percentages.mkString(", ")})") + } else { + TypeCheckSuccess + } + } + + override def createAggregationBuffer(): PercentileDigest = { + val relativeError = 1.0D / accuracy + new PercentileDigest(relativeError) + } + + override def update(buffer: PercentileDigest, inputRow: InternalRow): Unit = { + val value = child.eval(inputRow) + // Ignore empty rows, for example: percentile_approx(null) + if (value != null) { + buffer.add(value.asInstanceOf[Double]) + } + } + + override def merge(buffer: PercentileDigest, other: PercentileDigest): Unit = { + buffer.merge(other) + } + + override def eval(buffer: PercentileDigest): Any = { + val result = buffer.getPercentiles(percentages) + if (result.length == 0) { + null + } else if (returnPercentileArray) { + new GenericArrayData(result) + } else { + result(0) + } + } + + override def withNewMutableAggBufferOffset(newOffset: Int): ApproximatePercentile = + copy(mutableAggBufferOffset = newOffset) + + override def withNewInputAggBufferOffset(newOffset: Int): ApproximatePercentile = + copy(inputAggBufferOffset = newOffset) + + override def children: Seq[Expression] = Seq(child, percentageExpression, accuracyExpression) + + // Returns null for empty inputs + override def nullable: Boolean = true + + override def dataType: DataType = { + if (returnPercentileArray) ArrayType(DoubleType) else DoubleType + } + + override def prettyName: String = "percentile_approx" + + override def serialize(obj: PercentileDigest): Array[Byte] = { + ApproximatePercentile.serializer.serialize(obj) + } + + override def deserialize(bytes: Array[Byte]): PercentileDigest = { + ApproximatePercentile.serializer.deserialize(bytes) + } +} + +object ApproximatePercentile { + + // Default accuracy of Percentile approximation. Larger value means better accuracy. 
+ // The default relative error can be deduced by defaultError = 1.0 / DEFAULT_PERCENTILE_ACCURACY + val DEFAULT_PERCENTILE_ACCURACY: Int = 10000 + + /** + * PercentileDigest is a probabilistic data structure used for approximating percentiles + * with limited memory. PercentileDigest is backed by [[QuantileSummaries]]. + * + * @param summaries underlying probabilistic data structure [[QuantileSummaries]]. + * @param isCompressed An internal flag from class [[QuantileSummaries]] to indicate whether the + * underlying quantileSummaries is compressed. + */ + class PercentileDigest( + private var summaries: QuantileSummaries, + private var isCompressed: Boolean) { + + // Trigger compression if the QuantileSummaries's buffer length exceeds + // compressThresHoldBufferLength. The buffer length can be get by + // quantileSummaries.sampled.length + private[this] final val compressThresHoldBufferLength: Int = { + // Max buffer length after compression. + val maxBufferLengthAfterCompression: Int = (1 / summaries.relativeError).toInt * 2 + // A safe upper bound for buffer length before compression + maxBufferLengthAfterCompression * 2 + } + + def this(relativeError: Double) = { + this(new QuantileSummaries(defaultCompressThreshold, relativeError), isCompressed = true) + } + + /** Returns compressed object of [[QuantileSummaries]] */ + def quantileSummaries: QuantileSummaries = { + if (!isCompressed) compress() + summaries + } + + /** Insert an observation value into the PercentileDigest data structure. */ + def add(value: Double): Unit = { + summaries = summaries.insert(value) + // The result of QuantileSummaries.insert is un-compressed + isCompressed = false + + // Currently, QuantileSummaries ignores the construction parameter compressThresHold, + // which may cause QuantileSummaries to occupy unbounded memory. We have to hack around here + // to make sure QuantileSummaries doesn't occupy infinite memory. + // TODO: Figure out why QuantileSummaries ignores construction parameter compressThresHold + if (summaries.sampled.length >= compressThresHoldBufferLength) compress() + } + + /** In-place merges in another PercentileDigest. */ + def merge(other: PercentileDigest): Unit = { + if (!isCompressed) compress() + summaries = summaries.merge(other.quantileSummaries) + } + + /** + * Returns the approximate percentiles of all observation values at the given percentages. + * A percentile is a watermark value below which a given percentage of observation values fall. + * For example, the following code returns the 25th, median, and 75th percentiles of + * all observation values: + * + * {{{ + * val Array(p25, median, p75) = percentileDigest.getPercentiles(Array(0.25, 0.5, 0.75)) + * }}} + */ + def getPercentiles(percentages: Array[Double]): Array[Double] = { + if (!isCompressed) compress() + if (summaries.count == 0 || percentages.length == 0) { + Array.empty[Double] + } else { + val result = new Array[Double](percentages.length) + var i = 0 + while (i < percentages.length) { + result(i) = summaries.query(percentages(i)) + i += 1 + } + result + } + } + + private final def compress(): Unit = { + summaries = summaries.compress() + isCompressed = true + } + } + + /** + * Serializer for class [[PercentileDigest]] + * + * This class is thread safe. 
+ */ + class PercentileDigestSerializer { + + private final def length(summaries: QuantileSummaries): Int = { + // summaries.compressThreshold, summary.relativeError, summary.count + Ints.BYTES + Doubles.BYTES + Longs.BYTES + + // length of summary.sampled + Ints.BYTES + + // summary.sampled, Array[Stat(value: Double, g: Int, delta: Int)] + summaries.sampled.length * (Doubles.BYTES + Ints.BYTES + Ints.BYTES) + } + + final def serialize(obj: PercentileDigest): Array[Byte] = { + val summary = obj.quantileSummaries + val buffer = ByteBuffer.wrap(new Array(length(summary))) + buffer.putInt(summary.compressThreshold) + buffer.putDouble(summary.relativeError) + buffer.putLong(summary.count) + buffer.putInt(summary.sampled.length) + + var i = 0 + while (i < summary.sampled.length) { + val stat = summary.sampled(i) + buffer.putDouble(stat.value) + buffer.putInt(stat.g) + buffer.putInt(stat.delta) + i += 1 + } + buffer.array() + } + + final def deserialize(bytes: Array[Byte]): PercentileDigest = { + val buffer = ByteBuffer.wrap(bytes) + val compressThreshold = buffer.getInt() + val relativeError = buffer.getDouble() + val count = buffer.getLong() + val sampledLength = buffer.getInt() + val sampled = new Array[Stats](sampledLength) + + var i = 0 + while (i < sampledLength) { + val value = buffer.getDouble() + val g = buffer.getInt() + val delta = buffer.getInt() + sampled(i) = Stats(value, g, delta) + i += 1 + } + val summary = new QuantileSummaries(compressThreshold, relativeError, sampled, count) + new PercentileDigest(summary, isCompressed = true) + } + } + + val serializer: PercentileDigestSerializer = new PercentileDigestSerializer +} diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentileSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentileSuite.scala new file mode 100644 index 0000000000000..61298a1b72d77 --- /dev/null +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentileSuite.scala @@ -0,0 +1,339 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.catalyst.expressions.aggregate + +import org.apache.spark.SparkFunSuite +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.analysis.{SimpleAnalyzer, UnresolvedAttribute} +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.TypeCheckFailure +import org.apache.spark.sql.catalyst.dsl.expressions._ +import org.apache.spark.sql.catalyst.dsl.plans._ +import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, BoundReference, Cast, CreateArray, DecimalLiteral, GenericMutableRow, Literal} +import org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile.{PercentileDigest, PercentileDigestSerializer} +import org.apache.spark.sql.catalyst.plans.logical.LocalRelation +import org.apache.spark.sql.catalyst.util.ArrayData +import org.apache.spark.sql.catalyst.util.QuantileSummaries +import org.apache.spark.sql.catalyst.util.QuantileSummaries.Stats +import org.apache.spark.sql.types.{ArrayType, DoubleType, IntegerType} +import org.apache.spark.util.SizeEstimator + +class ApproximatePercentileSuite extends SparkFunSuite { + + private val random = new java.util.Random() + + private val data = (0 until 10000).map { _ => + random.nextInt(10000) + } + + test("serialize and de-serialize") { + val serializer = new PercentileDigestSerializer + + // Check empty serialize and de-serialize + val emptyBuffer = new PercentileDigest(relativeError = 0.01) + assert(compareEquals(emptyBuffer, serializer.deserialize(serializer.serialize(emptyBuffer)))) + + val buffer = new PercentileDigest(relativeError = 0.01) + data.foreach { value => + buffer.add(value) + } + assert(compareEquals(buffer, serializer.deserialize(serializer.serialize(buffer)))) + + val agg = new ApproximatePercentile(BoundReference(0, DoubleType, true), Literal(0.5)) + assert(compareEquals(agg.deserialize(agg.serialize(buffer)), buffer)) + } + + test("class PercentileDigest, basic operations") { + val valueCount = 10000 + val percentages = Array(0.25, 0.5, 0.75) + Seq(0.0001, 0.001, 0.01, 0.1).foreach { relativeError => + val buffer = new PercentileDigest(relativeError) + (1 to valueCount).grouped(10).foreach { group => + val partialBuffer = new PercentileDigest(relativeError) + group.foreach(x => partialBuffer.add(x)) + buffer.merge(partialBuffer) + } + val expectedPercentiles = percentages.map(_ * valueCount) + val approxPercentiles = buffer.getPercentiles(Array(0.25, 0.5, 0.75)) + expectedPercentiles.zip(approxPercentiles).foreach { pair => + val (expected, estimate) = pair + assert((estimate - expected) / valueCount <= relativeError) + } + } + } + + test("class PercentileDigest, makes sure the memory foot print is bounded") { + val relativeError = 0.01 + val memoryFootPrintUpperBound = { + val headBufferSize = + SizeEstimator.estimate(new Array[Double](QuantileSummaries.defaultHeadSize)) + val bufferSize = SizeEstimator.estimate(new Stats(0, 0, 0)) * (1 / relativeError) * 2 + // A safe upper bound + (headBufferSize + bufferSize) * 2 + } + + val sizePerInputs = Seq(100, 1000, 10000, 100000, 1000000, 10000000).map { count => + val buffer = new PercentileDigest(relativeError) + // Worst case, data is linear sorted + (0 until count).foreach(buffer.add(_)) + assert(SizeEstimator.estimate(buffer) < memoryFootPrintUpperBound) + } + } + + test("class ApproximatePercentile, high level interface, update, merge, eval...") { + val count = 10000 + val data = (1 until 10000).toSeq + val percentages = Array(0.25D, 0.5D, 0.75D) + val accuracy = 10000 + 
val expectedPercentiles = percentages.map(count * _) + val childExpression = Cast(BoundReference(0, IntegerType, nullable = false), DoubleType) + val percentageExpression = CreateArray(percentages.toSeq.map(Literal(_))) + val accuracyExpression = Literal(10000) + val agg = new ApproximatePercentile(childExpression, percentageExpression, accuracyExpression) + + assert(agg.nullable) + val group1 = (0 until data.length / 2) + val group1Buffer = agg.createAggregationBuffer() + group1.foreach { index => + val input = InternalRow(data(index)) + agg.update(group1Buffer, input) + } + + val group2 = (data.length / 2 until data.length) + val group2Buffer = agg.createAggregationBuffer() + group2.foreach { index => + val input = InternalRow(data(index)) + agg.update(group2Buffer, input) + } + + val mergeBuffer = agg.createAggregationBuffer() + agg.merge(mergeBuffer, group1Buffer) + agg.merge(mergeBuffer, group2Buffer) + + agg.eval(mergeBuffer) match { + case arrayData: ArrayData => + val error = count / accuracy + val percentiles = arrayData.toDoubleArray() + assert(percentiles.zip(expectedPercentiles) + .forall(pair => Math.abs(pair._1 - pair._2) < error)) + } + } + + test("class ApproximatePercentile, low level interface, update, merge, eval...") { + val childExpression = Cast(BoundReference(0, IntegerType, nullable = true), DoubleType) + val inputAggregationBufferOffset = 1 + val mutableAggregationBufferOffset = 2 + val percentage = 0.5D + + // Phase one, partial mode aggregation + val agg = new ApproximatePercentile(childExpression, Literal(percentage)) + .withNewInputAggBufferOffset(inputAggregationBufferOffset) + .withNewMutableAggBufferOffset(mutableAggregationBufferOffset) + + val mutableAggBuffer = new GenericMutableRow(new Array[Any](mutableAggregationBufferOffset + 1)) + agg.initialize(mutableAggBuffer) + val dataCount = 10 + (1 to dataCount).foreach { data => + agg.update(mutableAggBuffer, InternalRow(data)) + } + agg.serializeAggregateBufferInPlace(mutableAggBuffer) + + // Serialize the aggregation buffer + val serialized = mutableAggBuffer.getBinary(mutableAggregationBufferOffset) + val inputAggBuffer = new GenericMutableRow(Array[Any](null, serialized)) + + // Phase 2: final mode aggregation + // Re-initialize the aggregation buffer + agg.initialize(mutableAggBuffer) + agg.merge(mutableAggBuffer, inputAggBuffer) + val expectedPercentile = dataCount * percentage + assert(Math.abs(agg.eval(mutableAggBuffer).asInstanceOf[Double] - expectedPercentile) < 0.1) + } + + test("class ApproximatePercentile, sql string") { + val defaultAccuracy = ApproximatePercentile.DEFAULT_PERCENTILE_ACCURACY + // sql, single percentile + assertEqual( + s"percentile_approx(`a`, 0.5D, $defaultAccuracy)", + new ApproximatePercentile("a".attr, percentageExpression = Literal(0.5D)).sql: String) + + // sql, array of percentile + assertEqual( + s"percentile_approx(`a`, array(0.25D, 0.5D, 0.75D), $defaultAccuracy)", + new ApproximatePercentile( + "a".attr, + percentageExpression = CreateArray(Seq(0.25D, 0.5D, 0.75D).map(Literal(_))) + ).sql: String) + + // sql(isDistinct = false), single percentile + assertEqual( + s"percentile_approx(`a`, 0.5D, $defaultAccuracy)", + new ApproximatePercentile("a".attr, percentageExpression = Literal(0.5D)) + .sql(isDistinct = false)) + + // sql(isDistinct = false), array of percentile + assertEqual( + s"percentile_approx(`a`, array(0.25D, 0.5D, 0.75D), $defaultAccuracy)", + new ApproximatePercentile( + "a".attr, + percentageExpression = CreateArray(Seq(0.25D, 0.5D, 
0.75D).map(Literal(_))) + ).sql(isDistinct = false)) + + // sql(isDistinct = true), single percentile + assertEqual( + s"percentile_approx(DISTINCT `a`, 0.5D, $defaultAccuracy)", + new ApproximatePercentile("a".attr, percentageExpression = Literal(0.5D)) + .sql(isDistinct = true)) + + // sql(isDistinct = true), array of percentile + assertEqual( + s"percentile_approx(DISTINCT `a`, array(0.25D, 0.5D, 0.75D), $defaultAccuracy)", + new ApproximatePercentile( + "a".attr, + percentageExpression = CreateArray(Seq(0.25D, 0.5D, 0.75D).map(Literal(_))) + ).sql(isDistinct = true)) + } + + test("class ApproximatePercentile, fails analysis if percentage or accuracy is not a constant") { + val attribute = AttributeReference("a", DoubleType)() + val wrongAccuracy = new ApproximatePercentile( + attribute, + percentageExpression = Literal(0.5D), + accuracyExpression = AttributeReference("b", IntegerType)()) + + assertEqual( + wrongAccuracy.checkInputDataTypes(), + TypeCheckFailure("The accuracy or percentage provided must be a constant literal") + ) + + val wrongPercentage = new ApproximatePercentile( + attribute, + percentageExpression = attribute, + accuracyExpression = Literal(10000)) + + assertEqual( + wrongPercentage.checkInputDataTypes(), + TypeCheckFailure("The accuracy or percentage provided must be a constant literal") + ) + } + + test("class ApproximatePercentile, fails analysis if parameters are invalid") { + val wrongAccuracy = new ApproximatePercentile( + AttributeReference("a", DoubleType)(), + percentageExpression = Literal(0.5D), + accuracyExpression = Literal(-1)) + assertEqual( + wrongAccuracy.checkInputDataTypes(), + TypeCheckFailure( + "The accuracy provided must be a positive integer literal (current value = -1)")) + + val correctPercentageExpresions = Seq( + Literal(0D), + Literal(1D), + Literal(0.5D), + CreateArray(Seq(0D, 1D, 0.5D).map(Literal(_))) + ) + correctPercentageExpresions.foreach { percentageExpression => + val correctPercentage = new ApproximatePercentile( + AttributeReference("a", DoubleType)(), + percentageExpression = percentageExpression, + accuracyExpression = Literal(100)) + + // no exception should be thrown + correctPercentage.checkInputDataTypes() + } + + val wrongPercentageExpressions = Seq( + Literal(1.1D), + Literal(-0.5D), + CreateArray(Seq(0D, 0.5D, 1.1D).map(Literal(_))) + ) + + wrongPercentageExpressions.foreach { percentageExpression => + val wrongPercentage = new ApproximatePercentile( + AttributeReference("a", DoubleType)(), + percentageExpression = percentageExpression, + accuracyExpression = Literal(100)) + + val result = wrongPercentage.checkInputDataTypes() + assert( + wrongPercentage.checkInputDataTypes() match { + case TypeCheckFailure(msg) if msg.contains("must be between 0.0 and 1.0") => true + case _ => false + }) + } + } + + test("class ApproximatePercentile, automatically add type casting for parameters") { + val testRelation = LocalRelation('a.int) + val analyzer = SimpleAnalyzer + + // Compatible accuracy types: Long type and decimal type + val accuracyExpressions = Seq(Literal(1000L), DecimalLiteral(10000), Literal(123.0D)) + // Compatible percentage types: float, decimal + val percentageExpressions = Seq(Literal(0.3f), DecimalLiteral(0.5), + CreateArray(Seq(Literal(0.3f), Literal(0.5D), DecimalLiteral(0.7)))) + + accuracyExpressions.foreach { accuracyExpression => + percentageExpressions.foreach { percentageExpression => + val agg = new ApproximatePercentile( + UnresolvedAttribute("a"), + percentageExpression, + accuracyExpression) + val 
analyzed = testRelation.select(agg).analyze.expressions.head + analyzed match { + case Alias(agg: ApproximatePercentile, _) => + assert(agg.resolved) + assert(agg.child.dataType == DoubleType) + assert(agg.percentageExpression.dataType == DoubleType || + agg.percentageExpression.dataType == ArrayType(DoubleType, containsNull = false)) + assert(agg.accuracyExpression.dataType == IntegerType) + case _ => fail() + } + } + } + } + + test("class ApproximatePercentile, null handling") { + val childExpression = Cast(BoundReference(0, IntegerType, nullable = true), DoubleType) + val agg = new ApproximatePercentile(childExpression, Literal(0.5D)) + val buffer = new GenericMutableRow(new Array[Any](1)) + agg.initialize(buffer) + // Empty aggregation buffer + assert(agg.eval(buffer) == null) + // Empty input row + agg.update(buffer, InternalRow(null)) + assert(agg.eval(buffer) == null) + + // Add some non-empty row + agg.update(buffer, InternalRow(0)) + assert(agg.eval(buffer) != null) + } + + private def compareEquals(left: PercentileDigest, right: PercentileDigest): Boolean = { + val leftSummary = left.quantileSummaries + val rightSummary = right.quantileSummaries + leftSummary.compressThreshold == rightSummary.compressThreshold && + leftSummary.relativeError == rightSummary.relativeError && + leftSummary.count == rightSummary.count && + leftSummary.sampled.sameElements(rightSummary.sampled) + } + + private def assertEqual[T](left: T, right: T): Unit = { + assert(left == right) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala new file mode 100644 index 0000000000000..37d7c442bbeb8 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile.DEFAULT_PERCENTILE_ACCURACY +import org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile.PercentileDigest +import org.apache.spark.sql.test.SharedSQLContext + +class ApproximatePercentileQuerySuite extends QueryTest with SharedSQLContext { + import testImplicits._ + + private val table = "percentile_test" + + test("percentile_approx, single percentile value") { + withTempView(table) { + (1 to 1000).toDF("col").createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s""" + |SELECT + | percentile_approx(col, 0.25), + | percentile_approx(col, 0.5), + | percentile_approx(col, 0.75d), + | percentile_approx(col, 0.0), + | percentile_approx(col, 1.0), + | percentile_approx(col, 0), + | percentile_approx(col, 1) + |FROM $table + """.stripMargin), + Row(250D, 500D, 750D, 1D, 1000D, 1D, 1000D) + ) + } + } + + test("percentile_approx, array of percentile value") { + withTempView(table) { + (1 to 1000).toDF("col").createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s"""SELECT + | percentile_approx(col, array(0.25, 0.5, 0.75D)), + | count(col), + | percentile_approx(col, array(0.0, 1.0)), + | sum(col) + |FROM $table + """.stripMargin), + Row(Seq(250D, 500D, 750D), 1000, Seq(1D, 1000D), 500500) + ) + } + } + + test("percentile_approx, with different accuracies") { + + withTempView(table) { + (1 to 1000).toDF("col").createOrReplaceTempView(table) + + // With different accuracies + val expectedPercentile = 250D + val accuracies = Array(1, 10, 100, 1000, 10000) + val errors = accuracies.map { accuracy => + val df = spark.sql(s"SELECT percentile_approx(col, 0.25, $accuracy) FROM $table") + val approximatePercentile = df.collect().head.getDouble(0) + val error = Math.abs(approximatePercentile - expectedPercentile) + error + } + + // The larger accuracy value we use, the smaller error we get + assert(errors.sorted.sameElements(errors.reverse)) + } + } + + test("percentile_approx, supports constant folding for parameter accuracy and percentages") { + withTempView(table) { + (1 to 1000).toDF("col").createOrReplaceTempView(table) + checkAnswer( + spark.sql(s"SELECT percentile_approx(col, array(0.25 + 0.25D), 200 + 800D) FROM $table"), + Row(Seq(500D)) + ) + } + } + + test("percentile_approx(), aggregation on empty input table, no group by") { + withTempView(table) { + Seq.empty[Int].toDF("col").createOrReplaceTempView(table) + checkAnswer( + spark.sql(s"SELECT sum(col), percentile_approx(col, 0.5) FROM $table"), + Row(null, null) + ) + } + } + + test("percentile_approx(), aggregation on empty input table, with group by") { + withTempView(table) { + Seq.empty[Int].toDF("col").createOrReplaceTempView(table) + checkAnswer( + spark.sql(s"SELECT sum(col), percentile_approx(col, 0.5) FROM $table GROUP BY col"), + Seq.empty[Row] + ) + } + } + + test("percentile_approx(null), aggregation with group by") { + withTempView(table) { + (1 to 1000).map(x => (x % 3, x)).toDF("key", "value").createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s"""SELECT + | key, + | percentile_approx(null, 0.5) + |FROM $table + |GROUP BY key + """.stripMargin), + Seq( + Row(0, null), + Row(1, null), + Row(2, null)) + ) + } + } + + test("percentile_approx(null), aggregation without group by") { + withTempView(table) { + (1 to 1000).map(x => (x % 3, x)).toDF("key", "value").createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s"""SELECT + | percentile_approx(null, 0.5), + | 
sum(null), + | percentile_approx(null, 0.5) + |FROM $table + """.stripMargin), + Row(null, null, null) + ) + } + } + + test("percentile_approx(col, ...), input rows contains null, with out group by") { + withTempView(table) { + (1 to 1000).map(new Integer(_)).flatMap(Seq(null: Integer, _)).toDF("col") + .createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s"""SELECT + | percentile_approx(col, 0.5), + | sum(null), + | percentile_approx(col, 0.5) + |FROM $table + """.stripMargin), + Row(500D, null, 500D)) + } + } + + test("percentile_approx(col, ...), input rows contains null, with group by") { + withTempView(table) { + val rand = new java.util.Random() + (1 to 1000) + .map(new Integer(_)) + .map(v => (new Integer(v % 2), v)) + // Add some nulls + .flatMap(Seq(_, (null: Integer, null: Integer))) + .toDF("key", "value").createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s"""SELECT + | percentile_approx(value, 0.5), + | sum(value), + | percentile_approx(value, 0.5) + |FROM $table + |GROUP BY key + """.stripMargin), + Seq( + Row(499.0D, 250000, 499.0D), + Row(500.0D, 250500, 500.0D), + Row(null, null, null)) + ) + } + } + + test("percentile_approx(col, ...) works in window function") { + withTempView(table) { + val data = (1 to 10).map(v => (v % 2, v)) + data.toDF("key", "value").createOrReplaceTempView(table) + + val query = spark.sql( + s""" + |SElECT percentile_approx(value, 0.5) + |OVER + | (PARTITION BY key ORDER BY value ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) + | AS percentile + |FROM $table + """.stripMargin) + + val expected = data.groupBy(_._1).toSeq.flatMap { group => + val (key, values) = group + val sortedValues = values.map(_._2).sorted + + var outputRows = Seq.empty[Row] + var i = 0 + + val percentile = new PercentileDigest(1.0 / DEFAULT_PERCENTILE_ACCURACY) + sortedValues.foreach { value => + percentile.add(value) + outputRows :+= Row(percentile.getPercentiles(Array(0.5D)).head) + } + outputRows + } + + checkAnswer(query, expected) + } + } +} diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala index bfa5899fafdbe..85c509847d8ef 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala @@ -227,7 +227,6 @@ private[sql] class HiveSessionCatalog( private val hiveFunctions = Seq( "hash", "histogram_numeric", - "percentile", - "percentile_approx" + "percentile" ) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionToSQLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionToSQLSuite.scala index b4eb50e331cf9..fdd02821dfa29 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionToSQLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionToSQLSuite.scala @@ -155,6 +155,11 @@ class ExpressionToSQLSuite extends SQLBuilderTest with SQLTestUtils { test("aggregate functions") { checkSqlGeneration("SELECT approx_count_distinct(value) FROM t1 GROUP BY key") + checkSqlGeneration("SELECT percentile_approx(value, 0.25) FROM t1 GROUP BY key") + checkSqlGeneration("SELECT percentile_approx(value, array(0.25, 0.75)) FROM t1 GROUP BY key") + checkSqlGeneration("SELECT percentile_approx(value, 0.25, 100) FROM t1 GROUP BY key") + checkSqlGeneration( + "SELECT percentile_approx(value, array(0.25, 0.75), 100) FROM t1 GROUP BY key") checkSqlGeneration("SELECT 
avg(value) FROM t1 GROUP BY key") checkSqlGeneration("SELECT corr(value, key) FROM t1 GROUP BY key") checkSqlGeneration("SELECT count(value) FROM t1 GROUP BY key") From dd859f95c0aaa0b7c8fbff0a5f108cf3c9bf520a Mon Sep 17 00:00:00 2001 From: "Seigneurin, Alexis (CONT)" Date: Thu, 1 Sep 2016 09:32:05 +0100 Subject: [PATCH 162/270] fixed typos fixed 2 typos Author: Seigneurin, Alexis (CONT) Closes #14877 from aseigneurin/fix-typo-2. --- docs/structured-streaming-programming-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/structured-streaming-programming-guide.md b/docs/structured-streaming-programming-guide.md index cdc3975d7cb7c..c7ed3b04bced1 100644 --- a/docs/structured-streaming-programming-guide.md +++ b/docs/structured-streaming-programming-guide.md @@ -400,7 +400,7 @@ data, thus relieving the users from reasoning about it. As an example, let’s see how this model handles event-time based processing and late arriving data. ## Handling Event-time and Late Data -Event-time is the time embedded in the data itself. For many applications, you may want to operate on this event-time. For example, if you want to get the number of events generated by IoT devices every minute, then you probably want to use the time when the data was generated (that is, event-time in the data), rather than the time Spark receives them. This event-time is very naturally expressed in this model -- each event from the devices is a row in the table, and event-time is a column value in the row. This allows window-based aggregations (e.g. number of event every minute) to be just a special type of grouping and aggregation on the even-time column -- each time window is a group and each row can belong to multiple windows/groups. Therefore, such event-time-window-based aggregation queries can be defined consistently on both a static dataset (e.g. from collected device events logs) as well as on a data stream, making the life of the user much easier. +Event-time is the time embedded in the data itself. For many applications, you may want to operate on this event-time. For example, if you want to get the number of events generated by IoT devices every minute, then you probably want to use the time when the data was generated (that is, event-time in the data), rather than the time Spark receives them. This event-time is very naturally expressed in this model -- each event from the devices is a row in the table, and event-time is a column value in the row. This allows window-based aggregations (e.g. number of events every minute) to be just a special type of grouping and aggregation on the even-time column -- each time window is a group and each row can belong to multiple windows/groups. Therefore, such event-time-window-based aggregation queries can be defined consistently on both a static dataset (e.g. from collected device events logs) as well as on a data stream, making the life of the user much easier. Furthermore, this model naturally handles data that has arrived later than expected based on its event-time. Since Spark is updating the Result Table, it has full control over updating/cleaning up the aggregates when there is late data. While not yet implemented in Spark 2.0, event-time watermarking will be used to manage this data. These are explained later in more details in the [Window Operations](#window-operations-on-event-time) section. 
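As a concrete illustration of the window-based aggregation described in the paragraph above, here is a minimal sketch assuming a streaming DataFrame named `words` with an event-time column `timestamp` and a `word` column (the names are illustrative and not taken from this excerpt):

```scala
// Hypothetical sketch: `words` is assumed to be a streaming DataFrame with
// columns `timestamp` (event-time) and `word`.
import org.apache.spark.sql.functions.{col, window}

// Count words per 10-minute event-time window, sliding every 5 minutes.
// Each window is a group, and a row may fall into more than one window.
val windowedCounts = words
  .groupBy(window(col("timestamp"), "10 minutes", "5 minutes"), col("word"))
  .count()
```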
@@ -535,7 +535,7 @@ ds.filter(_.signal > 10).map(_.device) // using typed APIs df.groupBy("type").count() // using untyped API // Running average signal for each device type -Import org.apache.spark.sql.expressions.scalalang.typed._ +import org.apache.spark.sql.expressions.scalalang.typed._ ds.groupByKey(_.type).agg(typed.avg(_.signal)) // using typed API {% endhighlight %}

From 1f06a5b6a0584d0c9656f58eaf54e54e2383c82b Mon Sep 17 00:00:00 2001
From: gatorsmile
Date: Thu, 1 Sep 2016 16:36:14 +0800
Subject: [PATCH 163/270] [SPARK-17353][SPARK-16943][SPARK-16942][SQL] Fix multiple bugs in CREATE TABLE LIKE command

### What changes were proposed in this pull request?

The existing `CREATE TABLE LIKE` command has multiple issues:

- The generated table is non-empty when the source table is a data source table. The major reason is that the data source table uses the table property `path` to store the location of the table contents. Currently, we keep it unchanged, so we still create the same table with the same location.
- The table type of the generated table is `EXTERNAL` when the source table is an external Hive Serde table. Currently, we explicitly set it to `MANAGED`, but Hive checks the table property `EXTERNAL` to decide whether the table is `EXTERNAL` or not. (See https://github.com/apache/hive/blob/master/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java#L1407-L1408) Thus, the created table is still `EXTERNAL`.
- When the source table is a `VIEW`, the metadata of the generated table contains the original view text and view original text. So far this does not break anything, but it could cause problems in Hive. (For example, https://github.com/apache/hive/blob/master/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java#L1405-L1406)
- The table `comment` is handled incorrectly. To follow what Hive does, the table comment should be cleared, but the column comments should still be kept.
- The `INDEX` table is not supported. Thus, we should throw an exception in this case.
- `owner` should not be retained. `ToHiveTable` sets it [here](https://github.com/apache/spark/blob/e679bc3c1cd418ef0025d2ecbc547c9660cac433/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala#L793) no matter which value we set in `CatalogTable`. We set it to an empty string to avoid confusing output in Explain.
- Add support for temporary tables.
- Like Hive, we should not copy the table properties from the source table to the created table, especially the statistics-related properties, which could be wrong in the created table.
- `unsupportedFeatures` should not be copied from the source table. The created table does not have these unsupported features.
- When the source table is a view, the target table uses the default data source format, `spark.sql.sources.default`.

This PR fixes the above issues.

### How was this patch tested?

Improved test coverage by adding more test cases.

Author: gatorsmile

Closes #14531 from gatorsmile/createTableLike.
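To make the intended semantics concrete, here is a short, hypothetical sketch of the behavior the new tests below verify, written against the public API (the table names `src` and `dst` and the use of the JSON data source are illustrative, not taken from the patch):

```scala
// Hypothetical illustration; assumes a SparkSession named `spark`.
// Create a non-empty data source table to copy from.
spark.range(10).write.format("json").saveAsTable("src")

// After this fix, CREATE TABLE LIKE copies only the definition (schema,
// provider, partitioning, bucketing) -- not the data, the `path` option,
// the comment, or the source table's properties.
spark.sql("CREATE TABLE dst LIKE src")

assert(spark.table("dst").schema.fieldNames.sameElements(
  spark.table("src").schema.fieldNames))   // same columns as the source
assert(spark.table("dst").count() == 0)    // but no rows were copied
```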
--- .../spark/sql/execution/command/tables.scala | 57 ++++- .../sql/hive/client/HiveClientImpl.scala | 4 +- .../sql/hive/execution/HiveDDLSuite.scala | 226 +++++++++++++++++- 3 files changed, 273 insertions(+), 14 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index b4a15b8b2882e..67b2329effb7b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -29,17 +29,23 @@ import org.apache.hadoop.fs.Path import org.apache.spark.sql.{AnalysisException, Row, SparkSession} import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogTable, CatalogTableType} +import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.catalyst.catalog.CatalogTableType._ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference} import org.apache.spark.sql.catalyst.util.quoteIdentifier import org.apache.spark.sql.execution.datasources.PartitioningUtils +import org.apache.spark.sql.internal.HiveSerDe import org.apache.spark.sql.types._ import org.apache.spark.util.Utils /** - * A command to create a table with the same definition of the given existing table. + * A command to create a MANAGED table with the same definition of the given existing table. + * In the target table definition, the table comment is always empty but the column comments + * are identical to the ones defined in the source table. + * + * The CatalogTable attributes copied from the source table are storage(inputFormat, outputFormat, + * serde, compressed, properties), schema, provider, partitionColumnNames, bucketSpec. 
* * The syntax of using this command in SQL is: * {{{ @@ -58,18 +64,45 @@ case class CreateTableLikeCommand( throw new AnalysisException( s"Source table in CREATE TABLE LIKE does not exist: '$sourceTable'") } - if (catalog.isTemporaryTable(sourceTable)) { - throw new AnalysisException( - s"Source table in CREATE TABLE LIKE cannot be temporary: '$sourceTable'") + + val sourceTableDesc = catalog.getTableMetadata(sourceTable) + + // Storage format + val newStorage = + if (sourceTableDesc.tableType == CatalogTableType.VIEW) { + val newPath = catalog.defaultTablePath(targetTable) + CatalogStorageFormat.empty.copy(properties = Map("path" -> newPath)) + } else if (DDLUtils.isDatasourceTable(sourceTableDesc)) { + val newPath = catalog.defaultTablePath(targetTable) + val newSerdeProp = + sourceTableDesc.storage.properties.filterKeys(_.toLowerCase != "path") ++ + Map("path" -> newPath) + sourceTableDesc.storage.copy( + locationUri = None, + properties = newSerdeProp) + } else { + sourceTableDesc.storage.copy( + locationUri = None, + properties = sourceTableDesc.storage.properties) + } + + val newProvider = if (sourceTableDesc.tableType == CatalogTableType.VIEW) { + Some(sparkSession.sessionState.conf.defaultDataSourceName) + } else { + sourceTableDesc.provider } - val tableToCreate = catalog.getTableMetadata(sourceTable).copy( - identifier = targetTable, - tableType = CatalogTableType.MANAGED, - createTime = System.currentTimeMillis, - lastAccessTime = -1).withNewStorage(locationUri = None) + val newTableDesc = + CatalogTable( + identifier = targetTable, + tableType = CatalogTableType.MANAGED, + storage = newStorage, + schema = sourceTableDesc.schema, + provider = newProvider, + partitionColumnNames = sourceTableDesc.partitionColumnNames, + bucketSpec = sourceTableDesc.bucketSpec) - catalog.createTable(tableToCreate, ifNotExists) + catalog.createTable(newTableDesc, ifNotExists) Seq.empty[Row] } } @@ -517,7 +550,7 @@ case class ShowTablesCommand( /** - * A command for users to list the properties for a table If propertyKey is specified, the value + * A command for users to list the properties for a table. If propertyKey is specified, the value * for the propertyKey is returned. If propertyKey is not specified, all the keys and their * corresponding values are returned. * The syntax of using this command in SQL is: diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index dd982192a3837..54ec61abedb12 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -404,7 +404,9 @@ private[hive] class HiveClientImpl( properties = Option(h.getTTable.getSd.getSerdeInfo.getParameters) .map(_.asScala.toMap).orNull ), - properties = properties.filter(kv => kv._1 != "comment"), + // For EXTERNAL_TABLE, the table properties has a particular field "EXTERNAL". This is added + // in the function toHiveTable. 
+ properties = properties.filter(kv => kv._1 != "comment" && kv._1 != "EXTERNAL"), comment = properties.get("comment"), viewOriginalText = Option(h.getViewOriginalText), viewText = Option(h.getViewExpandedText), diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index 58c43ebcae6fc..7f3d96de85ae7 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -25,8 +25,10 @@ import org.scalatest.BeforeAndAfterEach import org.apache.spark.internal.config._ import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException -import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTableType} +import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTable, CatalogTableType} import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.execution.command.DDLUtils +import org.apache.spark.sql.execution.datasources.CaseInsensitiveMap import org.apache.spark.sql.hive.HiveExternalCatalog import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.internal.SQLConf @@ -660,6 +662,228 @@ class HiveDDLSuite } } + test("CREATE TABLE LIKE a temporary view") { + val sourceViewName = "tab1" + val targetTabName = "tab2" + withTempView(sourceViewName) { + withTable(targetTabName) { + spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd) + .createTempView(sourceViewName) + sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName") + + val sourceTable = spark.sessionState.catalog.getTableMetadata( + TableIdentifier(sourceViewName, None)) + val targetTable = spark.sessionState.catalog.getTableMetadata( + TableIdentifier(targetTabName, Some("default"))) + + checkCreateTableLike(sourceTable, targetTable) + } + } + } + + test("CREATE TABLE LIKE a data source table") { + val sourceTabName = "tab1" + val targetTabName = "tab2" + withTable(sourceTabName, targetTabName) { + spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd) + .write.format("json").saveAsTable(sourceTabName) + sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName") + + val sourceTable = + spark.sessionState.catalog.getTableMetadata(TableIdentifier(sourceTabName, Some("default"))) + val targetTable = + spark.sessionState.catalog.getTableMetadata(TableIdentifier(targetTabName, Some("default"))) + // The table type of the source table should be a Hive-managed data source table + assert(DDLUtils.isDatasourceTable(sourceTable)) + assert(sourceTable.tableType == CatalogTableType.MANAGED) + + checkCreateTableLike(sourceTable, targetTable) + } + } + + test("CREATE TABLE LIKE an external data source table") { + val sourceTabName = "tab1" + val targetTabName = "tab2" + withTable(sourceTabName, targetTabName) { + withTempPath { dir => + val path = dir.getCanonicalPath + spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd) + .write.format("parquet").save(path) + sql(s"CREATE TABLE $sourceTabName USING parquet OPTIONS (PATH '$path')") + sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName") + + // The source table should be an external data source table + val sourceTable = spark.sessionState.catalog.getTableMetadata( + TableIdentifier(sourceTabName, Some("default"))) + val targetTable = spark.sessionState.catalog.getTableMetadata( + 
TableIdentifier(targetTabName, Some("default"))) + // The table type of the source table should be an external data source table + assert(DDLUtils.isDatasourceTable(sourceTable)) + assert(sourceTable.tableType == CatalogTableType.EXTERNAL) + + checkCreateTableLike(sourceTable, targetTable) + } + } + } + + test("CREATE TABLE LIKE a managed Hive serde table") { + val catalog = spark.sessionState.catalog + val sourceTabName = "tab1" + val targetTabName = "tab2" + withTable(sourceTabName, targetTabName) { + sql(s"CREATE TABLE $sourceTabName TBLPROPERTIES('prop1'='value1') AS SELECT 1 key, 'a'") + sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName") + + val sourceTable = catalog.getTableMetadata(TableIdentifier(sourceTabName, Some("default"))) + assert(sourceTable.tableType == CatalogTableType.MANAGED) + assert(sourceTable.properties.get("prop1").nonEmpty) + val targetTable = catalog.getTableMetadata(TableIdentifier(targetTabName, Some("default"))) + + checkCreateTableLike(sourceTable, targetTable) + } + } + + test("CREATE TABLE LIKE an external Hive serde table") { + val catalog = spark.sessionState.catalog + withTempDir { tmpDir => + val basePath = tmpDir.getCanonicalPath + val sourceTabName = "tab1" + val targetTabName = "tab2" + withTable(sourceTabName, targetTabName) { + assert(tmpDir.listFiles.isEmpty) + sql( + s""" + |CREATE EXTERNAL TABLE $sourceTabName (key INT comment 'test', value STRING) + |COMMENT 'Apache Spark' + |PARTITIONED BY (ds STRING, hr STRING) + |LOCATION '$basePath' + """.stripMargin) + for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) { + sql( + s""" + |INSERT OVERWRITE TABLE $sourceTabName + |partition (ds='$ds',hr='$hr') + |SELECT 1, 'a' + """.stripMargin) + } + sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName") + + val sourceTable = catalog.getTableMetadata(TableIdentifier(sourceTabName, Some("default"))) + assert(sourceTable.tableType == CatalogTableType.EXTERNAL) + assert(sourceTable.comment == Option("Apache Spark")) + val targetTable = catalog.getTableMetadata(TableIdentifier(targetTabName, Some("default"))) + + checkCreateTableLike(sourceTable, targetTable) + } + } + } + + test("CREATE TABLE LIKE a view") { + val sourceTabName = "tab1" + val sourceViewName = "view" + val targetTabName = "tab2" + withTable(sourceTabName, targetTabName) { + withView(sourceViewName) { + spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd) + .write.format("json").saveAsTable(sourceTabName) + sql(s"CREATE VIEW $sourceViewName AS SELECT * FROM $sourceTabName") + sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName") + + val sourceView = spark.sessionState.catalog.getTableMetadata( + TableIdentifier(sourceViewName, Some("default"))) + // The original source should be a VIEW with an empty path + assert(sourceView.tableType == CatalogTableType.VIEW) + assert(sourceView.viewText.nonEmpty && sourceView.viewOriginalText.nonEmpty) + val targetTable = spark.sessionState.catalog.getTableMetadata( + TableIdentifier(targetTabName, Some("default"))) + + checkCreateTableLike(sourceView, targetTable) + } + } + } + + private def getTablePath(table: CatalogTable): Option[String] = { + if (DDLUtils.isDatasourceTable(table)) { + new CaseInsensitiveMap(table.storage.properties).get("path") + } else { + table.storage.locationUri + } + } + + private def checkCreateTableLike(sourceTable: CatalogTable, targetTable: CatalogTable): Unit = { + // The created table should be a MANAGED table with empty view text and original text. 
+ assert(targetTable.tableType == CatalogTableType.MANAGED, + "the created table must be a Hive managed table") + assert(targetTable.viewText.isEmpty && targetTable.viewOriginalText.isEmpty, + "the view text and original text in the created table must be empty") + assert(targetTable.comment.isEmpty, + "the comment in the created table must be empty") + assert(targetTable.unsupportedFeatures.isEmpty, + "the unsupportedFeatures in the create table must be empty") + + val metastoreGeneratedProperties = Seq( + "CreateTime", + "transient_lastDdlTime", + "grantTime", + "lastUpdateTime", + "last_modified_by", + "last_modified_time", + "Owner:", + "COLUMN_STATS_ACCURATE", + "numFiles", + "numRows", + "rawDataSize", + "totalSize", + "totalNumberFiles", + "maxFileSize", + "minFileSize" + ) + assert(targetTable.properties.filterKeys(!metastoreGeneratedProperties.contains(_)).isEmpty, + "the table properties of source tables should not be copied in the created table") + + if (DDLUtils.isDatasourceTable(sourceTable) || + sourceTable.tableType == CatalogTableType.VIEW) { + assert(DDLUtils.isDatasourceTable(targetTable), + "the target table should be a data source table") + } else { + assert(!DDLUtils.isDatasourceTable(targetTable), + "the target table should be a Hive serde table") + } + + if (sourceTable.tableType == CatalogTableType.VIEW) { + // Source table is a temporary/permanent view, which does not have a provider. The created + // target table uses the default data source format + assert(targetTable.provider == Option(spark.sessionState.conf.defaultDataSourceName)) + } else { + assert(targetTable.provider == sourceTable.provider) + } + + val sourceTablePath = getTablePath(sourceTable) + val targetTablePath = getTablePath(targetTable) + assert(targetTablePath.nonEmpty, "target table path should not be empty") + assert(sourceTablePath != targetTablePath, + "source table/view path should be different from target table path") + + // The source table contents should not been seen in the target table. + assert(spark.table(sourceTable.identifier).count() != 0, "the source table should be nonempty") + assert(spark.table(targetTable.identifier).count() == 0, "the target table should be empty") + + // Their schema should be identical + checkAnswer( + sql(s"DESC ${sourceTable.identifier}"), + sql(s"DESC ${targetTable.identifier}")) + + withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") { + // Check whether the new table can be inserted using the data from the original table + sql(s"INSERT INTO TABLE ${targetTable.identifier} SELECT * FROM ${sourceTable.identifier}") + } + + // After insertion, the data should be identical + checkAnswer( + sql(s"SELECT * FROM ${sourceTable.identifier}"), + sql(s"SELECT * FROM ${targetTable.identifier}")) + } + test("desc table for data source table") { withTable("tab1") { val tabName = "tab1" From 8e740ae44d55570a3e7b6eae1f0239ac1319b986 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Thu, 1 Sep 2016 16:45:22 +0800 Subject: [PATCH 164/270] [SPARK-17257][SQL] the physical plan of CREATE TABLE or CTAS should take CatalogTable ## What changes were proposed in this pull request? This is kind of a follow-up of https://github.com/apache/spark/pull/14482 . As we put `CatalogTable` in the logical plan directly, it makes sense to let physical plans take `CatalogTable` directly, instead of extracting some fields of `CatalogTable` in planner and then construct a new `CatalogTable` in physical plan. ## How was this patch tested? existing tests. 
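The shape of the change can be sketched with simplified stand-in types (these are not Spark's real classes, only an illustration of passing the table descriptor through intact instead of exploding it into individual parameters):

```scala
// Stand-in type for illustration only; Spark's real CatalogTable carries many
// more fields (storage, schema, provider, partitioning, bucketing, ...).
case class TableDesc(name: String, provider: Option[String], properties: Map[String, String])

// After the change, the physical command carries the descriptor whole...
case class CreateTableCommandSketch(table: TableDesc, ignoreIfExists: Boolean) {
  // ...and adjusts it with copy() instead of rebuilding it from loose fields,
  // so nothing from the logical plan's descriptor is silently dropped.
  def run(defaultPath: String): TableDesc =
    table.copy(properties = table.properties + ("path" -> defaultPath))
}
```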
Author: Wenchen Fan Closes #14823 from cloud-fan/create-table. --- .../apache/spark/sql/DataFrameWriter.scala | 10 +- .../spark/sql/execution/SparkSqlParser.scala | 15 +- .../spark/sql/execution/SparkStrategies.scala | 16 +-- .../command/createDataSourceTables.scala | 135 ++++++++---------- .../sql/hive/MetastoreDataSourcesSuite.scala | 6 +- 5 files changed, 78 insertions(+), 104 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala index a9049a60f25e5..c05c7a6551600 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala @@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.catalyst.plans.logical.InsertIntoTable -import org.apache.spark.sql.execution.datasources.{CreateTable, DataSource, HadoopFsRelation} +import org.apache.spark.sql.execution.datasources.{CaseInsensitiveMap, CreateTable, DataSource, HadoopFsRelation} import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils} import org.apache.spark.sql.types.StructType @@ -368,9 +368,15 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) { throw new AnalysisException(s"Table $tableIdent already exists.") case _ => + val tableType = if (new CaseInsensitiveMap(extraOptions.toMap).contains("path")) { + CatalogTableType.EXTERNAL + } else { + CatalogTableType.MANAGED + } + val tableDesc = CatalogTable( identifier = tableIdent, - tableType = CatalogTableType.EXTERNAL, + tableType = tableType, storage = CatalogStorageFormat.empty.copy(properties = extraOptions.toMap), schema = new StructType, provider = Some(source), diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala index 656494d97dbd5..8fc1a8595a45f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala @@ -325,14 +325,17 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { .getOrElse(Array.empty[String]) val bucketSpec = Option(ctx.bucketSpec()).map(visitBucketSpec) + // TODO: this may be wrong for non file-based data source like JDBC, which should be external + // even there is no `path` in options. We should consider allow the EXTERNAL keyword. + val tableType = if (new CaseInsensitiveMap(options).contains("path")) { + CatalogTableType.EXTERNAL + } else { + CatalogTableType.MANAGED + } + val tableDesc = CatalogTable( identifier = table, - // TODO: actually the table type may be EXTERNAL if we have `path` in options. However, the - // physical plan `CreateDataSourceTableCommand` doesn't take table type as parameter, but a - // boolean flag called `managedIfNoPath`. We set the table type to MANAGED here to simulate - // setting the `managedIfNoPath` flag. In the future we should refactor the physical plan and - // make it take `CatalogTable` directly. 
- tableType = CatalogTableType.MANAGED, + tableType = tableType, storage = CatalogStorageFormat.empty.copy(properties = options), schema = schema.getOrElse(new StructType), provider = Some(provider), diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index 4aaf454285f4f..b4899ad688f96 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -424,15 +424,7 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { case CreateTable(tableDesc, mode, None) => val cmd = - CreateDataSourceTableCommand( - tableDesc.identifier, - if (tableDesc.schema.nonEmpty) Some(tableDesc.schema) else None, - tableDesc.provider.get, - tableDesc.storage.properties, - tableDesc.partitionColumnNames.toArray, - tableDesc.bucketSpec, - ignoreIfExists = mode == SaveMode.Ignore, - managedIfNoPath = tableDesc.tableType == CatalogTableType.MANAGED) + CreateDataSourceTableCommand(tableDesc, ignoreIfExists = mode == SaveMode.Ignore) ExecutedCommandExec(cmd) :: Nil // CREATE TABLE ... AS SELECT ... for hive serde table is handled in hive module, by rule @@ -441,12 +433,8 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { case CreateTable(tableDesc, mode, Some(query)) if tableDesc.provider.get != "hive" => val cmd = CreateDataSourceTableAsSelectCommand( - tableDesc.identifier, - tableDesc.provider.get, - tableDesc.partitionColumnNames.toArray, - tableDesc.bucketSpec, + tableDesc, mode, - tableDesc.storage.properties, query) ExecutedCommandExec(cmd) :: Nil diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala index 7400a0e7bb1f1..da3f6c600adef 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala @@ -40,71 +40,56 @@ import org.apache.spark.sql.types._ * USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...]) * }}} */ -case class CreateDataSourceTableCommand( - tableIdent: TableIdentifier, - userSpecifiedSchema: Option[StructType], - provider: String, - options: Map[String, String], - userSpecifiedPartitionColumns: Array[String], - bucketSpec: Option[BucketSpec], - ignoreIfExists: Boolean, - managedIfNoPath: Boolean) +case class CreateDataSourceTableCommand(table: CatalogTable, ignoreIfExists: Boolean) extends RunnableCommand { override def run(sparkSession: SparkSession): Seq[Row] = { - val tableName = tableIdent.unquotedString - val sessionState = sparkSession.sessionState + assert(table.tableType != CatalogTableType.VIEW) + assert(table.provider.isDefined) - if (sessionState.catalog.tableExists(tableIdent)) { + val sessionState = sparkSession.sessionState + if (sessionState.catalog.tableExists(table.identifier)) { if (ignoreIfExists) { return Seq.empty[Row] } else { - throw new AnalysisException(s"Table $tableName already exists.") + throw new AnalysisException(s"Table ${table.identifier.unquotedString} already exists.") } } - var isExternal = true - val optionsWithPath = - if (!new CaseInsensitiveMap(options).contains("path") && managedIfNoPath) { - isExternal = false - options + ("path" -> sessionState.catalog.defaultTablePath(tableIdent)) - 
} else { - options - } + val optionsWithPath = if (table.tableType == CatalogTableType.MANAGED) { + table.storage.properties + ("path" -> sessionState.catalog.defaultTablePath(table.identifier)) + } else { + table.storage.properties + } - // Create the relation to validate the arguments before writing the metadata to the metastore. + // Create the relation to validate the arguments before writing the metadata to the metastore, + // and infer the table schema and partition if users didn't specify schema in CREATE TABLE. val dataSource: BaseRelation = DataSource( sparkSession = sparkSession, - userSpecifiedSchema = userSpecifiedSchema, - className = provider, - bucketSpec = None, + userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema), + className = table.provider.get, + bucketSpec = table.bucketSpec, options = optionsWithPath).resolveRelation(checkPathExist = false) - val partitionColumns = if (userSpecifiedSchema.nonEmpty) { - userSpecifiedPartitionColumns + val partitionColumnNames = if (table.schema.nonEmpty) { + table.partitionColumnNames } else { // This is guaranteed in `PreprocessDDL`. - assert(userSpecifiedPartitionColumns.isEmpty) + assert(table.partitionColumnNames.isEmpty) dataSource match { - case r: HadoopFsRelation => r.partitionSchema.fieldNames - case _ => Array.empty[String] + case r: HadoopFsRelation => r.partitionSchema.fieldNames.toSeq + case _ => Nil } } - val table = CatalogTable( - identifier = tableIdent, - tableType = if (isExternal) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED, - storage = CatalogStorageFormat.empty.copy(properties = optionsWithPath), + val newTable = table.copy( + storage = table.storage.copy(properties = optionsWithPath), schema = dataSource.schema, - provider = Some(provider), - partitionColumnNames = partitionColumns, - bucketSpec = bucketSpec - ) - + partitionColumnNames = partitionColumnNames) // We will return Nil or throw exception at the beginning if the table already exists, so when // we reach here, the table should not exist and we should set `ignoreIfExists` to false. - sessionState.catalog.createTable(table, ignoreIfExists = false) + sessionState.catalog.createTable(newTable, ignoreIfExists = false) Seq.empty[Row] } } @@ -112,7 +97,7 @@ case class CreateDataSourceTableCommand( /** * A command used to create a data source table using the result of a query. * - * Note: This is different from [[CreateTableAsSelectLogicalPlan]]. Please check the syntax for + * Note: This is different from `CreateHiveTableAsSelectCommand`. Please check the syntax for * difference. This is not intended for temporary tables. 
* * The syntax of using this command in SQL is: @@ -123,32 +108,31 @@ case class CreateDataSourceTableCommand( * }}} */ case class CreateDataSourceTableAsSelectCommand( - tableIdent: TableIdentifier, - provider: String, - partitionColumns: Array[String], - bucketSpec: Option[BucketSpec], + table: CatalogTable, mode: SaveMode, - options: Map[String, String], query: LogicalPlan) extends RunnableCommand { override protected def innerChildren: Seq[QueryPlan[_]] = Seq(query) override def run(sparkSession: SparkSession): Seq[Row] = { - val tableName = tableIdent.unquotedString + assert(table.tableType != CatalogTableType.VIEW) + assert(table.provider.isDefined) + assert(table.schema.isEmpty) + + val tableName = table.identifier.unquotedString + val provider = table.provider.get val sessionState = sparkSession.sessionState - var createMetastoreTable = false - var isExternal = true - val optionsWithPath = - if (!new CaseInsensitiveMap(options).contains("path")) { - isExternal = false - options + ("path" -> sessionState.catalog.defaultTablePath(tableIdent)) - } else { - options - } + val optionsWithPath = if (table.tableType == CatalogTableType.MANAGED) { + table.storage.properties + ("path" -> sessionState.catalog.defaultTablePath(table.identifier)) + } else { + table.storage.properties + } + + var createMetastoreTable = false var existingSchema = Option.empty[StructType] - if (sparkSession.sessionState.catalog.tableExists(tableIdent)) { + if (sparkSession.sessionState.catalog.tableExists(table.identifier)) { // Check if we need to throw an exception or just return. mode match { case SaveMode.ErrorIfExists => @@ -165,21 +149,21 @@ case class CreateDataSourceTableAsSelectCommand( val dataSource = DataSource( sparkSession = sparkSession, userSpecifiedSchema = Some(query.schema.asNullable), - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, + partitionColumns = table.partitionColumnNames, + bucketSpec = table.bucketSpec, className = provider, options = optionsWithPath) // TODO: Check that options from the resolved relation match the relation that we are // inserting into (i.e. using the same compression). EliminateSubqueryAliases( - sessionState.catalog.lookupRelation(tableIdent)) match { + sessionState.catalog.lookupRelation(table.identifier)) match { case l @ LogicalRelation(_: InsertableRelation | _: HadoopFsRelation, _, _) => // check if the file formats match l.relation match { case r: HadoopFsRelation if r.fileFormat.getClass != dataSource.providingClass => throw new AnalysisException( - s"The file format of the existing table $tableIdent is " + + s"The file format of the existing table $tableName is " + s"`${r.fileFormat.getClass.getName}`. It doesn't match the specified " + s"format `$provider`") case _ => @@ -216,36 +200,29 @@ case class CreateDataSourceTableAsSelectCommand( val dataSource = DataSource( sparkSession, className = provider, - partitionColumns = partitionColumns, - bucketSpec = bucketSpec, + partitionColumns = table.partitionColumnNames, + bucketSpec = table.bucketSpec, options = optionsWithPath) val result = try { dataSource.write(mode, df) } catch { case ex: AnalysisException => - logError(s"Failed to write to table ${tableIdent.identifier} in $mode mode", ex) + logError(s"Failed to write to table $tableName in $mode mode", ex) throw ex } if (createMetastoreTable) { - // We will use the schema of resolved.relation as the schema of the table (instead of - // the schema of df). 
It is important since the nullability may be changed by the relation - // provider (for example, see org.apache.spark.sql.parquet.DefaultSource). - val schema = result.schema - val table = CatalogTable( - identifier = tableIdent, - tableType = if (isExternal) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED, - storage = CatalogStorageFormat.empty.copy(properties = optionsWithPath), - schema = schema, - provider = Some(provider), - partitionColumnNames = partitionColumns, - bucketSpec = bucketSpec - ) - sessionState.catalog.createTable(table, ignoreIfExists = false) + val newTable = table.copy( + storage = table.storage.copy(properties = optionsWithPath), + // We will use the schema of resolved.relation as the schema of the table (instead of + // the schema of df). It is important since the nullability may be changed by the relation + // provider (for example, see org.apache.spark.sql.parquet.DefaultSource). + schema = result.schema) + sessionState.catalog.createTable(newTable, ignoreIfExists = false) } // Refresh the cache of the table in the catalog. - sessionState.catalog.refreshTable(tableIdent) + sessionState.catalog.refreshTable(table.identifier) Seq.empty[Row] } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala index d77bb5cf95f69..7a71475a2f192 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala @@ -906,7 +906,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv val e = intercept[AnalysisException] { createDF(10, 19).write.mode(SaveMode.Append).format("orc").saveAsTable("appendOrcToParquet") } - assert(e.getMessage.contains("The file format of the existing table `appendOrcToParquet` " + + assert(e.getMessage.contains("The file format of the existing table appendOrcToParquet " + "is `org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat`. " + "It doesn't match the specified format `orc`")) } @@ -917,7 +917,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv createDF(10, 19).write.mode(SaveMode.Append).format("parquet") .saveAsTable("appendParquetToJson") } - assert(e.getMessage.contains("The file format of the existing table `appendParquetToJson` " + + assert(e.getMessage.contains("The file format of the existing table appendParquetToJson " + "is `org.apache.spark.sql.execution.datasources.json.JsonFileFormat`. " + "It doesn't match the specified format `parquet`")) } @@ -928,7 +928,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv createDF(10, 19).write.mode(SaveMode.Append).format("text") .saveAsTable("appendTextToJson") } - assert(e.getMessage.contains("The file format of the existing table `appendTextToJson` is " + + assert(e.getMessage.contains("The file format of the existing table appendTextToJson is " + "`org.apache.spark.sql.execution.datasources.json.JsonFileFormat`. " + "It doesn't match the specified format `text`")) } From adaaffa34ef0ef6a7baa5c1fea848cf5bc3987a2 Mon Sep 17 00:00:00 2001 From: Tejas Patil Date: Thu, 1 Sep 2016 16:47:37 +0800 Subject: [PATCH 165/270] [SPARK-17271][SQL] Remove redundant `semanticEquals()` from `SortOrder` ## What changes were proposed in this pull request? 
Removing `semanticEquals()` from `SortOrder` because it can use the `semanticEquals()` provided by its parent class (`Expression`). This was per a suggestion from cloud-fan at https://github.com/apache/spark/pull/14841/files/7192418b3a26a14642fc04fc92bf496a954ffa5d#r77106801

## How was this patch tested?

Ran the test added in https://github.com/apache/spark/pull/14841

Author: Tejas Patil

Closes #14910 from tejasapatil/SPARK-17271_remove_semantic_ordering.

--- .../org/apache/spark/sql/catalyst/expressions/SortOrder.scala | 3 --- 1 file changed, 3 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala index f498f357924d0..de779ed3702d3 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala @@ -61,9 +61,6 @@ case class SortOrder(child: Expression, direction: SortDirection) override def sql: String = child.sql + " " + direction.sql def isAscending: Boolean = direction == Ascending - - def semanticEquals(other: SortOrder): Boolean = - (direction == other.direction) && child.semanticEquals(other.child) } /**

From a0aac4b775bc8c275f96ad0fbf85c9d8a3690588 Mon Sep 17 00:00:00 2001
From: Angus Gerry
Date: Thu, 1 Sep 2016 10:35:31 -0700
Subject: [PATCH 166/270] [SPARK-16533][CORE] resolve deadlocking in driver when executors die

## What changes were proposed in this pull request?

This pull request reverts the changes made as part of #14605, which simply side-steps the deadlock issue. Instead, I propose the following approach:

* Use `scheduleWithFixedDelay` when calling `ExecutorAllocationManager.schedule` for scheduling executor requests. The intent of this is that if invocations are delayed beyond the default schedule interval on account of lock contention, then we avoid a situation where calls to `schedule` are made back-to-back, potentially releasing and then immediately reacquiring these locks, further exacerbating contention.
* Replace a number of calls to `askWithRetry` with `ask` inside the message handling code in `CoarseGrainedSchedulerBackend` and its ilk. This allows us to queue messages with the relevant endpoints, release whatever locks we might be holding, and then block whilst awaiting the response. This change is made at the cost of being able to retry should sending the message fail, as retrying outside of the lock could easily cause race conditions if other conflicting messages have been sent whilst awaiting a response. I believe this to be the lesser of two evils, as in many cases these RPC calls are to process-local components, so failures are more likely to be deterministic and timeouts are more likely to be caused by lock contention.

## How was this patch tested?

Existing tests, and manual tests under yarn-client mode.

Author: Angus Gerry

Closes #14710 from angolon/SPARK-16533.
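The core pattern in the backend changes below is to start the RPC while holding the lock but wait for the reply only after the lock has been released. A minimal standalone sketch of that pattern, using plain Scala futures rather than Spark's `RpcEndpointRef` (names and values are illustrative):

```scala
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object AskOutsideLockSketch {
  private val lock = new Object
  private var pendingExecutors = 0

  // Stand-in for an RPC ask(): returns a Future immediately instead of
  // blocking and retrying the way askWithRetry did.
  private def askClusterManager(requestedTotal: Int): Future[Boolean] =
    Future { requestedTotal >= 0 }

  def requestExecutors(numAdditional: Int): Boolean = {
    // Mutate shared state and *start* the request while holding the lock...
    val response = lock.synchronized {
      pendingExecutors += numAdditional
      askClusterManager(pendingExecutors)
    }
    // ...but block for the reply only after the lock has been released, so a
    // concurrent message handler that needs the same lock cannot deadlock us.
    Await.result(response, 10.seconds)
  }
}
```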
--- .../spark/ExecutorAllocationManager.scala | 2 +- .../deploy/client/StandaloneAppClient.scala | 38 +++---- .../CoarseGrainedSchedulerBackend.scala | 105 +++++++++++------- .../cluster/StandaloneSchedulerBackend.scala | 10 +- .../apache/spark/HeartbeatReceiverSuite.scala | 9 +- .../spark/deploy/client/AppClientSuite.scala | 30 +++-- .../MesosCoarseGrainedSchedulerBackend.scala | 5 +- ...osCoarseGrainedSchedulerBackendSuite.scala | 13 ++- .../cluster/YarnSchedulerBackend.scala | 95 +++++++--------- 9 files changed, 169 insertions(+), 138 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala index 932ba16812bbb..6f320c524201c 100644 --- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala +++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala @@ -230,7 +230,7 @@ private[spark] class ExecutorAllocationManager( } } } - executor.scheduleAtFixedRate(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) + executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) } diff --git a/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala b/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala index a9df732df93ca..7a60f08aadd4f 100644 --- a/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala +++ b/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala @@ -21,6 +21,8 @@ import java.util.concurrent._ import java.util.concurrent.{Future => JFuture, ScheduledFuture => JScheduledFuture} import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.concurrent.Future +import scala.util.{Failure, Success} import scala.util.control.NonFatal import org.apache.spark.SparkConf @@ -79,11 +81,6 @@ private[spark] class StandaloneAppClient( private val registrationRetryThread = ThreadUtils.newDaemonSingleThreadScheduledExecutor("appclient-registration-retry-thread") - // A thread pool to perform receive then reply actions in a thread so as not to block the - // event loop. - private val askAndReplyThreadPool = - ThreadUtils.newDaemonCachedThreadPool("appclient-receive-and-reply-threadpool") - override def onStart(): Unit = { try { registerWithMaster(1) @@ -220,19 +217,13 @@ private[spark] class StandaloneAppClient( endpointRef: RpcEndpointRef, context: RpcCallContext, msg: T): Unit = { - // Create a thread to ask a message and reply with the result. Allow thread to be + // Ask a message and create a thread to reply with the result. Allow thread to be // interrupted during shutdown, otherwise context must be notified of NonFatal errors. 
- askAndReplyThreadPool.execute(new Runnable { - override def run(): Unit = { - try { - context.reply(endpointRef.askWithRetry[Boolean](msg)) - } catch { - case ie: InterruptedException => // Cancelled - case NonFatal(t) => - context.sendFailure(t) - } - } - }) + endpointRef.ask[Boolean](msg).andThen { + case Success(b) => context.reply(b) + case Failure(ie: InterruptedException) => // Cancelled + case Failure(NonFatal(t)) => context.sendFailure(t) + }(ThreadUtils.sameThread) } override def onDisconnected(address: RpcAddress): Unit = { @@ -272,7 +263,6 @@ private[spark] class StandaloneAppClient( registrationRetryThread.shutdownNow() registerMasterFutures.get.foreach(_.cancel(true)) registerMasterThreadPool.shutdownNow() - askAndReplyThreadPool.shutdownNow() } } @@ -301,12 +291,12 @@ private[spark] class StandaloneAppClient( * * @return whether the request is acknowledged. */ - def requestTotalExecutors(requestedTotal: Int): Boolean = { + def requestTotalExecutors(requestedTotal: Int): Future[Boolean] = { if (endpoint.get != null && appId.get != null) { - endpoint.get.askWithRetry[Boolean](RequestExecutors(appId.get, requestedTotal)) + endpoint.get.ask[Boolean](RequestExecutors(appId.get, requestedTotal)) } else { logWarning("Attempted to request executors before driver fully initialized.") - false + Future.successful(false) } } @@ -314,12 +304,12 @@ private[spark] class StandaloneAppClient( * Kill the given list of executors through the Master. * @return whether the kill request is acknowledged. */ - def killExecutors(executorIds: Seq[String]): Boolean = { + def killExecutors(executorIds: Seq[String]): Future[Boolean] = { if (endpoint.get != null && appId.get != null) { - endpoint.get.askWithRetry[Boolean](KillExecutors(appId.get, executorIds)) + endpoint.get.ask[Boolean](KillExecutors(appId.get, executorIds)) } else { logWarning("Attempted to kill executors before driver fully initialized.") - false + Future.successful(false) } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala index 2db3a3bb81f61..6d26705377e9d 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala @@ -22,6 +22,8 @@ import java.util.concurrent.atomic.AtomicInteger import javax.annotation.concurrent.GuardedBy import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet} +import scala.concurrent.Future +import scala.concurrent.duration.Duration import org.apache.spark.{ExecutorAllocationClient, SparkEnv, SparkException, TaskState} import org.apache.spark.internal.Logging @@ -49,6 +51,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp protected val totalRegisteredExecutors = new AtomicInteger(0) protected val conf = scheduler.sc.conf private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf) + private val defaultAskTimeout = RpcUtils.askRpcTimeout(conf) // Submit tasks only after (registered resources / total expected resources) // is equal to at least this value, that is double between 0 and 1. 
private val _minRegisteredRatio = @@ -272,6 +275,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp // Remove a disconnected slave from the cluster private def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = { + logDebug(s"Asked to remove executor $executorId with reason $reason") executorDataMap.get(executorId) match { case Some(executorInfo) => // This must be synchronized because variables mutated @@ -446,19 +450,24 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp * Request an additional number of executors from the cluster manager. * @return whether the request is acknowledged. */ - final override def requestExecutors(numAdditionalExecutors: Int): Boolean = synchronized { + final override def requestExecutors(numAdditionalExecutors: Int): Boolean = { if (numAdditionalExecutors < 0) { throw new IllegalArgumentException( "Attempted to request a negative number of additional executor(s) " + s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!") } logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager") - logDebug(s"Number of pending executors is now $numPendingExecutors") - numPendingExecutors += numAdditionalExecutors - // Account for executors pending to be added or removed - val newTotal = numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size - doRequestTotalExecutors(newTotal) + val response = synchronized { + numPendingExecutors += numAdditionalExecutors + logDebug(s"Number of pending executors is now $numPendingExecutors") + + // Account for executors pending to be added or removed + doRequestTotalExecutors( + numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size) + } + + defaultAskTimeout.awaitResult(response) } /** @@ -479,19 +488,24 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: Map[String, Int] - ): Boolean = synchronized { + ): Boolean = { if (numExecutors < 0) { throw new IllegalArgumentException( "Attempted to request a negative number of executor(s) " + s"$numExecutors from the cluster manager. Please specify a positive number!") } - this.localityAwareTasks = localityAwareTasks - this.hostToLocalTaskCount = hostToLocalTaskCount + val response = synchronized { + this.localityAwareTasks = localityAwareTasks + this.hostToLocalTaskCount = hostToLocalTaskCount + + numPendingExecutors = + math.max(numExecutors - numExistingExecutors + executorsPendingToRemove.size, 0) - numPendingExecutors = - math.max(numExecutors - numExistingExecutors + executorsPendingToRemove.size, 0) - doRequestTotalExecutors(numExecutors) + doRequestTotalExecutors(numExecutors) + } + + defaultAskTimeout.awaitResult(response) } /** @@ -504,16 +518,17 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp * insufficient resources to satisfy the first request. We make the assumption here that the * cluster manager will eventually fulfill all requests when resources free up. * - * @return whether the request is acknowledged. + * @return a future whose evaluation indicates whether the request is acknowledged. */ - protected def doRequestTotalExecutors(requestedTotal: Int): Boolean = false + protected def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = + Future.successful(false) /** * Request that the cluster manager kill the specified executors. 
* @return whether the kill request is acknowledged. If list to kill is empty, it will return * false. */ - final override def killExecutors(executorIds: Seq[String]): Boolean = synchronized { + final override def killExecutors(executorIds: Seq[String]): Boolean = { killExecutors(executorIds, replace = false, force = false) } @@ -533,39 +548,53 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp final def killExecutors( executorIds: Seq[String], replace: Boolean, - force: Boolean): Boolean = synchronized { + force: Boolean): Boolean = { logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}") - val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains) - unknownExecutors.foreach { id => - logWarning(s"Executor to kill $id does not exist!") - } - // If an executor is already pending to be removed, do not kill it again (SPARK-9795) - // If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552) - val executorsToKill = knownExecutors - .filter { id => !executorsPendingToRemove.contains(id) } - .filter { id => force || !scheduler.isExecutorBusy(id) } - executorsToKill.foreach { id => executorsPendingToRemove(id) = !replace } - - // If we do not wish to replace the executors we kill, sync the target number of executors - // with the cluster manager to avoid allocating new ones. When computing the new target, - // take into account executors that are pending to be added or removed. - if (!replace) { - doRequestTotalExecutors( - numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size) - } else { - numPendingExecutors += knownExecutors.size + val response = synchronized { + val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains) + unknownExecutors.foreach { id => + logWarning(s"Executor to kill $id does not exist!") + } + + // If an executor is already pending to be removed, do not kill it again (SPARK-9795) + // If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552) + val executorsToKill = knownExecutors + .filter { id => !executorsPendingToRemove.contains(id) } + .filter { id => force || !scheduler.isExecutorBusy(id) } + executorsToKill.foreach { id => executorsPendingToRemove(id) = !replace } + + // If we do not wish to replace the executors we kill, sync the target number of executors + // with the cluster manager to avoid allocating new ones. When computing the new target, + // take into account executors that are pending to be added or removed. + val adjustTotalExecutors = + if (!replace) { + doRequestTotalExecutors( + numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size) + } else { + numPendingExecutors += knownExecutors.size + Future.successful(true) + } + + val killExecutors: Boolean => Future[Boolean] = + if (!executorsToKill.isEmpty) { + _ => doKillExecutors(executorsToKill) + } else { + _ => Future.successful(false) + } + + adjustTotalExecutors.flatMap(killExecutors)(ThreadUtils.sameThread) } - !executorsToKill.isEmpty && doKillExecutors(executorsToKill) + defaultAskTimeout.awaitResult(response) } /** * Kill the given list of executors through the cluster manager. * @return whether the kill request is acknowledged. 
*/ - protected def doKillExecutors(executorIds: Seq[String]): Boolean = false - + protected def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = + Future.successful(false) } private[spark] object CoarseGrainedSchedulerBackend { diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala index 8382fbe9ddb80..5068bf2e66b60 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala @@ -19,6 +19,8 @@ package org.apache.spark.scheduler.cluster import java.util.concurrent.Semaphore +import scala.concurrent.Future + import org.apache.spark.{SparkConf, SparkContext} import org.apache.spark.deploy.{ApplicationDescription, Command} import org.apache.spark.deploy.client.{StandaloneAppClient, StandaloneAppClientListener} @@ -173,12 +175,12 @@ private[spark] class StandaloneSchedulerBackend( * * @return whether the request is acknowledged. */ - protected override def doRequestTotalExecutors(requestedTotal: Int): Boolean = { + protected override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = { Option(client) match { case Some(c) => c.requestTotalExecutors(requestedTotal) case None => logWarning("Attempted to request executors before driver fully initialized.") - false + Future.successful(false) } } @@ -186,12 +188,12 @@ private[spark] class StandaloneSchedulerBackend( * Kill the given list of executors through the Master. * @return whether the kill request is acknowledged. */ - protected override def doKillExecutors(executorIds: Seq[String]): Boolean = { + protected override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = { Option(client) match { case Some(c) => c.killExecutors(executorIds) case None => logWarning("Attempted to kill executors before driver fully initialized.") - false + Future.successful(false) } } diff --git a/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala b/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala index 5f59c176ab782..915d7a1b8b164 100644 --- a/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala +++ b/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala @@ -21,6 +21,7 @@ import java.util.concurrent.{ExecutorService, TimeUnit} import scala.collection.Map import scala.collection.mutable +import scala.concurrent.Future import scala.concurrent.duration._ import org.mockito.Matchers @@ -269,13 +270,13 @@ private class FakeSchedulerBackend( clusterManagerEndpoint: RpcEndpointRef) extends CoarseGrainedSchedulerBackend(scheduler, rpcEnv) { - protected override def doRequestTotalExecutors(requestedTotal: Int): Boolean = { - clusterManagerEndpoint.askWithRetry[Boolean]( + protected override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = { + clusterManagerEndpoint.ask[Boolean]( RequestExecutors(requestedTotal, localityAwareTasks, hostToLocalTaskCount)) } - protected override def doKillExecutors(executorIds: Seq[String]): Boolean = { - clusterManagerEndpoint.askWithRetry[Boolean](KillExecutors(executorIds)) + protected override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = { + clusterManagerEndpoint.ask[Boolean](KillExecutors(executorIds)) } } diff --git a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala 
b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala index f6ef9d15ddee2..416efaa75b8d2 100644 --- a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala @@ -22,7 +22,7 @@ import java.util.concurrent.ConcurrentLinkedQueue import scala.concurrent.duration._ import org.scalatest.BeforeAndAfterAll -import org.scalatest.concurrent.Eventually._ +import org.scalatest.concurrent.{Eventually, ScalaFutures} import org.apache.spark._ import org.apache.spark.deploy.{ApplicationDescription, Command} @@ -36,7 +36,12 @@ import org.apache.spark.util.Utils /** * End-to-end tests for application client in standalone mode. */ -class AppClientSuite extends SparkFunSuite with LocalSparkContext with BeforeAndAfterAll { +class AppClientSuite + extends SparkFunSuite + with LocalSparkContext + with BeforeAndAfterAll + with Eventually + with ScalaFutures { private val numWorkers = 2 private val conf = new SparkConf() private val securityManager = new SecurityManager(conf) @@ -93,7 +98,12 @@ class AppClientSuite extends SparkFunSuite with LocalSparkContext with BeforeAnd // Send message to Master to request Executors, verify request by change in executor limit val numExecutorsRequested = 1 - assert(ci.client.requestTotalExecutors(numExecutorsRequested)) + whenReady( + ci.client.requestTotalExecutors(numExecutorsRequested), + timeout(10.seconds), + interval(10.millis)) { acknowledged => + assert(acknowledged) + } eventually(timeout(10.seconds), interval(10.millis)) { val apps = getApplications() @@ -101,10 +111,12 @@ class AppClientSuite extends SparkFunSuite with LocalSparkContext with BeforeAnd } // Send request to kill executor, verify request was made - assert { - val apps = getApplications() - val executorId: String = apps.head.executors.head._2.fullId - ci.client.killExecutors(Seq(executorId)) + val executorId: String = getApplications().head.executors.head._2.fullId + whenReady( + ci.client.killExecutors(Seq(executorId)), + timeout(10.seconds), + interval(10.millis)) { acknowledged => + assert(acknowledged) } // Issue stop command for Client to disconnect from Master @@ -122,7 +134,9 @@ class AppClientSuite extends SparkFunSuite with LocalSparkContext with BeforeAnd val ci = new AppClientInst(masterRpcEnv.address.toSparkURL) // requests to master should fail immediately - assert(ci.client.requestTotalExecutors(3) === false) + whenReady(ci.client.requestTotalExecutors(3), timeout(1.seconds)) { success => + assert(success === false) + } } // =============================== diff --git a/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala index fde1fb3228020..a64b5768c57b2 100644 --- a/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala +++ b/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala @@ -23,6 +23,7 @@ import java.util.concurrent.locks.ReentrantLock import scala.collection.JavaConverters._ import scala.collection.mutable +import scala.concurrent.Future import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, _} @@ -606,7 +607,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend( super.applicationId } - override def doRequestTotalExecutors(requestedTotal: Int): Boolean = { + override def doRequestTotalExecutors(requestedTotal: 
Int): Future[Boolean] = Future.successful { // We don't truly know if we can fulfill the full amount of executors // since at coarse grain it depends on the amount of slaves available. logInfo("Capping the total amount of executors to " + requestedTotal) @@ -614,7 +615,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend( true } - override def doKillExecutors(executorIds: Seq[String]): Boolean = { + override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = Future.successful { if (mesosDriver == null) { logWarning("Asked to kill executors before the Mesos driver was started.") false diff --git a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala index d98ddb2700fe0..6948be0ead5eb 100644 --- a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala +++ b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala @@ -19,6 +19,7 @@ package org.apache.spark.scheduler.cluster.mesos import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer +import scala.concurrent.duration._ import scala.concurrent.Promise import scala.reflect.ClassTag @@ -27,6 +28,7 @@ import org.apache.mesos.Protos._ import org.mockito.Matchers import org.mockito.Matchers._ import org.mockito.Mockito._ +import org.scalatest.concurrent.ScalaFutures import org.scalatest.mock.MockitoSugar import org.scalatest.BeforeAndAfter @@ -40,7 +42,8 @@ import org.apache.spark.scheduler.cluster.mesos.Utils._ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkContext with MockitoSugar - with BeforeAndAfter { + with BeforeAndAfter + with ScalaFutures { private var sparkConf: SparkConf = _ private var driver: SchedulerDriver = _ @@ -50,6 +53,10 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite private var driverEndpoint: RpcEndpointRef = _ @volatile private var stopCalled = false + // All 'requests' to the scheduler run immediately on the same thread, so + // demand that all futures have their value available immediately. 
+ implicit override val patienceConfig = PatienceConfig(timeout = 0.seconds) + test("mesos supports killing and limiting executors") { setBackend() sparkConf.set("spark.driver.host", "driverHost") @@ -64,8 +71,8 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite verifyTaskLaunched(driver, "o1") // kills executors - backend.doRequestTotalExecutors(0) - assert(backend.doKillExecutors(Seq("0"))) + assert(backend.doRequestTotalExecutors(0).futureValue) + assert(backend.doKillExecutors(Seq("0")).futureValue) val taskID0 = createTaskId("0") verify(driver, times(1)).killTask(taskID0) diff --git a/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala index ea63ff5dc1580..2f9ea1911fd61 100644 --- a/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala +++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala @@ -18,6 +18,7 @@ package org.apache.spark.scheduler.cluster import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success} import scala.util.control.NonFatal import org.apache.hadoop.yarn.api.records.{ApplicationAttemptId, ApplicationId} @@ -124,28 +125,16 @@ private[spark] abstract class YarnSchedulerBackend( * Request executors from the ApplicationMaster by specifying the total number desired. * This includes executors already pending or running. */ - override def doRequestTotalExecutors(requestedTotal: Int): Boolean = { - val r = RequestExecutors(requestedTotal, localityAwareTasks, hostToLocalTaskCount) - yarnSchedulerEndpoint.amEndpoint match { - case Some(am) => - try { - am.askWithRetry[Boolean](r) - } catch { - case NonFatal(e) => - logError(s"Sending $r to AM was unsuccessful", e) - return false - } - case None => - logWarning("Attempted to request executors before the AM has registered!") - return false - } + override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = { + yarnSchedulerEndpointRef.ask[Boolean]( + RequestExecutors(requestedTotal, localityAwareTasks, hostToLocalTaskCount)) } /** * Request that the ApplicationMaster kill the specified executors. 
*/ - override def doKillExecutors(executorIds: Seq[String]): Boolean = { - yarnSchedulerEndpointRef.askWithRetry[Boolean](KillExecutors(executorIds)) + override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = { + yarnSchedulerEndpointRef.ask[Boolean](KillExecutors(executorIds)) } override def sufficientResourcesRegistered(): Boolean = { @@ -221,37 +210,37 @@ private[spark] abstract class YarnSchedulerBackend( */ private class YarnSchedulerEndpoint(override val rpcEnv: RpcEnv) extends ThreadSafeRpcEndpoint with Logging { - var amEndpoint: Option[RpcEndpointRef] = None - - private val askAmThreadPool = - ThreadUtils.newDaemonCachedThreadPool("yarn-scheduler-ask-am-thread-pool") - implicit val askAmExecutor = ExecutionContext.fromExecutor(askAmThreadPool) + private var amEndpoint: Option[RpcEndpointRef] = None private[YarnSchedulerBackend] def handleExecutorDisconnectedFromDriver( executorId: String, executorRpcAddress: RpcAddress): Unit = { - amEndpoint match { + val removeExecutorMessage = amEndpoint match { case Some(am) => val lossReasonRequest = GetExecutorLossReason(executorId) - val future = am.ask[ExecutorLossReason](lossReasonRequest, askTimeout) - future onSuccess { - case reason: ExecutorLossReason => - driverEndpoint.askWithRetry[Boolean](RemoveExecutor(executorId, reason)) - } - future onFailure { - case NonFatal(e) => - logWarning(s"Attempted to get executor loss reason" + - s" for executor id ${executorId} at RPC address ${executorRpcAddress}," + - s" but got no response. Marking as slave lost.", e) - driverEndpoint.askWithRetry[Boolean](RemoveExecutor(executorId, SlaveLost())) - case t => throw t - } + am.ask[ExecutorLossReason](lossReasonRequest, askTimeout) + .map { reason => RemoveExecutor(executorId, reason) }(ThreadUtils.sameThread) + .recover { + case NonFatal(e) => + logWarning(s"Attempted to get executor loss reason" + + s" for executor id ${executorId} at RPC address ${executorRpcAddress}," + + s" but got no response. 
Marking as slave lost.", e) + RemoveExecutor(executorId, SlaveLost()) + }(ThreadUtils.sameThread) case None => logWarning("Attempted to check for an executor loss reason" + " before the AM has registered!") - driverEndpoint.askWithRetry[Boolean]( - RemoveExecutor(executorId, SlaveLost("AM is not yet registered."))) + Future.successful(RemoveExecutor(executorId, SlaveLost("AM is not yet registered."))) } + + removeExecutorMessage + .flatMap { message => + driverEndpoint.ask[Boolean](message) + }(ThreadUtils.sameThread) + .onFailure { + case NonFatal(e) => logError( + s"Error requesting driver to remove executor $executorId after disconnection.", e) + }(ThreadUtils.sameThread) } override def receive: PartialFunction[Any, Unit] = { @@ -269,9 +258,13 @@ private[spark] abstract class YarnSchedulerBackend( case AddWebUIFilter(filterName, filterParams, proxyBase) => addWebUIFilter(filterName, filterParams, proxyBase) - case RemoveExecutor(executorId, reason) => + case r @ RemoveExecutor(executorId, reason) => logWarning(reason.toString) - removeExecutor(executorId, reason) + driverEndpoint.ask[Boolean](r).onFailure { + case e => + logError("Error requesting driver to remove executor" + + s" $executorId for reason $reason", e) + }(ThreadUtils.sameThread) } @@ -279,13 +272,12 @@ private[spark] abstract class YarnSchedulerBackend( case r: RequestExecutors => amEndpoint match { case Some(am) => - Future { - context.reply(am.askWithRetry[Boolean](r)) - } onFailure { - case NonFatal(e) => + am.ask[Boolean](r).andThen { + case Success(b) => context.reply(b) + case Failure(NonFatal(e)) => logError(s"Sending $r to AM was unsuccessful", e) context.sendFailure(e) - } + }(ThreadUtils.sameThread) case None => logWarning("Attempted to request executors before the AM has registered!") context.reply(false) @@ -294,13 +286,12 @@ private[spark] abstract class YarnSchedulerBackend( case k: KillExecutors => amEndpoint match { case Some(am) => - Future { - context.reply(am.askWithRetry[Boolean](k)) - } onFailure { - case NonFatal(e) => + am.ask[Boolean](k).andThen { + case Success(b) => context.reply(b) + case Failure(NonFatal(e)) => logError(s"Sending $k to AM was unsuccessful", e) context.sendFailure(e) - } + }(ThreadUtils.sameThread) case None => logWarning("Attempted to kill executors before the AM has registered!") context.reply(false) @@ -316,10 +307,6 @@ private[spark] abstract class YarnSchedulerBackend( amEndpoint = None } } - - override def onStop(): Unit = { - askAmThreadPool.shutdownNow() - } } } From 2be5f8d7e0819de03971d0af6fa310793d2d0e65 Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Thu, 1 Sep 2016 12:01:22 -0700 Subject: [PATCH 167/270] [SPARK-17263][SQL] Add hexadecimal literal parsing ## What changes were proposed in this pull request? This PR adds the ability to parse SQL (hexadecimal) binary literals (AKA bit strings). It follows the following syntax `X'[Hexadecimal Characters]+'`, for example: `X'01AB'` would create a binary the following binary array `0x01AB`. If an uneven number of hexadecimal characters is passed, then the upper 4 bits of the initial byte are kept empty, and the lower 4 bits are filled using the first character. For example `X'1C7'` would create the following binary array `0x01C7`. Binary data (Array[Byte]) does not have a proper `hashCode` and `equals` functions. This meant that comparing `Literal`s containing binary data was a pain. I have updated Literal.hashCode and Literal.equals to deal properly with binary data. ## How was this patch tested? 
Added tests to the `ExpressionParserSuite`, `SQLQueryTestSuite` and `ExpressionSQLBuilderSuite`. Author: Herman van Hovell Closes #14832 from hvanhovell/SPARK-17263. --- .../sql/catalyst/expressions/literals.scala | 26 ++++++++++-- .../sql/catalyst/parser/AstBuilder.scala | 29 ++++++++----- .../parser/ExpressionParserSuite.scala | 13 +++--- .../resources/sql-tests/inputs/literals.sql | 7 ++++ .../sql-tests/results/literals.sql.out | 42 ++++++++++++++++--- .../catalyst/ExpressionSQLBuilderSuite.scala | 1 + 6 files changed, 93 insertions(+), 25 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala index 41e3952f0e253..a597a17aadd99 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala @@ -19,7 +19,9 @@ package org.apache.spark.sql.catalyst.expressions import java.nio.charset.StandardCharsets import java.sql.{Date, Timestamp} +import java.util import java.util.Objects +import javax.xml.bind.DatatypeConverter import org.json4s.JsonAST._ @@ -168,14 +170,29 @@ case class Literal (value: Any, dataType: DataType) extends LeafExpression with override def foldable: Boolean = true override def nullable: Boolean = value == null - override def toString: String = if (value != null) value.toString else "null" + override def toString: String = value match { + case null => "null" + case binary: Array[Byte] => s"0x" + DatatypeConverter.printHexBinary(binary) + case other => other.toString + } - override def hashCode(): Int = 31 * (31 * Objects.hashCode(dataType)) + Objects.hashCode(value) + override def hashCode(): Int = { + val valueHashCode = value match { + case null => 0 + case binary: Array[Byte] => util.Arrays.hashCode(binary) + case other => other.hashCode() + } + 31 * Objects.hashCode(dataType) + valueHashCode + } override def equals(other: Any): Boolean = other match { + case o: Literal if !dataType.equals(o.dataType) => false case o: Literal => - dataType.equals(o.dataType) && - (value == null && null == o.value || value != null && value.equals(o.value)) + (value, o.value) match { + case (null, null) => true + case (a: Array[Byte], b: Array[Byte]) => util.Arrays.equals(a, b) + case (a, b) => a != null && a.equals(b) + } case _ => false } @@ -269,6 +286,7 @@ case class Literal (value: Any, dataType: DataType) extends LeafExpression with case (v: Decimal, t: DecimalType) => v + "BD" case (v: Int, DateType) => s"DATE '${DateTimeUtils.toJavaDate(v)}'" case (v: Long, TimestampType) => s"TIMESTAMP('${DateTimeUtils.toJavaTimestamp(v)}')" + case (v: Array[Byte], BinaryType) => s"X'${DatatypeConverter.printHexBinary(v)}'" case _ => value.toString } } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 893db93368457..42fbc16d0396a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -18,6 +18,7 @@ package org.apache.spark.sql.catalyst.parser import java.sql.{Date, Timestamp} +import javax.xml.bind.DatatypeConverter import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer @@ -1215,19 +1216,27 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] 
with Logging { * {{{ * [TYPE] '[VALUE]' * }}} - * Currently Date and Timestamp typed literals are supported. - * - * TODO what the added value of this over casting? + * Currently Date, Timestamp and Binary typed literals are supported. */ override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) { val value = string(ctx.STRING) - ctx.identifier.getText.toUpperCase match { - case "DATE" => - Literal(Date.valueOf(value)) - case "TIMESTAMP" => - Literal(Timestamp.valueOf(value)) - case other => - throw new ParseException(s"Literals of type '$other' are currently not supported.", ctx) + val valueType = ctx.identifier.getText.toUpperCase + try { + valueType match { + case "DATE" => + Literal(Date.valueOf(value)) + case "TIMESTAMP" => + Literal(Timestamp.valueOf(value)) + case "X" => + val padding = if (value.length % 2 == 1) "0" else "" + Literal(DatatypeConverter.parseHexBinary(padding + value)) + case other => + throw new ParseException(s"Literals of type '$other' are currently not supported.", ctx) + } + } catch { + case e: IllegalArgumentException => + val message = Option(e.getMessage).getOrElse(s"Exception parsing $valueType") + throw new ParseException(message, ctx) } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala index dbc5db39aed98..4e399eef1fed8 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala @@ -331,16 +331,17 @@ class ExpressionParserSuite extends PlanTest { test("type constructors") { // Dates. assertEqual("dAte '2016-03-11'", Literal(Date.valueOf("2016-03-11"))) - intercept[IllegalArgumentException] { - parseExpression("DAtE 'mar 11 2016'") - } + intercept("DAtE 'mar 11 2016'") // Timestamps. assertEqual("tImEstAmp '2016-03-11 20:54:00.000'", Literal(Timestamp.valueOf("2016-03-11 20:54:00.000"))) - intercept[IllegalArgumentException] { - parseExpression("timestamP '2016-33-11 20:54:00.000'") - } + intercept("timestamP '2016-33-11 20:54:00.000'") + + // Binary. + assertEqual("X'A'", Literal(Array(0x0a).map(_.toByte))) + assertEqual("x'A10C'", Literal(Array(0xa1, 0x0c).map(_.toByte))) + intercept("x'A1OC'") // Unsupported datatype. 
intercept("GEO '(10,-6)'", "Literals of type 'GEO' are currently not supported.") diff --git a/sql/core/src/test/resources/sql-tests/inputs/literals.sql b/sql/core/src/test/resources/sql-tests/inputs/literals.sql index a532a598c6bf9..40dceb19cfc5b 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/literals.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/literals.sql @@ -96,3 +96,10 @@ select 90912830918230182310293801923652346786BD, 123.0E-28BD, 123.08BD; -- out of range big decimal select 1.20E-38BD; + +-- hexadecimal binary literal +select x'2379ACFe'; + +-- invalid hexadecimal binary literal +select X'XuZ'; + diff --git a/sql/core/src/test/resources/sql-tests/results/literals.sql.out b/sql/core/src/test/resources/sql-tests/results/literals.sql.out index 85629f7ba813a..e2d8daef9868f 100644 --- a/sql/core/src/test/resources/sql-tests/results/literals.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/literals.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 40 +-- Number of queries: 42 -- !query 0 @@ -289,8 +289,13 @@ select date 'mar 11 2016' -- !query 31 schema struct<> -- !query 31 output -java.lang.IllegalArgumentException -null +org.apache.spark.sql.catalyst.parser.ParseException + +Exception parsing DATE(line 1, pos 7) + +== SQL == +select date 'mar 11 2016' +-------^^^ -- !query 32 @@ -306,8 +311,13 @@ select timestamp '2016-33-11 20:54:00.000' -- !query 33 schema struct<> -- !query 33 output -java.lang.IllegalArgumentException -Timestamp format must be yyyy-mm-dd hh:mm:ss[.fffffffff] +org.apache.spark.sql.catalyst.parser.ParseException + +Timestamp format must be yyyy-mm-dd hh:mm:ss[.fffffffff](line 1, pos 7) + +== SQL == +select timestamp '2016-33-11 20:54:00.000' +-------^^^ -- !query 34 @@ -376,3 +386,25 @@ DecimalType can only support precision up to 38(line 1, pos 7) == SQL == select 1.20E-38BD -------^^^ + + +-- !query 40 +select x'2379ACFe' +-- !query 40 schema +struct +-- !query 40 output +#y�� + + +-- !query 41 +select X'XuZ' +-- !query 41 schema +struct<> +-- !query 41 output +org.apache.spark.sql.catalyst.parser.ParseException + +contains illegal character for hexBinary: 0XuZ(line 1, pos 7) + +== SQL == +select X'XuZ' +-------^^^ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala index 43a218b4d14b3..d2b2f38fa1f71 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/ExpressionSQLBuilderSuite.scala @@ -40,6 +40,7 @@ class ExpressionSQLBuilderSuite extends SQLBuilderTest { checkSQL(Literal(Double.NegativeInfinity), "CAST('-Infinity' AS DOUBLE)") checkSQL(Literal(Double.NaN), "CAST('NaN' AS DOUBLE)") checkSQL(Literal(BigDecimal("10.0000000").underlying), "10.0000000BD") + checkSQL(Literal(Array(0x01, 0xA3).map(_.toByte)), "X'01A3'") checkSQL( Literal(Timestamp.valueOf("2016-01-01 00:00:00")), "TIMESTAMP('2016-01-01 00:00:00.0')") // TODO tests for decimals From 3893e8c576cf1a6decc18701267ce7cd8caaf521 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Thu, 1 Sep 2016 12:13:07 -0700 Subject: [PATCH 168/270] [SPARK-17331][CORE][MLLIB] Avoid allocating 0-length arrays ## What changes were proposed in this pull request? Avoid allocating some 0-length arrays, esp. in UTF8String, and by using Array.empty in Scala over Array[T]() ## How was this patch tested? 
Jenkins Author: Sean Owen Closes #14895 from srowen/SPARK-17331. --- .../java/org/apache/spark/unsafe/types/UTF8String.java | 8 ++++---- .../main/scala/org/apache/spark/MapOutputTracker.scala | 4 ++-- .../scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala | 2 +- docs/streaming-kafka-0-8-integration.md | 2 +- .../main/scala/org/apache/spark/ml/linalg/Matrices.scala | 6 +++--- .../ml/classification/MultinomialLogisticRegression.scala | 2 +- .../scala/org/apache/spark/ml/feature/OneHotEncoder.scala | 4 ++-- .../scala/org/apache/spark/mllib/linalg/Matrices.scala | 6 +++--- .../org/apache/spark/mllib/stat/test/ChiSqTest.scala | 2 +- .../main/java/org/apache/spark/sql/types/DataTypes.java | 2 +- .../spark/sql/execution/datasources/jdbc/JdbcUtils.scala | 2 +- 11 files changed, 20 insertions(+), 20 deletions(-) diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java index 54a54569240c0..dc03d893a5361 100644 --- a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java +++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java @@ -470,7 +470,7 @@ public UTF8String trim() { while (e >= 0 && getByte(e) <= 0x20 && getByte(e) >= 0x00) e--; if (s > e) { // empty string - return UTF8String.fromBytes(new byte[0]); + return EMPTY_UTF8; } else { return copyUTF8String(s, e); } @@ -482,7 +482,7 @@ public UTF8String trimLeft() { while (s < this.numBytes && getByte(s) <= 0x20 && getByte(s) >= 0x00) s++; if (s == this.numBytes) { // empty string - return UTF8String.fromBytes(new byte[0]); + return EMPTY_UTF8; } else { return copyUTF8String(s, this.numBytes - 1); } @@ -495,7 +495,7 @@ public UTF8String trimRight() { if (e < 0) { // empty string - return UTF8String.fromBytes(new byte[0]); + return EMPTY_UTF8; } else { return copyUTF8String(0, e); } @@ -761,7 +761,7 @@ public static UTF8String concatWs(UTF8String separator, UTF8String... inputs) { if (numInputs == 0) { // Return an empty string if there is no input, or all the inputs are null. - return fromBytes(new byte[0]); + return EMPTY_UTF8; } // Allocate a new byte array, and copy the inputs one by one into it. 
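A minimal sketch (not part of the patch) of the idiom this change applies in `UTF8String`: keep one cached empty value, here `EMPTY_UTF8`, and return it whenever the result is empty instead of allocating a fresh zero-length object. The Scala-side switch from `Array[T]()` to `Array.empty[T]` follows the same spirit, building the zero-length array directly instead of going through the varargs `Array.apply`. All names in the sketch are illustrative.

```scala
// Illustrative only: a single shared empty array is returned on the empty path,
// so no zero-length allocation happens per call. Sharing is safe as long as
// callers treat the returned array as read-only.
object TrimmedBytes {
  private val EmptyBytes: Array[Byte] = new Array[Byte](0) // allocated once, reused

  def apply(s: String): Array[Byte] = {
    val t = s.trim
    if (t.isEmpty) EmptyBytes else t.getBytes("UTF-8")
  }
}
```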
diff --git a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala index 486d535da0bcd..7f8f0f513134f 100644 --- a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala +++ b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala @@ -383,7 +383,7 @@ private[spark] class MapOutputTrackerMaster(conf: SparkConf, /** Register multiple map output information for the given shuffle */ def registerMapOutputs(shuffleId: Int, statuses: Array[MapStatus], changeEpoch: Boolean = false) { - mapStatuses.put(shuffleId, Array[MapStatus]() ++ statuses) + mapStatuses.put(shuffleId, statuses.clone()) if (changeEpoch) { incrementEpoch() } @@ -535,7 +535,7 @@ private[spark] class MapOutputTrackerMaster(conf: SparkConf, true case None => logDebug("cached status not found for : " + shuffleId) - statuses = mapStatuses.getOrElse(shuffleId, Array[MapStatus]()) + statuses = mapStatuses.getOrElse(shuffleId, Array.empty[MapStatus]) epochGotten = epoch false } diff --git a/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala index 32931d59acb18..b5738b9a95c36 100644 --- a/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala @@ -43,7 +43,7 @@ class ZippedWithIndexRDD[T: ClassTag](prev: RDD[T]) extends RDD[(T, Long)](prev) @transient private val startIndices: Array[Long] = { val n = prev.partitions.length if (n == 0) { - Array[Long]() + Array.empty } else if (n == 1) { Array(0L) } else { diff --git a/docs/streaming-kafka-0-8-integration.md b/docs/streaming-kafka-0-8-integration.md index d3fc9adfcf3ce..58b17aa4ce882 100644 --- a/docs/streaming-kafka-0-8-integration.md +++ b/docs/streaming-kafka-0-8-integration.md @@ -139,7 +139,7 @@ Next, we discuss how to use this approach in your streaming application.
    // Hold a reference to the current offset ranges, so it can be used downstream - var offsetRanges = Array[OffsetRange]() + var offsetRanges = Array.empty[OffsetRange] directKafkaStream.transform { rdd => offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges diff --git a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala index f1ecc65af1105..98080bb71ac83 100644 --- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala +++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala @@ -713,7 +713,7 @@ object SparseMatrix { "The expected number of nonzeros cannot be greater than Int.MaxValue.") val nnz = math.ceil(expected).toInt if (density == 0.0) { - new SparseMatrix(numRows, numCols, new Array[Int](numCols + 1), Array[Int](), Array[Double]()) + new SparseMatrix(numRows, numCols, new Array[Int](numCols + 1), Array.empty, Array.empty) } else if (density == 1.0) { val colPtrs = Array.tabulate(numCols + 1)(j => j * numRows) val rowIndices = Array.tabulate(size.toInt)(idx => idx % numRows) @@ -961,7 +961,7 @@ object Matrices { @Since("2.0.0") def horzcat(matrices: Array[Matrix]): Matrix = { if (matrices.isEmpty) { - return new DenseMatrix(0, 0, Array[Double]()) + return new DenseMatrix(0, 0, Array.empty) } else if (matrices.length == 1) { return matrices(0) } @@ -1020,7 +1020,7 @@ object Matrices { @Since("2.0.0") def vertcat(matrices: Array[Matrix]): Matrix = { if (matrices.isEmpty) { - return new DenseMatrix(0, 0, Array[Double]()) + return new DenseMatrix(0, 0, Array.empty) } else if (matrices.length == 1) { return matrices(0) } diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala index f85ac76a8d129..006f57c0ce260 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/MultinomialLogisticRegression.scala @@ -363,7 +363,7 @@ class MultinomialLogisticRegression @Since("2.1.0") ( rawCoefficients(coefIndex) } } else { - Array[Double]() + Array.empty } val coefficientArray: Array[Double] = Array.tabulate(numClasses * numFeatures) { i => diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala index 8b04b5de6fd2b..e8e28ba29c841 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/OneHotEncoder.scala @@ -164,8 +164,8 @@ class OneHotEncoder @Since("1.4.0") (@Since("1.4.0") override val uid: String) e // data transformation val size = outputAttrGroup.size val oneValue = Array(1.0) - val emptyValues = Array[Double]() - val emptyIndices = Array[Int]() + val emptyValues = Array.empty[Double] + val emptyIndices = Array.empty[Int] val encode = udf { label: Double => if (label < size) { Vectors.sparse(size, Array(label.toInt), oneValue) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala index 4c39cf17f4271..ad882c969aa8a 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala @@ -842,7 +842,7 @@ object SparseMatrix { "The expected number of nonzeros cannot be 
greater than Int.MaxValue.") val nnz = math.ceil(expected).toInt if (density == 0.0) { - new SparseMatrix(numRows, numCols, new Array[Int](numCols + 1), Array[Int](), Array[Double]()) + new SparseMatrix(numRows, numCols, new Array[Int](numCols + 1), Array.empty, Array.empty) } else if (density == 1.0) { val colPtrs = Array.tabulate(numCols + 1)(j => j * numRows) val rowIndices = Array.tabulate(size.toInt)(idx => idx % numRows) @@ -1098,7 +1098,7 @@ object Matrices { @Since("1.3.0") def horzcat(matrices: Array[Matrix]): Matrix = { if (matrices.isEmpty) { - return new DenseMatrix(0, 0, Array[Double]()) + return new DenseMatrix(0, 0, Array.empty) } else if (matrices.length == 1) { return matrices(0) } @@ -1157,7 +1157,7 @@ object Matrices { @Since("1.3.0") def vertcat(matrices: Array[Matrix]): Matrix = { if (matrices.isEmpty) { - return new DenseMatrix(0, 0, Array[Double]()) + return new DenseMatrix(0, 0, Array.empty[Double]) } else if (matrices.length == 1) { return matrices(0) } diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/ChiSqTest.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/ChiSqTest.scala index da5df9bf45e5a..9a63b8a5d63db 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/ChiSqTest.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/ChiSqTest.scala @@ -146,7 +146,7 @@ private[stat] object ChiSqTest extends Logging { * Uniform distribution is assumed when `expected` is not passed in. */ def chiSquared(observed: Vector, - expected: Vector = Vectors.dense(Array[Double]()), + expected: Vector = Vectors.dense(Array.empty[Double]), methodName: String = PEARSON.name): ChiSqTestResult = { // Validate input arguments diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java index 24adeadf95675..747ab1809fc0a 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java @@ -191,7 +191,7 @@ public static StructField createStructField(String name, DataType dataType, bool * Creates a StructType with the given list of StructFields ({@code fields}). */ public static StructType createStructType(List fields) { - return createStructType(fields.toArray(new StructField[0])); + return createStructType(fields.toArray(new StructField[fields.size()])); } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala index cbd504603bbf4..37153e545a0b8 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala @@ -322,6 +322,7 @@ object JdbcUtils extends Logging { conn.commit() } committed = true + Iterator.empty } catch { case e: SQLException => val cause = e.getNextException @@ -351,7 +352,6 @@ object JdbcUtils extends Logging { } } } - Array[Byte]().iterator } /** From edb45734f43216b352bfaaef00faaf43bbac38bf Mon Sep 17 00:00:00 2001 From: Marcelo Vanzin Date: Thu, 1 Sep 2016 14:02:58 -0700 Subject: [PATCH 169/270] [SPARK-16533][HOTFIX] Fix compilation on Scala 2.10. No idea why it was failing (the needed import was there), but this makes things work. Author: Marcelo Vanzin Closes #14925 from vanzin/SPARK-16533. 
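For readers puzzling over this hotfix: both spellings build the same `FiniteDuration`; the fix simply swaps the `DurationInt` implicit enrichment (which the Scala 2.10 build failed to resolve here, for reasons the author notes were unclear) for the explicit `Duration` factory. A quick illustration, not taken from the patch:

```scala
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._

// Equivalent values; the second form needs no implicit conversion on the Int literal.
val viaImplicit: FiniteDuration = 0.seconds
val viaFactory: FiniteDuration = Duration(0, TimeUnit.SECONDS)
assert(viaImplicit == viaFactory)
```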
--- .../mesos/MesosCoarseGrainedSchedulerBackendSuite.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala index 6948be0ead5eb..bbc79dd1eda07 100644 --- a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala +++ b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala @@ -17,6 +17,8 @@ package org.apache.spark.scheduler.cluster.mesos +import java.util.concurrent.TimeUnit + import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer import scala.concurrent.duration._ @@ -55,7 +57,7 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite // All 'requests' to the scheduler run immediately on the same thread, so // demand that all futures have their value available immediately. - implicit override val patienceConfig = PatienceConfig(timeout = 0.seconds) + implicit override val patienceConfig = PatienceConfig(timeout = Duration(0, TimeUnit.SECONDS)) test("mesos supports killing and limiting executors") { setBackend() From 473d78649dec7583bcc4ec24b6f38303c38e81a2 Mon Sep 17 00:00:00 2001 From: Brian Cho Date: Thu, 1 Sep 2016 14:13:17 -0700 Subject: [PATCH 170/270] [SPARK-16926] [SQL] Remove partition columns from partition metadata. ## What changes were proposed in this pull request? This removes partition columns from column metadata of partitions to match tables. A change introduced in SPARK-14388 removed partition columns from the column metadata of tables, but not for partitions. This causes TableReader to believe that the schema is different between table and partition, and create an unnecessary conversion object inspector in TableReader. ## How was this patch tested? Existing unit tests. Author: Brian Cho Closes #14515 from dafrista/partition-columns-metadata. --- .../org/apache/spark/sql/hive/MetastoreRelation.scala | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala index d62bc983d0279..a90da98811f5d 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala @@ -161,7 +161,13 @@ private[hive] case class MetastoreRelation( val sd = new org.apache.hadoop.hive.metastore.api.StorageDescriptor() tPartition.setSd(sd) - sd.setCols(catalogTable.schema.map(toHiveColumn).asJava) + + // Note: In Hive the schema and partition columns must be disjoint sets + val schema = catalogTable.schema.map(toHiveColumn).filter { c => + !catalogTable.partitionColumnNames.contains(c.getName) + } + sd.setCols(schema.asJava) + p.storage.locationUri.foreach(sd.setLocation) p.storage.inputFormat.foreach(sd.setInputFormat) p.storage.outputFormat.foreach(sd.setOutputFormat) From e388bd54499cb4c26a0e14efd47af0c684ca250f Mon Sep 17 00:00:00 2001 From: Yucai Yu Date: Thu, 1 Sep 2016 14:13:38 -0700 Subject: [PATCH 171/270] [SPARK-16732][SQL] Remove unused codes in subexpressionEliminationForWholeStageCodegen ## What changes were proposed in this pull request? Some codes in subexpressionEliminationForWholeStageCodegen are never used actually. Remove them using this PR. 
## How was this patch tested? Local unit tests. Author: Yucai Yu Closes #14366 from yucai/subExpr_unused_codes. --- .../sql/catalyst/expressions/codegen/CodeGenerator.scala | 4 ---- 1 file changed, 4 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala index 4bd9ee03f96dd..28064a5cfa2ee 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala @@ -662,10 +662,6 @@ class CodegenContext { val commonExprs = equivalentExpressions.getAllEquivalentExprs.filter(_.size > 1) val codes = commonExprs.map { e => val expr = e.head - val fnName = freshName("evalExpr") - val isNull = s"${fnName}IsNull" - val value = s"${fnName}Value" - // Generate the code for this expression tree. val code = expr.genCode(this) val state = SubExprEliminationState(code.isNull, code.value) From d314677cfd9cb4140005765938841bae9dc48a2d Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Thu, 1 Sep 2016 15:32:07 -0700 Subject: [PATCH 172/270] [SPARK-16461][SQL] Support partition batch pruning with `<=>` predicate in InMemoryTableScanExec ## What changes were proposed in this pull request? It seems `EqualNullSafe` filter was missed for batch pruneing partitions in cached tables. It seems supporting this improves the performance roughly 5 times faster. Running the codes below: ```scala test("Null-safe equal comparison") { val N = 20000000 val df = spark.range(N).repartition(20) val benchmark = new Benchmark("Null-safe equal comparison", N) df.createOrReplaceTempView("t") spark.catalog.cacheTable("t") sql("select id from t where id <=> 1").collect() benchmark.addCase("Null-safe equal comparison", 10) { _ => sql("select id from t where id <=> 1").collect() } benchmark.run() } ``` produces the results below: **Before:** ``` Running benchmark: Null-safe equal comparison Running case: Null-safe equal comparison Stopped after 10 iterations, 2098 ms Java HotSpot(TM) 64-Bit Server VM 1.8.0_45-b14 on Mac OS X 10.11.5 Intel(R) Core(TM) i7-4850HQ CPU 2.30GHz Null-safe equal comparison: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ Null-safe equal comparison 204 / 210 98.1 10.2 1.0X ``` **After:** ``` Running benchmark: Null-safe equal comparison Running case: Null-safe equal comparison Stopped after 10 iterations, 478 ms Java HotSpot(TM) 64-Bit Server VM 1.8.0_45-b14 on Mac OS X 10.11.5 Intel(R) Core(TM) i7-4850HQ CPU 2.30GHz Null-safe equal comparison: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ Null-safe equal comparison 42 / 48 474.1 2.1 1.0X ``` ## How was this patch tested? Unit tests in `PartitionBatchPruningSuite`. Author: hyukjinkwon Closes #14117 from HyukjinKwon/SPARK-16461. 
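A brief illustration (not part of the patch) of why `EqualNullSafe` can reuse the same min/max statistics check as `EqualTo`: `<=>` never evaluates to NULL, and when the literal operand is non-NULL it can only match rows equal to that literal, so a batch whose `[lowerBound, upperBound]` range excludes the literal cannot contain a match. Assuming an existing `spark` session:

```scala
// Null-safe equality semantics (standard Spark SQL behavior):
//   1 <=> 1       => true
//   1 <=> NULL    => false   (plain `=` would yield NULL here)
//   NULL <=> NULL => true
val row = spark.sql("SELECT 1 <=> 1 AS a, 1 <=> NULL AS b, NULL <=> NULL AS c").head()
assert(row.getBoolean(0) && !row.getBoolean(1) && row.getBoolean(2))
```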
--- .../spark/sql/execution/columnar/InMemoryTableScanExec.scala | 5 +++++ .../sql/execution/columnar/PartitionBatchPruningSuite.scala | 2 ++ 2 files changed, 7 insertions(+) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryTableScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryTableScanExec.scala index b86825902ab3d..b87016d5a5696 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryTableScanExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryTableScanExec.scala @@ -65,6 +65,11 @@ case class InMemoryTableScanExec( case EqualTo(l: Literal, a: AttributeReference) => statsFor(a).lowerBound <= l && l <= statsFor(a).upperBound + case EqualNullSafe(a: AttributeReference, l: Literal) => + statsFor(a).lowerBound <= l && l <= statsFor(a).upperBound + case EqualNullSafe(l: Literal, a: AttributeReference) => + statsFor(a).lowerBound <= l && l <= statsFor(a).upperBound + case LessThan(a: AttributeReference, l: Literal) => statsFor(a).lowerBound < l case LessThan(l: Literal, a: AttributeReference) => l < statsFor(a).upperBound diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala index b99cd67a6344c..9d862cfdecb21 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala @@ -85,6 +85,8 @@ class PartitionBatchPruningSuite // Comparisons checkBatchPruning("SELECT key FROM pruningData WHERE key = 1", 1, 1)(Seq(1)) checkBatchPruning("SELECT key FROM pruningData WHERE 1 = key", 1, 1)(Seq(1)) + checkBatchPruning("SELECT key FROM pruningData WHERE key <=> 1", 1, 1)(Seq(1)) + checkBatchPruning("SELECT key FROM pruningData WHERE 1 <=> key", 1, 1)(Seq(1)) checkBatchPruning("SELECT key FROM pruningData WHERE key < 12", 1, 2)(1 to 11) checkBatchPruning("SELECT key FROM pruningData WHERE key <= 11", 1, 2)(1 to 11) checkBatchPruning("SELECT key FROM pruningData WHERE key > 88", 1, 2)(89 to 100) From 15539e54c2650a164f09c072f8fae934bb0468c9 Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Thu, 1 Sep 2016 16:45:26 -0700 Subject: [PATCH 173/270] [SPARK-17355] Workaround for HIVE-14684 / HiveResultSetMetaData.isSigned exception ## What changes were proposed in this pull request? Attempting to use Spark SQL's JDBC data source against the Hive ThriftServer results in a `java.sql.SQLException: Method` not supported exception from `org.apache.hive.jdbc.HiveResultSetMetaData.isSigned`. Here are two user reports of this issue: - https://stackoverflow.com/questions/34067686/spark-1-5-1-not-working-with-hive-jdbc-1-2-0 - https://stackoverflow.com/questions/32195946/method-not-supported-in-spark I have filed [HIVE-14684](https://issues.apache.org/jira/browse/HIVE-14684) to attempt to fix this in Hive by implementing the isSigned method, but in the meantime / for compatibility with older JDBC drivers I think we should add special-case error handling to work around this bug. This patch updates `JDBCRDD`'s `ResultSetMetadata` to schema conversion to catch the "Method not supported" exception from Hive and return `isSigned = true`. I believe that this is safe because, as far as I know, Hive does not support unsigned numeric types. ## How was this patch tested? 
Tested manually against a Spark Thrift Server. Author: Josh Rosen Closes #14911 from JoshRosen/hive-jdbc-workaround. --- .../sql/execution/datasources/jdbc/JDBCRDD.scala | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala index 8d9048ab82ac6..9b5088fbfd409 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala @@ -136,7 +136,16 @@ object JDBCRDD extends Logging { val typeName = rsmd.getColumnTypeName(i + 1) val fieldSize = rsmd.getPrecision(i + 1) val fieldScale = rsmd.getScale(i + 1) - val isSigned = rsmd.isSigned(i + 1) + val isSigned = { + try { + rsmd.isSigned(i + 1) + } catch { + // Workaround for HIVE-14684: + case e: SQLException if + e.getMessage == "Method not supported" && + rsmd.getClass.getName == "org.apache.hive.jdbc.HiveResultSetMetaData" => true + } + } val nullable = rsmd.isNullable(i + 1) != ResultSetMetaData.columnNoNulls val metadata = new MetadataBuilder() .putString("name", columnName) From 03d77af9ec4ce9a42affd6ab4381ae5bd3c79a5a Mon Sep 17 00:00:00 2001 From: Qifan Pu Date: Thu, 1 Sep 2016 16:56:35 -0700 Subject: [PATCH 174/270] [SPARK-16525] [SQL] Enable Row Based HashMap in HashAggregateExec ## What changes were proposed in this pull request? This PR is the second step for the following feature: For hash aggregation in Spark SQL, we use a fast aggregation hashmap to act as a "cache" in order to boost aggregation performance. Previously, the hashmap is backed by a `ColumnarBatch`. This has performance issues when we have wide schema for the aggregation table (large number of key fields or value fields). In this JIRA, we support another implementation of fast hashmap, which is backed by a `RowBatch`. We then automatically pick between the two implementations based on certain knobs. In this second-step PR, we enable `RowBasedHashMapGenerator` in `HashAggregateExec`. ## How was this patch tested? Added tests: `RowBasedAggregateHashMapSuite` and ` VectorizedAggregateHashMapSuite` Additional micro-benchmarks tests and TPCDS results will be added in a separate PR in the series. Author: Qifan Pu Author: ooq Closes #14176 from ooq/rowbasedfastaggmap-pr2. 
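To make the "fast first level / general fallback" structure described above concrete, here is a minimal, self-contained sketch of the lookup shape only. It is illustrative, not the real implementation: `HashAggregateExec` generates code over `UnsafeRow`s, with the fast map backed by a `RowBasedKeyValueBatch` (or `ColumnarBatch` in the vectorized variant) and the regular UnsafeRow hash map as the second level. All names below are hypothetical.

```scala
import scala.collection.mutable

// Two-level aggregation map: a bounded fast map is probed first; keys it cannot
// accommodate overflow into a general-purpose fallback map.
class TwoLevelMap[K, V](fastCapacity: Int, newBuffer: () => V) {
  private val fastMap = mutable.HashMap.empty[K, V]
  private val fallbackMap = mutable.HashMap.empty[K, V]

  /** Return the aggregation buffer for `key`, creating one on first sight. */
  def findOrInsert(key: K): V = fastMap.get(key) match {
    case Some(buf) => buf                                   // hit in the fast level
    case None if fastMap.size < fastCapacity && !fallbackMap.contains(key) =>
      val buf = newBuffer(); fastMap.put(key, buf); buf     // fast level still has room
    case None =>
      fallbackMap.getOrElseUpdate(key, newBuffer())         // overflow to second level
  }

  /** Both levels must be drained when emitting the partial aggregation output. */
  def iterator: Iterator[(K, V)] = fastMap.iterator ++ fallbackMap.iterator
}

// Example: counting keys during a partial aggregation.
val counts = new TwoLevelMap[String, Array[Long]](fastCapacity = 1 << 16, () => Array(0L))
Seq("a", "b", "a").foreach(k => counts.findOrInsert(k)(0) += 1)
```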
--- .../FixedLengthRowBasedKeyValueBatch.java | 8 +- .../aggregate/HashAggregateExec.scala | 290 ++++++++++++------ .../aggregate/RowBasedHashMapGenerator.scala | 12 +- .../apache/spark/sql/internal/SQLConf.scala | 17 +- .../spark/sql/AggregateHashMapSuite.scala | 79 +++++ .../spark/sql/DataFrameAggregateSuite.scala | 8 + .../benchmark/AggregateBenchmark.scala | 25 +- .../execution/AggregationQuerySuite.scala | 6 +- 8 files changed, 326 insertions(+), 119 deletions(-) create mode 100644 sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java index 85529f6a0aa1e..a88a315bf479f 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java @@ -165,10 +165,10 @@ private void freeCurrentPage() { protected FixedLengthRowBasedKeyValueBatch(StructType keySchema, StructType valueSchema, int maxRows, TaskMemoryManager manager) { super(keySchema, valueSchema, maxRows, manager); - klen = keySchema.defaultSize() - + UnsafeRow.calculateBitSetWidthInBytes(keySchema.length()); - vlen = valueSchema.defaultSize() - + UnsafeRow.calculateBitSetWidthInBytes(valueSchema.length()); + int keySize = keySchema.size() * 8; // each fixed-length field is stored in a 8-byte word + int valueSize = valueSchema.size() * 8; + klen = keySize + UnsafeRow.calculateBitSetWidthInBytes(keySchema.length()); + vlen = valueSize + UnsafeRow.calculateBitSetWidthInBytes(valueSchema.length()); recordLength = klen + vlen + 8; } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala index bd7efa606e0ce..59e132dfb252d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala @@ -17,6 +17,7 @@ package org.apache.spark.sql.execution.aggregate +import org.apache.spark.memory.TaskMemoryManager import org.apache.spark.TaskContext import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow @@ -279,9 +280,14 @@ case class HashAggregateExec( .map(_.asInstanceOf[DeclarativeAggregate]) private val bufferSchema = StructType.fromAttributes(aggregateBufferAttributes) - // The name for Vectorized HashMap - private var vectorizedHashMapTerm: String = _ - private var isVectorizedHashMapEnabled: Boolean = _ + // The name for Fast HashMap + private var fastHashMapTerm: String = _ + private var isFastHashMapEnabled: Boolean = false + + // whether a vectorized hashmap is used instead + // we have decided to always use the row-based hashmap, + // but the vectorized hashmap can still be switched on for testing and benchmarking purposes. 
+ private var isVectorizedHashMapEnabled: Boolean = false // The name for UnsafeRow HashMap private var hashMapTerm: String = _ @@ -307,6 +313,16 @@ case class HashAggregateExec( ) } + def getTaskMemoryManager(): TaskMemoryManager = { + TaskContext.get().taskMemoryManager() + } + + def getEmptyAggregationBuffer(): InternalRow = { + val initExpr = declFunctions.flatMap(f => f.initialValues) + val initialBuffer = UnsafeProjection.create(initExpr)(EmptyRow) + initialBuffer + } + /** * This is called by generated Java class, should be public. */ @@ -459,52 +475,91 @@ case class HashAggregateExec( } /** - * Using the vectorized hash map in HashAggregate is currently supported for all primitive - * data types during partial aggregation. However, we currently only enable the hash map for a - * subset of cases that've been verified to show performance improvements on our benchmarks - * subject to an internal conf that sets an upper limit on the maximum length of the aggregate - * key/value schema. - * + * A required check for any fast hash map implementation (basically the common requirements + * for row-based and vectorized). + * Currently fast hash map is supported for primitive data types during partial aggregation. * This list of supported use-cases should be expanded over time. */ - private def enableVectorizedHashMap(ctx: CodegenContext): Boolean = { - val schemaLength = (groupingKeySchema ++ bufferSchema).length + private def checkIfFastHashMapSupported(ctx: CodegenContext): Boolean = { val isSupported = (groupingKeySchema ++ bufferSchema).forall(f => ctx.isPrimitiveType(f.dataType) || f.dataType.isInstanceOf[DecimalType] || f.dataType.isInstanceOf[StringType]) && bufferSchema.nonEmpty && modes.forall(mode => mode == Partial || mode == PartialMerge) - // We do not support byte array based decimal type for aggregate values as - // ColumnVector.putDecimal for high-precision decimals doesn't currently support in-place + // For vectorized hash map, We do not support byte array based decimal type for aggregate values + // as ColumnVector.putDecimal for high-precision decimals doesn't currently support in-place // updates. Due to this, appending the byte array in the vectorized hash map can turn out to be // quite inefficient and can potentially OOM the executor. + // For row-based hash map, while decimal update is supported in UnsafeRow, we will just act + // conservative here, due to lack of testing and benchmarking. val isNotByteArrayDecimalType = bufferSchema.map(_.dataType).filter(_.isInstanceOf[DecimalType]) .forall(!DecimalType.isByteArrayDecimalType(_)) - isSupported && isNotByteArrayDecimalType && - schemaLength <= sqlContext.conf.vectorizedAggregateMapMaxColumns + isSupported && isNotByteArrayDecimalType + } + + private def enableTwoLevelHashMap(ctx: CodegenContext) = { + if (!checkIfFastHashMapSupported(ctx)) { + if (modes.forall(mode => mode == Partial || mode == PartialMerge) && !Utils.isTesting) { + logInfo("spark.sql.codegen.aggregate.map.twolevel.enable is set to true, but" + + " current version of codegened fast hashmap does not support this aggregate.") + } + } else { + isFastHashMapEnabled = true + + // This is for testing/benchmarking only. + // We enforce to first level to be a vectorized hashmap, instead of the default row-based one. 
+ sqlContext.getConf("spark.sql.codegen.aggregate.map.vectorized.enable", null) match { + case "true" => isVectorizedHashMapEnabled = true + case null | "" | "false" => None } + } } private def doProduceWithKeys(ctx: CodegenContext): String = { val initAgg = ctx.freshName("initAgg") ctx.addMutableState("boolean", initAgg, s"$initAgg = false;") - isVectorizedHashMapEnabled = enableVectorizedHashMap(ctx) - vectorizedHashMapTerm = ctx.freshName("vectorizedHashMap") - val vectorizedHashMapClassName = ctx.freshName("VectorizedHashMap") - val vectorizedHashMapGenerator = new VectorizedHashMapGenerator(ctx, aggregateExpressions, - vectorizedHashMapClassName, groupingKeySchema, bufferSchema) + if (sqlContext.conf.enableTwoLevelAggMap) { + enableTwoLevelHashMap(ctx) + } else { + sqlContext.getConf("spark.sql.codegen.aggregate.map.vectorized.enable", null) match { + case "true" => logWarning("Two level hashmap is disabled but vectorized hashmap is " + + "enabled.") + case null | "" | "false" => None + } + } + fastHashMapTerm = ctx.freshName("fastHashMap") + val fastHashMapClassName = ctx.freshName("FastHashMap") + val fastHashMapGenerator = + if (isVectorizedHashMapEnabled) { + new VectorizedHashMapGenerator(ctx, aggregateExpressions, + fastHashMapClassName, groupingKeySchema, bufferSchema) + } else { + new RowBasedHashMapGenerator(ctx, aggregateExpressions, + fastHashMapClassName, groupingKeySchema, bufferSchema) + } + + val thisPlan = ctx.addReferenceObj("plan", this) + // Create a name for iterator from vectorized HashMap - val iterTermForVectorizedHashMap = ctx.freshName("vectorizedHashMapIter") - if (isVectorizedHashMapEnabled) { - ctx.addMutableState(vectorizedHashMapClassName, vectorizedHashMapTerm, - s"$vectorizedHashMapTerm = new $vectorizedHashMapClassName();") - ctx.addMutableState( - "java.util.Iterator", - iterTermForVectorizedHashMap, "") + val iterTermForFastHashMap = ctx.freshName("fastHashMapIter") + if (isFastHashMapEnabled) { + if (isVectorizedHashMapEnabled) { + ctx.addMutableState(fastHashMapClassName, fastHashMapTerm, + s"$fastHashMapTerm = new $fastHashMapClassName();") + ctx.addMutableState( + "java.util.Iterator", + iterTermForFastHashMap, "") + } else { + ctx.addMutableState(fastHashMapClassName, fastHashMapTerm, + s"$fastHashMapTerm = new $fastHashMapClassName(" + + s"agg_plan.getTaskMemoryManager(), agg_plan.getEmptyAggregationBuffer());") + ctx.addMutableState( + "org.apache.spark.unsafe.KVIterator", + iterTermForFastHashMap, "") + } } // create hashMap - val thisPlan = ctx.addReferenceObj("plan", this) hashMapTerm = ctx.freshName("hashMap") val hashMapClassName = classOf[UnsafeFixedWidthAggregationMap].getName ctx.addMutableState(hashMapClassName, hashMapTerm, "") @@ -518,15 +573,30 @@ case class HashAggregateExec( val doAgg = ctx.freshName("doAggregateWithKeys") val peakMemory = metricTerm(ctx, "peakMemory") val spillSize = metricTerm(ctx, "spillSize") + + def generateGenerateCode(): String = { + if (isFastHashMapEnabled) { + if (isVectorizedHashMapEnabled) { + s""" + | ${fastHashMapGenerator.asInstanceOf[VectorizedHashMapGenerator].generate()} + """.stripMargin + } else { + s""" + | ${fastHashMapGenerator.asInstanceOf[RowBasedHashMapGenerator].generate()} + """.stripMargin + } + } else "" + } + ctx.addNewFunction(doAgg, s""" - ${if (isVectorizedHashMapEnabled) vectorizedHashMapGenerator.generate() else ""} + ${generateGenerateCode} private void $doAgg() throws java.io.IOException { $hashMapTerm = $thisPlan.createHashMap(); 
${child.asInstanceOf[CodegenSupport].produce(ctx, this)} - ${if (isVectorizedHashMapEnabled) { - s"$iterTermForVectorizedHashMap = $vectorizedHashMapTerm.rowIterator();"} else ""} + ${if (isFastHashMapEnabled) { + s"$iterTermForFastHashMap = $fastHashMapTerm.rowIterator();"} else ""} $iterTerm = $thisPlan.finishAggregate($hashMapTerm, $sorterTerm, $peakMemory, $spillSize); } @@ -542,34 +612,56 @@ case class HashAggregateExec( // so `copyResult` should be reset to `false`. ctx.copyResult = false + def outputFromGeneratedMap: String = { + if (isFastHashMapEnabled) { + if (isVectorizedHashMapEnabled) { + outputFromVectorizedMap + } else { + outputFromRowBasedMap + } + } else "" + } + + def outputFromRowBasedMap: String = { + s""" + while ($iterTermForFastHashMap.next()) { + $numOutput.add(1); + UnsafeRow $keyTerm = (UnsafeRow) $iterTermForFastHashMap.getKey(); + UnsafeRow $bufferTerm = (UnsafeRow) $iterTermForFastHashMap.getValue(); + $outputCode + + if (shouldStop()) return; + } + $fastHashMapTerm.close(); + """ + } + // Iterate over the aggregate rows and convert them from ColumnarBatch.Row to UnsafeRow - def outputFromGeneratedMap: Option[String] = { - if (isVectorizedHashMapEnabled) { - val row = ctx.freshName("vectorizedHashMapRow") + def outputFromVectorizedMap: String = { + val row = ctx.freshName("fastHashMapRow") ctx.currentVars = null ctx.INPUT_ROW = row var schema: StructType = groupingKeySchema bufferSchema.foreach(i => schema = schema.add(i)) val generateRow = GenerateUnsafeProjection.createCode(ctx, schema.toAttributes.zipWithIndex .map { case (attr, i) => BoundReference(i, attr.dataType, attr.nullable) }) - Option( - s""" - | while ($iterTermForVectorizedHashMap.hasNext()) { - | $numOutput.add(1); - | org.apache.spark.sql.execution.vectorized.ColumnarBatch.Row $row = - | (org.apache.spark.sql.execution.vectorized.ColumnarBatch.Row) - | $iterTermForVectorizedHashMap.next(); - | ${generateRow.code} - | ${consume(ctx, Seq.empty, {generateRow.value})} - | - | if (shouldStop()) return; - | } - | - | $vectorizedHashMapTerm.close(); - """.stripMargin) - } else None + s""" + | while ($iterTermForFastHashMap.hasNext()) { + | $numOutput.add(1); + | org.apache.spark.sql.execution.vectorized.ColumnarBatch.Row $row = + | (org.apache.spark.sql.execution.vectorized.ColumnarBatch.Row) + | $iterTermForFastHashMap.next(); + | ${generateRow.code} + | ${consume(ctx, Seq.empty, {generateRow.value})} + | + | if (shouldStop()) return; + | } + | + | $fastHashMapTerm.close(); + """.stripMargin } + val aggTime = metricTerm(ctx, "aggTime") val beforeAgg = ctx.freshName("beforeAgg") s""" @@ -581,7 +673,7 @@ case class HashAggregateExec( } // output the result - ${outputFromGeneratedMap.getOrElse("")} + ${outputFromGeneratedMap} while ($iterTerm.next()) { $numOutput.add(1); @@ -605,11 +697,11 @@ case class HashAggregateExec( ctx.currentVars = input val unsafeRowKeyCode = GenerateUnsafeProjection.createCode( ctx, groupingExpressions.map(e => BindReferences.bindReference[Expression](e, child.output))) - val vectorizedRowKeys = ctx.generateExpressions( - groupingExpressions.map(e => BindReferences.bindReference[Expression](e, child.output))) + val fastRowKeys = ctx.generateExpressions( + groupingExpressions.map(e => BindReferences.bindReference[Expression](e, child.output))) val unsafeRowKeys = unsafeRowKeyCode.value val unsafeRowBuffer = ctx.freshName("unsafeRowAggBuffer") - val vectorizedRowBuffer = ctx.freshName("vectorizedAggBuffer") + val fastRowBuffer = ctx.freshName("fastAggBuffer") // only have 
DeclarativeAggregate val updateExpr = aggregateExpressions.flatMap { e => @@ -639,17 +731,18 @@ case class HashAggregateExec( ("true", "true", "", "") } - // We first generate code to probe and update the vectorized hash map. If the probe is - // successful the corresponding vectorized row buffer will hold the mutable row - val findOrInsertInVectorizedHashMap: Option[String] = { - if (isVectorizedHashMapEnabled) { + // We first generate code to probe and update the fast hash map. If the probe is + // successful the corresponding fast row buffer will hold the mutable row + val findOrInsertFastHashMap: Option[String] = { + if (isFastHashMapEnabled) { Option( s""" + | |if ($checkFallbackForGeneratedHashMap) { - | ${vectorizedRowKeys.map(_.code).mkString("\n")} - | if (${vectorizedRowKeys.map("!" + _.isNull).mkString(" && ")}) { - | $vectorizedRowBuffer = $vectorizedHashMapTerm.findOrInsert( - | ${vectorizedRowKeys.map(_.value).mkString(", ")}); + | ${fastRowKeys.map(_.code).mkString("\n")} + | if (${fastRowKeys.map("!" + _.isNull).mkString(" && ")}) { + | $fastRowBuffer = $fastHashMapTerm.findOrInsert( + | ${fastRowKeys.map(_.value).mkString(", ")}); | } |} """.stripMargin) @@ -658,36 +751,35 @@ case class HashAggregateExec( } } - val updateRowInVectorizedHashMap: Option[String] = { - if (isVectorizedHashMapEnabled) { - ctx.INPUT_ROW = vectorizedRowBuffer - val boundUpdateExpr = updateExpr.map(BindReferences.bindReference(_, inputAttr)) - val subExprs = ctx.subexpressionEliminationForWholeStageCodegen(boundUpdateExpr) - val effectiveCodes = subExprs.codes.mkString("\n") - val vectorizedRowEvals = ctx.withSubExprEliminationExprs(subExprs.states) { - boundUpdateExpr.map(_.genCode(ctx)) - } - val updateVectorizedRow = vectorizedRowEvals.zipWithIndex.map { case (ev, i) => - val dt = updateExpr(i).dataType - ctx.updateColumn(vectorizedRowBuffer, dt, i, ev, updateExpr(i).nullable, - isVectorized = true) - } - Option( - s""" - |// common sub-expressions - |$effectiveCodes - |// evaluate aggregate function - |${evaluateVariables(vectorizedRowEvals)} - |// update vectorized row - |${updateVectorizedRow.mkString("\n").trim} - """.stripMargin) - } else None + + def updateRowInFastHashMap(isVectorized: Boolean): Option[String] = { + ctx.INPUT_ROW = fastRowBuffer + val boundUpdateExpr = updateExpr.map(BindReferences.bindReference(_, inputAttr)) + val subExprs = ctx.subexpressionEliminationForWholeStageCodegen(boundUpdateExpr) + val effectiveCodes = subExprs.codes.mkString("\n") + val fastRowEvals = ctx.withSubExprEliminationExprs(subExprs.states) { + boundUpdateExpr.map(_.genCode(ctx)) + } + val updateFastRow = fastRowEvals.zipWithIndex.map { case (ev, i) => + val dt = updateExpr(i).dataType + ctx.updateColumn(fastRowBuffer, dt, i, ev, updateExpr(i).nullable, isVectorized) + } + Option( + s""" + |// common sub-expressions + |$effectiveCodes + |// evaluate aggregate function + |${evaluateVariables(fastRowEvals)} + |// update fast row + |${updateFastRow.mkString("\n").trim} + | + """.stripMargin) } // Next, we generate code to probe and update the unsafe row hash map. val findOrInsertInUnsafeRowMap: String = { s""" - | if ($vectorizedRowBuffer == null) { + | if ($fastRowBuffer == null) { | // generate grouping key | ${unsafeRowKeyCode.code.trim} | ${hashEval.code.trim} @@ -745,17 +837,31 @@ case class HashAggregateExec( // Finally, sort the spilled aggregate buffers by key, and merge them together for same key. 
s""" UnsafeRow $unsafeRowBuffer = null; - org.apache.spark.sql.execution.vectorized.ColumnarBatch.Row $vectorizedRowBuffer = null; + ${ + if (isVectorizedHashMapEnabled) { + s""" + | org.apache.spark.sql.execution.vectorized.ColumnarBatch.Row $fastRowBuffer = null; + """.stripMargin + } else { + s""" + | UnsafeRow $fastRowBuffer = null; + """.stripMargin + } + } - ${findOrInsertInVectorizedHashMap.getOrElse("")} + ${findOrInsertFastHashMap.getOrElse("")} $findOrInsertInUnsafeRowMap $incCounter - if ($vectorizedRowBuffer != null) { - // update vectorized row - ${updateRowInVectorizedHashMap.getOrElse("")} + if ($fastRowBuffer != null) { + // update fast row + ${ + if (isFastHashMapEnabled) { + updateRowInFastHashMap(isVectorizedHashMapEnabled).getOrElse("") + } else "" + } } else { // update unsafe row $updateRowInUnsafeRowMap diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala index 1dea33037c85c..a77e178546ef8 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala @@ -141,8 +141,16 @@ class RowBasedHashMapGenerator( } val createUnsafeRowForKey = groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) => - s"agg_rowWriter.write(${ordinal}, ${key.name})"} - .mkString(";\n") + key.dataType match { + case t: DecimalType => + s"agg_rowWriter.write(${ordinal}, ${key.name}, ${t.precision}, ${t.scale})" + case t: DataType => + if (!t.isInstanceOf[StringType] && !ctx.isPrimitiveType(t)) { + throw new IllegalArgumentException(s"cannot generate code for unsupported type: $t") + } + s"agg_rowWriter.write(${ordinal}, ${key.name})" + } + }.mkString(";\n") s""" |public org.apache.spark.sql.catalyst.expressions.UnsafeRow findOrInsert(${ diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index 91988270ada8d..d3440a2644163 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -509,14 +509,15 @@ object SQLConf { .intConf .createWithDefault(40) - val VECTORIZED_AGG_MAP_MAX_COLUMNS = - SQLConfigBuilder("spark.sql.codegen.aggregate.map.columns.max") + val ENABLE_TWOLEVEL_AGG_MAP = + SQLConfigBuilder("spark.sql.codegen.aggregate.map.twolevel.enable") .internal() - .doc("Sets the maximum width of schema (aggregate keys + values) for which aggregate with" + - "keys uses an in-memory columnar map to speed up execution. Setting this to 0 effectively" + - "disables the columnar map") - .intConf - .createWithDefault(3) + .doc("Enable two-level aggregate hash map. When enabled, records will first be " + + "inserted/looked-up at a 1st-level, small, fast map, and then fallback to a " + + "2nd-level, larger, slower map when 1st level is full or keys cannot be found. " + + "When disabled, records go directly to the 2nd level. 
Defaults to true.") + .booleanConf + .createWithDefault(true) val FILE_SINK_LOG_DELETION = SQLConfigBuilder("spark.sql.streaming.fileSink.log.deletion") .internal() @@ -687,7 +688,7 @@ private[sql] class SQLConf extends Serializable with CatalystConf with Logging { override def runSQLonFile: Boolean = getConf(RUN_SQL_ON_FILES) - def vectorizedAggregateMapMaxColumns: Int = getConf(VECTORIZED_AGG_MAP_MAX_COLUMNS) + def enableTwoLevelAggMap: Boolean = getConf(ENABLE_TWOLEVEL_AGG_MAP) def variableSubstituteEnabled: Boolean = getConf(VARIABLE_SUBSTITUTE_ENABLED) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala new file mode 100644 index 0000000000000..3e85d95523125 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.scalatest.BeforeAndAfter + +class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter { + + protected override def beforeAll(): Unit = { + sparkConf.set("spark.sql.codegen.fallback", "false") + sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false") + super.beforeAll() + } + + // adding some checking after each test is run, assuring that the configs are not changed + // in test code + after { + assert(sparkConf.get("spark.sql.codegen.fallback") == "false", + "configuration parameter changed in test body") + assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "false", + "configuration parameter changed in test body") + } +} + +class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter { + + protected override def beforeAll(): Unit = { + sparkConf.set("spark.sql.codegen.fallback", "false") + sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + super.beforeAll() + } + + // adding some checking after each test is run, assuring that the configs are not changed + // in test code + after { + assert(sparkConf.get("spark.sql.codegen.fallback") == "false", + "configuration parameter changed in test body") + assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "true", + "configuration parameter changed in test body") + } +} + +class TwoLevelAggregateHashMapWithVectorizedMapSuite extends DataFrameAggregateSuite with +BeforeAndAfter { + + protected override def beforeAll(): Unit = { + sparkConf.set("spark.sql.codegen.fallback", "false") + sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + sparkConf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true") + super.beforeAll() + } + + // adding some checking after 
each test is run, assuring that the configs are not changed + // in test code + after { + assert(sparkConf.get("spark.sql.codegen.fallback") == "false", + "configuration parameter changed in test body") + assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "true", + "configuration parameter changed in test body") + assert(sparkConf.get("spark.sql.codegen.aggregate.map.vectorized.enable") == "true", + "configuration parameter changed in test body") + } +} + diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala index 69a3b5f278fd8..427390a90f1e6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala @@ -485,4 +485,12 @@ class DataFrameAggregateSuite extends QueryTest with SharedSQLContext { spark.sql("select avg(a) over () from values 1.0, 2.0, 3.0 T(a)"), Row(2.0) :: Row(2.0) :: Row(2.0) :: Nil) } + + test("SQL decimal test (used for catching certain demical handling bugs in aggregates)") { + checkAnswer( + decimalData.groupBy('a cast DecimalType(10, 2)).agg(avg('b cast DecimalType(10, 2))), + Seq(Row(new java.math.BigDecimal(1.0), new java.math.BigDecimal(1.5)), + Row(new java.math.BigDecimal(2.0), new java.math.BigDecimal(1.5)), + Row(new java.math.BigDecimal(3.0), new java.math.BigDecimal(1.5)))) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala index bf3a39c84b3b2..8a2993bdf4b28 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala @@ -106,13 +106,14 @@ class AggregateBenchmark extends BenchmarkBase { benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "0") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false") f() } benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "3") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true") f() } @@ -146,13 +147,14 @@ class AggregateBenchmark extends BenchmarkBase { benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true) - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", 0) + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false") f() } benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true) - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", 3) + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true") f() } @@ -184,13 +186,14 @@ class AggregateBenchmark extends BenchmarkBase { benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter => 
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "0") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false") f() } benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "3") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true") f() } @@ -221,13 +224,14 @@ class AggregateBenchmark extends BenchmarkBase { benchmark.addCase(s"codegen = T hashmap = F") { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "0") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false") f() } benchmark.addCase(s"codegen = T hashmap = T") { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "3") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true") f() } @@ -268,13 +272,14 @@ class AggregateBenchmark extends BenchmarkBase { benchmark.addCase(s"codegen = T hashmap = F") { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "0") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false") f() } benchmark.addCase(s"codegen = T hashmap = T") { iter => sparkSession.conf.set("spark.sql.codegen.wholeStage", "true") - sparkSession.conf.set("spark.sql.codegen.aggregate.map.columns.max", "10") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true") + sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true") f() } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala index 2dcf13c02a466..4a8086d7e5400 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala @@ -998,9 +998,9 @@ class HashAggregationQuerySuite extends AggregationQuerySuite class HashAggregationQueryWithControlledFallbackSuite extends AggregationQuerySuite { override protected def checkAnswer(actual: => DataFrame, expectedAnswer: Seq[Row]): Unit = { - Seq(0, 10).foreach { maxColumnarHashMapColumns => - withSQLConf("spark.sql.codegen.aggregate.map.columns.max" -> - maxColumnarHashMapColumns.toString) { + Seq("true", "false").foreach { enableTwoLevelMaps => + withSQLConf("spark.sql.codegen.aggregate.map.twolevel.enable" -> + enableTwoLevelMaps) { (1 to 3).foreach { fallbackStartsAt => withSQLConf("spark.sql.TungstenAggregate.testFallbackStartsAt" -> s"${(fallbackStartsAt - 1).toString}, ${fallbackStartsAt.toString}") { From 5bea8757cce0b5e7c1f1ab9cd767c76fc63e2978 Mon Sep 17 00:00:00 2001 From: Yangyang Liu Date: Thu, 1 Sep 2016 17:01:01 -0700 Subject: [PATCH 175/270] [SPARK-16619] Add shuffle service metrics entry in monitoring docs After change [SPARK-16405](https://github.com/apache/spark/pull/14080), we need to update docs by adding shuffle 
service metrics entry in currently supporting metrics list. Author: Yangyang Liu Closes #14254 from lovexi/yangyang-monitoring-doc. --- docs/monitoring.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/monitoring.md b/docs/monitoring.md index 5804e4f26cd93..5bc5e18c4d45f 100644 --- a/docs/monitoring.md +++ b/docs/monitoring.md @@ -377,6 +377,7 @@ set of sinks to which metrics are reported. The following instances are currentl * `worker`: A Spark standalone worker process. * `executor`: A Spark executor. * `driver`: The Spark driver process (the process in which your SparkContext is created). +* `shuffleService`: The Spark shuffle service. Each instance can report to zero or more _sinks_. Sinks are contained in the `org.apache.spark.metrics.sink` package: From 06e33985c631fe91e1c4cef6039b8752548cc435 Mon Sep 17 00:00:00 2001 From: Lianhui Wang Date: Thu, 1 Sep 2016 17:08:33 -0700 Subject: [PATCH 176/270] [SPARK-16302][SQL] Set the right number of partitions for reading data from a local collection. follow #13137 This pr sets the right number of partitions when reading data from a local collection. Query 'val df = Seq((1, 2)).toDF("key", "value").count' always use defaultParallelism tasks. So it causes run many empty or small tasks. Manually tested and checked. Author: Lianhui Wang Closes #13979 from lianhuiwang/localTable-Parallel. --- .../org/apache/spark/sql/execution/LocalTableScanExec.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScanExec.scala index 556f482f4b472..6598fa381aa3d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScanExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScanExec.scala @@ -42,7 +42,10 @@ case class LocalTableScanExec( } } - private lazy val rdd = sqlContext.sparkContext.parallelize(unsafeRows) + private lazy val numParallelism: Int = math.min(math.max(unsafeRows.length, 1), + sqlContext.sparkContext.defaultParallelism) + + private lazy val rdd = sqlContext.sparkContext.parallelize(unsafeRows, numParallelism) protected override def doExecute(): RDD[InternalRow] = { val numOutputRows = longMetric("numOutputRows") From f2d6e2ef23b3f862c336ce5f7b98c43c3fde1e36 Mon Sep 17 00:00:00 2001 From: Brian Cho Date: Fri, 2 Sep 2016 11:12:34 +0800 Subject: [PATCH 177/270] [SPARK-16926][SQL] Add unit test to compare table and partition column metadata. ## What changes were proposed in this pull request? Add unit test for changes made in PR #14515. It makes sure that a newly created table has the same number of columns in table and partition metadata. This test fails before the changes introduced in #14515. ## How was this patch tested? Run new unit test. Author: Brian Cho Closes #14930 from dafrista/partition-metadata-unit-test. 
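
As a side note on the SPARK-16302 `LocalTableScanExec` change shown above: the partition count for a local collection is now clamped between 1 and the default parallelism instead of always using `defaultParallelism`. The snippet below is a minimal, hypothetical illustration of that heuristic only; the object and method names are invented here and are not part of Spark.

```
// Illustrative only: the partition-count clamp introduced by SPARK-16302 for
// local collections (in Spark it lives inside LocalTableScanExec).
object LocalScanParallelismSketch extends App {
  def numParallelism(numRows: Int, defaultParallelism: Int): Int =
    math.min(math.max(numRows, 1), defaultParallelism)

  // A one-row local table (e.g. Seq((1, 2)).toDF("key", "value")) no longer fans
  // out into defaultParallelism mostly-empty tasks:
  assert(numParallelism(numRows = 1, defaultParallelism = 8) == 1)
  // An empty local relation still gets a single partition:
  assert(numParallelism(numRows = 0, defaultParallelism = 8) == 1)
  // Large local collections remain capped at the default parallelism:
  assert(numParallelism(numRows = 10000, defaultParallelism = 8) == 8)
}
```
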
--- .../hive/execution/HiveTableScanSuite.scala | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala index 5b464764f0a99..5c460d25f3723 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala @@ -18,6 +18,7 @@ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.Row +import org.apache.spark.sql.hive.MetastoreRelation import org.apache.spark.sql.hive.test.{TestHive, TestHiveSingleton} import org.apache.spark.sql.hive.test.TestHive._ import org.apache.spark.sql.hive.test.TestHive.implicits._ @@ -143,4 +144,38 @@ class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestH } } } + + test("SPARK-16926: number of table and partition columns match for new partitioned table") { + val view = "src" + withTempView(view) { + spark.range(1, 5).createOrReplaceTempView(view) + val table = "table_with_partition" + withTable(table) { + sql( + s""" + |CREATE TABLE $table(id string) + |PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string) + """.stripMargin) + sql( + s""" + |FROM $view v + |INSERT INTO TABLE $table + |PARTITION (p1='a',p2='b',p3='c',p4='d',p5='e') + |SELECT v.id + |INSERT INTO TABLE $table + |PARTITION (p1='a',p2='c',p3='c',p4='d',p5='e') + |SELECT v.id + """.stripMargin) + val plan = sql( + s""" + |SELECT * FROM $table + """.stripMargin).queryExecution.sparkPlan + val relation = plan.collectFirst { + case p: HiveTableScanExec => p.relation + }.get + val tableCols = relation.hiveQlTable.getCols + relation.getHiveQlPartitions().foreach(p => assert(p.getCols.size == tableCols.size)) + } + } + } } From 2ab8dbddaa31e4491b52eb0e495660ebbebfdb9e Mon Sep 17 00:00:00 2001 From: Kousuke Saruta Date: Fri, 2 Sep 2016 08:46:15 +0100 Subject: [PATCH 178/270] [SPARK-17342][WEBUI] Style of event timeline is broken ## What changes were proposed in this pull request? SPARK-15373 (#13158) updated the version of vis.js to 4.16.1. As of 4.0.0, some class was renamed like 'timeline to vis-timeline' but that ticket didn't care and now style is broken. In this PR, I've restored the style by modifying `timeline-view.css` and `timeline-view.js`. ## How was this patch tested? manual tests. (If this patch involves UI changes, please attach a screenshot; otherwise, remove this) * Before 2016-09-01 1 38 31 * After 2016-09-01 3 30 19 Author: Kousuke Saruta Closes #14900 from sarutak/SPARK-17342. 
--- .../apache/spark/ui/static/timeline-view.css | 57 +++++++++---------- .../apache/spark/ui/static/timeline-view.js | 6 +- 2 files changed, 31 insertions(+), 32 deletions(-) diff --git a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css index f9ad9f8378803..3bf3e8bfa1f31 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css +++ b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.css @@ -33,12 +33,15 @@ div#application-timeline, div#job-timeline { height: 55px; } -#task-assignment-timeline div.item.range { - padding: 0px; +#task-assignment-timeline div.vis-item.vis-range { height: 26px; border-width: 0; } +#task-assignment-timeline .vis-item-content { + padding: 0px; +} + .task-assignment-timeline-content { width: 100%; } @@ -83,28 +86,24 @@ rect.getting-result-time-proportion { stroke: #75B0A6; } -.vis-item .vis-item-content { - width: 100% -} - -.vis.timeline { +.vis-timeline { line-height: 14px; } -.vis.timeline div.content { +.vis-timeline div.vis-item-content { width: 100%; } -.vis.timeline .item.stage { +.vis-timeline .vis-item.stage { cursor: pointer; } -.vis.timeline .item.stage.succeeded { +.vis-timeline .vis-item.stage.succeeded { background-color: #A0DFFF; border-color: #3EC0FF; } -.vis.timeline .item.stage.succeeded.selected { +.vis-timeline .vis-item.stage.succeeded.vis-selected { background-color: #A0DFFF; border-color: #3EC0FF; z-index: auto; @@ -115,12 +114,12 @@ rect.getting-result-time-proportion { stroke: #3EC0FF; } -.vis.timeline .item.stage.failed { +.vis-timeline .vis-item.stage.failed { background-color: #FFA1B0; border-color: #FF4D6D; } -.vis.timeline .item.stage.failed.selected { +.vis-timeline .vis-item.stage.failed.vis-selected { background-color: #FFA1B0; border-color: #FF4D6D; z-index: auto; @@ -131,12 +130,12 @@ rect.getting-result-time-proportion { stroke: #FF4D6D; } -.vis.timeline .item.stage.running { +.vis-timeline .vis-item.stage.running { background-color: #A2FCC0; border-color: #36F572; } -.vis.timeline .item.stage.running.selected { +.vis-timeline .vis-item.stage.running.vis-selected { background-color: #A2FCC0; border-color: #36F572; z-index: auto; @@ -147,20 +146,20 @@ rect.getting-result-time-proportion { stroke: #36F572; } -.vis.timeline .foreground { +.vis-timeline .vis-foreground { cursor: move; } -.vis.timeline .item.job { +.vis-timeline .vis-item.job { cursor: pointer; } -.vis.timeline .item.job.succeeded { +.vis-timeline .vis-item.job.succeeded { background-color: #A0DFFF; border-color: #3EC0FF; } -.vis.timeline .item.job.succeeded.selected { +.vis-timeline .vis-item.job.succeeded.vis-selected { background-color: #A0DFFF; border-color: #3EC0FF; z-index: auto; @@ -171,12 +170,12 @@ rect.getting-result-time-proportion { stroke: #3EC0FF; } -.vis.timeline .item.job.failed { +.vis-timeline .vis-item.job.failed { background-color: #FFA1B0; border-color: #FF4D6D; } -.vis.timeline .item.job.failed.selected { +.vis-timeline .vis-item.job.failed.vis-selected { background-color: #FFA1B0; border-color: #FF4D6D; z-index: auto; @@ -187,12 +186,12 @@ rect.getting-result-time-proportion { stroke: #FF4D6D; } -.vis.timeline .item.job.running { +.vis-timeline .vis-item.job.running { background-color: #A2FCC0; border-color: #36F572; } -.vis.timeline .item.job.running.selected { +.vis-timeline .vis-item.job.running.vis-selected { background-color: #A2FCC0; border-color: #36F572; z-index: auto; @@ -203,7 +202,7 @@ 
rect.getting-result-time-proportion { stroke: #36F572; } -.vis.timeline .item.executor.added { +.vis-timeline .vis-item.executor.added { background-color: #A0DFFF; border-color: #3EC0FF; } @@ -213,7 +212,7 @@ rect.getting-result-time-proportion { stroke: #3EC0FF; } -.vis.timeline .item.executor.removed { +.vis-timeline .vis-item.executor.removed { background-color: #FFA1B0; border-color: #FF4D6D; } @@ -223,7 +222,7 @@ rect.getting-result-time-proportion { stroke: #FF4D6D; } -.vis.timeline .item.executor.selected { +.vis-timeline .vis-item.executor.vis-selected { background-color: #A2FCC0; border-color: #36F572; z-index: 2; @@ -262,15 +261,15 @@ span.expand-task-assignment-timeline { cursor: pointer; } -.vis.timeline .item.range .content { +.vis-timeline .vis-item.vis-range .vis-item-content { position: unset; } -.vis.timeline .item .tooltip-inner { +.vis-timeline .vis-item .tooltip-inner { max-width: unset !important; } -.vispanel.center { +.vis-panel.vis-center { font-size: 12px; line-height: 12px; } diff --git a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js index 9ab5684d901f0..a6153ceda75e2 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js +++ b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js @@ -41,7 +41,7 @@ function drawApplicationTimeline(groupArray, eventObjArray, startTime, offset) { setupExecutorEventAction(); function setupJobEventAction() { - $(".item.range.job.application-timeline-object").each(function() { + $(".vis-item.vis-range.job.application-timeline-object").each(function() { var getSelectorForJobEntry = function(baseElem) { var jobIdText = $($(baseElem).find(".application-timeline-content")[0]).text(); var jobId = jobIdText.match("\\(Job (\\d+)\\)$")[1]; @@ -116,7 +116,7 @@ function drawJobTimeline(groupArray, eventObjArray, startTime, offset) { setupExecutorEventAction(); function setupStageEventAction() { - $(".item.range.stage.job-timeline-object").each(function() { + $(".vis-item.vis-range.stage.job-timeline-object").each(function() { var getSelectorForStageEntry = function(baseElem) { var stageIdText = $($(baseElem).find(".job-timeline-content")[0]).text(); var stageIdAndAttempt = stageIdText.match("\\(Stage (\\d+\\.\\d+)\\)$")[1].split("."); @@ -233,7 +233,7 @@ $(function (){ }); function setupExecutorEventAction() { - $(".item.box.executor").each(function () { + $(".vis-item.vis-box.executor").each(function () { $(this).hover( function() { $($(this).find(".executor-event-content")[0]).tooltip("show"); From 0f30cdedbdb0d38e8c479efab6bb1c6c376206ff Mon Sep 17 00:00:00 2001 From: "wm624@hotmail.com" Date: Fri, 2 Sep 2016 01:47:17 -0700 Subject: [PATCH 179/270] [SPARK-16883][SPARKR] SQL decimal type is not properly cast to number when collecting SparkDataFrame ## What changes were proposed in this pull request? (Please fill in changes proposed in this fix) registerTempTable(createDataFrame(iris), "iris") str(collect(sql("select cast('1' as double) as x, cast('2' as decimal) as y from iris limit 5"))) 'data.frame': 5 obs. of 2 variables: $ x: num 1 1 1 1 1 $ y:List of 5 ..$ : num 2 ..$ : num 2 ..$ : num 2 ..$ : num 2 ..$ : num 2 The problem is that spark returns `decimal(10, 0)` col type, instead of `decimal`. Thus, `decimal(10, 0)` is not handled correctly. It should be handled as "double". As discussed in JIRA thread, we can have two potential fixes: 1). 
Scala side fix to add a new case when writing the object back; However, I can't use spark.sql.types._ in Spark core due to dependency issues. I don't find a way of doing type case match; 2). SparkR side fix: Add a helper function to check special type like `"decimal(10, 0)"` and replace it with `double`, which is PRIMITIVE type. This special helper is generic for adding new types handling in the future. I open this PR to discuss pros and cons of both approaches. If we want to do Scala side fix, we need to find a way to match the case of DecimalType and StructType in Spark Core. ## How was this patch tested? (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) Manual test: > str(collect(sql("select cast('1' as double) as x, cast('2' as decimal) as y from iris limit 5"))) 'data.frame': 5 obs. of 2 variables: $ x: num 1 1 1 1 1 $ y: num 2 2 2 2 2 R Unit tests Author: wm624@hotmail.com Closes #14613 from wangmiao1981/type. --- R/pkg/R/DataFrame.R | 13 ++++++++++++- R/pkg/R/types.R | 16 ++++++++++++++++ R/pkg/inst/tests/testthat/test_sparkSQL.R | 22 ++++++++++++++++++++++ 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index e12b58e2eefc5..a92450274e077 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -397,7 +397,11 @@ setMethod("coltypes", } if (is.null(type)) { - stop(paste("Unsupported data type: ", x)) + specialtype <- specialtypeshandle(x) + if (is.null(specialtype)) { + stop(paste("Unsupported data type: ", x)) + } + type <- PRIMITIVE_TYPES[[specialtype]] } } type @@ -1063,6 +1067,13 @@ setMethod("collect", df[[colIndex]] <- col } else { colType <- dtypes[[colIndex]][[2]] + if (is.null(PRIMITIVE_TYPES[[colType]])) { + specialtype <- specialtypeshandle(colType) + if (!is.null(specialtype)) { + colType <- specialtype + } + } + # Note that "binary" columns behave like complex types. if (!is.null(PRIMITIVE_TYPES[[colType]]) && colType != "binary") { vec <- do.call(c, col) diff --git a/R/pkg/R/types.R b/R/pkg/R/types.R index ad048b1cd1795..abca703617c7b 100644 --- a/R/pkg/R/types.R +++ b/R/pkg/R/types.R @@ -67,3 +67,19 @@ rToSQLTypes <- as.environment(list( "double" = "double", "character" = "string", "logical" = "boolean")) + +# Helper function of coverting decimal type. When backend returns column type in the +# format of decimal(,) (e.g., decimal(10, 0)), this function coverts the column type +# as double type. This function converts backend returned types that are not the key +# of PRIMITIVE_TYPES, but should be treated as PRIMITIVE_TYPES. +# @param A type returned from the JVM backend. +# @return A type is the key of the PRIMITIVE_TYPES. 
+specialtypeshandle <- function(type) { + returntype <- NULL + m <- regexec("^decimal(.+)$", type) + matchedStrings <- regmatches(type, m) + if (length(matchedStrings[[1]]) >= 2) { + returntype <- "double" + } + returntype +} diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R index 8ff56eba1f7bf..683a15cb4ffcd 100644 --- a/R/pkg/inst/tests/testthat/test_sparkSQL.R +++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R @@ -526,6 +526,17 @@ test_that( expect_is(newdf, "SparkDataFrame") expect_equal(count(newdf), 1) dropTempView("table1") + + createOrReplaceTempView(df, "dfView") + sqlCast <- collect(sql("select cast('2' as decimal) as x from dfView limit 1")) + out <- capture.output(sqlCast) + expect_true(is.data.frame(sqlCast)) + expect_equal(names(sqlCast)[1], "x") + expect_equal(nrow(sqlCast), 1) + expect_equal(ncol(sqlCast), 1) + expect_equal(out[1], " x") + expect_equal(out[2], "1 2") + dropTempView("dfView") }) test_that("test cache, uncache and clearCache", { @@ -2089,6 +2100,9 @@ test_that("Method coltypes() to get and set R's data types of a DataFrame", { # Test primitive types DF <- createDataFrame(data, schema) expect_equal(coltypes(DF), c("integer", "logical", "POSIXct")) + createOrReplaceTempView(DF, "DFView") + sqlCast <- sql("select cast('2' as decimal) as x from DFView limit 1") + expect_equal(coltypes(sqlCast), "numeric") # Test complex types x <- createDataFrame(list(list(as.environment( @@ -2132,6 +2146,14 @@ test_that("Method str()", { "setosa\" \"setosa\" \"setosa\" \"setosa\"")) expect_equal(out[7], " $ col : logi TRUE TRUE TRUE TRUE TRUE TRUE") + createOrReplaceTempView(irisDF2, "irisView") + + sqlCast <- sql("select cast('2' as decimal) as x from irisView limit 1") + castStr <- capture.output(str(sqlCast)) + expect_equal(length(castStr), 2) + expect_equal(castStr[1], "'SparkDataFrame': 1 variables:") + expect_equal(castStr[2], " $ x: num 2") + # A random dataset with many columns. This test is to check str limits # the number of columns. Therefore, it will suffice to check for the # number of returned rows From 6969dcc79a33d715250958b24361f2d43552d840 Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Fri, 2 Sep 2016 01:54:28 -0700 Subject: [PATCH 180/270] [SPARK-15509][ML][SPARKR] R MLlib algorithms should support input columns "features" and "label" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://issues.apache.org/jira/browse/SPARK-15509 ## What changes were proposed in this pull request? Currently in SparkR, when you load a LibSVM dataset using the sqlContext and then pass it to an MLlib algorithm, the ML wrappers will fail since they will try to create a "features" column, which conflicts with the existing "features" column from the LibSVM loader. E.g., using the "mnist" dataset from LibSVM: `training <- loadDF(sqlContext, ".../mnist", "libsvm")` `model <- naiveBayes(label ~ features, training)` This fails with: ``` 16/05/24 11:52:41 ERROR RBackendHandler: fit on org.apache.spark.ml.r.NaiveBayesWrapper failed Error in invokeJava(isStatic = TRUE, className, methodName, ...) : java.lang.IllegalArgumentException: Output column features already exists. 
at org.apache.spark.ml.feature.VectorAssembler.transformSchema(VectorAssembler.scala:120) at org.apache.spark.ml.Pipeline$$anonfun$transformSchema$4.apply(Pipeline.scala:179) at org.apache.spark.ml.Pipeline$$anonfun$transformSchema$4.apply(Pipeline.scala:179) at scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57) at scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66) at scala.collection.mutable.ArrayOps$ofRef.foldLeft(ArrayOps.scala:186) at org.apache.spark.ml.Pipeline.transformSchema(Pipeline.scala:179) at org.apache.spark.ml.PipelineStage.transformSchema(Pipeline.scala:67) at org.apache.spark.ml.Pipeline.fit(Pipeline.scala:131) at org.apache.spark.ml.feature.RFormula.fit(RFormula.scala:169) at org.apache.spark.ml.r.NaiveBayesWrapper$.fit(NaiveBayesWrapper.scala:62) at org.apache.spark.ml.r.NaiveBayesWrapper.fit(NaiveBayesWrapper.sca The same issue appears for the "label" column once you rename the "features" column. ``` The cause is, when using `loadDF()` to generate dataframes, sometimes it’s with default column name `“label”` and `“features”`, and these two name will conflict with default column names `setDefault(labelCol, "label")` and ` setDefault(featuresCol, "features")` of `SharedParams.scala` ## How was this patch tested? Test on my local machine. Author: Xin Ren Closes #13584 from keypointt/SPARK-15509. --- .../ml/r/AFTSurvivalRegressionWrapper.scala | 1 + .../spark/ml/r/GaussianMixtureWrapper.scala | 5 +- .../GeneralizedLinearRegressionWrapper.scala | 1 + .../ml/r/IsotonicRegressionWrapper.scala | 5 +- .../org/apache/spark/ml/r/KMeansWrapper.scala | 5 +- .../apache/spark/ml/r/NaiveBayesWrapper.scala | 11 +-- .../org/apache/spark/ml/r/RWrapperUtils.scala | 71 +++++++++++++++++++ .../spark/ml/feature/RFormulaSuite.scala | 3 - .../spark/ml/r/RWrapperUtilsSuite.scala | 56 +++++++++++++++ 9 files changed, 144 insertions(+), 14 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala create mode 100644 mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala index 5462f80d69ff7..67d037ed6e024 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala @@ -87,6 +87,7 @@ private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalReg val (rewritedFormula, censorCol) = formulaRewrite(formula) val rFormula = new RFormula().setFormula(rewritedFormula) + RWrapperUtils.checkDataColumns(rFormula, data) val rFormulaModel = rFormula.fit(data) // get feature names from output schema diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala index 1e8b3bbab6655..b654233a89360 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala @@ -68,10 +68,11 @@ private[r] object GaussianMixtureWrapper extends MLReadable[GaussianMixtureWrapp maxIter: Int, tol: Double): GaussianMixtureWrapper = { - val rFormulaModel = new RFormula() + val rFormula = new RFormula() .setFormula(formula) .setFeaturesCol("features") - .fit(data) + RWrapperUtils.checkDataColumns(rFormula, data) + val rFormulaModel = 
rFormula.fit(data) // get feature names from output schema val schema = rFormulaModel.transform(data).schema diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala index 7a6ab618a1f2b..35313258f940e 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala @@ -73,6 +73,7 @@ private[r] object GeneralizedLinearRegressionWrapper regParam: Double): GeneralizedLinearRegressionWrapper = { val rFormula = new RFormula() .setFormula(formula) + RWrapperUtils.checkDataColumns(rFormula, data) val rFormulaModel = rFormula.fit(data) // get labels and feature names from output schema val schema = rFormulaModel.transform(data).schema diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala index a7992debe684a..2ed7d7b770cc9 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala @@ -57,10 +57,11 @@ private[r] object IsotonicRegressionWrapper featureIndex: Int, weightCol: String): IsotonicRegressionWrapper = { - val rFormulaModel = new RFormula() + val rFormula = new RFormula() .setFormula(formula) .setFeaturesCol("features") - .fit(data) + RWrapperUtils.checkDataColumns(rFormula, data) + val rFormulaModel = rFormula.fit(data) // get feature names from output schema val schema = rFormulaModel.transform(data).schema diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala index 4d4c303fc8c27..8616a8c01e5ac 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala @@ -70,10 +70,11 @@ private[r] object KMeansWrapper extends MLReadable[KMeansWrapper] { maxIter: Int, initMode: String): KMeansWrapper = { - val rFormulaModel = new RFormula() + val rFormula = new RFormula() .setFormula(formula) .setFeaturesCol("features") - .fit(data) + RWrapperUtils.checkDataColumns(rFormula, data) + val rFormulaModel = rFormula.fit(data) // get feature names from output schema val schema = rFormulaModel.transform(data).schema diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala index 1dac246b03329..f2cb24b96404b 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala @@ -59,13 +59,14 @@ private[r] object NaiveBayesWrapper extends MLReadable[NaiveBayesWrapper] { def fit(formula: String, data: DataFrame, smoothing: Double): NaiveBayesWrapper = { val rFormula = new RFormula() .setFormula(formula) - .fit(data) + RWrapperUtils.checkDataColumns(rFormula, data) + val rFormulaModel = rFormula.fit(data) // get labels and feature names from output schema - val schema = rFormula.transform(data).schema - val labelAttr = Attribute.fromStructField(schema(rFormula.getLabelCol)) + val schema = rFormulaModel.transform(data).schema + val labelAttr = Attribute.fromStructField(schema(rFormulaModel.getLabelCol)) .asInstanceOf[NominalAttribute] val labels = labelAttr.values.get - val featureAttrs = 
AttributeGroup.fromStructField(schema(rFormula.getFeaturesCol)) + val featureAttrs = AttributeGroup.fromStructField(schema(rFormulaModel.getFeaturesCol)) .attributes.get val features = featureAttrs.map(_.name.get) // assemble and fit the pipeline @@ -78,7 +79,7 @@ private[r] object NaiveBayesWrapper extends MLReadable[NaiveBayesWrapper] { .setOutputCol(PREDICTED_LABEL_COL) .setLabels(labels) val pipeline = new Pipeline() - .setStages(Array(rFormula, naiveBayes, idxToStr)) + .setStages(Array(rFormulaModel, naiveBayes, idxToStr)) .fit(data) new NaiveBayesWrapper(pipeline, labels, features) } diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala new file mode 100644 index 0000000000000..6a435992e3b35 --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.r + +import org.apache.spark.internal.Logging +import org.apache.spark.ml.feature.RFormula +import org.apache.spark.sql.Dataset + +object RWrapperUtils extends Logging { + + /** + * DataFrame column check. + * When loading data, default columns "features" and "label" will be added. And these two names + * would conflict with RFormula default feature and label column names. + * Here is to change the column name to avoid "column already exists" error. + * + * @param rFormula RFormula instance + * @param data Input dataset + * @return Unit + */ + def checkDataColumns(rFormula: RFormula, data: Dataset[_]): Unit = { + if (data.schema.fieldNames.contains(rFormula.getLabelCol)) { + val newLabelName = convertToUniqueName(rFormula.getLabelCol, data.schema.fieldNames) + logWarning( + s"data containing ${rFormula.getLabelCol} column, using new name $newLabelName instead") + rFormula.setLabelCol(newLabelName) + } + + if (data.schema.fieldNames.contains(rFormula.getFeaturesCol)) { + val newFeaturesName = convertToUniqueName(rFormula.getFeaturesCol, data.schema.fieldNames) + logWarning(s"data containing ${rFormula.getFeaturesCol} column, " + + s"using new name $newFeaturesName instead") + rFormula.setFeaturesCol(newFeaturesName) + } + } + + /** + * Convert conflicting name to be an unique name. 
+ * Appending a sequence number, like originalName_output1 + * and incrementing until it is not already there + * + * @param originalName Original name + * @param fieldNames Array of field names in existing schema + * @return String + */ + def convertToUniqueName(originalName: String, fieldNames: Array[String]): String = { + var counter = 1 + var newName = originalName + "_output" + + while (fieldNames.contains(newName)) { + newName = originalName + "_output" + counter + counter += 1 + } + newName + } +} diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/RFormulaSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/RFormulaSuite.scala index c12ab8fe9efe7..0794a049d9cd8 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/feature/RFormulaSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/feature/RFormulaSuite.scala @@ -54,9 +54,6 @@ class RFormulaSuite extends SparkFunSuite with MLlibTestSparkContext with Defaul intercept[IllegalArgumentException] { formula.fit(original) } - intercept[IllegalArgumentException] { - formula.fit(original) - } } test("label column already exists") { diff --git a/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala new file mode 100644 index 0000000000000..ddc24cb3a6481 --- /dev/null +++ b/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.ml.r + +import org.apache.spark.SparkFunSuite +import org.apache.spark.ml.feature.{RFormula, RFormulaModel} +import org.apache.spark.mllib.util.MLlibTestSparkContext + +class RWrapperUtilsSuite extends SparkFunSuite with MLlibTestSparkContext { + + test("avoid libsvm data column name conflicting") { + val rFormula = new RFormula().setFormula("label ~ features") + val data = spark.read.format("libsvm").load("../data/mllib/sample_libsvm_data.txt") + + // if not checking column name, then IllegalArgumentException + intercept[IllegalArgumentException] { + rFormula.fit(data) + } + + // after checking, model build is ok + RWrapperUtils.checkDataColumns(rFormula, data) + + assert(rFormula.getLabelCol == "label_output") + assert(rFormula.getFeaturesCol == "features_output") + + val model = rFormula.fit(data) + assert(model.isInstanceOf[RFormulaModel]) + + assert(model.getLabelCol == "label_output") + assert(model.getFeaturesCol == "features_output") + } + + test("generate unique name by appending a sequence number") { + val originalName = "label" + val fieldNames = Array("label_output", "label_output1", "label_output2") + val newName = RWrapperUtils.convertToUniqueName(originalName, fieldNames) + + assert(newName === "label_output3") + } + +} From a3097e2b314b1cd7446c6106a54fe4b49871c26d Mon Sep 17 00:00:00 2001 From: Jacek Laskowski Date: Fri, 2 Sep 2016 10:25:42 +0100 Subject: [PATCH 181/270] [SQL][DOC][MINOR] Add (Scala-specific) and (Java-specific) ## What changes were proposed in this pull request? Adds (Scala-specific) and (Java-specific) to Scaladoc. ## How was this patch tested? local build Author: Jacek Laskowski Closes #14891 from jaceklaskowski/scala-specifics. --- .../org/apache/spark/sql/KeyValueGroupedDataset.scala | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala index 61a3e6e0bc4f8..cea16fba76e47 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala @@ -79,6 +79,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Scala-specific) * Applies the given function to each group of data. For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an iterator containing elements of an arbitrary type which will be returned @@ -107,6 +108,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Java-specific) * Applies the given function to each group of data. For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an iterator containing elements of an arbitrary type which will be returned @@ -129,6 +131,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Scala-specific) * Applies the given function to each group of data. For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an element of arbitrary type which will be returned as a new [[Dataset]]. @@ -151,6 +154,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Java-specific) * Applies the given function to each group of data. 
For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an element of arbitrary type which will be returned as a new [[Dataset]]. @@ -172,6 +176,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Scala-specific) * Reduces the elements of each group of data using the specified binary function. * The given function must be commutative and associative or the result may be non-deterministic. * @@ -184,6 +189,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Java-specific) * Reduces the elements of each group of data using the specified binary function. * The given function must be commutative and associative or the result may be non-deterministic. * @@ -269,6 +275,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( def count(): Dataset[(K, Long)] = agg(functions.count("*").as(ExpressionEncoder[Long]())) /** + * (Scala-specific) * Applies the given function to each cogrouped data. For each unique group, the function will * be passed the grouping key and 2 iterators containing all elements in the group from * [[Dataset]] `this` and `other`. The function can return an iterator containing elements of an @@ -293,6 +300,7 @@ class KeyValueGroupedDataset[K, V] private[sql]( } /** + * (Java-specific) * Applies the given function to each cogrouped data. For each unique group, the function will * be passed the grouping key and 2 iterators containing all elements in the group from * [[Dataset]] `this` and `other`. The function can return an iterator containing elements of an From 7ee24dac8e779f6a9bf45371fdc2be83fb679cb2 Mon Sep 17 00:00:00 2001 From: Kousuke Saruta Date: Fri, 2 Sep 2016 10:26:43 +0100 Subject: [PATCH 182/270] [SPARK-17352][WEBUI] Executor computing time can be negative-number because of calculation error ## What changes were proposed in this pull request? In StagePage, executor-computing-time is calculated but calculation error can occur potentially because it's calculated by subtraction of floating numbers. Following capture is an example. capture-timeline ## How was this patch tested? Manual tests. Author: Kousuke Saruta Closes #14908 from sarutak/SPARK-17352. 
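As a side note for readers who have not hit this before, the negative values come purely from double rounding: each proportion is itself a rounded double, so subtracting them from the total can land a hair below zero. A minimal standalone Scala sketch (illustration only, not part of the patch; it reuses the classic 0.1 + 0.2 rounding case instead of real task metrics) shows the effect and the `math.max(..., 0)` clamp that the diff below applies to the executor computing time proportion:

```scala
object ProportionClampDemo {
  def main(args: Array[String]): Unit = {
    val otherProportions = Seq(0.1, 0.2)        // stand-ins for the other time proportions
    val total = 0.3                             // the whole they should add up to
    val naive = total - otherProportions.sum    // roughly -5.6e-17: slightly negative after rounding
    val clamped = math.max(total - otherProportions.sum, 0) // 0.0, mirroring the StagePage fix
    println(s"naive = $naive, clamped = $clamped")
  }
}
```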
--- core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala index a266164587e4e..de787f257737d 100644 --- a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala +++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala @@ -651,9 +651,9 @@ private[ui] class StagePage(parent: StagesTab) extends WebUIPage("stage") { } val executorComputingTime = executorRunTime - shuffleReadTime - shuffleWriteTime val executorComputingTimeProportion = - (100 - schedulerDelayProportion - shuffleReadTimeProportion - + math.max(100 - schedulerDelayProportion - shuffleReadTimeProportion - shuffleWriteTimeProportion - serializationTimeProportion - - deserializationTimeProportion - gettingResultTimeProportion) + deserializationTimeProportion - gettingResultTimeProportion, 0) val schedulerDelayProportionPos = 0 val deserializationTimeProportionPos = From 247a4faf06c1dd47a6543c56929cd0182a03e106 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Fri, 2 Sep 2016 22:31:01 +0800 Subject: [PATCH 183/270] [SPARK-16935][SQL] Verification of Function-related ExternalCatalog APIs ### What changes were proposed in this pull request? Function-related `HiveExternalCatalog` APIs do not have enough verification logics. After the PR, `HiveExternalCatalog` and `InMemoryCatalog` become consistent in the error handling. For example, below is the exception we got when calling `renameFunction`. ``` 15:13:40.369 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database db1, returning NoSuchObjectException 15:13:40.377 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database db2, returning NoSuchObjectException 15:13:40.739 ERROR DataNucleus.Datastore.Persist: Update of object "org.apache.hadoop.hive.metastore.model.MFunction205629e9" using statement "UPDATE FUNCS SET FUNC_NAME=? WHERE FUNC_ID=?" failed : org.apache.derby.shared.common.error.DerbySQLIntegrityConstraintViolationException: The statement was aborted because it would have caused a duplicate key value in a unique or primary key constraint or unique index identified by 'UNIQUEFUNCTION' defined on 'FUNCS'. at org.apache.derby.impl.jdbc.SQLExceptionFactory.getSQLException(Unknown Source) at org.apache.derby.impl.jdbc.Util.generateCsSQLException(Unknown Source) at org.apache.derby.impl.jdbc.TransactionResourceImpl.wrapInSQLException(Unknown Source) at org.apache.derby.impl.jdbc.TransactionResourceImpl.handleException(Unknown Source) ``` ### How was this patch tested? Improved the existing test cases to check whether the messages are right. Author: gatorsmile Closes #14521 from gatorsmile/functionChecking. 
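To make the intended behaviour concrete, here is a small self-contained Scala sketch of the guard pattern the patch centralizes (the exception classes and the map-backed state below are local stand-ins invented for the illustration; the real methods delegate to `InMemoryCatalog` or the Hive client after the checks pass):

```scala
object FunctionGuardDemo {
  // Local stand-ins mirroring the catalyst analysis exceptions used in the patch.
  class NoSuchFunctionException(db: String, func: String)
    extends Exception(s"Function '$func' not found in database '$db'")
  class FunctionAlreadyExistsException(db: String, func: String)
    extends Exception(s"Function '$func' already exists in database '$db'")

  import scala.collection.mutable
  private val functions = mutable.Map("db2" -> mutable.Set("func1", "func2"))

  private def functionExists(db: String, func: String): Boolean =
    functions.get(db).exists(_.contains(func))

  def requireFunctionExists(db: String, func: String): Unit =
    if (!functionExists(db, func)) throw new NoSuchFunctionException(db, func)

  def requireFunctionNotExists(db: String, func: String): Unit =
    if (functionExists(db, func)) throw new FunctionAlreadyExistsException(db, func)

  // Checks run before the underlying store is touched, so the caller sees a
  // catalog-level error rather than a backend-specific one.
  def renameFunction(db: String, oldName: String, newName: String): Unit = {
    requireFunctionExists(db, oldName)
    requireFunctionNotExists(db, newName)
    functions(db) -= oldName
    functions(db) += newName
  }

  def main(args: Array[String]): Unit = {
    try renameFunction("db2", "func1", "func2")
    catch { case e: FunctionAlreadyExistsException => println(e.getMessage) }
  }
}
```

With the checks performed up front, both catalog implementations raise the same analysis-level exception for a duplicate target name instead of surfacing a store-specific error like the Derby constraint violation quoted above.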
--- .../catalyst/catalog/ExternalCatalog.scala | 14 ++++++++++++- .../catalyst/catalog/InMemoryCatalog.scala | 19 ++--------------- .../catalog/ExternalCatalogSuite.scala | 21 ++++++++++--------- .../spark/sql/hive/HiveExternalCatalog.scala | 8 +++++++ 4 files changed, 34 insertions(+), 28 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala index 27e1810814c66..df72baaba29c5 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.catalyst.catalog -import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException +import org.apache.spark.sql.catalyst.analysis.{FunctionAlreadyExistsException, NoSuchDatabaseException, NoSuchFunctionException} /** @@ -38,6 +38,18 @@ abstract class ExternalCatalog { } } + protected def requireFunctionExists(db: String, funcName: String): Unit = { + if (!functionExists(db, funcName)) { + throw new NoSuchFunctionException(db = db, func = funcName) + } + } + + protected def requireFunctionNotExists(db: String, funcName: String): Unit = { + if (functionExists(db, funcName)) { + throw new FunctionAlreadyExistsException(db = db, func = funcName) + } + } + // -------------------------------------------------------------------------- // Databases // -------------------------------------------------------------------------- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala index b55ddcb54b457..4e361a536d444 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala @@ -63,18 +63,6 @@ class InMemoryCatalog( catalog(db).tables(table).partitions.contains(spec) } - private def requireFunctionExists(db: String, funcName: String): Unit = { - if (!functionExists(db, funcName)) { - throw new NoSuchFunctionException(db = db, func = funcName) - } - } - - private def requireFunctionNotExists(db: String, funcName: String): Unit = { - if (functionExists(db, funcName)) { - throw new FunctionAlreadyExistsException(db = db, func = funcName) - } - } - private def requireTableExists(db: String, table: String): Unit = { if (!tableExists(db, table)) { throw new NoSuchTableException(db = db, table = table) @@ -474,11 +462,8 @@ class InMemoryCatalog( override def createFunction(db: String, func: CatalogFunction): Unit = synchronized { requireDbExists(db) - if (functionExists(db, func.identifier.funcName)) { - throw new FunctionAlreadyExistsException(db = db, func = func.identifier.funcName) - } else { - catalog(db).functions.put(func.identifier.funcName, func) - } + requireFunctionNotExists(db, func.identifier.funcName) + catalog(db).functions.put(func.identifier.funcName, func) } override def dropFunction(db: String, funcName: String): Unit = synchronized { diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala index 19f8665383315..f283f4287c5bf 100644 --- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala @@ -25,6 +25,7 @@ import org.scalatest.BeforeAndAfterEach import org.apache.spark.SparkFunSuite import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} +import org.apache.spark.sql.catalyst.analysis.{FunctionAlreadyExistsException, NoSuchDatabaseException, NoSuchFunctionException} import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.types.StructType import org.apache.spark.util.Utils @@ -450,14 +451,14 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac test("create function when database does not exist") { val catalog = newBasicCatalog() - intercept[AnalysisException] { + intercept[NoSuchDatabaseException] { catalog.createFunction("does_not_exist", newFunc()) } } test("create function that already exists") { val catalog = newBasicCatalog() - intercept[AnalysisException] { + intercept[FunctionAlreadyExistsException] { catalog.createFunction("db2", newFunc("func1")) } } @@ -471,14 +472,14 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac test("drop function when database does not exist") { val catalog = newBasicCatalog() - intercept[AnalysisException] { + intercept[NoSuchDatabaseException] { catalog.dropFunction("does_not_exist", "something") } } test("drop function that does not exist") { val catalog = newBasicCatalog() - intercept[AnalysisException] { + intercept[NoSuchFunctionException] { catalog.dropFunction("db2", "does_not_exist") } } @@ -488,14 +489,14 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac assert(catalog.getFunction("db2", "func1") == CatalogFunction(FunctionIdentifier("func1", Some("db2")), funcClass, Seq.empty[FunctionResource])) - intercept[AnalysisException] { + intercept[NoSuchFunctionException] { catalog.getFunction("db2", "does_not_exist") } } test("get function when database does not exist") { val catalog = newBasicCatalog() - intercept[AnalysisException] { + intercept[NoSuchDatabaseException] { catalog.getFunction("does_not_exist", "func1") } } @@ -505,15 +506,15 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac val newName = "funcky" assert(catalog.getFunction("db2", "func1").className == funcClass) catalog.renameFunction("db2", "func1", newName) - intercept[AnalysisException] { catalog.getFunction("db2", "func1") } + intercept[NoSuchFunctionException] { catalog.getFunction("db2", "func1") } assert(catalog.getFunction("db2", newName).identifier.funcName == newName) assert(catalog.getFunction("db2", newName).className == funcClass) - intercept[AnalysisException] { catalog.renameFunction("db2", "does_not_exist", "me") } + intercept[NoSuchFunctionException] { catalog.renameFunction("db2", "does_not_exist", "me") } } test("rename function when database does not exist") { val catalog = newBasicCatalog() - intercept[AnalysisException] { + intercept[NoSuchDatabaseException] { catalog.renameFunction("does_not_exist", "func1", "func5") } } @@ -521,7 +522,7 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac test("rename function when new function already exists") { val catalog = newBasicCatalog() catalog.createFunction("db2", newFunc("func2", Some("db2"))) - intercept[AnalysisException] { + 
intercept[FunctionAlreadyExistsException] { catalog.renameFunction("db2", "func1", "func2") } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index ed87ac3c3e634..8541ae2322ada 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -570,31 +570,39 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat override def createFunction( db: String, funcDefinition: CatalogFunction): Unit = withClient { + requireDbExists(db) // Hive's metastore is case insensitive. However, Hive's createFunction does // not normalize the function name (unlike the getFunction part). So, // we are normalizing the function name. val functionName = funcDefinition.identifier.funcName.toLowerCase + requireFunctionNotExists(db, functionName) val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName) client.createFunction(db, funcDefinition.copy(identifier = functionIdentifier)) } override def dropFunction(db: String, name: String): Unit = withClient { + requireFunctionExists(db, name) client.dropFunction(db, name) } override def renameFunction(db: String, oldName: String, newName: String): Unit = withClient { + requireFunctionExists(db, oldName) + requireFunctionNotExists(db, newName) client.renameFunction(db, oldName, newName) } override def getFunction(db: String, funcName: String): CatalogFunction = withClient { + requireFunctionExists(db, funcName) client.getFunction(db, funcName) } override def functionExists(db: String, funcName: String): Boolean = withClient { + requireDbExists(db) client.functionExists(db, funcName) } override def listFunctions(db: String, pattern: String): Seq[String] = withClient { + requireDbExists(db) client.listFunctions(db, pattern) } From 806d8a8e980d8ba2f4261bceb393c40bafaa2f73 Mon Sep 17 00:00:00 2001 From: Robert Kruszewski Date: Fri, 2 Sep 2016 17:14:43 +0200 Subject: [PATCH 184/270] =?UTF-8?q?[SPARK-16984][SQL]=20don't=20try=20whol?= =?UTF-8?q?e=20dataset=20immediately=20when=20first=20partition=20doesn't?= =?UTF-8?q?=20have=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What changes were proposed in this pull request? Try increase number of partitions to try so we don't revert to all. ## How was this patch tested? Empirically. This is common case optimization. Author: Robert Kruszewski Closes #14573 from robert3005/robertk/execute-take-backoff. --- .../main/scala/org/apache/spark/rdd/RDD.scala | 7 +++-- .../spark/sql/execution/SparkPlan.scala | 28 +++++++++---------- .../apache/spark/sql/internal/SQLConf.scala | 10 +++++++ 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala index 2ee13dc4db5fd..10b5f8291a03a 100644 --- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala @@ -1296,6 +1296,7 @@ abstract class RDD[T: ClassTag]( * an exception if called on an RDD of `Nothing` or `Null`. 
*/ def take(num: Int): Array[T] = withScope { + val scaleUpFactor = Math.max(conf.getInt("spark.rdd.limit.scaleUpFactor", 4), 2) if (num == 0) { new Array[T](0) } else { @@ -1310,12 +1311,12 @@ abstract class RDD[T: ClassTag]( // If we didn't find any rows after the previous iteration, quadruple and retry. // Otherwise, interpolate the number of partitions we need to try, but overestimate // it by 50%. We also cap the estimation in the end. - if (buf.size == 0) { - numPartsToTry = partsScanned * 4 + if (buf.isEmpty) { + numPartsToTry = partsScanned * scaleUpFactor } else { // the left side of max is >=1 whenever partsScanned >= 2 numPartsToTry = Math.max((1.5 * num * partsScanned / buf.size).toInt - partsScanned, 1) - numPartsToTry = Math.min(numPartsToTry, partsScanned * 4) + numPartsToTry = Math.min(numPartsToTry, partsScanned * scaleUpFactor) } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala index 7f2e18586d347..6a2d97c9b1797 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala @@ -20,14 +20,13 @@ package org.apache.spark.sql.execution import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream} import scala.collection.mutable.ArrayBuffer -import scala.concurrent.{ExecutionContext, Future} -import scala.concurrent.duration._ +import scala.concurrent.ExecutionContext import org.apache.spark.{broadcast, SparkEnv} import org.apache.spark.internal.Logging import org.apache.spark.io.CompressionCodec import org.apache.spark.rdd.{RDD, RDDOperationScope} -import org.apache.spark.sql.{Row, SparkSession, SQLContext} +import org.apache.spark.sql.{Row, SparkSession} import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.codegen._ @@ -316,26 +315,25 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ // greater than totalParts because we actually cap it at totalParts in runJob. var numPartsToTry = 1L if (partsScanned > 0) { - // If we didn't find any rows after the first iteration, just try all partitions next. - // Otherwise, interpolate the number of partitions we need to try, but overestimate it - // by 50%. - if (buf.size == 0) { - numPartsToTry = totalParts - 1 + // If we didn't find any rows after the previous iteration, quadruple and retry. + // Otherwise, interpolate the number of partitions we need to try, but overestimate + // it by 50%. We also cap the estimation in the end. 
+ val limitScaleUpFactor = Math.max(sqlContext.conf.limitScaleUpFactor, 2) + if (buf.isEmpty) { + numPartsToTry = partsScanned * limitScaleUpFactor } else { - numPartsToTry = (1.5 * n * partsScanned / buf.size).toInt + // the left side of max is >=1 whenever partsScanned >= 2 + numPartsToTry = Math.max((1.5 * n * partsScanned / buf.size).toInt - partsScanned, 1) + numPartsToTry = Math.min(numPartsToTry, partsScanned * limitScaleUpFactor) } } - numPartsToTry = math.max(0, numPartsToTry) // guard against negative num of partitions - val left = n - buf.size val p = partsScanned.until(math.min(partsScanned + numPartsToTry, totalParts).toInt) val sc = sqlContext.sparkContext val res = sc.runJob(childRDD, - (it: Iterator[Array[Byte]]) => if (it.hasNext) it.next() else Array.empty, p) + (it: Iterator[Array[Byte]]) => if (it.hasNext) it.next() else Array.empty[Byte], p) - res.foreach { r => - decodeUnsafeRows(r.asInstanceOf[Array[Byte]]).foreach(buf.+=) - } + buf ++= res.flatMap(decodeUnsafeRows) partsScanned += p.size } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index d3440a2644163..a54342f82e249 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -116,6 +116,14 @@ object SQLConf { .longConf .createWithDefault(10L * 1024 * 1024) + val LIMIT_SCALE_UP_FACTOR = SQLConfigBuilder("spark.sql.limit.scaleUpFactor") + .internal() + .doc("Minimal increase rate in number of partitions between attempts when executing a take " + + "on a query. Higher values lead to more partitions read. Lower values might lead to " + + "longer execution times as more jobs will be run") + .intConf + .createWithDefault(4) + val ENABLE_FALL_BACK_TO_HDFS_FOR_STATS = SQLConfigBuilder("spark.sql.statistics.fallBackToHdfs") .doc("If the table statistics are not available from table metadata enable fall back to hdfs." + @@ -638,6 +646,8 @@ private[sql] class SQLConf extends Serializable with CatalystConf with Logging { def autoBroadcastJoinThreshold: Long = getConf(AUTO_BROADCASTJOIN_THRESHOLD) + def limitScaleUpFactor: Int = getConf(LIMIT_SCALE_UP_FACTOR) + def fallBackToHdfsForStatsEnabled: Boolean = getConf(ENABLE_FALL_BACK_TO_HDFS_FOR_STATS) def preferSortMergeJoin: Boolean = getConf(PREFER_SORTMERGEJOIN) From 6bcbf9b74351b5ac5221e3c309cb98e6f9cc7c5a Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Fri, 2 Sep 2016 18:53:12 +0200 Subject: [PATCH 185/270] [SPARK-17351] Refactor JDBCRDD to expose ResultSet -> Seq[Row] utility methods This patch refactors the internals of the JDBC data source in order to allow some of its code to be re-used in an automated comparison testing harness. Here are the key changes: - Move the JDBC `ResultSetMetadata` to `StructType` conversion logic from `JDBCRDD.resolveTable()` to the `JdbcUtils` object (as a new `getSchema(ResultSet, JdbcDialect)` method), allowing it to be applied on `ResultSet`s that are created elsewhere. - Move the `ResultSet` to `InternalRow` conversion methods from `JDBCRDD` to `JdbcUtils`: - It makes sense to move the `JDBCValueGetter` type and `makeGetter` functions here given that their write-path counterparts (`JDBCValueSetter`) are already in `JdbcUtils`. - Add an internal `resultSetToSparkInternalRows` method which takes a `ResultSet` and schema and returns an `Iterator[InternalRow]`. This effectively extracts the main loop of `JDBCRDD` into its own method. 
- Add a public `resultSetToRows` method to `JdbcUtils`, which wraps the minimal machinery around `resultSetToSparkInternalRows` in order to allow it to be called from outside of a Spark job. - Make `JdbcDialect.get` into a `DeveloperApi` (`JdbcDialect` itself is already a `DeveloperApi`). Put together, these changes enable the following testing pattern: ```scala val jdbResultSet: ResultSet = conn.prepareStatement(query).executeQuery() val resultSchema: StructType = JdbcUtils.getSchema(jdbResultSet, JdbcDialects.get("jdbc:postgresql")) val jdbcRows: Seq[Row] = JdbcUtils.resultSetToRows(jdbResultSet, schema).toSeq checkAnswer(sparkResult, jdbcRows) // in a test case ``` Author: Josh Rosen Closes #14907 from JoshRosen/modularize-jdbc-internals. --- .../execution/datasources/jdbc/JDBCRDD.scala | 340 ++---------------- .../datasources/jdbc/JdbcUtils.scala | 302 +++++++++++++++- .../apache/spark/sql/jdbc/JdbcDialects.scala | 2 +- 3 files changed, 335 insertions(+), 309 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala index 9b5088fbfd409..a7da29f9252b3 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.execution.datasources.jdbc -import java.sql.{Connection, Date, ResultSet, ResultSetMetaData, SQLException, Timestamp} +import java.sql.{Connection, Date, PreparedStatement, ResultSet, SQLException, Timestamp} import java.util.Properties import scala.util.control.NonFatal @@ -28,12 +28,10 @@ import org.apache.spark.{Partition, SparkContext, TaskContext} import org.apache.spark.internal.Logging import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow -import org.apache.spark.sql.catalyst.expressions.{MutableRow, SpecificMutableRow} -import org.apache.spark.sql.catalyst.util.{DateTimeUtils, GenericArrayData} import org.apache.spark.sql.jdbc.JdbcDialects import org.apache.spark.sql.sources._ import org.apache.spark.sql.types._ -import org.apache.spark.unsafe.types.UTF8String +import org.apache.spark.util.CompletionIterator /** * Data corresponding to one partition of a JDBCRDD. @@ -44,68 +42,6 @@ case class JDBCPartition(whereClause: String, idx: Int) extends Partition { object JDBCRDD extends Logging { - /** - * Maps a JDBC type to a Catalyst type. This function is called only when - * the JdbcDialect class corresponding to your database driver returns null. - * - * @param sqlType - A field of java.sql.Types - * @return The Catalyst type corresponding to sqlType. 
- */ - private def getCatalystType( - sqlType: Int, - precision: Int, - scale: Int, - signed: Boolean): DataType = { - val answer = sqlType match { - // scalastyle:off - case java.sql.Types.ARRAY => null - case java.sql.Types.BIGINT => if (signed) { LongType } else { DecimalType(20,0) } - case java.sql.Types.BINARY => BinaryType - case java.sql.Types.BIT => BooleanType // @see JdbcDialect for quirks - case java.sql.Types.BLOB => BinaryType - case java.sql.Types.BOOLEAN => BooleanType - case java.sql.Types.CHAR => StringType - case java.sql.Types.CLOB => StringType - case java.sql.Types.DATALINK => null - case java.sql.Types.DATE => DateType - case java.sql.Types.DECIMAL - if precision != 0 || scale != 0 => DecimalType.bounded(precision, scale) - case java.sql.Types.DECIMAL => DecimalType.SYSTEM_DEFAULT - case java.sql.Types.DISTINCT => null - case java.sql.Types.DOUBLE => DoubleType - case java.sql.Types.FLOAT => FloatType - case java.sql.Types.INTEGER => if (signed) { IntegerType } else { LongType } - case java.sql.Types.JAVA_OBJECT => null - case java.sql.Types.LONGNVARCHAR => StringType - case java.sql.Types.LONGVARBINARY => BinaryType - case java.sql.Types.LONGVARCHAR => StringType - case java.sql.Types.NCHAR => StringType - case java.sql.Types.NCLOB => StringType - case java.sql.Types.NULL => null - case java.sql.Types.NUMERIC - if precision != 0 || scale != 0 => DecimalType.bounded(precision, scale) - case java.sql.Types.NUMERIC => DecimalType.SYSTEM_DEFAULT - case java.sql.Types.NVARCHAR => StringType - case java.sql.Types.OTHER => null - case java.sql.Types.REAL => DoubleType - case java.sql.Types.REF => StringType - case java.sql.Types.ROWID => LongType - case java.sql.Types.SMALLINT => IntegerType - case java.sql.Types.SQLXML => StringType - case java.sql.Types.STRUCT => StringType - case java.sql.Types.TIME => TimestampType - case java.sql.Types.TIMESTAMP => TimestampType - case java.sql.Types.TINYINT => IntegerType - case java.sql.Types.VARBINARY => BinaryType - case java.sql.Types.VARCHAR => StringType - case _ => null - // scalastyle:on - } - - if (answer == null) throw new SQLException("Unsupported type " + sqlType) - answer - } - /** * Takes a (schema, table) specification and returns the table's Catalyst * schema. 
@@ -126,37 +62,7 @@ object JDBCRDD extends Logging { try { val rs = statement.executeQuery() try { - val rsmd = rs.getMetaData - val ncols = rsmd.getColumnCount - val fields = new Array[StructField](ncols) - var i = 0 - while (i < ncols) { - val columnName = rsmd.getColumnLabel(i + 1) - val dataType = rsmd.getColumnType(i + 1) - val typeName = rsmd.getColumnTypeName(i + 1) - val fieldSize = rsmd.getPrecision(i + 1) - val fieldScale = rsmd.getScale(i + 1) - val isSigned = { - try { - rsmd.isSigned(i + 1) - } catch { - // Workaround for HIVE-14684: - case e: SQLException if - e.getMessage == "Method not supported" && - rsmd.getClass.getName == "org.apache.hive.jdbc.HiveResultSetMetaData" => true - } - } - val nullable = rsmd.isNullable(i + 1) != ResultSetMetaData.columnNoNulls - val metadata = new MetadataBuilder() - .putString("name", columnName) - .putLong("scale", fieldScale) - val columnType = - dialect.getCatalystType(dataType, typeName, fieldSize, metadata).getOrElse( - getCatalystType(dataType, fieldSize, fieldScale, isSigned)) - fields(i) = StructField(columnName, columnType, nullable, metadata.build()) - i = i + 1 - } - return new StructType(fields) + return JdbcUtils.getSchema(rs, dialect) } finally { rs.close() } @@ -331,195 +237,15 @@ private[jdbc] class JDBCRDD( } } - // A `JDBCValueGetter` is responsible for getting a value from `ResultSet` into a field - // for `MutableRow`. The last argument `Int` means the index for the value to be set in - // the row and also used for the value in `ResultSet`. - private type JDBCValueGetter = (ResultSet, MutableRow, Int) => Unit - - /** - * Creates `JDBCValueGetter`s according to [[StructType]], which can set - * each value from `ResultSet` to each field of [[MutableRow]] correctly. - */ - def makeGetters(schema: StructType): Array[JDBCValueGetter] = - schema.fields.map(sf => makeGetter(sf.dataType, sf.metadata)) - - private def makeGetter(dt: DataType, metadata: Metadata): JDBCValueGetter = dt match { - case BooleanType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.setBoolean(pos, rs.getBoolean(pos + 1)) - - case DateType => - (rs: ResultSet, row: MutableRow, pos: Int) => - // DateTimeUtils.fromJavaDate does not handle null value, so we need to check it. - val dateVal = rs.getDate(pos + 1) - if (dateVal != null) { - row.setInt(pos, DateTimeUtils.fromJavaDate(dateVal)) - } else { - row.update(pos, null) - } - - // When connecting with Oracle DB through JDBC, the precision and scale of BigDecimal - // object returned by ResultSet.getBigDecimal is not correctly matched to the table - // schema reported by ResultSetMetaData.getPrecision and ResultSetMetaData.getScale. - // If inserting values like 19999 into a column with NUMBER(12, 2) type, you get through - // a BigDecimal object with scale as 0. But the dataframe schema has correct type as - // DecimalType(12, 2). Thus, after saving the dataframe into parquet file and then - // retrieve it, you will get wrong result 199.99. - // So it is needed to set precision and scale for Decimal based on JDBC metadata. 
- case DecimalType.Fixed(p, s) => - (rs: ResultSet, row: MutableRow, pos: Int) => - val decimal = - nullSafeConvert[java.math.BigDecimal](rs.getBigDecimal(pos + 1), d => Decimal(d, p, s)) - row.update(pos, decimal) - - case DoubleType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.setDouble(pos, rs.getDouble(pos + 1)) - - case FloatType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.setFloat(pos, rs.getFloat(pos + 1)) - - case IntegerType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.setInt(pos, rs.getInt(pos + 1)) - - case LongType if metadata.contains("binarylong") => - (rs: ResultSet, row: MutableRow, pos: Int) => - val bytes = rs.getBytes(pos + 1) - var ans = 0L - var j = 0 - while (j < bytes.size) { - ans = 256 * ans + (255 & bytes(j)) - j = j + 1 - } - row.setLong(pos, ans) - - case LongType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.setLong(pos, rs.getLong(pos + 1)) - - case ShortType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.setShort(pos, rs.getShort(pos + 1)) - - case StringType => - (rs: ResultSet, row: MutableRow, pos: Int) => - // TODO(davies): use getBytes for better performance, if the encoding is UTF-8 - row.update(pos, UTF8String.fromString(rs.getString(pos + 1))) - - case TimestampType => - (rs: ResultSet, row: MutableRow, pos: Int) => - val t = rs.getTimestamp(pos + 1) - if (t != null) { - row.setLong(pos, DateTimeUtils.fromJavaTimestamp(t)) - } else { - row.update(pos, null) - } - - case BinaryType => - (rs: ResultSet, row: MutableRow, pos: Int) => - row.update(pos, rs.getBytes(pos + 1)) - - case ArrayType(et, _) => - val elementConversion = et match { - case TimestampType => - (array: Object) => - array.asInstanceOf[Array[java.sql.Timestamp]].map { timestamp => - nullSafeConvert(timestamp, DateTimeUtils.fromJavaTimestamp) - } - - case StringType => - (array: Object) => - array.asInstanceOf[Array[java.lang.String]] - .map(UTF8String.fromString) - - case DateType => - (array: Object) => - array.asInstanceOf[Array[java.sql.Date]].map { date => - nullSafeConvert(date, DateTimeUtils.fromJavaDate) - } - - case dt: DecimalType => - (array: Object) => - array.asInstanceOf[Array[java.math.BigDecimal]].map { decimal => - nullSafeConvert[java.math.BigDecimal]( - decimal, d => Decimal(d, dt.precision, dt.scale)) - } - - case LongType if metadata.contains("binarylong") => - throw new IllegalArgumentException(s"Unsupported array element " + - s"type ${dt.simpleString} based on binary") - - case ArrayType(_, _) => - throw new IllegalArgumentException("Nested arrays unsupported") - - case _ => (array: Object) => array.asInstanceOf[Array[Any]] - } - - (rs: ResultSet, row: MutableRow, pos: Int) => - val array = nullSafeConvert[Object]( - rs.getArray(pos + 1).getArray, - array => new GenericArrayData(elementConversion.apply(array))) - row.update(pos, array) - - case _ => throw new IllegalArgumentException(s"Unsupported type ${dt.simpleString}") - } - /** * Runs the SQL query against the JDBC driver. 
* */ - override def compute(thePart: Partition, context: TaskContext): Iterator[InternalRow] = - new Iterator[InternalRow] { + override def compute(thePart: Partition, context: TaskContext): Iterator[InternalRow] = { var closed = false - var finished = false - var gotNext = false - var nextValue: InternalRow = null - - context.addTaskCompletionListener{ context => close() } - val inputMetrics = context.taskMetrics().inputMetrics - val part = thePart.asInstanceOf[JDBCPartition] - val conn = getConnection() - val dialect = JdbcDialects.get(url) - import scala.collection.JavaConverters._ - dialect.beforeFetch(conn, properties.asScala.toMap) - - // H2's JDBC driver does not support the setSchema() method. We pass a - // fully-qualified table name in the SELECT statement. I don't know how to - // talk about a table in a completely portable way. - - val myWhereClause = getWhereClause(part) - - val sqlText = s"SELECT $columnList FROM $fqTable $myWhereClause" - val stmt = conn.prepareStatement(sqlText, - ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY) - val fetchSize = properties.getProperty(JdbcUtils.JDBC_BATCH_FETCH_SIZE, "0").toInt - require(fetchSize >= 0, - s"Invalid value `${fetchSize.toString}` for parameter " + - s"`${JdbcUtils.JDBC_BATCH_FETCH_SIZE}`. The minimum value is 0. When the value is 0, " + - "the JDBC driver ignores the value and does the estimates.") - stmt.setFetchSize(fetchSize) - val rs = stmt.executeQuery() - - val getters: Array[JDBCValueGetter] = makeGetters(schema) - val mutableRow = new SpecificMutableRow(schema.fields.map(x => x.dataType)) - - def getNext(): InternalRow = { - if (rs.next()) { - inputMetrics.incRecordsRead(1) - var i = 0 - while (i < getters.length) { - getters(i).apply(rs, mutableRow, i) - if (rs.wasNull) mutableRow.setNullAt(i) - i = i + 1 - } - mutableRow - } else { - finished = true - null.asInstanceOf[InternalRow] - } - } + var rs: ResultSet = null + var stmt: PreparedStatement = null + var conn: Connection = null def close() { if (closed) return @@ -555,33 +281,33 @@ private[jdbc] class JDBCRDD( closed = true } - override def hasNext: Boolean = { - if (!finished) { - if (!gotNext) { - nextValue = getNext() - if (finished) { - close() - } - gotNext = true - } - } - !finished - } + context.addTaskCompletionListener{ context => close() } - override def next(): InternalRow = { - if (!hasNext) { - throw new NoSuchElementException("End of stream") - } - gotNext = false - nextValue - } - } + val inputMetrics = context.taskMetrics().inputMetrics + val part = thePart.asInstanceOf[JDBCPartition] + conn = getConnection() + val dialect = JdbcDialects.get(url) + import scala.collection.JavaConverters._ + dialect.beforeFetch(conn, properties.asScala.toMap) - private def nullSafeConvert[T](input: T, f: T => Any): Any = { - if (input == null) { - null - } else { - f(input) - } + // H2's JDBC driver does not support the setSchema() method. We pass a + // fully-qualified table name in the SELECT statement. I don't know how to + // talk about a table in a completely portable way. + + val myWhereClause = getWhereClause(part) + + val sqlText = s"SELECT $columnList FROM $fqTable $myWhereClause" + stmt = conn.prepareStatement(sqlText, + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY) + val fetchSize = properties.getProperty(JdbcUtils.JDBC_BATCH_FETCH_SIZE, "0").toInt + require(fetchSize >= 0, + s"Invalid value `${fetchSize.toString}` for parameter " + + s"`${JdbcUtils.JDBC_BATCH_FETCH_SIZE}`. The minimum value is 0. 
When the value is 0, " + + "the JDBC driver ignores the value and does the estimates.") + stmt.setFetchSize(fetchSize) + rs = stmt.executeQuery() + val rowsIterator = JdbcUtils.resultSetToSparkInternalRows(rs, schema, inputMetrics) + + CompletionIterator[InternalRow, Iterator[InternalRow]](rowsIterator, close()) } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala index 37153e545a0b8..132472ad0ce87 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala @@ -17,17 +17,25 @@ package org.apache.spark.sql.execution.datasources.jdbc -import java.sql.{Connection, Driver, DriverManager, PreparedStatement, SQLException} +import java.sql.{Connection, Driver, DriverManager, PreparedStatement, ResultSet, ResultSetMetaData, SQLException} import java.util.Properties import scala.collection.JavaConverters._ import scala.util.Try import scala.util.control.NonFatal +import org.apache.spark.TaskContext +import org.apache.spark.executor.InputMetrics import org.apache.spark.internal.Logging import org.apache.spark.sql.{DataFrame, Row} +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.encoders.RowEncoder +import org.apache.spark.sql.catalyst.expressions.{MutableRow, SpecificMutableRow} +import org.apache.spark.sql.catalyst.util.{DateTimeUtils, GenericArrayData} import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects, JdbcType} import org.apache.spark.sql.types._ +import org.apache.spark.unsafe.types.UTF8String +import org.apache.spark.util.NextIterator /** * Util functions for JDBC tables. @@ -127,6 +135,7 @@ object JdbcUtils extends Logging { /** * Retrieve standard jdbc types. + * * @param dt The datatype (e.g. [[org.apache.spark.sql.types.StringType]]) * @return The default JdbcType for this DataType */ @@ -154,6 +163,297 @@ object JdbcUtils extends Logging { throw new IllegalArgumentException(s"Can't get JDBC type for ${dt.simpleString}")) } + /** + * Maps a JDBC type to a Catalyst type. This function is called only when + * the JdbcDialect class corresponding to your database driver returns null. + * + * @param sqlType - A field of java.sql.Types + * @return The Catalyst type corresponding to sqlType. 
+ */ + private def getCatalystType( + sqlType: Int, + precision: Int, + scale: Int, + signed: Boolean): DataType = { + val answer = sqlType match { + // scalastyle:off + case java.sql.Types.ARRAY => null + case java.sql.Types.BIGINT => if (signed) { LongType } else { DecimalType(20,0) } + case java.sql.Types.BINARY => BinaryType + case java.sql.Types.BIT => BooleanType // @see JdbcDialect for quirks + case java.sql.Types.BLOB => BinaryType + case java.sql.Types.BOOLEAN => BooleanType + case java.sql.Types.CHAR => StringType + case java.sql.Types.CLOB => StringType + case java.sql.Types.DATALINK => null + case java.sql.Types.DATE => DateType + case java.sql.Types.DECIMAL + if precision != 0 || scale != 0 => DecimalType.bounded(precision, scale) + case java.sql.Types.DECIMAL => DecimalType.SYSTEM_DEFAULT + case java.sql.Types.DISTINCT => null + case java.sql.Types.DOUBLE => DoubleType + case java.sql.Types.FLOAT => FloatType + case java.sql.Types.INTEGER => if (signed) { IntegerType } else { LongType } + case java.sql.Types.JAVA_OBJECT => null + case java.sql.Types.LONGNVARCHAR => StringType + case java.sql.Types.LONGVARBINARY => BinaryType + case java.sql.Types.LONGVARCHAR => StringType + case java.sql.Types.NCHAR => StringType + case java.sql.Types.NCLOB => StringType + case java.sql.Types.NULL => null + case java.sql.Types.NUMERIC + if precision != 0 || scale != 0 => DecimalType.bounded(precision, scale) + case java.sql.Types.NUMERIC => DecimalType.SYSTEM_DEFAULT + case java.sql.Types.NVARCHAR => StringType + case java.sql.Types.OTHER => null + case java.sql.Types.REAL => DoubleType + case java.sql.Types.REF => StringType + case java.sql.Types.ROWID => LongType + case java.sql.Types.SMALLINT => IntegerType + case java.sql.Types.SQLXML => StringType + case java.sql.Types.STRUCT => StringType + case java.sql.Types.TIME => TimestampType + case java.sql.Types.TIMESTAMP => TimestampType + case java.sql.Types.TINYINT => IntegerType + case java.sql.Types.VARBINARY => BinaryType + case java.sql.Types.VARCHAR => StringType + case _ => null + // scalastyle:on + } + + if (answer == null) throw new SQLException("Unsupported type " + sqlType) + answer + } + + /** + * Takes a [[ResultSet]] and returns its Catalyst schema. + * + * @return A [[StructType]] giving the Catalyst schema. + * @throws SQLException if the schema contains an unsupported type. 
+ */ + def getSchema(resultSet: ResultSet, dialect: JdbcDialect): StructType = { + val rsmd = resultSet.getMetaData + val ncols = rsmd.getColumnCount + val fields = new Array[StructField](ncols) + var i = 0 + while (i < ncols) { + val columnName = rsmd.getColumnLabel(i + 1) + val dataType = rsmd.getColumnType(i + 1) + val typeName = rsmd.getColumnTypeName(i + 1) + val fieldSize = rsmd.getPrecision(i + 1) + val fieldScale = rsmd.getScale(i + 1) + val isSigned = { + try { + rsmd.isSigned(i + 1) + } catch { + // Workaround for HIVE-14684: + case e: SQLException if + e.getMessage == "Method not supported" && + rsmd.getClass.getName == "org.apache.hive.jdbc.HiveResultSetMetaData" => true + } + } + val nullable = rsmd.isNullable(i + 1) != ResultSetMetaData.columnNoNulls + val metadata = new MetadataBuilder() + .putString("name", columnName) + .putLong("scale", fieldScale) + val columnType = + dialect.getCatalystType(dataType, typeName, fieldSize, metadata).getOrElse( + getCatalystType(dataType, fieldSize, fieldScale, isSigned)) + fields(i) = StructField(columnName, columnType, nullable, metadata.build()) + i = i + 1 + } + new StructType(fields) + } + + /** + * Convert a [[ResultSet]] into an iterator of Catalyst Rows. + */ + def resultSetToRows(resultSet: ResultSet, schema: StructType): Iterator[Row] = { + val inputMetrics = + Option(TaskContext.get()).map(_.taskMetrics().inputMetrics).getOrElse(new InputMetrics) + val encoder = RowEncoder(schema).resolveAndBind() + val internalRows = resultSetToSparkInternalRows(resultSet, schema, inputMetrics) + internalRows.map(encoder.fromRow) + } + + private[spark] def resultSetToSparkInternalRows( + resultSet: ResultSet, + schema: StructType, + inputMetrics: InputMetrics): Iterator[InternalRow] = { + new NextIterator[InternalRow] { + private[this] val rs = resultSet + private[this] val getters: Array[JDBCValueGetter] = makeGetters(schema) + private[this] val mutableRow = new SpecificMutableRow(schema.fields.map(x => x.dataType)) + + override protected def close(): Unit = { + try { + rs.close() + } catch { + case e: Exception => logWarning("Exception closing resultset", e) + } + } + + override protected def getNext(): InternalRow = { + if (rs.next()) { + inputMetrics.incRecordsRead(1) + var i = 0 + while (i < getters.length) { + getters(i).apply(rs, mutableRow, i) + if (rs.wasNull) mutableRow.setNullAt(i) + i = i + 1 + } + mutableRow + } else { + finished = true + null.asInstanceOf[InternalRow] + } + } + } + } + + // A `JDBCValueGetter` is responsible for getting a value from `ResultSet` into a field + // for `MutableRow`. The last argument `Int` means the index for the value to be set in + // the row and also used for the value in `ResultSet`. + private type JDBCValueGetter = (ResultSet, MutableRow, Int) => Unit + + /** + * Creates `JDBCValueGetter`s according to [[StructType]], which can set + * each value from `ResultSet` to each field of [[MutableRow]] correctly. + */ + private def makeGetters(schema: StructType): Array[JDBCValueGetter] = + schema.fields.map(sf => makeGetter(sf.dataType, sf.metadata)) + + private def makeGetter(dt: DataType, metadata: Metadata): JDBCValueGetter = dt match { + case BooleanType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setBoolean(pos, rs.getBoolean(pos + 1)) + + case DateType => + (rs: ResultSet, row: MutableRow, pos: Int) => + // DateTimeUtils.fromJavaDate does not handle null value, so we need to check it. 
+ val dateVal = rs.getDate(pos + 1) + if (dateVal != null) { + row.setInt(pos, DateTimeUtils.fromJavaDate(dateVal)) + } else { + row.update(pos, null) + } + + // When connecting with Oracle DB through JDBC, the precision and scale of BigDecimal + // object returned by ResultSet.getBigDecimal is not correctly matched to the table + // schema reported by ResultSetMetaData.getPrecision and ResultSetMetaData.getScale. + // If inserting values like 19999 into a column with NUMBER(12, 2) type, you get through + // a BigDecimal object with scale as 0. But the dataframe schema has correct type as + // DecimalType(12, 2). Thus, after saving the dataframe into parquet file and then + // retrieve it, you will get wrong result 199.99. + // So it is needed to set precision and scale for Decimal based on JDBC metadata. + case DecimalType.Fixed(p, s) => + (rs: ResultSet, row: MutableRow, pos: Int) => + val decimal = + nullSafeConvert[java.math.BigDecimal](rs.getBigDecimal(pos + 1), d => Decimal(d, p, s)) + row.update(pos, decimal) + + case DoubleType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setDouble(pos, rs.getDouble(pos + 1)) + + case FloatType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setFloat(pos, rs.getFloat(pos + 1)) + + case IntegerType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setInt(pos, rs.getInt(pos + 1)) + + case LongType if metadata.contains("binarylong") => + (rs: ResultSet, row: MutableRow, pos: Int) => + val bytes = rs.getBytes(pos + 1) + var ans = 0L + var j = 0 + while (j < bytes.size) { + ans = 256 * ans + (255 & bytes(j)) + j = j + 1 + } + row.setLong(pos, ans) + + case LongType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setLong(pos, rs.getLong(pos + 1)) + + case ShortType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.setShort(pos, rs.getShort(pos + 1)) + + case StringType => + (rs: ResultSet, row: MutableRow, pos: Int) => + // TODO(davies): use getBytes for better performance, if the encoding is UTF-8 + row.update(pos, UTF8String.fromString(rs.getString(pos + 1))) + + case TimestampType => + (rs: ResultSet, row: MutableRow, pos: Int) => + val t = rs.getTimestamp(pos + 1) + if (t != null) { + row.setLong(pos, DateTimeUtils.fromJavaTimestamp(t)) + } else { + row.update(pos, null) + } + + case BinaryType => + (rs: ResultSet, row: MutableRow, pos: Int) => + row.update(pos, rs.getBytes(pos + 1)) + + case ArrayType(et, _) => + val elementConversion = et match { + case TimestampType => + (array: Object) => + array.asInstanceOf[Array[java.sql.Timestamp]].map { timestamp => + nullSafeConvert(timestamp, DateTimeUtils.fromJavaTimestamp) + } + + case StringType => + (array: Object) => + array.asInstanceOf[Array[java.lang.String]] + .map(UTF8String.fromString) + + case DateType => + (array: Object) => + array.asInstanceOf[Array[java.sql.Date]].map { date => + nullSafeConvert(date, DateTimeUtils.fromJavaDate) + } + + case dt: DecimalType => + (array: Object) => + array.asInstanceOf[Array[java.math.BigDecimal]].map { decimal => + nullSafeConvert[java.math.BigDecimal]( + decimal, d => Decimal(d, dt.precision, dt.scale)) + } + + case LongType if metadata.contains("binarylong") => + throw new IllegalArgumentException(s"Unsupported array element " + + s"type ${dt.simpleString} based on binary") + + case ArrayType(_, _) => + throw new IllegalArgumentException("Nested arrays unsupported") + + case _ => (array: Object) => array.asInstanceOf[Array[Any]] + } + + (rs: ResultSet, row: MutableRow, pos: Int) => + val array = 
nullSafeConvert[Object]( + rs.getArray(pos + 1).getArray, + array => new GenericArrayData(elementConversion.apply(array))) + row.update(pos, array) + + case _ => throw new IllegalArgumentException(s"Unsupported type ${dt.simpleString}") + } + + private def nullSafeConvert[T](input: T, f: T => Any): Any = { + if (input == null) { + null + } else { + f(input) + } + } + // A `JDBCValueSetter` is responsible for setting a value from `Row` into a field for // `PreparedStatement`. The last argument `Int` means the index for the value to be set // in the SQL statement and also used for the value in `Row`. diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala index 78107809a1cf8..3a6d5b7f1ced6 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala @@ -162,7 +162,7 @@ object JdbcDialects { /** * Fetch the JdbcDialect class corresponding to a given database url. */ - private[sql] def get(url: String): JdbcDialect = { + def get(url: String): JdbcDialect = { val matchingDialects = dialects.filter(_.canHandle(url)) matchingDialects.length match { case 0 => NoopDialect From ea662286561aa9fe321cb0a0e10cdeaf60440b90 Mon Sep 17 00:00:00 2001 From: Jeff Zhang Date: Fri, 2 Sep 2016 10:08:14 -0700 Subject: [PATCH 186/270] [SPARK-17261] [PYSPARK] Using HiveContext after re-creating SparkContext in Spark 2.0 throws "Java.lang.illegalStateException: Cannot call methods on a stopped sparkContext" ## What changes were proposed in this pull request? Set SparkSession._instantiatedContext as None so that we can recreate SparkSession again. ## How was this patch tested? Tested manually using the following command in pyspark shell ``` spark.stop() spark = SparkSession.builder.enableHiveSupport().getOrCreate() spark.sql("show databases").show() ``` Author: Jeff Zhang Closes #14857 from zjffdu/SPARK-17261. --- python/pyspark/sql/session.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py index 61fa107497c60..8418abf99c8d5 100644 --- a/python/pyspark/sql/session.py +++ b/python/pyspark/sql/session.py @@ -595,6 +595,7 @@ def stop(self): """Stop the underlying :class:`SparkContext`. """ self._sc.stop() + SparkSession._instantiatedContext = None @since(2.0) def __enter__(self): From 812333e4336113e44d2c9473bcba1cee4a989d2c Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Fri, 2 Sep 2016 10:12:10 -0700 Subject: [PATCH 187/270] [SPARK-17376][SPARKR] Spark version should be available in R ## What changes were proposed in this pull request? Add sparkR.version() API. ``` > sparkR.version() [1] "2.1.0-SNAPSHOT" ``` ## How was this patch tested? manual, unit tests Author: Felix Cheung Closes #14935 from felixcheung/rsparksessionversion. 
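For completeness, a minimal JVM-side sketch of what the new `sparkR.version()` reaches through the R-to-JVM bridge (the session setup here is invented for the illustration; the R wrapper simply invokes the same `version` accessor via `callJMethod`):

```scala
import org.apache.spark.sql.SparkSession

object VersionDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("version-demo").getOrCreate()
    // The accessor the R wrapper calls with callJMethod(sparkSession, "version").
    println(spark.version) // e.g. "2.1.0-SNAPSHOT"
    spark.stop()
  }
}
```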
--- R/pkg/NAMESPACE | 13 +++++++------ R/pkg/R/SQLContext.R | 19 +++++++++++++++++++ R/pkg/inst/tests/testthat/test_sparkSQL.R | 6 ++++++ 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index 5e625b2d8dbb4..ce41b512a4e11 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -15,8 +15,15 @@ export("sparkR.init") export("sparkR.stop") export("sparkR.session.stop") export("sparkR.conf") +export("sparkR.version") export("print.jobj") +export("sparkR.newJObject") +export("sparkR.callJMethod") +export("sparkR.callJStatic") + +export("install.spark") + export("sparkRSQL.init", "sparkRHive.init") @@ -363,9 +370,3 @@ S3method(structField, character) S3method(structField, jobj) S3method(structType, jobj) S3method(structType, structField) - -export("sparkR.newJObject") -export("sparkR.callJMethod") -export("sparkR.callJStatic") - -export("install.spark") diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R index 572e71e25b80b..a1404543be127 100644 --- a/R/pkg/R/SQLContext.R +++ b/R/pkg/R/SQLContext.R @@ -156,6 +156,25 @@ sparkR.conf <- function(key, defaultValue) { } } +#' Get version of Spark on which this application is running +#' +#' Get version of Spark on which this application is running. +#' +#' @return a character string of the Spark version +#' @rdname sparkR.version +#' @name sparkR.version +#' @export +#' @examples +#'\dontrun{ +#' sparkR.session() +#' version <- sparkR.version() +#' } +#' @note sparkR.version since 2.1.0 +sparkR.version <- function() { + sparkSession <- getSparkSession() + callJMethod(sparkSession, "version") +} + getDefaultSqlSource <- function() { l <- sparkR.conf("spark.sql.sources.default", "org.apache.spark.sql.parquet") l[["spark.sql.sources.default"]] diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R index 683a15cb4ffcd..aac3f6220447c 100644 --- a/R/pkg/inst/tests/testthat/test_sparkSQL.R +++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R @@ -2507,6 +2507,12 @@ test_that("enableHiveSupport on SparkSession", { expect_equal(value, "hive") }) +test_that("Spark version from SparkSession", { + ver <- callJMethod(sc, "version") + version <- sparkR.version() + expect_equal(ver, version) +}) + unlink(parquetPath) unlink(orcPath) unlink(jsonPath) From 419eefd811a4e29a73bc309157f150751e478db5 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Fri, 2 Sep 2016 10:28:37 -0700 Subject: [PATCH 188/270] [SPARKR][DOC] regexp_extract should doc that it returns empty string when match fails ## What changes were proposed in this pull request? Doc change - see https://issues.apache.org/jira/browse/SPARK-16324 ## How was this patch tested? manual check Author: Felix Cheung Closes #14934 from felixcheung/regexpextractdoc. --- R/pkg/R/functions.R | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/R/pkg/R/functions.R b/R/pkg/R/functions.R index dbf8dd89a204e..369b1d00d9e5b 100644 --- a/R/pkg/R/functions.R +++ b/R/pkg/R/functions.R @@ -2876,7 +2876,8 @@ setMethod("randn", signature(seed = "numeric"), #' regexp_extract #' -#' Extract a specific(idx) group identified by a java regex, from the specified string column. +#' Extract a specific \code{idx} group identified by a Java regex, from the specified string column. +#' If the regex did not match, or the specified group did not match, an empty string is returned. #' #' @param x a string Column. #' @param pattern a regular expression. 
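A short sketch of the documented behaviour using the Scala API that the R function wraps (the input rows and session setup are invented for the example):

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.regexp_extract

object RegexpExtractDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("regexp-extract-demo").getOrCreate()
    import spark.implicits._

    val df = Seq("100-200", "no digits here").toDF("value")
    // Group 1 of the pattern; a row where the regex does not match yields "" rather than null.
    df.select(regexp_extract($"value", "(\\d+)-(\\d+)", 1).as("first")).show()
    spark.stop()
  }
}
```

The second row comes back as an empty string, which is exactly the behaviour the added sentence in the documentation spells out.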
From e79962f2f3955485aecf32939207d8ee6ccd2704 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Fri, 2 Sep 2016 10:42:13 -0700 Subject: [PATCH 189/270] [SPARK-16711] YarnShuffleService doesn't re-init properly on YARN rolling upgrade The Spark Yarn Shuffle Service doesn't re-initialize the application credentials early enough, which causes any other Spark executors trying to fetch from that node during a rolling upgrade to fail with "java.lang.NullPointerException: Password cannot be null if SASL is enabled". Right now the Spark shuffle service relies on the Yarn nodemanager to re-register the applications; unfortunately this happens after we open the port for other executors to connect. If other executors connect before the re-register they get a null pointer exception, which isn't a re-tryable exception and causes them to fail pretty quickly. To solve this I added another leveldb file so that it can save and re-initialize all the applications before opening the port for other executors to connect to it. Adding another leveldb was simpler from the code structure point of view. Most of the code changes are moving things to a common util class. Patch was tested manually on a Yarn cluster with a rolling upgrade happening while a Spark job was running. Without the patch I consistently get the NullPointerException; with the patch the job gets a few Connection refused exceptions but the retries kick in and it succeeds. Author: Thomas Graves Closes #14718 from tgravescs/SPARK-16711. --- common/network-common/pom.xml | 16 ++ .../spark/network/util/LevelDBProvider.java | 152 ++++++++++++++++++ common/network-shuffle/pom.xml | 16 -- .../shuffle/ExternalShuffleBlockResolver.java | 120 +------------- .../network/yarn/YarnShuffleService.java | 135 +++++++++++++++- .../yarn/YarnShuffleServiceSuite.scala | 10 ++ 6 files changed, 311 insertions(+), 138 deletions(-) create mode 100644 common/network-common/src/main/java/org/apache/spark/network/util/LevelDBProvider.java diff --git a/common/network-common/pom.xml b/common/network-common/pom.xml index 81f0c6e2265ae..fcefe64d59c91 100644 --- a/common/network-common/pom.xml +++ b/common/network-common/pom.xml @@ -45,6 +45,22 @@ commons-lang3 + + org.fusesource.leveldbjni + leveldbjni-all + 1.8 + + + + com.fasterxml.jackson.core + jackson-databind + + + + com.fasterxml.jackson.core + jackson-annotations + + org.slf4j diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/LevelDBProvider.java b/common/network-common/src/main/java/org/apache/spark/network/util/LevelDBProvider.java new file mode 100644 index 0000000000000..ec900a7b3ca63 --- /dev/null +++ b/common/network-common/src/main/java/org/apache/spark/network/util/LevelDBProvider.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.network.util; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.fusesource.leveldbjni.JniDBFactory; +import org.fusesource.leveldbjni.internal.NativeDB; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.Options; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * LevelDB utility class available in the network package. + */ +public class LevelDBProvider { + private static final Logger logger = LoggerFactory.getLogger(LevelDBProvider.class); + + public static DB initLevelDB(File dbFile, StoreVersion version, ObjectMapper mapper) throws + IOException { + DB tmpDb = null; + if (dbFile != null) { + Options options = new Options(); + options.createIfMissing(false); + options.logger(new LevelDBLogger()); + try { + tmpDb = JniDBFactory.factory.open(dbFile, options); + } catch (NativeDB.DBException e) { + if (e.isNotFound() || e.getMessage().contains(" does not exist ")) { + logger.info("Creating state database at " + dbFile); + options.createIfMissing(true); + try { + tmpDb = JniDBFactory.factory.open(dbFile, options); + } catch (NativeDB.DBException dbExc) { + throw new IOException("Unable to create state store", dbExc); + } + } else { + // the leveldb file seems to be corrupt somehow. Lets just blow it away and create a new + // one, so we can keep processing new apps + logger.error("error opening leveldb file {}. Creating new file, will not be able to " + + "recover state for existing applications", dbFile, e); + if (dbFile.isDirectory()) { + for (File f : dbFile.listFiles()) { + if (!f.delete()) { + logger.warn("error deleting {}", f.getPath()); + } + } + } + if (!dbFile.delete()) { + logger.warn("error deleting {}", dbFile.getPath()); + } + options.createIfMissing(true); + try { + tmpDb = JniDBFactory.factory.open(dbFile, options); + } catch (NativeDB.DBException dbExc) { + throw new IOException("Unable to create state store", dbExc); + } + + } + } + // if there is a version mismatch, we throw an exception, which means the service is unusable + checkVersion(tmpDb, version, mapper); + } + return tmpDb; + } + + private static class LevelDBLogger implements org.iq80.leveldb.Logger { + private static final Logger LOG = LoggerFactory.getLogger(LevelDBLogger.class); + + @Override + public void log(String message) { + LOG.info(message); + } + } + + /** + * Simple major.minor versioning scheme. Any incompatible changes should be across major + * versions. Minor version differences are allowed -- meaning we should be able to read + * dbs that are either earlier *or* later on the minor version. 
+ */ + public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper) throws + IOException { + byte[] bytes = db.get(StoreVersion.KEY); + if (bytes == null) { + storeVersion(db, newversion, mapper); + } else { + StoreVersion version = mapper.readValue(bytes, StoreVersion.class); + if (version.major != newversion.major) { + throw new IOException("cannot read state DB with version " + version + ", incompatible " + + "with current version " + newversion); + } + storeVersion(db, newversion, mapper); + } + } + + public static void storeVersion(DB db, StoreVersion version, ObjectMapper mapper) + throws IOException { + db.put(StoreVersion.KEY, mapper.writeValueAsBytes(version)); + } + + public static class StoreVersion { + + final static byte[] KEY = "StoreVersion".getBytes(StandardCharsets.UTF_8); + + public final int major; + public final int minor; + + @JsonCreator + public StoreVersion(@JsonProperty("major") int major, @JsonProperty("minor") int minor) { + this.major = major; + this.minor = minor; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + StoreVersion that = (StoreVersion) o; + + return major == that.major && minor == that.minor; + } + + @Override + public int hashCode() { + int result = major; + result = 31 * result + minor; + return result; + } + } +} diff --git a/common/network-shuffle/pom.xml b/common/network-shuffle/pom.xml index d211bd5bd194b..511e1f29de368 100644 --- a/common/network-shuffle/pom.xml +++ b/common/network-shuffle/pom.xml @@ -42,27 +42,11 @@ ${project.version} - - org.fusesource.leveldbjni - leveldbjni-all - 1.8 - - - - com.fasterxml.jackson.core - jackson-databind - - io.dropwizard.metrics metrics-core - - com.fasterxml.jackson.core - jackson-annotations - - org.slf4j diff --git a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java index d436711692e3d..25e9abde708d6 100644 --- a/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java +++ b/common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/ExternalShuffleBlockResolver.java @@ -34,17 +34,16 @@ import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.collect.Maps; -import org.fusesource.leveldbjni.JniDBFactory; -import org.fusesource.leveldbjni.internal.NativeDB; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBIterator; -import org.iq80.leveldb.Options; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.spark.network.buffer.FileSegmentManagedBuffer; import org.apache.spark.network.buffer.ManagedBuffer; import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo; +import org.apache.spark.network.util.LevelDBProvider; +import org.apache.spark.network.util.LevelDBProvider.StoreVersion; import org.apache.spark.network.util.JavaUtils; import org.apache.spark.network.util.NettyUtils; import org.apache.spark.network.util.TransportConf; @@ -114,52 +113,10 @@ public ShuffleIndexInformation load(File file) throws IOException { }; shuffleIndexCache = CacheBuilder.newBuilder() .maximumSize(indexCacheEntries).build(indexCacheLoader); - if (registeredExecutorFile != null) { - Options options = new Options(); - options.createIfMissing(false); - options.logger(new LevelDBLogger()); - DB tmpDb; - 
try { - tmpDb = JniDBFactory.factory.open(registeredExecutorFile, options); - } catch (NativeDB.DBException e) { - if (e.isNotFound() || e.getMessage().contains(" does not exist ")) { - logger.info("Creating state database at " + registeredExecutorFile); - options.createIfMissing(true); - try { - tmpDb = JniDBFactory.factory.open(registeredExecutorFile, options); - } catch (NativeDB.DBException dbExc) { - throw new IOException("Unable to create state store", dbExc); - } - } else { - // the leveldb file seems to be corrupt somehow. Lets just blow it away and create a new - // one, so we can keep processing new apps - logger.error("error opening leveldb file {}. Creating new file, will not be able to " + - "recover state for existing applications", registeredExecutorFile, e); - if (registeredExecutorFile.isDirectory()) { - for (File f : registeredExecutorFile.listFiles()) { - if (!f.delete()) { - logger.warn("error deleting {}", f.getPath()); - } - } - } - if (!registeredExecutorFile.delete()) { - logger.warn("error deleting {}", registeredExecutorFile.getPath()); - } - options.createIfMissing(true); - try { - tmpDb = JniDBFactory.factory.open(registeredExecutorFile, options); - } catch (NativeDB.DBException dbExc) { - throw new IOException("Unable to create state store", dbExc); - } - - } - } - // if there is a version mismatch, we throw an exception, which means the service is unusable - checkVersion(tmpDb); - executors = reloadRegisteredExecutors(tmpDb); - db = tmpDb; + db = LevelDBProvider.initLevelDB(this.registeredExecutorFile, CURRENT_VERSION, mapper); + if (db != null) { + executors = reloadRegisteredExecutors(db); } else { - db = null; executors = Maps.newConcurrentMap(); } this.directoryCleaner = directoryCleaner; @@ -384,76 +341,11 @@ static ConcurrentMap reloadRegisteredExecutors(D break; } AppExecId id = parseDbAppExecKey(key); + logger.info("Reloading registered executors: " + id.toString()); ExecutorShuffleInfo shuffleInfo = mapper.readValue(e.getValue(), ExecutorShuffleInfo.class); registeredExecutors.put(id, shuffleInfo); } } return registeredExecutors; } - - private static class LevelDBLogger implements org.iq80.leveldb.Logger { - private static final Logger LOG = LoggerFactory.getLogger(LevelDBLogger.class); - - @Override - public void log(String message) { - LOG.info(message); - } - } - - /** - * Simple major.minor versioning scheme. Any incompatible changes should be across major - * versions. Minor version differences are allowed -- meaning we should be able to read - * dbs that are either earlier *or* later on the minor version. 
- */ - private static void checkVersion(DB db) throws IOException { - byte[] bytes = db.get(StoreVersion.KEY); - if (bytes == null) { - storeVersion(db); - } else { - StoreVersion version = mapper.readValue(bytes, StoreVersion.class); - if (version.major != CURRENT_VERSION.major) { - throw new IOException("cannot read state DB with version " + version + ", incompatible " + - "with current version " + CURRENT_VERSION); - } - storeVersion(db); - } - } - - private static void storeVersion(DB db) throws IOException { - db.put(StoreVersion.KEY, mapper.writeValueAsBytes(CURRENT_VERSION)); - } - - - public static class StoreVersion { - - static final byte[] KEY = "StoreVersion".getBytes(StandardCharsets.UTF_8); - - public final int major; - public final int minor; - - @JsonCreator public StoreVersion( - @JsonProperty("major") int major, - @JsonProperty("minor") int minor) { - this.major = major; - this.minor = minor; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - StoreVersion that = (StoreVersion) o; - - return major == that.major && minor == that.minor; - } - - @Override - public int hashCode() { - int result = major; - result = 31 * result + minor; - return result; - } - } - } diff --git a/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java b/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java index 2cf3f53e6dfc1..df082e4a92747 100644 --- a/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java +++ b/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java @@ -18,15 +18,28 @@ package org.apache.spark.network.yarn; import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.ByteBuffer; import java.util.List; +import java.util.Map; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Objects; import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.api.*; +import org.apache.spark.network.util.LevelDBProvider; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBIterator; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,12 +82,26 @@ public class YarnShuffleService extends AuxiliaryService { private static final boolean DEFAULT_SPARK_AUTHENTICATE = false; private static final String RECOVERY_FILE_NAME = "registeredExecutors.ldb"; + private static final String SECRETS_RECOVERY_FILE_NAME = "sparkShuffleRecovery.ldb"; // Whether failure during service initialization should stop the NM. 
@VisibleForTesting static final String STOP_ON_FAILURE_KEY = "spark.yarn.shuffle.stopOnFailure"; private static final boolean DEFAULT_STOP_ON_FAILURE = false; + // just for testing when you want to find an open port + @VisibleForTesting + static int boundPort = -1; + private static final ObjectMapper mapper = new ObjectMapper(); + private static final String APP_CREDS_KEY_PREFIX = "AppCreds"; + private static final LevelDBProvider.StoreVersion CURRENT_VERSION = new LevelDBProvider + .StoreVersion(1, 0); + + // just for integration tests that want to look at this file -- in general not sensible as + // a static + @VisibleForTesting + static YarnShuffleService instance; + // An entity that manages the shuffle secret per application // This is used only if authentication is enabled private ShuffleSecretManager secretManager; @@ -96,14 +123,11 @@ public class YarnShuffleService extends AuxiliaryService { @VisibleForTesting File registeredExecutorFile; - // just for testing when you want to find an open port + // Where to store & reload application secrets for recovering state after an NM restart @VisibleForTesting - static int boundPort = -1; + File secretsFile; - // just for integration tests that want to look at this file -- in general not sensible as - // a static - @VisibleForTesting - static YarnShuffleService instance; + private DB db; public YarnShuffleService() { super("spark_shuffle"); @@ -143,10 +167,10 @@ protected void serviceInit(Configuration conf) throws Exception { // If authentication is enabled, set up the shuffle server to use a // special RPC handler that filters out unauthenticated fetch requests - boolean authEnabled = conf.getBoolean(SPARK_AUTHENTICATE_KEY, DEFAULT_SPARK_AUTHENTICATE); List bootstraps = Lists.newArrayList(); + boolean authEnabled = conf.getBoolean(SPARK_AUTHENTICATE_KEY, DEFAULT_SPARK_AUTHENTICATE); if (authEnabled) { - secretManager = new ShuffleSecretManager(); + createSecretManager(); bootstraps.add(new SaslServerBootstrap(transportConf, secretManager)); } @@ -170,6 +194,50 @@ protected void serviceInit(Configuration conf) throws Exception { } } + private void createSecretManager() throws IOException { + secretManager = new ShuffleSecretManager(); + secretsFile = new File(getRecoveryPath().toUri().getPath(), SECRETS_RECOVERY_FILE_NAME); + + // Make sure this is protected in case its not in the NM recovery dir + FileSystem fs = FileSystem.getLocal(_conf); + fs.mkdirs(new Path(secretsFile.getPath()), new FsPermission((short)0700)); + + db = LevelDBProvider.initLevelDB(secretsFile, CURRENT_VERSION, mapper); + logger.info("Recovery location is: " + secretsFile.getPath()); + if (db != null) { + logger.info("Going to reload spark shuffle data"); + DBIterator itr = db.iterator(); + itr.seek(APP_CREDS_KEY_PREFIX.getBytes(StandardCharsets.UTF_8)); + while (itr.hasNext()) { + Map.Entry e = itr.next(); + String key = new String(e.getKey(), StandardCharsets.UTF_8); + if (!key.startsWith(APP_CREDS_KEY_PREFIX)) { + break; + } + String id = parseDbAppKey(key); + ByteBuffer secret = mapper.readValue(e.getValue(), ByteBuffer.class); + logger.info("Reloading tokens for app: " + id); + secretManager.registerApp(id, secret); + } + } + } + + private static String parseDbAppKey(String s) throws IOException { + if (!s.startsWith(APP_CREDS_KEY_PREFIX)) { + throw new IllegalArgumentException("expected a string starting with " + APP_CREDS_KEY_PREFIX); + } + String json = s.substring(APP_CREDS_KEY_PREFIX.length() + 1); + AppId parsed = mapper.readValue(json, AppId.class); + 
return parsed.appId; + } + + private static byte[] dbAppKey(AppId appExecId) throws IOException { + // we stick a common prefix on all the keys so we can find them in the DB + String appExecJson = mapper.writeValueAsString(appExecId); + String key = (APP_CREDS_KEY_PREFIX + ";" + appExecJson); + return key.getBytes(StandardCharsets.UTF_8); + } + @Override public void initializeApplication(ApplicationInitializationContext context) { String appId = context.getApplicationId().toString(); @@ -177,6 +245,12 @@ public void initializeApplication(ApplicationInitializationContext context) { ByteBuffer shuffleSecret = context.getApplicationDataForService(); logger.info("Initializing application {}", appId); if (isAuthenticationEnabled()) { + AppId fullId = new AppId(appId); + if (db != null) { + byte[] key = dbAppKey(fullId); + byte[] value = mapper.writeValueAsString(shuffleSecret).getBytes(StandardCharsets.UTF_8); + db.put(key, value); + } secretManager.registerApp(appId, shuffleSecret); } } catch (Exception e) { @@ -190,6 +264,14 @@ public void stopApplication(ApplicationTerminationContext context) { try { logger.info("Stopping application {}", appId); if (isAuthenticationEnabled()) { + AppId fullId = new AppId(appId); + if (db != null) { + try { + db.delete(dbAppKey(fullId)); + } catch (IOException e) { + logger.error("Error deleting {} from executor state db", appId, e); + } + } secretManager.unregisterApp(appId); } blockHandler.applicationRemoved(appId, false /* clean up local dirs */); @@ -222,6 +304,9 @@ protected void serviceStop() { if (blockHandler != null) { blockHandler.close(); } + if (db != null) { + db.close(); + } } catch (Exception e) { logger.error("Exception when stopping service", e); } @@ -275,4 +360,38 @@ protected Path getRecoveryPath() { return _recoveryPath; } + + /** + * Simply encodes an application ID. 
+ */ + public static class AppId { + public final String appId; + + @JsonCreator + public AppId(@JsonProperty("appId") String appId) { + this.appId = appId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AppId appExecId = (AppId) o; + return Objects.equal(appId, appExecId.appId); + } + + @Override + public int hashCode() { + return Objects.hashCode(appId); + } + + @Override + public String toString() { + return Objects.toStringHelper(this) + .add("appId", appId) + .toString(); + } + } + } diff --git a/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala b/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala index e123e78541048..9a071862bbdb0 100644 --- a/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala +++ b/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala @@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.api.{ApplicationInitializationContext, Appl import org.scalatest.{BeforeAndAfterEach, Matchers} import org.scalatest.concurrent.Eventually._ +import org.apache.spark.SecurityManager import org.apache.spark.SparkFunSuite import org.apache.spark.network.shuffle.ShuffleTestAccessor import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo @@ -77,6 +78,8 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd test("executor state kept across NM restart") { s1 = new YarnShuffleService + // set auth to true to test the secrets recovery + yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, true) s1.init(yarnConfig) val app1Id = ApplicationId.newInstance(0, 1) val app1Data: ApplicationInitializationContext = @@ -89,6 +92,8 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd val execStateFile = s1.registeredExecutorFile execStateFile should not be (null) + val secretsFile = s1.secretsFile + secretsFile should not be (null) val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER) val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER) @@ -118,6 +123,7 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd s1.stop() s2 = new YarnShuffleService s2.init(yarnConfig) + s2.secretsFile should be (secretsFile) s2.registeredExecutorFile should be (execStateFile) val handler2 = s2.blockHandler @@ -135,6 +141,7 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd s3 = new YarnShuffleService s3.init(yarnConfig) s3.registeredExecutorFile should be (execStateFile) + s3.secretsFile should be (secretsFile) val handler3 = s3.blockHandler val resolver3 = ShuffleTestAccessor.getBlockResolver(handler3) @@ -148,7 +155,10 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd test("removed applications should not be in registered executor file") { s1 = new YarnShuffleService + yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, false) s1.init(yarnConfig) + val secretsFile = s1.secretsFile + secretsFile should be (null) val app1Id = ApplicationId.newInstance(0, 1) val app1Data: ApplicationInitializationContext = new ApplicationInitializationContext("user", app1Id, null) From eac1d0e921345b5d15aa35d8c565140292ab2af3 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Fri, 2 Sep 2016 11:08:25 -0700 Subject: [PATCH 190/270] [SPARK-17376][SPARKR] followup - change since version ## What changes were 
proposed in this pull request? change since version in doc ## How was this patch tested? manual Author: Felix Cheung Closes #14939 from felixcheung/rsparkversion2. --- R/pkg/R/SQLContext.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R index a1404543be127..783df53c12ca2 100644 --- a/R/pkg/R/SQLContext.R +++ b/R/pkg/R/SQLContext.R @@ -169,7 +169,7 @@ sparkR.conf <- function(key, defaultValue) { #' sparkR.session() #' version <- sparkR.version() #' } -#' @note sparkR.version since 2.1.0 +#' @note sparkR.version since 2.0.1 sparkR.version <- function() { sparkSession <- getSparkSession() callJMethod(sparkSession, "version") From ed9c884dcf925500ceb388b06b33bd2c95cd2ada Mon Sep 17 00:00:00 2001 From: Davies Liu Date: Fri, 2 Sep 2016 15:10:12 -0700 Subject: [PATCH 191/270] [SPARK-17230] [SQL] Should not pass optimized query into QueryExecution in DataFrameWriter ## What changes were proposed in this pull request? Some analyzer rules have assumptions on logical plans, and the optimizer may break these assumptions; we should not pass an optimized query plan into QueryExecution (it will be analyzed again), otherwise we may hit some weird bugs. For example, we have a rule for decimal calculation that promotes the precision before binary operations, using PromotePrecision as a placeholder to indicate that the rule should not apply twice. But an Optimizer rule will remove this placeholder, which breaks the assumption, so the rule is applied twice and causes wrong results. Ideally, we should make all the analyzer rules idempotent, but that may require lots of effort to double-check them one by one (it may not be easy). An easier approach is to never feed an optimized plan into the Analyzer. This PR fixes the case for RunnableCommand: they will be optimized, and during execution the passed `query` would also be passed into QueryExecution again. This PR makes these `query` plans not part of the children, so they will not be optimized and analyzed again. Right now, we do not know whether a logical plan has been optimized or not; we could introduce a flag for that, and make sure an optimized logical plan will not be analyzed again. ## How was this patch tested? Added regression tests. Author: Davies Liu Closes #14797 from davies/fix_writer. 
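For reference, the regression this guards against shows up as a wrong decimal result after a write/read round trip. The snippet below is a minimal sketch mirroring the regression test added in this patch; it assumes a spark-shell session (so `spark` is in scope) and uses an arbitrary example output path:
```
// Sketch of the SPARK-17230 scenario: a decimal produced via precision promotion is
// written out and read back. Before this fix, re-analyzing the already-optimized plan
// applied the promotion rule twice, so the written values could be wrong.
val df = spark.range(99, 101)
  .selectExpr("id", "cast(id as long) * cast('1.0' as decimal(38, 18)) as num")
df.write.mode("overwrite").parquet("/tmp/spark-17230-check")   // example path only
val roundTripped = spark.read.parquet("/tmp/spark-17230-check")
// Expect the same rows back as in df, e.g. num = 99.000000000000000000
roundTripped.show(truncate = false)
```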
--- .../spark/sql/execution/command/commands.scala | 2 +- .../execution/command/createDataSourceTables.scala | 2 +- .../sql/execution/datasources/DataSource.scala | 13 ++++++++++++- .../execution/datasources/DataSourceStrategy.scala | 2 +- .../InsertIntoHadoopFsRelationCommand.scala | 2 +- .../spark/sql/test/DataFrameReaderWriterSuite.scala | 8 ++++++++ .../execution/CreateHiveTableAsSelectCommand.scala | 2 +- 7 files changed, 25 insertions(+), 6 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala index cce1489abd301..424a962b5eb1c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala @@ -37,7 +37,7 @@ import org.apache.spark.sql.types._ */ trait RunnableCommand extends LogicalPlan with logical.Command { override def output: Seq[Attribute] = Seq.empty - override def children: Seq[LogicalPlan] = Seq.empty + final override def children: Seq[LogicalPlan] = Seq.empty def run(sparkSession: SparkSession): Seq[Row] } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala index da3f6c600adef..c7e327906174c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala @@ -113,7 +113,7 @@ case class CreateDataSourceTableAsSelectCommand( query: LogicalPlan) extends RunnableCommand { - override protected def innerChildren: Seq[QueryPlan[_]] = Seq(query) + override protected def innerChildren: Seq[LogicalPlan] = Seq(query) override def run(sparkSession: SparkSession): Seq[Row] = { assert(table.tableType != CatalogTableType.VIEW) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala index 348530888de3a..5968db84cd608 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala @@ -31,6 +31,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute import org.apache.spark.sql.catalyst.catalog.BucketSpec +import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider import org.apache.spark.sql.execution.datasources.json.JsonFileFormat @@ -479,13 +480,23 @@ case class DataSource( } } + // SPARK-17230: Resolve the partition columns so InsertIntoHadoopFsRelationCommand does + // not need to have the query as child, to avoid to analyze an optimized query, + // because InsertIntoHadoopFsRelationCommand will be optimized first. 
+ val columns = partitionColumns.map { name => + val plan = data.logicalPlan + plan.resolve(name :: Nil, data.sparkSession.sessionState.analyzer.resolver).getOrElse { + throw new AnalysisException( + s"Unable to resolve ${name} given [${plan.output.map(_.name).mkString(", ")}]") + }.asInstanceOf[Attribute] + } // For partitioned relation r, r.schema's column ordering can be different from the column // ordering of data.logicalPlan (partition columns are all moved after data column). This // will be adjusted within InsertIntoHadoopFsRelation. val plan = InsertIntoHadoopFsRelationCommand( outputPath, - partitionColumns.map(UnresolvedAttribute.quoted), + columns, bucketSpec, format, () => Unit, // No existing table needs to be refreshed. diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala index a6621054fc74b..8286467e96a15 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala @@ -181,7 +181,7 @@ case class DataSourceAnalysis(conf: CatalystConf) extends Rule[LogicalPlan] { InsertIntoHadoopFsRelationCommand( outputPath, - t.partitionSchema.fields.map(_.name).map(UnresolvedAttribute(_)), + query.resolve(t.partitionSchema, t.sparkSession.sessionState.analyzer.resolver), t.bucketSpec, t.fileFormat, () => t.refresh(), diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala index de822180ab5fa..02ce7fab64729 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala @@ -66,7 +66,7 @@ case class InsertIntoHadoopFsRelationCommand( mode: SaveMode) extends RunnableCommand { - override def children: Seq[LogicalPlan] = query :: Nil + override protected def innerChildren: Seq[LogicalPlan] = query :: Nil override def run(sparkSession: SparkSession): Seq[Row] = { // Most formats don't do well with duplicate columns, so lets not allow that diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala index 05935cec4b67c..63b0e4588e4a6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala @@ -449,6 +449,14 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be } } + test("SPARK-17230: write out results of decimal calculation") { + val df = spark.range(99, 101) + .selectExpr("id", "cast(id as long) * cast('1.0' as decimal(38, 18)) as num") + df.write.mode(SaveMode.Overwrite).parquet(dir) + val df2 = spark.read.parquet(dir) + checkAnswer(df2, df) + } + private def testRead( df: => DataFrame, expectedResult: Seq[String], diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala index 6e6b1c2a2bcf9..ef5a5a001fb6f 100644 --- 
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala @@ -42,7 +42,7 @@ case class CreateHiveTableAsSelectCommand( private val tableIdentifier = tableDesc.identifier - override def children: Seq[LogicalPlan] = Seq(query) + override def innerChildren: Seq[LogicalPlan] = Seq(query) override def run(sparkSession: SparkSession): Seq[Row] = { lazy val metastoreRelation: MetastoreRelation = { From a2c9acb0e54b2e38cb8ee6431f1ea0e0b4cd959a Mon Sep 17 00:00:00 2001 From: Sameer Agarwal Date: Fri, 2 Sep 2016 15:16:16 -0700 Subject: [PATCH 192/270] [SPARK-16334] Reusing same dictionary column for decoding consecutive row groups shouldn't throw an error ## What changes were proposed in this pull request? This patch fixes a bug in the vectorized parquet reader that's caused by re-using the same dictionary column vector while reading consecutive row groups. Specifically, this issue manifests for a certain distribution of dictionary/plain encoded data while we read/populate the underlying bit packed dictionary data into a column-vector based data structure. ## How was this patch tested? Manually tested on datasets provided by the community. Thanks to Chris Perluss and Keith Kraus for their invaluable help in tracking down this issue! Author: Sameer Agarwal Closes #14941 from sameeragarwal/parquet-exception-2. --- .../parquet/VectorizedColumnReader.java | 54 +++++++++++++------ 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java index 4ed59b08a467c..cb51cb499eede 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java @@ -221,15 +221,21 @@ private void decodeDictionaryIds(int rowId, int num, ColumnVector column, if (column.dataType() == DataTypes.IntegerType || DecimalType.is32BitDecimalType(column.dataType())) { for (int i = rowId; i < rowId + num; ++i) { - column.putInt(i, dictionary.decodeToInt(dictionaryIds.getDictId(i))); + if (!column.isNullAt(i)) { + column.putInt(i, dictionary.decodeToInt(dictionaryIds.getDictId(i))); + } } } else if (column.dataType() == DataTypes.ByteType) { for (int i = rowId; i < rowId + num; ++i) { - column.putByte(i, (byte) dictionary.decodeToInt(dictionaryIds.getDictId(i))); + if (!column.isNullAt(i)) { + column.putByte(i, (byte) dictionary.decodeToInt(dictionaryIds.getDictId(i))); + } } } else if (column.dataType() == DataTypes.ShortType) { for (int i = rowId; i < rowId + num; ++i) { - column.putShort(i, (short) dictionary.decodeToInt(dictionaryIds.getDictId(i))); + if (!column.isNullAt(i)) { + column.putShort(i, (short) dictionary.decodeToInt(dictionaryIds.getDictId(i))); + } } } else { throw new UnsupportedOperationException("Unimplemented type: " + column.dataType()); @@ -240,7 +246,9 @@ private void decodeDictionaryIds(int rowId, int num, ColumnVector column, if (column.dataType() == DataTypes.LongType || DecimalType.is64BitDecimalType(column.dataType())) { for (int i = rowId; i < rowId + num; ++i) { - column.putLong(i, dictionary.decodeToLong(dictionaryIds.getDictId(i))); + if (!column.isNullAt(i)) { + column.putLong(i, 
dictionary.decodeToLong(dictionaryIds.getDictId(i))); + } } } else { throw new UnsupportedOperationException("Unimplemented type: " + column.dataType()); @@ -249,21 +257,27 @@ private void decodeDictionaryIds(int rowId, int num, ColumnVector column, case FLOAT: for (int i = rowId; i < rowId + num; ++i) { - column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getDictId(i))); + if (!column.isNullAt(i)) { + column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getDictId(i))); + } } break; case DOUBLE: for (int i = rowId; i < rowId + num; ++i) { - column.putDouble(i, dictionary.decodeToDouble(dictionaryIds.getDictId(i))); + if (!column.isNullAt(i)) { + column.putDouble(i, dictionary.decodeToDouble(dictionaryIds.getDictId(i))); + } } break; case INT96: if (column.dataType() == DataTypes.TimestampType) { for (int i = rowId; i < rowId + num; ++i) { // TODO: Convert dictionary of Binaries to dictionary of Longs - Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); - column.putLong(i, ParquetRowConverter.binaryToSQLTimestamp(v)); + if (!column.isNullAt(i)) { + Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); + column.putLong(i, ParquetRowConverter.binaryToSQLTimestamp(v)); + } } } else { throw new UnsupportedOperationException(); @@ -275,26 +289,34 @@ private void decodeDictionaryIds(int rowId, int num, ColumnVector column, // and reuse it across batches. This should mean adding a ByteArray would just update // the length and offset. for (int i = rowId; i < rowId + num; ++i) { - Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); - column.putByteArray(i, v.getBytes()); + if (!column.isNullAt(i)) { + Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); + column.putByteArray(i, v.getBytes()); + } } break; case FIXED_LEN_BYTE_ARRAY: // DecimalType written in the legacy mode if (DecimalType.is32BitDecimalType(column.dataType())) { for (int i = rowId; i < rowId + num; ++i) { - Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); - column.putInt(i, (int) ParquetRowConverter.binaryToUnscaledLong(v)); + if (!column.isNullAt(i)) { + Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); + column.putInt(i, (int) ParquetRowConverter.binaryToUnscaledLong(v)); + } } } else if (DecimalType.is64BitDecimalType(column.dataType())) { for (int i = rowId; i < rowId + num; ++i) { - Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); - column.putLong(i, ParquetRowConverter.binaryToUnscaledLong(v)); + if (!column.isNullAt(i)) { + Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); + column.putLong(i, ParquetRowConverter.binaryToUnscaledLong(v)); + } } } else if (DecimalType.isByteArrayDecimalType(column.dataType())) { for (int i = rowId; i < rowId + num; ++i) { - Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); - column.putByteArray(i, v.getBytes()); + if (!column.isNullAt(i)) { + Binary v = dictionary.decodeToBinary(dictionaryIds.getDictId(i)); + column.putByteArray(i, v.getBytes()); + } } } else { throw new UnsupportedOperationException(); From e6132a6cf10df8b12af8dd8d1a2c563792b5cc5a Mon Sep 17 00:00:00 2001 From: Srinath Shankar Date: Sat, 3 Sep 2016 00:20:43 +0200 Subject: [PATCH 193/270] [SPARK-17298][SQL] Require explicit CROSS join for cartesian products ## What changes were proposed in this pull request? Require the use of CROSS join syntax in SQL (and a new crossJoin DataFrame API) to specify explicit cartesian products between relations. 
By cartesian product we mean a join between relations R and S where there is no join condition involving columns from both R and S. If a cartesian product is detected in the absence of an explicit CROSS join, an error must be thrown. Turning on the "spark.sql.crossJoin.enabled" configuration flag will disable this check and allow cartesian products without an explicit CROSS join. The new crossJoin DataFrame API must be used to specify explicit cross joins. The existing join(DataFrame) method will produce a INNER join that will require a subsequent join condition. That is df1.join(df2) is equivalent to select * from df1, df2. ## How was this patch tested? Added cross-join.sql to the SQLQueryTestSuite to test the check for cartesian products. Added a couple of tests to the DataFrameJoinSuite to test the crossJoin API. Modified various other test suites to explicitly specify a cross join where an INNER join or a comma-separated list was previously used. Author: Srinath Shankar Closes #14866 from srinathshankar/crossjoin. --- R/pkg/R/DataFrame.R | 2 +- python/pyspark/sql/dataframe.py | 2 +- .../spark/sql/catalyst/parser/SqlBase.g4 | 3 +- .../spark/sql/catalyst/CatalystConf.scala | 7 + .../sql/catalyst/analysis/Analyzer.scala | 4 +- .../UnsupportedOperationChecker.scala | 2 +- .../sql/catalyst/optimizer/Optimizer.scala | 49 ++++++- .../optimizer/PropagateEmptyRelation.scala | 2 +- .../spark/sql/catalyst/optimizer/joins.scala | 25 ++-- .../sql/catalyst/parser/AstBuilder.scala | 1 + .../sql/catalyst/planning/patterns.scala | 27 ++-- .../spark/sql/catalyst/plans/joinTypes.scala | 20 ++- .../plans/logical/basicLogicalOperators.scala | 4 +- .../analysis/AnalysisErrorSuite.scala | 8 +- .../sql/catalyst/analysis/AnalysisSuite.scala | 4 +- .../optimizer/JoinOptimizationSuite.scala | 60 ++++++-- .../PropagateEmptyRelationSuite.scala | 4 + .../sql/catalyst/parser/PlanParserSuite.scala | 2 +- .../scala/org/apache/spark/sql/Dataset.scala | 18 ++- .../spark/sql/execution/SparkStrategies.scala | 9 +- .../joins/BroadcastHashJoinExec.scala | 4 +- .../joins/BroadcastNestedLoopJoinExec.scala | 16 +-- .../joins/CartesianProductExec.scala | 9 -- .../spark/sql/execution/joins/HashJoin.scala | 4 +- .../execution/joins/SortMergeJoinExec.scala | 9 +- .../apache/spark/sql/internal/SQLConf.scala | 7 +- .../resources/sql-tests/inputs/cross-join.sql | 35 +++++ .../test/resources/sql-tests/inputs/cte.sql | 2 +- .../resources/sql-tests/inputs/outer-join.sql | 5 +- .../sql-tests/results/cross-join.sql.out | 129 ++++++++++++++++++ .../resources/sql-tests/results/cte.sql.out | 2 +- .../sql-tests/results/outer-join.sql.out | 22 ++- .../apache/spark/sql/DataFrameJoinSuite.scala | 19 ++- .../org/apache/spark/sql/DataFrameSuite.scala | 4 +- .../org/apache/spark/sql/DatasetSuite.scala | 8 +- .../org/apache/spark/sql/JoinSuite.scala | 37 ++++- .../execution/SQLWindowFunctionSuite.scala | 3 +- .../sql/execution/joins/InnerJoinSuite.scala | 8 +- .../test/resources/sqlgen/join_2_tables.sql | 4 +- .../sql/catalyst/LogicalPlanToSQLSuite.scala | 4 +- .../sql/hive/execution/HiveQuerySuite.scala | 4 - .../apache/spark/sql/hive/parquetSuites.scala | 4 +- 42 files changed, 465 insertions(+), 127 deletions(-) create mode 100644 sql/core/src/test/resources/sql-tests/inputs/cross-join.sql create mode 100644 sql/core/src/test/resources/sql-tests/results/cross-join.sql.out diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index a92450274e077..d7686972d2eec 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -2276,7 +2276,7 @@ 
setMethod("join", signature(x = "SparkDataFrame", y = "SparkDataFrame"), function(x, y, joinExpr = NULL, joinType = NULL) { if (is.null(joinExpr)) { - sdf <- callJMethod(x@sdf, "join", y@sdf) + sdf <- callJMethod(x@sdf, "crossJoin", y@sdf) } else { if (class(joinExpr) != "Column") stop("joinExpr must be a Column") if (is.null(joinType)) { diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py index a986092f5d634..e5eac918a93a0 100644 --- a/python/pyspark/sql/dataframe.py +++ b/python/pyspark/sql/dataframe.py @@ -644,7 +644,7 @@ def join(self, other, on=None, how=None): on = [on] if on is None or len(on) == 0: - jdf = self._jdf.join(other._jdf) + jdf = self._jdf.crossJoin(other._jdf) elif isinstance(on[0], basestring): if how is None: jdf = self._jdf.join(other._jdf, self._jseq(on), "inner") diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 index a8af840c1e2a2..0447436ea7976 100644 --- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 +++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 @@ -375,7 +375,7 @@ setQuantifier relation : left=relation - ((CROSS | joinType) JOIN right=relation joinCriteria? + (joinType JOIN right=relation joinCriteria? | NATURAL joinType JOIN right=relation ) #joinRelation | relationPrimary #relationDefault @@ -383,6 +383,7 @@ relation joinType : INNER? + | CROSS | LEFT OUTER? | LEFT SEMI | RIGHT OUTER? diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala index 4df100c2a8304..75ae588c18ec6 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala @@ -36,6 +36,12 @@ trait CatalystConf { def warehousePath: String + /** If true, cartesian products between relations will be allowed for all + * join types(inner, (left|right|full) outer). + * If false, cartesian products will require explicit CROSS JOIN syntax. + */ + def crossJoinEnabled: Boolean + /** * Returns the [[Resolver]] for the current configuration, which can be used to determine if two * identifiers are equal. 
@@ -55,5 +61,6 @@ case class SimpleCatalystConf( optimizerInSetConversionThreshold: Int = 10, maxCaseBranchesForCodegen: Int = 20, runSQLonFile: Boolean = true, + crossJoinEnabled: Boolean = false, warehousePath: String = "/user/hive/warehouse") extends CatalystConf diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index e559f235c5a38..18f814d6cdfd4 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -1003,7 +1003,7 @@ class Analyzer( failOnOuterReference(j) failOnOuterReferenceInSubTree(left, "a RIGHT OUTER JOIN") j - case j @ Join(_, right, jt, _) if jt != Inner => + case j @ Join(_, right, jt, _) if !jt.isInstanceOf[InnerLike] => failOnOuterReference(j) failOnOuterReferenceInSubTree(right, "a LEFT (OUTER) JOIN") j @@ -1899,7 +1899,7 @@ class Analyzer( joinedCols ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput.map(_.withNullability(true)) - case Inner => + case _ : InnerLike => leftKeys ++ lUniqueOutput ++ rUniqueOutput case _ => sys.error("Unsupported natural join type " + joinType) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala index f6e32e29ebca8..e81370c504abb 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala @@ -94,7 +94,7 @@ object UnsupportedOperationChecker { joinType match { - case Inner => + case _: InnerLike => if (left.isStreaming && right.isStreaming) { throwError("Inner join between two streaming DataFrames/Datasets is not supported") } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala index 7617d34261807..d2f0c97989213 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala @@ -22,6 +22,7 @@ import scala.collection.immutable.HashSet import scala.collection.mutable.ArrayBuffer import org.apache.spark.api.java.function.FilterFunction +import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{CatalystConf, SimpleCatalystConf} import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog} @@ -107,6 +108,8 @@ abstract class Optimizer(sessionCatalog: SessionCatalog, conf: CatalystConf) RewriteCorrelatedScalarSubquery, EliminateSerialization, RemoveAliasOnlyProject) :: + Batch("Check Cartesian Products", Once, + CheckCartesianProducts(conf)) :: Batch("Decimal Optimizations", fixedPoint, DecimalAggregates) :: Batch("Typed Filter Optimization", fixedPoint, @@ -838,7 +841,7 @@ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { val (leftFilterConditions, rightFilterConditions, commonFilterCondition) = split(splitConjunctivePredicates(filterCondition), left, right) joinType match { - case Inner => + case _: InnerLike => // push down the single side `where` condition into respective sides val newLeft = 
leftFilterConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) @@ -848,7 +851,7 @@ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { commonFilterCondition.partition(e => !SubqueryExpression.hasCorrelatedSubquery(e)) val newJoinCond = (newJoinConditions ++ joinCondition).reduceLeftOption(And) - val join = Join(newLeft, newRight, Inner, newJoinCond) + val join = Join(newLeft, newRight, joinType, newJoinCond) if (others.nonEmpty) { Filter(others.reduceLeft(And), join) } else { @@ -885,7 +888,7 @@ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right) joinType match { - case Inner | LeftExistence(_) => + case _: InnerLike | LeftExistence(_) => // push down the single side only join filter for both sides sub queries val newLeft = leftJoinConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) @@ -932,6 +935,46 @@ object CombineLimits extends Rule[LogicalPlan] { } } +/** + * Check if there any cartesian products between joins of any type in the optimized plan tree. + * Throw an error if a cartesian product is found without an explicit cross join specified. + * This rule is effectively disabled if the CROSS_JOINS_ENABLED flag is true. + * + * This rule must be run AFTER the ReorderJoin rule since the join conditions for each join must be + * collected before checking if it is a cartesian product. If you have + * SELECT * from R, S where R.r = S.s, + * the join between R and S is not a cartesian product and therefore should be allowed. + * The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule. + */ +case class CheckCartesianProducts(conf: CatalystConf) + extends Rule[LogicalPlan] with PredicateHelper { + /** + * Check if a join is a cartesian product. Returns true if + * there are no join conditions involving references from both left and right. + */ + def isCartesianProduct(join: Join): Boolean = { + val conditions = join.condition.map(splitConjunctivePredicates).getOrElse(Nil) + !conditions.map(_.references).exists(refs => refs.exists(join.left.outputSet.contains) + && refs.exists(join.right.outputSet.contains)) + } + + def apply(plan: LogicalPlan): LogicalPlan = + if (conf.crossJoinEnabled) { + plan + } else plan transform { + case j @ Join(left, right, Inner | LeftOuter | RightOuter | FullOuter, condition) + if isCartesianProduct(j) => + throw new AnalysisException( + s"""Detected cartesian product for ${j.joinType.sql} join between logical plans + |${left.treeString(false).trim} + |and + |${right.treeString(false).trim} + |Join condition is missing or trivial. + |Use the CROSS JOIN syntax to allow cartesian products between these relations.""" + .stripMargin) + } +} + /** * Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values. 
* diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelation.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelation.scala index 50076b1a41c02..7400a01918c52 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelation.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelation.scala @@ -50,7 +50,7 @@ object PropagateEmptyRelation extends Rule[LogicalPlan] with PredicateHelper { empty(p) case p @ Join(_, _, joinType, _) if p.children.exists(isEmptyLocalRelation) => joinType match { - case Inner => empty(p) + case _: InnerLike => empty(p) // Intersect is handled as LeftSemi by `ReplaceIntersectWithSemiJoin` rule. // Except is handled as LeftAnti by `ReplaceExceptWithAntiJoin` rule. case LeftOuter | LeftSemi | LeftAnti if isEmptyLocalRelation(p.left) => empty(p) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala index 158ad3d91fbad..1621bffd619f2 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala @@ -25,7 +25,6 @@ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules._ - /** * Reorder the joins and push all the conditions into join, so that the bottom ones have at least * one condition. @@ -39,39 +38,46 @@ object ReorderJoin extends Rule[LogicalPlan] with PredicateHelper { * * The joined plan are picked from left to right, prefer those has at least one join condition. * - * @param input a list of LogicalPlans to join. + * @param input a list of LogicalPlans to inner join and the type of inner join. * @param conditions a list of condition for join. 
*/ @tailrec - def createOrderedJoin(input: Seq[LogicalPlan], conditions: Seq[Expression]): LogicalPlan = { + def createOrderedJoin(input: Seq[(LogicalPlan, InnerLike)], conditions: Seq[Expression]) + : LogicalPlan = { assert(input.size >= 2) if (input.size == 2) { val (joinConditions, others) = conditions.partition( e => !SubqueryExpression.hasCorrelatedSubquery(e)) - val join = Join(input(0), input(1), Inner, joinConditions.reduceLeftOption(And)) + val ((left, leftJoinType), (right, rightJoinType)) = (input(0), input(1)) + val innerJoinType = (leftJoinType, rightJoinType) match { + case (Inner, Inner) => Inner + case (_, _) => Cross + } + val join = Join(left, right, innerJoinType, joinConditions.reduceLeftOption(And)) if (others.nonEmpty) { Filter(others.reduceLeft(And), join) } else { join } } else { - val left :: rest = input.toList + val (left, _) :: rest = input.toList // find out the first join that have at least one join condition - val conditionalJoin = rest.find { plan => + val conditionalJoin = rest.find { planJoinPair => + val plan = planJoinPair._1 val refs = left.outputSet ++ plan.outputSet conditions.filterNot(canEvaluate(_, left)).filterNot(canEvaluate(_, plan)) .exists(_.references.subsetOf(refs)) } // pick the next one if no condition left - val right = conditionalJoin.getOrElse(rest.head) + val (right, innerJoinType) = conditionalJoin.getOrElse(rest.head) val joinedRefs = left.outputSet ++ right.outputSet val (joinConditions, others) = conditions.partition( e => e.references.subsetOf(joinedRefs) && !SubqueryExpression.hasCorrelatedSubquery(e)) - val joined = Join(left, right, Inner, joinConditions.reduceLeftOption(And)) + val joined = Join(left, right, innerJoinType, joinConditions.reduceLeftOption(And)) // should not have reference to same logical plan - createOrderedJoin(Seq(joined) ++ rest.filterNot(_ eq right), others) + createOrderedJoin(Seq((joined, Inner)) ++ rest.filterNot(_._1 eq right), others) } } @@ -82,7 +88,6 @@ object ReorderJoin extends Rule[LogicalPlan] with PredicateHelper { } } - /** * Elimination of outer joins, if the predicates can restrict the result sets so that * all null-supplying rows are eliminated diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index 42fbc16d0396a..e4cb9f016133a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -539,6 +539,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { def join(ctx: JoinRelationContext, left: LogicalPlan, right: LogicalPlan): Join = { val baseJoinType = ctx.joinType match { case null => Inner + case jt if jt.CROSS != null => Cross case jt if jt.FULL != null => FullOuter case jt if jt.SEMI != null => LeftSemi case jt if jt.ANTI != null => LeftAnti diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala index 476c66af76b29..41cabb8cb3390 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala @@ -159,23 +159,30 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper { */ object ExtractFiltersAndInnerJoins extends PredicateHelper { - // flatten all inner joins, which 
are next to each other - def flattenJoin(plan: LogicalPlan): (Seq[LogicalPlan], Seq[Expression]) = plan match { - case Join(left, right, Inner, cond) => - val (plans, conditions) = flattenJoin(left) - (plans ++ Seq(right), conditions ++ cond.toSeq) + /** + * Flatten all inner joins, which are next to each other. + * Return a list of logical plans to be joined with a boolean for each plan indicating if it + * was involved in an explicit cross join. Also returns the entire list of join conditions for + * the left-deep tree. + */ + def flattenJoin(plan: LogicalPlan, parentJoinType: InnerLike = Inner) + : (Seq[(LogicalPlan, InnerLike)], Seq[Expression]) = plan match { + case Join(left, right, joinType: InnerLike, cond) => + val (plans, conditions) = flattenJoin(left, joinType) + (plans ++ Seq((right, joinType)), conditions ++ cond.toSeq) - case Filter(filterCondition, j @ Join(left, right, Inner, joinCondition)) => + case Filter(filterCondition, j @ Join(left, right, _: InnerLike, joinCondition)) => val (plans, conditions) = flattenJoin(j) (plans, conditions ++ splitConjunctivePredicates(filterCondition)) - case _ => (Seq(plan), Seq()) + case _ => (Seq((plan, parentJoinType)), Seq()) } - def unapply(plan: LogicalPlan): Option[(Seq[LogicalPlan], Seq[Expression])] = plan match { - case f @ Filter(filterCondition, j @ Join(_, _, Inner, _)) => + def unapply(plan: LogicalPlan): Option[(Seq[(LogicalPlan, InnerLike)], Seq[Expression])] + = plan match { + case f @ Filter(filterCondition, j @ Join(_, _, joinType: InnerLike, _)) => Some(flattenJoin(f)) - case j @ Join(_, _, Inner, _) => + case j @ Join(_, _, joinType, _) => Some(flattenJoin(j)) case _ => None } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/joinTypes.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/joinTypes.scala index 80674d9b4bc9c..61e083e6fc2c3 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/joinTypes.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/joinTypes.scala @@ -28,6 +28,7 @@ object JoinType { case "rightouter" | "right" => RightOuter case "leftsemi" => LeftSemi case "leftanti" => LeftAnti + case "cross" => Cross case _ => val supported = Seq( "inner", @@ -35,7 +36,8 @@ object JoinType { "leftouter", "left", "rightouter", "right", "leftsemi", - "leftanti") + "leftanti", + "cross") throw new IllegalArgumentException(s"Unsupported join type '$typ'. " + "Supported join types include: " + supported.mkString("'", "', '", "'") + ".") @@ -46,10 +48,24 @@ sealed abstract class JoinType { def sql: String } -case object Inner extends JoinType { +/** + * The explicitCartesian flag indicates if the inner join was constructed with a CROSS join + * indicating a cartesian product has been explicitly requested. 
+ */ +sealed abstract class InnerLike extends JoinType { + def explicitCartesian: Boolean +} + +case object Inner extends InnerLike { + override def explicitCartesian: Boolean = false override def sql: String = "INNER" } +case object Cross extends InnerLike { + override def explicitCartesian: Boolean = true + override def sql: String = "CROSS" +} + case object LeftOuter extends JoinType { override def sql: String = "LEFT OUTER" } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala index 010aec7ba1a42..d2d33e40a8c8f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala @@ -293,7 +293,7 @@ case class Join( override protected def validConstraints: Set[Expression] = { joinType match { - case Inner if condition.isDefined => + case _: InnerLike if condition.isDefined => left.constraints .union(right.constraints) .union(splitConjunctivePredicates(condition.get).toSet) @@ -302,7 +302,7 @@ case class Join( .union(splitConjunctivePredicates(condition.get).toSet) case j: ExistenceJoin => left.constraints - case Inner => + case _: InnerLike => left.constraints.union(right.constraints) case LeftExistence(_) => left.constraints diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala index 13bf034f831cf..e7c8615bc5e04 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala @@ -24,7 +24,7 @@ import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete, Count, Max} -import org.apache.spark.sql.catalyst.plans.{Inner, LeftOuter, RightOuter} +import org.apache.spark.sql.catalyst.plans.{Cross, Inner, LeftOuter, RightOuter} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData, MapData} import org.apache.spark.sql.types._ @@ -396,7 +396,7 @@ class AnalysisErrorSuite extends AnalysisTest { } test("error test for self-join") { - val join = Join(testRelation, testRelation, Inner, None) + val join = Join(testRelation, testRelation, Cross, None) val error = intercept[AnalysisException] { SimpleAnalyzer.checkAnalysis(join) } @@ -475,7 +475,7 @@ class AnalysisErrorSuite extends AnalysisTest { LocalRelation( AttributeReference("c", BinaryType)(exprId = ExprId(4)), AttributeReference("d", IntegerType)(exprId = ExprId(3))), - Inner, + Cross, Some(EqualTo(AttributeReference("a", BinaryType)(exprId = ExprId(2)), AttributeReference("c", BinaryType)(exprId = ExprId(4))))) @@ -489,7 +489,7 @@ class AnalysisErrorSuite extends AnalysisTest { LocalRelation( AttributeReference("c", MapType(IntegerType, StringType))(exprId = ExprId(4)), AttributeReference("d", IntegerType)(exprId = ExprId(3))), - Inner, + Cross, Some(EqualTo(AttributeReference("a", MapType(IntegerType, StringType))(exprId = ExprId(2)), AttributeReference("c", MapType(IntegerType, StringType))(exprId = 
ExprId(4))))) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala index 8971edc7d3b9a..50ebad25cd258 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala @@ -21,7 +21,7 @@ import org.apache.spark.sql.catalyst.{SimpleCatalystConf, TableIdentifier} import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.plans.Inner +import org.apache.spark.sql.catalyst.plans.{Cross, Inner} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.types._ @@ -341,7 +341,7 @@ class AnalysisSuite extends AnalysisTest { Join( Project(Seq($"x.key"), SubqueryAlias("x", input, None)), Project(Seq($"y.key"), SubqueryAlias("y", input, None)), - Inner, None)) + Cross, None)) assertAnalysisSuccess(query) } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala index dbb3e6a5272ec..087718b3ecf1a 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinOptimizationSuite.scala @@ -23,7 +23,7 @@ import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.planning.ExtractFiltersAndInnerJoins -import org.apache.spark.sql.catalyst.plans.{Inner, PlanTest} +import org.apache.spark.sql.catalyst.plans.{Cross, Inner, InnerLike, PlanTest} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules.RuleExecutor @@ -54,6 +54,18 @@ class JoinOptimizationSuite extends PlanTest { val z = testRelation.subquery('z) def testExtract(plan: LogicalPlan, expected: Option[(Seq[LogicalPlan], Seq[Expression])]) { + val expectedNoCross = expected map { + seq_pair => { + val plans = seq_pair._1 + val noCartesian = plans map { plan => (plan, Inner) } + (noCartesian, seq_pair._2) + } + } + testExtractCheckCross(plan, expectedNoCross) + } + + def testExtractCheckCross + (plan: LogicalPlan, expected: Option[(Seq[(LogicalPlan, InnerLike)], Seq[Expression])]) { assert(ExtractFiltersAndInnerJoins.unapply(plan) === expected) } @@ -70,6 +82,16 @@ class JoinOptimizationSuite extends PlanTest { testExtract(x.join(y).join(x.join(z)), Some(Seq(x, y, x.join(z)), Seq())) testExtract(x.join(y).join(x.join(z)).where("x.b".attr === "y.d".attr), Some(Seq(x, y, x.join(z)), Seq("x.b".attr === "y.d".attr))) + + testExtractCheckCross(x.join(y, Cross), Some(Seq((x, Cross), (y, Cross)), Seq())) + testExtractCheckCross(x.join(y, Cross).join(z, Cross), + Some(Seq((x, Cross), (y, Cross), (z, Cross)), Seq())) + testExtractCheckCross(x.join(y, Cross, Some("x.b".attr === "y.d".attr)).join(z, Cross), + Some(Seq((x, Cross), (y, Cross), (z, Cross)), Seq("x.b".attr === "y.d".attr))) + testExtractCheckCross(x.join(y, Inner, Some("x.b".attr === "y.d".attr)).join(z, Cross), + Some(Seq((x, Inner), (y, Inner), (z, Cross)), Seq("x.b".attr === "y.d".attr))) + testExtractCheckCross(x.join(y, Cross, 
Some("x.b".attr === "y.d".attr)).join(z, Inner), + Some(Seq((x, Cross), (y, Cross), (z, Inner)), Seq("x.b".attr === "y.d".attr))) } test("reorder inner joins") { @@ -77,18 +99,28 @@ class JoinOptimizationSuite extends PlanTest { val y = testRelation1.subquery('y) val z = testRelation.subquery('z) - val originalQuery = { - x.join(y).join(z) - .where(("x.b".attr === "z.b".attr) && ("y.d".attr === "z.a".attr)) + val queryAnswers = Seq( + ( + x.join(y).join(z).where(("x.b".attr === "z.b".attr) && ("y.d".attr === "z.a".attr)), + x.join(z, condition = Some("x.b".attr === "z.b".attr)) + .join(y, condition = Some("y.d".attr === "z.a".attr)) + ), + ( + x.join(y, Cross).join(z, Cross) + .where(("x.b".attr === "z.b".attr) && ("y.d".attr === "z.a".attr)), + x.join(z, Cross, Some("x.b".attr === "z.b".attr)) + .join(y, Cross, Some("y.d".attr === "z.a".attr)) + ), + ( + x.join(y, Inner).join(z, Cross).where("x.b".attr === "z.a".attr), + x.join(z, Cross, Some("x.b".attr === "z.a".attr)).join(y, Inner) + ) + ) + + queryAnswers foreach { queryAnswerPair => + val optimized = Optimize.execute(queryAnswerPair._1.analyze) + comparePlans(optimized, analysis.EliminateSubqueryAliases(queryAnswerPair._2.analyze)) } - - val optimized = Optimize.execute(originalQuery.analyze) - val correctAnswer = - x.join(z, condition = Some("x.b".attr === "z.b".attr)) - .join(y, condition = Some("y.d".attr === "z.a".attr)) - .analyze - - comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer)) } test("broadcasthint sets relation statistics to smallest value") { @@ -98,7 +130,7 @@ class JoinOptimizationSuite extends PlanTest { Project(Seq($"x.key", $"y.key"), Join( SubqueryAlias("x", input, None), - BroadcastHint(SubqueryAlias("y", input, None)), Inner, None)).analyze + BroadcastHint(SubqueryAlias("y", input, None)), Cross, None)).analyze val optimized = Optimize.execute(query) @@ -106,7 +138,7 @@ class JoinOptimizationSuite extends PlanTest { Join( Project(Seq($"x.key"), SubqueryAlias("x", input, None)), BroadcastHint(Project(Seq($"y.key"), SubqueryAlias("y", input, None))), - Inner, None).analyze + Cross, None).analyze comparePlans(optimized, expected) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelationSuite.scala index c549832ef3eda..908dde7a66988 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelationSuite.scala @@ -67,6 +67,7 @@ class PropagateEmptyRelationSuite extends PlanTest { // Note that `None` is used to compare with OptimizeWithoutPropagateEmptyRelation. 
val testcases = Seq( (true, true, Inner, None), + (true, true, Cross, None), (true, true, LeftOuter, None), (true, true, RightOuter, None), (true, true, FullOuter, None), @@ -74,6 +75,7 @@ class PropagateEmptyRelationSuite extends PlanTest { (true, true, LeftSemi, None), (true, false, Inner, Some(LocalRelation('a.int, 'b.int))), + (true, false, Cross, Some(LocalRelation('a.int, 'b.int))), (true, false, LeftOuter, None), (true, false, RightOuter, Some(LocalRelation('a.int, 'b.int))), (true, false, FullOuter, None), @@ -81,6 +83,7 @@ class PropagateEmptyRelationSuite extends PlanTest { (true, false, LeftSemi, None), (false, true, Inner, Some(LocalRelation('a.int, 'b.int))), + (false, true, Cross, Some(LocalRelation('a.int, 'b.int))), (false, true, LeftOuter, Some(LocalRelation('a.int, 'b.int))), (false, true, RightOuter, None), (false, true, FullOuter, None), @@ -88,6 +91,7 @@ class PropagateEmptyRelationSuite extends PlanTest { (false, true, LeftSemi, Some(LocalRelation('a.int))), (false, false, Inner, Some(LocalRelation('a.int, 'b.int))), + (false, false, Cross, Some(LocalRelation('a.int, 'b.int))), (false, false, LeftOuter, Some(LocalRelation('a.int, 'b.int))), (false, false, RightOuter, Some(LocalRelation('a.int, 'b.int))), (false, false, FullOuter, Some(LocalRelation('a.int, 'b.int))), diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala index 2fcbfc7067a13..faaea17b64d2a 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala @@ -346,7 +346,7 @@ class PlanParserSuite extends PlanTest { def test(sql: String, jt: JoinType, tests: Seq[(String, JoinType) => Unit]): Unit = { tests.foreach(_(sql, jt)) } - test("cross join", Inner, Seq(testUnconditionalJoin)) + test("cross join", Cross, Seq(testUnconditionalJoin)) test(",", Inner, Seq(testUnconditionalJoin)) test("join", Inner, testAll) test("inner join", Inner, testAll) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala index e7dcf0f51f4a5..3b3cb820788a2 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala @@ -589,9 +589,9 @@ class Dataset[T] private[sql]( def stat: DataFrameStatFunctions = new DataFrameStatFunctions(toDF()) /** - * Cartesian join with another [[DataFrame]]. + * Join with another [[DataFrame]]. * - * Note that cartesian joins are very expensive without an extra filter that can be pushed down. + * Behaves as an INNER JOIN and requires a subsequent join predicate. * * @param right Right side of the join operation. * @@ -763,6 +763,20 @@ class Dataset[T] private[sql]( } } + /** + * Explicit cartesian join with another [[DataFrame]]. + * + * Note that cartesian joins are very expensive without an extra filter that can be pushed down. + * + * @param right Right side of the join operation. 
+ * + * @group untypedrel + * @since 2.0.0 + */ + def crossJoin(right: Dataset[_]): DataFrame = withPlan { + Join(logicalPlan, right.logicalPlan, joinType = Cross, None) + } + /** * :: Experimental :: * Joins this Dataset returning a [[Tuple2]] for each pair where `condition` evaluates to diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index b4899ad688f96..c389593b4f767 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -140,13 +140,13 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { } private def canBuildRight(joinType: JoinType): Boolean = joinType match { - case Inner | LeftOuter | LeftSemi | LeftAnti => true + case _: InnerLike | LeftOuter | LeftSemi | LeftAnti => true case j: ExistenceJoin => true case _ => false } private def canBuildLeft(joinType: JoinType): Boolean = joinType match { - case Inner | RightOuter => true + case _: InnerLike | RightOuter => true case _ => false } @@ -200,7 +200,7 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { planLater(left), planLater(right), BuildLeft, joinType, condition) :: Nil // Pick CartesianProduct for InnerJoin - case logical.Join(left, right, Inner, condition) => + case logical.Join(left, right, _: InnerLike, condition) => joins.CartesianProductExec(planLater(left), planLater(right), condition) :: Nil case logical.Join(left, right, joinType, condition) => @@ -212,8 +212,7 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { } // This join could be very slow or OOM joins.BroadcastNestedLoopJoinExec( - planLater(left), planLater(right), buildSide, joinType, condition, - withinBroadcastThreshold = false) :: Nil + planLater(left), planLater(right), buildSide, joinType, condition) :: Nil // --- Cases where this strategy does not apply --------------------------------------------- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala index 0f24baacd18d6..0bc261d593df4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala @@ -79,7 +79,7 @@ case class BroadcastHashJoinExec( override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = { joinType match { - case Inner => codegenInner(ctx, input) + case _: InnerLike => codegenInner(ctx, input) case LeftOuter | RightOuter => codegenOuter(ctx, input) case LeftSemi => codegenSemi(ctx, input) case LeftAnti => codegenAnti(ctx, input) @@ -134,7 +134,7 @@ case class BroadcastHashJoinExec( ctx.INPUT_ROW = matched buildPlan.output.zipWithIndex.map { case (a, i) => val ev = BoundReference(i, a.dataType, a.nullable).genCode(ctx) - if (joinType == Inner) { + if (joinType.isInstanceOf[InnerLike]) { ev } else { // the variables are needed even there is no matched rows diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala index 6a9965f1a24cd..43cdce7de8c7f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala +++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala @@ -34,8 +34,7 @@ case class BroadcastNestedLoopJoinExec( right: SparkPlan, buildSide: BuildSide, joinType: JoinType, - condition: Option[Expression], - withinBroadcastThreshold: Boolean = true) extends BinaryExecNode { + condition: Option[Expression]) extends BinaryExecNode { override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) @@ -65,7 +64,7 @@ case class BroadcastNestedLoopJoinExec( override def output: Seq[Attribute] = { joinType match { - case Inner => + case _: InnerLike => left.output ++ right.output case LeftOuter => left.output ++ right.output.map(_.withNullability(true)) @@ -340,20 +339,11 @@ case class BroadcastNestedLoopJoinExec( ) } - protected override def doPrepare(): Unit = { - if (!withinBroadcastThreshold && !sqlContext.conf.crossJoinEnabled) { - throw new AnalysisException("Both sides of this join are outside the broadcasting " + - "threshold and computing it could be prohibitively expensive. To explicitly enable it, " + - s"please set ${SQLConf.CROSS_JOINS_ENABLED.key} = true") - } - super.doPrepare() - } - protected override def doExecute(): RDD[InternalRow] = { val broadcastedRelation = broadcast.executeBroadcast[Array[InternalRow]]() val resultRdd = (joinType, buildSide) match { - case (Inner, _) => + case (_: InnerLike, _) => innerJoin(broadcastedRelation) case (LeftOuter, BuildRight) | (RightOuter, BuildLeft) => outerJoin(broadcastedRelation) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProductExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProductExec.scala index 57866df90d27d..15dc9b40662e2 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProductExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProductExec.scala @@ -91,15 +91,6 @@ case class CartesianProductExec( override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) - protected override def doPrepare(): Unit = { - if (!sqlContext.conf.crossJoinEnabled) { - throw new AnalysisException("Cartesian joins could be prohibitively expensive and are " + - "disabled by default. 
To explicitly enable them, please set " + - s"${SQLConf.CROSS_JOINS_ENABLED.key} = true") - } - super.doPrepare() - } - protected override def doExecute(): RDD[InternalRow] = { val numOutputRows = longMetric("numOutputRows") diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala index d46a80423fa35..fb6bfa7b2735c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala @@ -38,7 +38,7 @@ trait HashJoin { override def output: Seq[Attribute] = { joinType match { - case Inner => + case _: InnerLike => left.output ++ right.output case LeftOuter => left.output ++ right.output.map(_.withNullability(true)) @@ -225,7 +225,7 @@ trait HashJoin { numOutputRows: SQLMetric): Iterator[InternalRow] = { val joinedIter = joinType match { - case Inner => + case _: InnerLike => innerJoin(streamedIter, hashed) case LeftOuter | RightOuter => outerJoin(streamedIter, hashed) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala index 5c9c1e6062f0d..b46af2a99a1e0 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala @@ -45,7 +45,7 @@ case class SortMergeJoinExec( override def output: Seq[Attribute] = { joinType match { - case Inner => + case _: InnerLike => left.output ++ right.output case LeftOuter => left.output ++ right.output.map(_.withNullability(true)) @@ -64,7 +64,8 @@ case class SortMergeJoinExec( } override def outputPartitioning: Partitioning = joinType match { - case Inner => PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning)) + case _: InnerLike => + PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning)) // For left and right outer joins, the output is partitioned by the streamed input's join keys. 
case LeftOuter => left.outputPartitioning case RightOuter => right.outputPartitioning @@ -111,7 +112,7 @@ case class SortMergeJoinExec( val resultProj: InternalRow => InternalRow = UnsafeProjection.create(output, output) joinType match { - case Inner => + case _: InnerLike => new RowIterator { private[this] var currentLeftRow: InternalRow = _ private[this] var currentRightMatches: ArrayBuffer[InternalRow] = _ @@ -318,7 +319,7 @@ case class SortMergeJoinExec( } override def supportCodegen: Boolean = { - joinType == Inner + joinType.isInstanceOf[InnerLike] } override def inputRDDs(): Seq[RDD[InternalRow]] = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index a54342f82e249..1d6ca5a965cbf 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -362,7 +362,8 @@ object SQLConf { .createWithDefault(true) val CROSS_JOINS_ENABLED = SQLConfigBuilder("spark.sql.crossJoin.enabled") - .doc("When false, we will throw an error if a query contains a cross join") + .doc("When false, we will throw an error if a query contains a cartesian product without " + + "explicit CROSS JOIN syntax.") .booleanConf .createWithDefault(false) @@ -683,8 +684,6 @@ private[sql] class SQLConf extends Serializable with CatalystConf with Logging { def bucketingEnabled: Boolean = getConf(SQLConf.BUCKETING_ENABLED) - def crossJoinEnabled: Boolean = getConf(SQLConf.CROSS_JOINS_ENABLED) - // Do not use a value larger than 4000 as the default value of this property. // See the comments of SCHEMA_STRING_LENGTH_THRESHOLD above for more information. def schemaStringLengthThreshold: Int = getConf(SCHEMA_STRING_LENGTH_THRESHOLD) @@ -709,6 +708,8 @@ private[sql] class SQLConf extends Serializable with CatalystConf with Logging { override def orderByOrdinal: Boolean = getConf(ORDER_BY_ORDINAL) override def groupByOrdinal: Boolean = getConf(GROUP_BY_ORDINAL) + + override def crossJoinEnabled: Boolean = getConf(SQLConf.CROSS_JOINS_ENABLED) /** ********************** SQLConf functionality methods ************ */ /** Set Spark SQL configuration properties. */ diff --git a/sql/core/src/test/resources/sql-tests/inputs/cross-join.sql b/sql/core/src/test/resources/sql-tests/inputs/cross-join.sql new file mode 100644 index 0000000000000..aa7312437487a --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/cross-join.sql @@ -0,0 +1,35 @@ +-- Cross join detection and error checking is done in JoinSuite since explain output is +-- used in the error message and the ids are not stable. Only positive cases are checked here. 
+ +create temporary view nt1 as select * from values + ("one", 1), + ("two", 2), + ("three", 3) + as nt1(k, v1); + +create temporary view nt2 as select * from values + ("one", 1), + ("two", 22), + ("one", 5) + as nt2(k, v2); + +-- Cross joins with and without predicates +SELECT * FROM nt1 cross join nt2; +SELECT * FROM nt1 cross join nt2 where nt1.k = nt2.k; +SELECT * FROM nt1 cross join nt2 on (nt1.k = nt2.k); +SELECT * FROM nt1 cross join nt2 where nt1.v1 = 1 and nt2.v2 = 22; + +SELECT a.key, b.key FROM +(SELECT k key FROM nt1 WHERE v1 < 2) a +CROSS JOIN +(SELECT k key FROM nt2 WHERE v2 = 22) b; + +-- Join reordering +create temporary view A(a, va) as select * from nt1; +create temporary view B(b, vb) as select * from nt1; +create temporary view C(c, vc) as select * from nt1; +create temporary view D(d, vd) as select * from nt1; + +-- Allowed since cross join with C is explicit +select * from ((A join B on (a = b)) cross join C) join D on (a = d); + diff --git a/sql/core/src/test/resources/sql-tests/inputs/cte.sql b/sql/core/src/test/resources/sql-tests/inputs/cte.sql index 10d34deff4ee3..3914db26914b4 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/cte.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/cte.sql @@ -11,4 +11,4 @@ WITH t AS (SELECT 1 FROM t) SELECT * FROM t; WITH s1 AS (SELECT 1 FROM s2), s2 AS (SELECT 1 FROM s1) SELECT * FROM s1, s2; -- WITH clause should reference the previous CTE -WITH t1 AS (SELECT * FROM t2), t2 AS (SELECT 2 FROM t1) SELECT * FROM t1, t2; +WITH t1 AS (SELECT * FROM t2), t2 AS (SELECT 2 FROM t1) SELECT * FROM t1 cross join t2; diff --git a/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql b/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql index f50f1ebad970e..cdc6c81e10047 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/outer-join.sql @@ -24,6 +24,9 @@ CREATE OR REPLACE TEMPORARY VIEW t1 AS SELECT * FROM VALUES (97) as t1(int_col1) CREATE OR REPLACE TEMPORARY VIEW t2 AS SELECT * FROM VALUES (0) as t2(int_col1); +-- Set the cross join enabled flag for the LEFT JOIN test since there's no join condition. +-- Ultimately the join should be optimized away. 
+set spark.sql.crossJoin.enabled = true; SELECT * FROM ( SELECT @@ -31,6 +34,6 @@ SELECT FROM t1 LEFT JOIN t2 ON false ) t where (t.int_col) is not null; - +set spark.sql.crossJoin.enabled = false; diff --git a/sql/core/src/test/resources/sql-tests/results/cross-join.sql.out b/sql/core/src/test/resources/sql-tests/results/cross-join.sql.out new file mode 100644 index 0000000000000..562e174fc0bb2 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/cross-join.sql.out @@ -0,0 +1,129 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 12 + + +-- !query 0 +create temporary view nt1 as select * from values + ("one", 1), + ("two", 2), + ("three", 3) + as nt1(k, v1) +-- !query 0 schema +struct<> +-- !query 0 output + + + +-- !query 1 +create temporary view nt2 as select * from values + ("one", 1), + ("two", 22), + ("one", 5) + as nt2(k, v2) +-- !query 1 schema +struct<> +-- !query 1 output + + + +-- !query 2 +SELECT * FROM nt1 cross join nt2 +-- !query 2 schema +struct +-- !query 2 output +one 1 one 1 +one 1 one 5 +one 1 two 22 +three 3 one 1 +three 3 one 5 +three 3 two 22 +two 2 one 1 +two 2 one 5 +two 2 two 22 + + +-- !query 3 +SELECT * FROM nt1 cross join nt2 where nt1.k = nt2.k +-- !query 3 schema +struct +-- !query 3 output +one 1 one 1 +one 1 one 5 +two 2 two 22 + + +-- !query 4 +SELECT * FROM nt1 cross join nt2 on (nt1.k = nt2.k) +-- !query 4 schema +struct +-- !query 4 output +one 1 one 1 +one 1 one 5 +two 2 two 22 + + +-- !query 5 +SELECT * FROM nt1 cross join nt2 where nt1.v1 = 1 and nt2.v2 = 22 +-- !query 5 schema +struct +-- !query 5 output +one 1 two 22 + + +-- !query 6 +SELECT a.key, b.key FROM +(SELECT k key FROM nt1 WHERE v1 < 2) a +CROSS JOIN +(SELECT k key FROM nt2 WHERE v2 = 22) b +-- !query 6 schema +struct +-- !query 6 output +one two + + +-- !query 7 +create temporary view A(a, va) as select * from nt1 +-- !query 7 schema +struct<> +-- !query 7 output + + + +-- !query 8 +create temporary view B(b, vb) as select * from nt1 +-- !query 8 schema +struct<> +-- !query 8 output + + + +-- !query 9 +create temporary view C(c, vc) as select * from nt1 +-- !query 9 schema +struct<> +-- !query 9 output + + + +-- !query 10 +create temporary view D(d, vd) as select * from nt1 +-- !query 10 schema +struct<> +-- !query 10 output + + + +-- !query 11 +select * from ((A join B on (a = b)) cross join C) join D on (a = d) +-- !query 11 schema +struct +-- !query 11 output +one 1 one 1 one 1 one 1 +one 1 one 1 three 3 one 1 +one 1 one 1 two 2 one 1 +three 3 three 3 one 1 three 3 +three 3 three 3 three 3 three 3 +three 3 three 3 two 2 three 3 +two 2 two 2 one 1 two 2 +two 2 two 2 three 3 two 2 +two 2 two 2 two 2 two 2 diff --git a/sql/core/src/test/resources/sql-tests/results/cte.sql.out b/sql/core/src/test/resources/sql-tests/results/cte.sql.out index ddee5bf2d473b..9fbad8f3800a9 100644 --- a/sql/core/src/test/resources/sql-tests/results/cte.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/cte.sql.out @@ -47,7 +47,7 @@ Table or view not found: s2; line 1 pos 26 -- !query 5 -WITH t1 AS (SELECT * FROM t2), t2 AS (SELECT 2 FROM t1) SELECT * FROM t1, t2 +WITH t1 AS (SELECT * FROM t2), t2 AS (SELECT 2 FROM t1) SELECT * FROM t1 cross join t2 -- !query 5 schema struct -- !query 5 output diff --git a/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out b/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out index b39fdb0e58720..cc50b9444bb4b 100644 --- a/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out +++ 
b/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 6 +-- Number of queries: 8 -- !query 0 @@ -59,6 +59,14 @@ struct<> -- !query 5 +set spark.sql.crossJoin.enabled = true +-- !query 5 schema +struct +-- !query 5 output +spark.sql.crossJoin.enabled + + +-- !query 6 SELECT * FROM ( SELECT @@ -66,7 +74,15 @@ SELECT FROM t1 LEFT JOIN t2 ON false ) t where (t.int_col) is not null --- !query 5 schema +-- !query 6 schema struct --- !query 5 output +-- !query 6 output 97 + + +-- !query 7 +set spark.sql.crossJoin.enabled = false +-- !query 7 schema +struct +-- !query 7 output +spark.sql.crossJoin.enabled diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala index 4abf5e42b9c34..541ffb58e727f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala @@ -104,6 +104,21 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext { .collect().toSeq) } + test("join - cross join") { + val df1 = Seq((1, "1"), (3, "3")).toDF("int", "str") + val df2 = Seq((2, "2"), (4, "4")).toDF("int", "str") + + checkAnswer( + df1.crossJoin(df2), + Row(1, "1", 2, "2") :: Row(1, "1", 4, "4") :: + Row(3, "3", 2, "2") :: Row(3, "3", 4, "4") :: Nil) + + checkAnswer( + df2.crossJoin(df1), + Row(2, "2", 1, "1") :: Row(2, "2", 3, "3") :: + Row(4, "4", 1, "1") :: Row(4, "4", 3, "3") :: Nil) + } + test("join - using aliases after self join") { val df = Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str") checkAnswer( @@ -145,7 +160,7 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext { assert(plan1.collect { case p: BroadcastHashJoinExec => p }.size === 1) // no join key -- should not be a broadcast join - val plan2 = df1.join(broadcast(df2)).queryExecution.sparkPlan + val plan2 = df1.crossJoin(broadcast(df2)).queryExecution.sparkPlan assert(plan2.collect { case p: BroadcastHashJoinExec => p }.size === 0) // planner should not crash without a join @@ -155,7 +170,7 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext { withTempPath { path => df1.write.parquet(path.getCanonicalPath) val pf1 = spark.read.parquet(path.getCanonicalPath) - assert(df1.join(broadcast(pf1)).count() === 4) + assert(df1.crossJoin(broadcast(pf1)).count() === 4) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala index f89951760f7d2..c2d256bdd335b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala @@ -626,9 +626,9 @@ class DataFrameSuite extends QueryTest with SharedSQLContext { test("drop(name: String) search and drop all top level columns that matchs the name") { val df1 = Seq((1, 2)).toDF("a", "b") val df2 = Seq((3, 4)).toDF("a", "b") - checkAnswer(df1.join(df2), Row(1, 2, 3, 4)) + checkAnswer(df1.crossJoin(df2), Row(1, 2, 3, 4)) // Finds and drops all columns that match the name (case insensitive). 
- checkAnswer(df1.join(df2).drop("A"), Row(2, 4)) + checkAnswer(df1.crossJoin(df2).drop("A"), Row(2, 4)) } test("withColumnRenamed") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala index 8ce6ea66b6bbf..3243f352a5337 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala @@ -466,7 +466,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext { test("self join") { val ds = Seq("1", "2").toDS().as("a") - val joined = ds.joinWith(ds, lit(true)) + val joined = ds.joinWith(ds, lit(true), "cross") checkDataset(joined, ("1", "1"), ("1", "2"), ("2", "1"), ("2", "2")) } @@ -486,7 +486,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext { test("Kryo encoder self join") { implicit val kryoEncoder = Encoders.kryo[KryoData] val ds = Seq(KryoData(1), KryoData(2)).toDS() - assert(ds.joinWith(ds, lit(true)).collect().toSet == + assert(ds.joinWith(ds, lit(true), "cross").collect().toSet == Set( (KryoData(1), KryoData(1)), (KryoData(1), KryoData(2)), @@ -514,7 +514,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext { test("Java encoder self join") { implicit val kryoEncoder = Encoders.javaSerialization[JavaData] val ds = Seq(JavaData(1), JavaData(2)).toDS() - assert(ds.joinWith(ds, lit(true)).collect().toSet == + assert(ds.joinWith(ds, lit(true), "cross").collect().toSet == Set( (JavaData(1), JavaData(1)), (JavaData(1), JavaData(2)), @@ -532,7 +532,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext { val ds2 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS() checkDataset( - ds1.joinWith(ds2, lit(true)), + ds1.joinWith(ds2, lit(true), "cross"), ((nullInt, "1"), (nullInt, "1")), ((nullInt, "1"), (new java.lang.Integer(22), "2")), ((new java.lang.Integer(22), "2"), (nullInt, "1")), diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala index 44889d92ee306..913b2ae9762cc 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala @@ -225,8 +225,8 @@ class JoinSuite extends QueryTest with SharedSQLContext { Row(2, 2, 1, null) :: Row(2, 2, 2, 2) :: Nil) } - assert(e.getMessage.contains("Cartesian joins could be prohibitively expensive and are " + - "disabled by default")) + assert(e.getMessage.contains("Detected cartesian product for INNER join " + + "between logical plans")) } } @@ -482,7 +482,8 @@ class JoinSuite extends QueryTest with SharedSQLContext { // we set the threshold is greater than statistic of the cached table testData withSQLConf( - SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> (sizeInByteOfTestData + 1).toString()) { + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> (sizeInByteOfTestData + 1).toString(), + SQLConf.CROSS_JOINS_ENABLED.key -> "true") { assert(statisticSizeInByte(spark.table("testData2")) > spark.conf.get(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD)) @@ -573,4 +574,34 @@ class JoinSuite extends QueryTest with SharedSQLContext { Row(3, 1) :: Row(3, 2) :: Nil) } + + test("cross join detection") { + testData.createOrReplaceTempView("A") + testData.createOrReplaceTempView("B") + testData2.createOrReplaceTempView("C") + testData3.createOrReplaceTempView("D") + upperCaseData.where('N >= 3).createOrReplaceTempView("`right`") + val cartesianQueries = Seq( + /** The following should error out since there 
is no explicit cross join */ + "SELECT * FROM testData inner join testData2", + "SELECT * FROM testData left outer join testData2", + "SELECT * FROM testData right outer join testData2", + "SELECT * FROM testData full outer join testData2", + "SELECT * FROM testData, testData2", + "SELECT * FROM testData, testData2 where testData.key = 1 and testData2.a = 22", + /** The following should fail because after reordering there are cartesian products */ + "select * from (A join B on (A.key = B.key)) join D on (A.key=D.a) join C", + "select * from ((A join B on (A.key = B.key)) join C) join D on (A.key = D.a)", + /** Cartesian product involving C, which is not involved in a CROSS join */ + "select * from ((A join B on (A.key = B.key)) cross join D) join C on (A.key = D.a)"); + + def checkCartesianDetection(query: String): Unit = { + val e = intercept[Exception] { + checkAnswer(sql(query), Nil); + } + assert(e.getMessage.contains("Detected cartesian product")) + } + + cartesianQueries.foreach(checkCartesianDetection) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala index d3cfa953a3123..afd47897ed4b2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala @@ -361,7 +361,8 @@ class SQLWindowFunctionSuite extends QueryTest with SharedSQLContext { |with | v0 as (select 0 as key, 1 as value), | v1 as (select key, count(value) over (partition by key) cnt_val from v0), - | v2 as (select v1.key, v1_lag.cnt_val from v1, v1 v1_lag where v1.key = v1_lag.key) + | v2 as (select v1.key, v1_lag.cnt_val from v1 cross join v1 v1_lag + | where v1.key = v1_lag.key) | select key, cnt_val from v2 order by key limit 1 """.stripMargin), Row(0, 1)) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala index 35dab63672c05..4408ece112258 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala @@ -109,8 +109,8 @@ class InnerJoinSuite extends SparkPlanTest with SharedSQLContext { leftPlan: SparkPlan, rightPlan: SparkPlan, side: BuildSide) = { - val shuffledHashJoin = - joins.ShuffledHashJoinExec(leftKeys, rightKeys, Inner, side, None, leftPlan, rightPlan) + val shuffledHashJoin = joins.ShuffledHashJoinExec(leftKeys, rightKeys, Inner, + side, None, leftPlan, rightPlan) val filteredJoin = boundCondition.map(FilterExec(_, shuffledHashJoin)).getOrElse(shuffledHashJoin) EnsureRequirements(spark.sessionState.conf).apply(filteredJoin) @@ -122,8 +122,8 @@ class InnerJoinSuite extends SparkPlanTest with SharedSQLContext { boundCondition: Option[Expression], leftPlan: SparkPlan, rightPlan: SparkPlan) = { - val sortMergeJoin = - joins.SortMergeJoinExec(leftKeys, rightKeys, Inner, boundCondition, leftPlan, rightPlan) + val sortMergeJoin = joins.SortMergeJoinExec(leftKeys, rightKeys, Inner, boundCondition, + leftPlan, rightPlan) EnsureRequirements(spark.sessionState.conf).apply(sortMergeJoin) } diff --git a/sql/hive/src/test/resources/sqlgen/join_2_tables.sql b/sql/hive/src/test/resources/sqlgen/join_2_tables.sql index 9dd200c3c0cfa..0f033a04aea47 100644 --- a/sql/hive/src/test/resources/sqlgen/join_2_tables.sql +++ 
b/sql/hive/src/test/resources/sqlgen/join_2_tables.sql @@ -1,7 +1,7 @@ -- This file is automatically generated by LogicalPlanToSQLSuite. SELECT COUNT(a.value), b.KEY, a.KEY -FROM parquet_t1 a, parquet_t1 b +FROM parquet_t1 a CROSS JOIN parquet_t1 b GROUP BY a.KEY, b.KEY HAVING MAX(a.KEY) > 0 -------------------------------------------------------------------------------- -SELECT `gen_attr_0` AS `count(value)`, `gen_attr_1` AS `KEY`, `gen_attr_2` AS `KEY` FROM (SELECT `gen_attr_0`, `gen_attr_1`, `gen_attr_2` FROM (SELECT count(`gen_attr_4`) AS `gen_attr_0`, `gen_attr_1`, `gen_attr_2`, max(`gen_attr_2`) AS `gen_attr_3` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_4` FROM `default`.`parquet_t1`) AS gen_subquery_0 INNER JOIN (SELECT `key` AS `gen_attr_1`, `value` AS `gen_attr_5` FROM `default`.`parquet_t1`) AS gen_subquery_1 GROUP BY `gen_attr_2`, `gen_attr_1` HAVING (`gen_attr_3` > CAST(0 AS BIGINT))) AS gen_subquery_2) AS gen_subquery_3 +SELECT `gen_attr_0` AS `count(value)`, `gen_attr_1` AS `KEY`, `gen_attr_2` AS `KEY` FROM (SELECT `gen_attr_0`, `gen_attr_1`, `gen_attr_2` FROM (SELECT count(`gen_attr_4`) AS `gen_attr_0`, `gen_attr_1`, `gen_attr_2`, max(`gen_attr_2`) AS `gen_attr_3` FROM (SELECT `key` AS `gen_attr_2`, `value` AS `gen_attr_4` FROM `default`.`parquet_t1`) AS gen_subquery_0 CROSS JOIN (SELECT `key` AS `gen_attr_1`, `value` AS `gen_attr_5` FROM `default`.`parquet_t1`) AS gen_subquery_1 GROUP BY `gen_attr_2`, `gen_attr_1` HAVING (`gen_attr_3` > CAST(0 AS BIGINT))) AS gen_subquery_2) AS gen_subquery_3 diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala index 9c6da6a628dcf..3e0fdc1f8b925 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala @@ -642,7 +642,7 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { checkColumnNames( """SELECT x.a, y.a, x.b, y.b |FROM (SELECT 1 AS a, 2 AS b) x - |INNER JOIN (SELECT 1 AS a, 2 AS b) y + |CROSS JOIN (SELECT 1 AS a, 2 AS b) y |ON x.a = y.a """.stripMargin, "a", "a", "b", "b" @@ -810,7 +810,7 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { checkSQL( """ |SELECT COUNT(a.value), b.KEY, a.KEY - |FROM parquet_t1 a, parquet_t1 b + |FROM parquet_t1 a CROSS JOIN parquet_t1 b |GROUP BY a.KEY, b.KEY |HAVING MAX(a.KEY) > 0 """.stripMargin, diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala index 3c7dbb449c521..1d1a958d3fea3 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala @@ -318,10 +318,6 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter { createQueryTest("trivial join ON clause", "SELECT * FROM src a JOIN src b ON a.key = b.key") - createQueryTest("small.cartesian", - "SELECT a.key, b.key FROM (SELECT key FROM src WHERE key < 1) a JOIN " + - "(SELECT key FROM src WHERE key = 2) b") - createQueryTest("length.udf", "SELECT length(\"test\") FROM src LIMIT 1") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala index e92bbdea75a7b..2f6d9fb96b825 100644 --- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala @@ -592,9 +592,9 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest { test("self-join") { val table = spark.table("normal_parquet") - val selfJoin = table.as("t1").join(table.as("t2")) + val selfJoin = table.as("t1").crossJoin(table.as("t2")) checkAnswer(selfJoin, - sql("SELECT * FROM normal_parquet x JOIN normal_parquet y")) + sql("SELECT * FROM normal_parquet x CROSS JOIN normal_parquet y")) } } From d2fde6b72c4aede2e7edb4a7e6653fb1e7b19924 Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Fri, 2 Sep 2016 21:11:57 -0700 Subject: [PATCH 194/270] [SPARKR][MINOR] Fix docs for sparkR.session and count ## What changes were proposed in this pull request? This PR tries to add some more explanation to `sparkR.session`. It also modifies doc for `count` so when grouped in one doc, the description doesn't confuse users. ## How was this patch tested? Manual test. ![screen shot 2016-09-02 at 1 21 36 pm](https://cloud.githubusercontent.com/assets/15318264/18217198/409613ac-7110-11e6-8dae-cb0c8df557bf.png) Author: Junyang Qian Closes #14942 from junyangq/fixSparkRSessionDoc. --- R/pkg/R/functions.R | 3 ++- R/pkg/R/group.R | 2 +- R/pkg/R/sparkR.R | 6 ++++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/R/pkg/R/functions.R b/R/pkg/R/functions.R index 369b1d00d9e5b..ceedbe76711b1 100644 --- a/R/pkg/R/functions.R +++ b/R/pkg/R/functions.R @@ -444,7 +444,8 @@ setMethod("cosh", #' Returns the number of items in a group #' -#' Returns the number of items in a group. This is a column aggregate function. +#' This can be used as a column aggregate function with \code{Column} as input, +#' and returns the number of items in a group. #' #' @rdname count #' @name count diff --git a/R/pkg/R/group.R b/R/pkg/R/group.R index e3479ef5fa583..17f5283abead1 100644 --- a/R/pkg/R/group.R +++ b/R/pkg/R/group.R @@ -57,7 +57,7 @@ setMethod("show", "GroupedData", #' Count #' -#' Count the number of rows for each group. +#' Count the number of rows for each group when we have \code{GroupedData} input. #' The resulting SparkDataFrame will also contain the grouping columns. #' #' @return A SparkDataFrame. diff --git a/R/pkg/R/sparkR.R b/R/pkg/R/sparkR.R index de53b0bf79b58..15afe01c24ed2 100644 --- a/R/pkg/R/sparkR.R +++ b/R/pkg/R/sparkR.R @@ -314,8 +314,10 @@ sparkRHive.init <- function(jsc = NULL) { #' Get the existing SparkSession or initialize a new SparkSession. #' -#' Additional Spark properties can be set (...), and these named parameters take priority over -#' over values in master, appName, named lists of sparkConfig. +#' SparkSession is the entry point into SparkR. \code{sparkR.session} gets the existing +#' SparkSession or initializes a new SparkSession. +#' Additional Spark properties can be set in \code{...}, and these named parameters take priority +#' over values in \code{master}, \code{appName}, named lists of \code{sparkConfig}. #' #' For details on how to initialize and use SparkR, refer to SparkR programming guide at #' \url{http://spark.apache.org/docs/latest/sparkr.html#starting-up-sparksession}. From 7a8a81d79f4bee3395fb399ccc4d47744f8a0951 Mon Sep 17 00:00:00 2001 From: WeichenXu Date: Sat, 3 Sep 2016 09:52:53 +0100 Subject: [PATCH 195/270] [SPARK-17363][ML][MLLIB] fix MultivariantOnlineSummerizer.numNonZeros ## What changes were proposed in this pull request? 
fix `MultivariantOnlineSummerizer.numNonZeros` method, return `nnz` array, instead of `weightSum` array ## How was this patch tested? Existing test. Author: WeichenXu Closes #14923 from WeichenXu123/fix_MultivariantOnlineSummerizer_numNonZeros. --- .../spark/mllib/stat/MultivariateOnlineSummarizer.scala | 4 ++-- .../spark/mllib/stat/MultivariateOnlineSummarizerSuite.scala | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala index 964f419d120dd..7a2a7a35a91cd 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala @@ -231,9 +231,9 @@ class MultivariateOnlineSummarizer extends MultivariateStatisticalSummary with S */ @Since("1.1.0") override def numNonzeros: Vector = { - require(totalWeightSum > 0, s"Nothing has been added to this summarizer.") + require(totalCnt > 0, s"Nothing has been added to this summarizer.") - Vectors.dense(weightSum) + Vectors.dense(nnz.map(_.toDouble)) } /** diff --git a/mllib/src/test/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizerSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizerSuite.scala index 165a3f314a201..797e84fcc7377 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizerSuite.scala @@ -237,7 +237,7 @@ class MultivariateOnlineSummarizerSuite extends SparkFunSuite { absTol 1E-10, "mean mismatch") assert(summarizer.variance ~== Vectors.dense(Array(0.17657142857, 1.645115714, 2.42057142857)) absTol 1E-8, "variance mismatch") - assert(summarizer.numNonzeros ~== Vectors.dense(Array(0.3, 0.5, 0.4)) + assert(summarizer.numNonzeros ~== Vectors.dense(Array(3.0, 4.0, 3.0)) absTol 1E-10, "numNonzeros mismatch") assert(summarizer.max ~== Vectors.dense(Array(0.0, 1.7, 1.3)) absTol 1E-10, "max mismatch") assert(summarizer.min ~== Vectors.dense(Array(-0.8, -1.2, -1.7)) absTol 1E-10, "min mismatch") From 97da41039b2b8fa7f93caf213ae45b9973925995 Mon Sep 17 00:00:00 2001 From: CodingCat Date: Sat, 3 Sep 2016 10:03:40 +0100 Subject: [PATCH 196/270] [SPARK-17347][SQL][EXAMPLES] Encoder in Dataset example has incorrect type ## What changes were proposed in this pull request? We propose to fix the Encoder type in the Dataset example ## How was this patch tested? The PR will be tested with the current unit test cases Author: CodingCat Closes #14901 from CodingCat/SPARK-17347. 
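The patch below is a one-line example fix, but the distinction it points at is worth spelling out: `Row.getValuesMap[Any]` produces a `Map[String, Any]`, for which Spark cannot derive an `ExpressionEncoder` (there is no encoder for `Any`), so the example registers a Kryo-based encoder instead. The following is a minimal, self-contained sketch of that pattern; the object name, the inline sample data, and the `local[*]` master are assumptions made for illustration, not part of the example being patched.

```scala
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}

object MapEncoderSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("map-encoder-sketch")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    val people = Seq(("Justin", 19), ("Andy", 30)).toDF("name", "age")

    // getValuesMap[Any] yields Map[String, Any]; Any has no ExpressionEncoder, so fall back to Kryo.
    implicit val mapEncoder: Encoder[Map[String, Any]] = Encoders.kryo[Map[String, Any]]
    people.map(row => row.getValuesMap[Any](List("name", "age"))).collect().foreach(println)

    // For maps of concrete primitive types a reflective encoder can still be derived, e.g.
    // implicit val intMapEncoder: Encoder[Map[String, Int]] =
    //   org.apache.spark.sql.catalyst.encoders.ExpressionEncoder()

    spark.stop()
  }
}
```

Registering the Kryo encoder as an implicit keeps the `map`/`collect` call the same shape as in `SparkSQLExample`.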
--- .../scala/org/apache/spark/examples/sql/SparkSQLExample.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala index 5cd437d017f6f..129b81d5fbbf3 100644 --- a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala +++ b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala @@ -203,7 +203,7 @@ object SparkSQLExample { // No pre-defined encoders for Dataset[Map[K,V]], define explicitly implicit val mapEncoder = org.apache.spark.sql.Encoders.kryo[Map[String, Any]] // Primitive types and case classes can be also defined as - implicit val stringIntMapEncoder: Encoder[Map[String, Int]] = ExpressionEncoder() + // implicit val stringIntMapEncoder: Encoder[Map[String, Any]] = ExpressionEncoder() // row.getValuesMap[T] retrieves multiple columns at once into a Map[String, T] teenagersDF.map(teenager => teenager.getValuesMap[Any](List("name", "age"))).collect() From a8a35b39b92fc9000eaac102c67c66be30b05e54 Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Sat, 3 Sep 2016 15:35:19 +0100 Subject: [PATCH 197/270] [MINOR][SQL] Not dropping all necessary tables ## What changes were proposed in this pull request? was not dropping table `parquet_t3` ## How was this patch tested? tested `LogicalPlanToSQLSuite` locally Author: Sandeep Singh Closes #13767 from techaddict/minor-8. --- .../spark/sql/catalyst/LogicalPlanToSQLSuite.scala | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala index 3e0fdc1f8b925..d80f894c22dd8 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/LogicalPlanToSQLSuite.scala @@ -49,9 +49,9 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { protected override def beforeAll(): Unit = { super.beforeAll() - sql("DROP TABLE IF EXISTS parquet_t0") - sql("DROP TABLE IF EXISTS parquet_t1") - sql("DROP TABLE IF EXISTS parquet_t2") + (0 to 3).foreach { i => + sql(s"DROP TABLE IF EXISTS parquet_t$i") + } sql("DROP TABLE IF EXISTS t0") spark.range(10).write.saveAsTable("parquet_t0") @@ -87,10 +87,9 @@ class LogicalPlanToSQLSuite extends SQLBuilderTest with SQLTestUtils { override protected def afterAll(): Unit = { try { - sql("DROP TABLE IF EXISTS parquet_t0") - sql("DROP TABLE IF EXISTS parquet_t1") - sql("DROP TABLE IF EXISTS parquet_t2") - sql("DROP TABLE IF EXISTS parquet_t3") + (0 to 3).foreach { i => + sql(s"DROP TABLE IF EXISTS parquet_t$i") + } sql("DROP TABLE IF EXISTS t0") } finally { super.afterAll() From c2a1576c230697f56f282b6388c79835377e0f2f Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Sat, 3 Sep 2016 19:02:20 +0200 Subject: [PATCH 198/270] [SPARK-17335][SQL] Fix ArrayType and MapType CatalogString. ## What changes were proposed in this pull request? the `catalogString` for `ArrayType` and `MapType` currently calls the `simpleString` method on its children. This is a problem when the child is a struct, the `struct.simpleString` implementation truncates the number of fields it shows (25 at max). This breaks the generation of a proper `catalogString`, and has shown to cause errors while writing to Hive. 
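To make the truncation concrete before describing the fix, here is a minimal sketch assuming a 40-field struct of integer columns; the object name and field names are invented for illustration.

```scala
import org.apache.spark.sql.types.{ArrayType, IntegerType, StructField, StructType}

object WideStructTypeStrings {
  def main(args: Array[String]): Unit = {
    // A struct wider than the 25-field limit that simpleString applies.
    val wide = StructType((0 until 40).map(i => StructField(s"col$i", IntegerType)))

    // simpleString elides the tail of the field list, so it is not round-trippable.
    println(wide.simpleString)

    // Without this patch, ArrayType.catalogString falls back to simpleString and inherits the
    // truncated element type; with the patch it uses elementType.catalogString and keeps all fields.
    println(ArrayType(wide).catalogString)
  }
}
```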
This PR fixes this by providing proper `catalogString` implementations for `ArrayData` or `MapData`. ## How was this patch tested? Added testing for `catalogString` to `DataTypeSuite`. Author: Herman van Hovell Closes #14938 from hvanhovell/SPARK-17335. --- .../apache/spark/sql/types/ArrayType.scala | 2 + .../org/apache/spark/sql/types/MapType.scala | 2 + .../spark/sql/types/DataTypeSuite.scala | 30 +++ .../WideSchemaBenchmark-results.txt | 174 ++++++++++-------- 4 files changed, 133 insertions(+), 75 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala index 520e344361625..82a03b0afc002 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala @@ -77,6 +77,8 @@ case class ArrayType(elementType: DataType, containsNull: Boolean) extends DataT override def simpleString: String = s"array<${elementType.simpleString}>" + override def catalogString: String = s"array<${elementType.catalogString}>" + override def sql: String = s"ARRAY<${elementType.sql}>" override private[spark] def asNullable: ArrayType = diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/MapType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/MapType.scala index 454ea403bac22..178960929bd83 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/MapType.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/MapType.scala @@ -64,6 +64,8 @@ case class MapType( override def simpleString: String = s"map<${keyType.simpleString},${valueType.simpleString}>" + override def catalogString: String = s"map<${keyType.catalogString},${valueType.catalogString}>" + override def sql: String = s"MAP<${keyType.sql}, ${valueType.sql}>" override private[spark] def asNullable: MapType = diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/DataTypeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/DataTypeSuite.scala index 688bc3e6026ec..b8ab9a9963de8 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/DataTypeSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/DataTypeSuite.scala @@ -18,6 +18,7 @@ package org.apache.spark.sql.types import org.apache.spark.{SparkException, SparkFunSuite} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser class DataTypeSuite extends SparkFunSuite { @@ -359,4 +360,33 @@ class DataTypeSuite extends SparkFunSuite { StructField("a", StringType, nullable = false) :: StructField("b", StringType, nullable = false) :: Nil), expected = false) + + def checkCatalogString(dt: DataType): Unit = { + test(s"catalogString: $dt") { + val dt2 = CatalystSqlParser.parseDataType(dt.catalogString) + assert(dt === dt2) + } + } + def createStruct(n: Int): StructType = new StructType(Array.tabulate(n) { + i => StructField(s"col$i", IntegerType, nullable = true) + }) + + checkCatalogString(BooleanType) + checkCatalogString(ByteType) + checkCatalogString(ShortType) + checkCatalogString(IntegerType) + checkCatalogString(LongType) + checkCatalogString(FloatType) + checkCatalogString(DoubleType) + checkCatalogString(DecimalType(10, 5)) + checkCatalogString(BinaryType) + checkCatalogString(StringType) + checkCatalogString(DateType) + checkCatalogString(TimestampType) + checkCatalogString(createStruct(4)) + checkCatalogString(createStruct(40)) + checkCatalogString(ArrayType(IntegerType)) + 
checkCatalogString(ArrayType(createStruct(40))) + checkCatalogString(MapType(IntegerType, StringType)) + checkCatalogString(MapType(IntegerType, createStruct(40))) } diff --git a/sql/core/benchmarks/WideSchemaBenchmark-results.txt b/sql/core/benchmarks/WideSchemaBenchmark-results.txt index ea6a6616c23d4..0b9f791ac85e4 100644 --- a/sql/core/benchmarks/WideSchemaBenchmark-results.txt +++ b/sql/core/benchmarks/WideSchemaBenchmark-results.txt @@ -1,93 +1,117 @@ -OpenJDK 64-Bit Server VM 1.8.0_66-internal-b17 on Linux 4.2.0-36-generic -Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz + parsing large select: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ -1 select expressions 3 / 5 0.0 2967064.0 1.0X -100 select expressions 11 / 12 0.0 11369518.0 0.3X -2500 select expressions 243 / 250 0.0 242561004.0 0.0X +1 select expressions 2 / 4 0.0 2050147.0 1.0X +100 select expressions 6 / 7 0.0 6123412.0 0.3X +2500 select expressions 135 / 141 0.0 134623148.0 0.0X + +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz -OpenJDK 64-Bit Server VM 1.8.0_66-internal-b17 on Linux 4.2.0-36-generic -Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz many column field r/w: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ -1 cols x 100000 rows (read in-mem) 28 / 40 3.6 278.8 1.0X -1 cols x 100000 rows (exec in-mem) 28 / 42 3.5 284.0 1.0X -1 cols x 100000 rows (read parquet) 23 / 35 4.4 228.8 1.2X -1 cols x 100000 rows (write parquet) 163 / 182 0.6 1633.0 0.2X -100 cols x 1000 rows (read in-mem) 27 / 39 3.7 266.9 1.0X -100 cols x 1000 rows (exec in-mem) 48 / 79 2.1 481.7 0.6X -100 cols x 1000 rows (read parquet) 25 / 36 3.9 254.3 1.1X -100 cols x 1000 rows (write parquet) 182 / 196 0.5 1819.5 0.2X -2500 cols x 40 rows (read in-mem) 280 / 315 0.4 2797.1 0.1X -2500 cols x 40 rows (exec in-mem) 606 / 638 0.2 6064.3 0.0X -2500 cols x 40 rows (read parquet) 836 / 843 0.1 8356.4 0.0X -2500 cols x 40 rows (write parquet) 490 / 522 0.2 4900.6 0.1X +1 cols x 100000 rows (read in-mem) 16 / 18 6.3 158.6 1.0X +1 cols x 100000 rows (exec in-mem) 17 / 19 6.0 166.7 1.0X +1 cols x 100000 rows (read parquet) 24 / 26 4.3 235.1 0.7X +1 cols x 100000 rows (write parquet) 81 / 85 1.2 811.3 0.2X +100 cols x 1000 rows (read in-mem) 17 / 19 6.0 166.2 1.0X +100 cols x 1000 rows (exec in-mem) 25 / 27 4.0 249.2 0.6X +100 cols x 1000 rows (read parquet) 23 / 25 4.4 226.0 0.7X +100 cols x 1000 rows (write parquet) 83 / 87 1.2 831.0 0.2X +2500 cols x 40 rows (read in-mem) 132 / 137 0.8 1322.9 0.1X +2500 cols x 40 rows (exec in-mem) 326 / 330 0.3 3260.6 0.0X +2500 cols x 40 rows (read parquet) 831 / 839 0.1 8305.8 0.0X +2500 cols x 40 rows (write parquet) 237 / 245 0.4 2372.6 0.1X + +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz -OpenJDK 64-Bit Server VM 1.8.0_66-internal-b17 on Linux 4.2.0-36-generic -Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz wide shallowly nested struct field r/w: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ -1 wide x 100000 rows (read in-mem) 22 / 35 4.6 216.0 1.0X -1 wide x 100000 rows (exec in-mem) 40 / 63 2.5 400.6 
0.5X -1 wide x 100000 rows (read parquet) 93 / 134 1.1 933.9 0.2X -1 wide x 100000 rows (write parquet) 133 / 174 0.7 1334.3 0.2X -100 wide x 1000 rows (read in-mem) 22 / 44 4.5 223.3 1.0X -100 wide x 1000 rows (exec in-mem) 88 / 138 1.1 878.6 0.2X -100 wide x 1000 rows (read parquet) 117 / 186 0.9 1172.0 0.2X -100 wide x 1000 rows (write parquet) 144 / 174 0.7 1441.6 0.1X -2500 wide x 40 rows (read in-mem) 36 / 57 2.8 358.9 0.6X -2500 wide x 40 rows (exec in-mem) 1466 / 1507 0.1 14656.6 0.0X -2500 wide x 40 rows (read parquet) 690 / 802 0.1 6898.2 0.0X -2500 wide x 40 rows (write parquet) 197 / 207 0.5 1970.9 0.1X +1 wide x 100000 rows (read in-mem) 15 / 17 6.6 151.0 1.0X +1 wide x 100000 rows (exec in-mem) 20 / 22 5.1 196.6 0.8X +1 wide x 100000 rows (read parquet) 59 / 63 1.7 592.8 0.3X +1 wide x 100000 rows (write parquet) 81 / 87 1.2 814.6 0.2X +100 wide x 1000 rows (read in-mem) 21 / 25 4.8 208.7 0.7X +100 wide x 1000 rows (exec in-mem) 72 / 81 1.4 718.5 0.2X +100 wide x 1000 rows (read parquet) 75 / 85 1.3 752.6 0.2X +100 wide x 1000 rows (write parquet) 88 / 95 1.1 876.7 0.2X +2500 wide x 40 rows (read in-mem) 28 / 34 3.5 282.2 0.5X +2500 wide x 40 rows (exec in-mem) 1269 / 1284 0.1 12688.1 0.0X +2500 wide x 40 rows (read parquet) 549 / 578 0.2 5493.4 0.0X +2500 wide x 40 rows (write parquet) 96 / 104 1.0 959.1 0.2X + +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz -OpenJDK 64-Bit Server VM 1.8.0_66-internal-b17 on Linux 4.2.0-36-generic -Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz deeply nested struct field r/w: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ -1 deep x 100000 rows (read in-mem) 22 / 35 4.5 223.9 1.0X -1 deep x 100000 rows (exec in-mem) 28 / 52 3.6 280.6 0.8X -1 deep x 100000 rows (read parquet) 41 / 65 2.4 410.5 0.5X -1 deep x 100000 rows (write parquet) 163 / 173 0.6 1634.5 0.1X -100 deep x 1000 rows (read in-mem) 43 / 63 2.3 425.9 0.5X -100 deep x 1000 rows (exec in-mem) 232 / 280 0.4 2321.7 0.1X -100 deep x 1000 rows (read parquet) 1989 / 2281 0.1 19886.6 0.0X -100 deep x 1000 rows (write parquet) 144 / 184 0.7 1442.6 0.2X -250 deep x 400 rows (read in-mem) 68 / 95 1.5 680.9 0.3X -250 deep x 400 rows (exec in-mem) 1310 / 1403 0.1 13096.4 0.0X -250 deep x 400 rows (read parquet) 41477 / 41847 0.0 414766.8 0.0X -250 deep x 400 rows (write parquet) 243 / 272 0.4 2433.1 0.1X +1 deep x 100000 rows (read in-mem) 14 / 16 7.0 143.8 1.0X +1 deep x 100000 rows (exec in-mem) 17 / 19 5.9 169.7 0.8X +1 deep x 100000 rows (read parquet) 33 / 35 3.1 327.0 0.4X +1 deep x 100000 rows (write parquet) 79 / 84 1.3 786.9 0.2X +100 deep x 1000 rows (read in-mem) 21 / 24 4.7 211.3 0.7X +100 deep x 1000 rows (exec in-mem) 221 / 235 0.5 2214.5 0.1X +100 deep x 1000 rows (read parquet) 1928 / 1952 0.1 19277.1 0.0X +100 deep x 1000 rows (write parquet) 91 / 96 1.1 909.5 0.2X +250 deep x 400 rows (read in-mem) 57 / 61 1.8 567.1 0.3X +250 deep x 400 rows (exec in-mem) 1329 / 1385 0.1 13291.8 0.0X +250 deep x 400 rows (read parquet) 36563 / 36750 0.0 365630.2 0.0X +250 deep x 400 rows (write parquet) 126 / 130 0.8 1262.0 0.1X + +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz -OpenJDK 64-Bit Server VM 1.8.0_66-internal-b17 on Linux 4.2.0-36-generic -Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz bushy struct field r/w: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative 
------------------------------------------------------------------------------------------------ -1 x 1 deep x 100000 rows (read in-mem) 23 / 36 4.4 229.8 1.0X -1 x 1 deep x 100000 rows (exec in-mem) 27 / 48 3.7 269.6 0.9X -1 x 1 deep x 100000 rows (read parquet) 25 / 33 4.0 247.5 0.9X -1 x 1 deep x 100000 rows (write parquet) 82 / 134 1.2 821.1 0.3X -128 x 8 deep x 1000 rows (read in-mem) 19 / 29 5.3 189.5 1.2X -128 x 8 deep x 1000 rows (exec in-mem) 144 / 165 0.7 1440.4 0.2X -128 x 8 deep x 1000 rows (read parquet) 117 / 159 0.9 1174.4 0.2X -128 x 8 deep x 1000 rows (write parquet) 135 / 162 0.7 1349.0 0.2X -1024 x 11 deep x 100 rows (read in-mem) 30 / 49 3.3 304.4 0.8X -1024 x 11 deep x 100 rows (exec in-mem) 1146 / 1183 0.1 11457.6 0.0X -1024 x 11 deep x 100 rows (read parquet) 712 / 758 0.1 7119.5 0.0X -1024 x 11 deep x 100 rows (write parquet) 104 / 143 1.0 1037.3 0.2X +1 x 1 deep x 100000 rows (read in-mem) 13 / 15 7.8 127.7 1.0X +1 x 1 deep x 100000 rows (exec in-mem) 15 / 17 6.6 151.5 0.8X +1 x 1 deep x 100000 rows (read parquet) 20 / 23 5.0 198.3 0.6X +1 x 1 deep x 100000 rows (write parquet) 77 / 82 1.3 770.4 0.2X +128 x 8 deep x 1000 rows (read in-mem) 12 / 14 8.2 122.5 1.0X +128 x 8 deep x 1000 rows (exec in-mem) 124 / 140 0.8 1241.2 0.1X +128 x 8 deep x 1000 rows (read parquet) 69 / 74 1.4 693.9 0.2X +128 x 8 deep x 1000 rows (write parquet) 78 / 83 1.3 777.7 0.2X +1024 x 11 deep x 100 rows (read in-mem) 25 / 29 4.1 246.1 0.5X +1024 x 11 deep x 100 rows (exec in-mem) 1197 / 1223 0.1 11974.6 0.0X +1024 x 11 deep x 100 rows (read parquet) 426 / 433 0.2 4263.7 0.0X +1024 x 11 deep x 100 rows (write parquet) 91 / 98 1.1 913.5 0.1X + +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz -OpenJDK 64-Bit Server VM 1.8.0_66-internal-b17 on Linux 4.2.0-36-generic -Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz wide array field r/w: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------ -1 wide x 100000 rows (read in-mem) 18 / 31 5.6 179.3 1.0X -1 wide x 100000 rows (exec in-mem) 31 / 47 3.2 310.2 0.6X -1 wide x 100000 rows (read parquet) 45 / 73 2.2 445.1 0.4X -1 wide x 100000 rows (write parquet) 109 / 140 0.9 1085.9 0.2X -100 wide x 1000 rows (read in-mem) 17 / 25 5.8 172.7 1.0X -100 wide x 1000 rows (exec in-mem) 18 / 22 5.4 184.6 1.0X -100 wide x 1000 rows (read parquet) 26 / 42 3.8 261.8 0.7X -100 wide x 1000 rows (write parquet) 150 / 164 0.7 1499.4 0.1X -2500 wide x 40 rows (read in-mem) 19 / 31 5.1 194.7 0.9X -2500 wide x 40 rows (exec in-mem) 19 / 24 5.3 188.5 1.0X -2500 wide x 40 rows (read parquet) 33 / 47 3.0 334.4 0.5X -2500 wide x 40 rows (write parquet) 153 / 164 0.7 1528.2 0.1X +1 wide x 100000 rows (read in-mem) 14 / 16 7.0 143.2 1.0X +1 wide x 100000 rows (exec in-mem) 17 / 19 5.9 170.9 0.8X +1 wide x 100000 rows (read parquet) 43 / 46 2.3 434.1 0.3X +1 wide x 100000 rows (write parquet) 78 / 83 1.3 777.6 0.2X +100 wide x 1000 rows (read in-mem) 11 / 13 9.0 111.5 1.3X +100 wide x 1000 rows (exec in-mem) 13 / 15 7.8 128.3 1.1X +100 wide x 1000 rows (read parquet) 24 / 27 4.1 245.0 0.6X +100 wide x 1000 rows (write parquet) 74 / 80 1.4 740.5 0.2X +2500 wide x 40 rows (read in-mem) 11 / 13 9.1 109.5 1.3X +2500 wide x 40 rows (exec in-mem) 13 / 15 7.7 129.4 1.1X +2500 wide x 40 rows (read parquet) 24 / 26 4.1 241.3 0.6X +2500 wide x 40 rows (write parquet) 75 / 81 1.3 751.8 0.2X + +Java HotSpot(TM) 64-Bit Server VM 1.8.0_92-b14 on 
Mac OS X 10.11.6 +Intel(R) Core(TM) i7-4980HQ CPU @ 2.80GHz + +wide map field r/w: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative +------------------------------------------------------------------------------------------------ +1 wide x 100000 rows (read in-mem) 16 / 18 6.2 162.6 1.0X +1 wide x 100000 rows (exec in-mem) 21 / 23 4.8 208.2 0.8X +1 wide x 100000 rows (read parquet) 54 / 59 1.8 543.6 0.3X +1 wide x 100000 rows (write parquet) 80 / 86 1.2 804.5 0.2X +100 wide x 1000 rows (read in-mem) 11 / 13 8.7 114.5 1.4X +100 wide x 1000 rows (exec in-mem) 14 / 16 7.0 143.5 1.1X +100 wide x 1000 rows (read parquet) 30 / 32 3.3 300.4 0.5X +100 wide x 1000 rows (write parquet) 75 / 80 1.3 749.9 0.2X +2500 wide x 40 rows (read in-mem) 13 / 15 7.8 128.1 1.3X +2500 wide x 40 rows (exec in-mem) 15 / 18 6.5 153.6 1.1X +2500 wide x 40 rows (read parquet) 30 / 33 3.3 304.4 0.5X +2500 wide x 40 rows (write parquet) 77 / 83 1.3 768.5 0.2X From abb2f921036d97d8cab033838ae559eb731bf0fd Mon Sep 17 00:00:00 2001 From: Junyang Qian Date: Sat, 3 Sep 2016 12:26:30 -0700 Subject: [PATCH 199/270] [SPARK-17315][SPARKR] Kolmogorov-Smirnov test SparkR wrapper ## What changes were proposed in this pull request? This PR tries to add Kolmogorov-Smirnov Test wrapper to SparkR. This wrapper implementation only supports one sample test against normal distribution. ## How was this patch tested? R unit test. Author: Junyang Qian Closes #14881 from junyangq/SPARK-17315. --- R/pkg/NAMESPACE | 7 +- R/pkg/R/generics.R | 4 + R/pkg/R/mllib.R | 105 ++++++++++++++++++ R/pkg/inst/tests/testthat/test_mllib.R | 34 ++++++ .../org/apache/spark/ml/r/KSTestWrapper.scala | 57 ++++++++++ 5 files changed, 205 insertions(+), 2 deletions(-) create mode 100644 mllib/src/main/scala/org/apache/spark/ml/r/KSTestWrapper.scala diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index ce41b512a4e11..a5e9cbdc37f06 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -42,7 +42,8 @@ exportMethods("glm", "spark.perplexity", "spark.isoreg", "spark.gaussianMixture", - "spark.als") + "spark.als", + "spark.kstest") # Job group lifecycle management methods export("setJobGroup", @@ -342,7 +343,8 @@ export("as.DataFrame", "tables", "uncacheTable", "print.summary.GeneralizedLinearRegressionModel", - "read.ml") + "read.ml", + "print.summary.KSTest") export("structField", "structField.jobj", @@ -366,6 +368,7 @@ S3method(print, jobj) S3method(print, structField) S3method(print, structType) S3method(print, summary.GeneralizedLinearRegressionModel) +S3method(print, summary.KSTest) S3method(structField, character) S3method(structField, jobj) S3method(structType, jobj) diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 7e626be50808d..67a999da9bc26 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -1375,3 +1375,7 @@ setGeneric("write.ml", function(object, path, ...) { standardGeneric("write.ml") #' @rdname spark.als #' @export setGeneric("spark.als", function(data, ...) { standardGeneric("spark.als") }) + +#' @rdname spark.kstest +#' @export +setGeneric("spark.kstest", function(data, ...) 
{ standardGeneric("spark.kstest") }) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index 9a53f757b4d44..f321fd19b39b4 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -88,6 +88,13 @@ setClass("GaussianMixtureModel", representation(jobj = "jobj")) #' @note ALSModel since 2.1.0 setClass("ALSModel", representation(jobj = "jobj")) +#' S4 class that represents an KSTest +#' +#' @param jobj a Java object reference to the backing Scala KSTestWrapper +#' @export +#' @note KSTest since 2.1.0 +setClass("KSTest", representation(jobj = "jobj")) + #' Saves the MLlib model to the input path #' #' Saves the MLlib model to the input path. For more information, see the specific @@ -1310,3 +1317,101 @@ setMethod("write.ml", signature(object = "ALSModel", path = "character"), function(object, path, overwrite = FALSE) { write_internal(object, path, overwrite) }) + +#' (One-Sample) Kolmogorov-Smirnov Test +#' +#' @description +#' \code{spark.kstest} Conduct the two-sided Kolmogorov-Smirnov (KS) test for data sampled from a +#' continuous distribution. +#' +#' By comparing the largest difference between the empirical cumulative +#' distribution of the sample data and the theoretical distribution we can provide a test for the +#' the null hypothesis that the sample data comes from that theoretical distribution. +#' +#' Users can call \code{summary} to obtain a summary of the test, and \code{print.summary.KSTest} +#' to print out a summary result. +#' +#' @param data a SparkDataFrame of user data. +#' @param testCol column name where the test data is from. It should be a column of double type. +#' @param nullHypothesis name of the theoretical distribution tested against. Currently only +#' \code{"norm"} for normal distribution is supported. +#' @param distParams parameters(s) of the distribution. For \code{nullHypothesis = "norm"}, +#' we can provide as a vector the mean and standard deviation of +#' the distribution. If none is provided, then standard normal will be used. +#' If only one is provided, then the standard deviation will be set to be one. +#' @param ... additional argument(s) passed to the method. +#' @return \code{spark.kstest} returns a test result object. +#' @rdname spark.kstest +#' @aliases spark.kstest,SparkDataFrame-method +#' @name spark.kstest +#' @seealso \href{http://spark.apache.org/docs/latest/mllib-statistics.html#hypothesis-testing}{ +#' MLlib: Hypothesis Testing} +#' @export +#' @examples +#' \dontrun{ +#' data <- data.frame(test = c(0.1, 0.15, 0.2, 0.3, 0.25)) +#' df <- createDataFrame(data) +#' test <- spark.ktest(df, "test", "norm", c(0, 1)) +#' +#' # get a summary of the test result +#' testSummary <- summary(test) +#' testSummary +#' +#' # print out the summary in an organized way +#' print.summary.KSTest(test) +#' } +#' @note spark.kstest since 2.1.0 +setMethod("spark.kstest", signature(data = "SparkDataFrame"), + function(data, testCol = "test", nullHypothesis = c("norm"), distParams = c(0, 1)) { + tryCatch(match.arg(nullHypothesis), + error = function(e) { + msg <- paste("Distribution", nullHypothesis, "is not supported.") + stop(msg) + }) + if (nullHypothesis == "norm") { + distParams <- as.numeric(distParams) + mu <- ifelse(length(distParams) < 1, 0, distParams[1]) + sigma <- ifelse(length(distParams) < 2, 1, distParams[2]) + jobj <- callJStatic("org.apache.spark.ml.r.KSTestWrapper", + "test", data@sdf, testCol, nullHypothesis, + as.array(c(mu, sigma))) + new("KSTest", jobj = jobj) + } +}) + +# Get the summary of Kolmogorov-Smirnov (KS) Test. 
+#' @param object test result object of KSTest by \code{spark.kstest}. +#' @return \code{summary} returns a list containing the p-value, test statistic computed for the +#' test, the null hypothesis with its parameters tested against +#' and degrees of freedom of the test. +#' @rdname spark.kstest +#' @aliases summary,KSTest-method +#' @export +#' @note summary(KSTest) since 2.1.0 +setMethod("summary", signature(object = "KSTest"), + function(object) { + jobj <- object@jobj + pValue <- callJMethod(jobj, "pValue") + statistic <- callJMethod(jobj, "statistic") + nullHypothesis <- callJMethod(jobj, "nullHypothesis") + distName <- callJMethod(jobj, "distName") + distParams <- unlist(callJMethod(jobj, "distParams")) + degreesOfFreedom <- callJMethod(jobj, "degreesOfFreedom") + + list(p.value = pValue, statistic = statistic, nullHypothesis = nullHypothesis, + nullHypothesis.name = distName, nullHypothesis.parameters = distParams, + degreesOfFreedom = degreesOfFreedom) + }) + +# Prints the summary of KSTest + +#' @rdname spark.kstest +#' @param x test result object of KSTest by \code{spark.kstest}. +#' @export +#' @note print.summary.KSTest since 2.1.0 +print.summary.KSTest <- function(x, ...) { + jobj <- x@jobj + summaryStr <- callJMethod(jobj, "summary") + cat(summaryStr) + invisible(summaryStr) +} diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index 825a24073b75c..ca25f2c7e8263 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -742,4 +742,38 @@ test_that("spark.als", { unlink(modelPath) }) +test_that("spark.kstest", { + data <- data.frame(test = c(0.1, 0.15, 0.2, 0.3, 0.25, -1, -0.5)) + df <- createDataFrame(data) + testResult <- spark.kstest(df, "test", "norm") + stats <- summary(testResult) + + rStats <- ks.test(data$test, "pnorm", alternative = "two.sided") + + expect_equal(stats$p.value, rStats$p.value, tolerance = 1e-4) + expect_equal(stats$statistic, unname(rStats$statistic), tolerance = 1e-4) + + printStr <- print.summary.KSTest(testResult) + expect_match(printStr, paste0("Kolmogorov-Smirnov test summary:\\n", + "degrees of freedom = 0 \\n", + "statistic = 0.38208[0-9]* \\n", + "pValue = 0.19849[0-9]* \\n", + ".*"), perl = TRUE) + + testResult <- spark.kstest(df, "test", "norm", -0.5) + stats <- summary(testResult) + + rStats <- ks.test(data$test, "pnorm", -0.5, 1, alternative = "two.sided") + + expect_equal(stats$p.value, rStats$p.value, tolerance = 1e-4) + expect_equal(stats$statistic, unname(rStats$statistic), tolerance = 1e-4) + + printStr <- print.summary.KSTest(testResult) + expect_match(printStr, paste0("Kolmogorov-Smirnov test summary:\\n", + "degrees of freedom = 0 \\n", + "statistic = 0.44003[0-9]* \\n", + "pValue = 0.09470[0-9]* \\n", + ".*"), perl = TRUE) +}) + sparkR.session.stop() diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/KSTestWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/KSTestWrapper.scala new file mode 100644 index 0000000000000..21531eb057ad3 --- /dev/null +++ b/mllib/src/main/scala/org/apache/spark/ml/r/KSTestWrapper.scala @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.r + +import org.apache.spark.mllib.stat.Statistics.kolmogorovSmirnovTest +import org.apache.spark.mllib.stat.test.KolmogorovSmirnovTestResult +import org.apache.spark.sql.{DataFrame, Row} + +private[r] class KSTestWrapper private ( + val testResult: KolmogorovSmirnovTestResult, + val distName: String, + val distParams: Array[Double]) { + + lazy val pValue = testResult.pValue + + lazy val statistic = testResult.statistic + + lazy val nullHypothesis = testResult.nullHypothesis + + lazy val degreesOfFreedom = testResult.degreesOfFreedom + + def summary: String = testResult.toString +} + +private[r] object KSTestWrapper { + + def test( + data: DataFrame, + featureName: String, + distName: String, + distParams: Array[Double]): KSTestWrapper = { + + val rddData = data.select(featureName).rdd.map { + case Row(feature: Double) => feature + } + + val ksTestResult = kolmogorovSmirnovTest(rddData, distName, distParams : _*) + + new KSTestWrapper(ksTestResult, distName, distParams) + } +} + From e9b58e9ef89a9118b6d5a466d10db8e30d61f850 Mon Sep 17 00:00:00 2001 From: "wm624@hotmail.com" Date: Sat, 3 Sep 2016 13:52:55 -0700 Subject: [PATCH 200/270] [SPARK-16829][SPARKR] sparkR sc.setLogLevel doesn't work (Please fill in changes proposed in this fix) ./bin/sparkR Launching java with spark-submit command /Users/mwang/spark_ws_0904/bin/spark-submit "sparkr-shell" /var/folders/s_/83b0sgvj2kl2kwq4stvft_pm0000gn/T//RtmpQxJGiZ/backend_porte9474603ed1e Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties Setting default log level to "WARN". To adjust logging level use sc.setLogLevel(newLevel). > sc.setLogLevel("INFO") Error: could not find function "sc.setLogLevel" sc.setLogLevel doesn't exist. R has a function setLogLevel. I rename the setLogLevel function to sc.setLogLevel. (Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) Change unit test. Run unit tests. Manually tested it in sparkR shell. Author: wm624@hotmail.com Closes #14433 from wangmiao1981/sc. --- core/src/main/scala/org/apache/spark/internal/Logging.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/org/apache/spark/internal/Logging.scala b/core/src/main/scala/org/apache/spark/internal/Logging.scala index 66a0cfec6296d..013cd1c1bc037 100644 --- a/core/src/main/scala/org/apache/spark/internal/Logging.scala +++ b/core/src/main/scala/org/apache/spark/internal/Logging.scala @@ -135,7 +135,8 @@ private[spark] trait Logging { val replLevel = Option(replLogger.getLevel()).getOrElse(Level.WARN) if (replLevel != rootLogger.getEffectiveLevel()) { System.err.printf("Setting default log level to \"%s\".\n", replLevel) - System.err.println("To adjust logging level use sc.setLogLevel(newLevel).") + System.err.println("To adjust logging level use sc.setLogLevel(newLevel). 
" + + "For SparkR, use setLogLevel(newLevel).") rootLogger.setLevel(replLevel) } } From 6b156e2fcf9c0c1ed0770a7ad9c54fa374760e17 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Sun, 4 Sep 2016 15:04:33 +0800 Subject: [PATCH 201/270] [SPARK-17324][SQL] Remove Direct Usage of HiveClient in InsertIntoHiveTable ### What changes were proposed in this pull request? This is another step to get rid of HiveClient from `HiveSessionState`. All the metastore interactions should be through `ExternalCatalog` interface. However, the existing implementation of `InsertIntoHiveTable ` still requires Hive clients. This PR is to remove HiveClient by moving the metastore interactions into `ExternalCatalog`. ### How was this patch tested? Existing test cases Author: gatorsmile Closes #14888 from gatorsmile/removeClientFromInsertIntoHiveTable. --- .../catalyst/catalog/ExternalCatalog.scala | 20 +++++- .../catalyst/catalog/InMemoryCatalog.scala | 25 +++++++- .../sql/catalyst/catalog/SessionCatalog.scala | 7 +-- .../spark/sql/execution/command/tables.scala | 3 +- .../spark/sql/hive/HiveExternalCatalog.scala | 44 +++++++++++-- .../spark/sql/hive/client/HiveClient.scala | 8 +-- .../sql/hive/client/HiveClientImpl.scala | 18 +++--- .../hive/execution/InsertIntoHiveTable.scala | 61 ++++++++----------- .../spark/sql/hive/client/VersionsSuite.scala | 12 ++-- .../sql/hive/execution/HiveDDLSuite.scala | 38 ++++++++++++ 10 files changed, 168 insertions(+), 68 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala index df72baaba29c5..dd93b467eeeb2 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala @@ -121,8 +121,16 @@ abstract class ExternalCatalog { partition: TablePartitionSpec, isOverwrite: Boolean, holdDDLTime: Boolean, - inheritTableSpecs: Boolean, - isSkewedStoreAsSubdir: Boolean): Unit + inheritTableSpecs: Boolean): Unit + + def loadDynamicPartitions( + db: String, + table: String, + loadPath: String, + partition: TablePartitionSpec, + replace: Boolean, + numDP: Int, + holdDDLTime: Boolean): Unit // -------------------------------------------------------------------------- // Partitions @@ -165,6 +173,14 @@ abstract class ExternalCatalog { def getPartition(db: String, table: String, spec: TablePartitionSpec): CatalogTablePartition + /** + * Returns the specified partition or None if it does not exist. + */ + def getPartitionOption( + db: String, + table: String, + spec: TablePartitionSpec): Option[CatalogTablePartition] + /** * List the metadata of all partitions that belong to the specified table, assuming it exists. 
* diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala index 4e361a536d444..3e31127118b44 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala @@ -305,11 +305,21 @@ class InMemoryCatalog( partition: TablePartitionSpec, isOverwrite: Boolean, holdDDLTime: Boolean, - inheritTableSpecs: Boolean, - isSkewedStoreAsSubdir: Boolean): Unit = { + inheritTableSpecs: Boolean): Unit = { throw new UnsupportedOperationException("loadPartition is not implemented.") } + override def loadDynamicPartitions( + db: String, + table: String, + loadPath: String, + partition: TablePartitionSpec, + replace: Boolean, + numDP: Int, + holdDDLTime: Boolean): Unit = { + throw new UnsupportedOperationException("loadDynamicPartitions is not implemented.") + } + // -------------------------------------------------------------------------- // Partitions // -------------------------------------------------------------------------- @@ -444,6 +454,17 @@ class InMemoryCatalog( catalog(db).tables(table).partitions(spec) } + override def getPartitionOption( + db: String, + table: String, + spec: TablePartitionSpec): Option[CatalogTablePartition] = synchronized { + if (!partitionExists(db, table, spec)) { + None + } else { + Option(catalog(db).tables(table).partitions(spec)) + } + } + override def listPartitions( db: String, table: String, diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala index 62d0da076b5a2..e7132cd3975d5 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala @@ -309,14 +309,13 @@ class SessionCatalog( partition: TablePartitionSpec, isOverwrite: Boolean, holdDDLTime: Boolean, - inheritTableSpecs: Boolean, - isSkewedStoreAsSubdir: Boolean): Unit = { + inheritTableSpecs: Boolean): Unit = { val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase)) val table = formatTableName(name.table) requireDbExists(db) requireTableExists(TableIdentifier(table, Some(db))) - externalCatalog.loadPartition(db, table, loadPath, partition, isOverwrite, holdDDLTime, - inheritTableSpecs, isSkewedStoreAsSubdir) + externalCatalog.loadPartition( + db, table, loadPath, partition, isOverwrite, holdDDLTime, inheritTableSpecs) } def defaultTablePath(tableIdent: TableIdentifier): String = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 67b2329effb7b..a1371582be064 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -309,8 +309,7 @@ case class LoadDataCommand( partition.get, isOverwrite, holdDDLTime = false, - inheritTableSpecs = true, - isSkewedStoreAsSubdir = false) + inheritTableSpecs = true) } else { catalog.loadTable( targetTable.identifier, diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index 
8541ae2322ada..1fe7f4d41dc16 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -489,8 +489,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat partition: TablePartitionSpec, isOverwrite: Boolean, holdDDLTime: Boolean, - inheritTableSpecs: Boolean, - isSkewedStoreAsSubdir: Boolean): Unit = withClient { + inheritTableSpecs: Boolean): Unit = withClient { requireTableExists(db, table) val orderedPartitionSpec = new util.LinkedHashMap[String, String]() @@ -500,12 +499,37 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat client.loadPartition( loadPath, - s"$db.$table", + db, + table, orderedPartitionSpec, isOverwrite, holdDDLTime, - inheritTableSpecs, - isSkewedStoreAsSubdir) + inheritTableSpecs) + } + + override def loadDynamicPartitions( + db: String, + table: String, + loadPath: String, + partition: TablePartitionSpec, + replace: Boolean, + numDP: Int, + holdDDLTime: Boolean): Unit = withClient { + requireTableExists(db, table) + + val orderedPartitionSpec = new util.LinkedHashMap[String, String]() + getTable(db, table).partitionColumnNames.foreach { colName => + orderedPartitionSpec.put(colName, partition(colName)) + } + + client.loadDynamicPartitions( + loadPath, + db, + table, + orderedPartitionSpec, + replace, + numDP, + holdDDLTime) } // -------------------------------------------------------------------------- @@ -553,6 +577,16 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat client.getPartition(db, table, spec) } + /** + * Returns the specified partition or None if it does not exist. + */ + override def getPartitionOption( + db: String, + table: String, + spec: TablePartitionSpec): Option[CatalogTablePartition] = withClient { + client.getPartitionOption(db, table, spec) + } + /** * Returns the partition names from hive metastore for a given table in a database. */ diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala index dc74fa257aa4d..984d23bb09dbd 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala @@ -195,12 +195,12 @@ private[hive] trait HiveClient { /** Loads a static partition into an existing table. */ def loadPartition( loadPath: String, + dbName: String, tableName: String, partSpec: java.util.LinkedHashMap[String, String], // Hive relies on LinkedHashMap ordering replace: Boolean, holdDDLTime: Boolean, - inheritTableSpecs: Boolean, - isSkewedStoreAsSubdir: Boolean): Unit + inheritTableSpecs: Boolean): Unit /** Loads data into an existing table. */ def loadTable( @@ -212,12 +212,12 @@ private[hive] trait HiveClient { /** Loads new dynamic partitions into an existing table. */ def loadDynamicPartitions( loadPath: String, + dbName: String, tableName: String, partSpec: java.util.LinkedHashMap[String, String], // Hive relies on LinkedHashMap ordering replace: Boolean, numDP: Int, - holdDDLTime: Boolean, - listBucketingEnabled: Boolean): Unit + holdDDLTime: Boolean): Unit /** Create a function in an existing database. 
*/ def createFunction(db: String, func: CatalogFunction): Unit diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index 54ec61abedb12..dd33d750a4d45 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -615,21 +615,22 @@ private[hive] class HiveClientImpl( def loadPartition( loadPath: String, + dbName: String, tableName: String, partSpec: java.util.LinkedHashMap[String, String], replace: Boolean, holdDDLTime: Boolean, - inheritTableSpecs: Boolean, - isSkewedStoreAsSubdir: Boolean): Unit = withHiveState { + inheritTableSpecs: Boolean): Unit = withHiveState { + val hiveTable = client.getTable(dbName, tableName, true /* throw exception */) shim.loadPartition( client, new Path(loadPath), // TODO: Use URI - tableName, + s"$dbName.$tableName", partSpec, replace, holdDDLTime, inheritTableSpecs, - isSkewedStoreAsSubdir) + isSkewedStoreAsSubdir = hiveTable.isStoredAsSubDirectories) } def loadTable( @@ -647,21 +648,22 @@ private[hive] class HiveClientImpl( def loadDynamicPartitions( loadPath: String, + dbName: String, tableName: String, partSpec: java.util.LinkedHashMap[String, String], replace: Boolean, numDP: Int, - holdDDLTime: Boolean, - listBucketingEnabled: Boolean): Unit = withHiveState { + holdDDLTime: Boolean): Unit = withHiveState { + val hiveTable = client.getTable(dbName, tableName, true /* throw exception */) shim.loadDynamicPartitions( client, new Path(loadPath), - tableName, + s"$dbName.$tableName", partSpec, replace, numDP, holdDDLTime, - listBucketingEnabled) + listBucketingEnabled = hiveTable.isStoredAsSubDirectories) } override def createFunction(db: String, func: CatalogFunction): Unit = withHiveState { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala index eb0c31ced6586..7eec9c787c43a 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala @@ -51,7 +51,7 @@ case class InsertIntoHiveTable( ifNotExists: Boolean) extends UnaryExecNode { @transient private val sessionState = sqlContext.sessionState.asInstanceOf[HiveSessionState] - @transient private val client = sessionState.metadataHive + @transient private val externalCatalog = sqlContext.sharedState.externalCatalog def output: Seq[Attribute] = Seq.empty @@ -240,54 +240,45 @@ case class InsertIntoHiveTable( // holdDDLTime will be true when TOK_HOLD_DDLTIME presents in the query as a hint. val holdDDLTime = false if (partition.nonEmpty) { - - // loadPartition call orders directories created on the iteration order of the this map - val orderedPartitionSpec = new util.LinkedHashMap[String, String]() - table.hiveQlTable.getPartCols.asScala.foreach { entry => - orderedPartitionSpec.put(entry.getName, partitionSpec.getOrElse(entry.getName, "")) - } - - // inheritTableSpecs is set to true. It should be set to false for an IMPORT query - // which is currently considered as a Hive native command. - val inheritTableSpecs = true - // TODO: Correctly set isSkewedStoreAsSubdir. 
- val isSkewedStoreAsSubdir = false if (numDynamicPartitions > 0) { - client.synchronized { - client.loadDynamicPartitions( - outputPath.toString, - table.catalogTable.qualifiedName, - orderedPartitionSpec, - overwrite, - numDynamicPartitions, - holdDDLTime, - isSkewedStoreAsSubdir) - } + externalCatalog.loadDynamicPartitions( + db = table.catalogTable.database, + table = table.catalogTable.identifier.table, + outputPath.toString, + partitionSpec, + overwrite, + numDynamicPartitions, + holdDDLTime = holdDDLTime) } else { // scalastyle:off // ifNotExists is only valid with static partition, refer to // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-InsertingdataintoHiveTablesfromqueries // scalastyle:on val oldPart = - client.getPartitionOption( - table.catalogTable, + externalCatalog.getPartitionOption( + table.catalogTable.database, + table.catalogTable.identifier.table, partitionSpec) if (oldPart.isEmpty || !ifNotExists) { - client.loadPartition( - outputPath.toString, - table.catalogTable.qualifiedName, - orderedPartitionSpec, - overwrite, - holdDDLTime, - inheritTableSpecs, - isSkewedStoreAsSubdir) + // inheritTableSpecs is set to true. It should be set to false for an IMPORT query + // which is currently considered as a Hive native command. + val inheritTableSpecs = true + externalCatalog.loadPartition( + table.catalogTable.database, + table.catalogTable.identifier.table, + outputPath.toString, + partitionSpec, + isOverwrite = overwrite, + holdDDLTime = holdDDLTime, + inheritTableSpecs = inheritTableSpecs) } } } else { - client.loadTable( + externalCatalog.loadTable( + table.catalogTable.database, + table.catalogTable.identifier.table, outputPath.toString, // TODO: URI - table.catalogTable.qualifiedName, overwrite, holdDDLTime) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala index 10b6cd102416c..9a10957c8efa5 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala @@ -337,12 +337,12 @@ class VersionsSuite extends SparkFunSuite with Logging { client.loadPartition( emptyDir, - "default.src_part", + "default", + "src_part", partSpec, replace = false, holdDDLTime = false, - inheritTableSpecs = false, - isSkewedStoreAsSubdir = false) + inheritTableSpecs = false) } test(s"$version: loadDynamicPartitions") { @@ -352,12 +352,12 @@ class VersionsSuite extends SparkFunSuite with Logging { client.loadDynamicPartitions( emptyDir, - "default.src_part", + "default", + "src_part", partSpec, replace = false, numDP = 1, - false, - false) + holdDDLTime = false) } test(s"$version: renamePartitions") { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index 7f3d96de85ae7..eff32805bf50c 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -932,6 +932,44 @@ class HiveDDLSuite } } + test("insert skewed table") { + val tabName = "tab1" + withTable(tabName) { + // Spark SQL does not support creating skewed table. Thus, we have to use Hive client. 
+ val client = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client + client.runSqlHive( + s""" + |CREATE Table $tabName(col1 int, col2 int) + |PARTITIONED BY (part1 string, part2 string) + |SKEWED BY (col1) ON (3, 4) STORED AS DIRECTORIES + """.stripMargin) + val hiveTable = + spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default"))) + + assert(hiveTable.unsupportedFeatures.contains("skewed columns")) + + // Call loadDynamicPartitions against a skewed table with enabling list bucketing + sql( + s""" + |INSERT OVERWRITE TABLE $tabName + |PARTITION (part1='a', part2) + |SELECT 3, 4, 'b' + """.stripMargin) + + // Call loadPartitions against a skewed table with enabling list bucketing + sql( + s""" + |INSERT INTO TABLE $tabName + |PARTITION (part1='a', part2='b') + |SELECT 1, 2 + """.stripMargin) + + checkAnswer( + sql(s"SELECT * from $tabName"), + Row(3, 4, "a", "b") :: Row(1, 2, "a", "b") :: Nil) + } + } + test("desc table for data source table - no user-defined schema") { Seq("parquet", "json", "orc").foreach { fileFormat => withTable("t1") { From e75c162e9e510d74b07f28ccf6c7948ac317a7c6 Mon Sep 17 00:00:00 2001 From: Shivansh Date: Sun, 4 Sep 2016 12:39:26 +0100 Subject: [PATCH 202/270] [SPARK-17308] Improved the spark core code by replacing all pattern match on boolean value by if/else block. ## What changes were proposed in this pull request? Improved the code quality of spark by replacing all pattern match on boolean value by if/else block. ## How was this patch tested? By running the tests Author: Shivansh Closes #14873 from shiv4nsh/SPARK-17308. --- .../org/apache/spark/deploy/Client.scala | 41 +++++++++---------- .../spark/mllib/clustering/KMeansSuite.scala | 9 ++-- project/SparkBuild.scala | 7 ++-- .../spark/sql/catalyst/trees/TreeNode.scala | 7 ++-- .../sql/catalyst/util/StringKeyHashMap.scala | 7 ++-- .../analysis/AnalysisErrorSuite.scala | 9 ++-- .../streaming/api/java/JavaPairDStream.scala | 7 ++-- 7 files changed, 44 insertions(+), 43 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala index bf2dab6e71376..ee276e1b71138 100644 --- a/core/src/main/scala/org/apache/spark/deploy/Client.scala +++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala @@ -124,27 +124,26 @@ private class ClientEndpoint( logInfo("... 
polling master for driver state") val statusResponse = activeMasterEndpoint.askWithRetry[DriverStatusResponse](RequestDriverStatus(driverId)) - statusResponse.found match { - case false => - logError(s"ERROR: Cluster master did not recognize $driverId") - System.exit(-1) - case true => - logInfo(s"State of $driverId is ${statusResponse.state.get}") - // Worker node, if present - (statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match { - case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) => - logInfo(s"Driver running on $hostPort ($id)") - case _ => - } - // Exception, if present - statusResponse.exception match { - case Some(e) => - logError(s"Exception from cluster was: $e") - e.printStackTrace() - System.exit(-1) - case _ => - System.exit(0) - } + if (statusResponse.found) { + logInfo(s"State of $driverId is ${statusResponse.state.get}") + // Worker node, if present + (statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match { + case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) => + logInfo(s"Driver running on $hostPort ($id)") + case _ => + } + // Exception, if present + statusResponse.exception match { + case Some(e) => + logError(s"Exception from cluster was: $e") + e.printStackTrace() + System.exit(-1) + case _ => + System.exit(0) + } + } else { + logError(s"ERROR: Cluster master did not recognize $driverId") + System.exit(-1) } } diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala index 3003c62d9876c..2d35b312083c0 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala @@ -304,11 +304,10 @@ class KMeansSuite extends SparkFunSuite with MLlibTestSparkContext { object KMeansSuite extends SparkFunSuite { def createModel(dim: Int, k: Int, isSparse: Boolean): KMeansModel = { - val singlePoint = isSparse match { - case true => - Vectors.sparse(dim, Array.empty[Int], Array.empty[Double]) - case _ => - Vectors.dense(Array.fill[Double](dim)(0.0)) + val singlePoint = if (isSparse) { + Vectors.sparse(dim, Array.empty[Int], Array.empty[Double]) + } else { + Vectors.dense(Array.fill[Double](dim)(0.0)) } new KMeansModel(Array.fill[Vector](k)(singlePoint)) } diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 83a7c0864f76d..d164ead4ba73e 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -212,9 +212,10 @@ object SparkBuild extends PomBuild { cachedFun(findFiles(scalaSource.in(config).value)) } - private def findFiles(file: File): Set[File] = file.isDirectory match { - case true => file.listFiles().toSet.flatMap(findFiles) + file - case false => Set(file) + private def findFiles(file: File): Set[File] = if (file.isDirectory) { + file.listFiles().toSet.flatMap(findFiles) + file + } else { + Set(file) } def enableScalaStyle: Seq[sbt.Def.Setting[_]] = Seq( diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala index 24a2dc9d3b35f..037f8cb2873bf 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala @@ -103,9 +103,10 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product { * Find the first [[TreeNode]] that satisfies the 
condition specified by `f`. * The condition is recursively applied to this node and all of its children (pre-order). */ - def find(f: BaseType => Boolean): Option[BaseType] = f(this) match { - case true => Some(this) - case false => children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) } + def find(f: BaseType => Boolean): Option[BaseType] = if (f(this)) { + Some(this) + } else { + children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) } } /** diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala index d5d151a5802f6..a7ac6136835a7 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala @@ -22,9 +22,10 @@ package org.apache.spark.sql.catalyst.util * sensitive or insensitive. */ object StringKeyHashMap { - def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = caseSensitive match { - case false => new StringKeyHashMap[T](_.toLowerCase) - case true => new StringKeyHashMap[T](identity) + def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = if (caseSensitive) { + new StringKeyHashMap[T](identity) + } else { + new StringKeyHashMap[T](_.toLowerCase) } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala index e7c8615bc5e04..21afe9fec5944 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala @@ -414,11 +414,10 @@ class AnalysisErrorSuite extends AnalysisTest { AttributeReference("a", dataType)(exprId = ExprId(2)), AttributeReference("b", IntegerType)(exprId = ExprId(1)))) - shouldSuccess match { - case true => - assertAnalysisSuccess(plan, true) - case false => - assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil) + if (shouldSuccess) { + assertAnalysisSuccess(plan, true) + } else { + assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil) } } diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala index dec983165fb3b..da9ff858853cf 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala @@ -471,9 +471,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( val list: JList[V] = values.asJava val scalaState: Optional[S] = JavaUtils.optionToOptional(state) val result: Optional[S] = in.apply(list, scalaState) - result.isPresent match { - case true => Some(result.get()) - case _ => None + if (result.isPresent) { + Some(result.get()) + } else { + None } } scalaFunc From cdeb97a8cd26e3282cc2a4f126242ed2199f3898 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Sun, 4 Sep 2016 12:40:51 +0100 Subject: [PATCH 203/270] [SPARK-17311][MLLIB] Standardize Python-Java MLlib API to accept optional long seeds in all cases ## What changes were proposed in this pull request? Related to https://github.com/apache/spark/pull/14524 -- just the 'fix' rather than a behavior change. 
- PythonMLlibAPI methods that take a seed now always take a `java.lang.Long` consistently, allowing the Python API to specify "no seed" - .mllib's Word2VecModel seemed to be an odd man out in .mllib in that it picked its own random seed. Instead it defaults to None, meaning, letting the Scala implementation pick a seed - BisectingKMeansModel arguably should not hard-code a seed for consistency with .mllib, I think. However I left it. ## How was this patch tested? Existing tests Author: Sean Owen Closes #14826 from srowen/SPARK-16832.2. --- .../mllib/api/python/PythonMLLibAPI.scala | 20 ++++++++++--------- python/pyspark/mllib/feature.py | 4 ++-- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala index a80cca70f4b28..2ed6c6be1d894 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala @@ -126,13 +126,13 @@ private[python] class PythonMLLibAPI extends Serializable { k: Int, maxIterations: Int, minDivisibleClusterSize: Double, - seed: Long): BisectingKMeansModel = { - new BisectingKMeans() + seed: java.lang.Long): BisectingKMeansModel = { + val kmeans = new BisectingKMeans() .setK(k) .setMaxIterations(maxIterations) .setMinDivisibleClusterSize(minDivisibleClusterSize) - .setSeed(seed) - .run(data) + if (seed != null) kmeans.setSeed(seed) + kmeans.run(data) } /** @@ -678,7 +678,7 @@ private[python] class PythonMLLibAPI extends Serializable { learningRate: Double, numPartitions: Int, numIterations: Int, - seed: Long, + seed: java.lang.Long, minCount: Int, windowSize: Int): Word2VecModelWrapper = { val word2vec = new Word2Vec() @@ -686,9 +686,9 @@ private[python] class PythonMLLibAPI extends Serializable { .setLearningRate(learningRate) .setNumPartitions(numPartitions) .setNumIterations(numIterations) - .setSeed(seed) .setMinCount(minCount) .setWindowSize(windowSize) + if (seed != null) word2vec.setSeed(seed) try { val model = word2vec.fit(dataJRDD.rdd.persist(StorageLevel.MEMORY_AND_DISK_SER)) new Word2VecModelWrapper(model) @@ -751,7 +751,7 @@ private[python] class PythonMLLibAPI extends Serializable { impurityStr: String, maxDepth: Int, maxBins: Int, - seed: Int): RandomForestModel = { + seed: java.lang.Long): RandomForestModel = { val algo = Algo.fromString(algoStr) val impurity = Impurities.fromString(impurityStr) @@ -763,11 +763,13 @@ private[python] class PythonMLLibAPI extends Serializable { maxBins = maxBins, categoricalFeaturesInfo = categoricalFeaturesInfo.asScala.toMap) val cached = data.rdd.persist(StorageLevel.MEMORY_AND_DISK) + // Only done because methods below want an int, not an optional Long + val intSeed = getSeedOrDefault(seed).toInt try { if (algo == Algo.Classification) { - RandomForest.trainClassifier(cached, strategy, numTrees, featureSubsetStrategy, seed) + RandomForest.trainClassifier(cached, strategy, numTrees, featureSubsetStrategy, intSeed) } else { - RandomForest.trainRegressor(cached, strategy, numTrees, featureSubsetStrategy, seed) + RandomForest.trainRegressor(cached, strategy, numTrees, featureSubsetStrategy, intSeed) } } finally { cached.unpersist(blocking = false) diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py index 324ba9758e446..b32d0c70ec6a7 100644 --- a/python/pyspark/mllib/feature.py +++ b/python/pyspark/mllib/feature.py @@ -600,7 +600,7 @@ def 
__init__(self): self.learningRate = 0.025 self.numPartitions = 1 self.numIterations = 1 - self.seed = random.randint(0, sys.maxsize) + self.seed = None self.minCount = 5 self.windowSize = 5 @@ -675,7 +675,7 @@ def fit(self, data): raise TypeError("data should be an RDD of list of string") jmodel = callMLlibFunc("trainWord2VecModel", data, int(self.vectorSize), float(self.learningRate), int(self.numPartitions), - int(self.numIterations), int(self.seed), + int(self.numIterations), self.seed, int(self.minCount), int(self.windowSize)) return Word2VecModel(jmodel) From 1b001b5203444cc8d5c4887a30e03e8fb298d17d Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Sun, 4 Sep 2016 05:38:47 -0700 Subject: [PATCH 204/270] [MINOR][ML][MLLIB] Remove work around for breeze sparse matrix. ## What changes were proposed in this pull request? Since we have updated breeze version to 0.12, we should remove work around for bug of breeze sparse matrix in v0.11. I checked all mllib code and found this is the only work around for breeze 0.11. ## How was this patch tested? Existing tests. Author: Yanbo Liang Closes #14953 from yanboliang/matrices. --- .../scala/org/apache/spark/ml/linalg/Matrices.scala | 10 +--------- .../scala/org/apache/spark/mllib/linalg/Matrices.scala | 10 +--------- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala index 98080bb71ac83..207f662e33bce 100644 --- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala +++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala @@ -846,16 +846,8 @@ object Matrices { case dm: BDM[Double] => new DenseMatrix(dm.rows, dm.cols, dm.data, dm.isTranspose) case sm: BSM[Double] => - // Spark-11507. work around breeze issue 479. - val mat = if (sm.colPtrs.last != sm.data.length) { - val matCopy = sm.copy - matCopy.compact() - matCopy - } else { - sm - } // There is no isTranspose flag for sparse matrices in Breeze - new SparseMatrix(mat.rows, mat.cols, mat.colPtrs, mat.rowIndices, mat.data) + new SparseMatrix(sm.rows, sm.cols, sm.colPtrs, sm.rowIndices, sm.data) case _ => throw new UnsupportedOperationException( s"Do not support conversion from type ${breeze.getClass.getName}.") diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala index ad882c969aa8a..8659cea4b8ebc 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala @@ -983,16 +983,8 @@ object Matrices { case dm: BDM[Double] => new DenseMatrix(dm.rows, dm.cols, dm.data, dm.isTranspose) case sm: BSM[Double] => - // Spark-11507. work around breeze issue 479. 
- val mat = if (sm.colPtrs.last != sm.data.length) { - val matCopy = sm.copy - matCopy.compact() - matCopy - } else { - sm - } // There is no isTranspose flag for sparse matrices in Breeze - new SparseMatrix(mat.rows, mat.cols, mat.colPtrs, mat.rowIndices, mat.data) + new SparseMatrix(sm.rows, sm.cols, sm.colPtrs, sm.rowIndices, sm.data) case _ => throw new UnsupportedOperationException( s"Do not support conversion from type ${breeze.getClass.getName}.") From c1e9a6d274c281ec30e6d022eedfbe3a2988f721 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Mon, 5 Sep 2016 11:28:19 +0800 Subject: [PATCH 205/270] [SPARK-17393][SQL] Error Handling when CTAS Against the Same Data Source Table Using Overwrite Mode ### What changes were proposed in this pull request? When we trying to read a table and then write to the same table using the `Overwrite` save mode, we got a very confusing error message: For example, ```Scala Seq((1, 2)).toDF("i", "j").write.saveAsTable("tab1") table("tab1").write.mode(SaveMode.Overwrite).saveAsTable("tab1") ``` ``` Job aborted. org.apache.spark.SparkException: Job aborted. at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1.apply$mcV$sp ... Caused by: org.apache.spark.SparkException: Task failed while writing rows at org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:266) at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143) at org.apache.spark.sql.execution.datasources ``` After the PR, we will issue an `AnalysisException`: ``` Cannot overwrite table `tab1` that is also being read from ``` ### How was this patch tested? Added test cases. Author: gatorsmile Closes #14954 from gatorsmile/ctasQueryAnalyze. --- .../sql/execution/datasources/rules.scala | 45 +++++++--------- .../sql/hive/MetastoreDataSourcesSuite.scala | 52 +++++++++++++++++++ 2 files changed, 71 insertions(+), 26 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala index f14c63c19f905..ae77e4cb96e7d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala @@ -304,6 +304,25 @@ case class PreWriteCheck(conf: SQLConf, catalog: SessionCatalog) failAnalysis(s"Database name ${tblIdent.database.get} is not a valid name for " + s"metastore. Metastore only accepts table name containing characters, numbers and _.") } + if (query.isDefined && + mode == SaveMode.Overwrite && + catalog.tableExists(tableDesc.identifier)) { + // Need to remove SubQuery operator. + EliminateSubqueryAliases(catalog.lookupRelation(tableDesc.identifier)) match { + // Only do the check if the table is a data source table + // (the relation is a BaseRelation). + case l @ LogicalRelation(dest: BaseRelation, _, _) => + // Get all input data source relations of the query. 
+ val srcRelations = query.get.collect { + case LogicalRelation(src: BaseRelation, _, _) => src + } + if (srcRelations.contains(dest)) { + failAnalysis( + s"Cannot overwrite table ${tableDesc.identifier} that is also being read from") + } + case _ => // OK + } + } case i @ logical.InsertIntoTable( l @ LogicalRelation(t: InsertableRelation, _, _), @@ -357,32 +376,6 @@ case class PreWriteCheck(conf: SQLConf, catalog: SessionCatalog) // The relation in l is not an InsertableRelation. failAnalysis(s"$l does not allow insertion.") - case CreateTable(tableDesc, mode, Some(query)) => - // When the SaveMode is Overwrite, we need to check if the table is an input table of - // the query. If so, we will throw an AnalysisException to let users know it is not allowed. - if (mode == SaveMode.Overwrite && catalog.tableExists(tableDesc.identifier)) { - // Need to remove SubQuery operator. - EliminateSubqueryAliases(catalog.lookupRelation(tableDesc.identifier)) match { - // Only do the check if the table is a data source table - // (the relation is a BaseRelation). - case l @ LogicalRelation(dest: BaseRelation, _, _) => - // Get all input data source relations of the query. - val srcRelations = query.collect { - case LogicalRelation(src: BaseRelation, _, _) => src - } - if (srcRelations.contains(dest)) { - failAnalysis( - s"Cannot overwrite table ${tableDesc.identifier} that is also being read from.") - } else { - // OK - } - - case _ => // OK - } - } else { - // OK - } - case _ => // OK } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala index 7a71475a2f192..3466733d7fdcd 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala @@ -1151,6 +1151,58 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv } } + test("saveAsTable - source and target are the same table") { + val tableName = "tab1" + withTable(tableName) { + Seq((1, 2)).toDF("i", "j").write.saveAsTable(tableName) + + table(tableName).write.mode(SaveMode.Append).saveAsTable(tableName) + checkAnswer(table(tableName), + Seq(Row(1, 2), Row(1, 2))) + + table(tableName).write.mode(SaveMode.Ignore).saveAsTable(tableName) + checkAnswer(table(tableName), + Seq(Row(1, 2), Row(1, 2))) + + var e = intercept[AnalysisException] { + table(tableName).write.mode(SaveMode.Overwrite).saveAsTable(tableName) + }.getMessage + assert(e.contains(s"Cannot overwrite table `$tableName` that is also being read from")) + + e = intercept[AnalysisException] { + table(tableName).write.mode(SaveMode.ErrorIfExists).saveAsTable(tableName) + }.getMessage + assert(e.contains(s"Table `$tableName` already exists")) + } + } + + test("insertInto - source and target are the same table") { + val tableName = "tab1" + withTable(tableName) { + Seq((1, 2)).toDF("i", "j").write.saveAsTable(tableName) + + table(tableName).write.mode(SaveMode.Append).insertInto(tableName) + checkAnswer( + table(tableName), + Seq(Row(1, 2), Row(1, 2))) + + table(tableName).write.mode(SaveMode.Ignore).insertInto(tableName) + checkAnswer( + table(tableName), + Seq(Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2))) + + table(tableName).write.mode(SaveMode.ErrorIfExists).insertInto(tableName) + checkAnswer( + table(tableName), + Seq(Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2), Row(1, 2))) + + val e = 
intercept[AnalysisException] { + table(tableName).write.mode(SaveMode.Overwrite).insertInto(tableName) + }.getMessage + assert(e.contains(s"Cannot overwrite a path that is also being read from")) + } + } + test("saveAsTable[append]: less columns") { withTable("saveAsTable_less_columns") { Seq((1, 2)).toDF("i", "j").write.saveAsTable("saveAsTable_less_columns") From 3ccb23e445711ea5d9059eb6de7c490c8fc9d112 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Mon, 5 Sep 2016 13:09:20 +0800 Subject: [PATCH 206/270] [SPARK-17394][SQL] should not allow specify database in table/view name after RENAME TO ## What changes were proposed in this pull request? It is odd that we allow users to specify a database in both the source and the destination table name of `ALTER TABLE RENAME TO`, while logically we cannot support renaming a table to a different database. Both PostgreSQL and MySQL disallow this syntax, so it is reasonable to follow them and simplify our code. ## How was this patch tested? New test in `DDLCommandSuite`. Author: Wenchen Fan Closes #14955 from cloud-fan/rename. --- .../sql/catalyst/catalog/SessionCatalog.scala | 16 ++------- .../catalog/SessionCatalogSuite.scala | 27 +++++--------- .../spark/sql/execution/SparkSqlParser.scala | 10 ++++-- .../spark/sql/execution/command/tables.scala | 7 ++-- .../execution/command/DDLCommandSuite.scala | 9 +++-- .../sql/execution/command/DDLSuite.scala | 35 ++----------------- 6 files changed, 32 insertions(+), 72 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala index e7132cd3975d5..9fb5db573b70f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala @@ -349,29 +349,17 @@ class SessionCatalog( * If a database is specified in `oldName`, this will rename the table in that database. * If no database is specified, this will first attempt to rename a temporary table with * the same name, then, if that does not exist, rename the table in the current database. - * - * This assumes the database specified in `oldName` matches the one specified in `newName`. 
*/ - def renameTable(oldName: TableIdentifier, newName: TableIdentifier): Unit = synchronized { + def renameTable(oldName: TableIdentifier, newName: String): Unit = synchronized { val db = formatDatabaseName(oldName.database.getOrElse(currentDb)) requireDbExists(db) - val newDb = formatDatabaseName(newName.database.getOrElse(currentDb)) - if (db != newDb) { - throw new AnalysisException( - s"RENAME TABLE source and destination databases do not match: '$db' != '$newDb'") - } val oldTableName = formatTableName(oldName.table) - val newTableName = formatTableName(newName.table) + val newTableName = formatTableName(newName) if (oldName.database.isDefined || !tempTables.contains(oldTableName)) { requireTableExists(TableIdentifier(oldTableName, Some(db))) requireTableNotExists(TableIdentifier(newTableName, Some(db))) externalCatalog.renameTable(db, oldTableName, newTableName) } else { - if (newName.database.isDefined) { - throw new AnalysisException( - s"RENAME TEMPORARY TABLE from '$oldName' to '$newName': cannot specify database " + - s"name '${newName.database.get}' in the destination table") - } if (tempTables.contains(newTableName)) { throw new AnalysisException( s"RENAME TEMPORARY TABLE from '$oldName' to '$newName': destination table already exists") diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala index c9d4fef8056ca..012df629bbdef 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala @@ -273,37 +273,27 @@ class SessionCatalogSuite extends SparkFunSuite { val externalCatalog = newBasicCatalog() val sessionCatalog = new SessionCatalog(externalCatalog) assert(externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) - sessionCatalog.renameTable( - TableIdentifier("tbl1", Some("db2")), TableIdentifier("tblone", Some("db2"))) + sessionCatalog.renameTable(TableIdentifier("tbl1", Some("db2")), "tblone") assert(externalCatalog.listTables("db2").toSet == Set("tblone", "tbl2")) - sessionCatalog.renameTable( - TableIdentifier("tbl2", Some("db2")), TableIdentifier("tbltwo", Some("db2"))) + sessionCatalog.renameTable(TableIdentifier("tbl2", Some("db2")), "tbltwo") assert(externalCatalog.listTables("db2").toSet == Set("tblone", "tbltwo")) // Rename table without explicitly specifying database sessionCatalog.setCurrentDatabase("db2") - sessionCatalog.renameTable(TableIdentifier("tbltwo"), TableIdentifier("table_two")) + sessionCatalog.renameTable(TableIdentifier("tbltwo"), "table_two") assert(externalCatalog.listTables("db2").toSet == Set("tblone", "table_two")) - // Renaming "db2.tblone" to "db1.tblones" should fail because databases don't match - intercept[AnalysisException] { - sessionCatalog.renameTable( - TableIdentifier("tblone", Some("db2")), TableIdentifier("tblones", Some("db1"))) - } // The new table already exists intercept[TableAlreadyExistsException] { - sessionCatalog.renameTable( - TableIdentifier("tblone", Some("db2")), TableIdentifier("table_two", Some("db2"))) + sessionCatalog.renameTable(TableIdentifier("tblone", Some("db2")), "table_two") } } test("rename table when database/table does not exist") { val catalog = new SessionCatalog(newBasicCatalog()) intercept[NoSuchDatabaseException] { - catalog.renameTable( - TableIdentifier("tbl1", Some("unknown_db")), TableIdentifier("tbl2", 
Some("unknown_db"))) + catalog.renameTable(TableIdentifier("tbl1", Some("unknown_db")), "tbl2") } intercept[NoSuchTableException] { - catalog.renameTable( - TableIdentifier("unknown_table", Some("db2")), TableIdentifier("tbl2", Some("db2"))) + catalog.renameTable(TableIdentifier("unknown_table", Some("db2")), "tbl2") } } @@ -316,13 +306,12 @@ class SessionCatalogSuite extends SparkFunSuite { assert(sessionCatalog.getTempTable("tbl1") == Option(tempTable)) assert(externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) // If database is not specified, temp table should be renamed first - sessionCatalog.renameTable(TableIdentifier("tbl1"), TableIdentifier("tbl3")) + sessionCatalog.renameTable(TableIdentifier("tbl1"), "tbl3") assert(sessionCatalog.getTempTable("tbl1").isEmpty) assert(sessionCatalog.getTempTable("tbl3") == Option(tempTable)) assert(externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) // If database is specified, temp tables are never renamed - sessionCatalog.renameTable( - TableIdentifier("tbl2", Some("db2")), TableIdentifier("tbl4", Some("db2"))) + sessionCatalog.renameTable(TableIdentifier("tbl2", Some("db2")), "tbl4") assert(sessionCatalog.getTempTable("tbl3") == Option(tempTable)) assert(sessionCatalog.getTempTable("tbl4").isEmpty) assert(externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl4")) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala index 8fc1a8595a45f..fc078da07dcc0 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala @@ -666,9 +666,15 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { * }}} */ override def visitRenameTable(ctx: RenameTableContext): LogicalPlan = withOrigin(ctx) { + val fromName = visitTableIdentifier(ctx.from) + val toName = visitTableIdentifier(ctx.to) + if (toName.database.isDefined) { + operationNotAllowed("Can not specify database in table/view name after RENAME TO", ctx) + } + AlterTableRenameCommand( - visitTableIdentifier(ctx.from), - visitTableIdentifier(ctx.to), + fromName, + toName.table, ctx.VIEW != null) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index a1371582be064..4e6caae85caeb 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -152,7 +152,7 @@ case class CreateTableCommand(table: CatalogTable, ifNotExists: Boolean) extends */ case class AlterTableRenameCommand( oldName: TableIdentifier, - newName: TableIdentifier, + newName: String, isView: Boolean) extends RunnableCommand { @@ -165,6 +165,7 @@ case class AlterTableRenameCommand( if (isTemporary) { catalog.renameTable(oldName, newName) } else { + val newTblName = TableIdentifier(newName, oldName.database) // If an exception is thrown here we can just assume the table is uncached; // this can happen with Hive tables when the underlying catalog is in-memory. 
val wasCached = Try(sparkSession.catalog.isCached(oldName.unquotedString)).getOrElse(false) @@ -178,7 +179,7 @@ case class AlterTableRenameCommand( // For datasource tables, we also need to update the "path" serde property val table = catalog.getTableMetadata(oldName) if (DDLUtils.isDatasourceTable(table) && table.tableType == CatalogTableType.MANAGED) { - val newPath = catalog.defaultTablePath(newName) + val newPath = catalog.defaultTablePath(newTblName) val newTable = table.withNewStorage( serdeProperties = table.storage.properties ++ Map("path" -> newPath)) catalog.alterTable(newTable) @@ -188,7 +189,7 @@ case class AlterTableRenameCommand( catalog.refreshTable(oldName) catalog.renameTable(oldName, newName) if (wasCached) { - sparkSession.catalog.cacheTable(newName.unquotedString) + sparkSession.catalog.cacheTable(newTblName.unquotedString) } } Seq.empty[Row] diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala index 8dd883b37bde0..547fb63813750 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala @@ -388,14 +388,19 @@ class DDLCommandSuite extends PlanTest { val parsed_view = parser.parsePlan(sql_view) val expected_table = AlterTableRenameCommand( TableIdentifier("table_name", None), - TableIdentifier("new_table_name", None), + "new_table_name", isView = false) val expected_view = AlterTableRenameCommand( TableIdentifier("table_name", None), - TableIdentifier("new_table_name", None), + "new_table_name", isView = true) comparePlans(parsed_table, expected_table) comparePlans(parsed_view, expected_view) + + val e = intercept[ParseException]( + parser.parsePlan("ALTER TABLE db1.tbl RENAME TO db1.tbl2") + ) + assert(e.getMessage.contains("Can not specify database in table/view name after RENAME TO")) } // ALTER TABLE table_name SET TBLPROPERTIES ('comment' = new_comment); diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index 0073659a31541..fd35c987cab59 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -657,7 +657,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { createDatabase(catalog, "dby") createTable(catalog, tableIdent1) assert(catalog.listTables("dbx") == Seq(tableIdent1)) - sql("ALTER TABLE dbx.tab1 RENAME TO dbx.tab2") + sql("ALTER TABLE dbx.tab1 RENAME TO tab2") assert(catalog.listTables("dbx") == Seq(tableIdent2)) catalog.setCurrentDatabase("dbx") // rename without explicitly specifying database @@ -665,11 +665,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { assert(catalog.listTables("dbx") == Seq(tableIdent1)) // table to rename does not exist intercept[AnalysisException] { - sql("ALTER TABLE dbx.does_not_exist RENAME TO dbx.tab2") - } - // destination database is different - intercept[AnalysisException] { - sql("ALTER TABLE dbx.tab1 RENAME TO dby.tab2") + sql("ALTER TABLE dbx.does_not_exist RENAME TO tab2") } } @@ -691,31 +687,6 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { assert(spark.table("teachers").collect().toSeq == df.collect().toSeq) } - test("rename 
temporary table - destination table with database name") { - withTempView("tab1") { - sql( - """ - |CREATE TEMPORARY TABLE tab1 - |USING org.apache.spark.sql.sources.DDLScanSource - |OPTIONS ( - | From '1', - | To '10', - | Table 'test1' - |) - """.stripMargin) - - val e = intercept[AnalysisException] { - sql("ALTER TABLE tab1 RENAME TO default.tab2") - } - assert(e.getMessage.contains( - "RENAME TEMPORARY TABLE from '`tab1`' to '`default`.`tab2`': " + - "cannot specify database name 'default' in the destination table")) - - val catalog = spark.sessionState.catalog - assert(catalog.listTables("default") == Seq(TableIdentifier("tab1"))) - } - } - test("rename temporary table - destination table already exists") { withTempView("tab1", "tab2") { sql( @@ -744,7 +715,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { sql("ALTER TABLE tab1 RENAME TO tab2") } assert(e.getMessage.contains( - "RENAME TEMPORARY TABLE from '`tab1`' to '`tab2`': destination table already exists")) + "RENAME TEMPORARY TABLE from '`tab1`' to 'tab2': destination table already exists")) val catalog = spark.sessionState.catalog assert(catalog.listTables("default") == Seq(TableIdentifier("tab1"), TableIdentifier("tab2"))) From 6d86403d8b252776effcddd71338b4d21a224f9b Mon Sep 17 00:00:00 2001 From: wangzhenhua Date: Mon, 5 Sep 2016 17:32:31 +0200 Subject: [PATCH 207/270] [SPARK-17072][SQL] support table-level statistics generation and storing into/loading from metastore ## What changes were proposed in this pull request? 1. Support generation table-level statistics for - hive tables in HiveExternalCatalog - data source tables in HiveExternalCatalog - data source tables in InMemoryCatalog. 2. Add a property "catalogStats" in CatalogTable to hold statistics in Spark side. 3. Put logics of statistics transformation between Spark and Hive in HiveClientImpl. 4. Extend Statistics class by adding rowCount (will add estimatedSize when we have column stats). ## How was this patch tested? add unit tests Author: wangzhenhua Author: Zhenhua Wang Closes #14712 from wzhfy/tableStats. 
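As a rough illustration of the workflow this patch enables, here is a minimal sketch (illustrative only; the table name `stats_demo` and an existing `SparkSession` named `spark` are assumed, and the API calls mirror the test suites changed below) that analyzes a data source table with and without `NOSCAN` and reads the stored statistics back from the resolved relation:

```scala
import org.apache.spark.sql.execution.datasources.LogicalRelation

// Create and populate a small data source table (hypothetical name).
spark.sql("CREATE TABLE stats_demo (i INT, j STRING) USING parquet")
spark.range(2).selectExpr("CAST(id AS INT) AS i", "CAST(id AS STRING) AS j")
  .write.mode("overwrite").insertInto("stats_demo")

// With NOSCAN only the total size is recorded; rowCount stays None.
spark.sql("ANALYZE TABLE stats_demo COMPUTE STATISTICS NOSCAN")

// A full analyze additionally scans the table and records the row count.
spark.sql("ANALYZE TABLE stats_demo COMPUTE STATISTICS")

// The statistics are carried on the CatalogTable of the resolved relation.
spark.table("stats_demo").queryExecution.analyzed.collect {
  case rel: LogicalRelation => rel.catalogTable.flatMap(_.stats)
}.flatten.foreach { s =>
  println(s"sizeInBytes=${s.sizeInBytes}, rowCount=${s.rowCount}")
}
```

With `NOSCAN` only `sizeInBytes` is stored in the catalog; a full `ANALYZE TABLE` additionally fills in `rowCount`, which is what the new tests below assert.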
--- .../sql/catalyst/catalog/interface.scala | 4 +- .../catalyst/plans/logical/Statistics.scala | 15 +- .../spark/sql/catalyst/SQLBuilder.scala | 8 +- .../spark/sql/execution/SparkSqlParser.scala | 4 +- .../command/AnalyzeTableCommand.scala | 64 +++++--- .../datasources/DataSourceStrategy.scala | 8 +- .../datasources/FileSourceStrategy.scala | 2 +- .../datasources/LogicalRelation.scala | 13 +- .../sql/execution/datasources/rules.scala | 8 +- .../spark/sql/internal/SessionState.scala | 4 +- .../apache/spark/sql/StatisticsSuite.scala | 26 +++ .../spark/sql/hive/HiveExternalCatalog.scala | 57 +++++-- .../spark/sql/hive/HiveMetastoreCatalog.scala | 10 +- .../spark/sql/hive/MetastoreRelation.scala | 68 ++++---- .../spark/sql/hive/StatisticsSuite.scala | 153 +++++++++++++++++- .../sql/hive/execution/HiveDDLSuite.scala | 27 ++-- 16 files changed, 363 insertions(+), 108 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala index 8408d765d4918..79231ee9e378d 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala @@ -22,7 +22,7 @@ import java.util.Date import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} import org.apache.spark.sql.catalyst.expressions.Attribute -import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan} +import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics} import org.apache.spark.sql.catalyst.util.quoteIdentifier import org.apache.spark.sql.types.StructType @@ -130,6 +130,7 @@ case class CatalogTable( createTime: Long = System.currentTimeMillis, lastAccessTime: Long = -1, properties: Map[String, String] = Map.empty, + stats: Option[Statistics] = None, viewOriginalText: Option[String] = None, viewText: Option[String] = None, comment: Option[String] = None, @@ -190,6 +191,7 @@ case class CatalogTable( viewText.map("View: " + _).getOrElse(""), comment.map("Comment: " + _).getOrElse(""), if (properties.nonEmpty) s"Properties: $tableProperties" else "", + if (stats.isDefined) s"Statistics: ${stats.get}" else "", s"$storage") output.filter(_.nonEmpty).mkString("CatalogTable(\n\t", "\n\t", ")") diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala index 6e6cc6962c007..58fa537a18e3e 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala @@ -31,6 +31,19 @@ package org.apache.spark.sql.catalyst.plans.logical * * @param sizeInBytes Physical size in bytes. For leaf operators this defaults to 1, otherwise it * defaults to the product of children's `sizeInBytes`. + * @param rowCount Estimated number of rows. * @param isBroadcastable If true, output is small enough to be used in a broadcast join. 
*/ -case class Statistics(sizeInBytes: BigInt, isBroadcastable: Boolean = false) +case class Statistics( + sizeInBytes: BigInt, + rowCount: Option[BigInt] = None, + isBroadcastable: Boolean = false) { + override def toString: String = { + val output = + Seq(s"sizeInBytes=$sizeInBytes", + if (rowCount.isDefined) s"rowCount=${rowCount.get}" else "", + s"isBroadcastable=$isBroadcastable" + ) + output.filter(_.nonEmpty).mkString("Statistics(", ", ", ")") + } +} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala index dde91b0a8606e..6f821f80cc4c5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala @@ -590,8 +590,12 @@ class SQLBuilder private ( object ExtractSQLTable { def unapply(plan: LogicalPlan): Option[SQLTable] = plan match { - case l @ LogicalRelation(_, _, Some(TableIdentifier(table, Some(database)))) => - Some(SQLTable(database, table, l.output.map(_.withQualifier(None)))) + case l @ LogicalRelation(_, _, Some(catalogTable)) + if catalogTable.identifier.database.isDefined => + Some(SQLTable( + catalogTable.identifier.database.get, + catalogTable.identifier.table, + l.output.map(_.withQualifier(None)))) case relation: CatalogRelation => val m = relation.catalogTable diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala index fc078da07dcc0..7ba1a9ff223de 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala @@ -99,9 +99,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder { ctx.identifier.getText.toLowerCase == "noscan") { AnalyzeTableCommand(visitTableIdentifier(ctx.tableIdentifier).toString) } else { - // Always just run the no scan analyze. We should fix this and implement full analyze - // command in the future. - AnalyzeTableCommand(visitTableIdentifier(ctx.tableIdentifier).toString) + AnalyzeTableCommand(visitTableIdentifier(ctx.tableIdentifier).toString, noscan = false) } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala index a469d4da8613b..15687ddd728a0 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala @@ -21,19 +21,18 @@ import scala.util.control.NonFatal import org.apache.hadoop.fs.{FileSystem, Path} -import org.apache.spark.sql.{AnalysisException, Row, SparkSession} +import org.apache.spark.sql.{AnalysisException, Dataset, Row, SparkSession} import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases import org.apache.spark.sql.catalyst.catalog.{CatalogRelation, CatalogTable} +import org.apache.spark.sql.catalyst.plans.logical.Statistics +import org.apache.spark.sql.execution.datasources.LogicalRelation /** * Analyzes the given table in the current database to generate statistics, which will be * used in query optimizations. - * - * Right now, it only supports Hive tables and it only updates the size of a Hive table - * in the Hive metastore. 
*/ -case class AnalyzeTableCommand(tableName: String) extends RunnableCommand { +case class AnalyzeTableCommand(tableName: String, noscan: Boolean = true) extends RunnableCommand { override def run(sparkSession: SparkSession): Seq[Row] = { val sessionState = sparkSession.sessionState @@ -71,8 +70,6 @@ case class AnalyzeTableCommand(tableName: String) extends RunnableCommand { size } - val tableParameters = catalogTable.properties - val oldTotalSize = tableParameters.get("totalSize").map(_.toLong).getOrElse(0L) val newTotalSize = catalogTable.storage.locationUri.map { p => val path = new Path(p) @@ -88,24 +85,47 @@ case class AnalyzeTableCommand(tableName: String) extends RunnableCommand { } }.getOrElse(0L) - // Update the Hive metastore if the total size of the table is different than the size - // recorded in the Hive metastore. - // This logic is based on org.apache.hadoop.hive.ql.exec.StatsTask.aggregateStats(). - if (newTotalSize > 0 && newTotalSize != oldTotalSize) { - sessionState.catalog.alterTable( - catalogTable.copy( - properties = relation.catalogTable.properties + - (AnalyzeTableCommand.TOTAL_SIZE_FIELD -> newTotalSize.toString))) - } + updateTableStats(catalogTable, newTotalSize) + + // data source tables have been converted into LogicalRelations + case logicalRel: LogicalRelation if logicalRel.catalogTable.isDefined => + updateTableStats(logicalRel.catalogTable.get, logicalRel.relation.sizeInBytes) case otherRelation => - throw new AnalysisException(s"ANALYZE TABLE is only supported for Hive tables, " + - s"but '${tableIdent.unquotedString}' is a ${otherRelation.nodeName}.") + throw new AnalysisException(s"ANALYZE TABLE is not supported for " + + s"${otherRelation.nodeName}.") } + + def updateTableStats(catalogTable: CatalogTable, newTotalSize: Long): Unit = { + val oldTotalSize = catalogTable.stats.map(_.sizeInBytes.toLong).getOrElse(0L) + val oldRowCount = catalogTable.stats.flatMap(_.rowCount.map(_.toLong)).getOrElse(-1L) + var newStats: Option[Statistics] = None + if (newTotalSize > 0 && newTotalSize != oldTotalSize) { + newStats = Some(Statistics(sizeInBytes = newTotalSize)) + } + // We only set rowCount when noscan is false, because otherwise: + // 1. when total size is not changed, we don't need to alter the table; + // 2. when total size is changed, `oldRowCount` becomes invalid. + // This is to make sure that we only record the right statistics. + if (!noscan) { + val newRowCount = Dataset.ofRows(sparkSession, relation).count() + if (newRowCount >= 0 && newRowCount != oldRowCount) { + newStats = if (newStats.isDefined) { + newStats.map(_.copy(rowCount = Some(BigInt(newRowCount)))) + } else { + Some(Statistics(sizeInBytes = oldTotalSize, rowCount = Some(BigInt(newRowCount)))) + } + } + } + // Update the metastore if the above statistics of the table are different from those + // recorded in the metastore. + if (newStats.isDefined) { + sessionState.catalog.alterTable(catalogTable.copy(stats = newStats)) + // Refresh the cached data source table in the catalog. 
+ sessionState.catalog.refreshTable(tableIdent) + } + } + Seq.empty[Row] } } - -object AnalyzeTableCommand { - val TOTAL_SIZE_FIELD = "totalSize" -} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala index 8286467e96a15..c8ad5b303491f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala @@ -209,7 +209,7 @@ class FindDataSourceTable(sparkSession: SparkSession) extends Rule[LogicalPlan] LogicalRelation( dataSource.resolveRelation(), - metastoreTableIdentifier = Some(table.identifier)) + catalogTable = Some(table)) } override def apply(plan: LogicalPlan): LogicalPlan = plan transform { @@ -366,7 +366,8 @@ object DataSourceStrategy extends Strategy with Logging { val scan = RowDataSourceScanExec( projects.map(_.toAttribute), scanBuilder(requestedColumns, candidatePredicates, pushedFilters), - relation.relation, UnknownPartitioning(0), metadata, relation.metastoreTableIdentifier) + relation.relation, UnknownPartitioning(0), metadata, + relation.catalogTable.map(_.identifier)) filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan) } else { // Don't request columns that are only referenced by pushed filters. @@ -376,7 +377,8 @@ object DataSourceStrategy extends Strategy with Logging { val scan = RowDataSourceScanExec( requestedColumns, scanBuilder(requestedColumns, candidatePredicates, pushedFilters), - relation.relation, UnknownPartitioning(0), metadata, relation.metastoreTableIdentifier) + relation.relation, UnknownPartitioning(0), metadata, + relation.catalogTable.map(_.identifier)) execution.ProjectExec( projects, filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan)) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala index 8b36caf6f1e08..55ca4f11068f9 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala @@ -112,7 +112,7 @@ object FileSourceStrategy extends Strategy with Logging { outputSchema, partitionKeyFilters.toSeq, pushedDownFilters, - table) + table.map(_.identifier)) val afterScanFilter = afterScanFilters.toSeq.reduceOption(expressions.And) val withFilter = afterScanFilter.map(execution.FilterExec(_, scan)).getOrElse(scan) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala index 2a8e147011f55..d9562fd32e87d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala @@ -16,8 +16,8 @@ */ package org.apache.spark.sql.execution.datasources -import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation +import org.apache.spark.sql.catalyst.catalog.CatalogTable import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap, AttributeReference} import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics} import 
org.apache.spark.sql.sources.BaseRelation @@ -33,7 +33,7 @@ import org.apache.spark.util.Utils case class LogicalRelation( relation: BaseRelation, expectedOutputAttributes: Option[Seq[Attribute]] = None, - metastoreTableIdentifier: Option[TableIdentifier] = None) + catalogTable: Option[CatalogTable] = None) extends LeafNode with MultiInstanceRelation { override val output: Seq[AttributeReference] = { @@ -72,9 +72,10 @@ case class LogicalRelation( // expId can be different but the relation is still the same. override lazy val cleanArgs: Seq[Any] = Seq(relation) - @transient override lazy val statistics: Statistics = Statistics( - sizeInBytes = BigInt(relation.sizeInBytes) - ) + @transient override lazy val statistics: Statistics = { + catalogTable.flatMap(_.stats.map(_.copy(sizeInBytes = relation.sizeInBytes))).getOrElse( + Statistics(sizeInBytes = relation.sizeInBytes)) + } /** Used to lookup original attribute capitalization */ val attributeMap: AttributeMap[AttributeReference] = AttributeMap(output.map(o => (o, o))) @@ -89,7 +90,7 @@ case class LogicalRelation( LogicalRelation( relation, expectedOutputAttributes.map(_.map(_.newInstance())), - metastoreTableIdentifier).asInstanceOf[this.type] + catalogTable).asInstanceOf[this.type] } override def refresh(): Unit = relation match { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala index ae77e4cb96e7d..5b96206ba88fe 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala @@ -252,11 +252,11 @@ case class PreprocessTableInsertion(conf: SQLConf) extends Rule[LogicalPlan] { case relation: CatalogRelation => val metadata = relation.catalogTable preprocess(i, metadata.identifier.quotedString, metadata.partitionColumnNames) - case LogicalRelation(h: HadoopFsRelation, _, identifier) => - val tblName = identifier.map(_.quotedString).getOrElse("unknown") + case LogicalRelation(h: HadoopFsRelation, _, catalogTable) => + val tblName = catalogTable.map(_.identifier.quotedString).getOrElse("unknown") preprocess(i, tblName, h.partitionSchema.map(_.name)) - case LogicalRelation(_: InsertableRelation, _, identifier) => - val tblName = identifier.map(_.quotedString).getOrElse("unknown") + case LogicalRelation(_: InsertableRelation, _, catalogTable) => + val tblName = catalogTable.map(_.identifier.quotedString).getOrElse("unknown") preprocess(i, tblName, Nil) case other => i } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala index ab27381c0600d..8fdbd0f2c6dab 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala @@ -192,7 +192,7 @@ private[sql] class SessionState(sparkSession: SparkSession) { * Right now, it only supports catalog tables and it only updates the size of a catalog table * in the external catalog. 
*/ - def analyze(tableName: String): Unit = { - AnalyzeTableCommand(tableName).run(sparkSession) + def analyze(tableName: String, noscan: Boolean = true): Unit = { + AnalyzeTableCommand(tableName, noscan).run(sparkSession) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsSuite.scala index 2c81cbf15f088..264a2ffbebebd 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsSuite.scala @@ -18,6 +18,7 @@ package org.apache.spark.sql import org.apache.spark.sql.catalyst.plans.logical.{GlobalLimit, Join, LocalLimit} +import org.apache.spark.sql.execution.datasources.LogicalRelation import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types._ @@ -75,4 +76,29 @@ class StatisticsSuite extends QueryTest with SharedSQLContext { } } + test("test table-level statistics for data source table created in InMemoryCatalog") { + def checkTableStats(tableName: String, expectedRowCount: Option[BigInt]): Unit = { + val df = sql(s"SELECT * FROM $tableName") + val relations = df.queryExecution.analyzed.collect { case rel: LogicalRelation => + assert(rel.catalogTable.isDefined) + assert(rel.catalogTable.get.stats.flatMap(_.rowCount) === expectedRowCount) + rel + } + assert(relations.size === 1) + } + + val tableName = "tbl" + withTable(tableName) { + sql(s"CREATE TABLE $tableName(i INT, j STRING) USING parquet") + Seq(1 -> "a", 2 -> "b").toDF("i", "j").write.mode("overwrite").insertInto("tbl") + + // noscan won't count the number of rows + sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS noscan") + checkTableStats(tableName, expectedRowCount = None) + + // without noscan, we count the number of rows + sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS") + checkTableStats(tableName, expectedRowCount = Some(2)) + } + } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index 1fe7f4d41dc16..2e127ef562121 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -32,6 +32,7 @@ import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.catalyst.catalog._ +import org.apache.spark.sql.catalyst.plans.logical.Statistics import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.CaseInsensitiveMap import org.apache.spark.sql.hive.client.HiveClient @@ -102,11 +103,13 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat * metastore. 
*/ private def verifyTableProperties(table: CatalogTable): Unit = { - val datasourceKeys = table.properties.keys.filter(_.startsWith(DATASOURCE_PREFIX)) - if (datasourceKeys.nonEmpty) { + val invalidKeys = table.properties.keys.filter { key => + key.startsWith(DATASOURCE_PREFIX) || key.startsWith(STATISTICS_PREFIX) + } + if (invalidKeys.nonEmpty) { throw new AnalysisException(s"Cannot persistent ${table.qualifiedName} into hive metastore " + - s"as table property keys may not start with '$DATASOURCE_PREFIX': " + - datasourceKeys.mkString("[", ", ", "]")) + s"as table property keys may not start with '$DATASOURCE_PREFIX' or '$STATISTICS_PREFIX':" + + s" ${invalidKeys.mkString("[", ", ", "]")}") } } @@ -388,21 +391,34 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat requireTableExists(db, tableDefinition.identifier.table) verifyTableProperties(tableDefinition) - if (DDLUtils.isDatasourceTable(tableDefinition)) { - val oldDef = client.getTable(db, tableDefinition.identifier.table) + // convert table statistics to properties so that we can persist them through hive api + val withStatsProps = if (tableDefinition.stats.isDefined) { + val stats = tableDefinition.stats.get + var statsProperties: Map[String, String] = + Map(STATISTICS_TOTAL_SIZE -> stats.sizeInBytes.toString()) + if (stats.rowCount.isDefined) { + statsProperties += (STATISTICS_NUM_ROWS -> stats.rowCount.get.toString()) + } + tableDefinition.copy(properties = tableDefinition.properties ++ statsProperties) + } else { + tableDefinition + } + + if (DDLUtils.isDatasourceTable(withStatsProps)) { + val oldDef = client.getTable(db, withStatsProps.identifier.table) // Sets the `schema`, `partitionColumnNames` and `bucketSpec` from the old table definition, // to retain the spark specific format if it is. Also add old data source properties to table // properties, to retain the data source table format. val oldDataSourceProps = oldDef.properties.filter(_._1.startsWith(DATASOURCE_PREFIX)) - val newDef = tableDefinition.copy( + val newDef = withStatsProps.copy( schema = oldDef.schema, partitionColumnNames = oldDef.partitionColumnNames, bucketSpec = oldDef.bucketSpec, - properties = oldDataSourceProps ++ tableDefinition.properties) + properties = oldDataSourceProps ++ withStatsProps.properties) client.alterTable(newDef) } else { - client.alterTable(tableDefinition) + client.alterTable(withStatsProps) } } @@ -422,7 +438,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat * properties, and filter out these special entries from table properties. 
*/ private def restoreTableMetadata(table: CatalogTable): CatalogTable = { - if (table.tableType == VIEW) { + val catalogTable = if (table.tableType == VIEW) { table } else { getProviderFromTableProperties(table).map { provider => @@ -452,6 +468,19 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat table.copy(provider = Some("hive")) } } + // construct Spark's statistics from information in Hive metastore + if (catalogTable.properties.contains(STATISTICS_TOTAL_SIZE)) { + val totalSize = BigInt(catalogTable.properties.get(STATISTICS_TOTAL_SIZE).get) + // TODO: we will compute "estimatedSize" when we have column stats: + // average size of row * number of rows + catalogTable.copy( + properties = removeStatsProperties(catalogTable), + stats = Some(Statistics( + sizeInBytes = totalSize, + rowCount = catalogTable.properties.get(STATISTICS_NUM_ROWS).map(BigInt(_))))) + } else { + catalogTable + } } override def tableExists(db: String, table: String): Boolean = withClient { @@ -657,6 +686,14 @@ object HiveExternalCatalog { val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol." val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol." + val STATISTICS_PREFIX = "spark.sql.statistics." + val STATISTICS_TOTAL_SIZE = STATISTICS_PREFIX + "totalSize" + val STATISTICS_NUM_ROWS = STATISTICS_PREFIX + "numRows" + + def removeStatsProperties(metadata: CatalogTable): Map[String, String] = { + metadata.properties.filterNot { case (key, _) => key.startsWith(STATISTICS_PREFIX) } + } + def getProviderFromTableProperties(metadata: CatalogTable): Option[String] = { metadata.properties.get(DATASOURCE_PROVIDER) } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index ff82c7f7af6f8..d31a8d643ad88 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -82,7 +82,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log LogicalRelation( dataSource.resolveRelation(checkPathExist = true), - metastoreTableIdentifier = Some(TableIdentifier(in.name, Some(in.database)))) + catalogTable = Some(table)) } } @@ -257,10 +257,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log fileFormat = defaultSource, options = options) - val created = LogicalRelation( - relation, - metastoreTableIdentifier = - Some(TableIdentifier(tableIdentifier.name, Some(tableIdentifier.database)))) + val created = LogicalRelation(relation, catalogTable = Some(metastoreRelation.catalogTable)) cachedDataSourceTables.put(tableIdentifier, created) created } @@ -286,8 +283,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log bucketSpec = bucketSpec, options = options, className = fileType).resolveRelation(), - metastoreTableIdentifier = - Some(TableIdentifier(tableIdentifier.name, Some(tableIdentifier.database)))) + catalogTable = Some(metastoreRelation.catalogTable)) cachedDataSourceTables.put(tableIdentifier, created) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala index a90da98811f5d..0bfdc137fade7 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala +++ 
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala @@ -108,39 +108,41 @@ private[hive] case class MetastoreRelation( new HiveTable(tTable) } - @transient override lazy val statistics: Statistics = Statistics( - sizeInBytes = { - val totalSize = hiveQlTable.getParameters.get(StatsSetupConst.TOTAL_SIZE) - val rawDataSize = hiveQlTable.getParameters.get(StatsSetupConst.RAW_DATA_SIZE) - // TODO: check if this estimate is valid for tables after partition pruning. - // NOTE: getting `totalSize` directly from params is kind of hacky, but this should be - // relatively cheap if parameters for the table are populated into the metastore. - // Besides `totalSize`, there are also `numFiles`, `numRows`, `rawDataSize` keys - // (see StatsSetupConst in Hive) that we can look at in the future. - BigInt( - // When table is external,`totalSize` is always zero, which will influence join strategy - // so when `totalSize` is zero, use `rawDataSize` instead - // if the size is still less than zero, we try to get the file size from HDFS. - // given this is only needed for optimization, if the HDFS call fails we return the default. - if (totalSize != null && totalSize.toLong > 0L) { - totalSize.toLong - } else if (rawDataSize != null && rawDataSize.toLong > 0) { - rawDataSize.toLong - } else if (sparkSession.sessionState.conf.fallBackToHdfsForStatsEnabled) { - try { - val hadoopConf = sparkSession.sessionState.newHadoopConf() - val fs: FileSystem = hiveQlTable.getPath.getFileSystem(hadoopConf) - fs.getContentSummary(hiveQlTable.getPath).getLength - } catch { - case e: IOException => - logWarning("Failed to get table size from hdfs.", e) - sparkSession.sessionState.conf.defaultSizeInBytes - } - } else { - sparkSession.sessionState.conf.defaultSizeInBytes - }) - } - ) + @transient override lazy val statistics: Statistics = { + catalogTable.stats.getOrElse(Statistics( + sizeInBytes = { + val totalSize = hiveQlTable.getParameters.get(StatsSetupConst.TOTAL_SIZE) + val rawDataSize = hiveQlTable.getParameters.get(StatsSetupConst.RAW_DATA_SIZE) + // TODO: check if this estimate is valid for tables after partition pruning. + // NOTE: getting `totalSize` directly from params is kind of hacky, but this should be + // relatively cheap if parameters for the table are populated into the metastore. + // Besides `totalSize`, there are also `numFiles`, `numRows`, `rawDataSize` keys + // (see StatsSetupConst in Hive) that we can look at in the future. + BigInt( + // When table is external,`totalSize` is always zero, which will influence join strategy + // so when `totalSize` is zero, use `rawDataSize` instead + // when `rawDataSize` is also zero, use `HiveExternalCatalog.STATISTICS_TOTAL_SIZE`, + // which is generated by analyze command. 
+ if (totalSize != null && totalSize.toLong > 0L) { + totalSize.toLong + } else if (rawDataSize != null && rawDataSize.toLong > 0) { + rawDataSize.toLong + } else if (sparkSession.sessionState.conf.fallBackToHdfsForStatsEnabled) { + try { + val hadoopConf = sparkSession.sessionState.newHadoopConf() + val fs: FileSystem = hiveQlTable.getPath.getFileSystem(hadoopConf) + fs.getContentSummary(hiveQlTable.getPath).getLength + } catch { + case e: IOException => + logWarning("Failed to get table size from hdfs.", e) + sparkSession.sessionState.conf.defaultSizeInBytes + } + } else { + sparkSession.sessionState.conf.defaultSizeInBytes + }) + } + )) + } // When metastore partition pruning is turned off, we cache the list of all partitions to // mimic the behavior of Spark < 1.5 diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala index b275ab17a93ca..33ed67575486a 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala @@ -23,11 +23,14 @@ import scala.reflect.ClassTag import org.apache.spark.sql.{AnalysisException, QueryTest, Row} import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.execution.command.AnalyzeTableCommand +import org.apache.spark.sql.catalyst.plans.logical.Statistics +import org.apache.spark.sql.execution.command.{AnalyzeTableCommand, DDLUtils} +import org.apache.spark.sql.execution.datasources.LogicalRelation import org.apache.spark.sql.execution.joins._ import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SQLTestUtils +import org.apache.spark.sql.types.StructType class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils { @@ -168,6 +171,154 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils TableIdentifier("tempTable"), ignoreIfNotExists = true, purge = false) } + private def checkMetastoreRelationStats( + tableName: String, + expectedStats: Option[Statistics]): Unit = { + val df = sql(s"SELECT * FROM $tableName") + val relations = df.queryExecution.analyzed.collect { case rel: MetastoreRelation => + expectedStats match { + case Some(es) => + assert(rel.catalogTable.stats.isDefined) + val stats = rel.catalogTable.stats.get + assert(stats.sizeInBytes === es.sizeInBytes) + assert(stats.rowCount === es.rowCount) + case None => + assert(rel.catalogTable.stats.isEmpty) + } + rel + } + assert(relations.size === 1) + } + + test("test table-level statistics for hive tables created in HiveExternalCatalog") { + val textTable = "textTable" + withTable(textTable) { + // Currently Spark's statistics are self-contained, we don't have statistics until we use + // the `ANALYZE TABLE` command. 
+ sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS TEXTFILE") + checkMetastoreRelationStats(textTable, expectedStats = None) + sql(s"INSERT INTO TABLE $textTable SELECT * FROM src") + checkMetastoreRelationStats(textTable, expectedStats = None) + + // noscan won't count the number of rows + sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan") + checkMetastoreRelationStats(textTable, expectedStats = + Some(Statistics(sizeInBytes = 5812, rowCount = None))) + + // without noscan, we count the number of rows + sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS") + checkMetastoreRelationStats(textTable, expectedStats = + Some(Statistics(sizeInBytes = 5812, rowCount = Some(500)))) + } + } + + test("test elimination of the influences of the old stats") { + val textTable = "textTable" + withTable(textTable) { + sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS TEXTFILE") + sql(s"INSERT INTO TABLE $textTable SELECT * FROM src") + sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS") + checkMetastoreRelationStats(textTable, expectedStats = + Some(Statistics(sizeInBytes = 5812, rowCount = Some(500)))) + + sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan") + // when the total size is not changed, the old row count is kept + checkMetastoreRelationStats(textTable, expectedStats = + Some(Statistics(sizeInBytes = 5812, rowCount = Some(500)))) + + sql(s"INSERT INTO TABLE $textTable SELECT * FROM src") + sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan") + // update total size and remove the old and invalid row count + checkMetastoreRelationStats(textTable, expectedStats = + Some(Statistics(sizeInBytes = 11624, rowCount = None))) + } + } + + private def checkLogicalRelationStats( + tableName: String, + expectedStats: Option[Statistics]): Unit = { + val df = sql(s"SELECT * FROM $tableName") + val relations = df.queryExecution.analyzed.collect { case rel: LogicalRelation => + assert(rel.catalogTable.isDefined) + expectedStats match { + case Some(es) => + assert(rel.catalogTable.get.stats.isDefined) + val stats = rel.catalogTable.get.stats.get + assert(stats.sizeInBytes === es.sizeInBytes) + assert(stats.rowCount === es.rowCount) + case None => + assert(rel.catalogTable.get.stats.isEmpty) + } + rel + } + assert(relations.size === 1) + } + + test("test statistics of LogicalRelation converted from MetastoreRelation") { + val parquetTable = "parquetTable" + val orcTable = "orcTable" + withTable(parquetTable, orcTable) { + sql(s"CREATE TABLE $parquetTable (key STRING, value STRING) STORED AS PARQUET") + sql(s"CREATE TABLE $orcTable (key STRING, value STRING) STORED AS ORC") + sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src") + sql(s"INSERT INTO TABLE $orcTable SELECT * FROM src") + + // the default value for `spark.sql.hive.convertMetastoreParquet` is true, here we just set it + // for robustness + withSQLConf("spark.sql.hive.convertMetastoreParquet" -> "true") { + checkLogicalRelationStats(parquetTable, expectedStats = None) + sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS") + checkLogicalRelationStats(parquetTable, expectedStats = + Some(Statistics(sizeInBytes = 4236, rowCount = Some(500)))) + } + withSQLConf("spark.sql.hive.convertMetastoreOrc" -> "true") { + checkLogicalRelationStats(orcTable, expectedStats = None) + sql(s"ANALYZE TABLE $orcTable COMPUTE STATISTICS") + checkLogicalRelationStats(orcTable, expectedStats = + Some(Statistics(sizeInBytes = 3023, rowCount = Some(500)))) + } + } + } + + test("test table-level statistics for data 
source table created in HiveExternalCatalog") { + val parquetTable = "parquetTable" + withTable(parquetTable) { + sql(s"CREATE TABLE $parquetTable (key STRING, value STRING) USING PARQUET") + val catalogTable = spark.sessionState.catalog.getTableMetadata(TableIdentifier(parquetTable)) + assert(DDLUtils.isDatasourceTable(catalogTable)) + + sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src") + checkLogicalRelationStats(parquetTable, expectedStats = None) + + // noscan won't count the number of rows + sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS noscan") + checkLogicalRelationStats(parquetTable, expectedStats = + Some(Statistics(sizeInBytes = 4236, rowCount = None))) + + sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src") + sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS noscan") + checkLogicalRelationStats(parquetTable, expectedStats = + Some(Statistics(sizeInBytes = 8472, rowCount = None))) + + // without noscan, we count the number of rows + sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS") + checkLogicalRelationStats(parquetTable, expectedStats = + Some(Statistics(sizeInBytes = 8472, rowCount = Some(1000)))) + } + } + + test("statistics collection of a table with zero column") { + val table_no_cols = "table_no_cols" + withTable(table_no_cols) { + val rddNoCols = sparkContext.parallelize(1 to 10).map(_ => Row.empty) + val dfNoCols = spark.createDataFrame(rddNoCols, StructType(Seq.empty)) + dfNoCols.write.format("json").saveAsTable(table_no_cols) + sql(s"ANALYZE TABLE $table_no_cols COMPUTE STATISTICS") + checkLogicalRelationStats(table_no_cols, expectedStats = + Some(Statistics(sizeInBytes = 30, rowCount = Some(10)))) + } + } + test("estimates the size of a test MetastoreRelation") { val df = sql("""SELECT * FROM src""") val sizes = df.queryExecution.analyzed.collect { case mr: MetastoreRelation => diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index eff32805bf50c..3cba5b2a097f1 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -1026,26 +1026,29 @@ class HiveDDLSuite } } - test("datasource table property keys are not allowed") { + test("datasource and statistics table property keys are not allowed") { import org.apache.spark.sql.hive.HiveExternalCatalog.DATASOURCE_PREFIX + import org.apache.spark.sql.hive.HiveExternalCatalog.STATISTICS_PREFIX withTable("tbl") { sql("CREATE TABLE tbl(a INT) STORED AS parquet") - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${DATASOURCE_PREFIX}foo' = 'loser')") - } - assert(e.getMessage.contains(DATASOURCE_PREFIX + "foo")) + Seq(DATASOURCE_PREFIX, STATISTICS_PREFIX).foreach { forbiddenPrefix => + val e = intercept[AnalysisException] { + sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${forbiddenPrefix}foo' = 'loser')") + } + assert(e.getMessage.contains(forbiddenPrefix + "foo")) - val e2 = intercept[AnalysisException] { - sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${DATASOURCE_PREFIX}foo')") - } - assert(e2.getMessage.contains(DATASOURCE_PREFIX + "foo")) + val e2 = intercept[AnalysisException] { + sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${forbiddenPrefix}foo')") + } + assert(e2.getMessage.contains(forbiddenPrefix + "foo")) - val e3 = intercept[AnalysisException] { - sql(s"CREATE TABLE tbl TBLPROPERTIES 
('${DATASOURCE_PREFIX}foo'='anything')") + val e3 = intercept[AnalysisException] { + sql(s"CREATE TABLE tbl TBLPROPERTIES ('${forbiddenPrefix}foo'='anything')") + } + assert(e3.getMessage.contains(forbiddenPrefix + "foo")) } - assert(e3.getMessage.contains(DATASOURCE_PREFIX + "foo")) } } } From 8d08f43d09157b98e559c0be6ce6fd571a35e0d1 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Tue, 6 Sep 2016 10:36:00 +0800 Subject: [PATCH 208/270] [SPARK-17279][SQL] better error message for exceptions during ScalaUDF execution ## What changes were proposed in this pull request? If `ScalaUDF` throws exceptions during executing user code, sometimes it's hard for users to figure out what's wrong, especially when they use Spark shell. An example ``` org.apache.spark.SparkException: Job aborted due to stage failure: Task 12 in stage 325.0 failed 4 times, most recent failure: Lost task 12.3 in stage 325.0 (TID 35622, 10.0.207.202): java.lang.NullPointerException at line8414e872fb8b42aba390efc153d1611a12.$read$$iwC$$iwC$$iwC$$iwC$$anonfun$2.apply(:40) at line8414e872fb8b42aba390efc153d1611a12.$read$$iwC$$iwC$$iwC$$iwC$$anonfun$2.apply(:40) at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source) ... ``` We should catch these exceptions and rethrow them with better error message, to say that the exception is happened in scala udf. This PR also does some clean up for `ScalaUDF` and add a unit test suite for it. ## How was this patch tested? the new test suite Author: Wenchen Fan Closes #14850 from cloud-fan/npe. --- .../spark/ml/recommendation/ALSSuite.scala | 16 +++---- .../sql/catalyst/expressions/ScalaUDF.scala | 44 +++++++++++------ .../catalyst/expressions/ScalaUDFSuite.scala | 48 +++++++++++++++++++ 3 files changed, 86 insertions(+), 22 deletions(-) create mode 100644 sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDFSuite.scala diff --git a/mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala index e8ed50acf877c..d0aa2cdfe0fd1 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala @@ -510,18 +510,18 @@ class ALSSuite (1, 1L, 1d, 0, 0L, 0d, 5.0) ).toDF("user", "user_big", "user_small", "item", "item_big", "item_small", "rating") withClue("fit should fail when ids exceed integer range. 
") { - assert(intercept[IllegalArgumentException] { + assert(intercept[SparkException] { als.fit(df.select(df("user_big").as("user"), df("item"), df("rating"))) - }.getMessage.contains("was out of Integer range")) - assert(intercept[IllegalArgumentException] { + }.getCause.getMessage.contains("was out of Integer range")) + assert(intercept[SparkException] { als.fit(df.select(df("user_small").as("user"), df("item"), df("rating"))) - }.getMessage.contains("was out of Integer range")) - assert(intercept[IllegalArgumentException] { + }.getCause.getMessage.contains("was out of Integer range")) + assert(intercept[SparkException] { als.fit(df.select(df("item_big").as("item"), df("user"), df("rating"))) - }.getMessage.contains("was out of Integer range")) - assert(intercept[IllegalArgumentException] { + }.getCause.getMessage.contains("was out of Integer range")) + assert(intercept[SparkException] { als.fit(df.select(df("item_small").as("item"), df("user"), df("rating"))) - }.getMessage.contains("was out of Integer range")) + }.getCause.getMessage.contains("was out of Integer range")) } withClue("transform should fail when ids exceed integer range. ") { val model = als.fit(df) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala index 21390644bc0b6..6cfdea9fdf9c5 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala @@ -17,6 +17,7 @@ package org.apache.spark.sql.catalyst.expressions +import org.apache.spark.SparkException import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow} import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.types.DataType @@ -994,20 +995,15 @@ case class ScalaUDF( ctx: CodegenContext, ev: ExprCode): ExprCode = { - ctx.references += this - - val scalaUDFClassName = classOf[ScalaUDF].getName + val scalaUDF = ctx.addReferenceObj("scalaUDF", this) val converterClassName = classOf[Any => Any].getName val typeConvertersClassName = CatalystTypeConverters.getClass.getName + ".MODULE$" - val expressionClassName = classOf[Expression].getName // Generate codes used to convert the returned value of user-defined functions to Catalyst type val catalystConverterTerm = ctx.freshName("catalystConverter") - val catalystConverterTermIdx = ctx.references.size - 1 ctx.addMutableState(converterClassName, catalystConverterTerm, s"this.$catalystConverterTerm = ($converterClassName)$typeConvertersClassName" + - s".createToCatalystConverter((($scalaUDFClassName)references" + - s"[$catalystConverterTermIdx]).dataType());") + s".createToCatalystConverter($scalaUDF.dataType());") val resultTerm = ctx.freshName("result") @@ -1019,10 +1015,8 @@ case class ScalaUDF( val funcClassName = s"scala.Function${children.size}" val funcTerm = ctx.freshName("udf") - val funcExpressionIdx = ctx.references.size - 1 ctx.addMutableState(funcClassName, funcTerm, - s"this.$funcTerm = ($funcClassName)((($scalaUDFClassName)references" + - s"[$funcExpressionIdx]).userDefinedFunc());") + s"this.$funcTerm = ($funcClassName)$scalaUDF.userDefinedFunc();") // codegen for children expressions val evals = children.map(_.genCode(ctx)) @@ -1039,9 +1033,16 @@ case class ScalaUDF( (convert, argTerm) }.unzip - val callFunc = s"${ctx.boxedType(dataType)} $resultTerm = " + - 
s"(${ctx.boxedType(dataType)})${catalystConverterTerm}" + - s".apply($funcTerm.apply(${funcArguments.mkString(", ")}));" + val getFuncResult = s"$funcTerm.apply(${funcArguments.mkString(", ")})" + val callFunc = + s""" + ${ctx.boxedType(dataType)} $resultTerm = null; + try { + $resultTerm = (${ctx.boxedType(dataType)})$catalystConverterTerm.apply($getFuncResult); + } catch (Exception e) { + throw new org.apache.spark.SparkException($scalaUDF.udfErrorMessage(), e); + } + """ ev.copy(code = s""" $evalCode @@ -1057,5 +1058,20 @@ case class ScalaUDF( private[this] val converter = CatalystTypeConverters.createToCatalystConverter(dataType) - override def eval(input: InternalRow): Any = converter(f(input)) + lazy val udfErrorMessage = { + val funcCls = function.getClass.getSimpleName + val inputTypes = children.map(_.dataType.simpleString).mkString(", ") + s"Failed to execute user defined function($funcCls: ($inputTypes) => ${dataType.simpleString})" + } + + override def eval(input: InternalRow): Any = { + val result = try { + f(input) + } catch { + case e: Exception => + throw new SparkException(udfErrorMessage, e) + } + + converter(result) + } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDFSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDFSuite.scala new file mode 100644 index 0000000000000..7e45028653e36 --- /dev/null +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDFSuite.scala @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.catalyst.expressions + +import org.apache.spark.{SparkException, SparkFunSuite} +import org.apache.spark.sql.types.{IntegerType, StringType} + +class ScalaUDFSuite extends SparkFunSuite with ExpressionEvalHelper { + + test("basic") { + val intUdf = ScalaUDF((i: Int) => i + 1, IntegerType, Literal(1) :: Nil) + checkEvaluation(intUdf, 2) + + val stringUdf = ScalaUDF((s: String) => s + "x", StringType, Literal("a") :: Nil) + checkEvaluation(stringUdf, "ax") + } + + test("better error message for NPE") { + val udf = ScalaUDF( + (s: String) => s.toLowerCase, + StringType, + Literal.create(null, StringType) :: Nil) + + val e1 = intercept[SparkException](udf.eval()) + assert(e1.getMessage.contains("Failed to execute user defined function")) + + val e2 = intercept[SparkException] { + checkEvalutionWithUnsafeProjection(udf, null) + } + assert(e2.getMessage.contains("Failed to execute user defined function")) + } + +} From afb3d5d301d004fd748ad305b3d72066af4ebb6c Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Tue, 6 Sep 2016 10:50:07 +0800 Subject: [PATCH 209/270] [SPARK-17369][SQL] MetastoreRelation toJSON throws AssertException due to missing otherCopyArgs ## What changes were proposed in this pull request? `TreeNode.toJSON` requires a subclass to explicitly override otherCopyArgs to include currying construction arguments, otherwise it reports AssertException telling that the construction argument values' count doesn't match the construction argument names' count. For class `MetastoreRelation`, it has a currying construction parameter `client: HiveClient`, but Spark forgets to add it to the list of otherCopyArgs. ## How was this patch tested? Unit tests. Author: Sean Zhong Closes #14928 from clockfly/metastore_relation_toJSON. --- .../spark/sql/hive/MetastoreRelation.scala | 2 +- .../sql/hive/MetastoreRelationSuite.scala | 39 +++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreRelationSuite.scala diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala index 0bfdc137fade7..33f0ecff63529 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala @@ -59,7 +59,7 @@ private[hive] case class MetastoreRelation( Objects.hashCode(databaseName, tableName, output) } - override protected def otherCopyArgs: Seq[AnyRef] = catalogTable :: sparkSession :: Nil + override protected def otherCopyArgs: Seq[AnyRef] = catalogTable :: client :: sparkSession :: Nil private def toHiveColumn(c: StructField): FieldSchema = { new FieldSchema(c.name, c.dataType.catalogString, c.getComment.orNull) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreRelationSuite.scala new file mode 100644 index 0000000000000..2f3055dcac4c5 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreRelationSuite.scala @@ -0,0 +1,39 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. 
+* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.spark.sql.hive + +import org.apache.spark.SparkFunSuite +import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} +import org.apache.spark.sql.types.{IntegerType, StructField, StructType} + +class MetastoreRelationSuite extends SparkFunSuite { + test("makeCopy and toJSON should work") { + val table = CatalogTable( + identifier = TableIdentifier("test", Some("db")), + tableType = CatalogTableType.VIEW, + storage = CatalogStorageFormat.empty, + schema = StructType(StructField("a", IntegerType, true) :: Nil)) + val relation = MetastoreRelation("db", "test")(table, null, null) + + // No exception should be thrown + relation.makeCopy(Array("db", "test")) + // No exception should be thrown + relation.toJSON + } +} From 64e826f91eabb1a22d3d163d71fbb7b6d2185f25 Mon Sep 17 00:00:00 2001 From: Yadong Qi Date: Tue, 6 Sep 2016 10:57:21 +0800 Subject: [PATCH 210/270] [SPARK-17358][SQL] Cached table(parquet/orc) should be shared between beelines ## What changes were proposed in this pull request? A cached table (parquet/orc) couldn't be shared between beelines, because the `sameResult` method used by `CacheManager` always returns false (the `sparkSession` instances are different) when comparing two `HadoopFsRelation`s from different beelines. So we make `sparkSession` a curried parameter. ## How was this patch tested?
Beeline1 ``` 1: jdbc:hive2://localhost:10000> CACHE TABLE src_pqt; +---------+--+ | Result | +---------+--+ +---------+--+ No rows selected (5.143 seconds) 1: jdbc:hive2://localhost:10000> EXPLAIN SELECT * FROM src_pqt; +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | plan | +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | == Physical Plan == InMemoryTableScan [key#49, value#50] +- InMemoryRelation [key#49, value#50], true, 10000, StorageLevel(disk, memory, deserialized, 1 replicas), `src_pqt` +- *FileScan parquet default.src_pqt[key#0,value#1] Batched: true, Format: ParquetFormat, InputPaths: hdfs://199.0.0.1:9000/qiyadong/src_pqt, PartitionFilters: [], PushedFilters: [], ReadSchema: struct | +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ ``` Beeline2 ``` 0: jdbc:hive2://localhost:10000> EXPLAIN SELECT * FROM src_pqt; +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | plan | +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | == Physical Plan == InMemoryTableScan [key#68, value#69] +- InMemoryRelation [key#68, value#69], true, 10000, StorageLevel(disk, memory, deserialized, 1 replicas), `src_pqt` +- *FileScan parquet default.src_pqt[key#0,value#1] Batched: true, Format: ParquetFormat, InputPaths: hdfs://199.0.0.1:9000/qiyadong/src_pqt, PartitionFilters: [], PushedFilters: [], ReadSchema: struct | 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ ``` Author: Yadong Qi Closes #14913 from watermen/SPARK-17358. --- .../apache/spark/sql/execution/datasources/DataSource.scala | 6 ++---- .../sql/execution/datasources/fileSourceInterfaces.scala | 4 ++-- .../sql/execution/datasources/FileSourceStrategySuite.scala | 3 ++- .../org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | 3 +-- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala index 5968db84cd608..9c99a800cc050 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala @@ -351,13 +351,12 @@ case class DataSource( } HadoopFsRelation( - sparkSession, fileCatalog, partitionSchema = fileCatalog.partitionSpec().partitionColumns, dataSchema = dataSchema, bucketSpec = None, format, - options) + options)(sparkSession) // This is a non-streaming file based datasource. case (format: FileFormat, _) => @@ -409,13 +408,12 @@ case class DataSource( } HadoopFsRelation( - sparkSession, fileCatalog, partitionSchema = fileCatalog.partitionSpec().partitionColumns, dataSchema = dataSchema.asNullable, bucketSpec = bucketSpec, format, - caseInsensitiveOptions) + caseInsensitiveOptions)(sparkSession) case _ => throw new AnalysisException( diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala index e03a2323c7493..7e40c35984062 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala @@ -134,13 +134,13 @@ abstract class OutputWriter { * @param options Configuration used when reading / writing data. 
*/ case class HadoopFsRelation( - sparkSession: SparkSession, location: FileCatalog, partitionSchema: StructType, dataSchema: StructType, bucketSpec: Option[BucketSpec], fileFormat: FileFormat, - options: Map[String, String]) extends BaseRelation with FileRelation { + options: Map[String, String])(val sparkSession: SparkSession) + extends BaseRelation with FileRelation { override def sqlContext: SQLContext = sparkSession.sqlContext diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala index 09fd750180357..45411fa0656cd 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala @@ -508,7 +508,8 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi val bucketed = df.queryExecution.analyzed transform { case l @ LogicalRelation(r: HadoopFsRelation, _, _) => l.copy(relation = - r.copy(bucketSpec = Some(BucketSpec(numBuckets = buckets, "c1" :: Nil, Nil)))) + r.copy(bucketSpec = + Some(BucketSpec(numBuckets = buckets, "c1" :: Nil, Nil)))(r.sparkSession)) } Dataset.ofRows(spark, bucketed) } else { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index d31a8d643ad88..c48d4ed6088b5 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -249,13 +249,12 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log } val relation = HadoopFsRelation( - sparkSession = sparkSession, location = fileCatalog, partitionSchema = partitionSchema, dataSchema = inferredSchema, bucketSpec = bucketSpec, fileFormat = defaultSource, - options = options) + options = options)(sparkSession = sparkSession) val created = LogicalRelation(relation, catalogTable = Some(metastoreRelation.catalogTable)) cachedDataSourceTables.put(tableIdentifier, created) From c0ae6bc6ea38909730fad36e653d3c7ab0a84b44 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Tue, 6 Sep 2016 14:17:47 +0800 Subject: [PATCH 211/270] [SPARK-17361][SQL] file-based external table without path should not be created ## What changes were proposed in this pull request? Using the public `Catalog` API, users can create a file-based data source table without giving the path option. In this case, the table is currently created successfully, but reading it fails. Ideally we should fail during creation. This is because when we create a data source table, we resolve the data source relation without validating the path: `resolveRelation(checkPathExist = false)`. Looking back at why we added this trick (`checkPathExist`): when we call `resolveRelation` for a managed table, we add the path to the data source options even though the path has not been created yet. So why add this not-yet-created path to the data source options at all? This PR fixes the problem by adding the path to the options after we call `resolveRelation`. Then we can remove the `checkPathExist` parameter from `DataSource.resolveRelation` and do some related cleanups. ## How was this patch tested? Existing tests and a new test in `CatalogSuite`. Author: Wenchen Fan Closes #14921 from cloud-fan/check-path.
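For illustration only (not part of this patch), a minimal sketch of the user-facing behaviour exercised by the new `CatalogSuite` test below, assuming a running `SparkSession` named `spark`: creating a file-based external table without a `path` option is now rejected at creation time instead of failing on the first read.

```scala
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.types.{IntegerType, StructType}

// No "path" entry in the options map, so table creation itself should now fail.
try {
  spark.catalog.createExternalTable(
    "tbl",
    "json",
    new StructType().add("i", IntegerType),
    Map.empty[String, String])
} catch {
  case e: AnalysisException =>
    // Expected: "Cannot create a file-based external data source table without path"
    println(e.getMessage)
}
```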
--- .../sql/catalyst/catalog/interface.scala | 4 ++-- .../command/createDataSourceTables.scala | 24 ++++++++++++------- .../spark/sql/execution/command/ddl.scala | 4 ++-- .../spark/sql/execution/command/tables.scala | 2 +- .../execution/datasources/DataSource.scala | 12 ++++------ .../datasources/ListingFileCatalog.scala | 18 +++----------- .../datasources/fileSourceInterfaces.scala | 9 ++----- .../spark/sql/internal/CatalogSuite.scala | 17 +++++++++++++ .../spark/sql/hive/HiveMetastoreCatalog.scala | 2 +- 9 files changed, 48 insertions(+), 44 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala index 79231ee9e378d..e74fa6e638a0b 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala @@ -156,9 +156,9 @@ case class CatalogTable( outputFormat: Option[String] = storage.outputFormat, compressed: Boolean = false, serde: Option[String] = storage.serde, - serdeProperties: Map[String, String] = storage.properties): CatalogTable = { + properties: Map[String, String] = storage.properties): CatalogTable = { copy(storage = CatalogStorageFormat( - locationUri, inputFormat, outputFormat, serde, compressed, serdeProperties)) + locationUri, inputFormat, outputFormat, serde, compressed, properties)) } override def toString: String = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala index c7e327906174c..b1830e6cf3ea8 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala @@ -18,7 +18,6 @@ package org.apache.spark.sql.execution.command import org.apache.spark.sql._ -import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.plans.QueryPlan @@ -56,12 +55,6 @@ case class CreateDataSourceTableCommand(table: CatalogTable, ignoreIfExists: Boo } } - val optionsWithPath = if (table.tableType == CatalogTableType.MANAGED) { - table.storage.properties + ("path" -> sessionState.catalog.defaultTablePath(table.identifier)) - } else { - table.storage.properties - } - // Create the relation to validate the arguments before writing the metadata to the metastore, // and infer the table schema and partition if users didn't specify schema in CREATE TABLE. 
val dataSource: BaseRelation = @@ -70,7 +63,16 @@ case class CreateDataSourceTableCommand(table: CatalogTable, ignoreIfExists: Boo userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema), className = table.provider.get, bucketSpec = table.bucketSpec, - options = optionsWithPath).resolveRelation(checkPathExist = false) + options = table.storage.properties).resolveRelation() + + dataSource match { + case fs: HadoopFsRelation => + if (table.tableType == CatalogTableType.EXTERNAL && fs.location.paths.isEmpty) { + throw new AnalysisException( + "Cannot create a file-based external data source table without path") + } + case _ => + } val partitionColumnNames = if (table.schema.nonEmpty) { table.partitionColumnNames @@ -83,6 +85,12 @@ case class CreateDataSourceTableCommand(table: CatalogTable, ignoreIfExists: Boo } } + val optionsWithPath = if (table.tableType == CatalogTableType.MANAGED) { + table.storage.properties + ("path" -> sessionState.catalog.defaultTablePath(table.identifier)) + } else { + table.storage.properties + } + val newTable = table.copy( storage = table.storage.copy(properties = optionsWithPath), schema = dataSource.schema, diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala index 53fb684eb5ce3..bc1c4f85e3315 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala @@ -318,7 +318,7 @@ case class AlterTableSerDePropertiesCommand( if (partSpec.isEmpty) { val newTable = table.withNewStorage( serde = serdeClassName.orElse(table.storage.serde), - serdeProperties = table.storage.properties ++ serdeProperties.getOrElse(Map())) + properties = table.storage.properties ++ serdeProperties.getOrElse(Map())) catalog.alterTable(newTable) } else { val spec = partSpec.get @@ -669,7 +669,7 @@ case class AlterTableSetLocationCommand( if (DDLUtils.isDatasourceTable(table)) { table.withNewStorage( locationUri = Some(location), - serdeProperties = table.storage.properties ++ Map("path" -> location)) + properties = table.storage.properties ++ Map("path" -> location)) } else { table.withNewStorage(locationUri = Some(location)) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 4e6caae85caeb..027f3588e2922 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -181,7 +181,7 @@ case class AlterTableRenameCommand( if (DDLUtils.isDatasourceTable(table) && table.tableType == CatalogTableType.MANAGED) { val newPath = catalog.defaultTablePath(newTblName) val newTable = table.withNewStorage( - serdeProperties = table.storage.properties ++ Map("path" -> newPath)) + properties = table.storage.properties ++ Map("path" -> newPath)) catalog.alterTable(newTable) } // Invalidate the table last, otherwise uncaching the table would load the logical plan diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala index 9c99a800cc050..71807b771a95f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala +++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala @@ -315,12 +315,8 @@ case class DataSource( /** * Create a resolved [[BaseRelation]] that can be used to read data from or write data into this * [[DataSource]] - * - * @param checkPathExist A flag to indicate whether to check the existence of path or not. - * This flag will be set to false when we create an empty table (the - * path of the table does not exist). */ - def resolveRelation(checkPathExist: Boolean = true): BaseRelation = { + def resolveRelation(): BaseRelation = { val caseInsensitiveOptions = new CaseInsensitiveMap(options) val relation = (providingClass.newInstance(), userSpecifiedSchema) match { // TODO: Throw when too much is given. @@ -367,11 +363,11 @@ case class DataSource( val qualified = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory) val globPath = SparkHadoopUtil.get.globPathIfNecessary(qualified) - if (checkPathExist && globPath.isEmpty) { + if (globPath.isEmpty) { throw new AnalysisException(s"Path does not exist: $qualified") } // Sufficient to check head of the globPath seq for non-glob scenario - if (checkPathExist && !fs.exists(globPath.head)) { + if (!fs.exists(globPath.head)) { throw new AnalysisException(s"Path does not exist: ${globPath.head}") } globPath @@ -391,7 +387,7 @@ case class DataSource( val fileCatalog = new ListingFileCatalog( - sparkSession, globbedPaths, options, partitionSchema, !checkPathExist) + sparkSession, globbedPaths, options, partitionSchema) val dataSchema = userSpecifiedSchema.map { schema => val equality = sparkSession.sessionState.conf.resolver diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ListingFileCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ListingFileCatalog.scala index 706ec6b9b36c7..60742bdbed204 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ListingFileCatalog.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ListingFileCatalog.scala @@ -17,10 +17,7 @@ package org.apache.spark.sql.execution.datasources -import java.io.FileNotFoundException - import scala.collection.mutable -import scala.util.Try import org.apache.hadoop.fs.{FileStatus, LocatedFileStatus, Path} import org.apache.hadoop.mapred.{FileInputFormat, JobConf} @@ -37,16 +34,12 @@ import org.apache.spark.sql.types.StructType * @param paths a list of paths to scan * @param partitionSchema an optional partition schema that will be use to provide types for the * discovered partitions - * @param ignoreFileNotFound if true, return empty file list when encountering a - * [[FileNotFoundException]] in file listing. Note that this is a hack - * for SPARK-16313. We should get rid of this flag in the future. 
*/ class ListingFileCatalog( sparkSession: SparkSession, override val paths: Seq[Path], parameters: Map[String, String], - partitionSchema: Option[StructType], - ignoreFileNotFound: Boolean = false) + partitionSchema: Option[StructType]) extends PartitioningAwareFileCatalog(sparkSession, parameters, partitionSchema) { @volatile private var cachedLeafFiles: mutable.LinkedHashMap[Path, FileStatus] = _ @@ -88,7 +81,7 @@ class ListingFileCatalog( */ def listLeafFiles(paths: Seq[Path]): mutable.LinkedHashSet[FileStatus] = { if (paths.length >= sparkSession.sessionState.conf.parallelPartitionDiscoveryThreshold) { - HadoopFsRelation.listLeafFilesInParallel(paths, hadoopConf, sparkSession, ignoreFileNotFound) + HadoopFsRelation.listLeafFilesInParallel(paths, hadoopConf, sparkSession) } else { // Right now, the number of paths is less than the value of // parallelPartitionDiscoveryThreshold. So, we will list file statues at the driver. @@ -104,12 +97,7 @@ class ListingFileCatalog( logTrace(s"Listing $path on driver") val childStatuses = { - val stats = - try { - fs.listStatus(path) - } catch { - case e: FileNotFoundException if ignoreFileNotFound => Array.empty[FileStatus] - } + val stats = fs.listStatus(path) if (pathFilter != null) stats.filter(f => pathFilter.accept(f.getPath)) else stats } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala index 7e40c35984062..5cc5f32e6e809 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/fileSourceInterfaces.scala @@ -440,8 +440,7 @@ object HadoopFsRelation extends Logging { def listLeafFilesInParallel( paths: Seq[Path], hadoopConf: Configuration, - sparkSession: SparkSession, - ignoreFileNotFound: Boolean): mutable.LinkedHashSet[FileStatus] = { + sparkSession: SparkSession): mutable.LinkedHashSet[FileStatus] = { assert(paths.size >= sparkSession.sessionState.conf.parallelPartitionDiscoveryThreshold) logInfo(s"Listing leaf files and directories in parallel under: ${paths.mkString(", ")}") @@ -462,11 +461,7 @@ object HadoopFsRelation extends Logging { val pathFilter = FileInputFormat.getInputPathFilter(jobConf) paths.map(new Path(_)).flatMap { path => val fs = path.getFileSystem(serializableConfiguration.value) - try { - listLeafFiles(fs, fs.getFileStatus(path), pathFilter) - } catch { - case e: java.io.FileNotFoundException if ignoreFileNotFound => Array.empty[FileStatus] - } + listLeafFiles(fs, fs.getFileStatus(path), pathFilter) } }.map { status => val blockLocations = status match { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala index 8aa81854b22e8..b221eed7b2426 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala @@ -27,6 +27,7 @@ import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo} import org.apache.spark.sql.catalyst.plans.logical.Range import org.apache.spark.sql.test.SharedSQLContext +import org.apache.spark.sql.types.{IntegerType, StructType} /** @@ -305,6 +306,22 @@ class CatalogSuite columnFields.foreach { f => assert(columnString.contains(f.toString)) } } + 
test("createExternalTable should fail if path is not given for file-based data source") { + val e = intercept[AnalysisException] { + spark.catalog.createExternalTable("tbl", "json", Map.empty[String, String]) + } + assert(e.message.contains("Unable to infer schema")) + + val e2 = intercept[AnalysisException] { + spark.catalog.createExternalTable( + "tbl", + "json", + new StructType().add("i", IntegerType), + Map.empty[String, String]) + } + assert(e2.message == "Cannot create a file-based external data source table without path") + } + // TODO: add tests for the rest of them } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index c48d4ed6088b5..8410a2e4a47ca 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -81,7 +81,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log options = table.storage.properties) LogicalRelation( - dataSource.resolveRelation(checkPathExist = true), + dataSource.resolveRelation(), catalogTable = Some(table)) } } From 6f13aa7dfee12b1b301bd10a1050549008ecc67e Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Tue, 6 Sep 2016 16:05:50 +0800 Subject: [PATCH 212/270] [SPARK-17356][SQL] Fix out of memory issue when generating JSON for TreeNode ## What changes were proposed in this pull request? class `org.apache.spark.sql.types.Metadata` is widely used in mllib to store some ml attributes. `Metadata` is commonly stored in `Alias` expression. ``` case class Alias(child: Expression, name: String)( val exprId: ExprId = NamedExpression.newExprId, val qualifier: Option[String] = None, val explicitMetadata: Option[Metadata] = None, override val isGenerated: java.lang.Boolean = false) ``` The `Metadata` can take a big memory footprint since the number of attributes is big ( in scale of million). When `toJSON` is called on `Alias` expression, the `Metadata` will also be converted to a big JSON string. If a plan contains many such kind of `Alias` expressions, it may trigger out of memory error when `toJSON` is called, since converting all `Metadata` references to JSON will take huge memory. With this PR, we will skip scanning Metadata when doing JSON conversion. For a reproducer of the OOM, and analysis, please look at jira https://issues.apache.org/jira/browse/SPARK-17356. ## How was this patch tested? Existing tests. Author: Sean Zhong Closes #14915 from clockfly/json_oom. --- .../org/apache/spark/sql/catalyst/trees/TreeNode.scala | 4 +++- .../test/scala/org/apache/spark/sql/QueryTest.scala | 10 +++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala index 037f8cb2873bf..893af5146c5b3 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala @@ -618,7 +618,9 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product { case s: String => JString(s) case u: UUID => JString(u.toString) case dt: DataType => dt.jsonValue - case m: Metadata => m.jsonValue + // SPARK-17356: In usage of mllib, Metadata may store a huge vector of data, transforming + // it to JSON may trigger OutOfMemoryError. 
+ case m: Metadata => Metadata.empty.jsonValue case s: StorageLevel => ("useDisk" -> s.useDisk) ~ ("useMemory" -> s.useMemory) ~ ("useOffHeap" -> s.useOffHeap) ~ ("deserialized" -> s.deserialized) ~ ("replication" -> s.replication) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala index c7af40227d45f..d361f61764d1f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala @@ -33,7 +33,7 @@ import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression import org.apache.spark.sql.execution.columnar.InMemoryRelation import org.apache.spark.sql.execution.datasources.LogicalRelation import org.apache.spark.sql.execution.streaming.MemoryPlan -import org.apache.spark.sql.types.ObjectType +import org.apache.spark.sql.types.{Metadata, ObjectType} abstract class QueryTest extends PlanTest { @@ -274,6 +274,14 @@ abstract class QueryTest extends PlanTest { val normalized1 = logicalPlan.transformAllExpressions { case udf: ScalaUDF => udf.copy(function = null) case gen: UserDefinedGenerator => gen.copy(function = null) + // After SPARK-17356: the JSON representation no longer has the Metadata. We need to remove + // the Metadata from the normalized plan so that we can compare this plan with the + // JSON-deserialzed plan. + case a @ Alias(child, name) if a.explicitMetadata.isDefined => + Alias(child, name)(a.exprId, a.qualifier, Some(Metadata.empty), a.isGenerated) + case a: AttributeReference if a.metadata != Metadata.empty => + AttributeReference(a.name, a.dataType, a.nullable, Metadata.empty)(a.exprId, a.qualifier, + a.isGenerated) } // RDDs/data are not serializable to JSON, so we need to collect LogicalPlans that contains From 39d538dddf7d44bf4603c966d0f7b2c92f1e951a Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Tue, 6 Sep 2016 03:30:37 -0700 Subject: [PATCH 213/270] [MINOR][ML] Correct weights doc of MultilayerPerceptronClassificationModel. ## What changes were proposed in this pull request? ```weights``` of ```MultilayerPerceptronClassificationModel``` should be the output weights of layers rather than initial weights, this PR correct it. ## How was this patch tested? Doc change. Author: Yanbo Liang Closes #14967 from yanboliang/mlp-weights. 
--- .../ml/classification/MultilayerPerceptronClassifier.scala | 2 +- python/pyspark/ml/classification.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala index 7264a99b47eb1..88fe7cb4a6e0f 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala @@ -288,7 +288,7 @@ object MultilayerPerceptronClassifier * * @param uid uid * @param layers array of layer sizes including input and output layers - * @param weights vector of initial weights for the model that consists of the weights of layers + * @param weights the weights of layers * @return prediction model */ @Since("1.5.0") diff --git a/python/pyspark/ml/classification.py b/python/pyspark/ml/classification.py index d1522d78faa0b..b4c01fd5c4ffb 100644 --- a/python/pyspark/ml/classification.py +++ b/python/pyspark/ml/classification.py @@ -1276,7 +1276,7 @@ def layers(self): @since("2.0.0") def weights(self): """ - vector of initial weights for the model that consists of the weights of layers. + the weights of layers. """ return self._call_java("weights") From bc2767df2666ff615e7f44e980555afab06dd8a3 Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Tue, 6 Sep 2016 22:20:55 +0800 Subject: [PATCH 214/270] [SPARK-17374][SQL] Better error messages when parsing JSON using DataFrameReader ## What changes were proposed in this pull request? This PR adds better error messages for malformed record when reading a JSON file using DataFrameReader. For example, for query: ``` import org.apache.spark.sql.types._ val corruptRecords = spark.sparkContext.parallelize("""{"a":{, b:3}""" :: Nil) val schema = StructType(StructField("a", StringType, true) :: Nil) val jsonDF = spark.read.schema(schema).json(corruptRecords) ``` **Before change:** We silently replace corrupted line with null ``` scala> jsonDF.show +----+ | a| +----+ |null| +----+ ``` **After change:** Add an explicit warning message: ``` scala> jsonDF.show 16/09/02 14:43:16 WARN JacksonParser: Found at least one malformed records (sample: {"a":{, b:3}). The JSON reader will replace all malformed records with placeholder null in current PERMISSIVE parser mode. To find out which corrupted records have been replaced with null, please use the default inferred schema instead of providing a custom schema. Code example to print all malformed records (scala): =================================================== // The corrupted record exists in column _corrupt_record. val parsedJson = spark.read.json("/path/to/json/file/test.json") +----+ | a| +----+ |null| +----+ ``` ### ## How was this patch tested? Unit test. Author: Sean Zhong Closes #14929 from clockfly/logwarning_if_schema_not_contain_corrupted_record. 
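As a hedged usage note (not part of this patch): a caller who wants to keep the malformed lines instead of getting placeholder nulls can include the corrupt-record column in the user-supplied schema, using the name configured by `spark.sql.columnNameOfCorruptRecord` (default `_corrupt_record`). A minimal sketch, assuming a `SparkSession` named `spark` and the same example file path as above:

```scala
import org.apache.spark.sql.types.{StringType, StructField, StructType}

val schema = StructType(
  StructField("a", StringType, nullable = true) ::
  StructField("_corrupt_record", StringType, nullable = true) :: Nil)

val parsed = spark.read
  .schema(schema)
  .json("/path/to/json/file/test.json")

// Malformed lines are kept verbatim in _corrupt_record instead of becoming all-null rows.
parsed.filter(parsed("_corrupt_record").isNotNull).show(false)
```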
--- .../datasources/json/JacksonParser.scala | 39 ++++++++++++++++++- .../datasources/json/JsonSuite.scala | 29 +++++++++++++- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala index 359a3e2aa8ad2..5ce1bf7432159 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala @@ -28,6 +28,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.util._ +import org.apache.spark.sql.execution.datasources.ParseModes.{DROP_MALFORMED_MODE, PERMISSIVE_MODE} import org.apache.spark.sql.execution.datasources.json.JacksonUtils.nextUntil import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.UTF8String @@ -52,6 +53,11 @@ class JacksonParser( private val factory = new JsonFactory() options.setJacksonOptions(factory) + private val emptyRow: Seq[InternalRow] = Seq(new GenericInternalRow(schema.length)) + + @transient + private[this] var isWarningPrintedForMalformedRecord: Boolean = false + /** * This function deals with the cases it fails to parse. This function will be called * when exceptions are caught during converting. This functions also deals with `mode` option. @@ -62,8 +68,39 @@ class JacksonParser( throw new RuntimeException(s"Malformed line in FAILFAST mode: $record") } if (options.dropMalformed) { - logWarning(s"Dropping malformed line: $record") + if (!isWarningPrintedForMalformedRecord) { + logWarning( + s"""Found at least one malformed records (sample: $record). The JSON reader will drop + |all malformed records in current $DROP_MALFORMED_MODE parser mode. To find out which + |corrupted records have been dropped, please switch the parser mode to $PERMISSIVE_MODE + |mode and use the default inferred schema. + | + |Code example to print all malformed records (scala): + |=================================================== + |// The corrupted record exists in column ${columnNameOfCorruptRecord} + |val parsedJson = spark.read.json("/path/to/json/file/test.json") + | + """.stripMargin) + isWarningPrintedForMalformedRecord = true + } Nil + } else if (schema.getFieldIndex(columnNameOfCorruptRecord).isEmpty) { + if (!isWarningPrintedForMalformedRecord) { + logWarning( + s"""Found at least one malformed records (sample: $record). The JSON reader will replace + |all malformed records with placeholder null in current $PERMISSIVE_MODE parser mode. + |To find out which corrupted records have been replaced with null, please use the + |default inferred schema instead of providing a custom schema. + | + |Code example to print all malformed records (scala): + |=================================================== + |// The corrupted record exists in column ${columnNameOfCorruptRecord}. 
+ |val parsedJson = spark.read.json("/path/to/json/file/test.json") + | + """.stripMargin) + isWarningPrintedForMalformedRecord = true + } + emptyRow } else { val row = new GenericMutableRow(schema.length) for (corruptIndex <- schema.getFieldIndex(columnNameOfCorruptRecord)) { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala index 63a9061210ca6..3d533c14e18e7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala @@ -1081,7 +1081,34 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData { assert(jsonDFTwo.schema === schemaTwo) } - test("Corrupt records: PERMISSIVE mode") { + test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") { + withTempView("jsonTable") { + val schema = StructType( + StructField("a", StringType, true) :: + StructField("b", StringType, true) :: + StructField("c", StringType, true) :: Nil) + + val jsonDF = spark.read.schema(schema).json(corruptRecords) + jsonDF.createOrReplaceTempView("jsonTable") + + checkAnswer( + sql( + """ + |SELECT a, b, c + |FROM jsonTable + """.stripMargin), + Seq( + // Corrupted records are replaced with null + Row(null, null, null), + Row(null, null, null), + Row(null, null, null), + Row("str_a_4", "str_b_4", "str_c_4"), + Row(null, null, null)) + ) + } + } + + test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") { // Test if we can query corrupt records. withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") { withTempView("jsonTable") { From f7e26d788757f917b32749856bb29feb7b4c2987 Mon Sep 17 00:00:00 2001 From: Davies Liu Date: Tue, 6 Sep 2016 10:46:31 -0700 Subject: [PATCH 215/270] [SPARK-16922] [SPARK-17211] [SQL] make the address of values portable in LongToUnsafeRowMap ## What changes were proposed in this pull request? In LongToUnsafeRowMap, we use offset of a value as pointer, stored in a array also in the page for chained values. The offset is not portable, because Platform.LONG_ARRAY_OFFSET will be different with different JVM Heap size, then the deserialized LongToUnsafeRowMap will be corrupt. This PR will change to use portable address (without Platform.LONG_ARRAY_OFFSET). ## How was this patch tested? Added a test case with random generated keys, to improve the coverage. But this test is not a regression test, that could require a Spark cluster that have at least 32G heap in driver or executor. Author: Davies Liu Closes #14927 from davies/longmap. 
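For readers of the `HashedRelation` diff below, a standalone sketch (illustration only; the constants and names are assumptions, not the exact ones in `LongToUnsafeRowMap`) of the portable encoding: the packed long stores a page-relative offset plus the value size, and the JVM-specific `Platform.LONG_ARRAY_OFFSET` is only re-added when the address is decoded locally, so a map serialized on one JVM decodes correctly on another.

```scala
// Illustration only: the bit widths below are assumed for the sketch.
object PortableAddressSketch {
  private val SizeBits = 28                 // assumed width of the size field
  private val SizeMask = (1L << SizeBits) - 1

  // Pack a page-relative offset and a size into one long (no JVM-specific base inside).
  def toAddress(relativeOffset: Long, size: Int): Long =
    (relativeOffset << SizeBits) | size

  // Decode on the reading JVM, re-adding that JVM's own array base offset.
  def toOffset(address: Long, arrayBaseOffset: Long): Long =
    (address >>> SizeBits) + arrayBaseOffset

  def toSize(address: Long): Int = (address & SizeMask).toInt

  def main(args: Array[String]): Unit = {
    val addr = toAddress(relativeOffset = 1024L, size = 48)
    assert(toOffset(addr, arrayBaseOffset = 16L) == 1040L)
    assert(toSize(addr) == 48)
  }
}
```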
--- .../sql/execution/joins/HashedRelation.scala | 27 ++++++--- .../execution/joins/HashedRelationSuite.scala | 56 +++++++++++++++++++ 2 files changed, 75 insertions(+), 8 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala index 08975733ff5d1..8821c0dea9ee5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala @@ -447,10 +447,20 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap */ private def nextSlot(pos: Int): Int = (pos + 2) & mask + private[this] def toAddress(offset: Long, size: Int): Long = { + ((offset - Platform.LONG_ARRAY_OFFSET) << SIZE_BITS) | size + } + + private[this] def toOffset(address: Long): Long = { + (address >>> SIZE_BITS) + Platform.LONG_ARRAY_OFFSET + } + + private[this] def toSize(address: Long): Int = { + (address & SIZE_MASK).toInt + } + private def getRow(address: Long, resultRow: UnsafeRow): UnsafeRow = { - val offset = address >>> SIZE_BITS - val size = address & SIZE_MASK - resultRow.pointTo(page, offset, size.toInt) + resultRow.pointTo(page, toOffset(address), toSize(address)) resultRow } @@ -485,9 +495,9 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var addr = address override def hasNext: Boolean = addr != 0 override def next(): UnsafeRow = { - val offset = addr >>> SIZE_BITS - val size = addr & SIZE_MASK - resultRow.pointTo(page, offset, size.toInt) + val offset = toOffset(addr) + val size = toSize(addr) + resultRow.pointTo(page, offset, size) addr = Platform.getLong(page, offset + size) resultRow } @@ -554,7 +564,7 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap Platform.putLong(page, cursor, 0) cursor += 8 numValues += 1 - updateIndex(key, (offset.toLong << SIZE_BITS) | row.getSizeInBytes) + updateIndex(key, toAddress(offset, row.getSizeInBytes)) } /** @@ -562,6 +572,7 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap */ private def updateIndex(key: Long, address: Long): Unit = { var pos = firstSlot(key) + assert(numKeys < array.length / 2) while (array(pos) != key && array(pos + 1) != 0) { pos = nextSlot(pos) } @@ -582,7 +593,7 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap } } else { // there are some values for this key, put the address in the front of them. 
- val pointer = (address >>> SIZE_BITS) + (address & SIZE_MASK) + val pointer = toOffset(address) + toSize(address) Platform.putLong(page, pointer, array(pos + 1)) array(pos + 1) = address } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala index 1196f5ec7b3a8..ede63fea9606f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala @@ -19,6 +19,8 @@ package org.apache.spark.sql.execution.joins import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream} +import scala.util.Random + import org.apache.spark.{SparkConf, SparkFunSuite} import org.apache.spark.memory.{StaticMemoryManager, TaskMemoryManager} import org.apache.spark.serializer.KryoSerializer @@ -197,6 +199,60 @@ class HashedRelationSuite extends SparkFunSuite with SharedSQLContext { } } + test("LongToUnsafeRowMap with random keys") { + val taskMemoryManager = new TaskMemoryManager( + new StaticMemoryManager( + new SparkConf().set("spark.memory.offHeap.enabled", "false"), + Long.MaxValue, + Long.MaxValue, + 1), + 0) + val unsafeProj = UnsafeProjection.create(Seq(BoundReference(0, LongType, false))) + + val N = 1000000 + val rand = new Random + val keys = (0 to N).map(x => rand.nextLong()).toArray + + val map = new LongToUnsafeRowMap(taskMemoryManager, 10) + keys.foreach { k => + map.append(k, unsafeProj(InternalRow(k))) + } + map.optimize() + + val os = new ByteArrayOutputStream() + val out = new ObjectOutputStream(os) + map.writeExternal(out) + out.flush() + val in = new ObjectInputStream(new ByteArrayInputStream(os.toByteArray)) + val map2 = new LongToUnsafeRowMap(taskMemoryManager, 1) + map2.readExternal(in) + + val row = unsafeProj(InternalRow(0L)).copy() + keys.foreach { k => + val r = map2.get(k, row) + assert(r.hasNext) + var c = 0 + while (r.hasNext) { + val rr = r.next() + assert(rr.getLong(0) === k) + c += 1 + } + } + var i = 0 + while (i < N * 10) { + val k = rand.nextLong() + val r = map2.get(k, row) + if (r != null) { + assert(r.hasNext) + while (r.hasNext) { + assert(r.next().getLong(0) === k) + } + } + i += 1 + } + map.free() + } + test("Spark-14521") { val ser = new KryoSerializer( (new SparkConf).set("spark.kryo.referenceTracking", "false")).newInstance() From 6c08dbf683875ff1ba724447e0531f673bcff8ba Mon Sep 17 00:00:00 2001 From: Adam Roberts Date: Tue, 6 Sep 2016 22:13:25 +0100 Subject: [PATCH 216/270] [SPARK-17378][BUILD] Upgrade snappy-java to 1.1.2.6 ## What changes were proposed in this pull request? Upgrades the Snappy version to 1.1.2.6 from 1.1.2.4, release notes: https://github.com/xerial/snappy-java/blob/master/Milestone.md mention "Fix a bug in SnappyInputStream when reading compressed data that happened to have the same first byte with the stream magic header (#142)" ## How was this patch tested? Existing unit tests using the latest IBM Java 8 on Intel, Power and Z architectures (little and big-endian) Author: Adam Roberts Closes #14958 from a-roberts/master. 
--- dev/deps/spark-deps-hadoop-2.2 | 2 +- dev/deps/spark-deps-hadoop-2.3 | 2 +- dev/deps/spark-deps-hadoop-2.4 | 2 +- dev/deps/spark-deps-hadoop-2.6 | 2 +- dev/deps/spark-deps-hadoop-2.7 | 2 +- pom.xml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev/deps/spark-deps-hadoop-2.2 b/dev/deps/spark-deps-hadoop-2.2 index eaed0889ac36f..81adde6a13a14 100644 --- a/dev/deps/spark-deps-hadoop-2.2 +++ b/dev/deps/spark-deps-hadoop-2.2 @@ -152,7 +152,7 @@ shapeless_2.11-2.0.0.jar slf4j-api-1.7.16.jar slf4j-log4j12-1.7.16.jar snappy-0.2.jar -snappy-java-1.1.2.4.jar +snappy-java-1.1.2.6.jar spire-macros_2.11-0.7.4.jar spire_2.11-0.7.4.jar stax-api-1.0.1.jar diff --git a/dev/deps/spark-deps-hadoop-2.3 b/dev/deps/spark-deps-hadoop-2.3 index d68a7f462ba7f..75ab6286dec3c 100644 --- a/dev/deps/spark-deps-hadoop-2.3 +++ b/dev/deps/spark-deps-hadoop-2.3 @@ -159,7 +159,7 @@ shapeless_2.11-2.0.0.jar slf4j-api-1.7.16.jar slf4j-log4j12-1.7.16.jar snappy-0.2.jar -snappy-java-1.1.2.4.jar +snappy-java-1.1.2.6.jar spire-macros_2.11-0.7.4.jar spire_2.11-0.7.4.jar stax-api-1.0-2.jar diff --git a/dev/deps/spark-deps-hadoop-2.4 b/dev/deps/spark-deps-hadoop-2.4 index 346f19767d367..897d802a9d6a1 100644 --- a/dev/deps/spark-deps-hadoop-2.4 +++ b/dev/deps/spark-deps-hadoop-2.4 @@ -159,7 +159,7 @@ shapeless_2.11-2.0.0.jar slf4j-api-1.7.16.jar slf4j-log4j12-1.7.16.jar snappy-0.2.jar -snappy-java-1.1.2.4.jar +snappy-java-1.1.2.6.jar spire-macros_2.11-0.7.4.jar spire_2.11-0.7.4.jar stax-api-1.0-2.jar diff --git a/dev/deps/spark-deps-hadoop-2.6 b/dev/deps/spark-deps-hadoop-2.6 index 6f4695f345a48..f95ddb1c3065d 100644 --- a/dev/deps/spark-deps-hadoop-2.6 +++ b/dev/deps/spark-deps-hadoop-2.6 @@ -167,7 +167,7 @@ shapeless_2.11-2.0.0.jar slf4j-api-1.7.16.jar slf4j-log4j12-1.7.16.jar snappy-0.2.jar -snappy-java-1.1.2.4.jar +snappy-java-1.1.2.6.jar spire-macros_2.11-0.7.4.jar spire_2.11-0.7.4.jar stax-api-1.0-2.jar diff --git a/dev/deps/spark-deps-hadoop-2.7 b/dev/deps/spark-deps-hadoop-2.7 index 7a86a8bd88846..8df02c032bf21 100644 --- a/dev/deps/spark-deps-hadoop-2.7 +++ b/dev/deps/spark-deps-hadoop-2.7 @@ -168,7 +168,7 @@ shapeless_2.11-2.0.0.jar slf4j-api-1.7.16.jar slf4j-log4j12-1.7.16.jar snappy-0.2.jar -snappy-java-1.1.2.4.jar +snappy-java-1.1.2.6.jar spire-macros_2.11-0.7.4.jar spire_2.11-0.7.4.jar stax-api-1.0-2.jar diff --git a/pom.xml b/pom.xml index 2c265c1fa325e..e6c28977ca78d 100644 --- a/pom.xml +++ b/pom.xml @@ -159,7 +159,7 @@ 2.11 1.9.13 2.6.5 - 1.1.2.4 + 1.1.2.6 1.1.2 1.2.0-incubating 1.10 From 7775d9f224e22400c6c8c093652a383f4af66ee0 Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Tue, 6 Sep 2016 22:18:28 +0100 Subject: [PATCH 217/270] [SPARK-17299] TRIM/LTRIM/RTRIM should not strips characters other than spaces ## What changes were proposed in this pull request? TRIM/LTRIM/RTRIM should not strips characters other than spaces, we were trimming all chars small than ASCII 0x20(space) ## How was this patch tested? fixed existing tests. Author: Sandeep Singh Closes #14924 from techaddict/SPARK-17299. 
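A small sketch of the behavioural difference (illustration only, assuming the `spark-unsafe` artifact is on the classpath): after the change only the space character (0x20) is stripped, so control characters such as tab survive trimming.

```scala
import org.apache.spark.unsafe.types.UTF8String

object TrimSketch {
  def main(args: Array[String]): Unit = {
    val s = UTF8String.fromString("  hello\t  ")
    // Before the fix every character <= 0x20 was stripped, yielding "hello";
    // now only the spaces are removed and the tab is kept.
    println("[" + s.trim() + "]")       // tab preserved: [hello<TAB>]
    println("[" + s.trimLeft() + "]")   // [hello<TAB>  ]
    println("[" + s.trimRight() + "]")  // [  hello<TAB>]
  }
}
```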
--- .../java/org/apache/spark/unsafe/types/UTF8String.java | 8 ++++---- .../org/apache/spark/unsafe/types/UTF8StringSuite.java | 10 ++++++++++ .../unsafe/types/UTF8StringPropertyCheckSuite.scala | 8 ++++---- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java index dc03d893a5361..e09a6b7d93a93 100644 --- a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java +++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/UTF8String.java @@ -465,9 +465,9 @@ public UTF8String trim() { int s = 0; int e = this.numBytes - 1; // skip all of the space (0x20) in the left side - while (s < this.numBytes && getByte(s) <= 0x20 && getByte(s) >= 0x00) s++; + while (s < this.numBytes && getByte(s) == 0x20) s++; // skip all of the space (0x20) in the right side - while (e >= 0 && getByte(e) <= 0x20 && getByte(e) >= 0x00) e--; + while (e >= 0 && getByte(e) == 0x20) e--; if (s > e) { // empty string return EMPTY_UTF8; @@ -479,7 +479,7 @@ public UTF8String trim() { public UTF8String trimLeft() { int s = 0; // skip all of the space (0x20) in the left side - while (s < this.numBytes && getByte(s) <= 0x20 && getByte(s) >= 0x00) s++; + while (s < this.numBytes && getByte(s) == 0x20) s++; if (s == this.numBytes) { // empty string return EMPTY_UTF8; @@ -491,7 +491,7 @@ public UTF8String trimLeft() { public UTF8String trimRight() { int e = numBytes - 1; // skip all of the space (0x20) in the right side - while (e >= 0 && getByte(e) <= 0x20 && getByte(e) >= 0x00) e--; + while (e >= 0 && getByte(e) == 0x20) e--; if (e < 0) { // empty string diff --git a/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java b/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java index d4160ad029eb3..7f03686dcec41 100644 --- a/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java +++ b/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java @@ -232,6 +232,16 @@ public void trims() { assertEquals(fromString("数据砖头"), fromString("数据砖头").trim()); assertEquals(fromString("数据砖头"), fromString("数据砖头").trimLeft()); assertEquals(fromString("数据砖头"), fromString("数据砖头").trimRight()); + + char[] charsLessThan0x20 = new char[10]; + Arrays.fill(charsLessThan0x20, (char)(' ' - 1)); + String stringStartingWithSpace = + new String(charsLessThan0x20) + "hello" + new String(charsLessThan0x20); + assertEquals(fromString(stringStartingWithSpace), fromString(stringStartingWithSpace).trim()); + assertEquals(fromString(stringStartingWithSpace), + fromString(stringStartingWithSpace).trimLeft()); + assertEquals(fromString(stringStartingWithSpace), + fromString(stringStartingWithSpace).trimRight()); } @Test diff --git a/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala b/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala index 8a6b9e3e4536d..62d4176d00f94 100644 --- a/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala +++ b/common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala @@ -98,7 +98,7 @@ class UTF8StringPropertyCheckSuite extends FunSuite with GeneratorDrivenProperty } } - val whitespaceChar: Gen[Char] = Gen.choose(0x00, 0x20).map(_.toChar) + val whitespaceChar: Gen[Char] = Gen.const(0x20.toChar) val 
whitespaceString: Gen[String] = Gen.listOf(whitespaceChar).map(_.mkString) val randomString: Gen[String] = Arbitrary.arbString.arbitrary @@ -107,7 +107,7 @@ class UTF8StringPropertyCheckSuite extends FunSuite with GeneratorDrivenProperty def lTrim(s: String): String = { var st = 0 val array: Array[Char] = s.toCharArray - while ((st < s.length) && (array(st) <= ' ')) { + while ((st < s.length) && (array(st) == ' ')) { st += 1 } if (st > 0) s.substring(st, s.length) else s @@ -115,7 +115,7 @@ class UTF8StringPropertyCheckSuite extends FunSuite with GeneratorDrivenProperty def rTrim(s: String): String = { var len = s.length val array: Array[Char] = s.toCharArray - while ((len > 0) && (array(len - 1) <= ' ')) { + while ((len > 0) && (array(len - 1) == ' ')) { len -= 1 } if (len < s.length) s.substring(0, len) else s @@ -127,7 +127,7 @@ class UTF8StringPropertyCheckSuite extends FunSuite with GeneratorDrivenProperty whitespaceString ) { (start: String, middle: String, end: String) => val s = start + middle + end - assert(toUTF8(s).trim() === toUTF8(s.trim())) + assert(toUTF8(s).trim() === toUTF8(rTrim(lTrim(s)))) assert(toUTF8(s).trimLeft() === toUTF8(lTrim(s))) assert(toUTF8(s).trimRight() === toUTF8(rTrim(s))) } From 8bbb08a3001313b53a5d854ef442d82d4c7e1d5d Mon Sep 17 00:00:00 2001 From: Zheng RuiFeng Date: Tue, 6 Sep 2016 14:20:56 -0700 Subject: [PATCH 218/270] [MINOR] Remove unnecessary check in MLSerDe ## What changes were proposed in this pull request? 1, remove unnecessary `require()`, because it will make following check useless. 2, update the error msg. ## How was this patch tested? no test Author: Zheng RuiFeng Closes #14972 from zhengruifeng/del_unnecessary_check. --- .../main/scala/org/apache/spark/ml/python/MLSerDe.scala | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/python/MLSerDe.scala b/mllib/src/main/scala/org/apache/spark/ml/python/MLSerDe.scala index 1279c901c5c9e..4b805e145482a 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/python/MLSerDe.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/python/MLSerDe.scala @@ -56,9 +56,8 @@ private[spark] object MLSerDe extends SerDeBase with Serializable { } def construct(args: Array[Object]): Object = { - require(args.length == 1) if (args.length != 1) { - throw new PickleException("should be 1") + throw new PickleException("length of args should be 1") } val bytes = getBytes(args(0)) val bb = ByteBuffer.wrap(bytes, 0, bytes.length) @@ -95,7 +94,7 @@ private[spark] object MLSerDe extends SerDeBase with Serializable { def construct(args: Array[Object]): Object = { if (args.length != 4) { - throw new PickleException("should be 4") + throw new PickleException("length of args should be 4") } val bytes = getBytes(args(2)) val n = bytes.length / 8 @@ -143,7 +142,7 @@ private[spark] object MLSerDe extends SerDeBase with Serializable { def construct(args: Array[Object]): Object = { if (args.length != 6) { - throw new PickleException("should be 6") + throw new PickleException("length of args should be 6") } val order = ByteOrder.nativeOrder() val colPtrsBytes = getBytes(args(2)) @@ -187,7 +186,7 @@ private[spark] object MLSerDe extends SerDeBase with Serializable { def construct(args: Array[Object]): Object = { if (args.length != 3) { - throw new PickleException("should be 3") + throw new PickleException("length of args should be 3") } val size = args(0).asInstanceOf[Int] val indiceBytes = getBytes(args(1)) From 29cfab3f1524c5690be675d24dda0a9a1806d6ff Mon Sep 17 
00:00:00 2001 From: Josh Rosen Date: Tue, 6 Sep 2016 15:07:28 -0700 Subject: [PATCH 219/270] [SPARK-17110] Fix StreamCorruptionException in BlockManager.getRemoteValues() ## What changes were proposed in this pull request? This patch fixes a `java.io.StreamCorruptedException` error affecting remote reads of cached values when certain data types are used. The problem stems from #11801 / SPARK-13990, a patch to have Spark automatically pick the "best" serializer when caching RDDs. If PySpark cached a PythonRDD, then this would be cached as an `RDD[Array[Byte]]` and the automatic serializer selection would pick KryoSerializer for replication and block transfer. However, the `getRemoteValues()` / `getRemoteBytes()` code path did not pass proper class tags in order to enable the same serializer to be used during deserialization, causing Java to be inappropriately used instead of Kryo, leading to the StreamCorruptedException. We already fixed a similar bug in #14311, which dealt with similar issues in block replication. Prior to that patch, it seems that we had no tests to ensure that block replication actually succeeded. Similarly, prior to this bug fix patch it looks like we had no tests to perform remote reads of cached data, which is why this bug was able to remain latent for so long. This patch addresses the bug by modifying `BlockManager`'s `get()` and `getRemoteValues()` methods to accept ClassTags, allowing the proper class tag to be threaded in the `getOrElseUpdate` code path (which is used by `rdd.iterator`) ## How was this patch tested? Extended the caching tests in `DistributedSuite` to exercise the `getRemoteValues` path, plus manual testing to verify that the PySpark bug reproduction in SPARK-17110 is fixed. Author: Josh Rosen Closes #14952 from JoshRosen/SPARK-17110. --- .../scala/org/apache/spark/rdd/BlockRDD.scala | 2 +- .../spark/serializer/SerializerManager.scala | 7 ++++--- .../org/apache/spark/storage/BlockManager.scala | 15 ++++++++------- .../scala/org/apache/spark/DistributedSuite.scala | 6 ++++-- .../rdd/WriteAheadLogBackedBlockRDD.scala | 5 +++-- .../streaming/ReceivedBlockHandlerSuite.scala | 3 ++- 6 files changed, 22 insertions(+), 16 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala b/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala index 63d1d1767a8cb..d47b75544fdba 100644 --- a/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala @@ -44,7 +44,7 @@ class BlockRDD[T: ClassTag](sc: SparkContext, @transient val blockIds: Array[Blo assertValid() val blockManager = SparkEnv.get.blockManager val blockId = split.asInstanceOf[BlockRDDPartition].blockId - blockManager.get(blockId) match { + blockManager.get[T](blockId) match { case Some(block) => block.data.asInstanceOf[Iterator[T]] case None => throw new Exception("Could not compute split, block " + blockId + " not found") diff --git a/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala b/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala index 7b1ec6fcbbbf6..2156d576f1874 100644 --- a/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala +++ b/core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala @@ -180,11 +180,12 @@ private[spark] class SerializerManager(defaultSerializer: Serializer, conf: Spar * Deserializes an InputStream into an iterator of values and disposes of it when the end of * the iterator is reached. 
*/ - def dataDeserializeStream[T: ClassTag]( + def dataDeserializeStream[T]( blockId: BlockId, - inputStream: InputStream): Iterator[T] = { + inputStream: InputStream) + (classTag: ClassTag[T]): Iterator[T] = { val stream = new BufferedInputStream(inputStream) - getSerializer(implicitly[ClassTag[T]]) + getSerializer(classTag) .newInstance() .deserializeStream(wrapStream(blockId, stream)) .asIterator.asInstanceOf[Iterator[T]] diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index c72f28e00cdbc..0614646771bd0 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -520,10 +520,11 @@ private[spark] class BlockManager( * * This does not acquire a lock on this block in this JVM. */ - private def getRemoteValues(blockId: BlockId): Option[BlockResult] = { + private def getRemoteValues[T: ClassTag](blockId: BlockId): Option[BlockResult] = { + val ct = implicitly[ClassTag[T]] getRemoteBytes(blockId).map { data => val values = - serializerManager.dataDeserializeStream(blockId, data.toInputStream(dispose = true)) + serializerManager.dataDeserializeStream(blockId, data.toInputStream(dispose = true))(ct) new BlockResult(values, DataReadMethod.Network, data.size) } } @@ -602,13 +603,13 @@ private[spark] class BlockManager( * any locks if the block was fetched from a remote block manager. The read lock will * automatically be freed once the result's `data` iterator is fully consumed. */ - def get(blockId: BlockId): Option[BlockResult] = { + def get[T: ClassTag](blockId: BlockId): Option[BlockResult] = { val local = getLocalValues(blockId) if (local.isDefined) { logInfo(s"Found block $blockId locally") return local } - val remote = getRemoteValues(blockId) + val remote = getRemoteValues[T](blockId) if (remote.isDefined) { logInfo(s"Found block $blockId remotely") return remote @@ -660,7 +661,7 @@ private[spark] class BlockManager( makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = { // Attempt to read the block from local or remote storage. If it's present, then we don't need // to go through the local-get-or-put path. - get(blockId) match { + get[T](blockId)(classTag) match { case Some(block) => return Left(block) case _ => @@ -1204,8 +1205,8 @@ private[spark] class BlockManager( /** * Read a block consisting of a single object. 
*/ - def getSingle(blockId: BlockId): Option[Any] = { - get(blockId).map(_.data.next()) + def getSingle[T: ClassTag](blockId: BlockId): Option[T] = { + get[T](blockId).map(_.data.next().asInstanceOf[T]) } /** diff --git a/core/src/test/scala/org/apache/spark/DistributedSuite.scala b/core/src/test/scala/org/apache/spark/DistributedSuite.scala index 4ee0e00fde506..4e36adc8baf3f 100644 --- a/core/src/test/scala/org/apache/spark/DistributedSuite.scala +++ b/core/src/test/scala/org/apache/spark/DistributedSuite.scala @@ -170,10 +170,12 @@ class DistributedSuite extends SparkFunSuite with Matchers with LocalSparkContex blockManager.master.getLocations(blockId).foreach { cmId => val bytes = blockTransfer.fetchBlockSync(cmId.host, cmId.port, cmId.executorId, blockId.toString) - val deserialized = serializerManager.dataDeserializeStream[Int](blockId, - new ChunkedByteBuffer(bytes.nioByteBuffer()).toInputStream()).toList + val deserialized = serializerManager.dataDeserializeStream(blockId, + new ChunkedByteBuffer(bytes.nioByteBuffer()).toInputStream())(data.elementClassTag).toList assert(deserialized === (1 to 100).toList) } + // This will exercise the getRemoteBytes / getRemoteValues code paths: + assert(blockIds.flatMap(id => blockManager.get[Int](id).get.data).toSet === (1 to 1000).toSet) } Seq( diff --git a/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala b/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala index 53fccd8d5e6ed..0b2ec298132ad 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala @@ -120,7 +120,7 @@ class WriteAheadLogBackedBlockRDD[T: ClassTag]( val blockId = partition.blockId def getBlockFromBlockManager(): Option[Iterator[T]] = { - blockManager.get(blockId).map(_.data.asInstanceOf[Iterator[T]]) + blockManager.get[T](blockId).map(_.data.asInstanceOf[Iterator[T]]) } def getBlockFromWriteAheadLog(): Iterator[T] = { @@ -163,7 +163,8 @@ class WriteAheadLogBackedBlockRDD[T: ClassTag]( dataRead.rewind() } serializerManager - .dataDeserializeStream(blockId, new ChunkedByteBuffer(dataRead).toInputStream()) + .dataDeserializeStream( + blockId, new ChunkedByteBuffer(dataRead).toInputStream())(elementClassTag) .asInstanceOf[Iterator[T]] } diff --git a/streaming/src/test/scala/org/apache/spark/streaming/ReceivedBlockHandlerSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/ReceivedBlockHandlerSuite.scala index feb5c30c6aa14..7e665454a5400 100644 --- a/streaming/src/test/scala/org/apache/spark/streaming/ReceivedBlockHandlerSuite.scala +++ b/streaming/src/test/scala/org/apache/spark/streaming/ReceivedBlockHandlerSuite.scala @@ -23,6 +23,7 @@ import java.nio.ByteBuffer import scala.collection.mutable.ArrayBuffer import scala.concurrent.duration._ import scala.language.postfixOps +import scala.reflect.ClassTag import org.apache.hadoop.conf.Configuration import org.scalatest.{BeforeAndAfter, Matchers} @@ -163,7 +164,7 @@ class ReceivedBlockHandlerSuite val bytes = reader.read(fileSegment) reader.close() serializerManager.dataDeserializeStream( - generateBlockId(), new ChunkedByteBuffer(bytes).toInputStream()).toList + generateBlockId(), new ChunkedByteBuffer(bytes).toInputStream())(ClassTag.Any).toList } loggedData shouldEqual data } From 4f769b903bc9822c262f0a15f5933cc05c67923f Mon Sep 17 00:00:00 2001 From: Herman van Hovell Date: Wed, 7 Sep 2016 00:44:07 
+0200 Subject: [PATCH 220/270] [SPARK-17296][SQL] Simplify parser join processing. ## What changes were proposed in this pull request? Join processing in the parser relies on the fact that the grammar produces a right nested trees, for instance the parse tree for `select * from a join b join c` is expected to produce a tree similar to `JOIN(a, JOIN(b, c))`. However there are cases in which this (invariant) is violated, like: ```sql SELECT COUNT(1) FROM test T1 CROSS JOIN test T2 JOIN test T3 ON T3.col = T1.col JOIN test T4 ON T4.col = T1.col ``` In this case the parser returns a tree in which Joins are located on both the left and the right sides of the parent join node. This PR introduces a different grammar rule which does not make this assumption. The new rule takes a relation and searches for zero or more joined relations. As a bonus processing is much easier. ## How was this patch tested? Existing tests and I have added a regression test to the plan parser suite. Author: Herman van Hovell Closes #14867 from hvanhovell/SPARK-17296. --- .../spark/sql/catalyst/parser/SqlBase.g4 | 11 ++- .../sql/catalyst/parser/AstBuilder.scala | 99 ++++++++++--------- .../sql/catalyst/parser/ParserUtils.scala | 6 +- .../sql/catalyst/parser/PlanParserSuite.scala | 44 +++++++++ 4 files changed, 102 insertions(+), 58 deletions(-) diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 index 0447436ea7976..9a643465a9994 100644 --- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 +++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 @@ -374,11 +374,12 @@ setQuantifier ; relation - : left=relation - (joinType JOIN right=relation joinCriteria? - | NATURAL joinType JOIN right=relation - ) #joinRelation - | relationPrimary #relationDefault + : relationPrimary joinRelation* + ; + +joinRelation + : (joinType) JOIN right=relationPrimary joinCriteria? + | NATURAL joinType JOIN right=relationPrimary ; joinType diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala index e4cb9f016133a..bbbb14df88f8c 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala @@ -92,10 +92,9 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { // Apply CTEs query.optional(ctx.ctes) { - val ctes = ctx.ctes.namedQuery.asScala.map { - case nCtx => - val namedQuery = visitNamedQuery(nCtx) - (namedQuery.alias, namedQuery) + val ctes = ctx.ctes.namedQuery.asScala.map { nCtx => + val namedQuery = visitNamedQuery(nCtx) + (namedQuery.alias, namedQuery) } // Check for duplicate names. checkDuplicateKeys(ctes, ctx) @@ -401,7 +400,11 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { * separated) relations here, these get converted into a single plan by condition-less inner join. 
*/ override def visitFromClause(ctx: FromClauseContext): LogicalPlan = withOrigin(ctx) { - val from = ctx.relation.asScala.map(plan).reduceLeft(Join(_, _, Inner, None)) + val from = ctx.relation.asScala.foldLeft(null: LogicalPlan) { (left, relation) => + val right = plan(relation.relationPrimary) + val join = right.optionalMap(left)(Join(_, _, Inner, None)) + withJoinRelations(join, relation) + } ctx.lateralView.asScala.foldLeft(from)(withGenerate) } @@ -532,55 +535,53 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging { } /** - * Create a joins between two or more logical plans. + * Create a single relation referenced in a FROM claused. This method is used when a part of the + * join condition is nested, for example: + * {{{ + * select * from t1 join (t2 cross join t3) on col1 = col2 + * }}} */ - override def visitJoinRelation(ctx: JoinRelationContext): LogicalPlan = withOrigin(ctx) { - /** Build a join between two plans. */ - def join(ctx: JoinRelationContext, left: LogicalPlan, right: LogicalPlan): Join = { - val baseJoinType = ctx.joinType match { - case null => Inner - case jt if jt.CROSS != null => Cross - case jt if jt.FULL != null => FullOuter - case jt if jt.SEMI != null => LeftSemi - case jt if jt.ANTI != null => LeftAnti - case jt if jt.LEFT != null => LeftOuter - case jt if jt.RIGHT != null => RightOuter - case _ => Inner - } + override def visitRelation(ctx: RelationContext): LogicalPlan = withOrigin(ctx) { + withJoinRelations(plan(ctx.relationPrimary), ctx) + } - // Resolve the join type and join condition - val (joinType, condition) = Option(ctx.joinCriteria) match { - case Some(c) if c.USING != null => - val columns = c.identifier.asScala.map { column => - UnresolvedAttribute.quoted(column.getText) - } - (UsingJoin(baseJoinType, columns), None) - case Some(c) if c.booleanExpression != null => - (baseJoinType, Option(expression(c.booleanExpression))) - case None if ctx.NATURAL != null => - (NaturalJoin(baseJoinType), None) - case None => - (baseJoinType, None) - } - Join(left, right, joinType, condition) - } + /** + * Join one more [[LogicalPlan]]s to the current logical plan. + */ + private def withJoinRelations(base: LogicalPlan, ctx: RelationContext): LogicalPlan = { + ctx.joinRelation.asScala.foldLeft(base) { (left, join) => + withOrigin(join) { + val baseJoinType = join.joinType match { + case null => Inner + case jt if jt.CROSS != null => Cross + case jt if jt.FULL != null => FullOuter + case jt if jt.SEMI != null => LeftSemi + case jt if jt.ANTI != null => LeftAnti + case jt if jt.LEFT != null => LeftOuter + case jt if jt.RIGHT != null => RightOuter + case _ => Inner + } - // Handle all consecutive join clauses. ANTLR produces a right nested tree in which the the - // first join clause is at the top. However fields of previously referenced tables can be used - // in following join clauses. The tree needs to be reversed in order to make this work. 
- var result = plan(ctx.left) - var current = ctx - while (current != null) { - current.right match { - case right: JoinRelationContext => - result = join(current, result, plan(right.left)) - current = right - case right => - result = join(current, result, plan(right)) - current = null + // Resolve the join type and join condition + val (joinType, condition) = Option(join.joinCriteria) match { + case Some(c) if c.USING != null => + val columns = c.identifier.asScala.map { column => + UnresolvedAttribute.quoted(column.getText) + } + (UsingJoin(baseJoinType, columns), None) + case Some(c) if c.booleanExpression != null => + (baseJoinType, Option(expression(c.booleanExpression))) + case None if join.NATURAL != null => + if (baseJoinType == Cross) { + throw new ParseException("NATURAL CROSS JOIN is not supported", ctx) + } + (NaturalJoin(baseJoinType), None) + case None => + (baseJoinType, None) + } + Join(left, plan(join.right), joinType, condition) } } - result } /** diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala index cb89a9679a8cf..6fbc33fad735c 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala @@ -18,7 +18,7 @@ package org.apache.spark.sql.catalyst.parser import scala.collection.mutable.StringBuilder -import org.antlr.v4.runtime.{CharStream, ParserRuleContext, Token} +import org.antlr.v4.runtime.{ParserRuleContext, Token} import org.antlr.v4.runtime.misc.Interval import org.antlr.v4.runtime.tree.TerminalNode @@ -189,9 +189,7 @@ object ParserUtils { * Map a [[LogicalPlan]] to another [[LogicalPlan]] if the passed context exists using the * passed function. The original plan is returned when the context does not exist. */ - def optionalMap[C <: ParserRuleContext]( - ctx: C)( - f: (C, LogicalPlan) => LogicalPlan): LogicalPlan = { + def optionalMap[C](ctx: C)(f: (C, LogicalPlan) => LogicalPlan): LogicalPlan = { if (ctx != null) { f(ctx, plan) } else { diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala index faaea17b64d2a..ca86304d4d400 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala @@ -360,10 +360,54 @@ class PlanParserSuite extends PlanTest { test("left anti join", LeftAnti, testExistence) test("anti join", LeftAnti, testExistence) + // Test natural cross join + intercept("select * from a natural cross join b") + + // Test natural join with a condition + intercept("select * from a natural join b on a.id = b.id") + // Test multiple consecutive joins assertEqual( "select * from a join b join c right join d", table("a").join(table("b")).join(table("c")).join(table("d"), RightOuter).select(star())) + + // SPARK-17296 + assertEqual( + "select * from t1 cross join t2 join t3 on t3.id = t1.id join t4 on t4.id = t1.id", + table("t1") + .join(table("t2"), Cross) + .join(table("t3"), Inner, Option(Symbol("t3.id") === Symbol("t1.id"))) + .join(table("t4"), Inner, Option(Symbol("t4.id") === Symbol("t1.id"))) + .select(star())) + + // Test multiple on clauses. 
+ intercept("select * from t1 inner join t2 inner join t3 on col3 = col2 on col3 = col1") + + // Parenthesis + assertEqual( + "select * from t1 inner join (t2 inner join t3 on col3 = col2) on col3 = col1", + table("t1") + .join(table("t2") + .join(table("t3"), Inner, Option('col3 === 'col2)), Inner, Option('col3 === 'col1)) + .select(star())) + assertEqual( + "select * from t1 inner join (t2 inner join t3) on col3 = col2", + table("t1") + .join(table("t2").join(table("t3"), Inner, None), Inner, Option('col3 === 'col2)) + .select(star())) + assertEqual( + "select * from t1 inner join (t2 inner join t3 on col3 = col2)", + table("t1") + .join(table("t2").join(table("t3"), Inner, Option('col3 === 'col2)), Inner, None) + .select(star())) + + // Implicit joins. + assertEqual( + "select * from t1, t3 join t2 on t1.col1 = t2.col2", + table("t1") + .join(table("t3")) + .join(table("t2"), Inner, Option(Symbol("t1.col1") === Symbol("t2.col2"))) + .select(star())) } test("sampled relations") { From 0bd00ff2454c5046e4cb084ee64d432c4d3dcbc3 Mon Sep 17 00:00:00 2001 From: Marcelo Vanzin Date: Tue, 6 Sep 2016 15:54:54 -0700 Subject: [PATCH 221/270] [SPARK-15891][YARN] Clean up some logging in the YARN AM. To make the log file more readable, rework some of the logging done by the AM: - log executor command / env just once, since they're all almost the same; the information that changes, such as executor ID, is already available in other log messages. - avoid printing logs when nothing happens, especially when updating the container requests in the allocator. - print fewer log messages when requesting many unlocalized executors, instead of repeating the same message multiple times. - removed some logs that seemed unnecessary. In the process, I slightly fixed up the wording in a few log messages, and did some minor clean up of method arguments that were redundant. Tested by running existing unit tests, and analyzing the logs of an application that exercises dynamic allocation by forcing executors to be allocated and be killed in waves. Author: Marcelo Vanzin Closes #14943 from vanzin/SPARK-15891. 
--- .../spark/deploy/yarn/ApplicationMaster.scala | 15 ++- .../spark/deploy/yarn/ExecutorRunnable.scala | 92 +++++++++---------- .../spark/deploy/yarn/YarnAllocator.scala | 34 ++++--- .../deploy/yarn/YarnAllocatorSuite.scala | 4 +- 4 files changed, 82 insertions(+), 63 deletions(-) diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala index a4b575c85d5fb..ad50ea789a913 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala @@ -173,7 +173,6 @@ private[spark] class ApplicationMaster( sys.props.remove(e.key) } - logInfo("Prepared Local resources " + resources) resources.toMap } @@ -329,7 +328,7 @@ private[spark] class ApplicationMaster( val appId = client.getAttemptId().getApplicationId().toString() val attemptId = client.getAttemptId().getAttemptId().toString() val historyAddress = - sparkConf.get(HISTORY_SERVER_ADDRESS) + _sparkConf.get(HISTORY_SERVER_ADDRESS) .map { text => SparkHadoopUtil.get.substituteHadoopVariables(text, yarnConf) } .map { address => s"${address}${HistoryServer.UI_PATH_PREFIX}/${appId}/${attemptId}" } .getOrElse("") @@ -338,6 +337,18 @@ private[spark] class ApplicationMaster( _sparkConf.get("spark.driver.host"), _sparkConf.get("spark.driver.port").toInt, CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString + + // Before we initialize the allocator, let's log the information about how executors will + // be run up front, to avoid printing this out for every single executor being launched. + // Use placeholders for information that changes such as executor IDs. + logInfo { + val executorMemory = sparkConf.get(EXECUTOR_MEMORY).toInt + val executorCores = sparkConf.get(EXECUTOR_CORES) + val dummyRunner = new ExecutorRunnable(None, yarnConf, sparkConf, driverUrl, "", + "", executorMemory, executorCores, appId, securityMgr, localResources) + dummyRunner.launchContextDebugInfo() + } + allocator = client.register(driverUrl, driverRef, yarnConf, diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala index 3d0e996b18720..8e0533f39ae53 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala @@ -24,7 +24,6 @@ import java.util.Collections import scala.collection.JavaConverters._ import scala.collection.mutable.{HashMap, ListBuffer} -import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.hadoop.io.DataOutputBuffer import org.apache.hadoop.security.UserGroupInformation @@ -45,11 +44,11 @@ import org.apache.spark.network.util.JavaUtils import org.apache.spark.util.Utils private[yarn] class ExecutorRunnable( - container: Container, - conf: Configuration, + container: Option[Container], + conf: YarnConfiguration, sparkConf: SparkConf, masterAddress: String, - slaveId: String, + executorId: String, hostname: String, executorMemory: Int, executorCores: Int, @@ -59,43 +58,46 @@ private[yarn] class ExecutorRunnable( var rpc: YarnRPC = YarnRPC.create(conf) var nmClient: NMClient = _ - val yarnConf: YarnConfiguration = new YarnConfiguration(conf) - lazy val env = prepareEnvironment(container) def run(): Unit = { - logInfo("Starting Executor Container") + logDebug("Starting Executor Container") nmClient = NMClient.createNMClient() - 
nmClient.init(yarnConf) + nmClient.init(conf) nmClient.start() startContainer() } - def startContainer(): java.util.Map[String, ByteBuffer] = { - logInfo("Setting up ContainerLaunchContext") + def launchContextDebugInfo(): String = { + val commands = prepareCommand() + val env = prepareEnvironment() + + s""" + |=============================================================================== + |YARN executor launch context: + | env: + |${env.map { case (k, v) => s" $k -> $v\n" }.mkString} + | command: + | ${commands.mkString(" \\ \n ")} + | + | resources: + |${localResources.map { case (k, v) => s" $k -> $v\n" }.mkString} + |===============================================================================""".stripMargin + } + def startContainer(): java.util.Map[String, ByteBuffer] = { val ctx = Records.newRecord(classOf[ContainerLaunchContext]) .asInstanceOf[ContainerLaunchContext] + val env = prepareEnvironment().asJava ctx.setLocalResources(localResources.asJava) - ctx.setEnvironment(env.asJava) + ctx.setEnvironment(env) val credentials = UserGroupInformation.getCurrentUser().getCredentials() val dob = new DataOutputBuffer() credentials.writeTokenStorageToStream(dob) ctx.setTokens(ByteBuffer.wrap(dob.getData())) - val commands = prepareCommand(masterAddress, slaveId, hostname, executorMemory, executorCores, - appId) - - logInfo(s""" - |=============================================================================== - |YARN executor launch context: - | env: - |${env.map { case (k, v) => s" $k -> $v\n" }.mkString} - | command: - | ${commands.mkString(" ")} - |=============================================================================== - """.stripMargin) + val commands = prepareCommand() ctx.setCommands(commands.asJava) ctx.setApplicationACLs( @@ -119,21 +121,15 @@ private[yarn] class ExecutorRunnable( // Send the start request to the ContainerManager try { - nmClient.startContainer(container, ctx) + nmClient.startContainer(container.get, ctx) } catch { case ex: Exception => - throw new SparkException(s"Exception while starting container ${container.getId}" + + throw new SparkException(s"Exception while starting container ${container.get.getId}" + s" on host $hostname", ex) } } - private def prepareCommand( - masterAddress: String, - slaveId: String, - hostname: String, - executorMemory: Int, - executorCores: Int, - appId: String): List[String] = { + private def prepareCommand(): List[String] = { // Extra options for the JVM val javaOpts = ListBuffer[String]() @@ -216,23 +212,23 @@ private[yarn] class ExecutorRunnable( "-server") ++ javaOpts ++ Seq("org.apache.spark.executor.CoarseGrainedExecutorBackend", - "--driver-url", masterAddress.toString, - "--executor-id", slaveId.toString, - "--hostname", hostname.toString, + "--driver-url", masterAddress, + "--executor-id", executorId, + "--hostname", hostname, "--cores", executorCores.toString, "--app-id", appId) ++ userClassPath ++ Seq( - "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout", - "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr") + s"1>${ApplicationConstants.LOG_DIR_EXPANSION_VAR}/stdout", + s"2>${ApplicationConstants.LOG_DIR_EXPANSION_VAR}/stderr") // TODO: it would be nicer to just make sure there are no null commands here commands.map(s => if (s == null) "null" else s).toList } - private def prepareEnvironment(container: Container): HashMap[String, String] = { + private def prepareEnvironment(): HashMap[String, String] = { val env = new HashMap[String, String]() - Client.populateClasspath(null, yarnConf, 
sparkConf, env, sparkConf.get(EXECUTOR_CLASS_PATH)) + Client.populateClasspath(null, conf, sparkConf, env, sparkConf.get(EXECUTOR_CLASS_PATH)) sparkConf.getExecutorEnv.foreach { case (key, value) => // This assumes each executor environment variable set here is a path @@ -246,20 +242,22 @@ private[yarn] class ExecutorRunnable( } // lookup appropriate http scheme for container log urls - val yarnHttpPolicy = yarnConf.get( + val yarnHttpPolicy = conf.get( YarnConfiguration.YARN_HTTP_POLICY_KEY, YarnConfiguration.YARN_HTTP_POLICY_DEFAULT ) val httpScheme = if (yarnHttpPolicy == "HTTPS_ONLY") "https://" else "http://" // Add log urls - sys.env.get("SPARK_USER").foreach { user => - val containerId = ConverterUtils.toString(container.getId) - val address = container.getNodeHttpAddress - val baseUrl = s"$httpScheme$address/node/containerlogs/$containerId/$user" - - env("SPARK_LOG_URL_STDERR") = s"$baseUrl/stderr?start=-4096" - env("SPARK_LOG_URL_STDOUT") = s"$baseUrl/stdout?start=-4096" + container.foreach { c => + sys.env.get("SPARK_USER").foreach { user => + val containerId = ConverterUtils.toString(c.getId) + val address = c.getNodeHttpAddress + val baseUrl = s"$httpScheme$address/node/containerlogs/$containerId/$user" + + env("SPARK_LOG_URL_STDERR") = s"$baseUrl/stderr?start=-4096" + env("SPARK_LOG_URL_STDOUT") = s"$baseUrl/stdout?start=-4096" + } } System.getenv().asScala.filterKeys(_.startsWith("SPARK")) diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala index dbdac3369b905..0daf1ea0bc8b4 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala @@ -26,10 +26,10 @@ import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Queue} import scala.collection.JavaConverters._ import scala.util.control.NonFatal -import org.apache.hadoop.conf.Configuration import org.apache.hadoop.yarn.api.records._ import org.apache.hadoop.yarn.client.api.AMRMClient import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest +import org.apache.hadoop.yarn.conf.YarnConfiguration import org.apache.hadoop.yarn.util.RackResolver import org.apache.log4j.{Level, Logger} @@ -60,7 +60,7 @@ import org.apache.spark.util.{Clock, SystemClock, ThreadUtils} private[yarn] class YarnAllocator( driverUrl: String, driverRef: RpcEndpointRef, - conf: Configuration, + conf: YarnConfiguration, sparkConf: SparkConf, amClient: AMRMClient[ContainerRequest], appAttemptId: ApplicationAttemptId, @@ -297,8 +297,9 @@ private[yarn] class YarnAllocator( val missing = targetNumExecutors - numPendingAllocate - numExecutorsRunning if (missing > 0) { - logInfo(s"Will request $missing executor containers, each with ${resource.getVirtualCores} " + - s"cores and ${resource.getMemory} MB memory including $memoryOverhead MB overhead") + logInfo(s"Will request $missing executor container(s), each with " + + s"${resource.getVirtualCores} core(s) and " + + s"${resource.getMemory} MB memory (including $memoryOverhead MB of overhead)") // Split the pending container request into three groups: locality matched list, locality // unmatched list and non-locality list. 
Take the locality matched container request into @@ -314,7 +315,9 @@ private[yarn] class YarnAllocator( amClient.removeContainerRequest(stale) } val cancelledContainers = staleRequests.size - logInfo(s"Canceled $cancelledContainers container requests (locality no longer needed)") + if (cancelledContainers > 0) { + logInfo(s"Canceled $cancelledContainers container request(s) (locality no longer needed)") + } // consider the number of new containers and cancelled stale containers available val availableContainers = missing + cancelledContainers @@ -344,14 +347,24 @@ private[yarn] class YarnAllocator( anyHostRequests.slice(0, numToCancel).foreach { nonLocal => amClient.removeContainerRequest(nonLocal) } - logInfo(s"Canceled $numToCancel container requests for any host to resubmit with locality") + if (numToCancel > 0) { + logInfo(s"Canceled $numToCancel unlocalized container requests to resubmit with locality") + } } newLocalityRequests.foreach { request => amClient.addContainerRequest(request) - logInfo(s"Submitted container request (host: ${hostStr(request)}, capability: $resource)") } + if (log.isInfoEnabled()) { + val (localized, anyHost) = newLocalityRequests.partition(_.getNodes() != null) + if (anyHost.nonEmpty) { + logInfo(s"Submitted ${anyHost.size} unlocalized container requests.") + } + localized.foreach { request => + logInfo(s"Submitted container request for host ${hostStr(request)}.") + } + } } else if (numPendingAllocate > 0 && missing < 0) { val numToCancel = math.min(numPendingAllocate, -missing) logInfo(s"Canceling requests for $numToCancel executor container(s) to have a new desired " + @@ -479,7 +492,7 @@ private[yarn] class YarnAllocator( val containerId = container.getId val executorId = executorIdCounter.toString assert(container.getResource.getMemory >= resource.getMemory) - logInfo("Launching container %s for on host %s".format(containerId, executorHostname)) + logInfo(s"Launching container $containerId on host $executorHostname") def updateInternalState(): Unit = synchronized { numExecutorsRunning += 1 @@ -494,14 +507,11 @@ private[yarn] class YarnAllocator( } if (launchContainers) { - logInfo("Launching ExecutorRunnable. 
driverUrl: %s, executorHostname: %s".format( - driverUrl, executorHostname)) - launcherPool.execute(new Runnable { override def run(): Unit = { try { new ExecutorRunnable( - container, + Some(container), conf, sparkConf, driverUrl, diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala index 207dbf56d3606..696e552c35d12 100644 --- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala +++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala @@ -19,12 +19,12 @@ package org.apache.spark.deploy.yarn import java.util.{Arrays, List => JList} -import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.CommonConfigurationKeysPublic import org.apache.hadoop.net.DNSToSwitchMapping import org.apache.hadoop.yarn.api.records._ import org.apache.hadoop.yarn.client.api.AMRMClient import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest +import org.apache.hadoop.yarn.conf.YarnConfiguration import org.mockito.Mockito._ import org.scalatest.{BeforeAndAfterEach, Matchers} @@ -49,7 +49,7 @@ class MockResolver extends DNSToSwitchMapping { } class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfterEach { - val conf = new Configuration() + val conf = new YarnConfiguration() conf.setClass( CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, classOf[MockResolver], classOf[DNSToSwitchMapping]) From 175b4344112b376cbbbd05265125ed0e1b87d507 Mon Sep 17 00:00:00 2001 From: Shixiong Zhu Date: Tue, 6 Sep 2016 16:49:06 -0700 Subject: [PATCH 222/270] [SPARK-17316][CORE] Fix the 'ask' type parameter in 'removeExecutor' ## What changes were proposed in this pull request? Fix the 'ask' type parameter in 'removeExecutor' to eliminate a lot of error logs `Cannot cast java.lang.Boolean to scala.runtime.Nothing$` ## How was this patch tested? Jenkins Author: Shixiong Zhu Closes #14983 from zsxwing/SPARK-17316-3. --- .../spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala index 6d26705377e9d..c6b3fdf439f5f 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala @@ -416,7 +416,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp */ protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = { // Only log the failure since we don't care about the result. - driverEndpoint.ask(RemoveExecutor(executorId, reason)).onFailure { case t => + driverEndpoint.ask[Boolean](RemoveExecutor(executorId, reason)).onFailure { case t => logError(t.getMessage, t) }(ThreadUtils.sameThread) } From c07cbb3534a57834b9b78e1572d40fb2af930f5f Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 6 Sep 2016 16:55:22 -0700 Subject: [PATCH 223/270] [SPARK-17371] Resubmitted shuffle outputs can get deleted by zombie map tasks ## What changes were proposed in this pull request? It seems that old shuffle map tasks hanging around after a stage resubmit will delete intended shuffle output files on stop(), causing downstream stages to fail even after successful resubmit completion. 
This can happen easily if the prior map task is waiting for a network timeout when its stage is resubmitted. This can cause unnecessary stage resubmits, sometimes multiple times as fetch fails cause a cascade of shuffle file invalidations, and confusing FetchFailure messages that report shuffle index files missing from the local disk. Given that IndexShuffleBlockResolver commits data atomically, it seems unnecessary to ever delete committed task output: even in the rare case that a task is failed after it finishes committing shuffle output, it should be safe to retain that output. ## How was this patch tested? Prior to the fix proposed in https://github.com/apache/spark/pull/14931, I was able to reproduce this behavior by killing slaves in the middle of a large shuffle. After this patch, stages were no longer resubmitted multiple times due to shuffle index loss. cc JoshRosen vanzin Author: Eric Liang Closes #14932 from ericl/dont-remove-committed-files. --- .../apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java | 1 - .../java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java | 2 -- .../scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala | 2 -- 3 files changed, 5 deletions(-) diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java index cd6d64a1ee4af..0fcc56d50ae6a 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java @@ -238,7 +238,6 @@ public Option stop(boolean success) { partitionWriters = null; } } - shuffleBlockResolver.removeDataByMap(shuffleId, mapId); return None$.empty(); } } diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java index 876cb7f7d86d5..63d376b44fb11 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java @@ -465,8 +465,6 @@ public Option stop(boolean success) { } return Option.apply(mapStatus); } else { - // The map task failed, so delete our output data. - shuffleBlockResolver.removeDataByMap(shuffleId, mapId); return Option.apply(null); } } diff --git a/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala b/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala index 1adacabc86c05..cc01e6aa7ea91 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala @@ -83,8 +83,6 @@ private[spark] class SortShuffleWriter[K, V, C]( if (success) { return Option(mapStatus) } else { - // The map task failed, so delete our output data. - shuffleBlockResolver.removeDataByMap(dep.shuffleId, mapId) return None } } finally { From a40657bfd375bd27d65204bb42ed0cbd7bd1ebf2 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Wed, 7 Sep 2016 08:13:12 +0800 Subject: [PATCH 224/270] [SPARK-17408][TEST] Flaky test: org.apache.spark.sql.hive.StatisticsSuite ### What changes were proposed in this pull request? 
https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/64956/testReport/junit/org.apache.spark.sql.hive/StatisticsSuite/test_statistics_of_LogicalRelation_converted_from_MetastoreRelation/ ``` org.apache.spark.sql.hive.StatisticsSuite.test statistics of LogicalRelation converted from MetastoreRelation Failing for the past 1 build (Since Failed#64956 ) Took 1.4 sec. Error Message org.scalatest.exceptions.TestFailedException: 6871 did not equal 4236 Stacktrace sbt.ForkMain$ForkError: org.scalatest.exceptions.TestFailedException: 6871 did not equal 4236 at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:500) ``` This fix does not check the exact value of `sizeInBytes`. Instead, we compare whether it is larger than zero and compare the values between different values. In addition, we also combine `checkMetastoreRelationStats` and `checkLogicalRelationStats` into the same checking function. ### How was this patch tested? N/A Author: gatorsmile Closes #14978 from gatorsmile/spark17408. --- .../spark/sql/hive/StatisticsSuite.scala | 141 ++++++++++-------- 1 file changed, 80 insertions(+), 61 deletions(-) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala index 33ed67575486a..9956706929cd1 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala @@ -171,23 +171,37 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils TableIdentifier("tempTable"), ignoreIfNotExists = true, purge = false) } - private def checkMetastoreRelationStats( + private def checkStats( + stats: Option[Statistics], + hasSizeInBytes: Boolean, + expectedRowCounts: Option[Int]): Unit = { + if (hasSizeInBytes || expectedRowCounts.nonEmpty) { + assert(stats.isDefined) + assert(stats.get.sizeInBytes > 0) + assert(stats.get.rowCount === expectedRowCounts) + } else { + assert(stats.isEmpty) + } + } + + private def checkStats( tableName: String, - expectedStats: Option[Statistics]): Unit = { + isDataSourceTable: Boolean, + hasSizeInBytes: Boolean, + expectedRowCounts: Option[Int]): Option[Statistics] = { val df = sql(s"SELECT * FROM $tableName") - val relations = df.queryExecution.analyzed.collect { case rel: MetastoreRelation => - expectedStats match { - case Some(es) => - assert(rel.catalogTable.stats.isDefined) - val stats = rel.catalogTable.stats.get - assert(stats.sizeInBytes === es.sizeInBytes) - assert(stats.rowCount === es.rowCount) - case None => - assert(rel.catalogTable.stats.isEmpty) - } - rel + val stats = df.queryExecution.analyzed.collect { + case rel: MetastoreRelation => + checkStats(rel.catalogTable.stats, hasSizeInBytes, expectedRowCounts) + assert(!isDataSourceTable, "Expected a data source table, but got a Hive serde table") + rel.catalogTable.stats + case rel: LogicalRelation => + checkStats(rel.catalogTable.get.stats, hasSizeInBytes, expectedRowCounts) + assert(isDataSourceTable, "Expected a Hive serde table, but got a data source table") + rel.catalogTable.get.stats } - assert(relations.size === 1) + assert(stats.size == 1) + stats.head } test("test table-level statistics for hive tables created in HiveExternalCatalog") { @@ -196,19 +210,28 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils // Currently Spark's statistics are self-contained, we don't have statistics until we use // the `ANALYZE TABLE` 
command. sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS TEXTFILE") - checkMetastoreRelationStats(textTable, expectedStats = None) + checkStats( + textTable, + isDataSourceTable = false, + hasSizeInBytes = false, + expectedRowCounts = None) sql(s"INSERT INTO TABLE $textTable SELECT * FROM src") - checkMetastoreRelationStats(textTable, expectedStats = None) + checkStats( + textTable, + isDataSourceTable = false, + hasSizeInBytes = false, + expectedRowCounts = None) // noscan won't count the number of rows sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan") - checkMetastoreRelationStats(textTable, expectedStats = - Some(Statistics(sizeInBytes = 5812, rowCount = None))) + val fetchedStats1 = checkStats( + textTable, isDataSourceTable = false, hasSizeInBytes = true, expectedRowCounts = None) // without noscan, we count the number of rows sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS") - checkMetastoreRelationStats(textTable, expectedStats = - Some(Statistics(sizeInBytes = 5812, rowCount = Some(500)))) + val fetchedStats2 = checkStats( + textTable, isDataSourceTable = false, hasSizeInBytes = true, expectedRowCounts = Some(500)) + assert(fetchedStats1.get.sizeInBytes == fetchedStats2.get.sizeInBytes) } } @@ -218,40 +241,22 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS TEXTFILE") sql(s"INSERT INTO TABLE $textTable SELECT * FROM src") sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS") - checkMetastoreRelationStats(textTable, expectedStats = - Some(Statistics(sizeInBytes = 5812, rowCount = Some(500)))) + val fetchedStats1 = checkStats( + textTable, isDataSourceTable = false, hasSizeInBytes = true, expectedRowCounts = Some(500)) sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan") // when the total size is not changed, the old row count is kept - checkMetastoreRelationStats(textTable, expectedStats = - Some(Statistics(sizeInBytes = 5812, rowCount = Some(500)))) + val fetchedStats2 = checkStats( + textTable, isDataSourceTable = false, hasSizeInBytes = true, expectedRowCounts = Some(500)) + assert(fetchedStats1 == fetchedStats2) sql(s"INSERT INTO TABLE $textTable SELECT * FROM src") sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan") // update total size and remove the old and invalid row count - checkMetastoreRelationStats(textTable, expectedStats = - Some(Statistics(sizeInBytes = 11624, rowCount = None))) - } - } - - private def checkLogicalRelationStats( - tableName: String, - expectedStats: Option[Statistics]): Unit = { - val df = sql(s"SELECT * FROM $tableName") - val relations = df.queryExecution.analyzed.collect { case rel: LogicalRelation => - assert(rel.catalogTable.isDefined) - expectedStats match { - case Some(es) => - assert(rel.catalogTable.get.stats.isDefined) - val stats = rel.catalogTable.get.stats.get - assert(stats.sizeInBytes === es.sizeInBytes) - assert(stats.rowCount === es.rowCount) - case None => - assert(rel.catalogTable.get.stats.isEmpty) - } - rel + val fetchedStats3 = checkStats( + textTable, isDataSourceTable = false, hasSizeInBytes = true, expectedRowCounts = None) + assert(fetchedStats3.get.sizeInBytes > fetchedStats2.get.sizeInBytes) } - assert(relations.size === 1) } test("test statistics of LogicalRelation converted from MetastoreRelation") { @@ -266,16 +271,21 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils // the default value for `spark.sql.hive.convertMetastoreParquet` is true, here 
we just set it // for robustness withSQLConf("spark.sql.hive.convertMetastoreParquet" -> "true") { - checkLogicalRelationStats(parquetTable, expectedStats = None) + checkStats( + parquetTable, isDataSourceTable = true, hasSizeInBytes = false, expectedRowCounts = None) sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS") - checkLogicalRelationStats(parquetTable, expectedStats = - Some(Statistics(sizeInBytes = 4236, rowCount = Some(500)))) + checkStats( + parquetTable, + isDataSourceTable = true, + hasSizeInBytes = true, + expectedRowCounts = Some(500)) } withSQLConf("spark.sql.hive.convertMetastoreOrc" -> "true") { - checkLogicalRelationStats(orcTable, expectedStats = None) + checkStats( + orcTable, isDataSourceTable = true, hasSizeInBytes = false, expectedRowCounts = None) sql(s"ANALYZE TABLE $orcTable COMPUTE STATISTICS") - checkLogicalRelationStats(orcTable, expectedStats = - Some(Statistics(sizeInBytes = 3023, rowCount = Some(500)))) + checkStats( + orcTable, isDataSourceTable = true, hasSizeInBytes = true, expectedRowCounts = Some(500)) } } } @@ -288,22 +298,28 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils assert(DDLUtils.isDatasourceTable(catalogTable)) sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src") - checkLogicalRelationStats(parquetTable, expectedStats = None) + checkStats( + parquetTable, isDataSourceTable = true, hasSizeInBytes = false, expectedRowCounts = None) // noscan won't count the number of rows sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS noscan") - checkLogicalRelationStats(parquetTable, expectedStats = - Some(Statistics(sizeInBytes = 4236, rowCount = None))) + val fetchedStats1 = checkStats( + parquetTable, isDataSourceTable = true, hasSizeInBytes = true, expectedRowCounts = None) sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src") sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS noscan") - checkLogicalRelationStats(parquetTable, expectedStats = - Some(Statistics(sizeInBytes = 8472, rowCount = None))) + val fetchedStats2 = checkStats( + parquetTable, isDataSourceTable = true, hasSizeInBytes = true, expectedRowCounts = None) + assert(fetchedStats2.get.sizeInBytes > fetchedStats1.get.sizeInBytes) // without noscan, we count the number of rows sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS") - checkLogicalRelationStats(parquetTable, expectedStats = - Some(Statistics(sizeInBytes = 8472, rowCount = Some(1000)))) + val fetchedStats3 = checkStats( + parquetTable, + isDataSourceTable = true, + hasSizeInBytes = true, + expectedRowCounts = Some(1000)) + assert(fetchedStats3.get.sizeInBytes == fetchedStats2.get.sizeInBytes) } } @@ -314,8 +330,11 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton with SQLTestUtils val dfNoCols = spark.createDataFrame(rddNoCols, StructType(Seq.empty)) dfNoCols.write.format("json").saveAsTable(table_no_cols) sql(s"ANALYZE TABLE $table_no_cols COMPUTE STATISTICS") - checkLogicalRelationStats(table_no_cols, expectedStats = - Some(Statistics(sizeInBytes = 30, rowCount = Some(10)))) + checkStats( + table_no_cols, + isDataSourceTable = true, + hasSizeInBytes = true, + expectedRowCounts = Some(10)) } } From d6eede9a36766e2d2294951b054d7557008a5662 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Wed, 7 Sep 2016 09:36:53 +0800 Subject: [PATCH 225/270] [SPARK-17238][SQL] simplify the logic for converting data source table into hive compatible format ## What changes were proposed in this pull request? 
Previously we have 2 conditions to decide whether a data source table is hive-compatible: 1. the data source is file-based and has a corresponding Hive serde 2. have a `path` entry in data source options/storage properties However, if condition 1 is true, condition 2 must be true too, as we will put the default table path into data source options/storage properties for managed data source tables. There is also a potential issue: we will set the `locationUri` even for managed table. This PR removes the condition 2 and only set the `locationUri` for external data source tables. Note: this is also a first step to unify the `path` of data source tables and `locationUri` of hive serde tables. For hive serde tables, `locationUri` is only set for external table. For data source tables, `path` is always set. We can make them consistent after this PR. ## How was this patch tested? existing tests Author: Wenchen Fan Closes #14809 from cloud-fan/minor2. --- .../spark/sql/hive/HiveExternalCatalog.scala | 32 +++++++++++-------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index 2e127ef562121..d35a681b67e38 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -249,10 +249,21 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat } // converts the table metadata to Hive compatible format, i.e. set the serde information. - def newHiveCompatibleMetastoreTable(serde: HiveSerDe, path: String): CatalogTable = { + def newHiveCompatibleMetastoreTable(serde: HiveSerDe): CatalogTable = { + val location = if (tableDefinition.tableType == EXTERNAL) { + // When we hit this branch, we are saving an external data source table with hive + // compatible format, which means the data source is file-based and must have a `path`. + val map = new CaseInsensitiveMap(tableDefinition.storage.properties) + require(map.contains("path"), + "External file-based data source table must have a `path` entry in storage properties.") + Some(new Path(map("path")).toUri.toString) + } else { + None + } + tableDefinition.copy( storage = tableDefinition.storage.copy( - locationUri = Some(new Path(path).toUri.toString), + locationUri = location, inputFormat = serde.inputFormat, outputFormat = serde.outputFormat, serde = serde.serde @@ -262,11 +273,10 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat val qualifiedTableName = tableDefinition.identifier.quotedString val maybeSerde = HiveSerDe.sourceToSerDe(tableDefinition.provider.get) - val maybePath = new CaseInsensitiveMap(tableDefinition.storage.properties).get("path") val skipHiveMetadata = tableDefinition.storage.properties .getOrElse("skipHiveMetadata", "false").toBoolean - val (hiveCompatibleTable, logMessage) = (maybeSerde, maybePath) match { + val (hiveCompatibleTable, logMessage) = maybeSerde match { case _ if skipHiveMetadata => val message = s"Persisting data source table $qualifiedTableName into Hive metastore in" + @@ -280,17 +290,11 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. 
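The core of the fix is easy to state in isolation: materialize the lazily built `Stream` of file names into an `Array` before it reaches the serialized metadata log, so neither the closure captured by `Stream.filter` (the Scala 2.10 `NotSerializableException`) nor the stream's recursive cons structure (the StackOverflowError on 10k+ entries) ends up in the serialized bytes. The sketch below uses illustrative names, not the real `HDFSMetadataLog`/`FileStreamSource` API, and only demonstrates the materialization pattern:

```scala
import java.io.{ByteArrayOutputStream, ObjectOutputStream}

object MetadataLogSketch {
  def serialize(obj: AnyRef): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val oos = new ObjectOutputStream(bos)
    try { oos.writeObject(obj); bos.toByteArray } finally oos.close()
  }

  def main(args: Array[String]): Unit = {
    // A partition-discovering catalog typically hands back a lazy Stream of paths.
    val allFiles: Seq[String] = (0 until 10000).toStream.map(i => s"dir/x=y/part-$i")
    val seenFiles = Set("dir/x=y/part-0")

    // Problematic shape: the filtered value is still a Stream, so serializing it would drag
    // in the filter closure and recurse over ~10k cons cells.
    val newFilesLazy = allFiles.filter(f => !seenFiles.contains(f))

    // Fix: force a plain Array before the batch is written to the log.
    val newFiles: Array[String] = newFilesLazy.toArray
    val bytes = serialize(newFiles)
    println(s"serialized ${newFiles.length} entries into ${bytes.length} bytes")
  }
}
```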
" (None, message) - case (Some(serde), Some(path)) => + case Some(serde) => val message = - s"Persisting file based data source table $qualifiedTableName with an input path " + - s"into Hive metastore in Hive compatible format." - (Some(newHiveCompatibleMetastoreTable(serde, path)), message) - - case (Some(_), None) => - val message = - s"Data source table $qualifiedTableName is not file based. Persisting it into " + - s"Hive metastore in Spark SQL specific format, which is NOT compatible with Hive." - (None, message) + s"Persisting file based data source table $qualifiedTableName into " + + s"Hive metastore in Hive compatible format." + (Some(newHiveCompatibleMetastoreTable(serde)), message) case _ => val provider = tableDefinition.provider.get From eb1ab88a86ce35f3d6ba03b3a798099fbcf6b3fc Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 6 Sep 2016 19:34:11 -0700 Subject: [PATCH 226/270] [SPARK-17372][SQL][STREAMING] Avoid serialization issues by using Arrays to save file names in FileStreamSource ## What changes were proposed in this pull request? When we create a filestream on a directory that has partitioned subdirs (i.e. dir/x=y/), then ListingFileCatalog.allFiles returns the files in the dir as Seq[String] which internally is a Stream[String]. This is because of this [line](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala#L93), where a LinkedHashSet.values.toSeq returns Stream. Then when the [FileStreamSource](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala#L79) filters this Stream[String] to remove the seen files, it creates a new Stream[String], which has a filter function that has a $outer reference to the FileStreamSource (in Scala 2.10). Trying to serialize this Stream[String] causes NotSerializableException. This will happened even if there is just one file in the dir. Its important to note that this behavior is different in Scala 2.11. There is no $outer reference to FileStreamSource, so it does not throw NotSerializableException. However, with a large sequence of files (tested with 10000 files), it throws StackOverflowError. This is because how Stream class is implemented. Its basically like a linked list, and attempting to serialize a long Stream requires *recursively* going through linked list, thus resulting in StackOverflowError. In short, across both Scala 2.10 and 2.11, serialization fails when both the following conditions are true. - file stream defined on a partitioned directory - directory has 10k+ files The right solution is to convert the seq to an array before writing to the log. This PR implements this fix in two ways. - Changing all uses for HDFSMetadataLog to ensure Array is used instead of Seq - Added a `require` in HDFSMetadataLog such that it is never used with type Seq ## How was this patch tested? Added unit test that test that ensures the file stream source can handle with 10000 files. This tests fails in both Scala 2.10 and 2.11 with different failures as indicated above. Author: Tathagata Das Closes #14987 from tdas/SPARK-17372. 
--- .../streaming/FileStreamSinkLog.scala | 12 +++--- .../streaming/FileStreamSource.scala | 4 +- .../execution/streaming/HDFSMetadataLog.scala | 4 ++ .../execution/streaming/StreamExecution.scala | 3 ++ .../streaming/FileStreamSinkLogSuite.scala | 18 ++++---- .../sql/streaming/FileStreamSourceSuite.scala | 42 ++++++++++++++++++- 6 files changed, 65 insertions(+), 18 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala index 4254df44c97a6..7520163522027 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala @@ -80,7 +80,7 @@ object SinkFileStatus { * (drops the deleted files). */ class FileStreamSinkLog(sparkSession: SparkSession, path: String) - extends HDFSMetadataLog[Seq[SinkFileStatus]](sparkSession, path) { + extends HDFSMetadataLog[Array[SinkFileStatus]](sparkSession, path) { import FileStreamSinkLog._ @@ -123,11 +123,11 @@ class FileStreamSinkLog(sparkSession: SparkSession, path: String) } } - override def serialize(logData: Seq[SinkFileStatus]): Array[Byte] = { + override def serialize(logData: Array[SinkFileStatus]): Array[Byte] = { (VERSION +: logData.map(write(_))).mkString("\n").getBytes(UTF_8) } - override def deserialize(bytes: Array[Byte]): Seq[SinkFileStatus] = { + override def deserialize(bytes: Array[Byte]): Array[SinkFileStatus] = { val lines = new String(bytes, UTF_8).split("\n") if (lines.length == 0) { throw new IllegalStateException("Incomplete log file") @@ -136,10 +136,10 @@ class FileStreamSinkLog(sparkSession: SparkSession, path: String) if (version != VERSION) { throw new IllegalStateException(s"Unknown log version: ${version}") } - lines.toSeq.slice(1, lines.length).map(read[SinkFileStatus](_)) + lines.slice(1, lines.length).map(read[SinkFileStatus](_)) } - override def add(batchId: Long, logs: Seq[SinkFileStatus]): Boolean = { + override def add(batchId: Long, logs: Array[SinkFileStatus]): Boolean = { if (isCompactionBatch(batchId, compactInterval)) { compact(batchId, logs) } else { @@ -186,7 +186,7 @@ class FileStreamSinkLog(sparkSession: SparkSession, path: String) private def compact(batchId: Long, logs: Seq[SinkFileStatus]): Boolean = { val validBatches = getValidBatchesBeforeCompactionBatch(batchId, compactInterval) val allLogs = validBatches.flatMap(batchId => get(batchId)).flatten ++ logs - if (super.add(batchId, compactLogs(allLogs))) { + if (super.add(batchId, compactLogs(allLogs).toArray)) { if (isDeletingExpiredLog) { deleteExpiredLog(batchId) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala index e8b969b5e0a55..42fb454c2d158 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala @@ -49,7 +49,7 @@ class FileStreamSource( fs.makeQualified(new Path(path)) // can contains glob patterns } - private val metadataLog = new HDFSMetadataLog[Seq[FileEntry]](sparkSession, metadataPath) + private val metadataLog = new HDFSMetadataLog[Array[FileEntry]](sparkSession, metadataPath) private var maxBatchId = metadataLog.getLatest().map(_._1).getOrElse(-1L) @@ -98,7 +98,7 @@ class FileStreamSource( 
if (batchFiles.nonEmpty) { maxBatchId += 1 - metadataLog.add(maxBatchId, batchFiles) + metadataLog.add(maxBatchId, batchFiles.toArray) logInfo(s"Max batch id increased to $maxBatchId with ${batchFiles.size} new files") } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala index 127ece9ab0e56..39a0f3341389c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala @@ -49,6 +49,10 @@ import org.apache.spark.util.UninterruptibleThread class HDFSMetadataLog[T: ClassTag](sparkSession: SparkSession, path: String) extends MetadataLog[T] with Logging { + // Avoid serializing generic sequences, see SPARK-17372 + require(implicitly[ClassTag[T]].runtimeClass != classOf[Seq[_]], + "Should not create a log with type Seq, use Arrays instead - see SPARK-17372") + import HDFSMetadataLog._ val metadataPath = new Path(path) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala index 4d05af0b60358..5e1e5eeb50936 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala @@ -407,6 +407,9 @@ class StreamExecution( awaitBatchLock.lock() try { awaitBatchLockCondition.await(100, TimeUnit.MILLISECONDS) + if (streamDeathCause != null) { + throw streamDeathCause + } } finally { awaitBatchLock.unlock() } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala index 39fd1f0cd37bb..26f8b98cb38a5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala @@ -98,7 +98,7 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSQLContext { test("serialize") { withFileStreamSinkLog { sinkLog => - val logs = Seq( + val logs = Array( SinkFileStatus( path = "/a/b/x", size = 100L, @@ -132,7 +132,7 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSQLContext { // scalastyle:on assert(expected === new String(sinkLog.serialize(logs), UTF_8)) - assert(FileStreamSinkLog.VERSION === new String(sinkLog.serialize(Nil), UTF_8)) + assert(FileStreamSinkLog.VERSION === new String(sinkLog.serialize(Array()), UTF_8)) } } @@ -196,7 +196,7 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSQLContext { for (batchId <- 0 to 10) { sinkLog.add( batchId, - Seq(newFakeSinkFileStatus("/a/b/" + batchId, FileStreamSinkLog.ADD_ACTION))) + Array(newFakeSinkFileStatus("/a/b/" + batchId, FileStreamSinkLog.ADD_ACTION))) val expectedFiles = (0 to batchId).map { id => newFakeSinkFileStatus("/a/b/" + id, FileStreamSinkLog.ADD_ACTION) } @@ -230,17 +230,17 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSQLContext { }.toSet } - sinkLog.add(0, Seq(newFakeSinkFileStatus("/a/b/0", FileStreamSinkLog.ADD_ACTION))) + sinkLog.add(0, Array(newFakeSinkFileStatus("/a/b/0", FileStreamSinkLog.ADD_ACTION))) assert(Set("0") === listBatchFiles()) - sinkLog.add(1, Seq(newFakeSinkFileStatus("/a/b/1", 
FileStreamSinkLog.ADD_ACTION))) + sinkLog.add(1, Array(newFakeSinkFileStatus("/a/b/1", FileStreamSinkLog.ADD_ACTION))) assert(Set("0", "1") === listBatchFiles()) - sinkLog.add(2, Seq(newFakeSinkFileStatus("/a/b/2", FileStreamSinkLog.ADD_ACTION))) + sinkLog.add(2, Array(newFakeSinkFileStatus("/a/b/2", FileStreamSinkLog.ADD_ACTION))) assert(Set("2.compact") === listBatchFiles()) - sinkLog.add(3, Seq(newFakeSinkFileStatus("/a/b/3", FileStreamSinkLog.ADD_ACTION))) + sinkLog.add(3, Array(newFakeSinkFileStatus("/a/b/3", FileStreamSinkLog.ADD_ACTION))) assert(Set("2.compact", "3") === listBatchFiles()) - sinkLog.add(4, Seq(newFakeSinkFileStatus("/a/b/4", FileStreamSinkLog.ADD_ACTION))) + sinkLog.add(4, Array(newFakeSinkFileStatus("/a/b/4", FileStreamSinkLog.ADD_ACTION))) assert(Set("2.compact", "3", "4") === listBatchFiles()) - sinkLog.add(5, Seq(newFakeSinkFileStatus("/a/b/5", FileStreamSinkLog.ADD_ACTION))) + sinkLog.add(5, Array(newFakeSinkFileStatus("/a/b/5", FileStreamSinkLog.ADD_ACTION))) assert(Set("5.compact") === listBatchFiles()) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala index 03222b4a49c6c..886f7be59db93 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala @@ -18,7 +18,9 @@ package org.apache.spark.sql.streaming import java.io.File -import java.util.UUID + +import org.scalatest.concurrent.Eventually._ +import org.scalatest.time.SpanSugar._ import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.util._ @@ -142,6 +144,8 @@ class FileStreamSourceSuite extends FileStreamSourceTest { import testImplicits._ + override val streamingTimeout = 20.seconds + /** Use `format` and `path` to create FileStreamSource via DataFrameReader */ private def createFileStreamSource( format: String, @@ -761,6 +765,42 @@ class FileStreamSourceSuite extends FileStreamSourceTest { } } } + + test("SPARK-17372 - write file names to WAL as Array[String]") { + // Note: If this test takes longer than the timeout, then its likely that this is actually + // running a Spark job with 10000 tasks. This test tries to avoid that by + // 1. Setting the threshold for parallel file listing to very high + // 2. Using a query that should use constant folding to eliminate reading of the files + + val numFiles = 10000 + + // This is to avoid running a spark job to list of files in parallel + // by the ListingFileCatalog. + spark.sessionState.conf.setConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD, numFiles * 2) + + withTempDirs { case (root, tmp) => + val src = new File(root, "a=1") + src.mkdirs() + + (1 to numFiles).map { _.toString }.foreach { i => + val tempFile = Utils.tempFileWith(new File(tmp, "text")) + val finalFile = new File(src, tempFile.getName) + stringToFile(finalFile, i) + } + assert(src.listFiles().size === numFiles) + + val files = spark.readStream.text(root.getCanonicalPath).as[String] + + // Note this query will use constant folding to eliminate the file scan. 
+ // This is to avoid actually running a Spark job with 10000 tasks + val df = files.filter("1 == 0").groupBy().count() + + testStream(df, InternalOutputModes.Complete)( + AddTextFileData("0", src, tmp), + CheckAnswer(0) + ) + } + } } class FileStreamSourceStressTestSuite extends FileStreamSourceTest { From 9fccde4ff80fb0fd65a9e90eb3337965e4349de4 Mon Sep 17 00:00:00 2001 From: Clark Fitzgerald Date: Tue, 6 Sep 2016 23:40:37 -0700 Subject: [PATCH 227/270] [SPARK-16785] R dapply doesn't return array or raw columns ## What changes were proposed in this pull request? Fixed bug in `dapplyCollect` by changing the `compute` function of `worker.R` to explicitly handle raw (binary) vectors. cc shivaram ## How was this patch tested? Unit tests Author: Clark Fitzgerald Closes #14783 from clarkfitzg/SPARK-16785. --- R/pkg/R/SQLContext.R | 4 ++++ R/pkg/R/utils.R | 15 ++++++++++++++ R/pkg/inst/tests/testthat/test_sparkSQL.R | 21 ++++++++++++++++++++ R/pkg/inst/tests/testthat/test_utils.R | 24 +++++++++++++++++++++++ R/pkg/inst/worker/worker.R | 9 ++++++++- 5 files changed, 72 insertions(+), 1 deletion(-) diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R index 783df53c12ca2..ce531c3f88863 100644 --- a/R/pkg/R/SQLContext.R +++ b/R/pkg/R/SQLContext.R @@ -202,7 +202,10 @@ getDefaultSqlSource <- function() { # TODO(davies): support sampling and infer type from NA createDataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0) { sparkSession <- getSparkSession() + if (is.data.frame(data)) { + # Convert data into a list of rows. Each row is a list. + # get the names of columns, they will be put into RDD if (is.null(schema)) { schema <- names(data) @@ -227,6 +230,7 @@ createDataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0) { args <- list(FUN = list, SIMPLIFY = FALSE, USE.NAMES = FALSE) data <- do.call(mapply, append(args, data)) } + if (is.list(data)) { sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession) rdd <- parallelize(sc, data) diff --git a/R/pkg/R/utils.R b/R/pkg/R/utils.R index 2809ce5d376a5..248c57532b6cf 100644 --- a/R/pkg/R/utils.R +++ b/R/pkg/R/utils.R @@ -697,3 +697,18 @@ isMasterLocal <- function(master) { isSparkRShell <- function() { grepl(".*shell\\.R$", Sys.getenv("R_PROFILE_USER"), perl = TRUE) } + +# rbind a list of rows with raw (binary) columns +# +# @param inputData a list of rows, with each row a list +# @return data.frame with raw columns as lists +rbindRaws <- function(inputData){ + row1 <- inputData[[1]] + rawcolumns <- ("raw" == sapply(row1, class)) + + listmatrix <- do.call(rbind, inputData) + # A dataframe with all list columns + out <- as.data.frame(listmatrix) + out[!rawcolumns] <- lapply(out[!rawcolumns], unlist) + out +} diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R index aac3f6220447c..a9bd325895821 100644 --- a/R/pkg/inst/tests/testthat/test_sparkSQL.R +++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R @@ -2270,6 +2270,27 @@ test_that("dapply() and dapplyCollect() on a DataFrame", { expect_identical(expected, result) }) +test_that("dapplyCollect() on DataFrame with a binary column", { + + df <- data.frame(key = 1:3) + df$bytes <- lapply(df$key, serialize, connection = NULL) + + df_spark <- createDataFrame(df) + + result1 <- collect(df_spark) + expect_identical(df, result1) + + result2 <- dapplyCollect(df_spark, function(x) x) + expect_identical(df, result2) + + # A data.frame with a single column of bytes + scb <- subset(df, select = "bytes") 
+ scb_spark <- createDataFrame(scb) + result <- dapplyCollect(scb_spark, function(x) x) + expect_identical(scb, result) + +}) + test_that("repartition by columns on DataFrame", { df <- createDataFrame( list(list(1L, 1, "1", 0.1), list(1L, 2, "2", 0.2), list(3L, 3, "3", 0.3)), diff --git a/R/pkg/inst/tests/testthat/test_utils.R b/R/pkg/inst/tests/testthat/test_utils.R index 83e94a14322f9..77f25292f3f29 100644 --- a/R/pkg/inst/tests/testthat/test_utils.R +++ b/R/pkg/inst/tests/testthat/test_utils.R @@ -183,4 +183,28 @@ test_that("overrideEnvs", { expect_equal(config[["config_only"]], "ok") }) +test_that("rbindRaws", { + + # Mixed Column types + r <- serialize(1:5, connection = NULL) + r1 <- serialize(1, connection = NULL) + r2 <- serialize(letters, connection = NULL) + r3 <- serialize(1:10, connection = NULL) + inputData <- list(list(1L, r1, "a", r), list(2L, r2, "b", r), + list(3L, r3, "c", r)) + expected <- data.frame(V1 = 1:3) + expected$V2 <- list(r1, r2, r3) + expected$V3 <- c("a", "b", "c") + expected$V4 <- list(r, r, r) + result <- rbindRaws(inputData) + expect_equal(expected, result) + + # Single binary column + input <- list(list(r1), list(r2), list(r3)) + expected <- subset(expected, select = "V2") + result <- setNames(rbindRaws(input), "V2") + expect_equal(expected, result) + +}) + sparkR.session.stop() diff --git a/R/pkg/inst/worker/worker.R b/R/pkg/inst/worker/worker.R index debf0180183a4..cfe41ded200c2 100644 --- a/R/pkg/inst/worker/worker.R +++ b/R/pkg/inst/worker/worker.R @@ -36,7 +36,14 @@ compute <- function(mode, partition, serializer, deserializer, key, # available since R 3.2.4. So we set the global option here. oldOpt <- getOption("stringsAsFactors") options(stringsAsFactors = FALSE) - inputData <- do.call(rbind.data.frame, inputData) + + # Handle binary data types + if ("raw" %in% sapply(inputData[[1]], class)) { + inputData <- SparkR:::rbindRaws(inputData) + } else { + inputData <- do.call(rbind.data.frame, inputData) + } + options(stringsAsFactors = oldOpt) names(inputData) <- colNames From 3ce3a282c8463408f9a2db93c1748e8df8087e07 Mon Sep 17 00:00:00 2001 From: Liwei Lin Date: Wed, 7 Sep 2016 10:04:00 +0100 Subject: [PATCH 228/270] [SPARK-17359][SQL][MLLIB] Use ArrayBuffer.+=(A) instead of ArrayBuffer.append(A) in performance critical paths ## What changes were proposed in this pull request? We should generally use `ArrayBuffer.+=(A)` rather than `ArrayBuffer.append(A)`, because `append(A)` would involve extra boxing / unboxing. ## How was this patch tested? N/A Author: Liwei Lin Closes #14914 from lw-lin/append_to_plus_eq_v2. 
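As a small illustration of the pattern this patch applies (the object name and loop bound are invented; this is not Spark code): in Scala 2.11, `ArrayBuffer.append` is declared as `append(elems: A*)`, so each call goes through the varargs overload, whereas `+=` takes the element directly.

```scala
import scala.collection.mutable.ArrayBuffer

// Minimal sketch of the two call shapes. Both produce the same buffer; the difference is
// that append(i) allocates a varargs wrapper per call (with the boxing mentioned above),
// which matters in hot paths such as the ones touched by this patch.
object AppendVsPlusEqSketch {
  def main(args: Array[String]): Unit = {
    val viaAppend = ArrayBuffer.empty[Int]
    val viaPlusEq = ArrayBuffer.empty[Int]

    for (i <- 0 until 1000000) viaAppend.append(i) // append(elems: A*): extra allocation per call
    for (i <- 0 until 1000000) viaPlusEq += i      // +=(elem: A): preferred in performance critical code

    assert(viaAppend == viaPlusEq)
  }
}
```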
--- .../apache/spark/api/python/PythonRDD.scala | 2 +- .../scala/org/apache/spark/ui/WebUI.scala | 4 ++-- .../collection/ExternalAppendOnlyMap.scala | 4 ++-- .../util/collection/ExternalSorter.scala | 6 ++--- .../apache/spark/deploy/IvyTestUtils.scala | 2 +- .../spark/memory/MemoryManagerSuite.scala | 3 +-- .../BypassMergeSortShuffleWriterSuite.scala | 2 +- .../spark/util/SizeEstimatorSuite.scala | 4 ++-- .../spark/streaming/kafka/KafkaCluster.scala | 22 +++++++++---------- ...esosFineGrainedSchedulerBackendSuite.scala | 12 +++++----- .../org/apache/spark/ml/linalg/Matrices.scala | 4 ++-- .../apache/spark/mllib/linalg/Matrices.scala | 4 ++-- .../linalg/distributed/BlockMatrix.scala | 2 +- .../mllib/optimization/GradientDescent.scala | 2 +- .../StreamingLogisticRegressionSuite.scala | 2 +- .../StreamingLinearRegressionSuite.scala | 2 +- .../catalyst/expressions/ExpressionSet.scala | 2 +- .../expressions/codegen/CodeGenerator.scala | 4 ++-- .../sql/catalyst/util/QuantileSummaries.scala | 8 +++---- .../sql/catalyst/trees/TreeNodeSuite.scala | 12 +++++----- .../sql/execution/DataSourceScanExec.scala | 4 ++-- .../spark/sql/execution/WindowExec.scala | 4 ++-- .../sql/execution/streaming/memory.scala | 4 ++-- .../streaming/util/BatchedWriteAheadLog.scala | 2 +- .../spark/deploy/yarn/YarnAllocator.scala | 4 ++-- 25 files changed, 60 insertions(+), 61 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala index 2822eb5d60024..d841091a316b1 100644 --- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala +++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala @@ -467,7 +467,7 @@ private[spark] object PythonRDD extends Logging { val length = file.readInt() val obj = new Array[Byte](length) file.readFully(obj) - objs.append(obj) + objs += obj } } catch { case eof: EOFException => // No-op diff --git a/core/src/main/scala/org/apache/spark/ui/WebUI.scala b/core/src/main/scala/org/apache/spark/ui/WebUI.scala index 2c40e726992d3..38363800ec505 100644 --- a/core/src/main/scala/org/apache/spark/ui/WebUI.scala +++ b/core/src/main/scala/org/apache/spark/ui/WebUI.scala @@ -83,8 +83,8 @@ private[spark] abstract class WebUI( (request: HttpServletRequest) => page.renderJson(request), securityManager, conf, basePath) attachHandler(renderHandler) attachHandler(renderJsonHandler) - pageToHandlers.getOrElseUpdate(page, ArrayBuffer[ServletContextHandler]()) - .append(renderHandler) + val handlers = pageToHandlers.getOrElseUpdate(page, ArrayBuffer[ServletContextHandler]()) + handlers += renderHandler } /** Attach a handler to this UI. 
*/ diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala index 09435281194b5..948cc3b099b18 100644 --- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala +++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala @@ -184,7 +184,7 @@ class ExternalAppendOnlyMap[K, V, C]( override protected[this] def spill(collection: SizeTracker): Unit = { val inMemoryIterator = currentMap.destructiveSortedIterator(keyComparator) val diskMapIterator = spillMemoryIteratorToDisk(inMemoryIterator) - spilledMaps.append(diskMapIterator) + spilledMaps += diskMapIterator } /** @@ -215,7 +215,7 @@ class ExternalAppendOnlyMap[K, V, C]( // Flush the disk writer's contents to disk, and update relevant variables def flush(): Unit = { val segment = writer.commitAndGet() - batchSizes.append(segment.length) + batchSizes += segment.length _diskBytesSpilled += segment.length objectsWritten = 0 } diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala index 3579918fac45f..176f84fa2a0d2 100644 --- a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala +++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala @@ -238,7 +238,7 @@ private[spark] class ExternalSorter[K, V, C]( override protected[this] def spill(collection: WritablePartitionedPairCollection[K, C]): Unit = { val inMemoryIterator = collection.destructiveSortedWritablePartitionedIterator(comparator) val spillFile = spillMemoryIteratorToDisk(inMemoryIterator) - spills.append(spillFile) + spills += spillFile } /** @@ -285,7 +285,7 @@ private[spark] class ExternalSorter[K, V, C]( // The writer is committed at the end of this process. 
def flush(): Unit = { val segment = writer.commitAndGet() - batchSizes.append(segment.length) + batchSizes += segment.length _diskBytesSpilled += segment.length objectsWritten = 0 } @@ -796,7 +796,7 @@ private[spark] class ExternalSorter[K, V, C]( logInfo(s"Task ${context.taskAttemptId} force spilling in-memory map to disk and " + s" it will release ${org.apache.spark.util.Utils.bytesToString(getUsed())} memory") val spillFile = spillMemoryIteratorToDisk(inMemoryIterator) - forceSpillFiles.append(spillFile) + forceSpillFiles += spillFile val spillReader = new SpillReader(spillFile) nextUpstream = (0 until numPartitions).iterator.flatMap { p => val iterator = spillReader.readNextPartition() diff --git a/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala b/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala index 9ecf49b59898b..c9b3d657c2b9d 100644 --- a/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala +++ b/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala @@ -305,7 +305,7 @@ private[deploy] object IvyTestUtils { val allFiles = ArrayBuffer[(String, File)](javaFile) if (withPython) { val pythonFile = createPythonFile(root) - allFiles.append((pythonFile.getName, pythonFile)) + allFiles += Tuple2(pythonFile.getName, pythonFile) } if (withR) { val rFiles = createRFiles(root, className, artifact.groupId) diff --git a/core/src/test/scala/org/apache/spark/memory/MemoryManagerSuite.scala b/core/src/test/scala/org/apache/spark/memory/MemoryManagerSuite.scala index 38bf7e5e5aec3..eb2b3ffd1509a 100644 --- a/core/src/test/scala/org/apache/spark/memory/MemoryManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/memory/MemoryManagerSuite.scala @@ -118,8 +118,7 @@ private[memory] trait MemoryManagerSuite extends SparkFunSuite with BeforeAndAft if (numBytesToFree <= mm.storageMemoryUsed) { // We can evict enough blocks to fulfill the request for space mm.releaseStorageMemory(numBytesToFree, MemoryMode.ON_HEAP) - evictedBlocks.append( - (null, BlockStatus(StorageLevel.MEMORY_ONLY, numBytesToFree, 0L))) + evictedBlocks += Tuple2(null, BlockStatus(StorageLevel.MEMORY_ONLY, numBytesToFree, 0L)) numBytesToFree } else { // No blocks were evicted because eviction would not free enough space. 
diff --git a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala index ed9428820ff6c..442941685f1ae 100644 --- a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala +++ b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala @@ -107,7 +107,7 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte val blockId = new TempShuffleBlockId(UUID.randomUUID) val file = new File(tempDir, blockId.name) blockIdToFileMap.put(blockId, file) - temporaryFilesCreated.append(file) + temporaryFilesCreated += file (blockId, file) } }) diff --git a/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala b/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala index c342b68f46656..2695295d451d5 100644 --- a/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala @@ -150,12 +150,12 @@ class SizeEstimatorSuite val buf = new ArrayBuffer[DummyString]() for (i <- 0 until 5000) { - buf.append(new DummyString(new Array[Char](10))) + buf += new DummyString(new Array[Char](10)) } assertResult(340016)(SizeEstimator.estimate(buf.toArray)) for (i <- 0 until 5000) { - buf.append(new DummyString(arr)) + buf += new DummyString(arr) } assertResult(683912)(SizeEstimator.estimate(buf.toArray)) diff --git a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala index 726b5d8ec3d3b..35acb7b09f12b 100644 --- a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala +++ b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala @@ -108,7 +108,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { } else { val missing = topicAndPartitions.diff(leaderMap.keySet) val err = new Err - err.append(new SparkException(s"Couldn't find leaders for ${missing}")) + err += new SparkException(s"Couldn't find leaders for ${missing}") Left(err) } } @@ -139,7 +139,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { respErrs.foreach { m => val cause = ErrorMapping.exceptionFor(m.errorCode) val msg = s"Error getting partition metadata for '${m.topic}'. Does the topic exist?" 
- errs.append(new SparkException(msg, cause)) + errs += new SparkException(msg, cause) } } } @@ -205,11 +205,11 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { LeaderOffset(consumer.host, consumer.port, off) } } else { - errs.append(new SparkException( - s"Empty offsets for ${tp}, is ${before} before log beginning?")) + errs += new SparkException( + s"Empty offsets for ${tp}, is ${before} before log beginning?") } } else { - errs.append(ErrorMapping.exceptionFor(por.error)) + errs += ErrorMapping.exceptionFor(por.error) } } } @@ -218,7 +218,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { } } val missing = topicAndPartitions.diff(result.keySet) - errs.append(new SparkException(s"Couldn't find leader offsets for ${missing}")) + errs += new SparkException(s"Couldn't find leader offsets for ${missing}") Left(errs) } } @@ -274,7 +274,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { if (ome.error == ErrorMapping.NoError) { result += tp -> ome } else { - errs.append(ErrorMapping.exceptionFor(ome.error)) + errs += ErrorMapping.exceptionFor(ome.error) } } } @@ -283,7 +283,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { } } val missing = topicAndPartitions.diff(result.keySet) - errs.append(new SparkException(s"Couldn't find consumer offsets for ${missing}")) + errs += new SparkException(s"Couldn't find consumer offsets for ${missing}") Left(errs) } @@ -330,7 +330,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { if (err == ErrorMapping.NoError) { result += tp -> err } else { - errs.append(ErrorMapping.exceptionFor(err)) + errs += ErrorMapping.exceptionFor(err) } } } @@ -339,7 +339,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { } } val missing = topicAndPartitions.diff(result.keySet) - errs.append(new SparkException(s"Couldn't set offsets for ${missing}")) + errs += new SparkException(s"Couldn't set offsets for ${missing}") Left(errs) } @@ -353,7 +353,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable { fn(consumer) } catch { case NonFatal(e) => - errs.append(e) + errs += e } finally { if (consumer != null) { consumer.close() diff --git a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala index fcf39f63915f7..7a706ab256f82 100644 --- a/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala +++ b/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala @@ -236,16 +236,16 @@ class MesosFineGrainedSchedulerBackendSuite mesosOffers.add(createOffer(3, minMem, minCpu)) val expectedWorkerOffers = new ArrayBuffer[WorkerOffer](2) - expectedWorkerOffers.append(new WorkerOffer( + expectedWorkerOffers += new WorkerOffer( mesosOffers.get(0).getSlaveId.getValue, mesosOffers.get(0).getHostname, (minCpu - backend.mesosExecutorCores).toInt - )) - expectedWorkerOffers.append(new WorkerOffer( + ) + expectedWorkerOffers += new WorkerOffer( mesosOffers.get(2).getSlaveId.getValue, mesosOffers.get(2).getHostname, (minCpu - backend.mesosExecutorCores).toInt - )) + ) val taskDesc = new TaskDescription(1L, 0, "s1", "n1", 0, ByteBuffer.wrap(new Array[Byte](0))) 
when(taskScheduler.resourceOffers(expectedWorkerOffers)).thenReturn(Seq(Seq(taskDesc))) when(taskScheduler.CPUS_PER_TASK).thenReturn(2) @@ -339,11 +339,11 @@ class MesosFineGrainedSchedulerBackendSuite val backend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master") val expectedWorkerOffers = new ArrayBuffer[WorkerOffer](1) - expectedWorkerOffers.append(new WorkerOffer( + expectedWorkerOffers += new WorkerOffer( mesosOffers.get(0).getSlaveId.getValue, mesosOffers.get(0).getHostname, 2 // Deducting 1 for executor - )) + ) val taskDesc = new TaskDescription(1L, 0, "s1", "n1", 0, ByteBuffer.wrap(new Array[Byte](0))) when(taskScheduler.resourceOffers(expectedWorkerOffers)).thenReturn(Seq(Seq(taskDesc))) diff --git a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala index 207f662e33bce..4d4b06b0952bd 100644 --- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala +++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala @@ -991,7 +991,7 @@ object Matrices { val data = new ArrayBuffer[(Int, Int, Double)]() dnMat.foreachActive { (i, j, v) => if (v != 0.0) { - data.append((i, j + startCol, v)) + data += Tuple3(i, j + startCol, v) } } startCol += nCols @@ -1061,7 +1061,7 @@ object Matrices { val data = new ArrayBuffer[(Int, Int, Double)]() dnMat.foreachActive { (i, j, v) => if (v != 0.0) { - data.append((i + startRow, j, v)) + data += Tuple3(i + startRow, j, v) } } startRow += nRows diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala index 8659cea4b8ebc..6642999a2121f 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala @@ -1128,7 +1128,7 @@ object Matrices { val data = new ArrayBuffer[(Int, Int, Double)]() dnMat.foreachActive { (i, j, v) => if (v != 0.0) { - data.append((i, j + startCol, v)) + data += Tuple3(i, j + startCol, v) } } startCol += nCols @@ -1198,7 +1198,7 @@ object Matrices { val data = new ArrayBuffer[(Int, Int, Double)]() dnMat.foreachActive { (i, j, v) => if (v != 0.0) { - data.append((i + startRow, j, v)) + data += Tuple3(i + startRow, j, v) } } startRow += nRows diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala index 9782350587061..ff1068417d94f 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala @@ -257,7 +257,7 @@ class BlockMatrix @Since("1.3.0") ( val colStart = blockColIndex.toLong * colsPerBlock val entryValues = new ArrayBuffer[MatrixEntry]() mat.foreachActive { (i, j, v) => - if (v != 0.0) entryValues.append(new MatrixEntry(rowStart + i, colStart + j, v)) + if (v != 0.0) entryValues += new MatrixEntry(rowStart + i, colStart + j, v) } entryValues } diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala index f372355005656..123e0bb3e607a 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala @@ -252,7 +252,7 @@ object GradientDescent extends Logging { * lossSum 
is computed using the weights from the previous iteration * and regVal is the regularization value computed in the previous iteration as well. */ - stochasticLossHistory.append(lossSum / miniBatchSize + regVal) + stochasticLossHistory += lossSum / miniBatchSize + regVal val update = updater.compute( weights, Vectors.fromBreeze(gradientSum / miniBatchSize.toDouble), stepSize, i, regParam) diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/StreamingLogisticRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/StreamingLogisticRegressionSuite.scala index bf98bf2f5fde5..5f797a60f09e6 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/classification/StreamingLogisticRegressionSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/StreamingLogisticRegressionSuite.scala @@ -95,7 +95,7 @@ class StreamingLogisticRegressionSuite extends SparkFunSuite with TestSuiteBase // (we add a count to ensure the result is a DStream) ssc = setupStreams(input, (inputDStream: DStream[LabeledPoint]) => { model.trainOn(inputDStream) - inputDStream.foreachRDD(x => history.append(math.abs(model.latestModel().weights(0) - B))) + inputDStream.foreachRDD(x => history += math.abs(model.latestModel().weights(0) - B)) inputDStream.count() }) runStreams(ssc, numBatches, numBatches) diff --git a/mllib/src/test/scala/org/apache/spark/mllib/regression/StreamingLinearRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/regression/StreamingLinearRegressionSuite.scala index 34c07ed170816..eaeaa3fc1e68d 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/regression/StreamingLinearRegressionSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/regression/StreamingLinearRegressionSuite.scala @@ -109,7 +109,7 @@ class StreamingLinearRegressionSuite extends SparkFunSuite with TestSuiteBase { // (we add a count to ensure the result is a DStream) ssc = setupStreams(input, (inputDStream: DStream[LabeledPoint]) => { model.trainOn(inputDStream) - inputDStream.foreachRDD(x => history.append(math.abs(model.latestModel().weights(0) - 10.0))) + inputDStream.foreachRDD(x => history += math.abs(model.latestModel().weights(0) - 10.0)) inputDStream.count() }) runStreams(ssc, numBatches, numBatches) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala index 644a5b28a2151..f93e5736de401 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala @@ -55,7 +55,7 @@ class ExpressionSet protected( protected def add(e: Expression): Unit = { if (!baseSet.contains(e.canonicalized)) { baseSet.add(e.canonicalized) - originals.append(e) + originals += e } } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala index 28064a5cfa2ee..f982c222af5f0 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala @@ -596,12 +596,12 @@ class CodegenContext { // also not be too small, or it will have many function calls (for wide table), see the // results in BenchmarkWideTable. 
if (blockBuilder.length > 1024) { - blocks.append(blockBuilder.toString()) + blocks += blockBuilder.toString() blockBuilder.clear() } blockBuilder.append(code) } - blocks.append(blockBuilder.toString()) + blocks += blockBuilder.toString() if (blocks.length == 1) { // inline execution if only one block diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala index 493b5faf9e50a..7512ace188569 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala @@ -59,7 +59,7 @@ class QuantileSummaries( * @param x the new observation to insert into the summary */ def insert(x: Double): QuantileSummaries = { - headSampled.append(x) + headSampled += x if (headSampled.size >= defaultHeadSize) { this.withHeadBufferInserted } else { @@ -90,7 +90,7 @@ class QuantileSummaries( val currentSample = sorted(opsIdx) // Add all the samples before the next observation. while(sampleIdx < sampled.size && sampled(sampleIdx).value <= currentSample) { - newSamples.append(sampled(sampleIdx)) + newSamples += sampled(sampleIdx) sampleIdx += 1 } @@ -104,13 +104,13 @@ class QuantileSummaries( } val tuple = Stats(currentSample, 1, delta) - newSamples.append(tuple) + newSamples += tuple opsIdx += 1 } // Add all the remaining existing samples while(sampleIdx < sampled.size) { - newSamples.append(sampled(sampleIdx)) + newSamples += sampled(sampleIdx) sampleIdx += 1 } new QuantileSummaries(compressThreshold, relativeError, newSamples.toArray, currentCount) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala index 6a188e7e55126..6246380dbeb9b 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/trees/TreeNodeSuite.scala @@ -82,8 +82,8 @@ class TreeNodeSuite extends SparkFunSuite { val expected = Seq("+", "1", "*", "2", "-", "3", "4") val expression = Add(Literal(1), Multiply(Literal(2), Subtract(Literal(3), Literal(4)))) expression transformDown { - case b: BinaryOperator => actual.append(b.symbol); b - case l: Literal => actual.append(l.toString); l + case b: BinaryOperator => actual += b.symbol; b + case l: Literal => actual += l.toString; l } assert(expected === actual) @@ -94,8 +94,8 @@ class TreeNodeSuite extends SparkFunSuite { val expected = Seq("1", "2", "3", "4", "-", "*", "+") val expression = Add(Literal(1), Multiply(Literal(2), Subtract(Literal(3), Literal(4)))) expression transformUp { - case b: BinaryOperator => actual.append(b.symbol); b - case l: Literal => actual.append(l.toString); l + case b: BinaryOperator => actual += b.symbol; b + case l: Literal => actual += l.toString; l } assert(expected === actual) @@ -134,8 +134,8 @@ class TreeNodeSuite extends SparkFunSuite { val expected = Seq("1", "2", "3", "4", "-", "*", "+") val expression = Add(Literal(1), Multiply(Literal(2), Subtract(Literal(3), Literal(4)))) expression foreachUp { - case b: BinaryOperator => actual.append(b.symbol); - case l: Literal => actual.append(l.toString); + case b: BinaryOperator => actual += b.symbol; + case l: Literal => actual += l.toString; } assert(expected === actual) diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala index 1a8d0e310aec0..9597bdf34b714 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala @@ -448,7 +448,7 @@ case class FileSourceScanExec( FilePartition( partitions.size, currentFiles.toArray.toSeq) // Copy to a new Array. - partitions.append(newPartition) + partitions += newPartition } currentFiles.clear() currentSize = 0 @@ -462,7 +462,7 @@ case class FileSourceScanExec( } // Add the given file to the current partition. currentSize += file.length + openCostInBytes - currentFiles.append(file) + currentFiles += file } closePartition() diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WindowExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WindowExec.scala index b60f17cc17a26..9d006d21d9440 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WindowExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WindowExec.scala @@ -167,8 +167,8 @@ case class WindowExec( val key = (tpe, fr.frameType, FrameBoundary(fr.frameStart), FrameBoundary(fr.frameEnd)) val (es, fns) = framedFunctions.getOrElseUpdate( key, (ArrayBuffer.empty[Expression], ArrayBuffer.empty[Expression])) - es.append(e) - fns.append(fn) + es += e + fns += fn } // Collect all valid window functions and group them by their frame. diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala index e37f0c77795c3..5052c4d50c5ed 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala @@ -77,7 +77,7 @@ case class MemoryStream[A : Encoder](id: Int, sqlContext: SQLContext) logDebug(s"Adding ds: $ds") this.synchronized { currentOffset = currentOffset + 1 - batches.append(ds) + batches += ds currentOffset } } @@ -155,7 +155,7 @@ class MemorySink(val schema: StructType, outputMode: OutputMode) extends Sink wi case InternalOutputModes.Complete => batches.clear() - batches.append(AddedData(batchId, data.collect())) + batches += AddedData(batchId, data.collect()) case _ => throw new IllegalArgumentException( diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/BatchedWriteAheadLog.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/BatchedWriteAheadLog.scala index 71f3304f1ba73..35f0166ed0cf2 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/util/BatchedWriteAheadLog.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/util/BatchedWriteAheadLog.scala @@ -157,7 +157,7 @@ private[util] class BatchedWriteAheadLog(val wrappedLog: WriteAheadLog, conf: Sp /** Write all the records in the buffer to the write ahead log. 
*/ private def flushRecords(): Unit = { try { - buffer.append(walWriteQueue.take()) + buffer += walWriteQueue.take() val numBatched = walWriteQueue.drainTo(buffer.asJava) + 1 logDebug(s"Received $numBatched records from queue") } catch { diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala index 0daf1ea0bc8b4..2f4b498b3ca74 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala @@ -332,14 +332,14 @@ private[yarn] class YarnAllocator( val newLocalityRequests = new mutable.ArrayBuffer[ContainerRequest] containerLocalityPreferences.foreach { case ContainerLocalityPreferences(nodes, racks) if nodes != null => - newLocalityRequests.append(createContainerRequest(resource, nodes, racks)) + newLocalityRequests += createContainerRequest(resource, nodes, racks) case _ => } if (availableContainers >= newLocalityRequests.size) { // more containers are available than needed for locality, fill in requests for any host for (i <- 0 until (availableContainers - newLocalityRequests.size)) { - newLocalityRequests.append(createContainerRequest(resource, null, null)) + newLocalityRequests += createContainerRequest(resource, null, null) } } else { val numToCancel = newLocalityRequests.size - availableContainers From 6b41195bca65de6236168d96758f93b85f1dd7ca Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Wed, 7 Sep 2016 19:24:03 +0900 Subject: [PATCH 229/270] [SPARK-17339][SPARKR][CORE] Fix some R tests and use Path.toUri in SparkContext for Windows paths in SparkR ## What changes were proposed in this pull request? This PR fixes the Windows path issues in several APIs. Please refer https://issues.apache.org/jira/browse/SPARK-17339 for more details. ## How was this patch tested? Tests via AppVeyor CI - https://ci.appveyor.com/project/HyukjinKwon/spark/build/82-SPARK-17339-fix-r Also, manually, ![2016-09-06 3 14 38](https://cloud.githubusercontent.com/assets/6477701/18263406/b93a98be-7444-11e6-9521-b28ee65a4771.png) Author: hyukjinkwon Closes #14960 from HyukjinKwon/SPARK-17339. 
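To make the Windows-path problem concrete, here is a small sketch (not Spark code; the path and object name are invented). A raw Windows path is not a valid `java.net.URI`, which is why the old `FileSystem.get(new URI(path), hadoopConfiguration)` hack in the SparkContext diff below could fail before hdfs-site.xml was ever loaded; the replacement `FileSystem.getLocal(hadoopConfiguration)` does not need to parse a user-supplied path at all.

```scala
import java.net.{URI, URISyntaxException}

// Illustration only: backslashes are illegal URI characters, so constructing a URI
// straight from a Windows path throws before any Hadoop code runs.
object WindowsPathUriSketch {
  def main(args: Array[String]): Unit = {
    val windowsPath = """C:\Users\spark\data\people.json"""
    try {
      new URI(windowsPath)
      println("parsed (unexpected for a raw Windows path)")
    } catch {
      case e: URISyntaxException =>
        println(s"not a valid URI: ${e.getMessage}")
    }
  }
}
```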
--- R/pkg/inst/tests/testthat/test_mllib.R | 14 ++++++++++---- .../main/scala/org/apache/spark/SparkContext.scala | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index ca25f2c7e8263..ac896cfbcfff7 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -22,6 +22,11 @@ context("MLlib functions") # Tests for MLlib functions in SparkR sparkSession <- sparkR.session(enableHiveSupport = FALSE) +absoluteSparkPath <- function(x) { + sparkHome <- sparkR.conf("spark.home") + file.path(sparkHome, x) +} + test_that("formula of spark.glm", { training <- suppressWarnings(createDataFrame(iris)) # directly calling the spark API @@ -354,7 +359,8 @@ test_that("spark.kmeans", { }) test_that("spark.mlp", { - df <- read.df("data/mllib/sample_multiclass_classification_data.txt", source = "libsvm") + df <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"), + source = "libsvm") model <- spark.mlp(df, blockSize = 128, layers = c(4, 5, 4, 3), solver = "l-bfgs", maxIter = 100, tol = 0.5, stepSize = 1, seed = 1) @@ -616,7 +622,7 @@ test_that("spark.gaussianMixture", { }) test_that("spark.lda with libsvm", { - text <- read.df("data/mllib/sample_lda_libsvm_data.txt", source = "libsvm") + text <- read.df(absoluteSparkPath("data/mllib/sample_lda_libsvm_data.txt"), source = "libsvm") model <- spark.lda(text, optimizer = "em") stats <- summary(model, 10) @@ -652,7 +658,7 @@ test_that("spark.lda with libsvm", { }) test_that("spark.lda with text input", { - text <- read.text("data/mllib/sample_lda_data.txt") + text <- read.text(absoluteSparkPath("data/mllib/sample_lda_data.txt")) model <- spark.lda(text, optimizer = "online", features = "value") stats <- summary(model) @@ -688,7 +694,7 @@ test_that("spark.lda with text input", { }) test_that("spark.posterior and spark.perplexity", { - text <- read.text("data/mllib/sample_lda_data.txt") + text <- read.text(absoluteSparkPath("data/mllib/sample_lda_data.txt")) model <- spark.lda(text, features = "value", k = 3) # Assert perplexities are equal diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 744d5d0f7aa8e..4aa795a58a28e 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -992,7 +992,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. - FileSystem.get(new URI(path), hadoopConfiguration) + FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) @@ -1081,7 +1081,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. 
- FileSystem.get(new URI(path), hadoopConfiguration) + FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves From 6f4aeccf8cdfe0b5bdf77165711a9062a52d9d90 Mon Sep 17 00:00:00 2001 From: Daoyuan Wang Date: Wed, 7 Sep 2016 13:01:27 +0200 Subject: [PATCH 230/270] [SPARK-17427][SQL] function SIZE should return -1 when parameter is null ## What changes were proposed in this pull request? `select size(null)` returns -1 in Hive. In order to be compatible, we should return `-1`. ## How was this patch tested? unit test in `CollectionFunctionsSuite` and `DataFrameFunctionsSuite`. Author: Daoyuan Wang Closes #14991 from adrian-wang/size. --- .../expressions/collectionOperations.scala | 24 ++++++++++++++----- .../CollectionFunctionsSuite.scala | 4 ++-- .../spark/sql/DataFrameFunctionsSuite.scala | 14 ++++++----- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala index 2e8ea1107cee0..c0200299376ca 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala @@ -18,13 +18,14 @@ package org.apache.spark.sql.catalyst.expressions import java.util.Comparator +import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.TypeCheckResult import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodegenFallback, ExprCode} import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData, MapData} import org.apache.spark.sql.types._ /** - * Given an array or map, returns its size. + * Given an array or map, returns its size. Returns -1 if null. */ @ExpressionDescription( usage = "_FUNC_(expr) - Returns the size of an array or a map.", @@ -32,14 +33,25 @@ import org.apache.spark.sql.types._ case class Size(child: Expression) extends UnaryExpression with ExpectsInputTypes { override def dataType: DataType = IntegerType override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(ArrayType, MapType)) - - override def nullSafeEval(value: Any): Int = child.dataType match { - case _: ArrayType => value.asInstanceOf[ArrayData].numElements() - case _: MapType => value.asInstanceOf[MapData].numElements() + override def nullable: Boolean = false + + override def eval(input: InternalRow): Any = { + val value = child.eval(input) + if (value == null) { + -1 + } else child.dataType match { + case _: ArrayType => value.asInstanceOf[ArrayData].numElements() + case _: MapType => value.asInstanceOf[MapData].numElements() + } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { - nullSafeCodeGen(ctx, ev, c => s"${ev.value} = ($c).numElements();") + val childGen = child.genCode(ctx) + ev.copy(code = s""" + boolean ${ev.isNull} = false; + ${childGen.code} + ${ctx.javaType(dataType)} ${ev.value} = ${childGen.isNull} ? 
-1 : + (${childGen.value}).numElements();""", isNull = "false") } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionFunctionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionFunctionsSuite.scala index a5f784fdcc13c..c76dad208ea1e 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionFunctionsSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionFunctionsSuite.scala @@ -40,8 +40,8 @@ class CollectionFunctionsSuite extends SparkFunSuite with ExpressionEvalHelper { checkEvaluation(Size(m1), 0) checkEvaluation(Size(m2), 1) - checkEvaluation(Literal.create(null, MapType(StringType, StringType)), null) - checkEvaluation(Literal.create(null, ArrayType(StringType)), null) + checkEvaluation(Size(Literal.create(null, MapType(StringType, StringType))), -1) + checkEvaluation(Size(Literal.create(null, ArrayType(StringType))), -1) } test("MapKeys/MapValues") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala index 0f6c49e759590..45db61515e9b6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala @@ -324,15 +324,16 @@ class DataFrameFunctionsSuite extends QueryTest with SharedSQLContext { val df = Seq( (Seq[Int](1, 2), "x"), (Seq[Int](), "y"), - (Seq[Int](1, 2, 3), "z") + (Seq[Int](1, 2, 3), "z"), + (null, "empty") ).toDF("a", "b") checkAnswer( df.select(size($"a")), - Seq(Row(2), Row(0), Row(3)) + Seq(Row(2), Row(0), Row(3), Row(-1)) ) checkAnswer( df.selectExpr("size(a)"), - Seq(Row(2), Row(0), Row(3)) + Seq(Row(2), Row(0), Row(3), Row(-1)) ) } @@ -340,15 +341,16 @@ class DataFrameFunctionsSuite extends QueryTest with SharedSQLContext { val df = Seq( (Map[Int, Int](1 -> 1, 2 -> 2), "x"), (Map[Int, Int](), "y"), - (Map[Int, Int](1 -> 1, 2 -> 2, 3 -> 3), "z") + (Map[Int, Int](1 -> 1, 2 -> 2, 3 -> 3), "z"), + (null, "empty") ).toDF("a", "b") checkAnswer( df.select(size($"a")), - Seq(Row(2), Row(0), Row(3)) + Seq(Row(2), Row(0), Row(3), Row(-1)) ) checkAnswer( df.selectExpr("size(a)"), - Seq(Row(2), Row(0), Row(3)) + Seq(Row(2), Row(0), Row(3), Row(-1)) ) } From 76ad89e9241fb2dece95dd445661dd95ee4ef699 Mon Sep 17 00:00:00 2001 From: Srinivasa Reddy Vundela Date: Wed, 7 Sep 2016 12:41:03 +0100 Subject: [PATCH 231/270] [MINOR][SQL] Fixing the typo in unit test ## What changes were proposed in this pull request? Fixing the typo in the unit test of CodeGenerationSuite.scala ## How was this patch tested? Ran the unit test after fixing the typo and it passes Author: Srinivasa Reddy Vundela Closes #14989 from vundela/typo_fix. 
--- .../spark/sql/catalyst/expressions/CodeGenerationSuite.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala index 8ea8f61150844..0532cf51136da 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala @@ -58,8 +58,8 @@ class CodeGenerationSuite extends SparkFunSuite with ExpressionEvalHelper { GenerateOrdering.generate(Add(Literal(123), Literal(1)).asc :: Nil) assert(CodegenMetrics.METRIC_COMPILATION_TIME.getCount() == startCount1 + 1) assert(CodegenMetrics.METRIC_SOURCE_CODE_SIZE.getCount() == startCount2 + 1) - assert(CodegenMetrics.METRIC_GENERATED_CLASS_BYTECODE_SIZE.getCount() > startCount1) - assert(CodegenMetrics.METRIC_GENERATED_METHOD_BYTECODE_SIZE.getCount() > startCount1) + assert(CodegenMetrics.METRIC_GENERATED_CLASS_BYTECODE_SIZE.getCount() > startCount3) + assert(CodegenMetrics.METRIC_GENERATED_METHOD_BYTECODE_SIZE.getCount() > startCount4) } test("SPARK-8443: split wide projections into blocks due to JVM code size limit") { From 649fa4bf1d6fc9271ae56b6891bc93ebf57858d1 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 7 Sep 2016 12:33:50 -0700 Subject: [PATCH 232/270] [SPARK-17370] Shuffle service files not invalidated when a slave is lost ## What changes were proposed in this pull request? DAGScheduler invalidates shuffle files when an executor loss event occurs, but not when the external shuffle service is enabled. This is because when shuffle service is on, the shuffle file lifetime can exceed the executor lifetime. However, it also doesn't invalidate shuffle files when the shuffle service itself is lost (due to whole slave loss). This can cause long hangs when slaves are lost since the file loss is not detected until a subsequent stage attempts to read the shuffle files. The proposed fix is to also invalidate shuffle files when an executor is lost due to a `SlaveLost` event. ## How was this patch tested? Unit tests, also verified on an actual cluster that slave loss invalidates shuffle files immediately as expected. cc mateiz Author: Eric Liang Closes #14931 from ericl/sc-4439. 
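To make the new failure handling concrete, here is a minimal, self-contained Scala sketch of the decision this patch wires through the scheduler. The `LossReason` types below are simplified stand-ins for Spark's `ExecutorLossReason` hierarchy, not the real classes; the intent is only to illustrate when shuffle output must be treated as lost.

```scala
// Simplified stand-ins for illustration only; the real types are
// ExecutorLossReason, SlaveLost and ExecutorKilled in org.apache.spark.scheduler.
sealed trait LossReason
case class SlaveLost(message: String = "Slave lost", workerLost: Boolean = false) extends LossReason
case object ExecutorKilled extends LossReason

// Shuffle output survives an executor loss only if an external shuffle service
// is enabled and the worker hosting that service is still alive.
def shuffleFilesLost(reason: LossReason, externalShuffleServiceEnabled: Boolean): Boolean =
  reason match {
    case SlaveLost(_, true) => true   // whole slave lost, shuffle service lost with it
    case _                  => !externalShuffleServiceEnabled
  }

// Mirrors the test matrix added to DAGSchedulerSuite in this patch.
assert(!shuffleFilesLost(SlaveLost("", workerLost = false), externalShuffleServiceEnabled = true))
assert(shuffleFilesLost(SlaveLost("", workerLost = true), externalShuffleServiceEnabled = true))
assert(shuffleFilesLost(ExecutorKilled, externalShuffleServiceEnabled = false))
```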
--- .../apache/spark/deploy/DeployMessage.scala | 2 +- .../deploy/client/StandaloneAppClient.scala | 4 +- .../client/StandaloneAppClientListener.scala | 3 +- .../apache/spark/deploy/master/Master.scala | 4 +- .../apache/spark/scheduler/DAGScheduler.scala | 24 +++++--- .../spark/scheduler/DAGSchedulerEvent.scala | 3 +- .../spark/scheduler/ExecutorLossReason.scala | 6 +- .../spark/scheduler/TaskSchedulerImpl.scala | 9 ++- .../cluster/StandaloneSchedulerBackend.scala | 5 +- .../spark/deploy/client/AppClientSuite.scala | 3 +- .../spark/scheduler/DAGSchedulerSuite.scala | 58 ++++++++++++++++--- .../spark/scheduler/TaskSetManagerSuite.scala | 2 +- 12 files changed, 92 insertions(+), 31 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala b/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala index 34c0696bfc4e5..ac09c6c497f8b 100644 --- a/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala +++ b/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala @@ -135,7 +135,7 @@ private[deploy] object DeployMessages { } case class ExecutorUpdated(id: Int, state: ExecutorState, message: Option[String], - exitStatus: Option[Int]) + exitStatus: Option[Int], workerLost: Boolean) case class ApplicationRemoved(message: String) diff --git a/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala b/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala index 7a60f08aadd4f..93f58ce63799f 100644 --- a/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala +++ b/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala @@ -174,12 +174,12 @@ private[spark] class StandaloneAppClient( cores)) listener.executorAdded(fullId, workerId, hostPort, cores, memory) - case ExecutorUpdated(id, state, message, exitStatus) => + case ExecutorUpdated(id, state, message, exitStatus, workerLost) => val fullId = appId + "/" + id val messageText = message.map(s => " (" + s + ")").getOrElse("") logInfo("Executor updated: %s is now %s%s".format(fullId, state, messageText)) if (ExecutorState.isFinished(state)) { - listener.executorRemoved(fullId, message.getOrElse(""), exitStatus) + listener.executorRemoved(fullId, message.getOrElse(""), exitStatus, workerLost) } case MasterChanged(masterRef, masterWebUiUrl) => diff --git a/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClientListener.scala b/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClientListener.scala index 370b16ce4213a..64255ec92b72a 100644 --- a/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClientListener.scala +++ b/core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClientListener.scala @@ -36,5 +36,6 @@ private[spark] trait StandaloneAppClientListener { def executorAdded( fullId: String, workerId: String, hostPort: String, cores: Int, memory: Int): Unit - def executorRemoved(fullId: String, message: String, exitStatus: Option[Int]): Unit + def executorRemoved( + fullId: String, message: String, exitStatus: Option[Int], workerLost: Boolean): Unit } diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala index dfffc47703ab4..dcf41638e7994 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala @@ -252,7 +252,7 @@ private[deploy] class Master( appInfo.resetRetryCount() } - 
exec.application.driver.send(ExecutorUpdated(execId, state, message, exitStatus)) + exec.application.driver.send(ExecutorUpdated(execId, state, message, exitStatus, false)) if (ExecutorState.isFinished(state)) { // Remove this executor from the worker and app @@ -766,7 +766,7 @@ private[deploy] class Master( for (exec <- worker.executors.values) { logInfo("Telling app of lost executor: " + exec.id) exec.application.driver.send(ExecutorUpdated( - exec.id, ExecutorState.LOST, Some("worker lost"), None)) + exec.id, ExecutorState.LOST, Some("worker lost"), None, workerLost = true)) exec.state = ExecutorState.LOST exec.application.removeExecutor(exec) } diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala index 4eb7c81f9e8cc..dd47c1dbbec06 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala @@ -239,8 +239,8 @@ class DAGScheduler( /** * Called by TaskScheduler implementation when an executor fails. */ - def executorLost(execId: String): Unit = { - eventProcessLoop.post(ExecutorLost(execId)) + def executorLost(execId: String, reason: ExecutorLossReason): Unit = { + eventProcessLoop.post(ExecutorLost(execId, reason)) } /** @@ -1281,7 +1281,7 @@ class DAGScheduler( // TODO: mark the executor as failed only if there were lots of fetch failures on it if (bmAddress != null) { - handleExecutorLost(bmAddress.executorId, fetchFailed = true, Some(task.epoch)) + handleExecutorLost(bmAddress.executorId, filesLost = true, Some(task.epoch)) } } @@ -1306,15 +1306,16 @@ class DAGScheduler( * modify the scheduler's internal state. Use executorLost() to post a loss event from outside. * * We will also assume that we've lost all shuffle blocks associated with the executor if the - * executor serves its own blocks (i.e., we're not using external shuffle) OR a FetchFailed - * occurred, in which case we presume all shuffle data related to this executor to be lost. + * executor serves its own blocks (i.e., we're not using external shuffle), the entire slave + * is lost (likely including the shuffle service), or a FetchFailed occurred, in which case we + * presume all shuffle data related to this executor to be lost. * * Optionally the epoch during which the failure was caught can be passed to avoid allowing * stray fetch failures from possibly retriggering the detection of a node as lost. 
*/ private[scheduler] def handleExecutorLost( execId: String, - fetchFailed: Boolean, + filesLost: Boolean, maybeEpoch: Option[Long] = None) { val currentEpoch = maybeEpoch.getOrElse(mapOutputTracker.getEpoch) if (!failedEpoch.contains(execId) || failedEpoch(execId) < currentEpoch) { @@ -1322,7 +1323,8 @@ class DAGScheduler( logInfo("Executor lost: %s (epoch %d)".format(execId, currentEpoch)) blockManagerMaster.removeExecutor(execId) - if (!env.blockManager.externalShuffleServiceEnabled || fetchFailed) { + if (filesLost || !env.blockManager.externalShuffleServiceEnabled) { + logInfo("Shuffle files lost for executor: %s (epoch %d)".format(execId, currentEpoch)) // TODO: This will be really slow if we keep accumulating shuffle map stages for ((shuffleId, stage) <- shuffleIdToMapStage) { stage.removeOutputsOnExecutor(execId) @@ -1624,8 +1626,12 @@ private[scheduler] class DAGSchedulerEventProcessLoop(dagScheduler: DAGScheduler case ExecutorAdded(execId, host) => dagScheduler.handleExecutorAdded(execId, host) - case ExecutorLost(execId) => - dagScheduler.handleExecutorLost(execId, fetchFailed = false) + case ExecutorLost(execId, reason) => + val filesLost = reason match { + case SlaveLost(_, true) => true + case _ => false + } + dagScheduler.handleExecutorLost(execId, filesLost) case BeginEvent(task, taskInfo) => dagScheduler.handleBeginEvent(task, taskInfo) diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerEvent.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerEvent.scala index 8c761124824ae..03781a2a2b56c 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerEvent.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerEvent.scala @@ -77,7 +77,8 @@ private[scheduler] case class CompletionEvent( private[scheduler] case class ExecutorAdded(execId: String, host: String) extends DAGSchedulerEvent -private[scheduler] case class ExecutorLost(execId: String) extends DAGSchedulerEvent +private[scheduler] case class ExecutorLost(execId: String, reason: ExecutorLossReason) + extends DAGSchedulerEvent private[scheduler] case class TaskSetFailed(taskSet: TaskSet, reason: String, exception: Option[Throwable]) diff --git a/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala b/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala index 642bf81ac087e..46a35b6a2eaf9 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala @@ -51,6 +51,10 @@ private[spark] object ExecutorKilled extends ExecutorLossReason("Executor killed */ private [spark] object LossReasonPending extends ExecutorLossReason("Pending loss reason.") +/** + * @param _message human readable loss reason + * @param workerLost whether the worker is confirmed lost too (i.e. 
including shuffle service) + */ private[spark] -case class SlaveLost(_message: String = "Slave lost") +case class SlaveLost(_message: String = "Slave lost", workerLost: Boolean = false) extends ExecutorLossReason(_message) diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala index 7d905538c66a7..ee5cbfeb47353 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala @@ -346,6 +346,7 @@ private[spark] class TaskSchedulerImpl( def statusUpdate(tid: Long, state: TaskState, serializedData: ByteBuffer) { var failedExecutor: Option[String] = None + var reason: Option[ExecutorLossReason] = None synchronized { try { if (state == TaskState.LOST && taskIdToExecutorId.contains(tid)) { @@ -353,8 +354,9 @@ private[spark] class TaskSchedulerImpl( val execId = taskIdToExecutorId(tid) if (executorIdToTaskCount.contains(execId)) { - removeExecutor(execId, + reason = Some( SlaveLost(s"Task $tid was lost, so marking the executor as lost as well.")) + removeExecutor(execId, reason.get) failedExecutor = Some(execId) } } @@ -387,7 +389,8 @@ private[spark] class TaskSchedulerImpl( } // Update the DAGScheduler without holding a lock on this, since that can deadlock if (failedExecutor.isDefined) { - dagScheduler.executorLost(failedExecutor.get) + assert(reason.isDefined) + dagScheduler.executorLost(failedExecutor.get, reason.get) backend.reviveOffers() } } @@ -513,7 +516,7 @@ private[spark] class TaskSchedulerImpl( } // Call dagScheduler.executorLost without holding the lock on this to prevent deadlock if (failedExecutor.isDefined) { - dagScheduler.executorLost(failedExecutor.get) + dagScheduler.executorLost(failedExecutor.get, reason) backend.reviveOffers() } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala index 5068bf2e66b60..04d40e2907cff 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala @@ -150,10 +150,11 @@ private[spark] class StandaloneSchedulerBackend( fullId, hostPort, cores, Utils.megabytesToString(memory))) } - override def executorRemoved(fullId: String, message: String, exitStatus: Option[Int]) { + override def executorRemoved( + fullId: String, message: String, exitStatus: Option[Int], workerLost: Boolean) { val reason: ExecutorLossReason = exitStatus match { case Some(code) => ExecutorExited(code, exitCausedByApp = true, message) - case None => SlaveLost(message) + case None => SlaveLost(message, workerLost = workerLost) } logInfo("Executor %s removed: %s".format(fullId, message)) removeExecutor(fullId.split("/")(1), reason) diff --git a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala index 416efaa75b8d2..bc58fb2a362a4 100644 --- a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala @@ -210,7 +210,8 @@ class AppClientSuite execAddedList.add(id) } - def executorRemoved(id: String, message: String, exitStatus: Option[Int]): Unit = { + def executorRemoved( + id: String, message: String, exitStatus: Option[Int], workerLost: 
Boolean): Unit = { execRemovedList.add(id) } } diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala index 33824749ae92f..6787b302614e6 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala @@ -31,6 +31,7 @@ import org.apache.spark._ import org.apache.spark.broadcast.BroadcastManager import org.apache.spark.rdd.RDD import org.apache.spark.scheduler.SchedulingMode.SchedulingMode +import org.apache.spark.shuffle.MetadataFetchFailedException import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster} import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, Utils} @@ -201,7 +202,11 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou override def beforeEach(): Unit = { super.beforeEach() - sc = new SparkContext("local", "DAGSchedulerSuite") + init(new SparkConf()) + } + + private def init(testConf: SparkConf): Unit = { + sc = new SparkContext("local", "DAGSchedulerSuite", testConf) sparkListener.submittedStageInfos.clear() sparkListener.successfulStages.clear() sparkListener.failedStages.clear() @@ -621,6 +626,46 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou assertDataStructuresEmpty() } + private val shuffleFileLossTests = Seq( + ("slave lost with shuffle service", SlaveLost("", false), true, false), + ("worker lost with shuffle service", SlaveLost("", true), true, true), + ("worker lost without shuffle service", SlaveLost("", true), false, true), + ("executor failure with shuffle service", ExecutorKilled, true, false), + ("executor failure without shuffle service", ExecutorKilled, false, true)) + + for ((eventDescription, event, shuffleServiceOn, expectFileLoss) <- shuffleFileLossTests) { + val maybeLost = if (expectFileLoss) { + "lost" + } else { + "not lost" + } + test(s"shuffle files $maybeLost when $eventDescription") { + // reset the test context with the right shuffle service config + afterEach() + val conf = new SparkConf() + conf.set("spark.shuffle.service.enabled", shuffleServiceOn.toString) + init(conf) + assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn) + + val shuffleMapRdd = new MyRDD(sc, 2, Nil) + val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1)) + val shuffleId = shuffleDep.shuffleId + val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker) + submit(reduceRdd, Array(0)) + complete(taskSets(0), Seq( + (Success, makeMapStatus("hostA", 1)), + (Success, makeMapStatus("hostB", 1)))) + runEvent(ExecutorLost("exec-hostA", event)) + if (expectFileLoss) { + intercept[MetadataFetchFailedException] { + mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0) + } + } else { + assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet === + HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))) + } + } + } // Helper function to validate state when creating tests for task failures private def checkStageId(stageId: Int, attempt: Int, stageAttempt: TaskSet) { @@ -628,7 +673,6 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou assert(stageAttempt.stageAttemptId == attempt) } - // Helper functions to extract commonly used code in Fetch Failure test cases private def setupStageAbortTest(sc: SparkContext) { 
sc.listenerBus.addListener(new EndListener()) @@ -1110,7 +1154,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou // pretend we were told hostA went away val oldEpoch = mapOutputTracker.getEpoch - runEvent(ExecutorLost("exec-hostA")) + runEvent(ExecutorLost("exec-hostA", ExecutorKilled)) val newEpoch = mapOutputTracker.getEpoch assert(newEpoch > oldEpoch) @@ -1241,7 +1285,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou )) // then one executor dies, and a task fails in stage 1 - runEvent(ExecutorLost("exec-hostA")) + runEvent(ExecutorLost("exec-hostA", ExecutorKilled)) runEvent(makeCompletionEvent( taskSets(1).tasks(0), FetchFailed(null, firstShuffleId, 2, 0, "Fetch failed"), @@ -1339,7 +1383,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou makeMapStatus("hostA", reduceRdd.partitions.length))) // now that host goes down - runEvent(ExecutorLost("exec-hostA")) + runEvent(ExecutorLost("exec-hostA", ExecutorKilled)) // so we resubmit those tasks runEvent(makeCompletionEvent(taskSets(0).tasks(0), Resubmitted, null)) @@ -1532,7 +1576,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou submit(reduceRdd, Array(0)) // blockManagerMaster.removeExecutor("exec-hostA") // pretend we were told hostA went away - runEvent(ExecutorLost("exec-hostA")) + runEvent(ExecutorLost("exec-hostA", ExecutorKilled)) // DAGScheduler will immediately resubmit the stage after it appears to have no pending tasks // rather than marking it is as failed and waiting. complete(taskSets(0), Seq( @@ -1999,7 +2043,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou // Pretend host A was lost val oldEpoch = mapOutputTracker.getEpoch - runEvent(ExecutorLost("exec-hostA")) + runEvent(ExecutorLost("exec-hostA", ExecutorKilled)) val newEpoch = mapOutputTracker.getEpoch assert(newEpoch > oldEpoch) diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala index 36d1c5690f3c6..7d6ad08036cb4 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala @@ -46,7 +46,7 @@ class FakeDAGScheduler(sc: SparkContext, taskScheduler: FakeTaskScheduler) override def executorAdded(execId: String, host: String) {} - override def executorLost(execId: String) {} + override def executorLost(execId: String, reason: ExecutorLossReason) {} override def taskSetFailed( taskSet: TaskSet, From b230fb92a53375b648fa0f9e1d852270156d79e5 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Wed, 7 Sep 2016 14:03:14 -0700 Subject: [PATCH 233/270] [SPARK-17052][SQL] Remove Duplicate Test Cases auto_join from HiveCompatibilitySuite.scala ### What changes were proposed in this pull request? The original [JIRA Hive-1642](https://issues.apache.org/jira/browse/HIVE-1642) delivered the test cases `auto_joinXYZ` for verifying the results when the joins are automatically converted to map-join. Basically, most of them are just copied from the corresponding `joinXYZ`. 
After comparison between `auto_joinXYZ` and `joinXYZ`, below is a list of duplicate cases: ``` "auto_join0", "auto_join1", "auto_join10", "auto_join11", "auto_join12", "auto_join13", "auto_join14", "auto_join14_hadoop20", "auto_join15", "auto_join17", "auto_join18", "auto_join2", "auto_join20", "auto_join21", "auto_join23", "auto_join24", "auto_join3", "auto_join4", "auto_join5", "auto_join6", "auto_join7", "auto_join8", "auto_join9" ``` We can remove all of them without affecting the test coverage. ### How was this patch tested? N/A Author: gatorsmile Closes #14635 from gatorsmile/removeAuto. --- .../execution/HiveCompatibilitySuite.scala | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala index a54d234876256..bebcb8f8016b1 100644 --- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala +++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala @@ -553,7 +553,32 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "union31", "union_date", "varchar_2", - "varchar_join1" + "varchar_join1", + + // These tests are duplicates of joinXYZ + "auto_join0", + "auto_join1", + "auto_join10", + "auto_join11", + "auto_join12", + "auto_join13", + "auto_join14", + "auto_join14_hadoop20", + "auto_join15", + "auto_join17", + "auto_join18", + "auto_join2", + "auto_join20", + "auto_join21", + "auto_join23", + "auto_join24", + "auto_join3", + "auto_join4", + "auto_join5", + "auto_join6", + "auto_join7", + "auto_join8", + "auto_join9" ) /** @@ -573,37 +598,14 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "annotate_stats_part", "annotate_stats_table", "annotate_stats_union", - "auto_join0", - "auto_join1", - "auto_join10", - "auto_join11", - "auto_join12", - "auto_join13", - "auto_join14", - "auto_join14_hadoop20", - "auto_join15", - "auto_join17", - "auto_join18", "auto_join19", - "auto_join2", - "auto_join20", - "auto_join21", "auto_join22", - "auto_join23", - "auto_join24", "auto_join25", "auto_join26", "auto_join27", "auto_join28", - "auto_join3", "auto_join30", "auto_join31", - "auto_join4", - "auto_join5", - "auto_join6", - "auto_join7", - "auto_join8", - "auto_join9", "auto_join_nulls", "auto_join_reordering_values", "binary_constant", From 3ced39df32e52170d6954a2464f84e0c9f307423 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Thu, 8 Sep 2016 19:41:49 +0800 Subject: [PATCH 234/270] [SPARK-17432][SQL] PreprocessDDL should respect case sensitivity when checking duplicated columns ## What changes were proposed in this pull request? In `PreprocessDDL` we will check if table columns are duplicated. However, this checking ignores case sensitivity config(it's always case-sensitive) and lead to different result between `HiveExternalCatalog` and `InMemoryCatalog`. `HiveExternalCatalog` will throw exception because hive metastore is always case-nonsensitive, and `InMemoryCatalog` is fine. This PR fixes it. ## How was this patch tested? a new test in DDLSuite Author: Wenchen Fan Closes #14994 from cloud-fan/check-dup. 
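For illustration, a minimal standalone sketch of the duplicate-column check with the case-sensitivity flag applied. The real check lives in the `PreprocessDDL` rule touched by this patch; `caseSensitive` below stands in for `SQLConf.CASE_SENSITIVE`.

```scala
// `caseSensitive` stands in for SQLConf.CASE_SENSITIVE; the real check is in
// the PreprocessDDL rule in sql/execution/datasources/rules.scala.
def duplicateColumns(columnNames: Seq[String], caseSensitive: Boolean): Seq[String] = {
  val normalized = if (caseSensitive) columnNames else columnNames.map(_.toLowerCase)
  normalized.groupBy(identity).collect { case (name, occurrences) if occurrences.size > 1 => name }.toSeq
}

duplicateColumns(Seq("a", "A"), caseSensitive = true)   // Seq()    -- two distinct columns
duplicateColumns(Seq("a", "A"), caseSensitive = false)  // Seq("a") -- duplicate, as Hive's metastore would see it
```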
--- .../org/apache/spark/sql/execution/datasources/rules.scala | 7 ++++++- .../org/apache/spark/sql/execution/command/DDLSuite.scala | 7 +++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala index 5b96206ba88fe..fbf4063ff63b8 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala @@ -97,7 +97,12 @@ case class PreprocessDDL(conf: SQLConf) extends Rule[LogicalPlan] { // * sort columns' type must be orderable. case c @ CreateTable(tableDesc, mode, query) if c.childrenResolved => val schema = if (query.isDefined) query.get.schema else tableDesc.schema - checkDuplication(schema.map(_.name), "table definition of " + tableDesc.identifier) + val columnNames = if (conf.caseSensitiveAnalysis) { + schema.map(_.name) + } else { + schema.map(_.name.toLowerCase) + } + checkDuplication(columnNames, "table definition of " + tableDesc.identifier) val partitionColsChecked = checkPartitionColumns(schema, tableDesc) val bucketColsChecked = checkBucketColumns(schema, partitionColsChecked) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index fd35c987cab59..05f826a11b586 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -371,6 +371,13 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { sql("CREATE TABLE tbl(a int, a string) USING json") } assert(e.message == "Found duplicate column(s) in table definition of `tbl`: a") + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + val e2 = intercept[AnalysisException] { + sql("CREATE TABLE tbl(a int, A string) USING json") + } + assert(e2.message == "Found duplicate column(s) in table definition of `tbl`: a") + } } test("create table - partition column names not in table definition") { From f0d21b7f90cdcce353ab6fc279b9cc376e46e536 Mon Sep 17 00:00:00 2001 From: Felix Cheung Date: Thu, 8 Sep 2016 08:22:58 -0700 Subject: [PATCH 235/270] [SPARK-17442][SPARKR] Additional arguments in write.df are not passed to data source ## What changes were proposed in this pull request? additional options were not passed down in write.df. ## How was this patch tested? unit tests falaki shivaram Author: Felix Cheung Closes #15010 from felixcheung/testreadoptions. 
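As a rough Scala-side analogue of the writer chain that `write.df` now builds on the JVM (the session, sample data, and output path below are made up for illustration; only the `.options(...)` step reflects the fix):

```scala
import org.apache.spark.sql.SparkSession

// Hypothetical session, data and path, for illustration only.
val spark = SparkSession.builder().master("local[*]").appName("write-df-options").getOrCreate()
import spark.implicits._

val df = Seq((2012, "Tesla", "S"), (2015, "Chevy", "Volt")).toDF("year", "make", "model")

df.write
  .format("csv")
  .mode("overwrite")
  .options(Map("header" -> "true"))   // additional arguments now reach the data source
  .save("/tmp/cars-csv")
```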
--- R/pkg/R/DataFrame.R | 1 + R/pkg/inst/tests/testthat/test_sparkSQL.R | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index d7686972d2eec..40f1f0f4429e0 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -2635,6 +2635,7 @@ setMethod("write.df", write <- callJMethod(df@sdf, "write") write <- callJMethod(write, "format", source) write <- callJMethod(write, "mode", jmode) + write <- callJMethod(write, "options", options) write <- callJMethod(write, "save", path) }) diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R index a9bd325895821..9d874a0988716 100644 --- a/R/pkg/inst/tests/testthat/test_sparkSQL.R +++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R @@ -208,7 +208,7 @@ test_that("create DataFrame from RDD", { unsetHiveContext() }) -test_that("read csv as DataFrame", { +test_that("read/write csv as DataFrame", { csvPath <- tempfile(pattern = "sparkr-test", fileext = ".csv") mockLinesCsv <- c("year,make,model,comment,blank", "\"2012\",\"Tesla\",\"S\",\"No comment\",", @@ -243,7 +243,17 @@ test_that("read csv as DataFrame", { expect_equal(count(withoutna2), 3) expect_equal(count(where(withoutna2, withoutna2$make == "Dummy")), 0) + # writing csv file + csvPath2 <- tempfile(pattern = "csvtest2", fileext = ".csv") + write.df(df2, path = csvPath2, "csv", header = "true") + df3 <- read.df(csvPath2, "csv", header = "true") + expect_equal(nrow(df3), nrow(df2)) + expect_equal(colnames(df3), colnames(df2)) + csv <- read.csv(file = list.files(csvPath2, pattern = "^part", full.names = T)[[1]]) + expect_equal(colnames(df3), colnames(csv)) + unlink(csvPath) + unlink(csvPath2) }) test_that("convert NAs to null type in DataFrames", { From 78d5d4dd5ce5a537ed04cd1bf242c9e9ea2c391a Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Thu, 8 Sep 2016 08:26:59 -0700 Subject: [PATCH 236/270] [SPARK-17200][PROJECT INFRA][BUILD][SPARKR] Automate building and testing on Windows (currently SparkR only) ## What changes were proposed in this pull request? This PR adds the build automation on Windows with [AppVeyor](https://www.appveyor.com/) CI tool. Currently, this only runs the tests for SparkR as we have been having some issues with testing Windows-specific PRs (e.g. https://github.com/apache/spark/pull/14743 and https://github.com/apache/spark/pull/13165) and hard time to verify this. One concern is, this build is dependent on [steveloughran/winutils](https://github.com/steveloughran/winutils) for pre-built Hadoop bin package (who is a Hadoop PMC member). ## How was this patch tested? Manually, https://ci.appveyor.com/project/HyukjinKwon/spark/build/88-SPARK-17200-build-profile This takes roughly 40 mins. Some tests are already being failed and this was found in https://github.com/apache/spark/pull/14743#issuecomment-241405287. Author: hyukjinkwon Closes #14859 from HyukjinKwon/SPARK-17200-build. --- appveyor.yml | 56 +++++++++ dev/appveyor-guide.md | 168 ++++++++++++++++++++++++++ dev/appveyor-install-dependencies.ps1 | 126 +++++++++++++++++++ 3 files changed, 350 insertions(+) create mode 100644 appveyor.yml create mode 100644 dev/appveyor-guide.md create mode 100644 dev/appveyor-install-dependencies.ps1 diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 0000000000000..5e756835bcb9b --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "{build}-{branch}" + +shallow_clone: true + +platform: x64 +configuration: Debug + +branches: + only: + - master + +only_commits: + files: + - R/ + +cache: + - C:\Users\appveyor\.m2 + +install: + # Install maven and dependencies + - ps: .\dev\appveyor-install-dependencies.ps1 + # Required package for R unit tests + - cmd: R -e "install.packages('testthat', repos='http://cran.us.r-project.org')" + - cmd: R -e "packageVersion('testthat')" + - cmd: R -e "install.packages('e1071', repos='http://cran.us.r-project.org')" + - cmd: R -e "packageVersion('e1071')" + - cmd: R -e "install.packages('survival', repos='http://cran.us.r-project.org')" + - cmd: R -e "packageVersion('survival')" + +build_script: + - cmd: mvn -DskipTests -Phadoop-2.6 -Psparkr -Phive -Phive-thriftserver package + +test_script: + - cmd: .\bin\spark-submit2.cmd --conf spark.hadoop.fs.default.name="file:///" R\pkg\tests\run-all.R + +notifications: + - provider: Email + on_build_success: false + on_build_failure: false + on_build_status_changed: false + diff --git a/dev/appveyor-guide.md b/dev/appveyor-guide.md new file mode 100644 index 0000000000000..d2e00b484727d --- /dev/null +++ b/dev/appveyor-guide.md @@ -0,0 +1,168 @@ +# AppVeyor Guides + +Currently, SparkR on Windows is being tested with [AppVeyor](https://ci.appveyor.com). This page describes how to set up AppVeyor with Spark, how to run the build, check the status and stop the build via this tool. There is the documenation for AppVeyor [here](https://www.appveyor.com/docs). Please refer this for full details. + + +### Setting up AppVeyor + +#### Sign up AppVeyor. + +- Go to https://ci.appveyor.com, and then click "SIGN UP FOR FREE". + + 2016-09-04 11 07 48 + +- As Apache Spark is one of open source projects, click "FREE - for open-source projects". + + 2016-09-04 11 07 58 + +- Click "Github". + + 2016-09-04 11 08 10 + + +#### After signing up, go to profile to link Github and AppVeyor. + +- Click your account and then click "Profile". + + 2016-09-04 11 09 43 + +- Enable the link with GitHub via clicking "Link Github account". + + 2016-09-04 11 09 52 + +- Click "Authorize application" in Github site. + +2016-09-04 11 10 05 + + +#### Add a project, Spark to enable the builds. + +- Go to the PROJECTS menu. + + 2016-08-30 12 16 31 + +- Click "NEW PROJECT" to add Spark. + + 2016-08-30 12 16 35 + +- Since we will use Github here, click the "GITHUB" button and then click "Authorize Github" so that AppVeyor can access to the Github logs (e.g. commits). + + 2016-09-04 11 10 22 + +- Click "Authorize application" from Github (the above step will pop up this page). + + 2016-09-04 11 10 27 + +- Come back to https://ci.appveyor.com/projects/new and then adds "spark". + + 2016-09-04 11 10 36 + + +#### Check if any event supposed to run the build actually triggers the build. + +- Click "PROJECTS" menu. 
+ + 2016-08-30 12 16 31 + +- Click Spark project. + + 2016-09-04 11 22 37 + + +### Checking the status, restarting and stopping the build + +- Click "PROJECTS" menu. + + 2016-08-30 12 16 31 + +- Locate "spark" and click it. + + 2016-09-04 11 22 37 + +- Here, we can check the status of current build. Also, "HISTORY" shows the past build history. + + 2016-09-04 11 23 24 + +- If the build is stopped, "RE-BUILD COMMIT" button appears. Click this button to restart the build. + + 2016-08-30 12 29 41 + +- If the build is running, "CANCEL BUILD" buttom appears. Click this button top cancel the current build. + + 2016-08-30 1 11 13 + + +### Specifying the branch for building and setting the build schedule + +Note: It seems the configurations in UI and `appveyor.yml` are mutually exclusive according to the [documentation](https://www.appveyor.com/docs/build-configuration/#configuring-build). + + +- Click the settings button on the right. + + 2016-08-30 1 19 12 + +- Set the default branch to build as above. + + 2016-08-30 12 42 25 + +- Specify the branch in order to exclude the builds in other branches. + + 2016-08-30 12 42 33 + +- Set the Crontab expression to regularly start the build. AppVeyor uses Crontab expression, [atifaziz/NCrontab](https://github.com/atifaziz/NCrontab/wiki/Crontab-Expression). Please refer the examples [here](https://github.com/atifaziz/NCrontab/wiki/Crontab-Examples). + + + 2016-08-30 12 42 43 + + +### Filtering commits and Pull Requests + +Currently, AppVeyor is only used for SparkR. So, the build is only triggered when R codes are changed. + +This is specified in `.appveyor.yml` as below: + +``` +only_commits: + files: + - R/ +``` + +Please refer https://www.appveyor.com/docs/how-to/filtering-commits for more details. + + +### Checking the full log of the build + +Currently, the console in AppVeyor does not print full details. This can be manually checked. For example, AppVeyor shows the failed tests as below in console + +``` +Failed ------------------------------------------------------------------------- +1. Error: union on two RDDs (@test_binary_function.R#38) ----------------------- +1: textFile(sc, fileName) at C:/projects/spark/R/lib/SparkR/tests/testthat/test_binary_function.R:38 +2: callJMethod(sc, "textFile", path, getMinPartitions(sc, minPartitions)) +3: invokeJava(isStatic = FALSE, objId$id, methodName, ...) +4: stop(readString(conn)) +``` + +After downloading the log by clicking the log button as below: + +![2016-09-08 11 37 17](https://cloud.githubusercontent.com/assets/6477701/18335227/b07d0782-75b8-11e6-94da-1b88cd2a2402.png) + +the details can be checked as below (e.g. exceptions) + +``` +Failed ------------------------------------------------------------------------- +1. Error: spark.lda with text input (@test_mllib.R#655) ------------------------ + org.apache.spark.sql.AnalysisException: Path does not exist: file:/C:/projects/spark/R/lib/SparkR/tests/testthat/data/mllib/sample_lda_data.txt; + at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$12.apply(DataSource.scala:376) + at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$12.apply(DataSource.scala:365) + at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241) + at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241) + ... + + 1: read.text("data/mllib/sample_lda_data.txt") at C:/projects/spark/R/lib/SparkR/tests/testthat/test_mllib.R:655 + 2: dispatchFunc("read.text(path)", x, ...) 
+ 3: f(x, ...) + 4: callJMethod(read, "text", paths) + 5: invokeJava(isStatic = FALSE, objId$id, methodName, ...) + 6: stop(readString(conn)) +``` diff --git a/dev/appveyor-install-dependencies.ps1 b/dev/appveyor-install-dependencies.ps1 new file mode 100644 index 0000000000000..087b8666cc684 --- /dev/null +++ b/dev/appveyor-install-dependencies.ps1 @@ -0,0 +1,126 @@ +<# +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +#> + +$CRAN = "https://cloud.r-project.org" + +Function InstallR { + if ( -not(Test-Path Env:\R_ARCH) ) { + $arch = "i386" + } + Else { + $arch = $env:R_ARCH + } + + $urlPath = "" + $latestVer = $(ConvertFrom-JSON $(Invoke-WebRequest http://rversions.r-pkg.org/r-release).Content).version + If ($rVer -ne $latestVer) { + $urlPath = ("old/" + $rVer + "/") + } + + $rurl = $CRAN + "/bin/windows/base/" + $urlPath + "R-" + $rVer + "-win.exe" + + # Downloading R + Start-FileDownload $rurl "R-win.exe" + + # Running R installer + Start-Process -FilePath .\R-win.exe -ArgumentList "/VERYSILENT /DIR=C:\R" -NoNewWindow -Wait + + $RDrive = "C:" + echo "R is now available on drive $RDrive" + + $env:PATH = $RDrive + '\R\bin\' + $arch + ';' + 'C:\MinGW\msys\1.0\bin;' + $env:PATH + + # Testing R installation + Rscript -e "sessionInfo()" +} + +Function InstallRtools { + $rtoolsver = $rToolsVer.Split('.')[0..1] -Join '' + $rtoolsurl = $CRAN + "/bin/windows/Rtools/Rtools$rtoolsver.exe" + + # Downloading Rtools + Start-FileDownload $rtoolsurl "Rtools-current.exe" + + # Running Rtools installer + Start-Process -FilePath .\Rtools-current.exe -ArgumentList /VERYSILENT -NoNewWindow -Wait + + $RtoolsDrive = "C:" + echo "Rtools is now available on drive $RtoolsDrive" + + if ( -not(Test-Path Env:\GCC_PATH) ) { + $gccPath = "gcc-4.6.3" + } + Else { + $gccPath = $env:GCC_PATH + } + $env:PATH = $RtoolsDrive + '\Rtools\bin;' + $RtoolsDrive + '\Rtools\MinGW\bin;' + $RtoolsDrive + '\Rtools\' + $gccPath + '\bin;' + $env:PATH + $env:BINPREF=$RtoolsDrive + '/Rtools/mingw_$(WIN)/bin/' +} + +# create tools directory outside of Spark directory +$up = (Get-Item -Path ".." 
-Verbose).FullName +$tools = "$up\tools" +if (!(Test-Path $tools)) { + New-Item -ItemType Directory -Force -Path $tools | Out-Null +} + +# ========================== Maven +Push-Location $tools + +$mavenVer = "3.3.9" +Start-FileDownload "https://archive.apache.org/dist/maven/maven-3/$mavenVer/binaries/apache-maven-$mavenVer-bin.zip" "maven.zip" + +# extract +Invoke-Expression "7z.exe x maven.zip" + +# add maven to environment variables +$env:Path += ";$tools\apache-maven-$mavenVer\bin" +$env:M2_HOME = "$tools\apache-maven-$mavenVer" +$env:MAVEN_OPTS = "-Xmx2g -XX:MaxPermSize=512M -XX:ReservedCodeCacheSize=512m" + +Pop-Location + +# ========================== Hadoop bin package +$hadoopVer = "2.6.0" +$hadoopPath = "$tools\hadoop" +if (!(Test-Path $hadoopPath)) { + New-Item -ItemType Directory -Force -Path $hadoopPath | Out-Null +} +Push-Location $hadoopPath + +Start-FileDownload "https://github.com/steveloughran/winutils/archive/master.zip" "winutils-master.zip" + +# extract +Invoke-Expression "7z.exe x winutils-master.zip" + +# add hadoop bin to environment variables +$env:HADOOP_HOME = "$hadoopPath/winutils-master/hadoop-$hadoopVer" + +Pop-Location + +# ========================== R +$rVer = "3.3.1" +$rToolsVer = "3.4.0" + +InstallR +InstallRtools + +$env:R_LIBS_USER = 'c:\RLibrary' +if ( -not(Test-Path $env:R_LIBS_USER) ) { + mkdir $env:R_LIBS_USER +} + From 722afbb2b33037a30d385a15725f2db5365bd375 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 8 Sep 2016 16:47:18 -0700 Subject: [PATCH 237/270] [SPARK-17405] RowBasedKeyValueBatch should use default page size to prevent OOMs ## What changes were proposed in this pull request? Before this change, we would always allocate 64MB per aggregation task for the first-level hash map storage, even when running in low-memory situations such as local mode. This changes it to use the memory manager default page size, which is automatically reduced from 64MB in these situations. cc ooq JoshRosen ## How was this patch tested? Tested manually with `bin/spark-shell --master=local[32]` and verifying that `(1 to math.pow(10, 3).toInt).toDF("n").withColumn("m", 'n % 2).groupBy('m).agg(sum('n)).show` does not crash. Author: Eric Liang Closes #15016 from ericl/sc-4483. --- .../expressions/RowBasedKeyValueBatch.java | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java index 4899f856c8756..551443a11298b 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java @@ -37,19 +37,18 @@ * We use `FixedLengthRowBasedKeyValueBatch` if all fields in the key and the value are fixed-length * data types. Otherwise we use `VariableLengthRowBasedKeyValueBatch`. * - * RowBasedKeyValueBatch is backed by a single page / MemoryBlock (defaults to 64MB). If the page - * is full, the aggregate logic should fallback to a second level, larger hash map. We intentionally - * use the single-page design because it simplifies memory address encoding & decoding for each - * key-value pair. Because the maximum capacity for RowBasedKeyValueBatch is only 2^16, it is - * unlikely we need a second page anyway. Filling the page requires an average size for key value - * pairs to be larger than 1024 bytes. 
+ * RowBasedKeyValueBatch is backed by a single page / MemoryBlock (ranges from 1 to 64MB depending + * on the system configuration). If the page is full, the aggregate logic should fallback to a + * second level, larger hash map. We intentionally use the single-page design because it simplifies + * memory address encoding & decoding for each key-value pair. Because the maximum capacity for + * RowBasedKeyValueBatch is only 2^16, it is unlikely we need a second page anyway. Filling the + * page requires an average size for key value pairs to be larger than 1024 bytes. * */ public abstract class RowBasedKeyValueBatch extends MemoryConsumer { protected final Logger logger = LoggerFactory.getLogger(RowBasedKeyValueBatch.class); private static final int DEFAULT_CAPACITY = 1 << 16; - private static final long DEFAULT_PAGE_SIZE = 64 * 1024 * 1024; protected final StructType keySchema; protected final StructType valueSchema; @@ -105,7 +104,7 @@ protected RowBasedKeyValueBatch(StructType keySchema, StructType valueSchema, in this.keyRow = new UnsafeRow(keySchema.length()); this.valueRow = new UnsafeRow(valueSchema.length()); - if (!acquirePage(DEFAULT_PAGE_SIZE)) { + if (!acquirePage(manager.pageSizeBytes())) { page = null; recordStartOffset = 0; } else { From 92ce8d4849a0341c4636e70821b7be57ad3055b1 Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Thu, 8 Sep 2016 17:20:20 -0700 Subject: [PATCH 238/270] [SPARK-15487][WEB UI] Spark Master UI to reverse proxy Application and Workers UI ## What changes were proposed in this pull request? This pull request adds the functionality to enable accessing worker and application UI through master UI itself. Thus helps in accessing SparkUI when running spark cluster in closed networks e.g. Kubernetes. Cluster admin needs to expose only spark master UI and rest of the UIs can be in the private network, master UI will reverse proxy the connection request to corresponding resource. It adds the path for workers/application UIs as WorkerUI: ://master-publicIP:/target/workerID/ ApplicationUI: ://master-publicIP:/target/appID/ This makes it easy for users to easily protect the Spark master cluster access by putting some reverse proxy e.g. https://github.com/bitly/oauth2_proxy ## How was this patch tested? The functionality has been tested manually and there is a unit test too for testing access to worker UI with reverse proxy address. pwendell bomeng BryanCutler can you please review it, thanks. Author: Gurvinder Singh Closes #13950 from gurvindersingh/rproxy. 
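To give a rough idea of the path rewriting the proxy performs, here is a simplified, standalone sketch; it is not the actual `JettyUtils` code, only an approximation of how a `/proxy/<id>/...` request is mapped onto the worker or application UI address.

```scala
import java.net.URI

// Simplified approximation of the /proxy/<id>/... rewrite; the real logic is
// JettyUtils.createProxyURI plus the ProxyServlet registered per worker/app.
def proxyUri(prefix: String, target: String, path: String, query: Option[String]): Option[URI] = {
  if (!path.startsWith(prefix)) {
    None  // request is not addressed to this proxied target
  } else {
    val rest = path.stripPrefix(prefix)
    val joined = target + (if (rest.isEmpty || rest.startsWith("/")) rest else "/" + rest)
    val base = URI.create(joined)
    Some(new URI(base.getScheme, base.getAuthority, base.getPath, query.orNull, base.getFragment).normalize())
  }
}

proxyUri("/proxy/worker-id", "http://localhost:8081", "/proxy/worker-id/json", None)
// Some(http://localhost:8081/json)
proxyUri("/proxy/worker-id", "http://localhost:8081", "/proxy/worker-id/json", Some("test=done"))
// Some(http://localhost:8081/json?test=done)
proxyUri("/proxy/worker-id", "http://localhost:8081", "/metrics/json", None)
// None
```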
--- core/pom.xml | 12 ++- .../scala/org/apache/spark/SparkContext.scala | 3 + .../apache/spark/deploy/master/Master.scala | 18 ++++ .../deploy/master/ui/ApplicationPage.scala | 13 ++- .../spark/deploy/master/ui/MasterPage.scala | 12 ++- .../spark/deploy/master/ui/MasterWebUI.scala | 16 ++++ .../spark/deploy/worker/ExecutorRunner.scala | 6 +- .../apache/spark/deploy/worker/Worker.scala | 3 + .../org/apache/spark/ui/JettyUtils.scala | 85 +++++++++++++++++++ .../scala/org/apache/spark/ui/UIUtils.scala | 12 +++ .../spark/deploy/master/MasterSuite.scala | 27 ++++++ .../scala/org/apache/spark/ui/UISuite.scala | 37 ++++++++ docs/configuration.md | 14 +++ pom.xml | 14 +++ .../apache/spark/repl/SparkILoopInit.scala | 13 ++- .../org/apache/spark/repl/SparkILoop.scala | 13 ++- 16 files changed, 287 insertions(+), 11 deletions(-) diff --git a/core/pom.xml b/core/pom.xml index 69a0b0ff27c39..3c8138f974a56 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -124,6 +124,16 @@ jetty-servlet compile + + org.eclipse.jetty + jetty-proxy + compile + + + org.eclipse.jetty + jetty-client + compile + org.eclipse.jetty jetty-servlets @@ -388,7 +398,7 @@ true true - guava,jetty-io,jetty-servlet,jetty-servlets,jetty-continuation,jetty-http,jetty-plus,jetty-util,jetty-server,jetty-security + guava,jetty-io,jetty-servlet,jetty-servlets,jetty-continuation,jetty-http,jetty-plus,jetty-util,jetty-server,jetty-security,jetty-proxy,jetty-client true diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 4aa795a58a28e..e32e4aa5b8312 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -505,6 +505,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli _applicationId = _taskScheduler.applicationId() _applicationAttemptId = taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) + if (_conf.getBoolean("spark.ui.reverseProxy", false)) { + System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId) + } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala index dcf41638e7994..8c91aa15167c4 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala @@ -114,6 +114,7 @@ private[deploy] class Master( // Default maxCores for applications that don't specify it (i.e. pass Int.MaxValue) private val defaultCores = conf.getInt("spark.deploy.defaultCores", Int.MaxValue) + val reverseProxy = conf.getBoolean("spark.ui.reverseProxy", false) if (defaultCores < 1) { throw new SparkException("spark.deploy.defaultCores must be positive") } @@ -129,6 +130,11 @@ private[deploy] class Master( webUi = new MasterWebUI(this, webUiPort) webUi.bind() masterWebUiUrl = "http://" + masterPublicAddress + ":" + webUi.boundPort + if (reverseProxy) { + masterWebUiUrl = conf.get("spark.ui.reverseProxyUrl", masterWebUiUrl) + logInfo(s"Spark Master is acting as a reverse proxy. 
Master, Workers and " + + s"Applications UIs are available at $masterWebUiUrl") + } checkForWorkerTimeOutTask = forwardMessageThread.scheduleAtFixedRate(new Runnable { override def run(): Unit = Utils.tryLogNonFatalError { self.send(CheckForWorkerTimeOut) @@ -755,6 +761,9 @@ private[deploy] class Master( workers += worker idToWorker(worker.id) = worker addressToWorker(workerAddress) = worker + if (reverseProxy) { + webUi.addProxyTargets(worker.id, worker.webUiAddress) + } true } @@ -763,6 +772,9 @@ private[deploy] class Master( worker.setState(WorkerState.DEAD) idToWorker -= worker.id addressToWorker -= worker.endpoint.address + if (reverseProxy) { + webUi.removeProxyTargets(worker.id) + } for (exec <- worker.executors.values) { logInfo("Telling app of lost executor: " + exec.id) exec.application.driver.send(ExecutorUpdated( @@ -810,6 +822,9 @@ private[deploy] class Master( endpointToApp(app.driver) = app addressToApp(appAddress) = app waitingApps += app + if (reverseProxy) { + webUi.addProxyTargets(app.id, app.desc.appUiUrl) + } } private def finishApplication(app: ApplicationInfo) { @@ -823,6 +838,9 @@ private[deploy] class Master( idToApp -= app.id endpointToApp -= app.driver addressToApp -= app.driver.address + if (reverseProxy) { + webUi.removeProxyTargets(app.id) + } if (completedApps.size >= RETAINED_APPLICATIONS) { val toRemove = math.max(RETAINED_APPLICATIONS / 10, 1) completedApps.take(toRemove).foreach { a => diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala b/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala index 8875fc223250d..17c521cbf983f 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala @@ -77,7 +77,10 @@ private[ui] class ApplicationPage(parent: MasterWebUI) extends WebUIPage("app")
             <li><strong>State:</strong> {app.state}</li>
             {
               if (!app.isFinished) {
-                <li><strong><a href={app.desc.appUiUrl}>Application Detail UI</a></strong></li>
+                <li><strong>
+                  <a href={UIUtils.makeHref(parent.master.reverseProxy,
+                    app.id, app.desc.appUiUrl)}>Application Detail UI</a>
+                </strong></li>
  • } } @@ -100,19 +103,21 @@ private[ui] class ApplicationPage(parent: MasterWebUI) extends WebUIPage("app") } private def executorRow(executor: ExecutorDesc): Seq[Node] = { + val workerUrlRef = UIUtils.makeHref(parent.master.reverseProxy, + executor.worker.id, executor.worker.webUiAddress) {executor.id} - {executor.worker.id} + {executor.worker.id} {executor.cores} {executor.memory} {executor.state} stdout + .format(workerUrlRef, executor.application.id, executor.id)}>stdout stderr + .format(workerUrlRef, executor.application.id, executor.id)}>stderr } diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala index 5ed3e39edc484..3fb860582cc17 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala @@ -176,7 +176,8 @@ private[ui] class MasterPage(parent: MasterWebUI) extends WebUIPage("") { private def workerRow(worker: WorkerInfo): Seq[Node] = { - {worker.id} + {worker.id} {worker.host}:{worker.port} {worker.state} @@ -210,7 +211,8 @@ private[ui] class MasterPage(parent: MasterWebUI) extends WebUIPage("") { if (app.isFinished) { app.desc.name } else { - {app.desc.name} + {app.desc.name} } } @@ -244,7 +246,11 @@ private[ui] class MasterPage(parent: MasterWebUI) extends WebUIPage("") { {driver.id} {killLink} {driver.submitDate} - {driver.worker.map(w => {w.id.toString}).getOrElse("None")} + {driver.worker.map(w => + + {w.id.toString} + ).getOrElse("None")} {driver.state} diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala index a0727ad83fb66..8cfd0f682932d 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala @@ -17,6 +17,10 @@ package org.apache.spark.deploy.master.ui +import scala.collection.mutable.HashMap + +import org.eclipse.jetty.servlet.ServletContextHandler + import org.apache.spark.deploy.master.Master import org.apache.spark.internal.Logging import org.apache.spark.ui.{SparkUI, WebUI} @@ -34,6 +38,7 @@ class MasterWebUI( val masterEndpointRef = master.self val killEnabled = master.conf.getBoolean("spark.ui.killEnabled", true) + private val proxyHandlers = new HashMap[String, ServletContextHandler] initialize() @@ -48,6 +53,17 @@ class MasterWebUI( attachHandler(createRedirectHandler( "/driver/kill", "/", masterPage.handleDriverKillRequest, httpMethods = Set("POST"))) } + + def addProxyTargets(id: String, target: String): Unit = { + var endTarget = target.stripSuffix("/") + val handler = createProxyHandler("/proxy/" + id, endTarget) + attachHandler(handler) + proxyHandlers(id) = handler + } + + def removeProxyTargets(id: String): Unit = { + proxyHandlers.remove(id).foreach(detachHandler) + } } private[master] object MasterWebUI { diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala b/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala index 06066248ea5d0..d4d8521cc8204 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala @@ -156,7 +156,11 @@ private[deploy] class ExecutorRunner( // Add webUI log urls val baseUrl = - s"http://$publicAddress:$webUiPort/logPage/?appId=$appId&executorId=$execId&logType=" + if 
(conf.getBoolean("spark.ui.reverseProxy", false)) { + s"/proxy/$workerId/logPage/?appId=$appId&executorId=$execId&logType=" + } else { + s"http://$publicAddress:$webUiPort/logPage/?appId=$appId&executorId=$execId&logType=" + } builder.environment.put("SPARK_LOG_URL_STDERR", s"${baseUrl}stderr") builder.environment.put("SPARK_LOG_URL_STDOUT", s"${baseUrl}stdout") diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala index 724206bf94c68..0bedd9a20a969 100755 --- a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala @@ -203,6 +203,9 @@ private[deploy] class Worker( activeMasterWebUiUrl = uiUrl master = Some(masterRef) connected = true + if (conf.getBoolean("spark.ui.reverseProxy", false)) { + logInfo(s"WorkerWebUI is available at $activeMasterWebUiUrl/proxy/$workerId") + } // Cancel any outstanding re-registration attempts because we found a new master cancelLastRegistrationRetry() } diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala index 50283f2b74a41..24f3f757157f3 100644 --- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala +++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala @@ -25,6 +25,8 @@ import scala.collection.mutable.ArrayBuffer import scala.language.implicitConversions import scala.xml.Node +import org.eclipse.jetty.client.api.Response +import org.eclipse.jetty.proxy.ProxyServlet import org.eclipse.jetty.server.{Request, Server, ServerConnector} import org.eclipse.jetty.server.handler._ import org.eclipse.jetty.servlet._ @@ -186,6 +188,47 @@ private[spark] object JettyUtils extends Logging { contextHandler } + /** Create a handler for proxying request to Workers and Application Drivers */ + def createProxyHandler( + prefix: String, + target: String): ServletContextHandler = { + val servlet = new ProxyServlet { + override def rewriteTarget(request: HttpServletRequest): String = { + val rewrittenURI = createProxyURI( + prefix, target, request.getRequestURI(), request.getQueryString()) + if (rewrittenURI == null) { + return null + } + if (!validateDestination(rewrittenURI.getHost(), rewrittenURI.getPort())) { + return null + } + rewrittenURI.toString() + } + + override def filterServerResponseHeader( + clientRequest: HttpServletRequest, + serverResponse: Response, + headerName: String, + headerValue: String): String = { + if (headerName.equalsIgnoreCase("location")) { + val newHeader = createProxyLocationHeader( + prefix, headerValue, clientRequest, serverResponse.getRequest().getURI()) + if (newHeader != null) { + return newHeader + } + } + super.filterServerResponseHeader( + clientRequest, serverResponse, headerName, headerValue) + } + } + + val contextHandler = new ServletContextHandler + val holder = new ServletHolder(servlet) + contextHandler.setContextPath(prefix) + contextHandler.addServlet(holder, "/") + contextHandler + } + /** Add filters, if any, to the given list of ServletContextHandlers */ def addFilters(handlers: Seq[ServletContextHandler], conf: SparkConf) { val filters: Array[String] = conf.get("spark.ui.filters", "").split(',').map(_.trim()) @@ -332,6 +375,48 @@ private[spark] object JettyUtils extends Logging { redirectHandler } + def createProxyURI(prefix: String, target: String, path: String, query: String): URI = { + if (!path.startsWith(prefix)) { + return null + } + + val uri = new 
StringBuilder(target) + val rest = path.substring(prefix.length()) + + if (!rest.isEmpty()) { + if (!rest.startsWith("/")) { + uri.append("/") + } + uri.append(rest) + } + + val rewrittenURI = URI.create(uri.toString()) + if (query != null) { + return new URI( + rewrittenURI.getScheme(), + rewrittenURI.getAuthority(), + rewrittenURI.getPath(), + query, + rewrittenURI.getFragment() + ).normalize() + } + rewrittenURI.normalize() + } + + def createProxyLocationHeader( + prefix: String, + headerValue: String, + clientRequest: HttpServletRequest, + targetUri: URI): String = { + val toReplace = targetUri.getScheme() + "://" + targetUri.getAuthority() + if (headerValue.startsWith(toReplace)) { + clientRequest.getScheme() + "://" + clientRequest.getHeader("host") + + prefix + headerValue.substring(toReplace.length()) + } else { + null + } + } + // Create a new URI from the arguments, handling IPv6 host encoding and default ports. private def createRedirectURI( scheme: String, server: String, port: Int, path: String, query: String) = { diff --git a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala index 2b6c538485c51..c0d1a2220f62a 100644 --- a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala +++ b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala @@ -510,4 +510,16 @@ private[spark] object UIUtils extends Logging { def getTimeZoneOffset() : Int = TimeZone.getDefault().getOffset(System.currentTimeMillis()) / 1000 / 60 + + /** + * Return the correct Href after checking if master is running in the + * reverse proxy mode or not. + */ + def makeHref(proxy: Boolean, id: String, origHref: String): String = { + if (proxy) { + s"/proxy/$id" + } else { + origHref + } + } } diff --git a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala index 7cbe4e342eaa5..831a7bcb12743 100644 --- a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala @@ -157,6 +157,33 @@ class MasterSuite extends SparkFunSuite } } + test("master/worker web ui available with reverseProxy") { + implicit val formats = org.json4s.DefaultFormats + val reverseProxyUrl = "http://localhost:8080" + val conf = new SparkConf() + conf.set("spark.ui.reverseProxy", "true") + conf.set("spark.ui.reverseProxyUrl", reverseProxyUrl) + val localCluster = new LocalSparkCluster(2, 2, 512, conf) + localCluster.start() + try { + eventually(timeout(5 seconds), interval(100 milliseconds)) { + val json = Source.fromURL(s"http://localhost:${localCluster.masterWebUIPort}/json") + .getLines().mkString("\n") + val JArray(workers) = (parse(json) \ "workers") + workers.size should be (2) + workers.foreach { workerSummaryJson => + val JString(workerId) = workerSummaryJson \ "id" + val url = s"http://localhost:${localCluster.masterWebUIPort}/proxy/${workerId}/json" + val workerResponse = parse(Source.fromURL(url).getLines().mkString("\n")) + (workerResponse \ "cores").extract[Int] should be (2) + (workerResponse \ "masterwebuiurl").extract[String] should be (reverseProxyUrl) + } + } + } finally { + localCluster.stop() + } + } + test("basic scheduling - spread out") { basicScheduling(spreadOut = true) } diff --git a/core/src/test/scala/org/apache/spark/ui/UISuite.scala b/core/src/test/scala/org/apache/spark/ui/UISuite.scala index 2b59b48d8bc98..dbb8dca4c8dab 100644 --- a/core/src/test/scala/org/apache/spark/ui/UISuite.scala +++ 
b/core/src/test/scala/org/apache/spark/ui/UISuite.scala @@ -18,10 +18,13 @@ package org.apache.spark.ui import java.net.{BindException, ServerSocket} +import java.net.URI +import javax.servlet.http.HttpServletRequest import scala.io.Source import org.eclipse.jetty.servlet.ServletContextHandler +import org.mockito.Mockito.{mock, when} import org.scalatest.concurrent.Eventually._ import org.scalatest.time.SpanSugar._ @@ -190,6 +193,40 @@ class UISuite extends SparkFunSuite { } } + test("verify proxy rewrittenURI") { + val prefix = "/proxy/worker-id" + val target = "http://localhost:8081" + val path = "/proxy/worker-id/json" + var rewrittenURI = JettyUtils.createProxyURI(prefix, target, path, null) + assert(rewrittenURI.toString() === "http://localhost:8081/json") + rewrittenURI = JettyUtils.createProxyURI(prefix, target, path, "test=done") + assert(rewrittenURI.toString() === "http://localhost:8081/json?test=done") + rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/proxy/worker-id", null) + assert(rewrittenURI.toString() === "http://localhost:8081") + rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/proxy/worker-id/test%2F", null) + assert(rewrittenURI.toString() === "http://localhost:8081/test%2F") + rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/proxy/worker-id/%F0%9F%98%84", null) + assert(rewrittenURI.toString() === "http://localhost:8081/%F0%9F%98%84") + rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/proxy/worker-noid/json", null) + assert(rewrittenURI === null) + } + + test("verify rewriting location header for reverse proxy") { + val clientRequest = mock(classOf[HttpServletRequest]) + var headerValue = "http://localhost:4040/jobs" + val prefix = "/proxy/worker-id" + val targetUri = URI.create("http://localhost:4040") + when(clientRequest.getScheme()).thenReturn("http") + when(clientRequest.getHeader("host")).thenReturn("localhost:8080") + var newHeader = JettyUtils.createProxyLocationHeader( + prefix, headerValue, clientRequest, targetUri) + assert(newHeader.toString() === "http://localhost:8080/proxy/worker-id/jobs") + headerValue = "http://localhost:4041/jobs" + newHeader = JettyUtils.createProxyLocationHeader( + prefix, headerValue, clientRequest, targetUri) + assert(newHeader === null) + } + def stopServer(info: ServerInfo): Unit = { if (info != null && info.server != null) info.server.stop } diff --git a/docs/configuration.md b/docs/configuration.md index 6e98f67b73751..ebd0aa796db08 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -657,6 +657,20 @@ Apart from these, the following properties are also available, and may be useful collecting. + + spark.ui.reverseProxy + false + + Enable running Spark Master as reverse proxy for worker and application UIs. In this mode, Spark master will reverse proxy the worker and application UIs to enable access without requiring direct access to their hosts. Use it with caution, as worker and application UI will not be accessible directly, you will only be able to access them through spark master/proxy public URL. This setting affects all the workers and application UIs running in the cluster and must be set on all the workers, drivers and masters. + + + + spark.ui.reverseProxyUrl + + + This is the URL where your proxy is running. This URL is for proxy which is running in front of Spark Master. This is useful when running proxy for authentication e.g. OAuth proxy. Make sure this is a complete URL including scheme (http/https) and port to reach your proxy. 
+ + spark.worker.ui.retainedExecutors 1000 diff --git a/pom.xml b/pom.xml index e6c28977ca78d..3b3ad39b47571 100644 --- a/pom.xml +++ b/pom.xml @@ -338,6 +338,18 @@ ${jetty.version} provided
    + + org.eclipse.jetty + jetty-proxy + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-client + ${jetty.version} + provided + org.eclipse.jetty jetty-util @@ -2256,6 +2268,8 @@ org.spark-project.spark:unused org.eclipse.jetty:jetty-io org.eclipse.jetty:jetty-http + org.eclipse.jetty:jetty-proxy + org.eclipse.jetty:jetty-client org.eclipse.jetty:jetty-continuation org.eclipse.jetty:jetty-servlet org.eclipse.jetty:jetty-servlets diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala index 29f63de8a0fa1..b2a61260c2bb6 100644 --- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala +++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala @@ -126,7 +126,18 @@ private[repl] trait SparkILoopInit { @transient val spark = org.apache.spark.repl.Main.interp.createSparkSession() @transient val sc = { val _sc = spark.sparkContext - _sc.uiWebUrl.foreach(webUrl => println(s"Spark context Web UI available at ${webUrl}")) + if (_sc.getConf.getBoolean("spark.ui.reverseProxy", false)) { + val proxyUrl = _sc.getConf.get("spark.ui.reverseProxyUrl", null) + if (proxyUrl != null) { + println(s"Spark Context Web UI is available at ${proxyUrl}/proxy/${_sc.applicationId}") + } else { + println(s"Spark Context Web UI is available at Spark Master Public URL") + } + } else { + _sc.uiWebUrl.foreach { + webUrl => println(s"Spark context Web UI available at ${webUrl}") + } + } println("Spark context available as 'sc' " + s"(master = ${_sc.master}, app id = ${_sc.applicationId}).") println("Spark session available as 'spark'.") diff --git a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala index 2707b0847aefc..76a66c1beada0 100644 --- a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala +++ b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala @@ -43,7 +43,18 @@ class SparkILoop(in0: Option[BufferedReader], out: JPrintWriter) } @transient val sc = { val _sc = spark.sparkContext - _sc.uiWebUrl.foreach(webUrl => println(s"Spark context Web UI available at ${webUrl}")) + if (_sc.getConf.getBoolean("spark.ui.reverseProxy", false)) { + val proxyUrl = _sc.getConf.get("spark.ui.reverseProxyUrl", null) + if (proxyUrl != null) { + println(s"Spark Context Web UI is available at ${proxyUrl}/proxy/${_sc.applicationId}") + } else { + println(s"Spark Context Web UI is available at Spark Master Public URL") + } + } else { + _sc.uiWebUrl.foreach { + webUrl => println(s"Spark context Web UI available at ${webUrl}") + } + } println("Spark context available as 'sc' " + s"(master = ${_sc.master}, app id = ${_sc.applicationId}).") println("Spark session available as 'spark'.") From 65b814bf50e92e2e9b622d1602f18bacd217181c Mon Sep 17 00:00:00 2001 From: "Joseph K. Bradley" Date: Fri, 9 Sep 2016 05:35:10 -0700 Subject: [PATCH 239/270] [SPARK-17456][CORE] Utility for parsing Spark versions ## What changes were proposed in this pull request? This patch adds methods for extracting major and minor versions as Int types in Scala from a Spark version string. Motivation: There are many hacks within Spark's codebase to identify and compare Spark versions. We should add a simple utility to standardize these code paths, especially since there have been mistakes made in the past. This will let us add unit tests as well. 
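As a rough usage sketch (the method names are the ones added by this patch; callable only from inside Spark since the object is `private[spark]`), the intended usage looks like:

```
import org.apache.spark.util.VersionUtils

VersionUtils.majorVersion("2.0.1-SNAPSHOT")       // 2
VersionUtils.minorVersion("2.0.1-SNAPSHOT")       // 0
VersionUtils.majorMinorVersion("2.0.1-SNAPSHOT")  // (2, 0)
// Strings without a parsable major.minor prefix fail fast with IllegalArgumentException.
```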
Currently, I want this functionality to check Spark versions to provide backwards compatibility for ML model persistence. ## How was this patch tested? Unit tests Author: Joseph K. Bradley Closes #15017 from jkbradley/version-parsing. --- .../org/apache/spark/util/VersionUtils.scala | 52 +++++++++++++ .../apache/spark/util/VersionUtilsSuite.scala | 76 +++++++++++++++++++ 2 files changed, 128 insertions(+) create mode 100644 core/src/main/scala/org/apache/spark/util/VersionUtils.scala create mode 100644 core/src/test/scala/org/apache/spark/util/VersionUtilsSuite.scala diff --git a/core/src/main/scala/org/apache/spark/util/VersionUtils.scala b/core/src/main/scala/org/apache/spark/util/VersionUtils.scala new file mode 100644 index 0000000000000..828153b868420 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/util/VersionUtils.scala @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.util + +/** + * Utilities for working with Spark version strings + */ +private[spark] object VersionUtils { + + private val majorMinorRegex = """^(\d+)\.(\d+)(\..*)?$""".r + + /** + * Given a Spark version string, return the major version number. + * E.g., for 2.0.1-SNAPSHOT, return 2. + */ + def majorVersion(sparkVersion: String): Int = majorMinorVersion(sparkVersion)._1 + + /** + * Given a Spark version string, return the minor version number. + * E.g., for 2.0.1-SNAPSHOT, return 0. + */ + def minorVersion(sparkVersion: String): Int = majorMinorVersion(sparkVersion)._2 + + /** + * Given a Spark version string, return the (major version number, minor version number). + * E.g., for 2.0.1-SNAPSHOT, return (2, 0). + */ + def majorMinorVersion(sparkVersion: String): (Int, Int) = { + majorMinorRegex.findFirstMatchIn(sparkVersion) match { + case Some(m) => + (m.group(1).toInt, m.group(2).toInt) + case None => + throw new IllegalArgumentException(s"Spark tried to parse '$sparkVersion' as a Spark" + + s" version string, but it could not find the major and minor version numbers.") + } + } +} diff --git a/core/src/test/scala/org/apache/spark/util/VersionUtilsSuite.scala b/core/src/test/scala/org/apache/spark/util/VersionUtilsSuite.scala new file mode 100644 index 0000000000000..aaf79ebd4f9fc --- /dev/null +++ b/core/src/test/scala/org/apache/spark/util/VersionUtilsSuite.scala @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.util + +import org.apache.spark.SparkFunSuite + +class VersionUtilsSuite extends SparkFunSuite { + + import org.apache.spark.util.VersionUtils._ + + test("Parse Spark major version") { + assert(majorVersion("2.0") === 2) + assert(majorVersion("12.10.11") === 12) + assert(majorVersion("2.0.1-SNAPSHOT") === 2) + assert(majorVersion("2.0.x") === 2) + withClue("majorVersion parsing should fail for invalid major version number") { + intercept[IllegalArgumentException] { + majorVersion("2z.0") + } + } + withClue("majorVersion parsing should fail for invalid minor version number") { + intercept[IllegalArgumentException] { + majorVersion("2.0z") + } + } + } + + test("Parse Spark minor version") { + assert(minorVersion("2.0") === 0) + assert(minorVersion("12.10.11") === 10) + assert(minorVersion("2.0.1-SNAPSHOT") === 0) + assert(minorVersion("2.0.x") === 0) + withClue("minorVersion parsing should fail for invalid major version number") { + intercept[IllegalArgumentException] { + minorVersion("2z.0") + } + } + withClue("minorVersion parsing should fail for invalid minor version number") { + intercept[IllegalArgumentException] { + minorVersion("2.0z") + } + } + } + + test("Parse Spark major and minor versions") { + assert(majorMinorVersion("2.0") === (2, 0)) + assert(majorMinorVersion("12.10.11") === (12, 10)) + assert(majorMinorVersion("2.0.1-SNAPSHOT") === (2, 0)) + assert(majorMinorVersion("2.0.x") === (2, 0)) + withClue("majorMinorVersion parsing should fail for invalid major version number") { + intercept[IllegalArgumentException] { + majorMinorVersion("2z.0") + } + } + withClue("majorMinorVersion parsing should fail for invalid minor version number") { + intercept[IllegalArgumentException] { + majorMinorVersion("2.0z") + } + } + } +} From 2ed601217ffd8945829ac762fae35202f3e55686 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Fri, 9 Sep 2016 05:43:34 -0700 Subject: [PATCH 240/270] [SPARK-17464][SPARKR][ML] SparkR spark.als argument reg should be 0.1 by default. ## What changes were proposed in this pull request? SparkR ```spark.als``` arguments ```reg``` should be 0.1 by default, which need to be consistent with ML. ## How was this patch tested? Existing tests. Author: Yanbo Liang Closes #15021 from yanboliang/spark-17464. 
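For reference, a hedged Scala-side sketch of the default this aligns with (assuming `org.apache.spark.ml.recommendation.ALS`, whose `regParam` defaults to 0.1):

```
import org.apache.spark.ml.recommendation.ALS

val als = new ALS()
  .setUserCol("user")
  .setItemCol("item")
  .setRatingCol("rating")
// Expected to print 0.1, the ML default that the SparkR `reg` argument now mirrors.
println(als.getRegParam)
```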
--- R/pkg/R/mllib.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index f321fd19b39b4..f8d1095a493dc 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -1241,7 +1241,7 @@ setMethod("predict", signature(object = "GaussianMixtureModel"), #' @note spark.als since 2.1.0 setMethod("spark.als", signature(data = "SparkDataFrame"), function(data, ratingCol = "rating", userCol = "user", itemCol = "item", - rank = 10, reg = 1.0, maxIter = 10, nonnegative = FALSE, + rank = 10, reg = 0.1, maxIter = 10, nonnegative = FALSE, implicitPrefs = FALSE, alpha = 1.0, numUserBlocks = 10, numItemBlocks = 10, checkpointInterval = 10, seed = 0) { From 7098a12945e71a159784836b75da855a603e1631 Mon Sep 17 00:00:00 2001 From: Satendra Kumar Date: Fri, 9 Sep 2016 19:15:06 +0100 Subject: [PATCH 241/270] Streaming doc correction. ## What changes were proposed in this pull request? Corrects the streaming programming guide to use the term "block interval" (the interval controlled by `spark.streaming.blockInterval`) instead of "blocking interval". ## How was this patch tested? Documentation-only change; no code paths are affected. Author: Satendra Kumar Closes #14996 from satendrakumar06/patch-1. --- docs/streaming-programming-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index c0e4f3b35afa5..5392b4a9bcf4b 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -2072,7 +2072,7 @@ unifiedStream.pprint()
    -Another parameter that should be considered is the receiver's blocking interval, +Another parameter that should be considered is the receiver's block interval, which is determined by the [configuration parameter](configuration.html#spark-streaming) `spark.streaming.blockInterval`. For most receivers, the received data is coalesced together into blocks of data before storing inside Spark's memory. The number of blocks in each batch From a3981c28c956a82ccf5b1c61d45b6bd252d4abed Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Fri, 9 Sep 2016 13:43:32 -0500 Subject: [PATCH 242/270] [SPARK-17433] YarnShuffleService doesn't handle moving credentials levelDb The secrets leveldb isn't being moved if you run spark shuffle services without yarn nm recovery on and then turn it on. This fixes that. I unfortunately missed this when I ported the patch from our internal branch 2 to master branch due to the changes for the recovery path. Note this only applies to master since it is the only place the yarn nm recovery dir is used. Unit tests ran and tested on 8 node cluster. Fresh startup with NM recovery, fresh startup no nm recovery, switching between no nm recovery and recovery. Also tested running applications to make sure wasn't affected by rolling upgrade. Author: Thomas Graves Author: Tom Graves Closes #14999 from tgravescs/SPARK-17433. --- .../network/yarn/YarnShuffleService.java | 56 +++++++++++++------ .../yarn/YarnShuffleServiceSuite.scala | 12 +++- 2 files changed, 50 insertions(+), 18 deletions(-) diff --git a/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java b/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java index df082e4a92747..43c8df721d5a1 100644 --- a/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java +++ b/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.ByteBuffer; +import java.nio.file.Files; import java.util.List; import java.util.Map; @@ -159,8 +160,7 @@ protected void serviceInit(Configuration conf) throws Exception { // If we don't find one, then we choose a file to use to save the state next time. Even if // an application was stopped while the NM was down, we expect yarn to call stopApplication() // when it comes back - registeredExecutorFile = - new File(getRecoveryPath().toUri().getPath(), RECOVERY_FILE_NAME); + registeredExecutorFile = initRecoveryDb(RECOVERY_FILE_NAME); TransportConf transportConf = new TransportConf("shuffle", new HadoopConfigProvider(conf)); blockHandler = new ExternalShuffleBlockHandler(transportConf, registeredExecutorFile); @@ -196,7 +196,7 @@ protected void serviceInit(Configuration conf) throws Exception { private void createSecretManager() throws IOException { secretManager = new ShuffleSecretManager(); - secretsFile = new File(getRecoveryPath().toUri().getPath(), SECRETS_RECOVERY_FILE_NAME); + secretsFile = initRecoveryDb(SECRETS_RECOVERY_FILE_NAME); // Make sure this is protected in case its not in the NM recovery dir FileSystem fs = FileSystem.getLocal(_conf); @@ -328,37 +328,59 @@ public void setRecoveryPath(Path recoveryPath) { } /** - * Get the recovery path, this will override the default one to get our own maintained - * recovery path. + * Get the path specific to this auxiliary service to use for recovery. 
+ */ + protected Path getRecoveryPath(String fileName) { + return _recoveryPath; + } + + /** + * Figure out the recovery path and handle moving the DB if YARN NM recovery gets enabled + * when it previously was not. If YARN NM recovery is enabled it uses that path, otherwise + * it will uses a YARN local dir. */ - protected Path getRecoveryPath() { + protected File initRecoveryDb(String dbFileName) { + if (_recoveryPath != null) { + File recoveryFile = new File(_recoveryPath.toUri().getPath(), dbFileName); + if (recoveryFile.exists()) { + return recoveryFile; + } + } + // db doesn't exist in recovery path go check local dirs for it String[] localDirs = _conf.getTrimmedStrings("yarn.nodemanager.local-dirs"); for (String dir : localDirs) { - File f = new File(new Path(dir).toUri().getPath(), RECOVERY_FILE_NAME); + File f = new File(new Path(dir).toUri().getPath(), dbFileName); if (f.exists()) { if (_recoveryPath == null) { // If NM recovery is not enabled, we should specify the recovery path using NM local // dirs, which is compatible with the old code. _recoveryPath = new Path(dir); + return f; } else { - // If NM recovery is enabled and the recovery file exists in old NM local dirs, which - // means old version of Spark already generated the recovery file, we should copy the - // old file in to a new recovery path for the compatibility. - if (!f.renameTo(new File(_recoveryPath.toUri().getPath(), RECOVERY_FILE_NAME))) { - // Fail to move recovery file to new path - logger.error("Failed to move recovery file {} to the path {}", - RECOVERY_FILE_NAME, _recoveryPath.toString()); + // If the recovery path is set then either NM recovery is enabled or another recovery + // DB has been initialized. If NM recovery is enabled and had set the recovery path + // make sure to move all DBs to the recovery path from the old NM local dirs. + // If another DB was initialized first just make sure all the DBs are in the same + // location. + File newLoc = new File(_recoveryPath.toUri().getPath(), dbFileName); + if (!newLoc.equals(f)) { + try { + Files.move(f.toPath(), newLoc.toPath()); + } catch (Exception e) { + // Fail to move recovery file to new path, just continue on with new DB location + logger.error("Failed to move recovery file {} to the path {}", + dbFileName, _recoveryPath.toString(), e); + } } + return newLoc; } - break; } } - if (_recoveryPath == null) { _recoveryPath = new Path(localDirs[0]); } - return _recoveryPath; + return new File(_recoveryPath.toUri().getPath(), dbFileName); } /** diff --git a/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala b/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala index 9a071862bbdb0..c86bf7f70c98e 100644 --- a/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala +++ b/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala @@ -267,13 +267,15 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd s2.stop() } - test("moving recovery file form NM local dir to recovery path") { + test("moving recovery file from NM local dir to recovery path") { // This is to test when Hadoop is upgrade to 2.5+ and NM recovery is enabled, we should move // old recovery file to the new path to keep compatibility // Simulate s1 is running on old version of Hadoop in which recovery file is in the NM local // dir. 
s1 = new YarnShuffleService + // set auth to true to test the secrets recovery + yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, true) s1.init(yarnConfig) val app1Id = ApplicationId.newInstance(0, 1) val app1Data: ApplicationInitializationContext = @@ -286,6 +288,8 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd val execStateFile = s1.registeredExecutorFile execStateFile should not be (null) + val secretsFile = s1.secretsFile + secretsFile should not be (null) val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER) val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER) @@ -312,10 +316,16 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd s2.init(yarnConfig) val execStateFile2 = s2.registeredExecutorFile + val secretsFile2 = s2.secretsFile + recoveryPath.toString should be (new Path(execStateFile2.getParentFile.toURI).toString) + recoveryPath.toString should be (new Path(secretsFile2.getParentFile.toURI).toString) eventually(timeout(10 seconds), interval(5 millis)) { assert(!execStateFile.exists()) } + eventually(timeout(10 seconds), interval(5 millis)) { + assert(!secretsFile.exists()) + } val handler2 = s2.blockHandler val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2) From f7d2143705c8c1baeed0bc62940f9dba636e705b Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Fri, 9 Sep 2016 14:23:05 -0700 Subject: [PATCH 243/270] [SPARK-17354] [SQL] Partitioning by dates/timestamps should work with Parquet vectorized reader ## What changes were proposed in this pull request? This PR fixes `ColumnVectorUtils.populate` so that Parquet vectorized reader can read partitioned table with dates/timestamps. This works fine with Parquet normal reader. This is being only called within [VectorizedParquetRecordReader.java#L185](https://github.com/apache/spark/blob/master/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java#L185). When partition column types are explicitly given to `DateType` or `TimestampType` (rather than inferring the type of partition column), this fails with the exception below: ``` 16/09/01 10:30:07 ERROR Executor: Exception in task 0.0 in stage 5.0 (TID 6) java.lang.ClassCastException: java.lang.Integer cannot be cast to java.sql.Date at org.apache.spark.sql.execution.vectorized.ColumnVectorUtils.populate(ColumnVectorUtils.java:89) at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initBatch(VectorizedParquetRecordReader.java:185) at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initBatch(VectorizedParquetRecordReader.java:204) at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$buildReader$1.apply(ParquetFileFormat.scala:362) ... ``` ## How was this patch tested? Unit tests in `SQLQuerySuite`. Author: hyukjinkwon Closes #14919 from HyukjinKwon/SPARK-17354. 
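For context, a hedged sketch of the failing scenario (mirroring the Hive test added below; it assumes a SparkSession with Hive support, and the table and column names are illustrative):

```
spark.conf.set("spark.sql.parquet.enableVectorizedReader", "true")
spark.sql(
  """CREATE TABLE order_tbl(id INT)
    |PARTITIONED BY (pd DATE, pt TIMESTAMP)
    |STORED AS PARQUET""".stripMargin)
spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")
spark.sql(
  """INSERT INTO TABLE order_tbl PARTITION(pd, pt)
    |SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt
  """.stripMargin)
// Before this patch, scanning the table with the vectorized reader threw the
// ClassCastException shown above; with the fix the row is returned as expected.
spark.sql("SELECT * FROM order_tbl").show()
```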
--- .../vectorized/ColumnVectorUtils.java | 5 +- .../execution/vectorized/ColumnarBatch.java | 6 +++ .../datasources/parquet/ParquetIOSuite.scala | 49 ++++++++++++++++++- .../sql/hive/execution/SQLQuerySuite.scala | 21 ++++++++ 4 files changed, 78 insertions(+), 3 deletions(-) diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java index 2fa476b9cfb71..900d7c431e723 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java @@ -86,8 +86,9 @@ public static void populate(ColumnVector col, InternalRow row, int fieldIdx) { col.getChildColumn(0).putInts(0, capacity, c.months); col.getChildColumn(1).putLongs(0, capacity, c.microseconds); } else if (t instanceof DateType) { - Date date = (Date)row.get(fieldIdx, t); - col.putInts(0, capacity, DateTimeUtils.fromJavaDate(date)); + col.putInts(0, capacity, row.getInt(fieldIdx)); + } else if (t instanceof TimestampType) { + col.putLongs(0, capacity, row.getLong(fieldIdx)); } } } diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java index f3afa8f938f86..62abc2a821a3a 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java @@ -137,6 +137,10 @@ public InternalRow copy() { DataType dt = columns[i].dataType(); if (dt instanceof BooleanType) { row.setBoolean(i, getBoolean(i)); + } else if (dt instanceof ByteType) { + row.setByte(i, getByte(i)); + } else if (dt instanceof ShortType) { + row.setShort(i, getShort(i)); } else if (dt instanceof IntegerType) { row.setInt(i, getInt(i)); } else if (dt instanceof LongType) { @@ -154,6 +158,8 @@ public InternalRow copy() { row.setDecimal(i, getDecimal(i, t.precision(), t.scale()), t.precision()); } else if (dt instanceof DateType) { row.setInt(i, getInt(i)); + } else if (dt instanceof TimestampType) { + row.setLong(i, getLong(i)); } else { throw new RuntimeException("Not implemented. " + dt); } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala index 4aa046bd91e02..3161a630af0f1 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala @@ -38,11 +38,12 @@ import org.apache.parquet.schema.{MessageType, MessageTypeParser} import org.apache.spark.SparkException import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.{InternalRow, ScalaReflection} -import org.apache.spark.sql.catalyst.expressions.UnsafeRow +import org.apache.spark.sql.catalyst.expressions.{GenericMutableRow, UnsafeRow} import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types._ +import org.apache.spark.unsafe.types.UTF8String // Write support class for nested groups: ParquetWriter initializes GroupWriteSupport // with an empty configuration (it is after all not intended to be used in this way?) 
@@ -689,6 +690,52 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext { } } } + + test("VectorizedParquetRecordReader - partition column types") { + withTempPath { dir => + Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath) + + val dataTypes = + Seq(StringType, BooleanType, ByteType, ShortType, IntegerType, LongType, + FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType) + + val constantValues = + Seq( + UTF8String.fromString("a string"), + true, + 1.toByte, + 2.toShort, + 3, + Long.MaxValue, + 0.25.toFloat, + 0.75D, + Decimal("1234.23456"), + DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")), + DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"))) + + dataTypes.zip(constantValues).foreach { case (dt, v) => + val schema = StructType(StructField("pcol", dt) :: Nil) + val vectorizedReader = new VectorizedParquetRecordReader + val partitionValues = new GenericMutableRow(Array(v)) + val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0) + + try { + vectorizedReader.initialize(file, null) + vectorizedReader.initBatch(schema, partitionValues) + vectorizedReader.nextKeyValue() + val row = vectorizedReader.getCurrentValue.asInstanceOf[InternalRow] + + // Use `GenericMutableRow` by explicitly copying rather than `ColumnarBatch` + // in order to use get(...) method which is not implemented in `ColumnarBatch`. + val actual = row.copy().get(1, dt) + val expected = v + assert(actual == expected) + } finally { + vectorizedReader.close() + } + } + } + } } class JobCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala index 05d0687fb7e48..dc4d099f0f666 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala @@ -1787,6 +1787,27 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton { } } + test("SPARK-17354: Partitioning by dates/timestamps works with Parquet vectorized reader") { + withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") { + sql( + """CREATE TABLE order(id INT) + |PARTITIONED BY (pd DATE, pt TIMESTAMP) + |STORED AS PARQUET + """.stripMargin) + + sql("set hive.exec.dynamic.partition.mode=nonstrict") + sql( + """INSERT INTO TABLE order PARTITION(pd, pt) + |SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt + """.stripMargin) + val actual = sql("SELECT * FROM order") + val expected = sql( + "SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt") + checkAnswer(actual, expected) + sql("DROP TABLE order") + } + } + def testCommandAvailable(command: String): Boolean = { val attempt = Try(Process(command).run(ProcessLogger(_ => ())).exitValue()) attempt.isSuccess && attempt.get == 0 From 335491704c526921da3b3c5035175677ba5b92de Mon Sep 17 00:00:00 2001 From: Tejas Patil Date: Sat, 10 Sep 2016 09:27:22 +0800 Subject: [PATCH 244/270] [SPARK-15453][SQL] FileSourceScanExec to extract `outputOrdering` information ## What changes were proposed in this pull request? Jira : https://issues.apache.org/jira/browse/SPARK-15453 Extracting sort ordering information in `FileSourceScanExec` so that planner can make use of it. 
My motivation to make this change was to get Sort Merge join in par with Hive's Sort-Merge-Bucket join when the source tables are bucketed + sorted. Query: ``` val df = (0 until 16).map(i => (i % 8, i * 2, i.toString)).toDF("i", "j", "k").coalesce(1) df.write.bucketBy(8, "j", "k").sortBy("j", "k").saveAsTable("table8") df.write.bucketBy(8, "j", "k").sortBy("j", "k").saveAsTable("table9") context.sql("SELECT * FROM table8 a JOIN table9 b ON a.j=b.j AND a.k=b.k").explain(true) ``` Before: ``` == Physical Plan == *SortMergeJoin [j#120, k#121], [j#123, k#124], Inner :- *Sort [j#120 ASC, k#121 ASC], false, 0 : +- *Project [i#119, j#120, k#121] : +- *Filter (isnotnull(k#121) && isnotnull(j#120)) : +- *FileScan orc default.table8[i#119,j#120,k#121] Batched: false, Format: ORC, InputPaths: file:/Users/tejasp/Desktop/dev/tp-spark/spark-warehouse/table8, PartitionFilters: [], PushedFilters: [IsNotNull(k), IsNotNull(j)], ReadSchema: struct +- *Sort [j#123 ASC, k#124 ASC], false, 0 +- *Project [i#122, j#123, k#124] +- *Filter (isnotnull(k#124) && isnotnull(j#123)) +- *FileScan orc default.table9[i#122,j#123,k#124] Batched: false, Format: ORC, InputPaths: file:/Users/tejasp/Desktop/dev/tp-spark/spark-warehouse/table9, PartitionFilters: [], PushedFilters: [IsNotNull(k), IsNotNull(j)], ReadSchema: struct ``` After: (note that the `Sort` step is no longer there) ``` == Physical Plan == *SortMergeJoin [j#49, k#50], [j#52, k#53], Inner :- *Project [i#48, j#49, k#50] : +- *Filter (isnotnull(k#50) && isnotnull(j#49)) : +- *FileScan orc default.table8[i#48,j#49,k#50] Batched: false, Format: ORC, InputPaths: file:/Users/tejasp/Desktop/dev/tp-spark/spark-warehouse/table8, PartitionFilters: [], PushedFilters: [IsNotNull(k), IsNotNull(j)], ReadSchema: struct +- *Project [i#51, j#52, k#53] +- *Filter (isnotnull(j#52) && isnotnull(k#53)) +- *FileScan orc default.table9[i#51,j#52,k#53] Batched: false, Format: ORC, InputPaths: file:/Users/tejasp/Desktop/dev/tp-spark/spark-warehouse/table9, PartitionFilters: [], PushedFilters: [IsNotNull(j), IsNotNull(k)], ReadSchema: struct ``` ## How was this patch tested? Added a test case in `JoinSuite`. Ran all other tests in `JoinSuite` Author: Tejas Patil Closes #14864 from tejasapatil/SPARK-15453_smb_optimization. 
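A hedged sketch of how the effect can be checked programmatically, reusing `table8`/`table9` from the example above (`SortExec` is the physical sort operator that should now be absent):

```
import org.apache.spark.sql.execution.SortExec

val plan = spark.sql(
  "SELECT * FROM table8 a JOIN table9 b ON a.j = b.j AND a.k = b.k")
  .queryExecution.executedPlan
// With bucket and sort columns matching the join keys, neither join side needs a sort.
assert(plan.collect { case s: SortExec => s }.isEmpty)
```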
--- .../sql/execution/DataSourceScanExec.scala | 79 +++++++++++++++---- .../spark/sql/sources/BucketedReadSuite.scala | 63 ++++++++++++++- 2 files changed, 123 insertions(+), 19 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala index 9597bdf34b714..6cdba406937de 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala @@ -23,12 +23,11 @@ import org.apache.commons.lang3.StringUtils import org.apache.hadoop.fs.{BlockLocation, FileStatus, LocatedFileStatus, Path} import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{Row, SparkSession, SQLContext} +import org.apache.spark.sql.{AnalysisException, SparkSession} import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier} import org.apache.spark.sql.catalyst.catalog.BucketSpec import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode} -import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, UnknownPartitioning} import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat => ParquetSource} @@ -156,24 +155,72 @@ case class FileSourceScanExec( false } - override val outputPartitioning: Partitioning = { + @transient private lazy val selectedPartitions = relation.location.listFiles(partitionFilters) + + override val (outputPartitioning, outputOrdering): (Partitioning, Seq[SortOrder]) = { val bucketSpec = if (relation.sparkSession.sessionState.conf.bucketingEnabled) { relation.bucketSpec } else { None } - bucketSpec.map { spec => - val numBuckets = spec.numBuckets - val bucketColumns = spec.bucketColumnNames.flatMap { n => - output.find(_.name == n) - } - if (bucketColumns.size == spec.bucketColumnNames.size) { - HashPartitioning(bucketColumns, numBuckets) - } else { - UnknownPartitioning(0) - } - }.getOrElse { - UnknownPartitioning(0) + bucketSpec match { + case Some(spec) => + // For bucketed columns: + // ----------------------- + // `HashPartitioning` would be used only when: + // 1. ALL the bucketing columns are being read from the table + // + // For sorted columns: + // --------------------- + // Sort ordering should be used when ALL these criteria's match: + // 1. `HashPartitioning` is being used + // 2. A prefix (or all) of the sort columns are being read from the table. + // + // Sort ordering would be over the prefix subset of `sort columns` being read + // from the table. + // eg. 
+ // Assume (col0, col2, col3) are the columns read from the table + // If sort columns are (col0, col1), then sort ordering would be considered as (col0) + // If sort columns are (col1, col0), then sort ordering would be empty as per rule #2 + // above + + def toAttribute(colName: String): Option[Attribute] = + output.find(_.name == colName) + + val bucketColumns = spec.bucketColumnNames.flatMap(n => toAttribute(n)) + if (bucketColumns.size == spec.bucketColumnNames.size) { + val partitioning = HashPartitioning(bucketColumns, spec.numBuckets) + val sortColumns = + spec.sortColumnNames.map(x => toAttribute(x)).takeWhile(x => x.isDefined).map(_.get) + + val sortOrder = if (sortColumns.nonEmpty) { + // In case of bucketing, its possible to have multiple files belonging to the + // same bucket in a given relation. Each of these files are locally sorted + // but those files combined together are not globally sorted. Given that, + // the RDD partition will not be sorted even if the relation has sort columns set + // Current solution is to check if all the buckets have a single file in it + + val files = selectedPartitions.flatMap(partition => partition.files) + val bucketToFilesGrouping = + files.map(_.getPath.getName).groupBy(file => BucketingUtils.getBucketId(file)) + val singleFilePartitions = bucketToFilesGrouping.forall(p => p._2.length <= 1) + + if (singleFilePartitions) { + // TODO Currently Spark does not support writing columns sorting in descending order + // so using Ascending order. This can be fixed in future + sortColumns.map(attribute => SortOrder(attribute, Ascending)) + } else { + Nil + } + } else { + Nil + } + (partitioning, sortOrder) + } else { + (UnknownPartitioning(0), Nil) + } + case _ => + (UnknownPartitioning(0), Nil) } } @@ -187,8 +234,6 @@ case class FileSourceScanExec( "InputPaths" -> relation.location.paths.mkString(", ")) private lazy val inputRDD: RDD[InternalRow] = { - val selectedPartitions = relation.location.listFiles(partitionFilters) - val readFile: (PartitionedFile) => Iterator[InternalRow] = relation.fileFormat.buildReaderWithPartitionValues( sparkSession = relation.sparkSession, diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala index ca2ec9f6a5ede..3ff85176de10e 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala @@ -23,7 +23,7 @@ import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.catalog.BucketSpec import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning -import org.apache.spark.sql.execution.DataSourceScanExec +import org.apache.spark.sql.execution.{DataSourceScanExec, SortExec} import org.apache.spark.sql.execution.datasources.DataSourceStrategy import org.apache.spark.sql.execution.exchange.ShuffleExchange import org.apache.spark.sql.execution.joins.SortMergeJoinExec @@ -237,7 +237,9 @@ class BucketedReadSuite extends QueryTest with SQLTestUtils with TestHiveSinglet bucketSpecRight: Option[BucketSpec], joinColumns: Seq[String], shuffleLeft: Boolean, - shuffleRight: Boolean): Unit = { + shuffleRight: Boolean, + sortLeft: Boolean = true, + sortRight: Boolean = true): Unit = { withTable("bucketed_table1", "bucketed_table2") { def withBucket( writer: DataFrameWriter[Row], @@ -247,6 +249,15 @@ class BucketedReadSuite extends QueryTest with 
SQLTestUtils with TestHiveSinglet spec.numBuckets, spec.bucketColumnNames.head, spec.bucketColumnNames.tail: _*) + + if (spec.sortColumnNames.nonEmpty) { + writer.sortBy( + spec.sortColumnNames.head, + spec.sortColumnNames.tail: _* + ) + } else { + writer + } }.getOrElse(writer) } @@ -267,12 +278,21 @@ class BucketedReadSuite extends QueryTest with SQLTestUtils with TestHiveSinglet assert(joined.queryExecution.executedPlan.isInstanceOf[SortMergeJoinExec]) val joinOperator = joined.queryExecution.executedPlan.asInstanceOf[SortMergeJoinExec] + // check existence of shuffle assert( joinOperator.left.find(_.isInstanceOf[ShuffleExchange]).isDefined == shuffleLeft, s"expected shuffle in plan to be $shuffleLeft but found\n${joinOperator.left}") assert( joinOperator.right.find(_.isInstanceOf[ShuffleExchange]).isDefined == shuffleRight, s"expected shuffle in plan to be $shuffleRight but found\n${joinOperator.right}") + + // check existence of sort + assert( + joinOperator.left.find(_.isInstanceOf[SortExec]).isDefined == sortLeft, + s"expected sort in plan to be $shuffleLeft but found\n${joinOperator.left}") + assert( + joinOperator.right.find(_.isInstanceOf[SortExec]).isDefined == sortRight, + s"expected sort in plan to be $shuffleRight but found\n${joinOperator.right}") } } } @@ -321,6 +341,45 @@ class BucketedReadSuite extends QueryTest with SQLTestUtils with TestHiveSinglet } } + test("avoid shuffle and sort when bucket and sort columns are join keys") { + val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j"))) + testBucketing( + bucketSpec, bucketSpec, Seq("i", "j"), + shuffleLeft = false, shuffleRight = false, + sortLeft = false, sortRight = false + ) + } + + test("avoid shuffle and sort when sort columns are a super set of join keys") { + val bucketSpec1 = Some(BucketSpec(8, Seq("i"), Seq("i", "j"))) + val bucketSpec2 = Some(BucketSpec(8, Seq("i"), Seq("i", "k"))) + testBucketing( + bucketSpec1, bucketSpec2, Seq("i"), + shuffleLeft = false, shuffleRight = false, + sortLeft = false, sortRight = false + ) + } + + test("only sort one side when sort columns are different") { + val bucketSpec1 = Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j"))) + val bucketSpec2 = Some(BucketSpec(8, Seq("i", "j"), Seq("k"))) + testBucketing( + bucketSpec1, bucketSpec2, Seq("i", "j"), + shuffleLeft = false, shuffleRight = false, + sortLeft = false, sortRight = true + ) + } + + test("only sort one side when sort columns are same but their ordering is different") { + val bucketSpec1 = Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j"))) + val bucketSpec2 = Some(BucketSpec(8, Seq("i", "j"), Seq("j", "i"))) + testBucketing( + bucketSpec1, bucketSpec2, Seq("i", "j"), + shuffleLeft = false, shuffleRight = false, + sortLeft = false, sortRight = true + ) + } + test("avoid shuffle when grouping keys are equal to bucket keys") { withTable("bucketed_table") { df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("bucketed_table") From 1fec3ce4e19664aa9f9238d9491b0cb1511f9be1 Mon Sep 17 00:00:00 2001 From: Yves Raimond Date: Sat, 10 Sep 2016 00:15:59 -0700 Subject: [PATCH 245/270] [SPARK-11496][GRAPHX] Parallel implementation of personalized pagerank (Updated version of [PR-9457](https://github.com/apache/spark/pull/9457), rebased on latest Spark master, and using mllib-local). This implements a parallel version of personalized pagerank, which runs all propagations for a list of source vertices in parallel. I ran a few benchmarks on the full [DBpedia](http://dbpedia.org/) graph. 
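For orientation, a hedged sketch of the new entry point being benchmarked (signature as added to `GraphOps` in this patch; `graph` and the vertex ids are placeholders):

```
import org.apache.spark.graphx.Graph
import org.apache.spark.ml.linalg.Vector

val sources = Array(1L, 2L, 3L)  // personalization sources (VertexId = Long)
val ranks: Graph[Vector, Double] =
  graph.staticParallelPersonalizedPageRank(sources, numIter = 10, resetProb = 0.15)
// Every vertex ends up with a sparse vector holding one score per source vertex.
```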
When running personalized pagerank for only one source node, the existing implementation is twice as fast as the parallel one (because of the SparseVector overhead). However for 10 source nodes, the parallel implementation is four times as fast. When increasing the number of source nodes, this difference becomes even greater. ![image](https://cloud.githubusercontent.com/assets/2491/10927702/dd82e4fa-8256-11e5-89a8-4799b407f502.png) Author: Yves Raimond Closes #14998 from moustaki/parallel-ppr. --- graphx/pom.xml | 5 ++ .../org/apache/spark/graphx/GraphOps.scala | 12 ++- .../apache/spark/graphx/lib/PageRank.scala | 81 +++++++++++++++++++ .../spark/graphx/lib/PageRankSuite.scala | 24 ++++++ 4 files changed, 121 insertions(+), 1 deletion(-) diff --git a/graphx/pom.xml b/graphx/pom.xml index bd4e53371b86d..10d5ba93ebb88 100644 --- a/graphx/pom.xml +++ b/graphx/pom.xml @@ -46,6 +46,11 @@ test-jar test + + org.apache.spark + spark-mllib-local_${scala.binary.version} + ${project.version} + org.apache.xbean xbean-asm5-shaded diff --git a/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala b/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala index 868658dfe55e5..90907300be975 100644 --- a/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala +++ b/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala @@ -20,9 +20,10 @@ package org.apache.spark.graphx import scala.reflect.ClassTag import scala.util.Random -import org.apache.spark.SparkException import org.apache.spark.graphx.lib._ +import org.apache.spark.ml.linalg.Vector import org.apache.spark.rdd.RDD +import org.apache.spark.SparkException /** * Contains additional functionality for [[Graph]]. All operations are expressed in terms of the @@ -391,6 +392,15 @@ class GraphOps[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]) extends Seriali PageRank.runUntilConvergenceWithOptions(graph, tol, resetProb, Some(src)) } + /** + * Run parallel personalized PageRank for a given array of source vertices, such + * that all random walks are started relative to the source vertices + */ + def staticParallelPersonalizedPageRank(sources: Array[VertexId], numIter: Int, + resetProb: Double = 0.15) : Graph[Vector, Double] = { + PageRank.runParallelPersonalizedPageRank(graph, numIter, resetProb, sources) + } + /** * Run Personalized PageRank for a fixed number of iterations with * with all iterations originating at the source node diff --git a/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala b/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala index 2f5bd4ed4ff6b..f4b00757a8b54 100644 --- a/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala +++ b/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala @@ -19,8 +19,11 @@ package org.apache.spark.graphx.lib import scala.reflect.ClassTag +import breeze.linalg.{Vector => BV} + import org.apache.spark.graphx._ import org.apache.spark.internal.Logging +import org.apache.spark.ml.linalg.{Vector, Vectors} /** * PageRank algorithm implementation. There are two implementations of PageRank implemented. @@ -162,6 +165,84 @@ object PageRank extends Logging { rankGraph } + /** + * Run Personalized PageRank for a fixed number of iterations, for a + * set of starting nodes in parallel. 
Returns a graph with vertex attributes + * containing the pagerank relative to all starting nodes (as a sparse vector) and + * edge attributes the normalized edge weight + * + * @tparam VD The original vertex attribute (not used) + * @tparam ED The original edge attribute (not used) + * + * @param graph The graph on which to compute personalized pagerank + * @param numIter The number of iterations to run + * @param resetProb The random reset probability + * @param sources The list of sources to compute personalized pagerank from + * @return the graph with vertex attributes + * containing the pagerank relative to all starting nodes (as a sparse vector) and + * edge attributes the normalized edge weight + */ + def runParallelPersonalizedPageRank[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED], + numIter: Int, resetProb: Double = 0.15, + sources: Array[VertexId]): Graph[Vector, Double] = { + // TODO if one sources vertex id is outside of the int range + // we won't be able to store its activations in a sparse vector + val zero = Vectors.sparse(sources.size, List()).asBreeze + val sourcesInitMap = sources.zipWithIndex.map { case (vid, i) => + val v = Vectors.sparse(sources.size, Array(i), Array(resetProb)).asBreeze + (vid, v) + }.toMap + val sc = graph.vertices.sparkContext + val sourcesInitMapBC = sc.broadcast(sourcesInitMap) + // Initialize the PageRank graph with each edge attribute having + // weight 1/outDegree and each source vertex with attribute 1.0. + var rankGraph = graph + // Associate the degree with each vertex + .outerJoinVertices(graph.outDegrees) { (vid, vdata, deg) => deg.getOrElse(0) } + // Set the weight on the edges based on the degree + .mapTriplets(e => 1.0 / e.srcAttr, TripletFields.Src) + .mapVertices { (vid, attr) => + if (sourcesInitMapBC.value contains vid) { + sourcesInitMapBC.value(vid) + } else { + zero + } + } + + var i = 0 + while (i < numIter) { + val prevRankGraph = rankGraph + // Propagates the message along outbound edges + // and adding start nodes back in with activation resetProb + val rankUpdates = rankGraph.aggregateMessages[BV[Double]]( + ctx => ctx.sendToDst(ctx.srcAttr :* ctx.attr), + (a : BV[Double], b : BV[Double]) => a :+ b, TripletFields.Src) + + rankGraph = rankGraph.joinVertices(rankUpdates) { + (vid, oldRank, msgSum) => + val popActivations: BV[Double] = msgSum :* (1.0 - resetProb) + val resetActivations = if (sourcesInitMapBC.value contains vid) { + sourcesInitMapBC.value(vid) + } else { + zero + } + popActivations :+ resetActivations + }.cache() + + rankGraph.edges.foreachPartition(x => {}) // also materializes rankGraph.vertices + prevRankGraph.vertices.unpersist(false) + prevRankGraph.edges.unpersist(false) + + logInfo(s"Parallel Personalized PageRank finished iteration $i.") + + i += 1 + } + + rankGraph.mapVertices { (vid, attr) => + Vectors.fromBreeze(attr) + } + } + /** * Run a dynamic version of PageRank returning a graph with vertex attributes containing the * PageRank and edge attributes containing the normalized edge weight. 
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala index bdff31446f8ee..b6305c8d00aba 100644 --- a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala +++ b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala @@ -118,11 +118,29 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext { val dynamicRanks = starGraph.personalizedPageRank(0, 0, resetProb).vertices.cache() assert(compareRanks(staticRanks2, dynamicRanks) < errorTol) + val parallelStaticRanks1 = starGraph + .staticParallelPersonalizedPageRank(Array(0), 1, resetProb).mapVertices { + case (vertexId, vector) => vector(0) + }.vertices.cache() + assert(compareRanks(staticRanks1, parallelStaticRanks1) < errorTol) + + val parallelStaticRanks2 = starGraph + .staticParallelPersonalizedPageRank(Array(0, 1), 2, resetProb).mapVertices { + case (vertexId, vector) => vector(0) + }.vertices.cache() + assert(compareRanks(staticRanks2, parallelStaticRanks2) < errorTol) + // We have one outbound edge from 1 to 0 val otherStaticRanks2 = starGraph.staticPersonalizedPageRank(1, numIter = 2, resetProb) .vertices.cache() val otherDynamicRanks = starGraph.personalizedPageRank(1, 0, resetProb).vertices.cache() + val otherParallelStaticRanks2 = starGraph + .staticParallelPersonalizedPageRank(Array(0, 1), 2, resetProb).mapVertices { + case (vertexId, vector) => vector(1) + }.vertices.cache() assert(compareRanks(otherDynamicRanks, otherStaticRanks2) < errorTol) + assert(compareRanks(otherStaticRanks2, otherParallelStaticRanks2) < errorTol) + assert(compareRanks(otherDynamicRanks, otherParallelStaticRanks2) < errorTol) } } // end of test Star PersonalPageRank @@ -177,6 +195,12 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext { val dynamicRanks = chain.personalizedPageRank(4, tol, resetProb).vertices assert(compareRanks(staticRanks, dynamicRanks) < errorTol) + + val parallelStaticRanks = chain + .staticParallelPersonalizedPageRank(Array(4), numIter, resetProb).mapVertices { + case (vertexId, vector) => vector(0) + }.vertices.cache() + assert(compareRanks(staticRanks, parallelStaticRanks) < errorTol) } } } From bcdd259c371b1dcdb41baf227867d7e2ecb923c6 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Sat, 10 Sep 2016 00:27:10 -0700 Subject: [PATCH 246/270] [SPARK-15509][FOLLOW-UP][ML][SPARKR] R MLlib algorithms should support input columns "features" and "label" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What changes were proposed in this pull request? #13584 resolved the issue of features and label columns conflict with ```RFormula``` default ones when loading libsvm data, but it still left some issues should be resolved: 1, It’s not necessary to check and rename label column. Since we have considerations on the design of ```RFormula```, it can handle the case of label column already exists(with restriction of the existing label column should be numeric/boolean type). So it’s not necessary to change the column name to avoid conflict. If the label column is not numeric/boolean type, ```RFormula``` will throw exception. 2, We should rename features column name to new one if there is conflict, but appending a random value is enough since it was used internally only. We done similar work when implementing ```SQLTransformer```. 3, We should set correct new features column for the estimators. 
Take ```GLM``` as example: ```GLM``` estimator should set features column with the changed one(rFormula.getFeaturesCol) rather than the default “features”. Although it’s same when training model, but it involves problems when predicting. The following is the prediction result of GLM before this PR: ![image](https://cloud.githubusercontent.com/assets/1962026/18308227/84c3c452-74a8-11e6-9caa-9d6d846cc957.png) We should drop the internal used feature column name, otherwise, it will appear on the prediction DataFrame which will confused users. And this behavior is same as other scenarios which does not exist column name conflict. After this PR: ![image](https://cloud.githubusercontent.com/assets/1962026/18308240/92082a04-74a8-11e6-9226-801f52b856d9.png) ## How was this patch tested? Existing unit tests. Author: Yanbo Liang Closes #14993 from yanboliang/spark-15509. --- .../ml/r/AFTSurvivalRegressionWrapper.scala | 1 + .../spark/ml/r/GaussianMixtureWrapper.scala | 1 + .../GeneralizedLinearRegressionWrapper.scala | 1 + .../ml/r/IsotonicRegressionWrapper.scala | 1 + .../org/apache/spark/ml/r/KMeansWrapper.scala | 1 + .../apache/spark/ml/r/NaiveBayesWrapper.scala | 1 + .../org/apache/spark/ml/r/RWrapperUtils.scala | 34 +++---------------- .../spark/ml/r/RWrapperUtilsSuite.scala | 16 +++------ 8 files changed, 14 insertions(+), 42 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala index 67d037ed6e024..bd965acf56944 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala @@ -99,6 +99,7 @@ private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalReg val aft = new AFTSurvivalRegression() .setCensorCol(censorCol) .setFitIntercept(rFormula.hasIntercept) + .setFeaturesCol(rFormula.getFeaturesCol) val pipeline = new Pipeline() .setStages(Array(rFormulaModel, aft)) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala index b654233a89360..b708702959829 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/GaussianMixtureWrapper.scala @@ -85,6 +85,7 @@ private[r] object GaussianMixtureWrapper extends MLReadable[GaussianMixtureWrapp .setK(k) .setMaxIter(maxIter) .setTol(tol) + .setFeaturesCol(rFormula.getFeaturesCol) val pipeline = new Pipeline() .setStages(Array(rFormulaModel, gm)) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala index 35313258f940e..b1bb577e1ffe4 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala @@ -89,6 +89,7 @@ private[r] object GeneralizedLinearRegressionWrapper .setMaxIter(maxIter) .setWeightCol(weightCol) .setRegParam(regParam) + .setFeaturesCol(rFormula.getFeaturesCol) val pipeline = new Pipeline() .setStages(Array(rFormulaModel, glr)) .fit(data) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala index 2ed7d7b770cc9..48632316f3950 100644 --- 
a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala @@ -75,6 +75,7 @@ private[r] object IsotonicRegressionWrapper .setIsotonic(isotonic) .setFeatureIndex(featureIndex) .setWeightCol(weightCol) + .setFeaturesCol(rFormula.getFeaturesCol) val pipeline = new Pipeline() .setStages(Array(rFormulaModel, isotonicRegression)) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala index 8616a8c01e5ac..ea9458525aa31 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala @@ -86,6 +86,7 @@ private[r] object KMeansWrapper extends MLReadable[KMeansWrapper] { .setK(k) .setMaxIter(maxIter) .setInitMode(initMode) + .setFeaturesCol(rFormula.getFeaturesCol) val pipeline = new Pipeline() .setStages(Array(rFormulaModel, kMeans)) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala index f2cb24b96404b..d1a39fea76ef8 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/NaiveBayesWrapper.scala @@ -73,6 +73,7 @@ private[r] object NaiveBayesWrapper extends MLReadable[NaiveBayesWrapper] { val naiveBayes = new NaiveBayes() .setSmoothing(smoothing) .setModelType("bernoulli") + .setFeaturesCol(rFormula.getFeaturesCol) .setPredictionCol(PREDICTED_LABEL_INDEX_COL) val idxToStr = new IndexToString() .setInputCol(PREDICTED_LABEL_INDEX_COL) diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala b/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala index 6a435992e3b35..379007c4d948d 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/r/RWrapperUtils.scala @@ -19,14 +19,15 @@ package org.apache.spark.ml.r import org.apache.spark.internal.Logging import org.apache.spark.ml.feature.RFormula +import org.apache.spark.ml.util.Identifiable import org.apache.spark.sql.Dataset object RWrapperUtils extends Logging { /** * DataFrame column check. - * When loading data, default columns "features" and "label" will be added. And these two names - * would conflict with RFormula default feature and label column names. + * When loading libsvm data, default columns "features" and "label" will be added. + * And "features" would conflict with RFormula default feature column names. * Here is to change the column name to avoid "column already exists" error. 
* * @param rFormula RFormula instance @@ -34,38 +35,11 @@ object RWrapperUtils extends Logging { * @return Unit */ def checkDataColumns(rFormula: RFormula, data: Dataset[_]): Unit = { - if (data.schema.fieldNames.contains(rFormula.getLabelCol)) { - val newLabelName = convertToUniqueName(rFormula.getLabelCol, data.schema.fieldNames) - logWarning( - s"data containing ${rFormula.getLabelCol} column, using new name $newLabelName instead") - rFormula.setLabelCol(newLabelName) - } - if (data.schema.fieldNames.contains(rFormula.getFeaturesCol)) { - val newFeaturesName = convertToUniqueName(rFormula.getFeaturesCol, data.schema.fieldNames) + val newFeaturesName = s"${Identifiable.randomUID(rFormula.getFeaturesCol)}" logWarning(s"data containing ${rFormula.getFeaturesCol} column, " + s"using new name $newFeaturesName instead") rFormula.setFeaturesCol(newFeaturesName) } } - - /** - * Convert conflicting name to be an unique name. - * Appending a sequence number, like originalName_output1 - * and incrementing until it is not already there - * - * @param originalName Original name - * @param fieldNames Array of field names in existing schema - * @return String - */ - def convertToUniqueName(originalName: String, fieldNames: Array[String]): String = { - var counter = 1 - var newName = originalName + "_output" - - while (fieldNames.contains(newName)) { - newName = originalName + "_output" + counter - counter += 1 - } - newName - } } diff --git a/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala index ddc24cb3a6481..27b03918d951e 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/r/RWrapperUtilsSuite.scala @@ -35,22 +35,14 @@ class RWrapperUtilsSuite extends SparkFunSuite with MLlibTestSparkContext { // after checking, model build is ok RWrapperUtils.checkDataColumns(rFormula, data) - assert(rFormula.getLabelCol == "label_output") - assert(rFormula.getFeaturesCol == "features_output") + assert(rFormula.getLabelCol == "label") + assert(rFormula.getFeaturesCol.startsWith("features_")) val model = rFormula.fit(data) assert(model.isInstanceOf[RFormulaModel]) - assert(model.getLabelCol == "label_output") - assert(model.getFeaturesCol == "features_output") - } - - test("generate unique name by appending a sequence number") { - val originalName = "label" - val fieldNames = Array("label_output", "label_output1", "label_output2") - val newName = RWrapperUtils.convertToUniqueName(originalName, fieldNames) - - assert(newName === "label_output3") + assert(model.getLabelCol == "label") + assert(model.getFeaturesCol.startsWith("features_")) } } From 6ea5055fa734d435b5f148cf52d3385a57926b60 Mon Sep 17 00:00:00 2001 From: Ryan Blue Date: Sat, 10 Sep 2016 10:18:53 +0100 Subject: [PATCH 247/270] [SPARK-17396][CORE] Share the task support between UnionRDD instances. ## What changes were proposed in this pull request? Share the ForkJoinTaskSupport between UnionRDD instances to avoid creating a huge number of threads if lots of RDDs are created at the same time. ## How was this patch tested? This uses existing UnionRDD tests. Author: Ryan Blue Closes #14985 from rdblue/SPARK-17396-use-shared-pool. 
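The diff below implements this by moving the pool into the `UnionRDD` companion object so it is created at most once per JVM. A hedged sketch of the same idiom in isolation follows; the class and object names are invented, and only the pool size of 8 mirrors the patch.

```scala
import scala.collection.parallel.ForkJoinTaskSupport
import scala.concurrent.forkjoin.ForkJoinPool

// All Aggregator instances share one lazily created pool instead of each
// allocating its own ForkJoinPool (and its threads).
object Aggregator {
  private lazy val sharedTaskSupport = new ForkJoinTaskSupport(new ForkJoinPool(8))
}

class Aggregator(inputs: Seq[Seq[Int]]) {
  def totals: Seq[Int] = {
    val par = inputs.par
    // Reuse the shared pool rather than spawning a fresh set of threads per instance.
    par.tasksupport = Aggregator.sharedTaskSupport
    par.map(_.sum).seq
  }
}
```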
--- .../main/scala/org/apache/spark/rdd/UnionRDD.scala | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/rdd/UnionRDD.scala b/core/src/main/scala/org/apache/spark/rdd/UnionRDD.scala index 8171dcc046379..ad1fddbde7b00 100644 --- a/core/src/main/scala/org/apache/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/UnionRDD.scala @@ -20,7 +20,7 @@ package org.apache.spark.rdd import java.io.{IOException, ObjectOutputStream} import scala.collection.mutable.ArrayBuffer -import scala.collection.parallel.ForkJoinTaskSupport +import scala.collection.parallel.{ForkJoinTaskSupport, ThreadPoolTaskSupport} import scala.concurrent.forkjoin.ForkJoinPool import scala.reflect.ClassTag @@ -58,6 +58,11 @@ private[spark] class UnionPartition[T: ClassTag]( } } +object UnionRDD { + private[spark] lazy val partitionEvalTaskSupport = + new ForkJoinTaskSupport(new ForkJoinPool(8)) +} + @DeveloperApi class UnionRDD[T: ClassTag]( sc: SparkContext, @@ -68,13 +73,10 @@ class UnionRDD[T: ClassTag]( private[spark] val isPartitionListingParallel: Boolean = rdds.length > conf.getInt("spark.rdd.parallelListingThreshold", 10) - @transient private lazy val partitionEvalTaskSupport = - new ForkJoinTaskSupport(new ForkJoinPool(8)) - override def getPartitions: Array[Partition] = { val parRDDs = if (isPartitionListingParallel) { val parArray = rdds.par - parArray.tasksupport = partitionEvalTaskSupport + parArray.tasksupport = UnionRDD.partitionEvalTaskSupport parArray } else { rdds From 71b7d42f5fa8d3a891ce831eedb308d1f407dd7e Mon Sep 17 00:00:00 2001 From: Xin Ren Date: Sat, 10 Sep 2016 09:52:53 -0700 Subject: [PATCH 248/270] [SPARK-16445][MLLIB][SPARKR] Fix @return description for sparkR mlp summary() method ## What changes were proposed in this pull request? Fix summary() method's `return` description for spark.mlp ## How was this patch tested? Ran tests locally on my laptop. Author: Xin Ren Closes #15015 from keypointt/SPARK-16445-2. --- R/pkg/R/mllib.R | 6 +++--- R/pkg/inst/tests/testthat/test_mllib.R | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R index f8d1095a493dc..234b208166b54 100644 --- a/R/pkg/R/mllib.R +++ b/R/pkg/R/mllib.R @@ -720,8 +720,9 @@ setMethod("predict", signature(object = "MultilayerPerceptronClassificationModel # Returns the summary of a Multilayer Perceptron Classification Model produced by \code{spark.mlp} #' @param object a Multilayer Perceptron Classification Model fitted by \code{spark.mlp} -#' @return \code{summary} returns a list containing \code{layers}, the label distribution, and -#' \code{tables}, conditional probabilities given the target label. +#' @return \code{summary} returns a list containing \code{labelCount}, \code{layers}, and +#' \code{weights}. For \code{weights}, it is a numeric vector with length equal to +#' the expected given the architecture (i.e., for 8-10-2 network, 100 connection weights). 
#' @rdname spark.mlp #' @export #' @aliases summary,MultilayerPerceptronClassificationModel-method @@ -732,7 +733,6 @@ setMethod("summary", signature(object = "MultilayerPerceptronClassificationModel labelCount <- callJMethod(jobj, "labelCount") layers <- unlist(callJMethod(jobj, "layers")) weights <- callJMethod(jobj, "weights") - weights <- matrix(weights, nrow = length(weights)) list(labelCount = labelCount, layers = layers, weights = weights) }) diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R index ac896cfbcfff7..5b1404c621bd1 100644 --- a/R/pkg/inst/tests/testthat/test_mllib.R +++ b/R/pkg/inst/tests/testthat/test_mllib.R @@ -369,6 +369,8 @@ test_that("spark.mlp", { expect_equal(summary$labelCount, 3) expect_equal(summary$layers, c(4, 5, 4, 3)) expect_equal(length(summary$weights), 64) + expect_equal(head(summary$weights, 5), list(-0.878743, 0.2154151, -1.16304, -0.6583214, 1.009825), + tolerance = 1e-6) # Test predict method mlpTestDF <- df From 29ba9578f44c7caa8451386cee1f03f4e0ed8fc7 Mon Sep 17 00:00:00 2001 From: Sean Owen Date: Sun, 11 Sep 2016 08:00:55 +0100 Subject: [PATCH 249/270] [SPARK-17389][ML][MLLIB] KMeans speedup with better choice of k-means|| init steps = 2 ## What changes were proposed in this pull request? Reduce default k-means|| init steps to 2 from 5. See JIRA for discussion. See also https://github.com/apache/spark/pull/14948 ## How was this patch tested? Existing tests. Author: Sean Owen Closes #14956 from srowen/SPARK-17389.2. --- .../org/apache/spark/mllib/clustering/KMeans.scala | 6 +++--- .../clustering/PowerIterationClusteringSuite.scala | 10 +++------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala index de9fa4aebf484..23141aaf42b49 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala @@ -51,10 +51,10 @@ class KMeans private ( /** * Constructs a KMeans instance with default parameters: {k: 2, maxIterations: 20, runs: 1, - * initializationMode: "k-means||", initializationSteps: 5, epsilon: 1e-4, seed: random}. + * initializationMode: "k-means||", initializationSteps: 2, epsilon: 1e-4, seed: random}. */ @Since("0.8.0") - def this() = this(2, 20, 1, KMeans.K_MEANS_PARALLEL, 5, 1e-4, Utils.random.nextLong()) + def this() = this(2, 20, 1, KMeans.K_MEANS_PARALLEL, 2, 1e-4, Utils.random.nextLong()) /** * Number of clusters to create (k). @@ -134,7 +134,7 @@ class KMeans private ( /** * Set the number of steps for the k-means|| initialization mode. This is an advanced - * setting -- the default of 5 is almost always enough. Default: 5. + * setting -- the default of 2 is almost always enough. Default: 2. 
*/ @Since("0.8.0") def setInitializationSteps(initializationSteps: Int): this.type = { diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringSuite.scala index 3d81d375c716e..b33b86b39a42f 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringSuite.scala @@ -49,7 +49,7 @@ class PowerIterationClusteringSuite extends SparkFunSuite with MLlibTestSparkCon val r1 = 1.0 val n1 = 10 val r2 = 4.0 - val n2 = 40 + val n2 = 10 val n = n1 + n2 val points = genCircle(r1, n1) ++ genCircle(r2, n2) val similarities = for (i <- 1 until n; j <- 0 until i) yield { @@ -83,7 +83,7 @@ class PowerIterationClusteringSuite extends SparkFunSuite with MLlibTestSparkCon val r1 = 1.0 val n1 = 10 val r2 = 4.0 - val n2 = 40 + val n2 = 10 val n = n1 + n2 val points = genCircle(r1, n1) ++ genCircle(r2, n2) val similarities = for (i <- 1 until n; j <- 0 until i) yield { @@ -91,11 +91,7 @@ class PowerIterationClusteringSuite extends SparkFunSuite with MLlibTestSparkCon } val edges = similarities.flatMap { case (i, j, s) => - if (i != j) { - Seq(Edge(i, j, s), Edge(j, i, s)) - } else { - None - } + Seq(Edge(i, j, s), Edge(j, i, s)) } val graph = Graph.fromEdges(sc.parallelize(edges, 2), 0.0) From 180796ecb3a00facde2d98affdb5aa38dd258875 Mon Sep 17 00:00:00 2001 From: Timothy Hunter Date: Sun, 11 Sep 2016 08:03:45 +0100 Subject: [PATCH 250/270] [SPARK-17439][SQL] Fixing compression issues with approximate quantiles and adding more tests ## What changes were proposed in this pull request? This PR build on #14976 and fixes a correctness bug that would cause the wrong quantile to be returned for small target errors. ## How was this patch tested? This PR adds 8 unit tests that were failing without the fix. Author: Timothy Hunter Author: Sean Owen Closes #15002 from thunterdb/ml-1783. --- .../sql/catalyst/util/QuantileSummaries.scala | 16 +++++++--- .../util/QuantileSummariesSuite.scala | 29 +++++++++++++++++-- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala index 7512ace188569..fd62bd511fac0 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.catalyst.util -import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.{ArrayBuffer, ListBuffer} import org.apache.spark.sql.catalyst.util.QuantileSummaries.Stats @@ -61,7 +61,12 @@ class QuantileSummaries( def insert(x: Double): QuantileSummaries = { headSampled += x if (headSampled.size >= defaultHeadSize) { - this.withHeadBufferInserted + val result = this.withHeadBufferInserted + if (result.sampled.length >= compressThreshold) { + result.compress() + } else { + result + } } else { this } @@ -236,7 +241,7 @@ object QuantileSummaries { if (currentSamples.isEmpty) { return Array.empty[Stats] } - val res: ArrayBuffer[Stats] = ArrayBuffer.empty + val res = ListBuffer.empty[Stats] // Start for the last element, which is always part of the set. // The head contains the current new head, that may be merged with the current element. 
var head = currentSamples.last @@ -258,7 +263,10 @@ object QuantileSummaries { } res.prepend(head) // If necessary, add the minimum element: - res.prepend(currentSamples.head) + val currHead = currentSamples.head + if (currHead.value < head.value) { + res.prepend(currentSamples.head) + } res.toArray } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala index 89b2a22a3de45..5e90970b1bb2e 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/QuantileSummariesSuite.scala @@ -40,6 +40,20 @@ class QuantileSummariesSuite extends SparkFunSuite { summary.compress() } + /** + * Interleaves compression and insertions. + */ + private def buildCompressSummary( + data: Seq[Double], + epsi: Double, + threshold: Int): QuantileSummaries = { + var summary = new QuantileSummaries(threshold, epsi) + data.foreach { x => + summary = summary.insert(x).compress() + } + summary + } + private def checkQuantile(quant: Double, data: Seq[Double], summary: QuantileSummaries): Unit = { val approx = summary.query(quant) // The rank of the approximation. @@ -54,8 +68,8 @@ class QuantileSummariesSuite extends SparkFunSuite { for { (seq_name, data) <- Seq(increasing, decreasing, random) - epsi <- Seq(0.1, 0.0001) - compression <- Seq(1000, 10) + epsi <- Seq(0.1, 0.0001) // With a significant value and with full precision + compression <- Seq(1000, 10) // This interleaves n so that we test without and with compression } { test(s"Extremas with epsi=$epsi and seq=$seq_name, compression=$compression") { @@ -75,6 +89,17 @@ class QuantileSummariesSuite extends SparkFunSuite { checkQuantile(0.1, data, s) checkQuantile(0.001, data, s) } + + test(s"Some quantile values with epsi=$epsi and seq=$seq_name, compression=$compression " + + s"(interleaved)") { + val s = buildCompressSummary(data, epsi, compression) + assert(s.count == data.size, s"Found count=${s.count} but data size=${data.size}") + checkQuantile(0.9999, data, s) + checkQuantile(0.9, data, s) + checkQuantile(0.5, data, s) + checkQuantile(0.1, data, s) + checkQuantile(0.001, data, s) + } } // Tests for merging procedure From bf22217377d7fe95b436d8b563c501aab2797f78 Mon Sep 17 00:00:00 2001 From: tone-zhang Date: Sun, 11 Sep 2016 10:17:53 +0100 Subject: [PATCH 251/270] [SPARK-17330][SPARK UT] Clean up spark-warehouse in UT ## What changes were proposed in this pull request? Check the database warehouse used in Spark UT, and remove the existing database file before run the UT (SPARK-8368). ## How was this patch tested? Run Spark UT with the command for several times: ./build/sbt -Pyarn -Phadoop-2.6 -Phive -Phive-thriftserver "test-only *HiveSparkSubmitSuit*" Without the patch, the test case can be passed only at the first time, and always failed from the second time. With the patch the test case always can be passed correctly. Author: tone-zhang Closes #14894 from tone-zhang/issue1. 
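A related precaution, used in the HiveSparkSubmitSuite changes below, is to point `spark.sql.warehouse.dir` at a scratch directory so repeated runs never collide on `./spark-warehouse`. A rough sketch of that approach in a standalone driver (the config key is the one used in the diff; everything else is illustrative):

```scala
import java.io.File
import org.apache.spark.sql.SparkSession

// Hypothetical driver: send the warehouse to a throwaway directory instead of
// ./spark-warehouse in the current working directory.
object ScratchWarehouseExample {
  def main(args: Array[String]): Unit = {
    val scratch = new File(System.getProperty("java.io.tmpdir"), s"warehouse-${System.nanoTime()}")
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("scratch-warehouse")
      .config("spark.sql.warehouse.dir", scratch.toURI.toString)
      .getOrCreate()

    spark.sql("CREATE DATABASE scratch_db")
    spark.sql("SHOW DATABASES").show()
    spark.stop()

    // Clean up so a second run does not see stale databases.
    def deleteRecursively(f: File): Unit = {
      Option(f.listFiles()).foreach(_.foreach(deleteRecursively))
      f.delete()
    }
    deleteRecursively(scratch)
  }
}
```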
--- .../org/apache/spark/sql/execution/command/DDLSuite.scala | 2 ++ .../org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index 05f826a11b586..95672e01f5546 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -43,6 +43,8 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach { // drop all databases, tables and functions after each test spark.sessionState.catalog.reset() } finally { + val path = System.getProperty("user.dir") + "/spark-warehouse" + Utils.deleteRecursively(new File(path)) super.afterEach() } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala index af282866669b8..29317e2887861 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala @@ -590,7 +590,9 @@ object SparkSubmitClassLoaderTest extends Logging { def main(args: Array[String]) { Utils.configTestLog4j("INFO") val conf = new SparkConf() + val hiveWarehouseLocation = Utils.createTempDir() conf.set("spark.ui.enabled", "false") + conf.set("spark.sql.warehouse.dir", hiveWarehouseLocation.toString) val sc = new SparkContext(conf) val hiveContext = new TestHiveContext(sc) val df = hiveContext.createDataFrame((1 to 100).map(i => (i, i))).toDF("i", "j") @@ -699,11 +701,13 @@ object SPARK_9757 extends QueryTest { def main(args: Array[String]): Unit = { Utils.configTestLog4j("INFO") + val hiveWarehouseLocation = Utils.createTempDir() val sparkContext = new SparkContext( new SparkConf() .set("spark.sql.hive.metastore.version", "0.13.1") .set("spark.sql.hive.metastore.jars", "maven") - .set("spark.ui.enabled", "false")) + .set("spark.ui.enabled", "false") + .set("spark.sql.warehouse.dir", hiveWarehouseLocation.toString)) val hiveContext = new TestHiveContext(sparkContext) spark = hiveContext.sparkSession From c76baff0cc4775c2191d075cc9a8176e4915fec8 Mon Sep 17 00:00:00 2001 From: Bryan Cutler Date: Sun, 11 Sep 2016 10:19:39 +0100 Subject: [PATCH 252/270] [SPARK-17336][PYSPARK] Fix appending multiple times to PYTHONPATH from spark-config.sh ## What changes were proposed in this pull request? During startup of Spark standalone, the script file spark-config.sh appends to the PYTHONPATH and can be sourced many times, causing duplicates in the path. This change adds a env flag that is set when the PYTHONPATH is appended so it will happen only one time. ## How was this patch tested? Manually started standalone master/worker and verified PYTHONPATH has no duplicate entries. Author: Bryan Cutler Closes #15028 from BryanCutler/fix-duplicate-pythonpath-SPARK-17336. 
--- sbin/spark-config.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sbin/spark-config.sh b/sbin/spark-config.sh index a7a44cdde6c77..b7284487c511d 100755 --- a/sbin/spark-config.sh +++ b/sbin/spark-config.sh @@ -26,5 +26,8 @@ fi export SPARK_CONF_DIR="${SPARK_CONF_DIR:-"${SPARK_HOME}/conf"}" # Add the PySpark classes to the PYTHONPATH: -export PYTHONPATH="${SPARK_HOME}/python:${PYTHONPATH}" -export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.3-src.zip:${PYTHONPATH}" +if [ -z "${PYSPARK_PYTHONPATH_SET}" ]; then + export PYTHONPATH="${SPARK_HOME}/python:${PYTHONPATH}" + export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.3-src.zip:${PYTHONPATH}" + export PYSPARK_PYTHONPATH_SET=1 +fi From 883c7631847a95684534222c1b6cfed8e62710c8 Mon Sep 17 00:00:00 2001 From: Yanbo Liang Date: Sun, 11 Sep 2016 13:47:13 +0100 Subject: [PATCH 253/270] [SPARK-17389][FOLLOW-UP][ML] Change KMeans k-means|| default init steps from 5 to 2. ## What changes were proposed in this pull request? #14956 reduced default k-means|| init steps to 2 from 5 only for spark.mllib package, we should also do same change for spark.ml and PySpark. ## How was this patch tested? Existing tests. Author: Yanbo Liang Closes #15050 from yanboliang/spark-17389. --- .../scala/org/apache/spark/ml/clustering/KMeans.scala | 4 ++-- .../org/apache/spark/ml/clustering/KMeansSuite.scala | 2 +- python/pyspark/ml/clustering.py | 10 +++++----- python/pyspark/mllib/clustering.py | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala index 6c46be719674b..b04e82838e714 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala @@ -69,7 +69,7 @@ private[clustering] trait KMeansParams extends Params with HasMaxIter with HasFe /** * Param for the number of steps for the k-means|| initialization mode. This is an advanced - * setting -- the default of 5 is almost always enough. Must be > 0. Default: 5. + * setting -- the default of 2 is almost always enough. Must be > 0. Default: 2. 
* @group expertParam */ @Since("1.5.0") @@ -262,7 +262,7 @@ class KMeans @Since("1.5.0") ( k -> 2, maxIter -> 20, initMode -> MLlibKMeans.K_MEANS_PARALLEL, - initSteps -> 5, + initSteps -> 2, tol -> 1e-4) @Since("1.5.0") diff --git a/mllib/src/test/scala/org/apache/spark/ml/clustering/KMeansSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/clustering/KMeansSuite.scala index 88f31a1cd26fb..c9ba5a288aadf 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/clustering/KMeansSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/clustering/KMeansSuite.scala @@ -45,7 +45,7 @@ class KMeansSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultR assert(kmeans.getPredictionCol === "prediction") assert(kmeans.getMaxIter === 20) assert(kmeans.getInitMode === MLlibKMeans.K_MEANS_PARALLEL) - assert(kmeans.getInitSteps === 5) + assert(kmeans.getInitSteps === 2) assert(kmeans.getTol === 1e-4) } diff --git a/python/pyspark/ml/clustering.py b/python/pyspark/ml/clustering.py index 4dab83362a0a4..7632f05c3b68c 100644 --- a/python/pyspark/ml/clustering.py +++ b/python/pyspark/ml/clustering.py @@ -254,14 +254,14 @@ class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol @keyword_only def __init__(self, featuresCol="features", predictionCol="prediction", k=2, - initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None): + initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None): """ __init__(self, featuresCol="features", predictionCol="prediction", k=2, \ - initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None) + initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None) """ super(KMeans, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid) - self._setDefault(k=2, initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20) + self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20) kwargs = self.__init__._input_kwargs self.setParams(**kwargs) @@ -271,10 +271,10 @@ def _create_model(self, java_model): @keyword_only @since("1.5.0") def setParams(self, featuresCol="features", predictionCol="prediction", k=2, - initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None): + initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None): """ setParams(self, featuresCol="features", predictionCol="prediction", k=2, \ - initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None) + initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None) Sets params for KMeans. """ diff --git a/python/pyspark/mllib/clustering.py b/python/pyspark/mllib/clustering.py index 29aa615125770..2036168e456fd 100644 --- a/python/pyspark/mllib/clustering.py +++ b/python/pyspark/mllib/clustering.py @@ -306,7 +306,7 @@ class KMeans(object): @classmethod @since('0.9.0') def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||", - seed=None, initializationSteps=5, epsilon=1e-4, initialModel=None): + seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None): """ Train a k-means clustering model. @@ -330,9 +330,9 @@ def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||" (default: None) :param initializationSteps: Number of steps for the k-means|| initialization mode. - This is an advanced setting -- the default of 5 is almost + This is an advanced setting -- the default of 2 is almost always enough. 
- (default: 5) + (default: 2) :param epsilon: Distance threshold within which a center will be considered to have converged. If all centers move less than this Euclidean From 767d48076971f6f1e2c93ee540a9b2e5e465631b Mon Sep 17 00:00:00 2001 From: Sameer Agarwal Date: Sun, 11 Sep 2016 17:35:27 +0200 Subject: [PATCH 254/270] [SPARK-17415][SQL] Better error message for driver-side broadcast join OOMs ## What changes were proposed in this pull request? This is a trivial patch that catches all `OutOfMemoryError` while building the broadcast hash relation and rethrows it by wrapping it in a nice error message. ## How was this patch tested? Existing Tests Author: Sameer Agarwal Closes #14979 from sameeragarwal/broadcast-join-error. --- .../exchange/BroadcastExchangeExec.scala | 73 +++++++++++-------- 1 file changed, 42 insertions(+), 31 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala index a809076de5419..7be5d31d4a765 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala @@ -21,6 +21,7 @@ import scala.concurrent.{ExecutionContext, Future} import scala.concurrent.duration._ import org.apache.spark.{broadcast, SparkException} +import org.apache.spark.launcher.SparkLauncher import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.UnsafeRow @@ -28,6 +29,7 @@ import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, BroadcastPar import org.apache.spark.sql.execution.{SparkPlan, SQLExecution} import org.apache.spark.sql.execution.metric.SQLMetrics import org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.util.ThreadUtils /** @@ -70,38 +72,47 @@ case class BroadcastExchangeExec( // This will run in another thread. Set the execution id so that we can connect these jobs // with the correct execution. SQLExecution.withExecutionId(sparkContext, executionId) { - val beforeCollect = System.nanoTime() - // Note that we use .executeCollect() because we don't want to convert data to Scala types - val input: Array[InternalRow] = child.executeCollect() - if (input.length >= 512000000) { - throw new SparkException( - s"Cannot broadcast the table with more than 512 millions rows: ${input.length} rows") + try { + val beforeCollect = System.nanoTime() + // Note that we use .executeCollect() because we don't want to convert data to Scala types + val input: Array[InternalRow] = child.executeCollect() + if (input.length >= 512000000) { + throw new SparkException( + s"Cannot broadcast the table with more than 512 millions rows: ${input.length} rows") + } + val beforeBuild = System.nanoTime() + longMetric("collectTime") += (beforeBuild - beforeCollect) / 1000000 + val dataSize = input.map(_.asInstanceOf[UnsafeRow].getSizeInBytes.toLong).sum + longMetric("dataSize") += dataSize + if (dataSize >= (8L << 30)) { + throw new SparkException( + s"Cannot broadcast the table that is larger than 8GB: ${dataSize >> 30} GB") + } + + // Construct and broadcast the relation. 
+ val relation = mode.transform(input) + val beforeBroadcast = System.nanoTime() + longMetric("buildTime") += (beforeBroadcast - beforeBuild) / 1000000 + + val broadcasted = sparkContext.broadcast(relation) + longMetric("broadcastTime") += (System.nanoTime() - beforeBroadcast) / 1000000 + + // There are some cases we don't care about the metrics and call `SparkPlan.doExecute` + // directly without setting an execution id. We should be tolerant to it. + if (executionId != null) { + sparkContext.listenerBus.post(SparkListenerDriverAccumUpdates( + executionId.toLong, metrics.values.map(m => m.id -> m.value).toSeq)) + } + + broadcasted + } catch { + case oe: OutOfMemoryError => + throw new OutOfMemoryError(s"Not enough memory to build and broadcast the table to " + + s"all worker nodes. As a workaround, you can either disable broadcast by setting " + + s"${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1 or increase the spark driver " + + s"memory by setting ${SparkLauncher.DRIVER_MEMORY} to a higher value") + .initCause(oe.getCause) } - val beforeBuild = System.nanoTime() - longMetric("collectTime") += (beforeBuild - beforeCollect) / 1000000 - val dataSize = input.map(_.asInstanceOf[UnsafeRow].getSizeInBytes.toLong).sum - longMetric("dataSize") += dataSize - if (dataSize >= (8L << 30)) { - throw new SparkException( - s"Cannot broadcast the table that is larger than 8GB: ${dataSize >> 30} GB") - } - - // Construct and broadcast the relation. - val relation = mode.transform(input) - val beforeBroadcast = System.nanoTime() - longMetric("buildTime") += (beforeBroadcast - beforeBuild) / 1000000 - - val broadcasted = sparkContext.broadcast(relation) - longMetric("broadcastTime") += (System.nanoTime() - beforeBroadcast) / 1000000 - - // There are some cases we don't care about the metrics and call `SparkPlan.doExecute` - // directly without setting an execution id. We should be tolerant to it. - if (executionId != null) { - sparkContext.listenerBus.post(SparkListenerDriverAccumUpdates( - executionId.toLong, metrics.values.map(m => m.id -> m.value).toSeq)) - } - - broadcasted } }(BroadcastExchangeExec.executionContext) } From 72eec70bdbf6fb67c977463db5d8d95dd3040ae8 Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Sun, 11 Sep 2016 21:51:22 -0700 Subject: [PATCH 255/270] [SPARK-17486] Remove unused TaskMetricsUIData.updatedBlockStatuses field The `TaskMetricsUIData.updatedBlockStatuses` field is assigned to but never read, increasing the memory consumption of the web UI. We should remove this field. Author: Josh Rosen Closes #15038 from JoshRosen/remove-updated-block-statuses-from-TaskMetricsUIData. 
--- core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala index 66b88129ee414..74bca9931acf7 100644 --- a/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala +++ b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala @@ -23,7 +23,6 @@ import scala.collection.mutable.{HashMap, LinkedHashMap} import org.apache.spark.JobExecutionStatus import org.apache.spark.executor.{ShuffleReadMetrics, ShuffleWriteMetrics, TaskMetrics} import org.apache.spark.scheduler.{AccumulableInfo, TaskInfo} -import org.apache.spark.storage.{BlockId, BlockStatus} import org.apache.spark.util.AccumulatorContext import org.apache.spark.util.collection.OpenHashSet @@ -145,7 +144,6 @@ private[spark] object UIData { memoryBytesSpilled = m.memoryBytesSpilled, diskBytesSpilled = m.diskBytesSpilled, peakExecutionMemory = m.peakExecutionMemory, - updatedBlockStatuses = m.updatedBlockStatuses.toList, inputMetrics = InputMetricsUIData(m.inputMetrics.bytesRead, m.inputMetrics.recordsRead), outputMetrics = OutputMetricsUIData(m.outputMetrics.bytesWritten, m.outputMetrics.recordsWritten), @@ -193,7 +191,6 @@ private[spark] object UIData { memoryBytesSpilled: Long, diskBytesSpilled: Long, peakExecutionMemory: Long, - updatedBlockStatuses: Seq[(BlockId, BlockStatus)], inputMetrics: InputMetricsUIData, outputMetrics: OutputMetricsUIData, shuffleReadMetrics: ShuffleReadMetricsUIData, From cc87280fcd065b01667ca7a59a1a32c7ab757355 Mon Sep 17 00:00:00 2001 From: cenyuhai Date: Mon, 12 Sep 2016 11:52:56 +0100 Subject: [PATCH 256/270] [SPARK-17171][WEB UI] DAG will list all partitions in the graph MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What changes were proposed in this pull request? DAG will list all partitions in the graph, it is too slow and hard to see all graph. Always we don't want to see all partitions,we just want to see the relations of DAG graph. So I just show 2 root nodes for Rdds. Before this PR, the DAG graph looks like [dag1.png](https://issues.apache.org/jira/secure/attachment/12824702/dag1.png), [dag3.png](https://issues.apache.org/jira/secure/attachment/12825456/dag3.png), after this PR, the DAG graph looks like [dag2.png](https://issues.apache.org/jira/secure/attachment/12824703/dag2.png),[dag4.png](https://issues.apache.org/jira/secure/attachment/12825457/dag4.png) Author: cenyuhai Author: 岑玉海 <261810726@qq.com> Closes #14737 from cenyuhai/SPARK-17171. --- .../spark/ui/scope/RDDOperationGraph.scala | 35 +++++++++++++++---- .../ui/scope/RDDOperationGraphListener.scala | 6 +++- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraph.scala b/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraph.scala index 84ca750e1a96a..0e330879d50f9 100644 --- a/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraph.scala +++ b/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraph.scala @@ -26,7 +26,7 @@ import org.apache.commons.lang3.StringEscapeUtils import org.apache.spark.internal.Logging import org.apache.spark.scheduler.StageInfo -import org.apache.spark.storage.StorageLevel +import org.apache.spark.storage.{RDDInfo, StorageLevel} /** * A representation of a generic cluster graph used for storing information on RDD operations. 
@@ -107,7 +107,7 @@ private[ui] object RDDOperationGraph extends Logging { * supporting in the future if we decide to group certain stages within the same job under * a common scope (e.g. part of a SQL query). */ - def makeOperationGraph(stage: StageInfo): RDDOperationGraph = { + def makeOperationGraph(stage: StageInfo, retainedNodes: Int): RDDOperationGraph = { val edges = new ListBuffer[RDDOperationEdge] val nodes = new mutable.HashMap[Int, RDDOperationNode] val clusters = new mutable.HashMap[String, RDDOperationCluster] // indexed by cluster ID @@ -119,18 +119,37 @@ private[ui] object RDDOperationGraph extends Logging { { if (stage.attemptId == 0) "" else s" (attempt ${stage.attemptId})" } val rootCluster = new RDDOperationCluster(stageClusterId, stageClusterName) + var rootNodeCount = 0 + val addRDDIds = new mutable.HashSet[Int]() + val dropRDDIds = new mutable.HashSet[Int]() + // Find nodes, edges, and operation scopes that belong to this stage - stage.rddInfos.foreach { rdd => - edges ++= rdd.parentIds.map { parentId => RDDOperationEdge(parentId, rdd.id) } + stage.rddInfos.sortBy(_.id).foreach { rdd => + val parentIds = rdd.parentIds + val isAllowed = + if (parentIds.isEmpty) { + rootNodeCount += 1 + rootNodeCount <= retainedNodes + } else { + parentIds.exists(id => addRDDIds.contains(id) || !dropRDDIds.contains(id)) + } + + if (isAllowed) { + addRDDIds += rdd.id + edges ++= parentIds.filter(id => !dropRDDIds.contains(id)).map(RDDOperationEdge(_, rdd.id)) + } else { + dropRDDIds += rdd.id + } // TODO: differentiate between the intention to cache an RDD and whether it's actually cached val node = nodes.getOrElseUpdate(rdd.id, RDDOperationNode( rdd.id, rdd.name, rdd.storageLevel != StorageLevel.NONE, rdd.callSite)) - if (rdd.scope.isEmpty) { // This RDD has no encompassing scope, so we put it directly in the root cluster // This should happen only if an RDD is instantiated outside of a public RDD API - rootCluster.attachChildNode(node) + if (isAllowed) { + rootCluster.attachChildNode(node) + } } else { // Otherwise, this RDD belongs to an inner cluster, // which may be nested inside of other clusters @@ -154,7 +173,9 @@ private[ui] object RDDOperationGraph extends Logging { rootCluster.attachChildCluster(cluster) } } - rddClusters.lastOption.foreach { cluster => cluster.attachChildNode(node) } + if (isAllowed) { + rddClusters.lastOption.foreach { cluster => cluster.attachChildNode(node) } + } } } diff --git a/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala b/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala index bcae56e2f114c..37a12a8646938 100644 --- a/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala +++ b/core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala @@ -41,6 +41,10 @@ private[ui] class RDDOperationGraphListener(conf: SparkConf) extends SparkListen private[ui] val jobIds = new mutable.ArrayBuffer[Int] private[ui] val stageIds = new mutable.ArrayBuffer[Int] + // How many root nodes to retain in DAG Graph + private[ui] val retainedNodes = + conf.getInt("spark.ui.dagGraph.retainedRootRDDs", Int.MaxValue) + // How many jobs or stages to retain graph metadata for private val retainedJobs = conf.getInt("spark.ui.retainedJobs", SparkUI.DEFAULT_RETAINED_JOBS) @@ -82,7 +86,7 @@ private[ui] class RDDOperationGraphListener(conf: SparkConf) extends SparkListen val stageId = stageInfo.stageId stageIds += stageId stageIdToJobId(stageId) = jobId - stageIdToGraph(stageId) = 
RDDOperationGraph.makeOperationGraph(stageInfo) + stageIdToGraph(stageId) = RDDOperationGraph.makeOperationGraph(stageInfo, retainedNodes) trimStagesIfNecessary() } From 4efcdb7feae24e41d8120b59430f8b77cc2106a6 Mon Sep 17 00:00:00 2001 From: codlife <1004910847@qq.com> Date: Mon, 12 Sep 2016 12:10:46 +0100 Subject: [PATCH 257/270] [SPARK-17447] Performance improvement in Partitioner.defaultPartitioner without sortBy ## What changes were proposed in this pull request? if there are many rdds in some situations,the sort will loss he performance servely,actually we needn't sort the rdds , we can just scan the rdds one time to gain the same goal. ## How was this patch tested? manual tests Author: codlife <1004910847@qq.com> Closes #15039 from codlife/master. --- .../scala/org/apache/spark/Partitioner.scala | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/Partitioner.scala b/core/src/main/scala/org/apache/spark/Partitioner.scala index 98c3abe93b553..93dfbc0e6ed65 100644 --- a/core/src/main/scala/org/apache/spark/Partitioner.scala +++ b/core/src/main/scala/org/apache/spark/Partitioner.scala @@ -55,14 +55,16 @@ object Partitioner { * We use two method parameters (rdd, others) to enforce callers passing at least 1 RDD. */ def defaultPartitioner(rdd: RDD[_], others: RDD[_]*): Partitioner = { - val bySize = (Seq(rdd) ++ others).sortBy(_.partitions.length).reverse - for (r <- bySize if r.partitioner.isDefined && r.partitioner.get.numPartitions > 0) { - return r.partitioner.get - } - if (rdd.context.conf.contains("spark.default.parallelism")) { - new HashPartitioner(rdd.context.defaultParallelism) + val rdds = (Seq(rdd) ++ others) + val hasPartitioner = rdds.filter(_.partitioner.exists(_.numPartitions > 0)) + if (hasPartitioner.nonEmpty) { + hasPartitioner.maxBy(_.partitions.length).partitioner.get } else { - new HashPartitioner(bySize.head.partitions.length) + if (rdd.context.conf.contains("spark.default.parallelism")) { + new HashPartitioner(rdd.context.defaultParallelism) + } else { + new HashPartitioner(rdds.map(_.partitions.length).max) + } } } } From b3c22912284c2a010a4af3c43dc5e6fd53c68f8c Mon Sep 17 00:00:00 2001 From: Gaetan Semet Date: Mon, 12 Sep 2016 12:21:33 +0100 Subject: [PATCH 258/270] [SPARK-16992][PYSPARK] use map comprehension in doc Code is equivalent, but map comprehency is most of the time faster than a map. Author: Gaetan Semet Closes #14863 from Stibbons/map_comprehension. 
--- examples/src/main/python/ml/quantile_discretizer_example.py | 2 +- examples/src/main/python/ml/vector_slicer_example.py | 4 ++-- examples/src/main/python/sql/hive.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/src/main/python/ml/quantile_discretizer_example.py b/examples/src/main/python/ml/quantile_discretizer_example.py index 788a0baffebb4..0fc1d1949a77d 100644 --- a/examples/src/main/python/ml/quantile_discretizer_example.py +++ b/examples/src/main/python/ml/quantile_discretizer_example.py @@ -29,7 +29,7 @@ .getOrCreate() # $example on$ - data = [(0, 18.0,), (1, 19.0,), (2, 8.0,), (3, 5.0,), (4, 2.2,)] + data = [(0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)] df = spark.createDataFrame(data, ["id", "hour"]) # $example off$ diff --git a/examples/src/main/python/ml/vector_slicer_example.py b/examples/src/main/python/ml/vector_slicer_example.py index d2f46b190f9a8..68c8cfe27e375 100644 --- a/examples/src/main/python/ml/vector_slicer_example.py +++ b/examples/src/main/python/ml/vector_slicer_example.py @@ -32,8 +32,8 @@ # $example on$ df = spark.createDataFrame([ - Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3}),), - Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]),)]) + Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3})), + Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]))]) slicer = VectorSlicer(inputCol="userFeatures", outputCol="features", indices=[1]) diff --git a/examples/src/main/python/sql/hive.py b/examples/src/main/python/sql/hive.py index 9b2a2c4e6a16b..98b48908b5a12 100644 --- a/examples/src/main/python/sql/hive.py +++ b/examples/src/main/python/sql/hive.py @@ -79,7 +79,7 @@ # You can also use DataFrames to create temporary views within a SparkSession. Record = Row("key", "value") - recordsDF = spark.createDataFrame(map(lambda i: Record(i, "val_" + str(i)), range(1, 101))) + recordsDF = spark.createDataFrame([Record(i, "val_" + str(i)) for i in range(1, 101)]) recordsDF.createOrReplaceTempView("records") # Queries can then join DataFrame data with data stored in Hive. From 8087ecf8daad1587d0ce9040991b14320628a65e Mon Sep 17 00:00:00 2001 From: WeichenXu Date: Mon, 12 Sep 2016 12:23:16 +0100 Subject: [PATCH 259/270] [SPARK CORE][MINOR] fix "default partitioner cannot partition array keys" error message in PairRDDfunctions ## What changes were proposed in this pull request? In order to avoid confusing user, error message in `PairRDDfunctions` `Default partitioner cannot partition array keys.` is updated, the one in `partitionBy` is replaced with `Specified partitioner cannot partition array keys.` other is replaced with `Specified or default partitioner cannot partition array keys.` ## How was this patch tested? N/A Author: WeichenXu Closes #15045 from WeichenXu123/fix_partitionBy_error_message. 
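The diff below only rewords the exception; for context, here is a small spark-shell style illustration (variable names invented) of why array keys cannot be hash-partitioned in the first place:

```scala
// Arrays on the JVM use identity-based hashCode, so two arrays with the same
// contents almost never hash to the same value, let alone the same partition.
val a = Array(1, 2, 3)
val b = Array(1, 2, 3)
println(a.sameElements(b))          // true: same contents
println(a.hashCode == b.hashCode)   // false in practice: different identities

// Keying an RDD by arrays therefore fails at partitioning time, e.g.:
//   sc.parallelize(Seq(a -> 1, b -> 1)).reduceByKey(_ + _)   // throws SparkException
// A common workaround is to key by an immutable collection instead:
//   sc.parallelize(Seq(a.toSeq -> 1, b.toSeq -> 1)).reduceByKey(_ + _)
```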
--- .../scala/org/apache/spark/rdd/PairRDDFunctions.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala index 7d6a8805bc016..068f4ed8ad745 100644 --- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala +++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala @@ -83,7 +83,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)]) throw new SparkException("Cannot use map-side combining with array keys.") } if (partitioner.isInstanceOf[HashPartitioner]) { - throw new SparkException("Default partitioner cannot partition array keys.") + throw new SparkException("HashPartitioner cannot partition array keys.") } } val aggregator = new Aggregator[K, V, C]( @@ -530,7 +530,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)]) */ def partitionBy(partitioner: Partitioner): RDD[(K, V)] = self.withScope { if (keyClass.isArray && partitioner.isInstanceOf[HashPartitioner]) { - throw new SparkException("Default partitioner cannot partition array keys.") + throw new SparkException("HashPartitioner cannot partition array keys.") } if (self.partitioner == Some(partitioner)) { self @@ -784,7 +784,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)]) partitioner: Partitioner) : RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))] = self.withScope { if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) { - throw new SparkException("Default partitioner cannot partition array keys.") + throw new SparkException("HashPartitioner cannot partition array keys.") } val cg = new CoGroupedRDD[K](Seq(self, other1, other2, other3), partitioner) cg.mapValues { case Array(vs, w1s, w2s, w3s) => @@ -802,7 +802,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)]) def cogroup[W](other: RDD[(K, W)], partitioner: Partitioner) : RDD[(K, (Iterable[V], Iterable[W]))] = self.withScope { if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) { - throw new SparkException("Default partitioner cannot partition array keys.") + throw new SparkException("HashPartitioner cannot partition array keys.") } val cg = new CoGroupedRDD[K](Seq(self, other), partitioner) cg.mapValues { case Array(vs, w1s) => @@ -817,7 +817,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)]) def cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)], partitioner: Partitioner) : RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))] = self.withScope { if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) { - throw new SparkException("Default partitioner cannot partition array keys.") + throw new SparkException("HashPartitioner cannot partition array keys.") } val cg = new CoGroupedRDD[K](Seq(self, other1, other2), partitioner) cg.mapValues { case Array(vs, w1s, w2s) => From 1742c3ab86d75ce3d352f7cddff65e62fb7c8dd4 Mon Sep 17 00:00:00 2001 From: Sean Zhong Date: Mon, 12 Sep 2016 11:30:06 -0700 Subject: [PATCH 260/270] [SPARK-17503][CORE] Fix memory leak in Memory store when unable to cache the whole RDD in memory ## What changes were proposed in this pull request? MemoryStore may throw OutOfMemoryError when trying to cache a super big RDD that cannot fit in memory. 
```
scala> sc.parallelize(1 to 1000000000, 100).map(x => new Array[Long](1000)).cache().count()
java.lang.OutOfMemoryError: Java heap space
  at $line14.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$anonfun$1.apply(:24)
  at $line14.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$anonfun$1.apply(:23)
  at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
  at scala.collection.Iterator$JoinIterator.next(Iterator.scala:232)
  at org.apache.spark.storage.memory.PartiallyUnrolledIterator.next(MemoryStore.scala:683)
  at org.apache.spark.InterruptibleIterator.next(InterruptibleIterator.scala:43)
  at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1684)
  at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1134)
  at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1134)
  at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1915)
  at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1915)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
  at org.apache.spark.scheduler.Task.run(Task.scala:86)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
  at java.lang.Thread.run(Thread.java:745)
```

Spark MemoryStore uses SizeTrackingVector as a temporary unrolling buffer to store all input values it has read so far before transferring them to the storage memory cache. The problem is that when the input RDD is too big to cache in memory, the temporary SizeTrackingVector unrolling buffer is not garbage collected in time. As the SizeTrackingVector can occupy all available storage memory, it may cause the executor JVM to run out of memory quickly. More info can be found at https://issues.apache.org/jira/browse/SPARK-17503

## How was this patch tested?

Unit test and manual test.

### Before change

(Screenshots of heap memory consumption and of a heap dump omitted.)

### After change

(Screenshot of heap memory consumption omitted.)

Author: Sean Zhong

Closes #15056 from clockfly/memory_store_leak.
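The fix below rewrites `PartiallyUnrolledIterator` so that it drops its reference to the unrolled buffer as soon as that buffer is exhausted. A simplified, self-contained sketch of the same pattern (not the real Spark class; the name and types are invented, and the memory accounting is left out):

```scala
// An iterator that first drains an in-memory buffer and then an underlying
// iterator, nulling out the buffer reference once it is exhausted so the buffer
// can be garbage collected while the (potentially huge) rest is still consumed.
class PrefixThenRestIterator[T](private var prefix: Iterator[T], rest: Iterator[T])
  extends Iterator[T] {

  override def hasNext: Boolean = {
    if (prefix == null) {
      rest.hasNext
    } else if (!prefix.hasNext) {
      prefix = null        // release the unrolled buffer eagerly
      rest.hasNext
    } else {
      true
    }
  }

  // Callers are expected to check hasNext first, as with any iterator.
  override def next(): T = if (prefix == null) rest.next() else prefix.next()
}
```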
--- .../spark/storage/memory/MemoryStore.scala | 40 +++++++----- .../PartiallyUnrolledIteratorSuite.scala | 61 +++++++++++++++++++ 2 files changed, 87 insertions(+), 14 deletions(-) create mode 100644 core/src/test/scala/org/apache/spark/storage/PartiallyUnrolledIteratorSuite.scala diff --git a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala index d220ab51d115b..1a3bf2bb672c6 100644 --- a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala +++ b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala @@ -663,31 +663,43 @@ private[spark] class MemoryStore( private[storage] class PartiallyUnrolledIterator[T]( memoryStore: MemoryStore, unrollMemory: Long, - unrolled: Iterator[T], + private[this] var unrolled: Iterator[T], rest: Iterator[T]) extends Iterator[T] { - private[this] var unrolledIteratorIsConsumed: Boolean = false - private[this] var iter: Iterator[T] = { - val completionIterator = CompletionIterator[T, Iterator[T]](unrolled, { - unrolledIteratorIsConsumed = true - memoryStore.releaseUnrollMemoryForThisTask(MemoryMode.ON_HEAP, unrollMemory) - }) - completionIterator ++ rest + private def releaseUnrollMemory(): Unit = { + memoryStore.releaseUnrollMemoryForThisTask(MemoryMode.ON_HEAP, unrollMemory) + // SPARK-17503: Garbage collects the unrolling memory before the life end of + // PartiallyUnrolledIterator. + unrolled = null } - override def hasNext: Boolean = iter.hasNext - override def next(): T = iter.next() + override def hasNext: Boolean = { + if (unrolled == null) { + rest.hasNext + } else if (!unrolled.hasNext) { + releaseUnrollMemory() + rest.hasNext + } else { + true + } + } + + override def next(): T = { + if (unrolled == null) { + rest.next() + } else { + unrolled.next() + } + } /** * Called to dispose of this iterator and free its memory. */ def close(): Unit = { - if (!unrolledIteratorIsConsumed) { - memoryStore.releaseUnrollMemoryForThisTask(MemoryMode.ON_HEAP, unrollMemory) - unrolledIteratorIsConsumed = true + if (unrolled != null) { + releaseUnrollMemory() } - iter = null } } diff --git a/core/src/test/scala/org/apache/spark/storage/PartiallyUnrolledIteratorSuite.scala b/core/src/test/scala/org/apache/spark/storage/PartiallyUnrolledIteratorSuite.scala new file mode 100644 index 0000000000000..02c2331dc3946 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/storage/PartiallyUnrolledIteratorSuite.scala @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.storage + +import org.mockito.Matchers +import org.mockito.Mockito._ +import org.scalatest.mock.MockitoSugar + +import org.apache.spark.SparkFunSuite +import org.apache.spark.memory.MemoryMode.ON_HEAP +import org.apache.spark.storage.memory.{MemoryStore, PartiallyUnrolledIterator} + +class PartiallyUnrolledIteratorSuite extends SparkFunSuite with MockitoSugar { + test("join two iterators") { + val unrollSize = 1000 + val unroll = (0 until unrollSize).iterator + val restSize = 500 + val rest = (unrollSize until restSize + unrollSize).iterator + + val memoryStore = mock[MemoryStore] + val joinIterator = new PartiallyUnrolledIterator(memoryStore, unrollSize, unroll, rest) + + // Firstly iterate over unrolling memory iterator + (0 until unrollSize).foreach { value => + assert(joinIterator.hasNext) + assert(joinIterator.hasNext) + assert(joinIterator.next() == value) + } + + joinIterator.hasNext + joinIterator.hasNext + verify(memoryStore, times(1)) + .releaseUnrollMemoryForThisTask(Matchers.eq(ON_HEAP), Matchers.eq(unrollSize.toLong)) + + // Secondly, iterate over rest iterator + (unrollSize until unrollSize + restSize).foreach { value => + assert(joinIterator.hasNext) + assert(joinIterator.hasNext) + assert(joinIterator.next() == value) + } + + joinIterator.close() + // MemoryMode.releaseUnrollMemoryForThisTask is called only once + verifyNoMoreInteractions(memoryStore) + } +} From 3d40896f410590c0be044b3fa7e5d32115fac05e Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Mon, 12 Sep 2016 13:09:33 -0700 Subject: [PATCH 261/270] [SPARK-17483] Refactoring in BlockManager status reporting and block removal This patch makes three minor refactorings to the BlockManager: - Move the `if (info.tellMaster)` check out of `reportBlockStatus`; this fixes an issue where a debug logging message would incorrectly claim to have reported a block status to the master even though no message had been sent (in case `info.tellMaster == false`). This also makes it easier to write code which unconditionally sends block statuses to the master (which is necessary in another patch of mine). - Split `removeBlock()` into two methods, the existing method and an internal `removeBlockInternal()` method which is designed to be called by internal code that already holds a write lock on the block. This is also needed by a followup patch. - Instead of calling `getCurrentBlockStatus()` in `removeBlock()`, just pass `BlockStatus.empty`; the block status should always be empty following complete removal of a block. These changes were originally authored as part of a bug fix patch which is targeted at branch-2.0 and master; I've split them out here into their own separate PR in order to make them easier to review and so that the behavior-changing parts of my other patch can be isolated to their own PR. Author: Josh Rosen Closes #15036 from JoshRosen/cache-failure-race-conditions-refactorings-only. 
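A rough sketch of the second refactoring above, purely for illustration (class and method names here are hypothetical, not the real BlockManager API): the public method acquires the lock and delegates to an `*Internal` variant that assumes the caller already holds it, so other locked code paths can reuse the same removal logic.

```scala
import java.util.concurrent.locks.ReentrantLock
import scala.collection.mutable

// Hypothetical sketch of the "public wrapper + *Internal worker" split.
class SimpleBlockStore {
  private val lock = new ReentrantLock()
  private val blocks = mutable.Map.empty[String, Array[Byte]]

  def removeBlock(id: String): Unit = {
    lock.lock()
    try removeBlockInternal(id, tellMaster = true)
    finally lock.unlock()
  }

  // Assumes the caller already holds `lock`; callable from other locked code paths.
  private def removeBlockInternal(id: String, tellMaster: Boolean): Unit = {
    blocks.remove(id)
    if (tellMaster) println(s"reporting empty status for $id") // stand-in for reporting to the master
  }
}
```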
--- .../apache/spark/storage/BlockManager.scala | 87 +++++++++---------- 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index 0614646771bd0..9e63777caf037 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -217,7 +217,7 @@ private[spark] class BlockManager( logInfo(s"Reporting ${blockInfoManager.size} blocks to the master.") for ((blockId, info) <- blockInfoManager.entries) { val status = getCurrentBlockStatus(blockId, info) - if (!tryToReportBlockStatus(blockId, info, status)) { + if (info.tellMaster && !tryToReportBlockStatus(blockId, status)) { logError(s"Failed to report $blockId to master; giving up.") return } @@ -298,7 +298,7 @@ private[spark] class BlockManager( /** * Get the BlockStatus for the block identified by the given ID, if it exists. - * NOTE: This is mainly for testing, and it doesn't fetch information from external block store. + * NOTE: This is mainly for testing. */ def getStatus(blockId: BlockId): Option[BlockStatus] = { blockInfoManager.get(blockId).map { info => @@ -333,10 +333,9 @@ private[spark] class BlockManager( */ private def reportBlockStatus( blockId: BlockId, - info: BlockInfo, status: BlockStatus, droppedMemorySize: Long = 0L): Unit = { - val needReregister = !tryToReportBlockStatus(blockId, info, status, droppedMemorySize) + val needReregister = !tryToReportBlockStatus(blockId, status, droppedMemorySize) if (needReregister) { logInfo(s"Got told to re-register updating block $blockId") // Re-registering will report our new block for free. @@ -352,17 +351,12 @@ private[spark] class BlockManager( */ private def tryToReportBlockStatus( blockId: BlockId, - info: BlockInfo, status: BlockStatus, droppedMemorySize: Long = 0L): Boolean = { - if (info.tellMaster) { - val storageLevel = status.storageLevel - val inMemSize = Math.max(status.memSize, droppedMemorySize) - val onDiskSize = status.diskSize - master.updateBlockInfo(blockManagerId, blockId, storageLevel, inMemSize, onDiskSize) - } else { - true - } + val storageLevel = status.storageLevel + val inMemSize = Math.max(status.memSize, droppedMemorySize) + val onDiskSize = status.diskSize + master.updateBlockInfo(blockManagerId, blockId, storageLevel, inMemSize, onDiskSize) } /** @@ -374,7 +368,7 @@ private[spark] class BlockManager( info.synchronized { info.level match { case null => - BlockStatus(StorageLevel.NONE, memSize = 0L, diskSize = 0L) + BlockStatus.empty case level => val inMem = level.useMemory && memoryStore.contains(blockId) val onDisk = level.useDisk && diskStore.contains(blockId) @@ -807,12 +801,10 @@ private[spark] class BlockManager( // Now that the block is in either the memory or disk store, // tell the master about it. 
info.size = size - if (tellMaster) { - reportBlockStatus(blockId, info, putBlockStatus) - } - Option(TaskContext.get()).foreach { c => - c.taskMetrics().incUpdatedBlockStatuses(blockId -> putBlockStatus) + if (tellMaster && info.tellMaster) { + reportBlockStatus(blockId, putBlockStatus) } + addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus) } logDebug("Put block %s locally took %s".format(blockId, Utils.getUsedTimeMs(startTimeMs))) if (level.replication > 1) { @@ -961,15 +953,12 @@ private[spark] class BlockManager( val putBlockStatus = getCurrentBlockStatus(blockId, info) val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid if (blockWasSuccessfullyStored) { - // Now that the block is in either the memory, externalBlockStore, or disk store, - // tell the master about it. + // Now that the block is in either the memory or disk store, tell the master about it. info.size = size - if (tellMaster) { - reportBlockStatus(blockId, info, putBlockStatus) - } - Option(TaskContext.get()).foreach { c => - c.taskMetrics().incUpdatedBlockStatuses(blockId -> putBlockStatus) + if (tellMaster && info.tellMaster) { + reportBlockStatus(blockId, putBlockStatus) } + addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus) logDebug("Put block %s locally took %s".format(blockId, Utils.getUsedTimeMs(startTimeMs))) if (level.replication > 1) { val remoteStartTime = System.currentTimeMillis @@ -1271,12 +1260,10 @@ private[spark] class BlockManager( val status = getCurrentBlockStatus(blockId, info) if (info.tellMaster) { - reportBlockStatus(blockId, info, status, droppedMemorySize) + reportBlockStatus(blockId, status, droppedMemorySize) } if (blockIsUpdated) { - Option(TaskContext.get()).foreach { c => - c.taskMetrics().incUpdatedBlockStatuses(blockId -> status) - } + addUpdatedBlockStatusToTaskMetrics(blockId, status) } status.storageLevel } @@ -1316,21 +1303,31 @@ private[spark] class BlockManager( // The block has already been removed; do nothing. logWarning(s"Asked to remove block $blockId, which does not exist") case Some(info) => - // Removals are idempotent in disk store and memory store. At worst, we get a warning. - val removedFromMemory = memoryStore.remove(blockId) - val removedFromDisk = diskStore.remove(blockId) - if (!removedFromMemory && !removedFromDisk) { - logWarning(s"Block $blockId could not be removed as it was not found in either " + - "the disk, memory, or external block store") - } - blockInfoManager.removeBlock(blockId) - val removeBlockStatus = getCurrentBlockStatus(blockId, info) - if (tellMaster && info.tellMaster) { - reportBlockStatus(blockId, info, removeBlockStatus) - } - Option(TaskContext.get()).foreach { c => - c.taskMetrics().incUpdatedBlockStatuses(blockId -> removeBlockStatus) - } + removeBlockInternal(blockId, tellMaster = tellMaster && info.tellMaster) + addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty) + } + } + + /** + * Internal version of [[removeBlock()]] which assumes that the caller already holds a write + * lock on the block. + */ + private def removeBlockInternal(blockId: BlockId, tellMaster: Boolean): Unit = { + // Removals are idempotent in disk store and memory store. At worst, we get a warning. 
+ val removedFromMemory = memoryStore.remove(blockId) + val removedFromDisk = diskStore.remove(blockId) + if (!removedFromMemory && !removedFromDisk) { + logWarning(s"Block $blockId could not be removed as it was not found on disk or in memory") + } + blockInfoManager.removeBlock(blockId) + if (tellMaster) { + reportBlockStatus(blockId, BlockStatus.empty) + } + } + + private def addUpdatedBlockStatusToTaskMetrics(blockId: BlockId, status: BlockStatus): Unit = { + Option(TaskContext.get()).foreach { c => + c.taskMetrics().incUpdatedBlockStatuses(blockId -> status) } } From 7c51b99a428a965ff7d136e1cdda20305d260453 Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Mon, 12 Sep 2016 15:24:33 -0700 Subject: [PATCH 262/270] [SPARK-14818] Post-2.0 MiMa exclusion and build changes This patch makes a handful of post-Spark-2.0 MiMa exclusion and build updates. It should be merged to master and a subset of it should be picked into branch-2.0 in order to test Spark 2.0.1-SNAPSHOT. - Remove the ` sketch`, `mllibLocal`, and `streamingKafka010` from the list of excluded subprojects so that MiMa checks them. - Remove now-unnecessary special-case handling of the Kafka 0.8 artifact in `mimaSettings`. - Move the exclusion added in SPARK-14743 from `v20excludes` to `v21excludes`, since that patch was only merged into master and not branch-2.0. - Add exclusions for an API change introduced by SPARK-17096 / #14675. - Add missing exclusions for the `o.a.spark.internal` and `o.a.spark.sql.internal` packages. Author: Josh Rosen Closes #15061 from JoshRosen/post-2.0-mima-changes. --- project/MimaBuild.scala | 11 ++--------- project/MimaExcludes.scala | 12 +++++++++--- project/SparkBuild.scala | 2 +- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/project/MimaBuild.scala b/project/MimaBuild.scala index 2a989dd4f7a1d..77397eab81ede 100644 --- a/project/MimaBuild.scala +++ b/project/MimaBuild.scala @@ -88,15 +88,8 @@ object MimaBuild { def mimaSettings(sparkHome: File, projectRef: ProjectRef) = { val organization = "org.apache.spark" - val previousSparkVersion = "1.6.0" - // This check can be removed post-2.0 - val project = if (previousSparkVersion == "1.6.0" && - projectRef.project == "streaming-kafka-0-8" - ) { - "streaming-kafka" - } else { - projectRef.project - } + val previousSparkVersion = "2.0.0" + val project = projectRef.project val fullId = "spark-" + project + "_2.11" mimaDefaultSettings ++ Seq(previousArtifact := Some(organization % fullId % previousSparkVersion), diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala index 16f26e7d283b4..fbd78aeb20dd6 100644 --- a/project/MimaExcludes.scala +++ b/project/MimaExcludes.scala @@ -37,6 +37,8 @@ object MimaExcludes { // Exclude rules for 2.1.x lazy val v21excludes = v20excludes ++ { Seq( + // [SPARK-14743] Improve delegation token handling in secure cluster + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.getTimeFromNowToRenewal"), // [SPARK-16199][SQL] Add a method to list the referenced columns in data source Filter ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.Filter.references"), // [SPARK-16853][SQL] Fixes encoder error in DataSet typed select @@ -51,12 +53,15 @@ object MimaExcludes { Seq( excludePackage("org.apache.spark.rpc"), excludePackage("org.spark-project.jetty"), + excludePackage("org.spark_project.jetty"), + excludePackage("org.apache.spark.internal"), excludePackage("org.apache.spark.unused"), excludePackage("org.apache.spark.unsafe"), 
excludePackage("org.apache.spark.memory"), excludePackage("org.apache.spark.util.collection.unsafe"), excludePackage("org.apache.spark.sql.catalyst"), excludePackage("org.apache.spark.sql.execution"), + excludePackage("org.apache.spark.sql.internal"), ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.feature.PCAModel.this"), ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.status.api.v1.StageData.this"), ProblemFilters.exclude[MissingMethodProblem]( @@ -787,9 +792,10 @@ object MimaExcludes { ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.parquetFile"), ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.applySchema") ) ++ Seq( - // [SPARK-14743] Improve delegation token handling in secure cluster - ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.getTimeFromNowToRenewal") - ) + // SPARK-17096: Improve exception string reported through the StreamingQueryListener + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryTerminated.stackTrace"), + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryTerminated.this") + ) } def excludes(version: String) = version match { diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index d164ead4ba73e..a39c93e9574fa 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -353,7 +353,7 @@ object SparkBuild extends PomBuild { val mimaProjects = allProjects.filterNot { x => Seq( spark, hive, hiveThriftServer, catalyst, repl, networkCommon, networkShuffle, networkYarn, - unsafe, tags, sketch, mllibLocal, streamingKafka010 + unsafe, tags ).contains(x) } From f9c580f11098d95f098936a0b90fa21d71021205 Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Mon, 12 Sep 2016 15:43:57 -0700 Subject: [PATCH 263/270] [SPARK-17485] Prevent failed remote reads of cached blocks from failing entire job ## What changes were proposed in this pull request? In Spark's `RDD.getOrCompute` we first try to read a local copy of a cached RDD block, then a remote copy, and only fall back to recomputing the block if no cached copy (local or remote) can be read. This logic works correctly in the case where no remote copies of the block exist, but if there _are_ remote copies and reads of those copies fail (due to network issues or internal Spark bugs) then the BlockManager will throw a `BlockFetchException` that will fail the task (and which could possibly fail the whole job if the read failures keep occurring). In the cases of TorrentBroadcast and task result fetching we really do want to fail the entire job in case no remote blocks can be fetched, but this logic is inappropriate for reads of cached RDD blocks because those can/should be recomputed in case cached blocks are unavailable. Therefore, I think that the `BlockManager.getRemoteBytes()` method should never throw on remote fetch errors and, instead, should handle failures by returning `None`. ## How was this patch tested? Block manager changes should be covered by modified tests in `BlockManagerSuite`: the old tests expected exceptions to be thrown on failed remote reads, while the modified tests now expect `None` to be returned from the `getRemote*` method. I also manually inspected all usages of `BlockManager.getRemoteValues()`, `getRemoteBytes()`, and `get()` to verify that they correctly pattern-match on the result and handle `None`. 
Note that these `None` branches are already exercised because the old `getRemoteBytes` returned `None` when no remote locations for the block could be found (which could occur if an executor died and its block manager de-registered with the master). Author: Josh Rosen Closes #15037 from JoshRosen/SPARK-17485. --- .../spark/storage/BlockFetchException.scala | 24 ------------------- .../apache/spark/storage/BlockManager.scala | 5 ++-- .../spark/storage/BlockManagerSuite.scala | 10 +++----- 3 files changed, 6 insertions(+), 33 deletions(-) delete mode 100644 core/src/main/scala/org/apache/spark/storage/BlockFetchException.scala diff --git a/core/src/main/scala/org/apache/spark/storage/BlockFetchException.scala b/core/src/main/scala/org/apache/spark/storage/BlockFetchException.scala deleted file mode 100644 index f6e46ae9a481a..0000000000000 --- a/core/src/main/scala/org/apache/spark/storage/BlockFetchException.scala +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.storage - -import org.apache.spark.SparkException - -private[spark] -case class BlockFetchException(messages: String, throwable: Throwable) - extends SparkException(messages, throwable) diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index 9e63777caf037..a724fdf009789 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -559,8 +559,9 @@ private[spark] class BlockManager( // Give up trying anymore locations. Either we've tried all of the original locations, // or we've refreshed the list of locations from the master, and have still // hit failures after trying locations from the refreshed list. - throw new BlockFetchException(s"Failed to fetch block after" + - s" ${totalFailureCount} fetch failures. Most recent failure cause:", e) + logWarning(s"Failed to fetch block after $totalFailureCount fetch failures. 
" + + s"Most recent failure cause:", e) + return None } logWarning(s"Failed to fetch remote block $blockId " + diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala index 87c8628ce97e9..fdf28b7dcbcf4 100644 --- a/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala @@ -513,10 +513,8 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE assert(store.getRemoteBytes("list1").isDefined, "list1Get expected to be fetched") store3.stop() store3 = null - // exception throw because there is no locations - intercept[BlockFetchException] { - store.getRemoteBytes("list1") - } + // Should return None instead of throwing an exception: + assert(store.getRemoteBytes("list1").isEmpty) } test("SPARK-14252: getOrElseUpdate should still read from remote storage") { @@ -1186,9 +1184,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE new MockBlockTransferService(conf.getInt("spark.block.failures.beforeLocationRefresh", 5)) store = makeBlockManager(8000, "executor1", transferService = Option(mockBlockTransferService)) store.putSingle("item", 999L, StorageLevel.MEMORY_ONLY, tellMaster = true) - intercept[BlockFetchException] { - store.getRemoteBytes("item") - } + assert(store.getRemoteBytes("item").isEmpty) } test("SPARK-13328: refresh block locations (fetch should succeed after location refresh)") { From a91ab705e8c124aa116c3e5b1f3ba88ce832dcde Mon Sep 17 00:00:00 2001 From: Davies Liu Date: Mon, 12 Sep 2016 16:35:42 -0700 Subject: [PATCH 264/270] [SPARK-17474] [SQL] fix python udf in TakeOrderedAndProjectExec ## What changes were proposed in this pull request? When there is any Python UDF in the Project between Sort and Limit, it will be collected into TakeOrderedAndProjectExec, ExtractPythonUDFs failed to pull the Python UDFs out because QueryPlan.expressions does not include the expression inside Option[Seq[Expression]]. Ideally, we should fix the `QueryPlan.expressions`, but tried with no luck (it always run into infinite loop). In PR, I changed the TakeOrderedAndProjectExec to no use Option[Seq[Expression]] to workaround it. cc JoshRosen ## How was this patch tested? Added regression test. Author: Davies Liu Closes #15030 from davies/all_expr. 
--- python/pyspark/sql/tests.py | 8 ++++++++ .../apache/spark/sql/execution/SparkStrategies.scala | 8 ++++---- .../scala/org/apache/spark/sql/execution/limit.scala | 12 ++++++------ .../sql/execution/TakeOrderedAndProjectSuite.scala | 4 ++-- 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py index fd8e9cec3e0bd..769e4540720e7 100644 --- a/python/pyspark/sql/tests.py +++ b/python/pyspark/sql/tests.py @@ -376,6 +376,14 @@ def test_udf_in_generate(self): row = df.select(explode(f(*df))).groupBy().sum().first() self.assertEqual(row[0], 10) + def test_udf_with_order_by_and_limit(self): + from pyspark.sql.functions import udf + my_copy = udf(lambda x: x, IntegerType()) + df = self.spark.range(10).orderBy("id") + res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1) + res.explain(True) + self.assertEqual(res.collect(), [Row(id=0, copy=0)]) + def test_basic_functions(self): rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}']) df = self.spark.read.json(rdd) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index c389593b4f767..3441ccf53b45b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -66,22 +66,22 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case logical.ReturnAnswer(rootPlan) => rootPlan match { case logical.Limit(IntegerLiteral(limit), logical.Sort(order, true, child)) => - execution.TakeOrderedAndProjectExec(limit, order, None, planLater(child)) :: Nil + execution.TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil case logical.Limit( IntegerLiteral(limit), logical.Project(projectList, logical.Sort(order, true, child))) => execution.TakeOrderedAndProjectExec( - limit, order, Some(projectList), planLater(child)) :: Nil + limit, order, projectList, planLater(child)) :: Nil case logical.Limit(IntegerLiteral(limit), child) => execution.CollectLimitExec(limit, planLater(child)) :: Nil case other => planLater(other) :: Nil } case logical.Limit(IntegerLiteral(limit), logical.Sort(order, true, child)) => - execution.TakeOrderedAndProjectExec(limit, order, None, planLater(child)) :: Nil + execution.TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil case logical.Limit( IntegerLiteral(limit), logical.Project(projectList, logical.Sort(order, true, child))) => execution.TakeOrderedAndProjectExec( - limit, order, Some(projectList), planLater(child)) :: Nil + limit, order, projectList, planLater(child)) :: Nil case _ => Nil } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala index 781c016095427..01fbe5b7c2c03 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala @@ -114,11 +114,11 @@ case class GlobalLimitExec(limit: Int, child: SparkPlan) extends BaseLimitExec { case class TakeOrderedAndProjectExec( limit: Int, sortOrder: Seq[SortOrder], - projectList: Option[Seq[NamedExpression]], + projectList: Seq[NamedExpression], child: SparkPlan) extends UnaryExecNode { override def output: Seq[Attribute] = { - 
projectList.map(_.map(_.toAttribute)).getOrElse(child.output) + projectList.map(_.toAttribute) } override def outputPartitioning: Partitioning = SinglePartition @@ -126,8 +126,8 @@ case class TakeOrderedAndProjectExec( override def executeCollect(): Array[InternalRow] = { val ord = new LazilyGeneratedOrdering(sortOrder, child.output) val data = child.execute().map(_.copy()).takeOrdered(limit)(ord) - if (projectList.isDefined) { - val proj = UnsafeProjection.create(projectList.get, child.output) + if (projectList != child.output) { + val proj = UnsafeProjection.create(projectList, child.output) data.map(r => proj(r).copy()) } else { data @@ -148,8 +148,8 @@ case class TakeOrderedAndProjectExec( localTopK, child.output, SinglePartition, serializer)) shuffled.mapPartitions { iter => val topK = org.apache.spark.util.collection.Utils.takeOrdered(iter.map(_.copy()), limit)(ord) - if (projectList.isDefined) { - val proj = UnsafeProjection.create(projectList.get, child.output) + if (projectList != child.output) { + val proj = UnsafeProjection.create(projectList, child.output) topK.map(r => proj(r)) } else { topK diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/TakeOrderedAndProjectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/TakeOrderedAndProjectSuite.scala index 3217e34bd8ad3..7e317a4d80265 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/TakeOrderedAndProjectSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/TakeOrderedAndProjectSuite.scala @@ -59,7 +59,7 @@ class TakeOrderedAndProjectSuite extends SparkPlanTest with SharedSQLContext { checkThatPlansAgree( generateRandomInputData(), input => - noOpFilter(TakeOrderedAndProjectExec(limit, sortOrder, None, input)), + noOpFilter(TakeOrderedAndProjectExec(limit, sortOrder, input.output, input)), input => GlobalLimitExec(limit, LocalLimitExec(limit, @@ -74,7 +74,7 @@ class TakeOrderedAndProjectSuite extends SparkPlanTest with SharedSQLContext { generateRandomInputData(), input => noOpFilter( - TakeOrderedAndProjectExec(limit, sortOrder, Some(Seq(input.output.last)), input)), + TakeOrderedAndProjectExec(limit, sortOrder, Seq(input.output.last), input)), input => GlobalLimitExec(limit, LocalLimitExec(limit, From 46f5c201e70053635bdeab4984ba1b649478bd12 Mon Sep 17 00:00:00 2001 From: hyukjinkwon Date: Tue, 13 Sep 2016 10:42:51 +0100 Subject: [PATCH 265/270] [BUILD] Closing some stale PRs and ones suggested to be closed by committer(s) ## What changes were proposed in this pull request? This PR proposes to close some stale PRs and ones suggested to be closed by committer(s) Closes #10052 Closes #11079 Closes #12661 Closes #12772 Closes #12958 Closes #12990 Closes #13409 Closes #13779 Closes #13811 Closes #14577 Closes #14714 Closes #14875 Closes #15020 ## How was this patch tested? N/A Author: hyukjinkwon Closes #15057 from HyukjinKwon/closing-stale-pr. From 3f6a2bb3f7beac4ce928eb660ee36258b5b9e8c8 Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Tue, 13 Sep 2016 12:54:03 +0200 Subject: [PATCH 266/270] [SPARK-17515] CollectLimit.execute() should perform per-partition limits ## What changes were proposed in this pull request? CollectLimit.execute() incorrectly omits per-partition limits, leading to performance regressions in case this case is hit (which should not happen in normal operation, but can occur in some cases (see #15068 for one example). ## How was this patch tested? Regression test in SQLQuerySuite that asserts the number of records scanned from the input RDD. 
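The idea behind the fix can be sketched without Spark at all (a hypothetical, plain-Scala illustration of the two-level limit): cap each partition locally before everything is shuffled into the single output partition, then apply the limit once more globally.

```scala
object PerPartitionLimitSketch extends App {
  // Hypothetical stand-in for an RDD: a sequence of partitions.
  val partitions: Seq[Seq[Int]] = Seq(Seq(1, 2, 3, 4, 5), Seq(6, 7), Seq(8, 9, 10, 11))
  val limit = 3

  // Without the local take(limit), every row of every partition would be moved
  // to the single output partition before the limit is applied.
  val locallyLimited = partitions.map(_.take(limit)) // at most `limit` rows leave each partition
  val globallyLimited = locallyLimited.flatten.take(limit)

  println(locallyLimited)  // List(List(1, 2, 3), List(6, 7), List(8, 9, 10))
  println(globallyLimited) // List(1, 2, 3)
}
```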
Author: Josh Rosen Closes #15070 from JoshRosen/SPARK-17515. --- .../scala/org/apache/spark/sql/execution/limit.scala | 3 ++- .../test/scala/org/apache/spark/sql/SQLQuerySuite.scala | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala index 01fbe5b7c2c03..86a8770715600 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala @@ -39,9 +39,10 @@ case class CollectLimitExec(limit: Int, child: SparkPlan) extends UnaryExecNode override def executeCollect(): Array[InternalRow] = child.executeTake(limit) private val serializer: Serializer = new UnsafeRowSerializer(child.output.size) protected override def doExecute(): RDD[InternalRow] = { + val locallyLimited = child.execute().mapPartitionsInternal(_.take(limit)) val shuffled = new ShuffledRowRDD( ShuffleExchange.prepareShuffleDependency( - child.execute(), child.output, SinglePartition, serializer)) + locallyLimited, child.output, SinglePartition, serializer)) shuffled.mapPartitionsInternal(_.take(limit)) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala index eac266cba55b8..a2164f9ae3d3e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala @@ -2661,4 +2661,13 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext { data.selectExpr("`part.col1`", "`col.1`")) } } + + test("SPARK-17515: CollectLimit.execute() should perform per-partition limits") { + val numRecordsRead = spark.sparkContext.longAccumulator + spark.range(1, 100, 1, numPartitions = 10).map { x => + numRecordsRead.add(1) + x + }.limit(1).queryExecution.toRdd.count() + assert(numRecordsRead.value === 10) + } } From 4ba63b193c1ac292493e06343d9d618c12c5ef3f Mon Sep 17 00:00:00 2001 From: jiangxingbo Date: Tue, 13 Sep 2016 17:04:51 +0200 Subject: [PATCH 267/270] [SPARK-17142][SQL] Complex query triggers binding error in HashAggregateExec ## What changes were proposed in this pull request? In `ReorderAssociativeOperator` rule, we extract foldable expressions with Add/Multiply arithmetics, and replace with eval literal. For example, `(a + 1) + (b + 2)` is optimized to `(a + b + 3)` by this rule. For aggregate operator, output expressions should be derived from groupingExpressions, current implemenation of `ReorderAssociativeOperator` rule may break this promise. A instance could be: ``` SELECT ((t1.a + 1) + (t2.a + 2)) AS out_col FROM testdata2 AS t1 INNER JOIN testdata2 AS t2 ON (t1.a = t2.a) GROUP BY (t1.a + 1), (t2.a + 2) ``` `((t1.a + 1) + (t2.a + 2))` is optimized to `(t1.a + t2.a + 3)`, which could not be derived from `ExpressionSet((t1.a +1), (t2.a + 2))`. Maybe we should improve the rule of `ReorderAssociativeOperator` by adding a GroupingExpressionSet to keep Aggregate.groupingExpressions, and respect these expressions during the optimize stage. ## How was this patch tested? Add new test case in `ReorderAssociativeOperatorSuite`. Author: jiangxingbo Closes #14917 from jiangxb1987/rao. 
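A stripped-down, hypothetical sketch of the approach (invented `Expr` types, not the Catalyst rule itself): flatten nested additions for constant folding, but stop at any sub-expression that appears in a protected set of grouping expressions, so the optimized result can still be derived from them.

```scala
object ProtectedFlattenSketch extends App {
  sealed trait Expr
  case class Lit(v: Int) extends Expr
  case class Ref(name: String) extends Expr
  case class Add(l: Expr, r: Expr) extends Expr

  // Flatten a tree of additions into its addends, but never look inside an
  // expression that the caller has marked as protected (e.g. a grouping expression).
  def flattenAdd(e: Expr, protectedExprs: Set[Expr]): Seq[Expr] = e match {
    case Add(l, r) if !protectedExprs.contains(e) =>
      flattenAdd(l, protectedExprs) ++ flattenAdd(r, protectedExprs)
    case other => Seq(other)
  }

  val expr = Add(Add(Ref("t1.a"), Lit(1)), Add(Ref("t2.a"), Lit(2)))
  val groupingExprs: Set[Expr] = Set(Add(Ref("t1.a"), Lit(1)), Add(Ref("t2.a"), Lit(2)))

  // Unprotected: the literals can be merged across the two grouping expressions.
  println(flattenAdd(expr, Set.empty))
  // List(Ref(t1.a), Lit(1), Ref(t2.a), Lit(2))

  // Protected: the grouping expressions stay intact, so the output is still derivable from them.
  println(flattenAdd(expr, groupingExprs))
  // List(Add(Ref(t1.a),Lit(1)), Add(Ref(t2.a),Lit(2)))
}
```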
--- .../sql/catalyst/optimizer/expressions.scala | 31 ++++++++++++++----- .../ReorderAssociativeOperatorSuite.scala | 16 +++++++++- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala index 82ab111aa2259..b7458910da13e 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala @@ -57,20 +57,37 @@ object ConstantFolding extends Rule[LogicalPlan] { * Reorder associative integral-type operators and fold all constants into one. */ object ReorderAssociativeOperator extends Rule[LogicalPlan] { - private def flattenAdd(e: Expression): Seq[Expression] = e match { - case Add(l, r) => flattenAdd(l) ++ flattenAdd(r) + private def flattenAdd( + expression: Expression, + groupSet: ExpressionSet): Seq[Expression] = expression match { + case expr @ Add(l, r) if !groupSet.contains(expr) => + flattenAdd(l, groupSet) ++ flattenAdd(r, groupSet) case other => other :: Nil } - private def flattenMultiply(e: Expression): Seq[Expression] = e match { - case Multiply(l, r) => flattenMultiply(l) ++ flattenMultiply(r) + private def flattenMultiply( + expression: Expression, + groupSet: ExpressionSet): Seq[Expression] = expression match { + case expr @ Multiply(l, r) if !groupSet.contains(expr) => + flattenMultiply(l, groupSet) ++ flattenMultiply(r, groupSet) case other => other :: Nil } + private def collectGroupingExpressions(plan: LogicalPlan): ExpressionSet = plan match { + case Aggregate(groupingExpressions, aggregateExpressions, child) => + ExpressionSet.apply(groupingExpressions) + case _ => ExpressionSet(Seq()) + } + def apply(plan: LogicalPlan): LogicalPlan = plan transform { - case q: LogicalPlan => q transformExpressionsDown { + case q: LogicalPlan => + // We have to respect aggregate expressions which exists in grouping expressions when plan + // is an Aggregate operator, otherwise the optimized expression could not be derived from + // grouping expressions. 
+ val groupingExpressionSet = collectGroupingExpressions(q) + q transformExpressionsDown { case a: Add if a.deterministic && a.dataType.isInstanceOf[IntegralType] => - val (foldables, others) = flattenAdd(a).partition(_.foldable) + val (foldables, others) = flattenAdd(a, groupingExpressionSet).partition(_.foldable) if (foldables.size > 1) { val foldableExpr = foldables.reduce((x, y) => Add(x, y)) val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType) @@ -79,7 +96,7 @@ object ReorderAssociativeOperator extends Rule[LogicalPlan] { a } case m: Multiply if m.deterministic && m.dataType.isInstanceOf[IntegralType] => - val (foldables, others) = flattenMultiply(m).partition(_.foldable) + val (foldables, others) = flattenMultiply(m, groupingExpressionSet).partition(_.foldable) if (foldables.size > 1) { val foldableExpr = foldables.reduce((x, y) => Multiply(x, y)) val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ReorderAssociativeOperatorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ReorderAssociativeOperatorSuite.scala index 05e15e9ec4728..a1ab0a834474f 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ReorderAssociativeOperatorSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ReorderAssociativeOperatorSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.optimizer import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.plans.PlanTest +import org.apache.spark.sql.catalyst.plans.{Inner, PlanTest} import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan} import org.apache.spark.sql.catalyst.rules.RuleExecutor @@ -60,4 +60,18 @@ class ReorderAssociativeOperatorSuite extends PlanTest { comparePlans(optimized, correctAnswer) } + + test("nested expression with aggregate operator") { + val originalQuery = + testRelation.as("t1") + .join(testRelation.as("t2"), Inner, Some("t1.a".attr === "t2.a".attr)) + .groupBy("t1.a".attr + 1, "t2.a".attr + 1)( + (("t1.a".attr + 1) + ("t2.a".attr + 1)).as("col")) + + val optimized = Optimize.execute(originalQuery.analyze) + + val correctAnswer = originalQuery.analyze + + comparePlans(optimized, correctAnswer) + } } From 72edc7e958271cedb01932880550cfc2c0631204 Mon Sep 17 00:00:00 2001 From: Burak Yavuz Date: Tue, 13 Sep 2016 15:11:55 -0700 Subject: [PATCH 268/270] [SPARK-17531] Don't initialize Hive Listeners for the Execution Client ## What changes were proposed in this pull request? If a user provides listeners inside the Hive Conf, the configuration for these listeners are passed to the Hive Execution Client as well. This may cause issues for two reasons: 1. The Execution Client will actually generate garbage 2. The listener class needs to be both in the Spark Classpath and Hive Classpath This PR empties the listener configurations in `HiveUtils.newTemporaryConfiguration` so that the execution client will not contain the listener confs, but the metadata client will. ## How was this patch tested? Unit tests Author: Burak Yavuz Closes #15086 from brkyvz/null-listeners. 
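Conceptually the change amounts to blanking the listener keys in the map used to configure the execution-only client. The sketch below is a hypothetical simplification (the helper name and the sample listener class are invented; it assumes the standard Hive property names, whereas the actual patch sets the corresponding `ConfVars` entries in `HiveUtils.newTemporaryConfiguration`):

```scala
object ExecutionClientConfSketch extends App {
  // Listener settings that only make sense for the metadata client.
  private val listenerKeys = Seq(
    "hive.metastore.pre.event.listeners",
    "hive.metastore.event.listeners",
    "hive.metastore.end.function.listeners")

  // Build the execution-client configuration: keep the user's settings,
  // but override every listener key with an empty value so no listener is instantiated.
  def executionConf(userConf: Map[String, String]): Map[String, String] =
    userConf ++ listenerKeys.map(_ -> "")

  val userConf = Map("hive.metastore.event.listeners" -> "com.example.AuditListener")
  println(executionConf(userConf)("hive.metastore.event.listeners")) // "" -> listener disabled
}
```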
--- .../org/apache/spark/sql/hive/HiveUtils.scala | 7 ++++ .../spark/sql/hive/HiveUtilsSuite.scala | 36 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala index bdec611453b2d..39d71e164bf51 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala @@ -394,6 +394,13 @@ private[spark] object HiveUtils extends Logging { // hive.metastore.uris is not set. propMap.put(ConfVars.METASTOREURIS.varname, "") + // The execution client will generate garbage events, therefore the listeners that are generated + // for the execution clients are useless. In order to not output garbage, we don't generate + // these listeners. + propMap.put(ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, "") + propMap.put(ConfVars.METASTORE_EVENT_LISTENERS.varname, "") + propMap.put(ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname, "") + propMap.toMap } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala new file mode 100644 index 0000000000000..667a7ddd8bb61 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive + +import org.apache.hadoop.hive.conf.HiveConf.ConfVars + +import org.apache.spark.sql.hive.test.TestHiveSingleton +import org.apache.spark.sql.test.SQLTestUtils +import org.apache.spark.sql.QueryTest + +class HiveUtilsSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { + + test("newTemporaryConfiguration overwrites listener configurations") { + Seq(true, false).foreach { useInMemoryDerby => + val conf = HiveUtils.newTemporaryConfiguration(useInMemoryDerby) + assert(conf(ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname) === "") + assert(conf(ConfVars.METASTORE_EVENT_LISTENERS.varname) === "") + assert(conf(ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname) === "") + } + } +} From 37b93f54e89332b6b77bb02c1c2299614338fd7c Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Wed, 14 Sep 2016 00:37:42 +0200 Subject: [PATCH 269/270] [SPARK-17530][SQL] Add Statistics into DESCRIBE FORMATTED ### What changes were proposed in this pull request? Statistics is missing in the output of `DESCRIBE FORMATTED`. This PR is to add it. 
After the PR, the output will be like: ``` +----------------------------+----------------------------------------------------------------------------------------------------------------------+-------+ |col_name |data_type |comment| +----------------------------+----------------------------------------------------------------------------------------------------------------------+-------+ |key |string |null | |value |string |null | | | | | |# Detailed Table Information| | | |Database: |default | | |Owner: |xiaoli | | |Create Time: |Tue Sep 13 14:36:57 PDT 2016 | | |Last Access Time: |Wed Dec 31 16:00:00 PST 1969 | | |Location: |file:/private/var/folders/4b/sgmfldk15js406vk7lw5llzw0000gn/T/warehouse-9982e1db-df17-4376-a140-dbbee0203d83/texttable| | |Table Type: |MANAGED | | |Statistics: |sizeInBytes=5812, rowCount=500, isBroadcastable=false | | |Table Parameters: | | | | rawDataSize |-1 | | | numFiles |1 | | | transient_lastDdlTime |1473802620 | | | totalSize |5812 | | | COLUMN_STATS_ACCURATE |false | | | numRows |-1 | | | | | | |# Storage Information | | | |SerDe Library: |org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe | | |InputFormat: |org.apache.hadoop.mapred.TextInputFormat | | |OutputFormat: |org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat | | |Compressed: |No | | |Storage Desc Parameters: | | | | serialization.format |1 | | +----------------------------+----------------------------------------------------------------------------------------------------------------------+-------+ ``` Also improve the output of statistics in `DESCRIBE EXTENDED` by removing duplicate `Statistics`. Below is the example after the PR: ``` +----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------+ |col_name |data_type |comment| +----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------+ |key |string |null | |value |string |null | | | | | |# Detailed Table Information|CatalogTable( Table: `default`.`texttable` Owner: xiaoli 
Created: Tue Sep 13 14:38:43 PDT 2016 Last Access: Wed Dec 31 16:00:00 PST 1969 Type: MANAGED Schema: [StructField(key,StringType,true), StructField(value,StringType,true)] Provider: hive Properties: [rawDataSize=-1, numFiles=1, transient_lastDdlTime=1473802726, totalSize=5812, COLUMN_STATS_ACCURATE=false, numRows=-1] Statistics: sizeInBytes=5812, rowCount=500, isBroadcastable=false Storage(Location: file:/private/var/folders/4b/sgmfldk15js406vk7lw5llzw0000gn/T/warehouse-8ea5c5a0-5680-4778-91cb-c6334cf8a708/texttable, InputFormat: org.apache.hadoop.mapred.TextInputFormat, OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, Serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, Properties: [serialization.format=1]))| | +----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------+ ``` ### How was this patch tested? Manually tested. Author: gatorsmile Closes #15083 from gatorsmile/descFormattedStats. --- .../spark/sql/catalyst/catalog/interface.scala | 2 +- .../sql/catalyst/plans/logical/Statistics.scala | 15 ++++++++------- .../spark/sql/execution/command/tables.scala | 1 + 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala index e74fa6e638a0b..e52251f960ff4 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala @@ -191,7 +191,7 @@ case class CatalogTable( viewText.map("View: " + _).getOrElse(""), comment.map("Comment: " + _).getOrElse(""), if (properties.nonEmpty) s"Properties: $tableProperties" else "", - if (stats.isDefined) s"Statistics: ${stats.get}" else "", + if (stats.isDefined) s"Statistics: ${stats.get.simpleString}" else "", s"$storage") output.filter(_.nonEmpty).mkString("CatalogTable(\n\t", "\n\t", ")") diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala index 58fa537a18e3e..3cf20385dd712 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala @@ -38,12 +38,13 @@ case class Statistics( sizeInBytes: BigInt, rowCount: Option[BigInt] = None, isBroadcastable: Boolean = false) { - override def toString: String = { - val output = - Seq(s"sizeInBytes=$sizeInBytes", - if (rowCount.isDefined) s"rowCount=${rowCount.get}" else "", - s"isBroadcastable=$isBroadcastable" - ) - 
output.filter(_.nonEmpty).mkString("Statistics(", ", ", ")") + override def toString: String = "Statistics(" + simpleString + ")" + + /** Readable string representation for the Statistics. */ + def simpleString: String = { + Seq(s"sizeInBytes=$sizeInBytes", + if (rowCount.isDefined) s"rowCount=${rowCount.get}" else "", + s"isBroadcastable=$isBroadcastable" + ).filter(_.nonEmpty).mkString("", ", ", "") } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 027f3588e2922..9fbcd48b4a911 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -468,6 +468,7 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF append(buffer, "Last Access Time:", new Date(table.lastAccessTime).toString, "") append(buffer, "Location:", table.storage.locationUri.getOrElse(""), "") append(buffer, "Table Type:", table.tableType.name, "") + table.stats.foreach(s => append(buffer, "Statistics:", s.simpleString, "")) append(buffer, "Table Parameters:", "", "") table.properties.foreach { case (key, value) => From a454a4d86bbed1b6988da0a0e23b3e87a1a16340 Mon Sep 17 00:00:00 2001 From: junyangq Date: Tue, 13 Sep 2016 21:01:03 -0700 Subject: [PATCH 270/270] [SPARK-17317][SPARKR] Add SparkR vignette ## What changes were proposed in this pull request? This PR tries to add a SparkR vignette, which works as a friendly guidance going through the functionality provided by SparkR. ## How was this patch tested? Manual test. Author: junyangq Author: Shivaram Venkataraman Author: Junyang Qian Closes #14980 from junyangq/SPARKR-vignette. --- R/create-docs.sh | 11 +- R/pkg/vignettes/sparkr-vignettes.Rmd | 861 +++++++++++++++++++++++++++ 2 files changed, 870 insertions(+), 2 deletions(-) create mode 100644 R/pkg/vignettes/sparkr-vignettes.Rmd diff --git a/R/create-docs.sh b/R/create-docs.sh index d2ae160b50021..0dfba22463396 100755 --- a/R/create-docs.sh +++ b/R/create-docs.sh @@ -17,11 +17,13 @@ # limitations under the License. # -# Script to create API docs for SparkR -# This requires `devtools` and `knitr` to be installed on the machine. +# Script to create API docs and vignettes for SparkR +# This requires `devtools`, `knitr` and `rmarkdown` to be installed on the machine. # After running this script the html docs can be found in # $SPARK_HOME/R/pkg/html +# The vignettes can be found in +# $SPARK_HOME/R/pkg/vignettes/sparkr_vignettes.html set -o pipefail set -e @@ -43,4 +45,9 @@ Rscript -e 'libDir <- "../../lib"; library(SparkR, lib.loc=libDir); library(knit popd +# render creates SparkR vignettes +Rscript -e 'library(rmarkdown); paths <- .libPaths(); .libPaths(c("lib", paths)); Sys.setenv(SPARK_HOME=tools::file_path_as_absolute("..")); render("pkg/vignettes/sparkr-vignettes.Rmd"); .libPaths(paths)' + +find pkg/vignettes/. -not -name '.' 
-not -name '*.Rmd' -not -name '*.md' -not -name '*.pdf' -not -name '*.html' -delete + popd diff --git a/R/pkg/vignettes/sparkr-vignettes.Rmd b/R/pkg/vignettes/sparkr-vignettes.Rmd new file mode 100644 index 0000000000000..aea52db8b8556 --- /dev/null +++ b/R/pkg/vignettes/sparkr-vignettes.Rmd @@ -0,0 +1,861 @@ +--- +title: "SparkR - Practical Guide" +output: + html_document: + theme: united + toc: true + toc_depth: 4 + toc_float: true + highlight: textmate +--- + +## Overview + +SparkR is an R package that provides a light-weight frontend to use Apache Spark from R. With Spark `r packageVersion("SparkR")`, SparkR provides a distributed data frame implementation that supports data processing operations like selection, filtering, aggregation etc. and distributed machine learning using [MLlib](http://spark.apache.org/mllib/). + +## Getting Started + +We begin with an example running on the local machine and provide an overview of the use of SparkR: data ingestion, data processing and machine learning. + +First, let's load and attach the package. +```{r, message=FALSE} +library(SparkR) +``` + +`SparkSession` is the entry point into SparkR which connects your R program to a Spark cluster. You can create a `SparkSession` using `sparkR.session` and pass in options such as the application name, any Spark packages depended on, etc. + +We use default settings in which it runs in local mode. It auto downloads Spark package in the background if no previous installation is found. For more details about setup, see [Spark Session](#SetupSparkSession). + +```{r, message=FALSE} +sparkR.session() +``` + +The operations in SparkR are centered around an R class called `SparkDataFrame`. It is a distributed collection of data organized into named columns, which is conceptually equivalent to a table in a relational database or a data frame in R, but with richer optimizations under the hood. + +`SparkDataFrame` can be constructed from a wide array of sources such as: structured data files, tables in Hive, external databases, or existing local R data frames. For example, we create a `SparkDataFrame` from a local R data frame, + +```{r} +cars <- cbind(model = rownames(mtcars), mtcars) +carsDF <- createDataFrame(cars) +``` + +We can view the first few rows of the `SparkDataFrame` by `head` or `showDF` function. +```{r} +head(carsDF) +``` + +Common data processing operations such as `filter`, `select` are supported on the `SparkDataFrame`. +```{r} +carsSubDF <- select(carsDF, "model", "mpg", "hp") +carsSubDF <- filter(carsSubDF, carsSubDF$hp >= 200) +head(carsSubDF) +``` + +SparkR can use many common aggregation functions after grouping. + +```{r} +carsGPDF <- summarize(groupBy(carsDF, carsDF$gear), count = n(carsDF$gear)) +head(carsGPDF) +``` + +The results `carsDF` and `carsSubDF` are `SparkDataFrame` objects. To convert back to R `data.frame`, we can use `collect`. **Caution**: This can cause your interactive environment to run out of memory, though, because `collect()` fetches the entire distributed `DataFrame` to your client, which is acting as a Spark driver. +```{r} +carsGP <- collect(carsGPDF) +class(carsGP) +``` + +SparkR supports a number of commonly used machine learning algorithms. Under the hood, SparkR uses MLlib to train the model. Users can call `summary` to print a summary of the fitted model, `predict` to make predictions on new data, and `write.ml`/`read.ml` to save/load fitted models. + +SparkR supports a subset of R formula operators for model fitting, including ‘~’, ‘.’, ‘:’, ‘+’, and ‘-‘. 
We use linear regression as an example. +```{r} +model <- spark.glm(carsDF, mpg ~ wt + cyl) +``` + +The result matches that returned by R `glm` function applied to the corresponding `data.frame` `mtcars` of `carsDF`. In fact, for Generalized Linear Model, we specifically expose `glm` for `SparkDataFrame` as well so that the above is equivalent to `model <- glm(mpg ~ wt + cyl, data = carsDF)`. + +```{r} +summary(model) +``` + +The model can be saved by `write.ml` and loaded back using `read.ml`. +```{r, eval=FALSE} +write.ml(model, path = "/HOME/tmp/mlModel/glmModel") +``` + +In the end, we can stop Spark Session by running +```{r, eval=FALSE} +sparkR.session.stop() +``` + +## Setup + +### Installation + +Different from many other R packages, to use SparkR, you need an additional installation of Apache Spark. The Spark installation will be used to run a backend process that will compile and execute SparkR programs. + +If you don't have Spark installed on the computer, you may download it from [Apache Spark Website](http://spark.apache.org/downloads.html). Alternatively, we provide an easy-to-use function `install.spark` to complete this process. You don't have to call it explicitly. We will check the installation when `sparkR.session` is called and `install.spark` function will be triggered automatically if no installation is found. + +```{r, eval=FALSE} +install.spark() +``` + +If you already have Spark installed, you don't have to install again and can pass the `sparkHome` argument to `sparkR.session` to let SparkR know where the Spark installation is. + +```{r, eval=FALSE} +sparkR.session(sparkHome = "/HOME/spark") +``` + +### Spark Session {#SetupSparkSession} + + +In addition to `sparkHome`, many other options can be specified in `sparkR.session`. For a complete list, see [Starting up: SparkSession](http://spark.apache.org/docs/latest/sparkr.html#starting-up-sparksession) and [SparkR API doc](http://spark.apache.org/docs/latest/api/R/sparkR.session.html). + +In particular, the following Spark driver properties can be set in `sparkConfig`. + +Property Name | Property group | spark-submit equivalent +---------------- | ------------------ | ---------------------- +spark.driver.memory | Application Properties | --driver-memory +spark.driver.extraClassPath | Runtime Environment | --driver-class-path +spark.driver.extraJavaOptions | Runtime Environment | --driver-java-options +spark.driver.extraLibraryPath | Runtime Environment | --driver-library-path + +**For Windows users**: Due to different file prefixes across operating systems, to avoid the issue of potential wrong prefix, a current workaround is to specify `spark.sql.warehouse.dir` when starting the `SparkSession`. + +```{r, eval=FALSE} +spark_warehouse_path <- file.path(path.expand('~'), "spark-warehouse") +sparkR.session(spark.sql.warehouse.dir = spark_warehouse_path) +``` + + +#### Cluster Mode +SparkR can connect to remote Spark clusters. [Cluster Mode Overview](http://spark.apache.org/docs/latest/cluster-overview.html) is a good introduction to different Spark cluster modes. + +When connecting SparkR to a remote Spark cluster, make sure that the Spark version and Hadoop version on the machine match the corresponding versions on the cluster. Current SparkR package is compatible with +```{r, echo=FALSE, tidy = TRUE} +paste("Spark", packageVersion("SparkR")) +``` +It should be used both on the local computer and on the remote cluster. + +To connect, pass the URL of the master node to `sparkR.session`. 
A complete list can be seen in [Spark Master URLs](http://spark.apache.org/docs/latest/submitting-applications.html#master-urls). +For example, to connect to a local standalone Spark master, we can call + +```{r, eval=FALSE} +sparkR.session(master = "spark://local:7077") +``` + +For YARN cluster, SparkR supports the client mode with the master set as "yarn". +```{r, eval=FALSE} +sparkR.session(master = "yarn") +``` +Yarn cluster mode is not supported in the current version. + +## Data Import + +### Local Data Frame +The simplest way is to convert a local R data frame into a `SparkDataFrame`. Specifically we can use `as.DataFrame` or `createDataFrame` and pass in the local R data frame to create a `SparkDataFrame`. As an example, the following creates a `SparkDataFrame` based using the `faithful` dataset from R. +```{r} +df <- as.DataFrame(faithful) +head(df) +``` + +### Data Sources +SparkR supports operating on a variety of data sources through the `SparkDataFrame` interface. You can check the Spark SQL programming guide for more [specific options](https://spark.apache.org/docs/latest/sql-programming-guide.html#manually-specifying-options) that are available for the built-in data sources. + +The general method for creating `SparkDataFrame` from data sources is `read.df`. This method takes in the path for the file to load and the type of data source, and the currently active Spark Session will be used automatically. SparkR supports reading CSV, JSON and Parquet files natively and through Spark Packages you can find data source connectors for popular file formats like Avro. These packages can be added with `sparkPackages` parameter when initializing SparkSession using `sparkR.session'.` + +```{r, eval=FALSE} +sparkR.session(sparkPackages = "com.databricks:spark-avro_2.11:3.0.0") +``` + +We can see how to use data sources using an example CSV input file. For more information please refer to SparkR [read.df](https://spark.apache.org/docs/latest/api/R/read.df.html) API documentation. +```{r, eval=FALSE} +df <- read.df(csvPath, "csv", header = "true", inferSchema = "true", na.strings = "NA") +``` + +The data sources API natively supports JSON formatted input files. Note that the file that is used here is not a typical JSON file. Each line in the file must contain a separate, self-contained valid JSON object. As a consequence, a regular multi-line JSON file will most often fail. + +Let's take a look at the first two lines of the raw JSON file used here. + +```{r} +filePath <- paste0(sparkR.conf("spark.home"), + "/examples/src/main/resources/people.json") +readLines(filePath, n = 2L) +``` + +We use `read.df` to read that into a `SparkDataFrame`. + +```{r} +people <- read.df(filePath, "json") +count(people) +head(people) +``` + +SparkR automatically infers the schema from the JSON file. +```{r} +printSchema(people) +``` + +If we want to read multiple JSON files, `read.json` can be used. +```{r} +people <- read.json(paste0(Sys.getenv("SPARK_HOME"), + c("/examples/src/main/resources/people.json", + "/examples/src/main/resources/people.json"))) +count(people) +``` + +The data sources API can also be used to save out `SparkDataFrames` into multiple file formats. For example we can save the `SparkDataFrame` from the previous example to a Parquet file using `write.df`. +```{r, eval=FALSE} +write.df(people, path = "people.parquet", source = "parquet", mode = "overwrite") +``` + +### Hive Tables +You can also create SparkDataFrames from Hive tables. 
To do this we will need to create a SparkSession with Hive support which can access tables in the Hive MetaStore. Note that Spark should have been built with Hive support and more details can be found in the [SQL programming guide](https://spark.apache.org/docs/latest/sql-programming-guide.html). In SparkR, by default it will attempt to create a SparkSession with Hive support enabled (`enableHiveSupport = TRUE`). + +```{r, eval=FALSE} +sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)") + +txtPath <- paste0(sparkR.conf("spark.home"), "/examples/src/main/resources/kv1.txt") +sqlCMD <- sprintf("LOAD DATA LOCAL INPATH '%s' INTO TABLE src", txtPath) +sql(sqlCMD) + +results <- sql("FROM src SELECT key, value") + +# results is now a SparkDataFrame +head(results) +``` + + +## Data Processing + +**To dplyr users**: SparkR has similar interface as dplyr in data processing. However, some noticeable differences are worth mentioning in the first place. We use `df` to represent a `SparkDataFrame` and `col` to represent the name of column here. + +1. indicate columns. SparkR uses either a character string of the column name or a Column object constructed with `$` to indicate a column. For example, to select `col` in `df`, we can write `select(df, "col")` or `select(df, df$col)`. + +2. describe conditions. In SparkR, the Column object representation can be inserted into the condition directly, or we can use a character string to describe the condition, without referring to the `SparkDataFrame` used. For example, to select rows with value > 1, we can write `filter(df, df$col > 1)` or `filter(df, "col > 1")`. + +Here are more concrete examples. + +dplyr | SparkR +-------- | --------- +`select(mtcars, mpg, hp)` | `select(carsDF, "mpg", "hp")` +`filter(mtcars, mpg > 20, hp > 100)` | `filter(carsDF, carsDF$mpg > 20, carsDF$hp > 100)` + +Other differences will be mentioned in the specific methods. + +We use the `SparkDataFrame` `carsDF` created above. We can get basic information about the `SparkDataFrame`. +```{r} +carsDF +``` + +Print out the schema in tree format. +```{r} +printSchema(carsDF) +``` + +### SparkDataFrame Operations + +#### Selecting rows, columns + +SparkDataFrames support a number of functions to do structured data processing. Here we include some basic examples and a complete list can be found in the [API](https://spark.apache.org/docs/latest/api/R/index.html) docs: + +You can also pass in column name as strings. +```{r} +head(select(carsDF, "mpg")) +``` + +Filter the SparkDataFrame to only retain rows with mpg less than 20 miles/gallon. +```{r} +head(filter(carsDF, carsDF$mpg < 20)) +``` + +#### Grouping, Aggregation + +A common flow of grouping and aggregation is + +1. Use `groupBy` or `group_by` with respect to some grouping variables to create a `GroupedData` object + +2. Feed the `GroupedData` object to `agg` or `summarize` functions, with some provided aggregation functions to compute a number within each group. + +A number of widely used functions are supported to aggregate data after grouping, including `avg`, `countDistinct`, `count`, `first`, `kurtosis`, `last`, `max`, `mean`, `min`, `sd`, `skewness`, `stddev_pop`, `stddev_samp`, `sumDistinct`, `sum`, `var_pop`, `var_samp`, `var`. See the [API doc for `mean`](http://spark.apache.org/docs/latest/api/R/mean.html) and other `agg_funcs` linked there. + +For example we can compute a histogram of the number of cylinders in the `mtcars` dataset as shown below. 
+
+```{r}
+numCyl <- summarize(groupBy(carsDF, carsDF$cyl), count = n(carsDF$cyl))
+head(numCyl)
+```
+
+#### Operating on Columns
+
+SparkR also provides a number of functions that can be applied directly to columns for data processing and during aggregation. The example below shows the use of basic arithmetic functions.
+
+```{r}
+carsDF_km <- carsDF
+carsDF_km$kmpg <- carsDF_km$mpg * 1.61
+head(select(carsDF_km, "model", "mpg", "kmpg"))
+```
+
+
+### Window Functions
+A window function is a variation of an aggregation function. In simple terms:
+
+* aggregation function: `n` to `1` mapping - returns a single value for a group of entries. Examples include `sum`, `count`, `max`.
+
+* window function: `n` to `n` mapping - returns one value for each entry in the group, but the value may depend on all the entries of the *group*. Examples include `rank`, `lead`, `lag`.
+
+Formally, the *group* mentioned above is called the *frame*. Every input row can have a unique frame associated with it, and the output of the window function on that row is based on the rows confined in that frame.
+
+Window functions are often used in conjunction with the following functions: `windowPartitionBy`, `windowOrderBy`, `partitionBy`, `orderBy`, `over`. To illustrate this, we next look at an example.
+
+We still use the `mtcars` dataset. The corresponding `SparkDataFrame` is `carsDF`. Suppose for each number of cylinders, we want to calculate the rank of each car in `mpg` within the group.
+```{r}
+carsSubDF <- select(carsDF, "model", "mpg", "cyl")
+ws <- orderBy(windowPartitionBy("cyl"), "mpg")
+carsRank <- withColumn(carsSubDF, "rank", over(rank(), ws))
+head(carsRank, n = 20L)
+```
+
+We explain the above steps in detail.
+
+* `windowPartitionBy` creates a window specification object `WindowSpec` that defines the partition. It controls which rows will be in the same partition as the given row. In this case, rows with the same value in `cyl` will be put in the same partition. `orderBy` further defines the ordering - the position of a given row within the partition. The resulting `WindowSpec` is returned as `ws`.
+
+More window specification methods include `rangeBetween`, which can define the boundaries of the frame by value, and `rowsBetween`, which can define the boundaries by row indices.
+
+* `withColumn` appends a Column called `rank` to the `SparkDataFrame`. `over` returns a windowing column. Its first argument is usually a Column returned by a window function such as `rank()` or `lead(carsDF$wt)`, which computes the corresponding values over the partitioned-and-ordered table.
+
+### User-Defined Function
+
+In SparkR, we support several kinds of user-defined functions (UDFs).
+
+#### Apply by Partition
+
+`dapply` can apply a function to each partition of a `SparkDataFrame`. The function to be applied to each partition of the `SparkDataFrame` should have only one parameter, a `data.frame` corresponding to a partition, and the output should be a `data.frame` as well. The schema specifies the row format of the resulting `SparkDataFrame`. It must match the data types of the returned value. See [here](#DataTypes) for the mapping between R and Spark data types.
+
+We convert `mpg` to `kmpg` (kilometers per gallon). `carsSubDF` is a `SparkDataFrame` with a subset of `carsDF` columns.
+
+```{r}
+carsSubDF <- select(carsDF, "model", "mpg")
+schema <- structType(structField("model", "string"), structField("mpg", "double"),
+                     structField("kmpg", "double"))
+out <- dapply(carsSubDF, function(x) { x <- cbind(x, x$mpg * 1.61) }, schema)
+head(collect(out))
+```
+
+Like `dapply`, `dapplyCollect` applies a function to each partition of a `SparkDataFrame` and collects the result back to an R `data.frame`. The output of the function should be a `data.frame`, but no schema is required in this case. Note that `dapplyCollect` can fail if the output of the UDF on all partitions cannot be pulled to the driver and fit in driver memory.
+
+```{r}
+out <- dapplyCollect(
+         carsSubDF,
+         function(x) {
+           x <- cbind(x, "kmpg" = x$mpg * 1.61)
+         })
+head(out, 3)
+```
+
+#### Apply by Group
+`gapply` can apply a function to each group of a `SparkDataFrame`. The function is applied to each group of the `SparkDataFrame` and should have only two parameters: the grouping key and an R `data.frame` corresponding to that key. The groups are chosen from the `SparkDataFrame`'s column(s). The output of the function should be a `data.frame`. The schema specifies the row format of the resulting `SparkDataFrame`. It must represent the R function's output schema in terms of Spark data types. The column names of the returned `data.frame` are set by the user. See [here](#DataTypes) for the mapping between R and Spark data types.
+
+```{r}
+schema <- structType(structField("cyl", "double"), structField("max_mpg", "double"))
+result <- gapply(
+    carsDF,
+    "cyl",
+    function(key, x) {
+      y <- data.frame(key, max(x$mpg))
+    },
+    schema)
+head(arrange(result, "max_mpg", decreasing = TRUE))
+```
+
+Like `gapply`, `gapplyCollect` applies a function to each group of a `SparkDataFrame` and collects the result back to an R `data.frame`. The output of the function should be a `data.frame`, but no schema is required in this case. Note that `gapplyCollect` can fail if the output of the UDF on all groups cannot be pulled to the driver and fit in driver memory.
+
+```{r}
+result <- gapplyCollect(
+    carsDF,
+    "cyl",
+    function(key, x) {
+      y <- data.frame(key, max(x$mpg))
+      colnames(y) <- c("cyl", "max_mpg")
+      y
+    })
+head(result[order(result$max_mpg, decreasing = TRUE), ])
+```
+
+#### Distribute Local Functions
+
+Similar to `lapply` in native R, `spark.lapply` runs a function over a list of elements and distributes the computations with Spark. It works in a manner similar to `doParallel` or `lapply` applied to the elements of a list. The results of all the computations should fit on a single machine. If that is not the case, you can do something like `df <- createDataFrame(list)` and then use `dapply`.
+
+We use `svm` in the package `e1071` as an example. We use all default settings except for varying the cost of constraint violation. `spark.lapply` can train those different models in parallel.
+
+```{r}
+costs <- exp(seq(from = log(1), to = log(1000), length.out = 5))
+train <- function(cost) {
+  stopifnot(requireNamespace("e1071", quietly = TRUE))
+  model <- e1071::svm(Species ~ ., data = iris, cost = cost)
+  summary(model)
+}
+```
+
+This returns a list of model summaries.
+```{r}
+model.summaries <- spark.lapply(costs, train)
+```
+
+```{r}
+class(model.summaries)
+```
+
+
+To avoid a lengthy display, we only present the result of the second fitted model. You are free to inspect the other models as well.
+```{r}
+print(model.summaries[[2]])
+```
+
+
+### SQL Queries
+A `SparkDataFrame` can also be registered as a temporary view in Spark SQL, which allows you to run SQL queries over its data.
The sql function enables applications to run SQL queries programmatically and returns the result as a `SparkDataFrame`. + +```{r} +people <- read.df(paste0(sparkR.conf("spark.home"), + "/examples/src/main/resources/people.json"), "json") +``` + +Register this SparkDataFrame as a temporary view. + +```{r} +createOrReplaceTempView(people, "people") +``` + +SQL statements can be run by using the sql method. +```{r} +teenagers <- sql("SELECT name FROM people WHERE age >= 13 AND age <= 19") +head(teenagers) +``` + + +## Machine Learning + +SparkR supports the following machine learning models and algorithms. + +* Generalized Linear Model (GLM) + +* Naive Bayes Model + +* $k$-means Clustering + +* Accelerated Failure Time (AFT) Survival Model + +* Gaussian Mixture Model (GMM) + +* Latent Dirichlet Allocation (LDA) + +* Multilayer Perceptron Model + +* Collaborative Filtering with Alternating Least Squares (ALS) + +* Isotonic Regression Model + +More will be added in the future. + +### R Formula + +For most above, SparkR supports **R formula operators**, including `~`, `.`, `:`, `+` and `-` for model fitting. This makes it a similar experience as using R functions. + +### Training and Test Sets + +We can easily split `SparkDataFrame` into random training and test sets by the `randomSplit` function. It returns a list of split `SparkDataFrames` with provided `weights`. We use `carsDF` as an example and want to have about $70%$ training data and $30%$ test data. +```{r} +splitDF_list <- randomSplit(carsDF, c(0.7, 0.3), seed = 0) +carsDF_train <- splitDF_list[[1]] +carsDF_test <- splitDF_list[[2]] +``` + +```{r} +count(carsDF_train) +head(carsDF_train) +``` + +```{r} +count(carsDF_test) +head(carsDF_test) +``` + + +### Models and Algorithms + +#### Generalized Linear Model + +The main function is `spark.glm`. The following families and link functions are supported. The default is gaussian. + +Family | Link Function +------ | --------- +gaussian | identity, log, inverse +binomial | logit, probit, cloglog (complementary log-log) +poisson | log, identity, sqrt +gamma | inverse, identity, log + +There are three ways to specify the `family` argument. + +* Family name as a character string, e.g. `family = "gaussian"`. + +* Family function, e.g. `family = binomial`. + +* Result returned by a family function, e.g. `family = poisson(link = log)` + +For more information regarding the families and their link functions, see the Wikipedia page [Generalized Linear Model](https://en.wikipedia.org/wiki/Generalized_linear_model). + +We use the `mtcars` dataset as an illustration. The corresponding `SparkDataFrame` is `carsDF`. After fitting the model, we print out a summary and see the fitted values by making predictions on the original dataset. We can also pass into a new `SparkDataFrame` of same schema to predict on new data. + +```{r} +gaussianGLM <- spark.glm(carsDF, mpg ~ wt + hp) +summary(gaussianGLM) +``` +When doing prediction, a new column called `prediction` will be appended. Let's look at only a subset of columns here. +```{r} +gaussianFitted <- predict(gaussianGLM, carsDF) +head(select(gaussianFitted, "model", "prediction", "mpg", "wt", "hp")) +``` + +#### Naive Bayes Model + +Naive Bayes model assumes independence among the features. `spark.naiveBayes` fits a [Bernoulli naive Bayes model](https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Bernoulli_naive_Bayes) against a SparkDataFrame. The data should be all categorical. These models are often used for document classification. 
+ +```{r} +titanic <- as.data.frame(Titanic) +titanicDF <- createDataFrame(titanic[titanic$Freq > 0, -5]) +naiveBayesModel <- spark.naiveBayes(titanicDF, Survived ~ Class + Sex + Age) +summary(naiveBayesModel) +naiveBayesPrediction <- predict(naiveBayesModel, titanicDF) +head(select(naiveBayesPrediction, "Class", "Sex", "Age", "Survived", "prediction")) +``` + +#### k-Means Clustering + +`spark.kmeans` fits a $k$-means clustering model against a `SparkDataFrame`. As an unsupervised learning method, we don't need a response variable. Hence, the left hand side of the R formula should be left blank. The clustering is based only on the variables on the right hand side. + +```{r} +kmeansModel <- spark.kmeans(carsDF, ~ mpg + hp + wt, k = 3) +summary(kmeansModel) +kmeansPredictions <- predict(kmeansModel, carsDF) +head(select(kmeansPredictions, "model", "mpg", "hp", "wt", "prediction"), n = 20L) +``` + +#### AFT Survival Model +Survival analysis studies the expected duration of time until an event happens, and often the relationship with risk factors or treatment taken on the subject. In contrast to standard regression analysis, survival modeling has to deal with special characteristics in the data including non-negative survival time and censoring. + +Accelerated Failure Time (AFT) model is a parametric survival model for censored data that assumes the effect of a covariate is to accelerate or decelerate the life course of an event by some constant. For more information, refer to the Wikipedia page [AFT Model](https://en.wikipedia.org/wiki/Accelerated_failure_time_model) and the references there. Different from a [Proportional Hazards Model](https://en.wikipedia.org/wiki/Proportional_hazards_model) designed for the same purpose, the AFT model is easier to parallelize because each instance contributes to the objective function independently. +```{r} +library(survival) +ovarianDF <- createDataFrame(ovarian) +aftModel <- spark.survreg(ovarianDF, Surv(futime, fustat) ~ ecog_ps + rx) +summary(aftModel) +aftPredictions <- predict(aftModel, ovarianDF) +head(aftPredictions) +``` + +#### Gaussian Mixture Model + +(Coming in 2.1.0) + +`spark.gaussianMixture` fits multivariate [Gaussian Mixture Model](https://en.wikipedia.org/wiki/Mixture_model#Multivariate_Gaussian_mixture_model) (GMM) against a `SparkDataFrame`. [Expectation-Maximization](https://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm) (EM) is used to approximate the maximum likelihood estimator (MLE) of the model. + +We use a simulated example to demostrate the usage. +```{r} +X1 <- data.frame(V1 = rnorm(4), V2 = rnorm(4)) +X2 <- data.frame(V1 = rnorm(6, 3), V2 = rnorm(6, 4)) +data <- rbind(X1, X2) +df <- createDataFrame(data) +gmmModel <- spark.gaussianMixture(df, ~ V1 + V2, k = 2) +summary(gmmModel) +gmmFitted <- predict(gmmModel, df) +head(select(gmmFitted, "V1", "V2", "prediction")) +``` + + +#### Latent Dirichlet Allocation + +(Coming in 2.1.0) + +`spark.lda` fits a [Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) model on a `SparkDataFrame`. It is often used in topic modeling in which topics are inferred from a collection of text documents. LDA can be thought of as a clustering algorithm as follows: + +* Topics correspond to cluster centers, and documents correspond to examples (rows) in a dataset. + +* Topics and documents both exist in a feature space, where feature vectors are vectors of word counts (bag of words). 
+ +* Rather than estimating a clustering using a traditional distance, LDA uses a function based on a statistical model of how text documents are generated. + +To use LDA, we need to specify a `features` column in `data` where each entry represents a document. There are two type options for the column: + +* character string: This can be a string of the whole document. It will be parsed automatically. Additional stop words can be added in `customizedStopWords`. + +* libSVM: Each entry is a collection of words and will be processed directly. + +There are several parameters LDA takes for fitting the model. + +* `k`: number of topics (default 10). + +* `maxIter`: maximum iterations (default 20). + +* `optimizer`: optimizer to train an LDA model, "online" (default) uses [online variational inference](https://www.cs.princeton.edu/~blei/papers/HoffmanBleiBach2010b.pdf). "em" uses [expectation-maximization](https://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm). + +* `subsamplingRate`: For `optimizer = "online"`. Fraction of the corpus to be sampled and used in each iteration of mini-batch gradient descent, in range (0, 1] (default 0.05). + +* `topicConcentration`: concentration parameter (commonly named beta or eta) for the prior placed on topic distributions over terms, default -1 to set automatically on the Spark side. Use `summary` to retrieve the effective topicConcentration. Only 1-size numeric is accepted. + +* `docConcentration`: concentration parameter (commonly named alpha) for the prior placed on documents distributions over topics (theta), default -1 to set automatically on the Spark side. Use `summary` to retrieve the effective docConcentration. Only 1-size or k-size numeric is accepted. + +* `maxVocabSize`: maximum vocabulary size, default 1 << 18. + +Two more functions are provided for the fitted model. + +* `spark.posterior` returns a `SparkDataFrame` containing a column of posterior probabilities vectors named "topicDistribution". + +* `spark.perplexity` returns the log perplexity of given `SparkDataFrame`, or the log perplexity of the training data if missing argument `data`. + +For more information, see the help document `?spark.lda`. + +Let's look an artificial example. +```{r} +corpus <- data.frame(features = c( + "1 2 6 0 2 3 1 1 0 0 3", + "1 3 0 1 3 0 0 2 0 0 1", + "1 4 1 0 0 4 9 0 1 2 0", + "2 1 0 3 0 0 5 0 2 3 9", + "3 1 1 9 3 0 2 0 0 1 3", + "4 2 0 3 4 5 1 1 1 4 0", + "2 1 0 3 0 0 5 0 2 2 9", + "1 1 1 9 2 1 2 0 0 1 3", + "4 4 0 3 4 2 1 3 0 0 0", + "2 8 2 0 3 0 2 0 2 7 2", + "1 1 1 9 0 2 2 0 0 3 3", + "4 1 0 0 4 5 1 3 0 1 0")) +corpusDF <- createDataFrame(corpus) +model <- spark.lda(data = corpusDF, k = 5, optimizer = "em") +summary(model) +``` + +```{r} +posterior <- spark.posterior(model, corpusDF) +head(posterior) +``` + +```{r} +perplexity <- spark.perplexity(model, corpusDF) +perplexity +``` + + +#### Multilayer Perceptron + +(Coming in 2.1.0) + +Multilayer perceptron classifier (MLPC) is a classifier based on the [feedforward artificial neural network](https://en.wikipedia.org/wiki/Feedforward_neural_network). MLPC consists of multiple layers of nodes. Each layer is fully connected to the next layer in the network. Nodes in the input layer represent the input data. All other nodes map inputs to outputs by a linear combination of the inputs with the node’s weights $w$ and bias $b$ and applying an activation function. 
This can be written in matrix form for MLPC with $K+1$ layers as follows: +$$ +y(x)=f_K(\ldots f_2(w_2^T f_1(w_1^T x + b_1) + b_2) \ldots + b_K). +$$ + +Nodes in intermediate layers use sigmoid (logistic) function: +$$ +f(z_i) = \frac{1}{1+e^{-z_i}}. +$$ + +Nodes in the output layer use softmax function: +$$ +f(z_i) = \frac{e^{z_i}}{\sum_{k=1}^N e^{z_k}}. +$$ + +The number of nodes $N$ in the output layer corresponds to the number of classes. + +MLPC employs backpropagation for learning the model. We use the logistic loss function for optimization and L-BFGS as an optimization routine. + +`spark.mlp` requires at least two columns in `data`: one named `"label"` and the other one `"features"`. The `"features"` column should be in libSVM-format. According to the description above, there are several additional parameters that can be set: + +* `layers`: integer vector containing the number of nodes for each layer. + +* `solver`: solver parameter, supported options: `"gd"` (minibatch gradient descent) or `"l-bfgs"`. + +* `maxIter`: maximum iteration number. + +* `tol`: convergence tolerance of iterations. + +* `stepSize`: step size for `"gd"`. + +* `seed`: seed parameter for weights initialization. + +#### Collaborative Filtering + +(Coming in 2.1.0) + +`spark.als` learns latent factors in [collaborative filtering](https://en.wikipedia.org/wiki/Recommender_system#Collaborative_filtering) via [alternating least squares](http://dl.acm.org/citation.cfm?id=1608614). + +There are multiple options that can be configured in `spark.als`, including `rank`, `reg`, `nonnegative`. For a complete list, refer to the help file. + +```{r} +ratings <- list(list(0, 0, 4.0), list(0, 1, 2.0), list(1, 1, 3.0), list(1, 2, 4.0), + list(2, 1, 1.0), list(2, 2, 5.0)) +df <- createDataFrame(ratings, c("user", "item", "rating")) +model <- spark.als(df, "rating", "user", "item", rank = 10, reg = 0.1, nonnegative = TRUE) +``` + +Extract latent factors. +```{r} +stats <- summary(model) +userFactors <- stats$userFactors +itemFactors <- stats$itemFactors +head(userFactors) +head(itemFactors) +``` + +Make predictions. + +```{r} +predicted <- predict(model, df) +head(predicted) +``` + +#### Isotonic Regression Model + +(Coming in 2.1.0) + +`spark.isoreg` fits an [Isotonic Regression](https://en.wikipedia.org/wiki/Isotonic_regression) model against a `SparkDataFrame`. It solves a weighted univariate a regression problem under a complete order constraint. Specifically, given a set of real observed responses $y_1, \ldots, y_n$, corresponding real features $x_1, \ldots, x_n$, and optionally positive weights $w_1, \ldots, w_n$, we want to find a monotone (piecewise linear) function $f$ to minimize +$$ +\ell(f) = \sum_{i=1}^n w_i (y_i - f(x_i))^2. +$$ + +There are a few more arguments that may be useful. + +* `weightCol`: a character string specifying the weight column. + +* `isotonic`: logical value indicating whether the output sequence should be isotonic/increasing (`TRUE`) or antitonic/decreasing (`FALSE`). + +* `featureIndex`: the index of the feature on the right hand side of the formula if it is a vector column (default: 0), no effect otherwise. + +We use an artificial example to show the use. 
+ +```{r} +y <- c(3.0, 6.0, 8.0, 5.0, 7.0) +x <- c(1.0, 2.0, 3.5, 3.0, 4.0) +w <- rep(1.0, 5) +data <- data.frame(y = y, x = x, w = w) +df <- createDataFrame(data) +isoregModel <- spark.isoreg(df, y ~ x, weightCol = "w") +isoregFitted <- predict(isoregModel, df) +head(select(isoregFitted, "x", "y", "prediction")) +``` + +In the prediction stage, based on the fitted monotone piecewise function, the rules are: + +* If the prediction input exactly matches a training feature then associated prediction is returned. In case there are multiple predictions with the same feature then one of them is returned. Which one is undefined. + +* If the prediction input is lower or higher than all training features then prediction with lowest or highest feature is returned respectively. In case there are multiple predictions with the same feature then the lowest or highest is returned respectively. + +* If the prediction input falls between two training features then prediction is treated as piecewise linear function and interpolated value is calculated from the predictions of the two closest features. In case there are multiple values with the same feature then the same rules as in previous point are used. + +For example, when the input is $3.2$, the two closest feature values are $3.0$ and $3.5$, then predicted value would be a linear interpolation between the predicted values at $3.0$ and $3.5$. + +```{r} +newDF <- createDataFrame(data.frame(x = c(1.5, 3.2))) +head(predict(isoregModel, newDF)) +``` + +#### What's More? +We also expect Decision Tree, Random Forest, Kolmogorov-Smirnov Test coming in the next version 2.1.0. + +### Model Persistence +The following example shows how to save/load an ML model by SparkR. +```{r} +irisDF <- suppressWarnings(createDataFrame(iris)) +gaussianGLM <- spark.glm(irisDF, Sepal_Length ~ Sepal_Width + Species, family = "gaussian") + +# Save and then load a fitted MLlib model +modelPath <- tempfile(pattern = "ml", fileext = ".tmp") +write.ml(gaussianGLM, modelPath) +gaussianGLM2 <- read.ml(modelPath) + +# Check model summary +summary(gaussianGLM2) + +# Check model prediction +gaussianPredictions <- predict(gaussianGLM2, irisDF) +head(gaussianPredictions) + +unlink(modelPath) +``` + + +## Advanced Topics + +### SparkR Object Classes + +There are three main object classes in SparkR you may be working with. + +* `SparkDataFrame`: the central component of SparkR. It is an S4 class representing distributed collection of data organized into named columns, which is conceptually equivalent to a table in a relational database or a data frame in R. It has two slots `sdf` and `env`. + + `sdf` stores a reference to the corresponding Spark Dataset in the Spark JVM backend. + + `env` saves the meta-information of the object such as `isCached`. + +It can be created by data import methods or by transforming an existing `SparkDataFrame`. We can manipulate `SparkDataFrame` by numerous data processing functions and feed that into machine learning algorithms. + +* `Column`: an S4 class representing column of `SparkDataFrame`. The slot `jc` saves a reference to the corresponding Column object in the Spark JVM backend. + +It can be obtained from a `SparkDataFrame` by `$` operator, `df$col`. More often, it is used together with other functions, for example, with `select` to select particular columns, with `filter` and constructed conditions to select rows, with aggregation functions to compute aggregate statistics for each group. 
+ +* `GroupedData`: an S4 class representing grouped data created by `groupBy` or by transforming other `GroupedData`. Its `sgd` slot saves a reference to a RelationalGroupedDataset object in the backend. + +This is often an intermediate object with group information and followed up by aggregation operations. + +### Architecture + +A complete description of architecture can be seen in reference, in particular the paper *SparkR: Scaling R Programs with Spark*. + +Under the hood of SparkR is Spark SQL engine. This avoids the overheads of running interpreted R code, and the optimized SQL execution engine in Spark uses structural information about data and computation flow to perform a bunch of optimizations to speed up the computation. + +The main method calls of actual computation happen in the Spark JVM of the driver. We have a socket-based SparkR API that allows us to invoke functions on the JVM from R. We use a SparkR JVM backend that listens on a Netty-based socket server. + +Two kinds of RPCs are supported in the SparkR JVM backend: method invocation and creating new objects. Method invocation can be done in two ways. + +* `sparkR.invokeJMethod` takes a reference to an existing Java object and a list of arguments to be passed on to the method. + +* `sparkR.invokeJStatic` takes a class name for static method and a list of arguments to be passed on to the method. + +The arguments are serialized using our custom wire format which is then deserialized on the JVM side. We then use Java reflection to invoke the appropriate method. + +To create objects, `sparkR.newJObject` is used and then similarly the appropriate constructor is invoked with provided arguments. + +Finally, we use a new R class `jobj` that refers to a Java object existing in the backend. These references are tracked on the Java side and are automatically garbage collected when they go out of scope on the R side. + +## Appendix + +### R and Spark Data Types {#DataTypes} + +R | Spark +----------- | ------------- +byte | byte +integer | integer +float | float +double | double +numeric | double +character | string +string | string +binary | binary +raw | binary +logical | boolean +POSIXct | timestamp +POSIXlt | timestamp +Date | date +array | array +list | array +env | map + +## References + +* [Spark Cluster Mode Overview](http://spark.apache.org/docs/latest/cluster-overview.html) + +* [Submitting Spark Applications](http://spark.apache.org/docs/latest/submitting-applications.html) + +* [Machine Learning Library Guide (MLlib)](http://spark.apache.org/docs/latest/ml-guide.html) + +* [SparkR: Scaling R Programs with Spark](https://people.csail.mit.edu/matei/papers/2016/sigmod_sparkr.pdf), Shivaram Venkataraman, Zongheng Yang, Davies Liu, Eric Liang, Hossein Falaki, Xiangrui Meng, Reynold Xin, Ali Ghodsi, Michael Franklin, Ion Stoica, and Matei Zaharia. SIGMOD 2016. June 2016. + +```{r, echo=FALSE} +sparkR.session.stop() +```