From 6dd37cbaacbad78a4bc52684ef5a6f2654e987df Mon Sep 17 00:00:00 2001 From: Takeshi Yamamuro Date: Fri, 21 Aug 2020 21:23:41 +0900 Subject: [PATCH 01/54] [SPARK-32682][INFRA] Use workflow_dispatch to enable manual test triggers

### What changes were proposed in this pull request? This PR proposes to add a `workflow_dispatch` entry in the GitHub Actions script (`build_and_test.yml`). This update enables developers to run the Spark tests for a specific branch on their own repository, so it can help to check whether all the tests pass before opening a new PR. (Screenshot: the new "Run workflow" dispatch form, 2020-08-21.)

### Why are the changes needed? To reduce the pressure of GitHub Actions on the Spark repository.

### Does this PR introduce _any_ user-facing change? No.

### How was this patch tested? Manually checked.

Closes #29504 from maropu/DispatchTest. Authored-by: Takeshi Yamamuro Signed-off-by: Takeshi Yamamuro --- .github/workflows/build_and_test.yml | 9 +++++++++ dev/run-tests.py | 8 +++++++- 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 8f02d5df1938b..a3f2fb2ed1491 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -7,6 +7,11 @@ on: pull_request: branches: - master + workflow_dispatch: + inputs: + target: + description: 'Target branch to run' + required: true jobs: # Build: build Spark and run the tests for specified modules. @@ -82,12 +87,16 @@ jobs: # GitHub Actions' default miniconda to use in pip packaging test. CONDA_PREFIX: /usr/share/miniconda GITHUB_PREV_SHA: ${{ github.event.before }} + GITHUB_INPUT_BRANCH: ${{ github.event.inputs.target }} steps: - name: Checkout Spark repository uses: actions/checkout@v2 # In order to fetch changed files with: fetch-depth: 0 + - name: Merge dispatched input branch + if: ${{ github.event.inputs.target != '' }} + run: git merge --progress --ff-only origin/${{ github.event.inputs.target }} # Cache local repositories. Note that GitHub Actions cache has a 2G limit. - name: Cache Scala, SBT, Maven and Zinc uses: actions/cache@v1

diff --git a/dev/run-tests.py b/dev/run-tests.py index 93023d41e297a..3e118dcbc160d 100755 --- a/dev/run-tests.py +++ b/dev/run-tests.py @@ -655,7 +655,13 @@ def main(): # If we're running the tests in Github Actions, attempt to detect and test # only the affected modules. if test_env == "github_actions": - if os.environ["GITHUB_BASE_REF"] != "": + if os.environ["GITHUB_INPUT_BRANCH"] != "": + # Dispatched request + # Note that it assumes Github Actions has already merged + # the given `GITHUB_INPUT_BRANCH` branch. + changed_files = identify_changed_files_from_git_commits( + "HEAD", target_branch=os.environ["GITHUB_SHA"]) + elif os.environ["GITHUB_BASE_REF"] != "": # Pull requests changed_files = identify_changed_files_from_git_commits( os.environ["GITHUB_SHA"], target_branch=os.environ["GITHUB_BASE_REF"])

From 3dca81e4f5d51c81d6c183ddf762de011e4b9093 Mon Sep 17 00:00:00 2001 From: Wenchen Fan Date: Sat, 22 Aug 2020 06:23:46 +0900 Subject: [PATCH 02/54] [SPARK-32669][SQL][TEST] Expression unit tests should explore all cases that can lead to null result

### What changes were proposed in this pull request? Add documentation to `ExpressionEvalHelper`, asking people to explore all the cases that can lead to null results (including null in struct fields, array elements and map values). This PR also fixes `ComplexTypeSuite.GetArrayStructFields` to explore all the null cases.
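For illustration, here is a sketch of what exploring the map-value case could look like (hypothetical test code, not part of this patch; it assumes the `GetMapValue` expression and the `checkEvaluation` helper, and constructor details may differ across versions):

```scala
// Hypothetical sketch: every path through which a map-extract result can be
// null should be exercised, so the framework can also verify the expression's
// nullability flag.
val mapType = MapType(StringType, StringType, valueContainsNull = true)
val m = Literal.create(Map("a" -> null), mapType)
checkEvaluation(GetMapValue(m, Literal("a")), null)  // key present, stored value is null
checkEvaluation(GetMapValue(m, Literal("b")), null)  // key absent
checkEvaluation(GetMapValue(Literal.create(null, mapType), Literal("a")), null)  // map itself is null
```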
### Why are the changes needed? It happened several times that we hit correctness bugs caused by wrong expression nullability. When writing unit tests, we usually don't test the nullability flag directly, and it's too late to add such tests for all expressions. In https://github.com/apache/spark/pull/22375, we extended the expression test framework, which checks the nullability flag when the expected result/field/element is null. This requires the test cases to explore all the cases that can lead to null results ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? I reverted https://github.com/apache/spark/commit/5d296ed39e3dd79ddb10c68657e773adba40a5e0 locally, and `ComplexTypeSuite` can catch the bug. Closes #29493 from cloud-fan/small. Authored-by: Wenchen Fan Signed-off-by: Takeshi Yamamuro --- .../expressions/ComplexTypeSuite.scala | 31 +++++++++++-------- .../expressions/ExpressionEvalHelper.scala | 5 +++ 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala index dbe43709d1d35..cdb83d3580f0a 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala @@ -143,21 +143,26 @@ class ComplexTypeSuite extends SparkFunSuite with ExpressionEvalHelper { } test("GetArrayStructFields") { - val typeAS = ArrayType(StructType(StructField("a", IntegerType, false) :: Nil)) - val typeNullAS = ArrayType(StructType(StructField("a", IntegerType) :: Nil)) - val arrayStruct = Literal.create(Seq(create_row(1)), typeAS) - val nullArrayStruct = Literal.create(null, typeNullAS) - - def getArrayStructFields(expr: Expression, fieldName: String): GetArrayStructFields = { - expr.dataType match { - case ArrayType(StructType(fields), containsNull) => - val field = fields.find(_.name == fieldName).get - GetArrayStructFields(expr, field, fields.indexOf(field), fields.length, containsNull) - } + // test 4 types: struct field nullability X array element nullability + val type1 = ArrayType(StructType(StructField("a", IntegerType) :: Nil)) + val type2 = ArrayType(StructType(StructField("a", IntegerType, nullable = false) :: Nil)) + val type3 = ArrayType(StructType(StructField("a", IntegerType) :: Nil), containsNull = false) + val type4 = ArrayType( + StructType(StructField("a", IntegerType, nullable = false) :: Nil), containsNull = false) + + val input1 = Literal.create(Seq(create_row(1)), type4) + val input2 = Literal.create(Seq(create_row(null)), type3) + val input3 = Literal.create(Seq(null), type2) + val input4 = Literal.create(null, type1) + + def getArrayStructFields(expr: Expression, fieldName: String): Expression = { + ExtractValue.apply(expr, Literal.create(fieldName, StringType), _ == _) } - checkEvaluation(getArrayStructFields(arrayStruct, "a"), Seq(1)) - checkEvaluation(getArrayStructFields(nullArrayStruct, "a"), null) + checkEvaluation(getArrayStructFields(input1, "a"), Seq(1)) + checkEvaluation(getArrayStructFields(input2, "a"), Seq(null)) + checkEvaluation(getArrayStructFields(input3, "a"), Seq(null)) + checkEvaluation(getArrayStructFields(input4, "a"), null) } test("SPARK-32167: nullability of GetArrayStructFields") { diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala index 6f73c1b0c04fb..341b26ddf6575 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala @@ -40,6 +40,11 @@ import org.apache.spark.util.Utils /** * A few helper functions for expression evaluation testing. Mixin this trait to use them. + * + * Note: when you write unit test for an expression and call `checkEvaluation` to check the result, + * please make sure that you explore all the cases that can lead to null result (including + * null in struct fields, array elements and map values). The framework will test the + * nullability flag of the expression automatically. */ trait ExpressionEvalHelper extends ScalaCheckDrivenPropertyChecks with PlanTestBase { self: SparkFunSuite => From 1450b5e095c4dde4eb38d6237e54d6bfa96955e2 Mon Sep 17 00:00:00 2001 From: Brandon Jiang Date: Sat, 22 Aug 2020 06:45:35 +0900 Subject: [PATCH 03/54] [MINOR][DOCS] fix typo for docs,log message and comments ### What changes were proposed in this pull request? Fix typo for docs, log messages and comments ### Why are the changes needed? typo fix to increase readability ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? manual test has been performed to test the updated Closes #29443 from brandonJY/spell-fix-doc. Authored-by: Brandon Jiang Signed-off-by: Takeshi Yamamuro --- .../main/java/org/apache/spark/network/util/TransportConf.java | 2 +- .../src/main/java/org/apache/spark/api/plugin/DriverPlugin.java | 2 +- .../apache/spark/resource/ResourceDiscoveryScriptPlugin.scala | 2 +- docs/job-scheduling.md | 2 +- docs/sql-ref-syntax-qry-select-groupby.md | 2 +- docs/sql-ref-syntax-qry-select-hints.md | 2 +- docs/sql-ref.md | 2 +- .../src/main/java/org/apache/spark/launcher/LauncherServer.java | 2 +- sbin/decommission-worker.sh | 2 +- .../org/apache/spark/sql/connector/catalog/TableCatalog.java | 2 +- .../org/apache/spark/sql/catalyst/QueryPlanningTracker.scala | 2 +- .../sql/execution/datasources/v2/ShowTablePropertiesExec.scala | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java index 6c37f9a382376..646e4278811f4 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java +++ b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java @@ -290,7 +290,7 @@ public boolean sharedByteBufAllocators() { } /** - * If enabled then off-heap byte buffers will be prefered for the shared ByteBuf allocators. + * If enabled then off-heap byte buffers will be preferred for the shared ByteBuf allocators. 
*/ public boolean preferDirectBufsForSharedByteBufAllocators() { return conf.getBoolean("spark.network.io.preferDirectBufs", true); diff --git a/core/src/main/java/org/apache/spark/api/plugin/DriverPlugin.java b/core/src/main/java/org/apache/spark/api/plugin/DriverPlugin.java index 0c0d0df8ae682..1d676ff781c70 100644 --- a/core/src/main/java/org/apache/spark/api/plugin/DriverPlugin.java +++ b/core/src/main/java/org/apache/spark/api/plugin/DriverPlugin.java @@ -41,7 +41,7 @@ public interface DriverPlugin { * initialization. *

* It's recommended that plugins be careful about what operations are performed in this call, - * preferrably performing expensive operations in a separate thread, or postponing them until + * preferably performing expensive operations in a separate thread, or postponing them until * the application has fully started. * * @param sc The SparkContext loading the plugin. diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceDiscoveryScriptPlugin.scala b/core/src/main/scala/org/apache/spark/resource/ResourceDiscoveryScriptPlugin.scala index 11a9bb86d3034..d861e91771673 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceDiscoveryScriptPlugin.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceDiscoveryScriptPlugin.scala @@ -29,7 +29,7 @@ import org.apache.spark.util.Utils.executeAndGetOutput /** * The default plugin that is loaded into a Spark application to control how custom * resources are discovered. This executes the discovery script specified by the user - * and gets the json output back and contructs ResourceInformation objects from that. + * and gets the json output back and constructs ResourceInformation objects from that. * If the user specifies custom plugins, this is the last one to be executed and * throws if the resource isn't discovered. * diff --git a/docs/job-scheduling.md b/docs/job-scheduling.md index 1eed0ff3ee5e8..7c7385b325a7f 100644 --- a/docs/job-scheduling.md +++ b/docs/job-scheduling.md @@ -298,7 +298,7 @@ later. In order to synchronize PVM threads with JVM threads, you should set `PYSPARK_PIN_THREAD` environment variable to `true`. This pinned thread mode allows one PVM thread has one corresponding JVM thread. With this mode, -`pyspark.InheritableThread` is recommanded to use together for a PVM thread to inherit the interitable attributes +`pyspark.InheritableThread` is recommended to use together for a PVM thread to inherit the inheritable attributes such as local properties in a JVM thread. Note that `PYSPARK_PIN_THREAD` is currently experimental and not recommended for use in production. diff --git a/docs/sql-ref-syntax-qry-select-groupby.md b/docs/sql-ref-syntax-qry-select-groupby.md index 6137c0d80f313..934e5f70d4b08 100644 --- a/docs/sql-ref-syntax-qry-select-groupby.md +++ b/docs/sql-ref-syntax-qry-select-groupby.md @@ -58,7 +58,7 @@ aggregate_name ( [ DISTINCT ] expression [ , ... ] ) [ FILTER ( WHERE boolean_ex * **grouping_expression** - Specifies the critieria based on which the rows are grouped together. The grouping of rows is performed based on + Specifies the criteria based on which the rows are grouped together. The grouping of rows is performed based on result values of the grouping expressions. A grouping expression may be a column alias, a column position or an expression. diff --git a/docs/sql-ref-syntax-qry-select-hints.md b/docs/sql-ref-syntax-qry-select-hints.md index 247ce48e79445..5f1cb4c5bed0b 100644 --- a/docs/sql-ref-syntax-qry-select-hints.md +++ b/docs/sql-ref-syntax-qry-select-hints.md @@ -31,7 +31,7 @@ Hints give users a way to suggest how Spark SQL to use specific approaches to ge ### Partitioning Hints -Partitioning hints allow users to suggest a partitioning stragety that Spark should follow. `COALESCE`, `REPARTITION`, +Partitioning hints allow users to suggest a partitioning strategy that Spark should follow. 
`COALESCE`, `REPARTITION`, and `REPARTITION_BY_RANGE` hints are supported and are equivalent to `coalesce`, `repartition`, and `repartitionByRange` [Dataset APIs](api/scala/org/apache/spark/sql/Dataset.html), respectively. These hints give users a way to tune performance and control the number of output files in Spark SQL. When multiple partitioning hints are diff --git a/docs/sql-ref.md b/docs/sql-ref.md index 8d0c6734c4bd6..6a87166f7133d 100644 --- a/docs/sql-ref.md +++ b/docs/sql-ref.md @@ -32,7 +32,7 @@ Spark SQL is Apache Spark's module for working with structured data. This guide * [Integration with Hive UDFs/UDAFs/UDTFs](sql-ref-functions-udf-hive.html) * [Identifiers](sql-ref-identifier.html) * [Literals](sql-ref-literals.html) - * [Null Semanitics](sql-ref-null-semantics.html) + * [Null Semantics](sql-ref-null-semantics.html) * [SQL Syntax](sql-ref-syntax.html) * [DDL Statements](sql-ref-syntax-ddl.html) * [DML Statements](sql-ref-syntax-dml.html) diff --git a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java index 3ff77878f68a8..d5a277ba581a0 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java +++ b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java @@ -364,7 +364,7 @@ public void close() throws IOException { * * This method allows a short period for the above to happen (same amount of time as the * connection timeout, which is configurable). This should be fine for well-behaved - * applications, where they close the connection arond the same time the app handle detects the + * applications, where they close the connection around the same time the app handle detects the * app has finished. * * In case the connection is not closed within the grace period, this method forcefully closes diff --git a/sbin/decommission-worker.sh b/sbin/decommission-worker.sh index cf81a53f395c2..07e1e1771f7c1 100755 --- a/sbin/decommission-worker.sh +++ b/sbin/decommission-worker.sh @@ -46,7 +46,7 @@ else fi # Check if --block-until-exit is set. -# This is done for systems which block on the decomissioning script and on exit +# This is done for systems which block on the decommissioning script and on exit # shut down the entire system (e.g. K8s). 
if [ "$1" == "--block-until-exit" ]; then shift

diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java index 1809b9cdb52e5..b818515adf9c0 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableCatalog.java @@ -176,7 +176,7 @@ Table alterTable( * @param newIdent the new table identifier of the table * @throws NoSuchTableException If the table to rename doesn't exist or is a view * @throws TableAlreadyExistsException If the new table name already exists or is a view - * @throws UnsupportedOperationException If the namespaces of old and new identiers do not + * @throws UnsupportedOperationException If the namespaces of old and new identifiers do not * match (optional) */ void renameTable(Identifier oldIdent, Identifier newIdent)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/QueryPlanningTracker.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/QueryPlanningTracker.scala index cd75407c7ee7a..35551d8ba77dc 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/QueryPlanningTracker.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/QueryPlanningTracker.scala @@ -28,7 +28,7 @@ import org.apache.spark.util.BoundedPriorityQueue * There are two separate concepts we track: * * 1. Phases: These are broad scope phases in query planning, as listed below, i.e. analysis, - * optimizationm and physical planning (just planning). + * optimization and physical planning (just planning). * * 2. Rules: These are the individual Catalyst rules that we track. In addition to time, we also * track the number of invocations and effective invocations.

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala index fef63cb8253ca..95715fd1af56e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala @@ -36,7 +36,7 @@ case class ShowTablePropertiesExec( import scala.collection.JavaConverters._ val toRow = RowEncoder(schema).resolveAndBind().createSerializer() - // The reservered properties are accessible through DESCRIBE + // The reserved properties are accessible through DESCRIBE val properties = catalogTable.properties.asScala .filter { case (k, v) => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(k) } propertyKey match {

From 1fd54f4bf58342c067adfa28f0705a4efef5e60a Mon Sep 17 00:00:00 2001 From: Jatin Puri Date: Fri, 21 Aug 2020 16:14:29 -0700 Subject: [PATCH 04/54] [SPARK-32662][ML] CountVectorizerModel: Remove requirement for minimum Vocab size

### What changes were proposed in this pull request? The strict requirement for the vocabulary to remain non-empty has been removed in this pull request. Link to the discussion: http://apache-spark-user-list.1001560.n3.nabble.com/Ability-to-have-CountVectorizerModel-vocab-as-empty-td38396.html

### Why are the changes needed? This smooths out the corner cases: without the change, the user has to manipulate the data just to work around the error in what may be a perfectly valid use case.
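To make the intended behavior concrete, a small sketch of the corner case in question (illustrative only; it mirrors the tests added below rather than prescribing the final API):

```scala
// Sketch: no token reaches minDF, so the fitted vocabulary is empty. Before
// this change, fit() threw IllegalArgumentException; with it, fit() succeeds,
// logs a warning, and the model transforms every row into an empty vector.
val df = Seq((0, Array("a", "b")), (1, Array("c"))).toDF("id", "words")
val model = new CountVectorizer()
  .setInputCol("words")
  .setOutputCol("features")
  .setMinDF(3)  // no term appears in 3 documents
  .fit(df)      // no longer throws
assert(model.vocabulary.isEmpty)
```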
Question: should we add a log line when an empty vocabulary is found instead?

### Does this PR introduce _any_ user-facing change? Possibly a slight change: if someone had put a try-catch around `fit` to detect an empty vocab, that behavior no longer holds.

### How was this patch tested? 1. Added a test case for `fit` generating an empty vocabulary 2. Added a test case for `transform` with an empty vocabulary Request to review: srowen hhbyyh

Closes #29482 from purijatin/spark_32662. Authored-by: Jatin Puri Signed-off-by: Huaxin Gao --- .../spark/ml/feature/CountVectorizer.scala | 5 +- .../ml/feature/CountVectorizerSuite.scala | 74 +++++++++++++++---- 2 files changed, 63 insertions(+), 16 deletions(-)

diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala index 6d39f18df60ea..fd07073c306e3 100644 --- a/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala +++ b/mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala @@ -241,7 +241,10 @@ class CountVectorizer @Since("1.5.0") (@Since("1.5.0") override val uid: String) } wordCounts.unpersist() - require(vocab.length > 0, "The vocabulary size should be > 0. Lower minDF as necessary.") + if (vocab.isEmpty) { + this.logWarning("The vocabulary size is empty. " + + "If this was unexpected, you may wish to lower minDF (or) increase maxDF.") + } copyValues(new CountVectorizerModel(uid, vocab).setParent(this)) }

diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala index bca580d411373..5e32a654c130b 100644 --- a/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/ml/feature/CountVectorizerSuite.scala @@ -188,21 +188,6 @@ class CountVectorizerSuite extends MLTest with DefaultReadWriteTest { } } - test("CountVectorizer throws exception when vocab is empty") { - intercept[IllegalArgumentException] { - val df = Seq( - (0, split("a a b b c c")), - (1, split("aa bb cc")) - ).toDF("id", "words") - val cvModel = new CountVectorizer() - .setInputCol("words") - .setOutputCol("features") - .setVocabSize(3) // limit vocab size to 3 - .setMinDF(3) - .fit(df) - } - } - test("CountVectorizerModel with minTF count") { val df = Seq( (0, split("a a a b b c c c d "), Vectors.sparse(4, Seq((0, 3.0), (2, 3.0)))), @@ -305,4 +290,63 @@ class CountVectorizerSuite extends MLTest with DefaultReadWriteTest { .setOutputCol("features") interaction.transform(df1) } + + test("SPARK-32662: Test on empty dataset") { + val df = Seq[(Int, Array[String])]().toDF("id", "words") + val cvModel = new CountVectorizer() + .setInputCol("words") + .setOutputCol("features") + .fit(df) + assert(cvModel.vocabulary.isEmpty === true) + val ans = cvModel.transform(df).select("features").collect() + assert(ans.length === 0) + } + + test("SPARK-32662: Remove requirement for minimum vocabulary size") { + val df = Seq( + (0, Array[String]()), + (1, Array[String]()) + ).toDF("id", "words") + val cvModel = new CountVectorizer() + .setInputCol("words") + .setOutputCol("features") + .fit(df) + assert(cvModel.vocabulary.isEmpty === true) + testTransformer[(Int, Seq[String])](df, cvModel, "features") { + case Row(features: Vector) => + assert(features === Vectors.sparse(0, Seq())) + } + + val df2 = Seq( + (0, Array("a", "b", "c")), + (1, Array("d", "e")), + (2, Array[String]()) + ).toDF("id", "words") + val cvModel2 = new
CountVectorizer() + .setInputCol("words") + .setOutputCol("features") + .setMinDF(2) + .fit(df2) + assert(cvModel2.vocabulary.isEmpty === true) + testTransformer[(Int, Seq[String])](df2, cvModel2, "features") { + case Row(features: Vector) => + assert(features === Vectors.sparse(0, Seq())) + } + + val df3 = Seq( + (0, Array("a")), + (1, Array("a")), + (2, Array("a")) + ).toDF("id", "words") + val cvModel3 = new CountVectorizer() + .setInputCol("words") + .setOutputCol("features") + .setMaxDF(2) + .fit(df3) + assert(cvModel3.vocabulary.isEmpty === true) + testTransformer[(Int, Seq[String])](df3, cvModel3, "features") { + case Row(features: Vector) => + assert(features === Vectors.sparse(0, Seq())) + } + } }

From 12f4331b9eb563cb0cfbf6a241d1d085ca4f7676 Mon Sep 17 00:00:00 2001 From: "Robert (Bobby) Evans" Date: Sat, 22 Aug 2020 11:07:14 +0900 Subject: [PATCH 05/54] [SPARK-32672][SQL] Fix data corruption in boolean bit set compression

### What changes were proposed in this pull request? This fixes SPARK-32672, a data corruption bug. Essentially, the BooleanBitSet CompressionScheme would miss nulls at the end of a CompressedBatch; the values would then default to false.

### Why are the changes needed? It fixes data corruption.

### Does this PR introduce _any_ user-facing change? No

### How was this patch tested? I manually tested it against the original issue that was producing errors for me. I also added a unit test.

Closes #29506 from revans2/SPARK-32672. Authored-by: Robert (Bobby) Evans Signed-off-by: HyukjinKwon --- .../compression/compressionSchemes.scala | 6 ++--- .../compression/BooleanBitSetSuite.scala | 26 +++++++++++++++++++ 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/compressionSchemes.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/compressionSchemes.scala index 00a1d54b41709..3cc59af9b7ce3 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/compressionSchemes.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/compressionSchemes.scala @@ -318,7 +318,8 @@ private[columnar] case object RunLengthEncoding extends CompressionScheme { var valueCountLocal = 0 var currentValueLocal: Long = 0 - while (valueCountLocal < runLocal || (pos < capacity)) { + while (pos < capacity) { + assert(valueCountLocal <= runLocal) if (pos != nextNullIndex) { if (valueCountLocal == runLocal) { currentValueLocal = getFunction(buffer) @@ -616,7 +617,6 @@ private[columnar] case object BooleanBitSet extends CompressionScheme { override def hasNext: Boolean = visited < count override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { - val countLocal = count var currentWordLocal: Long = 0 var visitedLocal: Int = 0 val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) @@ -626,7 +626,7 @@ private[columnar] case object BooleanBitSet extends CompressionScheme { var pos = 0 var seenNulls = 0 - while (visitedLocal < countLocal) { + while (pos < capacity) { if (pos != nextNullIndex) { val bit = visitedLocal % BITS_PER_LONG

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/BooleanBitSetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/BooleanBitSetSuite.scala index 192db0e910d03..111a620df8c24 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/BooleanBitSetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/BooleanBitSetSuite.scala @@ -156,4 +156,30 @@ class BooleanBitSetSuite extends SparkFunSuite { test(s"$BooleanBitSet: multiple words and 1 more bit for decompression()") { skeletonForDecompress(BITS_PER_LONG * 2 + 1) } + + test(s"$BooleanBitSet: Only nulls for decompression()") { + val builder = TestCompressibleColumnBuilder(new NoopColumnStats, BOOLEAN, BooleanBitSet) + val numRows = 10 + + val rows = Seq.fill[InternalRow](numRows)({ + val row = new GenericInternalRow(1) + row.setNullAt(0) + row + }) + rows.foreach(builder.appendFrom(_, 0)) + val buffer = builder.build() + + // Rewinds, skips column header and 4 more bytes for compression scheme ID + val headerSize = CompressionScheme.columnHeaderSize(buffer) + buffer.position(headerSize) + assertResult(BooleanBitSet.typeId, "Wrong compression scheme ID")(buffer.getInt()) + + val decoder = BooleanBitSet.decoder(buffer, BOOLEAN) + val columnVector = new OnHeapColumnVector(numRows, BooleanType) + decoder.decompress(columnVector, numRows) + + (0 until numRows).foreach { rowNum => + assert(columnVector.isNullAt(rowNum)) + } + } }

From 8b26c69ce7f9077775a3c7bbabb1c47ee6a51a23 Mon Sep 17 00:00:00 2001 From: Yuanjian Li Date: Sat, 22 Aug 2020 21:32:23 +0900 Subject: [PATCH 06/54] [SPARK-31792][SS][DOC][FOLLOW-UP] Rephrase the description for some operations

### What changes were proposed in this pull request? Rephrase the description for some operations to make them clearer.

### Why are the changes needed? Add more detail to the document.

### Does this PR introduce _any_ user-facing change? No, document only.

### How was this patch tested? Document only.

Closes #29269 from xuanyuanking/SPARK-31792-follow. Authored-by: Yuanjian Li Signed-off-by: Jungtaek Lim (HeartSaVioR) --- docs/web-ui.md | 10 +++++----- .../sql/execution/streaming/MicroBatchExecution.scala | 3 +-- 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/docs/web-ui.md b/docs/web-ui.md index 69c9da6428938..465f526ee5b3d 100644 --- a/docs/web-ui.md +++ b/docs/web-ui.md @@ -426,11 +426,11 @@ queries. Currently, it contains the following metrics. * **Batch Duration.** The process duration of each batch. * **Operation Duration.** The amount of time taken to perform various operations in milliseconds. The tracked operations are listed as follows. - * addBatch: Adds result data of the current batch to the sink. - * getBatch: Gets a new batch of data to process. - * latestOffset: Gets the latest offsets for sources. - * queryPlanning: Generates the execution plan. - * walCommit: Writes the offsets to the metadata log. + * addBatch: Time taken to read the micro-batch's input data from the sources, process it, and write the batch's output to the sink. This should take the bulk of the micro-batch's time. + * getBatch: Time taken to prepare the logical query to read the input of the current micro-batch from the sources. + * latestOffset & getOffset: Time taken to query the maximum available offset for this source. + * queryPlanning: Time taken to generate the execution plan. + * walCommit: Time taken to write the offsets to the metadata log. As an early-release version, the statistics page is still under development and will be improved in future releases.
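The operation durations documented above are also exposed programmatically on each streaming progress report. A small sketch (illustrative only, not part of this patch; the map keys are assumed to match the metric names listed above):

```scala
// Sketch: print the per-batch operation durations from an active query's most
// recent progress report; the keys mirror the operations documented above.
import org.apache.spark.sql.streaming.StreamingQuery

def printOperationDurations(query: StreamingQuery): Unit = {
  val durations = query.lastProgress.durationMs  // java.util.Map[String, java.lang.Long]
  durations.forEach((op, ms) => println(s"$op took $ms ms"))
}
```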
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala index e022bfb6835d2..e0731db1f3c18 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala @@ -566,8 +566,7 @@ class MicroBatchExecution( val nextBatch = new Dataset(lastExecution, RowEncoder(lastExecution.analyzed.schema)) - val batchSinkProgress: Option[StreamWriterCommitProgress] = - reportTimeTaken("addBatch") { + val batchSinkProgress: Option[StreamWriterCommitProgress] = reportTimeTaken("addBatch") { SQLExecution.withNewExecutionId(lastExecution) { sink match { case s: Sink => s.addBatch(currentBatchId, nextBatch)

From 25c7d0fe6ae20a4c1c42e0cd0b448c08ab03f3fb Mon Sep 17 00:00:00 2001 From: yangjie01 Date: Sat, 22 Aug 2020 09:24:16 -0500 Subject: [PATCH 07/54] [SPARK-32526][SQL] Pass all tests of sql/catalyst module in Scala 2.13 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

### What changes were proposed in this pull request? The purpose of this PR is to resolve [SPARK-32526](https://issues.apache.org/jira/browse/SPARK-32526); all remaining failed cases are fixed. The main changes of this PR are as follows:
- Change `ExecutorAllocationManager.scala` so the core module compiles in Scala 2.13; it was a blocking problem
- Change `Seq[_]` to `scala.collection.Seq[_]` in the failed cases
- Added a different expected plan for `Test 4: Star with several branches` of StarJoinCostBasedReorderSuite for Scala 2.13, because the candidate plans:
```
Join Inner, (d1_pk#5 = f1_fk1#0)
:- Join Inner, (f1_fk2#1 = d2_pk#8)
:  :- Join Inner, (f1_fk3#2 = d3_pk#11)
```
and
```
Join Inner, (f1_fk2#1 = d2_pk#8)
:- Join Inner, (d1_pk#5 = f1_fk1#0)
:  :- Join Inner, (f1_fk3#2 = d3_pk#11)
```
have the same cost `Cost(200,9200)`, but `HashMap` was rewritten in Scala 2.13 and the order of iteration leads to different results.

This PR fixes the following test cases:
- LiteralExpressionSuite (1 FAILED -> PASS)
- StarJoinCostBasedReorderSuite (1 FAILED -> PASS)
- ObjectExpressionsSuite (2 FAILED -> PASS)
- ScalaReflectionSuite (1 FAILED -> PASS)
- RowEncoderSuite (10 FAILED -> PASS)
- ExpressionEncoderSuite (ABORTED -> PASS)

### Why are the changes needed? We need to support a Scala 2.13 build.

### Does this PR introduce _any_ user-facing change? No

### How was this patch tested?
```sh
-$ sudo pip install 'sphinx<3.1.0' mkdocs numpy pydata_sphinx_theme
+$ sudo pip install 'sphinx<3.1.0' mkdocs numpy pydata_sphinx_theme ipython nbsphinx
```
## Generating the Documentation HTML

diff --git a/python/docs/source/conf.py b/python/docs/source/conf.py index 7b1939d976080..738765a576290 100644 --- a/python/docs/source/conf.py +++ b/python/docs/source/conf.py @@ -45,8 +45,20 @@ 'sphinx.ext.viewcode', 'sphinx.ext.mathjax', 'sphinx.ext.autosummary', + 'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx. + # For ipython directive in reStructuredText files. It is generated by the notebook. + 'IPython.sphinxext.ipython_console_highlighting' ] +# Links used globally in the RST files. +# These are defined here to allow link substitutions dynamically. +rst_epilog = """ +.. |binder| replace:: Live Notebook +.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb +..
|examples| replace:: Examples +.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python +""".format(os.environ.get("RELEASE_TAG", "master")) + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -84,7 +96,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints'] # The reST default role (used for this markup: `text`) to use for all # documents.

diff --git a/python/docs/source/getting_started/index.rst b/python/docs/source/getting_started/index.rst index 457368c8194cb..cf4f7de11dbe3 100644 --- a/python/docs/source/getting_started/index.rst +++ b/python/docs/source/getting_started/index.rst @@ -20,3 +20,7 @@ Getting Started =============== +.. toctree:: + :maxdepth: 2 + + quickstart

diff --git a/python/docs/source/getting_started/quickstart.ipynb b/python/docs/source/getting_started/quickstart.ipynb new file mode 100644 index 0000000000000..34a3641205364 --- /dev/null +++ b/python/docs/source/getting_started/quickstart.ipynb @@ -0,0 +1,1177 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Quickstart\n", + "\n", + "This is a short introduction and quickstart for the PySpark DataFrame API. PySpark DataFrames are lazily evaluated. They are implemented on top of [RDD](https://spark.apache.org/docs/latest/rdd-programming-guide.html#overview)s. When Spark [transforms](https://spark.apache.org/docs/latest/rdd-programming-guide.html#transformations) data, it does not immediately compute the transformation but plans how to compute later. When [actions](https://spark.apache.org/docs/latest/rdd-programming-guide.html#actions) such as `collect()` are explicitly called, the computation starts.\n", + "This notebook shows the basic usage of the DataFrame API, geared mainly for new users. You can run the latest version of these examples by yourself on a live notebook [here](https://mybinder.org/v2/gh/databricks/apache/master?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb).\n", + "\n", + "There is also other useful information on the Apache Spark documentation site; see the latest version of [Spark SQL and DataFrames](https://spark.apache.org/docs/latest/sql-programming-guide.html), [RDD Programming Guide](https://spark.apache.org/docs/latest/rdd-programming-guide.html), [Structured Streaming Programming Guide](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html), [Spark Streaming Programming Guide](https://spark.apache.org/docs/latest/streaming-programming-guide.html) and [Machine Learning Library (MLlib) Guide](https://spark.apache.org/docs/latest/ml-guide.html).\n", + "\n", + "PySpark applications start with initializing `SparkSession`, which is the entry point of PySpark, as shown below. When running it in the PySpark shell via the pyspark executable, the shell automatically creates the session in the variable spark for users."
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from pyspark.sql import SparkSession\n", + "\n", + "spark = SparkSession.builder.getOrCreate()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## DataFrame Creation\n", + "\n", + "A PySpark DataFrame can be created via `pyspark.sql.SparkSession.createDataFrame` typically by passing a list of lists, tuples, dictionaries and `pyspark.sql.Row`s, a [pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) and an RDD consisting of such a list.\n", + "`pyspark.sql.SparkSession.createDataFrame` takes the `schema` argument to specify the schema of the DataFrame. When it is omitted, PySpark infers the corresponding schema by taking a sample from the data.\n", + "\n", + "Firstly, you can create a PySpark DataFrame from a list of rows" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DataFrame[a: bigint, b: double, c: string, d: date, e: timestamp]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from datetime import datetime, date\n", + "import pandas as pd\n", + "from pyspark.sql import Row\n", + "\n", + "df = spark.createDataFrame([\n", + " Row(a=1, b=2., c='string1', d=date(2000, 1, 1), e=datetime(2000, 1, 1, 12, 0)),\n", + " Row(a=2, b=3., c='string2', d=date(2000, 2, 1), e=datetime(2000, 1, 2, 12, 0)),\n", + " Row(a=4, b=5., c='string3', d=date(2000, 3, 1), e=datetime(2000, 1, 3, 12, 0))\n", + "])\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a PySpark DataFrame with an explicit schema." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DataFrame[a: bigint, b: double, c: string, d: date, e: timestamp]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df = spark.createDataFrame([\n", + " (1, 2., 'string1', date(2000, 1, 1), datetime(2000, 1, 1, 12, 0)),\n", + " (2, 3., 'string2', date(2000, 2, 1), datetime(2000, 1, 2, 12, 0)),\n", + " (3, 4., 'string3', date(2000, 3, 1), datetime(2000, 1, 3, 12, 0))\n", + "], schema='a long, b double, c string, d date, e timestamp')\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a PySpark DataFrame from a pandas DataFrame" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DataFrame[a: bigint, b: double, c: string, d: date, e: timestamp]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pandas_df = pd.DataFrame({\n", + " 'a': [1, 2, 3],\n", + " 'b': [2., 3., 4.],\n", + " 'c': ['string1', 'string2', 'string3'],\n", + " 'd': [date(2000, 1, 1), date(2000, 2, 1), date(2000, 3, 1)],\n", + " 'e': [datetime(2000, 1, 1, 12, 0), datetime(2000, 1, 2, 12, 0), datetime(2000, 1, 3, 12, 0)]\n", + "})\n", + "df = spark.createDataFrame(pandas_df)\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a PySpark DataFrame from an RDD consisting of a list of tuples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DataFrame[a: bigint, b: double, c: string, d: date, e: timestamp]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "rdd = spark.sparkContext.parallelize([\n", + " (1, 2., 'string1', date(2000, 1, 1), datetime(2000, 1, 1, 12, 0)),\n", + " (2, 3., 'string2', date(2000, 2, 1), datetime(2000, 1, 2, 12, 0)),\n", + " (3, 4., 'string3', date(2000, 3, 1), datetime(2000, 1, 3, 12, 0))\n", + "])\n", + "df = spark.createDataFrame(rdd, schema=['a', 'b', 'c', 'd', 'e'])\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The DataFrames created above all have the same results and schema." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+---+---+-------+----------+-------------------+\n", + "| a| b| c| d| e|\n", + "+---+---+-------+----------+-------------------+\n", + "| 1|2.0|string1|2000-01-01|2000-01-01 12:00:00|\n", + "| 2|3.0|string2|2000-02-01|2000-01-02 12:00:00|\n", + "| 3|4.0|string3|2000-03-01|2000-01-03 12:00:00|\n", + "+---+---+-------+----------+-------------------+\n", + "\n", + "root\n", + " |-- a: long (nullable = true)\n", + " |-- b: double (nullable = true)\n", + " |-- c: string (nullable = true)\n", + " |-- d: date (nullable = true)\n", + " |-- e: timestamp (nullable = true)\n", + "\n" + ] + } + ], + "source": [ + "# All DataFrames above result same.\n", + "df.show()\n", + "df.printSchema()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Viewing Data\n", + "\n", + "The top rows of a DataFrame can be displayed using `DataFrame.show()`." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+---+---+-------+----------+-------------------+\n", + "| a| b| c| d| e|\n", + "+---+---+-------+----------+-------------------+\n", + "| 1|2.0|string1|2000-01-01|2000-01-01 12:00:00|\n", + "+---+---+-------+----------+-------------------+\n", + "only showing top 1 row\n", + "\n" + ] + } + ], + "source": [ + "df.show(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Alternatively, you can enable `spark.sql.repl.eagerEval.enabled` configuration for the eager evaluation of PySpark DataFrame in notebooks such as Jupyter. The number of rows to show can be controlled via `spark.sql.repl.eagerEval.maxNumRows` configuration." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "
<table border='1'>\n", + "<tr><th>a</th><th>b</th><th>c</th><th>d</th><th>e</th></tr>\n", + "<tr><td>1</td><td>2.0</td><td>string1</td><td>2000-01-01</td><td>2000-01-01 12:00:00</td></tr>\n", + "<tr><td>2</td><td>3.0</td><td>string2</td><td>2000-02-01</td><td>2000-01-02 12:00:00</td></tr>\n", + "<tr><td>3</td><td>4.0</td><td>string3</td><td>2000-03-01</td><td>2000-01-03 12:00:00</td></tr>\n", + "</table>
\n" + ], + "text/plain": [ + "DataFrame[a: bigint, b: double, c: string, d: date, e: timestamp]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "spark.conf.set('spark.sql.repl.eagerEval.enabled', True)\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The rows can also be shown vertically. This is useful when rows are too long to show horizontally." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "-RECORD 0------------------\n", + " a | 1 \n", + " b | 2.0 \n", + " c | string1 \n", + " d | 2000-01-01 \n", + " e | 2000-01-01 12:00:00 \n", + "only showing top 1 row\n", + "\n" + ] + } + ], + "source": [ + "df.show(1, vertical=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the DataFrame's schema and column names as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['a', 'b', 'c', 'd', 'e']" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.columns" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "root\n", + " |-- a: long (nullable = true)\n", + " |-- b: double (nullable = true)\n", + " |-- c: string (nullable = true)\n", + " |-- d: date (nullable = true)\n", + " |-- e: timestamp (nullable = true)\n", + "\n" + ] + } + ], + "source": [ + "df.printSchema()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Show the summary of the DataFrame" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-------+---+---+-------+\n", + "|summary| a| b| c|\n", + "+-------+---+---+-------+\n", + "| count| 3| 3| 3|\n", + "| mean|2.0|3.0| null|\n", + "| stddev|1.0|1.0| null|\n", + "| min| 1|2.0|string1|\n", + "| max| 3|4.0|string3|\n", + "+-------+---+---+-------+\n", + "\n" + ] + } + ], + "source": [ + "df.select(\"a\", \"b\", \"c\").describe().show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`DataFrame.collect()` collects the distributed data to the driver side as the local data in Python. Note that this can throw an out-of-memory error when the dataset is too larget to fit in the driver side because it collects all the data from executors to the driver side." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Row(a=1, b=2.0, c='string1', d=datetime.date(2000, 1, 1), e=datetime.datetime(2000, 1, 1, 12, 0)),\n", + " Row(a=2, b=3.0, c='string2', d=datetime.date(2000, 2, 1), e=datetime.datetime(2000, 1, 2, 12, 0)),\n", + " Row(a=3, b=4.0, c='string3', d=datetime.date(2000, 3, 1), e=datetime.datetime(2000, 1, 3, 12, 0))]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.collect()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In order to avoid throwing an out-of-memory exception, use `DataFrame.take()` or `DataFrame.tail()`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Row(a=1, b=2.0, c='string1', d=datetime.date(2000, 1, 1), e=datetime.datetime(2000, 1, 1, 12, 0))]" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.take(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "PySpark DataFrame also provides the conversion back to a [pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) to leverage pandas APIs. Note that `toPandas` also collects all data into the driver side that can easily cause an out-of-memory-error when the data is too large to fit into the driver side." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

<div>\n", + "<table border=\"1\" class=\"dataframe\">\n", + "<tr style=\"text-align: right;\"><th></th><th>a</th><th>b</th><th>c</th><th>d</th><th>e</th></tr>\n", + "<tr><th>0</th><td>1</td><td>2.0</td><td>string1</td><td>2000-01-01</td><td>2000-01-01 12:00:00</td></tr>\n", + "<tr><th>1</th><td>2</td><td>3.0</td><td>string2</td><td>2000-02-01</td><td>2000-01-02 12:00:00</td></tr>\n", + "<tr><th>2</th><td>3</td><td>4.0</td><td>string3</td><td>2000-03-01</td><td>2000-01-03 12:00:00</td></tr>\n", + "</table>\n", + "</div>
" + ], + "text/plain": [ + " a b c d e\n", + "0 1 2.0 string1 2000-01-01 2000-01-01 12:00:00\n", + "1 2 3.0 string2 2000-02-01 2000-01-02 12:00:00\n", + "2 3 4.0 string3 2000-03-01 2000-01-03 12:00:00" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.toPandas()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Selecting and Accessing Data\n", + "\n", + "PySpark DataFrame is lazily evaluated and simply selecting a column does not trigger the computation but it returns a `Column` instance." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Column" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.a" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In fact, most of column-wise operations return `Column`s." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from pyspark.sql import Column\n", + "from pyspark.sql.functions import upper\n", + "\n", + "type(df.c) == type(upper(df.c)) == type(df.c.isNull())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These `Column`s can be used to select the columns from a DataFrame. For example, `DataFrame.select()` takes the `Column` instances that returns another DataFrame." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-------+\n", + "| c|\n", + "+-------+\n", + "|string1|\n", + "|string2|\n", + "|string3|\n", + "+-------+\n", + "\n" + ] + } + ], + "source": [ + "df.select(df.c).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Assign new `Column` instance." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+---+---+-------+----------+-------------------+-------+\n", + "| a| b| c| d| e|upper_c|\n", + "+---+---+-------+----------+-------------------+-------+\n", + "| 1|2.0|string1|2000-01-01|2000-01-01 12:00:00|STRING1|\n", + "| 2|3.0|string2|2000-02-01|2000-01-02 12:00:00|STRING2|\n", + "| 3|4.0|string3|2000-03-01|2000-01-03 12:00:00|STRING3|\n", + "+---+---+-------+----------+-------------------+-------+\n", + "\n" + ] + } + ], + "source": [ + "df.withColumn('upper_c', upper(df.c)).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To select a subset of rows, use `DataFrame.filter()`." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+---+---+-------+----------+-------------------+\n", + "| a| b| c| d| e|\n", + "+---+---+-------+----------+-------------------+\n", + "| 1|2.0|string1|2000-01-01|2000-01-01 12:00:00|\n", + "+---+---+-------+----------+-------------------+\n", + "\n" + ] + } + ], + "source": [ + "df.filter(df.a == 1).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Applying a Function\n", + "\n", + "PySpark supports various UDFs and APIs to allow users to execute Python native functions. 
See also the latest [Pandas UDFs](https://spark.apache.org/docs/latest/sql-pyspark-pandas-with-arrow.html#pandas-udfs-aka-vectorized-udfs) and [Pandas Function APIs](https://spark.apache.org/docs/latest/sql-pyspark-pandas-with-arrow.html#pandas-function-apis). For instance, the example below allows users to directly use the APIs in [a pandas Series](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html) within Python native function." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+------------------+\n", + "|pandas_plus_one(a)|\n", + "+------------------+\n", + "| 2|\n", + "| 3|\n", + "| 4|\n", + "+------------------+\n", + "\n" + ] + } + ], + "source": [ + "import pandas\n", + "from pyspark.sql.functions import pandas_udf\n", + "\n", + "@pandas_udf('long')\n", + "def pandas_plus_one(series: pd.Series) -> pd.Series:\n", + " # Simply plus one by using pandas Series.\n", + " return series + 1\n", + "\n", + "df.select(pandas_plus_one(df.a)).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Another example is `DataFrame.mapInPandas` which allows users directly use the APIs in a [pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) without any restrictions such as the result length." + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+---+---+-------+----------+-------------------+\n", + "| a| b| c| d| e|\n", + "+---+---+-------+----------+-------------------+\n", + "| 1|2.0|string1|2000-01-01|2000-01-01 12:00:00|\n", + "+---+---+-------+----------+-------------------+\n", + "\n" + ] + } + ], + "source": [ + "def pandas_filter_func(iterator):\n", + " for pandas_df in iterator:\n", + " yield pandas_df[pandas_df.a == 1]\n", + "\n", + "df.mapInPandas(pandas_filter_func, schema=df.schema).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Grouping Data\n", + "\n", + "PySpark DataFrame also provides a way of handling grouped data by using the common approach, split-apply-combine strategy.\n", + "It groups the data by a certain condition applies a function to each group and then combines them back to the DataFrame." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----+------+---+---+\n", + "|color| fruit| v1| v2|\n", + "+-----+------+---+---+\n", + "| red|banana| 1| 10|\n", + "| blue|banana| 2| 20|\n", + "| red|carrot| 3| 30|\n", + "| blue| grape| 4| 40|\n", + "| red|carrot| 5| 50|\n", + "|black|carrot| 6| 60|\n", + "| red|banana| 7| 70|\n", + "| red| grape| 8| 80|\n", + "+-----+------+---+---+\n", + "\n" + ] + } + ], + "source": [ + "df = spark.createDataFrame([\n", + " ['red', 'banana', 1, 10], ['blue', 'banana', 2, 20], ['red', 'carrot', 3, 30],\n", + " ['blue', 'grape', 4, 40], ['red', 'carrot', 5, 50], ['black', 'carrot', 6, 60],\n", + " ['red', 'banana', 7, 70], ['red', 'grape', 8, 80]], schema=['color', 'fruit', 'v1', 'v2'])\n", + "df.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Grouping and then applying the `avg()` function to the resulting groups." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----+-------+-------+\n", + "|color|avg(v1)|avg(v2)|\n", + "+-----+-------+-------+\n", + "| red| 4.8| 48.0|\n", + "|black| 6.0| 60.0|\n", + "| blue| 3.0| 30.0|\n", + "+-----+-------+-------+\n", + "\n" + ] + } + ], + "source": [ + "df.groupby('color').avg().show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also apply a Python native function against each group by using pandas APIs." + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----+------+---+---+\n", + "|color| fruit| v1| v2|\n", + "+-----+------+---+---+\n", + "| red|banana| -3| 10|\n", + "| red|carrot| -1| 30|\n", + "| red|carrot| 0| 50|\n", + "| red|banana| 2| 70|\n", + "| red| grape| 3| 80|\n", + "|black|carrot| 0| 60|\n", + "| blue|banana| -1| 20|\n", + "| blue| grape| 1| 40|\n", + "+-----+------+---+---+\n", + "\n" + ] + } + ], + "source": [ + "def plus_mean(pandas_df):\n", + " return pandas_df.assign(v1=pandas_df.v1 - pandas_df.v1.mean())\n", + "\n", + "df.groupby('color').applyInPandas(plus_mean, schema=df.schema).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Co-grouping and applying a function." + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+--------+---+---+---+\n", + "| time| id| v1| v2|\n", + "+--------+---+---+---+\n", + "|20000101| 1|1.0| x|\n", + "|20000102| 1|3.0| x|\n", + "|20000101| 2|2.0| y|\n", + "|20000102| 2|4.0| y|\n", + "+--------+---+---+---+\n", + "\n" + ] + } + ], + "source": [ + "df1 = spark.createDataFrame(\n", + " [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],\n", + " ('time', 'id', 'v1'))\n", + "\n", + "df2 = spark.createDataFrame(\n", + " [(20000101, 1, 'x'), (20000101, 2, 'y')],\n", + " ('time', 'id', 'v2'))\n", + "\n", + "def asof_join(l, r):\n", + " return pd.merge_asof(l, r, on='time', by='id')\n", + "\n", + "df1.groupby('id').cogroup(df2.groupby('id')).applyInPandas(\n", + " asof_join, schema='time int, id int, v1 double, v2 string').show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Getting Data in/out\n", + "\n", + "CSV is straightforward and easy to use. Parquet and ORC are efficient and compact file formats to read and write faster.\n", + "\n", + "There are many other data sources available in PySpark such as JDBC, text, binaryFile, Avro, etc. See also the latest [Spark SQL, DataFrames and Datasets Guide](https://spark.apache.org/docs/latest/sql-programming-guide.html) in Apache Spark documentation." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CSV" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----+------+---+---+\n", + "|color| fruit| v1| v2|\n", + "+-----+------+---+---+\n", + "| red|banana| 1| 10|\n", + "| blue|banana| 2| 20|\n", + "| red|carrot| 3| 30|\n", + "| blue| grape| 4| 40|\n", + "| red|carrot| 5| 50|\n", + "|black|carrot| 6| 60|\n", + "| red|banana| 7| 70|\n", + "| red| grape| 8| 80|\n", + "+-----+------+---+---+\n", + "\n" + ] + } + ], + "source": [ + "df.write.csv('foo.csv', header=True)\n", + "spark.read.csv('foo.csv', header=True).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Parquet" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----+------+---+---+\n", + "|color| fruit| v1| v2|\n", + "+-----+------+---+---+\n", + "| red|banana| 1| 10|\n", + "| blue|banana| 2| 20|\n", + "| red|carrot| 3| 30|\n", + "| blue| grape| 4| 40|\n", + "| red|carrot| 5| 50|\n", + "|black|carrot| 6| 60|\n", + "| red|banana| 7| 70|\n", + "| red| grape| 8| 80|\n", + "+-----+------+---+---+\n", + "\n" + ] + } + ], + "source": [ + "df.write.parquet('bar.parquet')\n", + "spark.read.parquet('bar.parquet').show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ORC" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----+------+---+---+\n", + "|color| fruit| v1| v2|\n", + "+-----+------+---+---+\n", + "| red|banana| 1| 10|\n", + "| blue|banana| 2| 20|\n", + "| red|carrot| 3| 30|\n", + "| blue| grape| 4| 40|\n", + "| red|carrot| 5| 50|\n", + "|black|carrot| 6| 60|\n", + "| red|banana| 7| 70|\n", + "| red| grape| 8| 80|\n", + "+-----+------+---+---+\n", + "\n" + ] + } + ], + "source": [ + "df.write.orc('zoo.orc')\n", + "spark.read.orc('zoo.orc').show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Working with SQL\n", + "\n", + "DataFrame and Spark SQL share the same execution engine, so they can be used interchangeably and seamlessly.
For example, you can register the DataFrame as a table and run SQL queries easily as below:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+--------+\n", + "|count(1)|\n", + "+--------+\n", + "| 8|\n", + "+--------+\n", + "\n" + ] + } + ], + "source": [ + "df.createOrReplaceTempView(\"tableA\")\n", + "spark.sql(\"SELECT count(*) from tableA\").show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition, UDFs can be registered and invoked in SQL out of the box:" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----------+\n", + "|add_one(v1)|\n", + "+-----------+\n", + "| 2|\n", + "| 3|\n", + "| 4|\n", + "| 5|\n", + "| 6|\n", + "| 7|\n", + "| 8|\n", + "| 9|\n", + "+-----------+\n", + "\n" + ] + } + ], + "source": [ + "@pandas_udf(\"integer\")\n", + "def add_one(s: pd.Series) -> pd.Series:\n", + " return s + 1\n", + "\n", + "spark.udf.register(\"add_one\", add_one)\n", + "spark.sql(\"SELECT add_one(v1) FROM tableA\").show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These SQL expressions can be mixed directly and used as PySpark columns." + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "+-----------+\n", + "|add_one(v1)|\n", + "+-----------+\n", + "| 2|\n", + "| 3|\n", + "| 4|\n", + "| 5|\n", + "| 6|\n", + "| 7|\n", + "| 8|\n", + "| 9|\n", + "+-----------+\n", + "\n", + "+--------------+\n", + "|(count(1) > 0)|\n", + "+--------------+\n", + "| true|\n", + "+--------------+\n", + "\n" + ] + } + ], + "source": [ + "from pyspark.sql.functions import expr\n", + "\n", + "df.selectExpr('add_one(v1)').show()\n", + "df.select(expr('count(*)') > 0).show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.8" + }, + "name": "quickstart", + "notebookId": 1927513300154480 + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/python/docs/source/index.rst b/python/docs/source/index.rst index b9180cefe5dcc..4286f616374c5 100644 --- a/python/docs/source/index.rst +++ b/python/docs/source/index.rst @@ -21,7 +21,7 @@ PySpark Documentation ===================== -.. TODO(SPARK-32204): Add Binder integration at Live Notebook. +|binder|_ | `GitHub `_ | `Issues `_ | |examples|_ | `Community `_ PySpark is an interface for Apache Spark in Python. It not only allows you to write Spark applications using Python APIs, but also provides the PySpark shell for From b07e7429a6af27418da271ac7c374f325e843a25 Mon Sep 17 00:00:00 2001 From: HyukjinKwon Date: Wed, 26 Aug 2020 12:25:59 +0900 Subject: [PATCH 38/54] [SPARK-32695][INFRA] Explicitly cache and hash 'build' directly in GitHub Actions ### What changes were proposed in this pull request? This PR proposes to explicitly cache and hash the files/directories under 'build' for SBT and Zinc in GitHub Actions. Otherwise, the cache can end up overwriting the `build` directory.
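For reference, the cache step after this change looks like the following; this is simply the final form of the step shown in the diff below, not a new configuration:

```yaml
# Cache only the tools that the build downloads under build/ (Maven, Zinc,
# Scala, launcher jars), and hash the checked-in files under build/ into the
# cache key instead of caching them.
- name: Cache Scala, SBT, Maven and Zinc
  uses: actions/cache@v2
  with:
    path: |
      build/apache-maven-*
      build/zinc-*
      build/scala-*
      build/*.jar
    key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }}
    restore-keys: |
      build-
```

This keeps the checked-in scripts such as `build/mvn` and `build/sbt` out of the cached paths, so a restored cache can no longer overwrite them.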
See also https://github.com/apache/spark/pull/29286#issuecomment-679368436 Previously, other files like `build/mvn` and `build/sbt` were also cached and overwritten, so any changes to them were ignored. ### Why are the changes needed? To make the GitHub Actions build stable. ### Does this PR introduce _any_ user-facing change? No, dev-only. ### How was this patch tested? The builds in this PR test it out. Closes #29536 from HyukjinKwon/SPARK-32695. Authored-by: HyukjinKwon Signed-off-by: HyukjinKwon --- .github/workflows/build_and_test.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 78d5d8ec110e3..1c0f50328ee72 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -99,10 +99,14 @@ jobs: run: git merge --progress --ff-only origin/${{ github.event.inputs.target }} # Cache local repositories. Note that GitHub Actions cache has a 2G limit. - name: Cache Scala, SBT, Maven and Zinc - uses: actions/cache@v1 + uses: actions/cache@v2 with: - path: build - key: build-${{ hashFiles('**/pom.xml') }} + path: | + build/apache-maven-* + build/zinc-* + build/scala-* + build/*.jar + key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} restore-keys: | build- - name: Cache Maven local repository @@ -116,7 +120,7 @@ jobs: uses: actions/cache@v2 with: path: ~/.ivy2/cache - key: ${{ matrix.java }}-${{ matrix.hadoop }}-ivy-${{ hashFiles('**/pom.xml') }}-${{ hashFiles('**/plugins.sbt') }} + key: ${{ matrix.java }}-${{ matrix.hadoop }}-ivy-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} restore-keys: | ${{ matrix.java }}-${{ matrix.hadoop }}-ivy- - name: Install JDK ${{ matrix.java }} From f510d21e9300c5ccb19b9d3685195821ff5920d0 Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Wed, 26 Aug 2020 14:46:14 +0900 Subject: [PATCH 39/54] [SPARK-32466][FOLLOW-UP][TEST][SQL] Regenerate the golden explain file for PlanStabilitySuite ### What changes were proposed in this pull request? This PR regenerates the golden explain files based on the fix: https://github.com/apache/spark/pull/29537 ### Why are the changes needed? It eliminates personal information (e.g., local directories) from the explain plans. ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? Checked manually. Closes #29546 from Ngone51/follow-up-gen-golden-file.
Authored-by: yi.wu Signed-off-by: HyukjinKwon --- .../q10.sf100/explain.txt | 14 ++++---- .../approved-plans-modified/q10/explain.txt | 14 ++++---- .../q19.sf100/explain.txt | 12 +++---- .../approved-plans-modified/q19/explain.txt | 12 +++---- .../q27.sf100/explain.txt | 18 +++++----- .../approved-plans-modified/q27/explain.txt | 18 +++++----- .../q3.sf100/explain.txt | 6 ++-- .../approved-plans-modified/q3/explain.txt | 6 ++-- .../q34.sf100/explain.txt | 10 +++--- .../approved-plans-modified/q34/explain.txt | 10 +++--- .../q42.sf100/explain.txt | 6 ++-- .../approved-plans-modified/q42/explain.txt | 6 ++-- .../q43.sf100/explain.txt | 6 ++-- .../approved-plans-modified/q43/explain.txt | 6 ++-- .../q46.sf100/explain.txt | 12 +++---- .../approved-plans-modified/q46/explain.txt | 12 +++---- .../q52.sf100/explain.txt | 6 ++-- .../approved-plans-modified/q52/explain.txt | 6 ++-- .../q53.sf100/explain.txt | 8 ++--- .../approved-plans-modified/q53/explain.txt | 8 ++--- .../q55.sf100/explain.txt | 6 ++-- .../approved-plans-modified/q55/explain.txt | 6 ++-- .../q59.sf100/explain.txt | 14 ++++---- .../approved-plans-modified/q59/explain.txt | 14 ++++---- .../q63.sf100/explain.txt | 8 ++--- .../approved-plans-modified/q63/explain.txt | 8 ++--- .../q65.sf100/explain.txt | 10 +++--- .../approved-plans-modified/q65/explain.txt | 10 +++--- .../q68.sf100/explain.txt | 14 ++++---- .../approved-plans-modified/q68/explain.txt | 12 +++---- .../q7.sf100/explain.txt | 10 +++--- .../approved-plans-modified/q7/explain.txt | 10 +++--- .../q73.sf100/explain.txt | 10 +++--- .../approved-plans-modified/q73/explain.txt | 10 +++--- .../q79.sf100/explain.txt | 10 +++--- .../approved-plans-modified/q79/explain.txt | 10 +++--- .../q89.sf100/explain.txt | 8 ++--- .../approved-plans-modified/q89/explain.txt | 8 ++--- .../q98.sf100/explain.txt | 6 ++-- .../approved-plans-modified/q98/explain.txt | 6 ++-- .../ss_max.sf100/explain.txt | 2 +- .../ss_max/explain.txt | 2 +- .../approved-plans-v1_4/q1.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q1/explain.txt | 10 +++--- .../approved-plans-v1_4/q10.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q10/explain.txt | 14 ++++---- .../approved-plans-v1_4/q12.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q12/explain.txt | 6 ++-- .../approved-plans-v1_4/q13.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q13/explain.txt | 12 +++---- .../q14a.sf100/explain.txt | 32 ++++++++--------- .../approved-plans-v1_4/q14a/explain.txt | 32 ++++++++--------- .../q14b.sf100/explain.txt | 34 +++++++++--------- .../approved-plans-v1_4/q14b/explain.txt | 36 +++++++++---------- .../approved-plans-v1_4/q15.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q15/explain.txt | 8 ++--- .../approved-plans-v1_4/q16.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q16/explain.txt | 12 +++---- .../approved-plans-v1_4/q17.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q17/explain.txt | 14 ++++---- .../approved-plans-v1_4/q18.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q18/explain.txt | 14 ++++---- .../approved-plans-v1_4/q19.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q19/explain.txt | 12 +++---- .../approved-plans-v1_4/q2.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q2/explain.txt | 10 +++--- .../approved-plans-v1_4/q20.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q20/explain.txt | 6 ++-- .../approved-plans-v1_4/q21.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q21/explain.txt | 8 ++--- 
.../approved-plans-v1_4/q22.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q22/explain.txt | 8 ++--- .../q23a.sf100/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q23a/explain.txt | 22 ++++++------ .../q23b.sf100/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q23b/explain.txt | 26 +++++++------- .../q24a.sf100/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q24a/explain.txt | 24 ++++++------- .../q24b.sf100/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q24b/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q25.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q25/explain.txt | 14 ++++---- .../approved-plans-v1_4/q26.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q26/explain.txt | 10 +++--- .../approved-plans-v1_4/q27.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q27/explain.txt | 10 +++--- .../approved-plans-v1_4/q28.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q28/explain.txt | 12 +++---- .../approved-plans-v1_4/q29.sf100/explain.txt | 16 ++++----- .../approved-plans-v1_4/q29/explain.txt | 16 ++++----- .../approved-plans-v1_4/q3.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q3/explain.txt | 6 ++-- .../approved-plans-v1_4/q30.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q30/explain.txt | 12 +++---- .../approved-plans-v1_4/q31.sf100/explain.txt | 20 +++++------ .../approved-plans-v1_4/q31/explain.txt | 20 +++++------ .../approved-plans-v1_4/q32.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q32/explain.txt | 8 ++--- .../approved-plans-v1_4/q33.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q33/explain.txt | 14 ++++---- .../approved-plans-v1_4/q34.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q34/explain.txt | 10 +++--- .../approved-plans-v1_4/q35.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q35/explain.txt | 14 ++++---- .../approved-plans-v1_4/q36.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q36/explain.txt | 8 ++--- .../approved-plans-v1_4/q37.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q37/explain.txt | 8 ++--- .../approved-plans-v1_4/q38.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q38/explain.txt | 10 +++--- .../q39a.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q39a/explain.txt | 12 +++---- .../q39b.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q39b/explain.txt | 12 +++---- .../approved-plans-v1_4/q4.sf100/explain.txt | 18 +++++----- .../approved-plans-v1_4/q4/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q40.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q40/explain.txt | 10 +++--- .../approved-plans-v1_4/q41.sf100/explain.txt | 4 +-- .../approved-plans-v1_4/q41/explain.txt | 4 +-- .../approved-plans-v1_4/q42.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q42/explain.txt | 6 ++-- .../approved-plans-v1_4/q43.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q43/explain.txt | 6 ++-- .../approved-plans-v1_4/q44.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q44/explain.txt | 6 ++-- .../approved-plans-v1_4/q45.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q45/explain.txt | 12 +++---- .../approved-plans-v1_4/q46.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q46/explain.txt | 12 +++---- .../approved-plans-v1_4/q47.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q47/explain.txt | 14 ++++---- .../approved-plans-v1_4/q48.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q48/explain.txt | 10 +++--- .../approved-plans-v1_4/q49.sf100/explain.txt | 30 ++++++++-------- 
.../approved-plans-v1_4/q49/explain.txt | 30 ++++++++-------- .../approved-plans-v1_4/q5.sf100/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q5/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q50.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q50/explain.txt | 10 +++--- .../approved-plans-v1_4/q51.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q51/explain.txt | 6 ++-- .../approved-plans-v1_4/q52.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q52/explain.txt | 6 ++-- .../approved-plans-v1_4/q53.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q53/explain.txt | 8 ++--- .../approved-plans-v1_4/q54.sf100/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q54/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q55.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q55/explain.txt | 6 ++-- .../approved-plans-v1_4/q56.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q56/explain.txt | 14 ++++---- .../approved-plans-v1_4/q57.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q57/explain.txt | 8 ++--- .../approved-plans-v1_4/q58.sf100/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q58/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q59.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q59/explain.txt | 12 +++---- .../approved-plans-v1_4/q6.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q6/explain.txt | 14 ++++---- .../approved-plans-v1_4/q60.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q60/explain.txt | 14 ++++---- .../approved-plans-v1_4/q61.sf100/explain.txt | 18 +++++----- .../approved-plans-v1_4/q61/explain.txt | 16 ++++----- .../approved-plans-v1_4/q62.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q62/explain.txt | 10 +++--- .../approved-plans-v1_4/q63.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q63/explain.txt | 8 ++--- .../approved-plans-v1_4/q64.sf100/explain.txt | 30 ++++++++-------- .../approved-plans-v1_4/q64/explain.txt | 32 ++++++++--------- .../approved-plans-v1_4/q65.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q65/explain.txt | 10 +++--- .../approved-plans-v1_4/q66.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q66/explain.txt | 12 +++---- .../approved-plans-v1_4/q67.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q67/explain.txt | 8 ++--- .../approved-plans-v1_4/q68.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q68/explain.txt | 12 +++---- .../approved-plans-v1_4/q69.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q69/explain.txt | 14 ++++---- .../approved-plans-v1_4/q7.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q7/explain.txt | 10 +++--- .../approved-plans-v1_4/q70.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q70/explain.txt | 10 +++--- .../approved-plans-v1_4/q71.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q71/explain.txt | 12 +++---- .../approved-plans-v1_4/q72.sf100/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q72/explain.txt | 22 ++++++------ .../approved-plans-v1_4/q73.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q73/explain.txt | 10 +++--- .../approved-plans-v1_4/q74.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q74/explain.txt | 16 ++++----- .../approved-plans-v1_4/q75.sf100/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q75/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q76.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q76/explain.txt | 10 +++--- .../approved-plans-v1_4/q77.sf100/explain.txt | 18 +++++----- .../approved-plans-v1_4/q77/explain.txt | 18 
+++++----- .../approved-plans-v1_4/q78.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q78/explain.txt | 14 ++++---- .../approved-plans-v1_4/q79.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q79/explain.txt | 10 +++--- .../approved-plans-v1_4/q8.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q8/explain.txt | 12 +++---- .../approved-plans-v1_4/q80.sf100/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q80/explain.txt | 24 ++++++------- .../approved-plans-v1_4/q81.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q81/explain.txt | 12 +++---- .../approved-plans-v1_4/q82.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q82/explain.txt | 8 ++--- .../approved-plans-v1_4/q83.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q83/explain.txt | 14 ++++---- .../approved-plans-v1_4/q84.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q84/explain.txt | 12 +++---- .../approved-plans-v1_4/q85.sf100/explain.txt | 16 ++++----- .../approved-plans-v1_4/q85/explain.txt | 16 ++++----- .../approved-plans-v1_4/q86.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q86/explain.txt | 6 ++-- .../approved-plans-v1_4/q87.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q87/explain.txt | 10 +++--- .../approved-plans-v1_4/q88.sf100/explain.txt | 36 +++++++++---------- .../approved-plans-v1_4/q88/explain.txt | 36 +++++++++---------- .../approved-plans-v1_4/q89.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q89/explain.txt | 8 ++--- .../approved-plans-v1_4/q9.sf100/explain.txt | 32 ++++++++--------- .../approved-plans-v1_4/q9/explain.txt | 32 ++++++++--------- .../approved-plans-v1_4/q90.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q90/explain.txt | 12 +++---- .../approved-plans-v1_4/q91.sf100/explain.txt | 14 ++++---- .../approved-plans-v1_4/q91/explain.txt | 14 ++++---- .../approved-plans-v1_4/q92.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q92/explain.txt | 8 ++--- .../approved-plans-v1_4/q93.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q93/explain.txt | 6 ++-- .../approved-plans-v1_4/q94.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q94/explain.txt | 12 +++---- .../approved-plans-v1_4/q95.sf100/explain.txt | 12 +++---- .../approved-plans-v1_4/q95/explain.txt | 16 ++++----- .../approved-plans-v1_4/q96.sf100/explain.txt | 8 ++--- .../approved-plans-v1_4/q96/explain.txt | 8 ++--- .../approved-plans-v1_4/q97.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q97/explain.txt | 6 ++-- .../approved-plans-v1_4/q98.sf100/explain.txt | 6 ++-- .../approved-plans-v1_4/q98/explain.txt | 6 ++-- .../approved-plans-v1_4/q99.sf100/explain.txt | 10 +++--- .../approved-plans-v1_4/q99/explain.txt | 10 +++--- .../q10a.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q10a/explain.txt | 14 ++++---- .../approved-plans-v2_7/q11.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q11/explain.txt | 16 ++++----- .../approved-plans-v2_7/q12.sf100/explain.txt | 6 ++-- .../approved-plans-v2_7/q12/explain.txt | 6 ++-- .../approved-plans-v2_7/q14.sf100/explain.txt | 34 +++++++++--------- .../approved-plans-v2_7/q14/explain.txt | 36 +++++++++---------- .../q14a.sf100/explain.txt | 34 +++++++++--------- .../approved-plans-v2_7/q14a/explain.txt | 34 +++++++++--------- .../q18a.sf100/explain.txt | 34 +++++++++--------- .../approved-plans-v2_7/q18a/explain.txt | 30 ++++++++-------- .../approved-plans-v2_7/q20.sf100/explain.txt | 6 ++-- .../approved-plans-v2_7/q20/explain.txt | 6 ++-- .../approved-plans-v2_7/q22.sf100/explain.txt | 8 ++--- 
.../approved-plans-v2_7/q22/explain.txt | 8 ++--- .../q22a.sf100/explain.txt | 8 ++--- .../approved-plans-v2_7/q22a/explain.txt | 8 ++--- .../approved-plans-v2_7/q24.sf100/explain.txt | 24 ++++++------- .../approved-plans-v2_7/q24/explain.txt | 24 ++++++------- .../q27a.sf100/explain.txt | 18 +++++----- .../approved-plans-v2_7/q27a/explain.txt | 18 +++++----- .../approved-plans-v2_7/q34.sf100/explain.txt | 10 +++--- .../approved-plans-v2_7/q34/explain.txt | 10 +++--- .../approved-plans-v2_7/q35.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q35/explain.txt | 14 ++++---- .../q35a.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q35a/explain.txt | 14 ++++---- .../q36a.sf100/explain.txt | 8 ++--- .../approved-plans-v2_7/q36a/explain.txt | 8 ++--- .../approved-plans-v2_7/q47.sf100/explain.txt | 10 +++--- .../approved-plans-v2_7/q47/explain.txt | 10 +++--- .../approved-plans-v2_7/q49.sf100/explain.txt | 34 +++++++++--------- .../approved-plans-v2_7/q49/explain.txt | 30 ++++++++-------- .../q51a.sf100/explain.txt | 6 ++-- .../approved-plans-v2_7/q51a/explain.txt | 6 ++-- .../approved-plans-v2_7/q57.sf100/explain.txt | 10 +++--- .../approved-plans-v2_7/q57/explain.txt | 10 +++--- .../approved-plans-v2_7/q5a.sf100/explain.txt | 22 ++++++------ .../approved-plans-v2_7/q5a/explain.txt | 22 ++++++------ .../approved-plans-v2_7/q6.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q6/explain.txt | 14 ++++---- .../approved-plans-v2_7/q64.sf100/explain.txt | 30 ++++++++-------- .../approved-plans-v2_7/q64/explain.txt | 32 ++++++++--------- .../q67a.sf100/explain.txt | 8 ++--- .../approved-plans-v2_7/q67a/explain.txt | 8 ++--- .../q70a.sf100/explain.txt | 10 +++--- .../approved-plans-v2_7/q70a/explain.txt | 10 +++--- .../approved-plans-v2_7/q72.sf100/explain.txt | 22 ++++++------ .../approved-plans-v2_7/q72/explain.txt | 22 ++++++------ .../approved-plans-v2_7/q74.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q74/explain.txt | 16 ++++----- .../approved-plans-v2_7/q75.sf100/explain.txt | 24 ++++++------- .../approved-plans-v2_7/q75/explain.txt | 24 ++++++------- .../q77a.sf100/explain.txt | 18 +++++----- .../approved-plans-v2_7/q77a/explain.txt | 18 +++++----- .../approved-plans-v2_7/q78.sf100/explain.txt | 14 ++++---- .../approved-plans-v2_7/q78/explain.txt | 14 ++++---- .../q80a.sf100/explain.txt | 24 ++++++------- .../approved-plans-v2_7/q80a/explain.txt | 24 ++++++------- .../q86a.sf100/explain.txt | 6 ++-- .../approved-plans-v2_7/q86a/explain.txt | 6 ++-- .../approved-plans-v2_7/q98.sf100/explain.txt | 6 ++-- .../approved-plans-v2_7/q98/explain.txt | 6 ++-- 310 files changed, 2065 insertions(+), 2065 deletions(-) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10.sf100/explain.txt index 12a8ec439eab4..4caad37094cd6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10.sf100/explain.txt @@ -56,7 +56,7 @@ TakeOrderedAndProject (52) (1) Scan parquet default.customer Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: 
[IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -78,7 +78,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0 (6) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#5, ws_bill_customer_sk#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)] ReadSchema: struct @@ -92,7 +92,7 @@ Condition : (isnotnull(ws_sold_date_sk#5) AND isnotnull(ws_bill_customer_sk#6)) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#7, d_year#8, d_moy#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,2002), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,7), IsNotNull(d_date_sk)] ReadSchema: struct @@ -123,7 +123,7 @@ Input [3]: [ws_sold_date_sk#5, ws_bill_customer_sk#6, d_date_sk#7] (16) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#12, cs_ship_customer_sk#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_customer_sk)] ReadSchema: struct @@ -164,7 +164,7 @@ Join condition: None (26) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#16, ss_customer_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -207,7 +207,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] (36) Scan parquet default.customer_address Output [2]: [ca_address_sk#20, ca_county#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_county, [Walker County,Richland County,Gaines County,Douglas County,Dona Ana County]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -242,7 +242,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (44) Scan parquet default.customer_demographics Output [9]: [cd_demo_sk#24, cd_gender#25, cd_marital_status#26, cd_education_status#27, cd_purchase_estimate#28, cd_credit_rating#29, cd_dep_count#30, cd_dep_employed_count#31, cd_dep_college_count#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10/explain.txt index f6c43526fbe5d..20f60b396c78e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q10/explain.txt @@ -52,7 +52,7 @@ TakeOrderedAndProject (48) (1) Scan parquet default.customer Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -66,7 +66,7 @@ Condition : ((isnotnull(c_customer_sk#1) AND isnotnull(c_current_addr_sk#3)) AND (4) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#4, ws_bill_customer_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)] ReadSchema: struct @@ -80,7 +80,7 @@ Condition : (isnotnull(ws_sold_date_sk#4) AND isnotnull(ws_bill_customer_sk#5)) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_moy#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2002), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,7), IsNotNull(d_date_sk)] ReadSchema: struct @@ -111,7 +111,7 @@ Input [3]: [ws_sold_date_sk#4, ws_bill_customer_sk#5, d_date_sk#6] (14) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#11, cs_ship_customer_sk#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_customer_sk)] ReadSchema: struct @@ -148,7 +148,7 @@ Join condition: None (23) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#15, ss_customer_sk#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -187,7 +187,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] (32) Scan parquet default.customer_address Output [2]: [ca_address_sk#19, ca_county#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_county, [Walker County,Richland County,Gaines County,Douglas County,Dona Ana County]), 
IsNotNull(ca_address_sk)] ReadSchema: struct @@ -218,7 +218,7 @@ Input [3]: [c_current_cdemo_sk#2, c_current_addr_sk#3, ca_address_sk#19] (39) Scan parquet default.customer_demographics Output [9]: [cd_demo_sk#22, cd_gender#23, cd_marital_status#24, cd_education_status#25, cd_purchase_estimate#26, cd_credit_rating#27, cd_dep_count#28, cd_dep_employed_count#29, cd_dep_college_count#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19.sf100/explain.txt index 1239699c0e839..6e44fc8e9608b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19.sf100/explain.txt @@ -43,7 +43,7 @@ TakeOrderedAndProject (39) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), GreaterThanOrEqual(d_date_sk,2451484), LessThanOrEqual(d_date_sk,2451513), IsNotNull(d_date_sk)] ReadSchema: struct @@ -65,7 +65,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#5, ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451484), LessThanOrEqual(ss_sold_date_sk,2451513), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -92,7 +92,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint)) (12) Scan parquet default.customer Output [2]: [c_customer_sk#11, c_current_addr_sk#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -115,7 +115,7 @@ Input [6]: [ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9, (17) Scan parquet default.store Output [2]: [s_store_sk#13, s_zip#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_zip), IsNotNull(s_store_sk)] ReadSchema: struct @@ -146,7 +146,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[2, int, true] as bigint)) (24) 
Scan parquet default.customer_address Output [2]: [ca_address_sk#17, ca_zip#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip)] ReadSchema: struct @@ -169,7 +169,7 @@ Input [6]: [ss_item_sk#6, ss_ext_sales_price#9, c_current_addr_sk#12, s_zip#14, (29) Scan parquet default.item Output [6]: [i_item_sk#19, i_brand_id#20, i_brand#21, i_manufact_id#22, i_manufact#23, i_manager_id#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,7), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19/explain.txt index 5a404d7719934..8a3919ca1daaf 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q19/explain.txt @@ -43,7 +43,7 @@ TakeOrderedAndProject (39) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), GreaterThanOrEqual(d_date_sk,2451484), LessThanOrEqual(d_date_sk,2451513), IsNotNull(d_date_sk)] ReadSchema: struct @@ -61,7 +61,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3] (5) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#4, ss_item_sk#5, ss_customer_sk#6, ss_store_sk#7, ss_ext_sales_price#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451484), LessThanOrEqual(ss_sold_date_sk,2451513), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -88,7 +88,7 @@ Input [6]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_customer_sk#6, ss_s (11) Scan parquet default.item Output [6]: [i_item_sk#10, i_brand_id#11, i_brand#12, i_manufact_id#13, i_manufact#14, i_manager_id#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,7), IsNotNull(i_item_sk)] ReadSchema: struct @@ -119,7 +119,7 @@ Input [9]: [ss_item_sk#5, ss_customer_sk#6, ss_store_sk#7, ss_ext_sales_price#8, (18) Scan parquet default.customer Output [2]: [c_customer_sk#17, c_current_addr_sk#18] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -146,7 +146,7 @@ Input [9]: [ss_customer_sk#6, ss_store_sk#7, ss_ext_sales_price#8, i_brand_id#11 (24) Scan parquet default.customer_address Output [2]: [ca_address_sk#20, ca_zip#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip)] ReadSchema: struct @@ -173,7 +173,7 @@ Input [9]: [ss_store_sk#7, ss_ext_sales_price#8, i_brand_id#11, i_brand#12, i_ma (30) Scan parquet default.store Output [2]: [s_store_sk#23, s_zip#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_zip), IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt index 5cec6f18579e2..0c568bacff1d0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt @@ -81,7 +81,7 @@ TakeOrderedAndProject (77) (1) Scan parquet default.date_dim Output [2]: [d_date_sk#1, d_year#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), LessThanOrEqual(d_date_sk,2451910), GreaterThanOrEqual(d_date_sk,2451545), IsNotNull(d_date_sk)] ReadSchema: struct @@ -103,7 +103,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -126,7 +126,7 @@ Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_stor (11) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#12, cd_gender#13, cd_marital_status#14, cd_education_status#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: 
[IsNotNull(cd_education_status), IsNotNull(cd_gender), IsNotNull(cd_marital_status), EqualTo(cd_gender,F), EqualTo(cd_marital_status,D), EqualTo(cd_education_status,Primary), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Input [8]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_p (18) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_state, [TN,AL,SD]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -184,7 +184,7 @@ Input [8]: [ss_item_sk#5, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sale (24) Scan parquet default.item Output [2]: [i_item_sk#20, i_item_id#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -232,7 +232,7 @@ Output [1]: [d_date_sk#1] (34) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -255,7 +255,7 @@ Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_stor (39) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_state, [TN,AL,SD]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -331,7 +331,7 @@ Output [1]: [d_date_sk#1] (56) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -378,7 +378,7 @@ Input [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sale (67) Scan parquet default.item Output [1]: [i_item_sk#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27/explain.txt index 151e713ff2e3c..6e0bd7a6c32ab 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27/explain.txt @@ -81,7 +81,7 @@ TakeOrderedAndProject (77) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -95,7 +95,7 @@ Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451545) (4) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_marital_status), IsNotNull(cd_education_status), IsNotNull(cd_gender), EqualTo(cd_gender,F), EqualTo(cd_marital_status,D), EqualTo(cd_education_status,Primary), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -126,7 +126,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_qu (11) Scan parquet default.date_dim Output [2]: [d_date_sk#14, d_year#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), LessThanOrEqual(d_date_sk,2451910), GreaterThanOrEqual(d_date_sk,2451545), IsNotNull(d_date_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li (18) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_state, [TN,AL,SD]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -184,7 +184,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale (24) Scan parquet default.item Output [2]: [i_item_sk#20, i_item_id#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -229,7 +229,7 @@ Results [7]: [i_item_id#21, s_state#18, 0 AS g_state#48, avg(cast(agg1#23 as big (33) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, 
ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -267,7 +267,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li (42) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_state, [TN,AL,SD]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -328,7 +328,7 @@ Results [7]: [i_item_id#21, null AS s_state#75, 1 AS g_state#76, avg(cast(agg1#2 (55) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -378,7 +378,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale (67) Scan parquet default.item Output [1]: [i_item_sk#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3.sf100/explain.txt index c7212ca883700..93736a83ebfc6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3.sf100/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: 
[Or(Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2415355),LessThanOrEqual(ss_sold_date_sk,2415385)),And(GreaterThanOrEqual(ss_sold_date_sk,2415720),LessThanOrEqual(ss_sold_date_sk,2415750))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2416085),LessThanOrEqual(ss_sold_date_sk,2416115)),And(GreaterThanOrEqual(ss_sold_date_sk,2416450),LessThanOrEqual(ss_sold_date_sk,2416480)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2416816),LessThanOrEqual(ss_sold_date_sk,2416846)),And(GreaterThanOrEqual(ss_sold_date_sk,2417181),LessThanOrEqual(ss_sold_date_sk,2417211))),And(GreaterThanOrEqual(ss_sold_date_sk,2417546),LessThanOrEqual(ss_sold_date_sk,2417576)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2417911),LessThanOrEqual(ss_sold_date_sk,2417941)),And(GreaterThanOrEqual(ss_sold_date_sk,2418277),LessThanOrEqual(ss_sold_date_sk,2418307))),And(GreaterThanOrEqual(ss_sold_date_sk,2418642),LessThanOrEqual(ss_sold_date_sk,2418672))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2419007),LessThanOrEqual(ss_sold_date_sk,2419037)),And(GreaterThanOrEqual(ss_sold_date_sk,2419372),LessThanOrEqual(ss_sold_date_sk,2419402))),And(GreaterThanOrEqual(ss_sold_date_sk,2419738),LessThanOrEqual(ss_sold_date_sk,2419768))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2420103),LessThanOrEqual(ss_sold_date_sk,2420133)),And(GreaterThanOrEqual(ss_sold_date_sk,2420468),LessThanOrEqual(ss_sold_date_sk,2420498))),And(GreaterThanOrEqual(ss_sold_date_sk,2420833),LessThanOrEqual(ss_sold_date_sk,2420863))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2421199),LessThanOrEqual(ss_sold_date_sk,2421229)),And(GreaterThanOrEqual(ss_sold_date_sk,2421564),LessThanOrEqual(ss_sold_date_sk,2421594))),And(GreaterThanOrEqual(ss_sold_date_sk,2421929),LessThanOrEqual(ss_sold_date_sk,2421959)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2422294),LessThanOrEqual(ss_sold_date_sk,2422324)),And(GreaterThanOrEqual(ss_sold_date_sk,2422660),LessThanOrEqual(ss_sold_date_sk,2422690))),And(GreaterThanOrEqual(ss_sold_date_sk,2423025),LessThanOrEqual(ss_sold_date_sk,2423055))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2423390),LessThanOrEqual(ss_sold_date_sk,2423420)),And(GreaterThanOrEqual(ss_sold_date_sk,2423755),LessThanOrEqual(ss_sold_date_sk,2423785))),And(GreaterThanOrEqual(ss_sold_date_sk,2424121),LessThanOrEqual(ss_sold_date_sk,2424151)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2424486),LessThanOrEqual(ss_sold_date_sk,2424516)),And(GreaterThanOrEqual(ss_sold_date_sk,2424851),LessThanOrEqual(ss_sold_date_sk,2424881))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2425216),LessThanOrEqual(ss_sold_date_sk,2425246)),And(GreaterThanOrEqual(ss_sold_date_sk,2425582),LessThanOrEqual(ss_sold_date_sk,2425612)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2425947),LessThanOrEqual(ss_sold_date_sk,2425977)),And(GreaterThanOrEqual(ss_sold_date_sk,2426312),LessThanOrEqual(ss_sold_date_sk,2426342))),And(GreaterThanOrEqual(ss_sold_date_sk,2426677),LessThanOrEqual(ss_sold_date_sk,2426707)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2427043),LessThanOrEqual(ss_sold_date_sk,2427073)),And(GreaterThanOrEqual(ss_sold_date_sk,2427408),LessThanOrEqual(ss_sold_date_sk,2427438))),And(GreaterThanOrEqual(ss_sold_date_sk,2427773),LessThanOrEqual(ss_sold_date_sk,2427803))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2428138),LessThanOrEqual(ss_sold_date_sk,2428168)),And(GreaterThanOrEqual(ss_sold_date_sk,2428504),LessThanOrEqual(ss_sold_date_sk,2428534))),And(GreaterThanOrEqual(ss_sold_date_sk,2428869),LessThanOrEqual(ss_sold
_date_sk,2428899))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2429234),LessThanOrEqual(ss_sold_date_sk,2429264)),And(GreaterThanOrEqual(ss_sold_date_sk,2429599),LessThanOrEqual(ss_sold_date_sk,2429629))),And(GreaterThanOrEqual(ss_sold_date_sk,2429965),LessThanOrEqual(ss_sold_date_sk,2429995))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2430330),LessThanOrEqual(ss_sold_date_sk,2430360)),And(GreaterThanOrEqual(ss_sold_date_sk,2430695),LessThanOrEqual(ss_sold_date_sk,2430725))),And(GreaterThanOrEqual(ss_sold_date_sk,2431060),LessThanOrEqual(ss_sold_date_sk,2431090)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2431426),LessThanOrEqual(ss_sold_date_sk,2431456)),And(GreaterThanOrEqual(ss_sold_date_sk,2431791),LessThanOrEqual(ss_sold_date_sk,2431821))),And(GreaterThanOrEqual(ss_sold_date_sk,2432156),LessThanOrEqual(ss_sold_date_sk,2432186))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2432521),LessThanOrEqual(ss_sold_date_sk,2432551)),And(GreaterThanOrEqual(ss_sold_date_sk,2432887),LessThanOrEqual(ss_sold_date_sk,2432917))),And(GreaterThanOrEqual(ss_sold_date_sk,2433252),LessThanOrEqual(ss_sold_date_sk,2433282))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2433617),LessThanOrEqual(ss_sold_date_sk,2433647)),And(GreaterThanOrEqual(ss_sold_date_sk,2433982),LessThanOrEqual(ss_sold_date_sk,2434012))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2434348),LessThanOrEqual(ss_sold_date_sk,2434378)),And(GreaterThanOrEqual(ss_sold_date_sk,2434713),LessThanOrEqual(ss_sold_date_sk,2434743)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2435078),LessThanOrEqual(ss_sold_date_sk,2435108)),And(GreaterThanOrEqual(ss_sold_date_sk,2435443),LessThanOrEqual(ss_sold_date_sk,2435473))),And(GreaterThanOrEqual(ss_sold_date_sk,2435809),LessThanOrEqual(ss_sold_date_sk,2435839)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2436174),LessThanOrEqual(ss_sold_date_sk,2436204)),And(GreaterThanOrEqual(ss_sold_date_sk,2436539),LessThanOrEqual(ss_sold_date_sk,2436569))),And(GreaterThanOrEqual(ss_sold_date_sk,2436904),LessThanOrEqual(ss_sold_date_sk,2436934))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2437270),LessThanOrEqual(ss_sold_date_sk,2437300)),And(GreaterThanOrEqual(ss_sold_date_sk,2437635),LessThanOrEqual(ss_sold_date_sk,2437665))),And(GreaterThanOrEqual(ss_sold_date_sk,2438000),LessThanOrEqual(ss_sold_date_sk,2438030))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2438365),LessThanOrEqual(ss_sold_date_sk,2438395)),And(GreaterThanOrEqual(ss_sold_date_sk,2438731),LessThanOrEqual(ss_sold_date_sk,2438761))),And(GreaterThanOrEqual(ss_sold_date_sk,2439096),LessThanOrEqual(ss_sold_date_sk,2439126))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2439461),LessThanOrEqual(ss_sold_date_sk,2439491)),And(GreaterThanOrEqual(ss_sold_date_sk,2439826),LessThanOrEqual(ss_sold_date_sk,2439856))),And(GreaterThanOrEqual(ss_sold_date_sk,2440192),LessThanOrEqual(ss_sold_date_sk,2440222)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2440557),LessThanOrEqual(ss_sold_date_sk,2440587)),And(GreaterThanOrEqual(ss_sold_date_sk,2440922),LessThanOrEqual(ss_sold_date_sk,2440952))),And(GreaterThanOrEqual(ss_sold_date_sk,2441287),LessThanOrEqual(ss_sold_date_sk,2441317))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2441653),LessThanOrEqual(ss_sold_date_sk,2441683)),And(GreaterThanOrEqual(ss_sold_date_sk,2442018),LessThanOrEqual(ss_sold_date_sk,2442048))),And(GreaterThanOrEqual(ss_sold_date_sk,2442383),LessThanOrEqual(ss_sold_date_sk,2442413)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2442
748),LessThanOrEqual(ss_sold_date_sk,2442778)),And(GreaterThanOrEqual(ss_sold_date_sk,2443114),LessThanOrEqual(ss_sold_date_sk,2443144))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2443479),LessThanOrEqual(ss_sold_date_sk,2443509)),And(GreaterThanOrEqual(ss_sold_date_sk,2443844),LessThanOrEqual(ss_sold_date_sk,2443874)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2444209),LessThanOrEqual(ss_sold_date_sk,2444239)),And(GreaterThanOrEqual(ss_sold_date_sk,2444575),LessThanOrEqual(ss_sold_date_sk,2444605))),And(GreaterThanOrEqual(ss_sold_date_sk,2444940),LessThanOrEqual(ss_sold_date_sk,2444970)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2445305),LessThanOrEqual(ss_sold_date_sk,2445335)),And(GreaterThanOrEqual(ss_sold_date_sk,2445670),LessThanOrEqual(ss_sold_date_sk,2445700))),And(GreaterThanOrEqual(ss_sold_date_sk,2446036),LessThanOrEqual(ss_sold_date_sk,2446066))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2446401),LessThanOrEqual(ss_sold_date_sk,2446431)),And(GreaterThanOrEqual(ss_sold_date_sk,2446766),LessThanOrEqual(ss_sold_date_sk,2446796))),And(GreaterThanOrEqual(ss_sold_date_sk,2447131),LessThanOrEqual(ss_sold_date_sk,2447161))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2447497),LessThanOrEqual(ss_sold_date_sk,2447527)),And(GreaterThanOrEqual(ss_sold_date_sk,2447862),LessThanOrEqual(ss_sold_date_sk,2447892))),And(GreaterThanOrEqual(ss_sold_date_sk,2448227),LessThanOrEqual(ss_sold_date_sk,2448257))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2448592),LessThanOrEqual(ss_sold_date_sk,2448622)),And(GreaterThanOrEqual(ss_sold_date_sk,2448958),LessThanOrEqual(ss_sold_date_sk,2448988))),And(GreaterThanOrEqual(ss_sold_date_sk,2449323),LessThanOrEqual(ss_sold_date_sk,2449353)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2449688),LessThanOrEqual(ss_sold_date_sk,2449718)),And(GreaterThanOrEqual(ss_sold_date_sk,2450053),LessThanOrEqual(ss_sold_date_sk,2450083))),And(GreaterThanOrEqual(ss_sold_date_sk,2450419),LessThanOrEqual(ss_sold_date_sk,2450449))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2450784),LessThanOrEqual(ss_sold_date_sk,2450814)),And(GreaterThanOrEqual(ss_sold_date_sk,2451149),LessThanOrEqual(ss_sold_date_sk,2451179))),And(GreaterThanOrEqual(ss_sold_date_sk,2451514),LessThanOrEqual(ss_sold_date_sk,2451544)))))))),Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2451880),LessThanOrEqual(ss_sold_date_sk,2451910)),And(GreaterThanOrEqual(ss_sold_date_sk,2452245),LessThanOrEqual(ss_sold_date_sk,2452275))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2452610),LessThanOrEqual(ss_sold_date_sk,2452640)),And(GreaterThanOrEqual(ss_sold_date_sk,2452975),LessThanOrEqual(ss_sold_date_sk,2453005)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2453341),LessThanOrEqual(ss_sold_date_sk,2453371)),And(GreaterThanOrEqual(ss_sold_date_sk,2453706),LessThanOrEqual(ss_sold_date_sk,2453736))),And(GreaterThanOrEqual(ss_sold_date_sk,2454071),LessThanOrEqual(ss_sold_date_sk,2454101)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2454436),LessThanOrEqual(ss_sold_date_sk,2454466)),And(GreaterThanOrEqual(ss_sold_date_sk,2454802),LessThanOrEqual(ss_sold_date_sk,2454832))),And(GreaterThanOrEqual(ss_sold_date_sk,2455167),LessThanOrEqual(ss_sold_date_sk,2455197))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2455532),LessThanOrEqual(ss_sold_date_sk,2455562)),And(GreaterThanOrEqual(ss_sold_date_sk,2455897),LessThanOrEqual(ss_sold_date_sk,2455927))),And(GreaterThanOrEqual(ss_sold_date_sk,2456263),LessThanOrEqual(ss_sold_date_sk,2456293))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_
sold_date_sk,2456628),LessThanOrEqual(ss_sold_date_sk,2456658)),And(GreaterThanOrEqual(ss_sold_date_sk,2456993),LessThanOrEqual(ss_sold_date_sk,2457023))),And(GreaterThanOrEqual(ss_sold_date_sk,2457358),LessThanOrEqual(ss_sold_date_sk,2457388))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2457724),LessThanOrEqual(ss_sold_date_sk,2457754)),And(GreaterThanOrEqual(ss_sold_date_sk,2458089),LessThanOrEqual(ss_sold_date_sk,2458119))),And(GreaterThanOrEqual(ss_sold_date_sk,2458454),LessThanOrEqual(ss_sold_date_sk,2458484)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2458819),LessThanOrEqual(ss_sold_date_sk,2458849)),And(GreaterThanOrEqual(ss_sold_date_sk,2459185),LessThanOrEqual(ss_sold_date_sk,2459215))),And(GreaterThanOrEqual(ss_sold_date_sk,2459550),LessThanOrEqual(ss_sold_date_sk,2459580))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2459915),LessThanOrEqual(ss_sold_date_sk,2459945)),And(GreaterThanOrEqual(ss_sold_date_sk,2460280),LessThanOrEqual(ss_sold_date_sk,2460310))),And(GreaterThanOrEqual(ss_sold_date_sk,2460646),LessThanOrEqual(ss_sold_date_sk,2460676)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2461011),LessThanOrEqual(ss_sold_date_sk,2461041)),And(GreaterThanOrEqual(ss_sold_date_sk,2461376),LessThanOrEqual(ss_sold_date_sk,2461406))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2461741),LessThanOrEqual(ss_sold_date_sk,2461771)),And(GreaterThanOrEqual(ss_sold_date_sk,2462107),LessThanOrEqual(ss_sold_date_sk,2462137)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2462472),LessThanOrEqual(ss_sold_date_sk,2462502)),And(GreaterThanOrEqual(ss_sold_date_sk,2462837),LessThanOrEqual(ss_sold_date_sk,2462867))),And(GreaterThanOrEqual(ss_sold_date_sk,2463202),LessThanOrEqual(ss_sold_date_sk,2463232)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2463568),LessThanOrEqual(ss_sold_date_sk,2463598)),And(GreaterThanOrEqual(ss_sold_date_sk,2463933),LessThanOrEqual(ss_sold_date_sk,2463963))),And(GreaterThanOrEqual(ss_sold_date_sk,2464298),LessThanOrEqual(ss_sold_date_sk,2464328))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2464663),LessThanOrEqual(ss_sold_date_sk,2464693)),And(GreaterThanOrEqual(ss_sold_date_sk,2465029),LessThanOrEqual(ss_sold_date_sk,2465059))),And(GreaterThanOrEqual(ss_sold_date_sk,2465394),LessThanOrEqual(ss_sold_date_sk,2465424))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2465759),LessThanOrEqual(ss_sold_date_sk,2465789)),And(GreaterThanOrEqual(ss_sold_date_sk,2466124),LessThanOrEqual(ss_sold_date_sk,2466154))),And(GreaterThanOrEqual(ss_sold_date_sk,2466490),LessThanOrEqual(ss_sold_date_sk,2466520))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2466855),LessThanOrEqual(ss_sold_date_sk,2466885)),And(GreaterThanOrEqual(ss_sold_date_sk,2467220),LessThanOrEqual(ss_sold_date_sk,2467250))),And(GreaterThanOrEqual(ss_sold_date_sk,2467585),LessThanOrEqual(ss_sold_date_sk,2467615)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2467951),LessThanOrEqual(ss_sold_date_sk,2467981)),And(GreaterThanOrEqual(ss_sold_date_sk,2468316),LessThanOrEqual(ss_sold_date_sk,2468346))),And(GreaterThanOrEqual(ss_sold_date_sk,2468681),LessThanOrEqual(ss_sold_date_sk,2468711))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2469046),LessThanOrEqual(ss_sold_date_sk,2469076)),And(GreaterThanOrEqual(ss_sold_date_sk,2469412),LessThanOrEqual(ss_sold_date_sk,2469442))),And(GreaterThanOrEqual(ss_sold_date_sk,2469777),LessThanOrEqual(ss_sold_date_sk,2469807))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2470142),LessThanOrEqual(ss_sold_date_sk,2470172)),And(GreaterTh
anOrEqual(ss_sold_date_sk,2470507),LessThanOrEqual(ss_sold_date_sk,2470537))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2470873),LessThanOrEqual(ss_sold_date_sk,2470903)),And(GreaterThanOrEqual(ss_sold_date_sk,2471238),LessThanOrEqual(ss_sold_date_sk,2471268)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2471603),LessThanOrEqual(ss_sold_date_sk,2471633)),And(GreaterThanOrEqual(ss_sold_date_sk,2471968),LessThanOrEqual(ss_sold_date_sk,2471998))),And(GreaterThanOrEqual(ss_sold_date_sk,2472334),LessThanOrEqual(ss_sold_date_sk,2472364)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2472699),LessThanOrEqual(ss_sold_date_sk,2472729)),And(GreaterThanOrEqual(ss_sold_date_sk,2473064),LessThanOrEqual(ss_sold_date_sk,2473094))),And(GreaterThanOrEqual(ss_sold_date_sk,2473429),LessThanOrEqual(ss_sold_date_sk,2473459))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2473795),LessThanOrEqual(ss_sold_date_sk,2473825)),And(GreaterThanOrEqual(ss_sold_date_sk,2474160),LessThanOrEqual(ss_sold_date_sk,2474190))),And(GreaterThanOrEqual(ss_sold_date_sk,2474525),LessThanOrEqual(ss_sold_date_sk,2474555))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2474890),LessThanOrEqual(ss_sold_date_sk,2474920)),And(GreaterThanOrEqual(ss_sold_date_sk,2475256),LessThanOrEqual(ss_sold_date_sk,2475286))),And(GreaterThanOrEqual(ss_sold_date_sk,2475621),LessThanOrEqual(ss_sold_date_sk,2475651))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2475986),LessThanOrEqual(ss_sold_date_sk,2476016)),And(GreaterThanOrEqual(ss_sold_date_sk,2476351),LessThanOrEqual(ss_sold_date_sk,2476381))),And(GreaterThanOrEqual(ss_sold_date_sk,2476717),LessThanOrEqual(ss_sold_date_sk,2476747)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2477082),LessThanOrEqual(ss_sold_date_sk,2477112)),And(GreaterThanOrEqual(ss_sold_date_sk,2477447),LessThanOrEqual(ss_sold_date_sk,2477477))),And(GreaterThanOrEqual(ss_sold_date_sk,2477812),LessThanOrEqual(ss_sold_date_sk,2477842))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2478178),LessThanOrEqual(ss_sold_date_sk,2478208)),And(GreaterThanOrEqual(ss_sold_date_sk,2478543),LessThanOrEqual(ss_sold_date_sk,2478573))),And(GreaterThanOrEqual(ss_sold_date_sk,2478908),LessThanOrEqual(ss_sold_date_sk,2478938)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2479273),LessThanOrEqual(ss_sold_date_sk,2479303)),And(GreaterThanOrEqual(ss_sold_date_sk,2479639),LessThanOrEqual(ss_sold_date_sk,2479669))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2480004),LessThanOrEqual(ss_sold_date_sk,2480034)),And(GreaterThanOrEqual(ss_sold_date_sk,2480369),LessThanOrEqual(ss_sold_date_sk,2480399)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2480734),LessThanOrEqual(ss_sold_date_sk,2480764)),And(GreaterThanOrEqual(ss_sold_date_sk,2481100),LessThanOrEqual(ss_sold_date_sk,2481130))),And(GreaterThanOrEqual(ss_sold_date_sk,2481465),LessThanOrEqual(ss_sold_date_sk,2481495)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2481830),LessThanOrEqual(ss_sold_date_sk,2481860)),And(GreaterThanOrEqual(ss_sold_date_sk,2482195),LessThanOrEqual(ss_sold_date_sk,2482225))),And(GreaterThanOrEqual(ss_sold_date_sk,2482561),LessThanOrEqual(ss_sold_date_sk,2482591))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2482926),LessThanOrEqual(ss_sold_date_sk,2482956)),And(GreaterThanOrEqual(ss_sold_date_sk,2483291),LessThanOrEqual(ss_sold_date_sk,2483321))),And(GreaterThanOrEqual(ss_sold_date_sk,2483656),LessThanOrEqual(ss_sold_date_sk,2483686))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2484022),LessThanOrEqual(ss_sold_date_sk,2484052)),And(
GreaterThanOrEqual(ss_sold_date_sk,2484387),LessThanOrEqual(ss_sold_date_sk,2484417))),And(GreaterThanOrEqual(ss_sold_date_sk,2484752),LessThanOrEqual(ss_sold_date_sk,2484782))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2485117),LessThanOrEqual(ss_sold_date_sk,2485147)),And(GreaterThanOrEqual(ss_sold_date_sk,2485483),LessThanOrEqual(ss_sold_date_sk,2485513))),And(GreaterThanOrEqual(ss_sold_date_sk,2485848),LessThanOrEqual(ss_sold_date_sk,2485878)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2486213),LessThanOrEqual(ss_sold_date_sk,2486243)),And(GreaterThanOrEqual(ss_sold_date_sk,2486578),LessThanOrEqual(ss_sold_date_sk,2486608))),And(GreaterThanOrEqual(ss_sold_date_sk,2486944),LessThanOrEqual(ss_sold_date_sk,2486974))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2487309),LessThanOrEqual(ss_sold_date_sk,2487339)),And(GreaterThanOrEqual(ss_sold_date_sk,2487674),LessThanOrEqual(ss_sold_date_sk,2487704))),And(GreaterThanOrEqual(ss_sold_date_sk,2488039),LessThanOrEqual(ss_sold_date_sk,2488069))))))))), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct
@@ -39,7 +39,7 @@ Condition : ((((((((((((ss_sold_date_sk#1 >= 2415355) AND (ss_sold_date_sk#1 <=

(4) Scan parquet default.item
Output [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manufact_id#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,436), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -70,7 +70,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_net_profit#3, i_item_sk#4, i_bra

(11) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_year#10, d_moy#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), EqualTo(d_moy,12),
Or(Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2415355),LessThanOrEqual(d_date_sk,2415385)),And(GreaterThanOrEqual(d_date_sk,2415720),LessThanOrEqual(d_date_sk,2415750))),Or(And(GreaterThanOrEqual(d_date_sk,2416085),LessThanOrEqual(d_date_sk,2416115)),And(GreaterThanOrEqual(d_date_sk,2416450),LessThanOrEqual(d_date_sk,2416480)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2416816),LessThanOrEqual(d_date_sk,2416846)),And(GreaterThanOrEqual(d_date_sk,2417181),LessThanOrEqual(d_date_sk,2417211))),And(GreaterThanOrEqual(d_date_sk,2417546),LessThanOrEqual(d_date_sk,2417576)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2417911),LessThanOrEqual(d_date_sk,2417941)),And(GreaterThanOrEqual(d_date_sk,2418277),LessThanOrEqual(d_date_sk,2418307))),And(GreaterThanOrEqual(d_date_sk,2418642),LessThanOrEqual(d_date_sk,2418672))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2419007),LessThanOrEqual(d_date_sk,2419037)),And(GreaterThanOrEqual(d_date_sk,2419372),LessThanOrEqual(d_date_sk,2419402))),And(GreaterThanOrEqual(d_date_sk,2419738),LessThanOrEqual(d_date_sk,2419768))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2420103),LessThanOrEqual(d_date_sk,2420133)),And(GreaterThanOrEqual(d_date_sk,2420468),LessThanOrEqual(d_date_sk,2420498))),And(GreaterThanOrEqual(d_date_sk,2420833),LessThanOrEqual(d_date_sk,2420863))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2421199),LessThanOrEqual(d_date_sk,2421229)),And(GreaterThanOrEqual(d_date_sk,2421564),LessThanOrEqual(d_date_sk,2421594))),And(GreaterThanOrEqual(d_date_sk,2421929),LessThanOrEqual(d_date_sk,2421959)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2422294),LessThanOrEqual(d_date_sk,2422324)),And(GreaterThanOrEqual(d_date_sk,2422660),LessThanOrEqual(d_date_sk,2422690))),And(GreaterThanOrEqual(d_date_sk,2423025),LessThanOrEqual(d_date_sk,2423055))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2423390),LessThanOrEqual(d_date_sk,2423420)),And(GreaterThanOrEqual(d_date_sk,2423755),LessThanOrEqual(d_date_sk,2423785))),And(GreaterThanOrEqual(d_date_sk,2424121),LessThanOrEqual(d_date_sk,2424151)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2424486),LessThanOrEqual(d_date_sk,2424516)),And(GreaterThanOrEqual(d_date_sk,2424851),LessThanOrEqual(d_date_sk,2424881))),Or(And(GreaterThanOrEqual(d_date_sk,2425216),LessThanOrEqual(d_date_sk,2425246)),And(GreaterThanOrEqual(d_date_sk,2425582),LessThanOrEqual(d_date_sk,2425612)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2425947),LessThanOrEqual(d_date_sk,2425977)),And(GreaterThanOrEqual(d_date_sk,2426312),LessThanOrEqual(d_date_sk,2426342))),And(GreaterThanOrEqual(d_date_sk,2426677),LessThanOrEqual(d_date_sk,2426707)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2427043),LessThanOrEqual(d_date_sk,2427073)),And(GreaterThanOrEqual(d_date_sk,2427408),LessThanOrEqual(d_date_sk,2427438))),And(GreaterThanOrEqual(d_date_sk,2427773),LessThanOrEqual(d_date_sk,2427803))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2428138),LessThanOrEqual(d_date_sk,2428168)),And(GreaterThanOrEqual(d_date_sk,2428504),LessThanOrEqual(d_date_sk,2428534))),And(GreaterThanOrEqual(d_date_sk,2428869),LessThanOrEqual(d_date_sk,2428899))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2429234),LessThanOrEqual(d_date_sk,2429264)),And(GreaterThanOrEqual(d_date_sk,2429599),LessThanOrEqual(d_date_sk,2429629))),And(GreaterThanOrEqual(d_date_sk,2429965),LessThanOrEqual(d_date_sk,2429995))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2430330),LessThanOrEqual(d_date_sk,2430360)),And(GreaterThanOrEqual(d_date_sk,2430695),LessThanOrEqual(d_date_sk,2430725))),And(GreaterThanOrEqual(d
_date_sk,2431060),LessThanOrEqual(d_date_sk,2431090)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2431426),LessThanOrEqual(d_date_sk,2431456)),And(GreaterThanOrEqual(d_date_sk,2431791),LessThanOrEqual(d_date_sk,2431821))),And(GreaterThanOrEqual(d_date_sk,2432156),LessThanOrEqual(d_date_sk,2432186))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2432521),LessThanOrEqual(d_date_sk,2432551)),And(GreaterThanOrEqual(d_date_sk,2432887),LessThanOrEqual(d_date_sk,2432917))),And(GreaterThanOrEqual(d_date_sk,2433252),LessThanOrEqual(d_date_sk,2433282))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2433617),LessThanOrEqual(d_date_sk,2433647)),And(GreaterThanOrEqual(d_date_sk,2433982),LessThanOrEqual(d_date_sk,2434012))),Or(And(GreaterThanOrEqual(d_date_sk,2434348),LessThanOrEqual(d_date_sk,2434378)),And(GreaterThanOrEqual(d_date_sk,2434713),LessThanOrEqual(d_date_sk,2434743)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2435078),LessThanOrEqual(d_date_sk,2435108)),And(GreaterThanOrEqual(d_date_sk,2435443),LessThanOrEqual(d_date_sk,2435473))),And(GreaterThanOrEqual(d_date_sk,2435809),LessThanOrEqual(d_date_sk,2435839)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2436174),LessThanOrEqual(d_date_sk,2436204)),And(GreaterThanOrEqual(d_date_sk,2436539),LessThanOrEqual(d_date_sk,2436569))),And(GreaterThanOrEqual(d_date_sk,2436904),LessThanOrEqual(d_date_sk,2436934))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2437270),LessThanOrEqual(d_date_sk,2437300)),And(GreaterThanOrEqual(d_date_sk,2437635),LessThanOrEqual(d_date_sk,2437665))),And(GreaterThanOrEqual(d_date_sk,2438000),LessThanOrEqual(d_date_sk,2438030))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2438365),LessThanOrEqual(d_date_sk,2438395)),And(GreaterThanOrEqual(d_date_sk,2438731),LessThanOrEqual(d_date_sk,2438761))),And(GreaterThanOrEqual(d_date_sk,2439096),LessThanOrEqual(d_date_sk,2439126))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2439461),LessThanOrEqual(d_date_sk,2439491)),And(GreaterThanOrEqual(d_date_sk,2439826),LessThanOrEqual(d_date_sk,2439856))),And(GreaterThanOrEqual(d_date_sk,2440192),LessThanOrEqual(d_date_sk,2440222)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2440557),LessThanOrEqual(d_date_sk,2440587)),And(GreaterThanOrEqual(d_date_sk,2440922),LessThanOrEqual(d_date_sk,2440952))),And(GreaterThanOrEqual(d_date_sk,2441287),LessThanOrEqual(d_date_sk,2441317))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2441653),LessThanOrEqual(d_date_sk,2441683)),And(GreaterThanOrEqual(d_date_sk,2442018),LessThanOrEqual(d_date_sk,2442048))),And(GreaterThanOrEqual(d_date_sk,2442383),LessThanOrEqual(d_date_sk,2442413)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2442748),LessThanOrEqual(d_date_sk,2442778)),And(GreaterThanOrEqual(d_date_sk,2443114),LessThanOrEqual(d_date_sk,2443144))),Or(And(GreaterThanOrEqual(d_date_sk,2443479),LessThanOrEqual(d_date_sk,2443509)),And(GreaterThanOrEqual(d_date_sk,2443844),LessThanOrEqual(d_date_sk,2443874)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2444209),LessThanOrEqual(d_date_sk,2444239)),And(GreaterThanOrEqual(d_date_sk,2444575),LessThanOrEqual(d_date_sk,2444605))),And(GreaterThanOrEqual(d_date_sk,2444940),LessThanOrEqual(d_date_sk,2444970)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2445305),LessThanOrEqual(d_date_sk,2445335)),And(GreaterThanOrEqual(d_date_sk,2445670),LessThanOrEqual(d_date_sk,2445700))),And(GreaterThanOrEqual(d_date_sk,2446036),LessThanOrEqual(d_date_sk,2446066))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2446401),LessThanOrEqual(d_date_sk,2446431)),And(GreaterThanOrEqual(d_date_sk,2446766),LessThanOrEq
ual(d_date_sk,2446796))),And(GreaterThanOrEqual(d_date_sk,2447131),LessThanOrEqual(d_date_sk,2447161))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2447497),LessThanOrEqual(d_date_sk,2447527)),And(GreaterThanOrEqual(d_date_sk,2447862),LessThanOrEqual(d_date_sk,2447892))),And(GreaterThanOrEqual(d_date_sk,2448227),LessThanOrEqual(d_date_sk,2448257))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2448592),LessThanOrEqual(d_date_sk,2448622)),And(GreaterThanOrEqual(d_date_sk,2448958),LessThanOrEqual(d_date_sk,2448988))),And(GreaterThanOrEqual(d_date_sk,2449323),LessThanOrEqual(d_date_sk,2449353)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2449688),LessThanOrEqual(d_date_sk,2449718)),And(GreaterThanOrEqual(d_date_sk,2450053),LessThanOrEqual(d_date_sk,2450083))),And(GreaterThanOrEqual(d_date_sk,2450419),LessThanOrEqual(d_date_sk,2450449))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2450784),LessThanOrEqual(d_date_sk,2450814)),And(GreaterThanOrEqual(d_date_sk,2451149),LessThanOrEqual(d_date_sk,2451179))),And(GreaterThanOrEqual(d_date_sk,2451514),LessThanOrEqual(d_date_sk,2451544)))))))),Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2451880),LessThanOrEqual(d_date_sk,2451910)),And(GreaterThanOrEqual(d_date_sk,2452245),LessThanOrEqual(d_date_sk,2452275))),Or(And(GreaterThanOrEqual(d_date_sk,2452610),LessThanOrEqual(d_date_sk,2452640)),And(GreaterThanOrEqual(d_date_sk,2452975),LessThanOrEqual(d_date_sk,2453005)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2453341),LessThanOrEqual(d_date_sk,2453371)),And(GreaterThanOrEqual(d_date_sk,2453706),LessThanOrEqual(d_date_sk,2453736))),And(GreaterThanOrEqual(d_date_sk,2454071),LessThanOrEqual(d_date_sk,2454101)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2454436),LessThanOrEqual(d_date_sk,2454466)),And(GreaterThanOrEqual(d_date_sk,2454802),LessThanOrEqual(d_date_sk,2454832))),And(GreaterThanOrEqual(d_date_sk,2455167),LessThanOrEqual(d_date_sk,2455197))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2455532),LessThanOrEqual(d_date_sk,2455562)),And(GreaterThanOrEqual(d_date_sk,2455897),LessThanOrEqual(d_date_sk,2455927))),And(GreaterThanOrEqual(d_date_sk,2456263),LessThanOrEqual(d_date_sk,2456293))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2456628),LessThanOrEqual(d_date_sk,2456658)),And(GreaterThanOrEqual(d_date_sk,2456993),LessThanOrEqual(d_date_sk,2457023))),And(GreaterThanOrEqual(d_date_sk,2457358),LessThanOrEqual(d_date_sk,2457388))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2457724),LessThanOrEqual(d_date_sk,2457754)),And(GreaterThanOrEqual(d_date_sk,2458089),LessThanOrEqual(d_date_sk,2458119))),And(GreaterThanOrEqual(d_date_sk,2458454),LessThanOrEqual(d_date_sk,2458484)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2458819),LessThanOrEqual(d_date_sk,2458849)),And(GreaterThanOrEqual(d_date_sk,2459185),LessThanOrEqual(d_date_sk,2459215))),And(GreaterThanOrEqual(d_date_sk,2459550),LessThanOrEqual(d_date_sk,2459580))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2459915),LessThanOrEqual(d_date_sk,2459945)),And(GreaterThanOrEqual(d_date_sk,2460280),LessThanOrEqual(d_date_sk,2460310))),And(GreaterThanOrEqual(d_date_sk,2460646),LessThanOrEqual(d_date_sk,2460676)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2461011),LessThanOrEqual(d_date_sk,2461041)),And(GreaterThanOrEqual(d_date_sk,2461376),LessThanOrEqual(d_date_sk,2461406))),Or(And(GreaterThanOrEqual(d_date_sk,2461741),LessThanOrEqual(d_date_sk,2461771)),And(GreaterThanOrEqual(d_date_sk,2462107),LessThanOrEqual(d_date_sk,2462137)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2462472),LessThanOrEqual(d_date_sk,2462502
)),And(GreaterThanOrEqual(d_date_sk,2462837),LessThanOrEqual(d_date_sk,2462867))),And(GreaterThanOrEqual(d_date_sk,2463202),LessThanOrEqual(d_date_sk,2463232)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2463568),LessThanOrEqual(d_date_sk,2463598)),And(GreaterThanOrEqual(d_date_sk,2463933),LessThanOrEqual(d_date_sk,2463963))),And(GreaterThanOrEqual(d_date_sk,2464298),LessThanOrEqual(d_date_sk,2464328))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2464663),LessThanOrEqual(d_date_sk,2464693)),And(GreaterThanOrEqual(d_date_sk,2465029),LessThanOrEqual(d_date_sk,2465059))),And(GreaterThanOrEqual(d_date_sk,2465394),LessThanOrEqual(d_date_sk,2465424))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2465759),LessThanOrEqual(d_date_sk,2465789)),And(GreaterThanOrEqual(d_date_sk,2466124),LessThanOrEqual(d_date_sk,2466154))),And(GreaterThanOrEqual(d_date_sk,2466490),LessThanOrEqual(d_date_sk,2466520))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2466855),LessThanOrEqual(d_date_sk,2466885)),And(GreaterThanOrEqual(d_date_sk,2467220),LessThanOrEqual(d_date_sk,2467250))),And(GreaterThanOrEqual(d_date_sk,2467585),LessThanOrEqual(d_date_sk,2467615)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2467951),LessThanOrEqual(d_date_sk,2467981)),And(GreaterThanOrEqual(d_date_sk,2468316),LessThanOrEqual(d_date_sk,2468346))),And(GreaterThanOrEqual(d_date_sk,2468681),LessThanOrEqual(d_date_sk,2468711))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2469046),LessThanOrEqual(d_date_sk,2469076)),And(GreaterThanOrEqual(d_date_sk,2469412),LessThanOrEqual(d_date_sk,2469442))),And(GreaterThanOrEqual(d_date_sk,2469777),LessThanOrEqual(d_date_sk,2469807))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2470142),LessThanOrEqual(d_date_sk,2470172)),And(GreaterThanOrEqual(d_date_sk,2470507),LessThanOrEqual(d_date_sk,2470537))),Or(And(GreaterThanOrEqual(d_date_sk,2470873),LessThanOrEqual(d_date_sk,2470903)),And(GreaterThanOrEqual(d_date_sk,2471238),LessThanOrEqual(d_date_sk,2471268)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2471603),LessThanOrEqual(d_date_sk,2471633)),And(GreaterThanOrEqual(d_date_sk,2471968),LessThanOrEqual(d_date_sk,2471998))),And(GreaterThanOrEqual(d_date_sk,2472334),LessThanOrEqual(d_date_sk,2472364)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2472699),LessThanOrEqual(d_date_sk,2472729)),And(GreaterThanOrEqual(d_date_sk,2473064),LessThanOrEqual(d_date_sk,2473094))),And(GreaterThanOrEqual(d_date_sk,2473429),LessThanOrEqual(d_date_sk,2473459))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2473795),LessThanOrEqual(d_date_sk,2473825)),And(GreaterThanOrEqual(d_date_sk,2474160),LessThanOrEqual(d_date_sk,2474190))),And(GreaterThanOrEqual(d_date_sk,2474525),LessThanOrEqual(d_date_sk,2474555))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2474890),LessThanOrEqual(d_date_sk,2474920)),And(GreaterThanOrEqual(d_date_sk,2475256),LessThanOrEqual(d_date_sk,2475286))),And(GreaterThanOrEqual(d_date_sk,2475621),LessThanOrEqual(d_date_sk,2475651))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2475986),LessThanOrEqual(d_date_sk,2476016)),And(GreaterThanOrEqual(d_date_sk,2476351),LessThanOrEqual(d_date_sk,2476381))),And(GreaterThanOrEqual(d_date_sk,2476717),LessThanOrEqual(d_date_sk,2476747)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2477082),LessThanOrEqual(d_date_sk,2477112)),And(GreaterThanOrEqual(d_date_sk,2477447),LessThanOrEqual(d_date_sk,2477477))),And(GreaterThanOrEqual(d_date_sk,2477812),LessThanOrEqual(d_date_sk,2477842))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2478178),LessThanOrEqual(d_date_sk,2478208)),And(GreaterThanOrEqual(d_date_sk,2
478543),LessThanOrEqual(d_date_sk,2478573))),And(GreaterThanOrEqual(d_date_sk,2478908),LessThanOrEqual(d_date_sk,2478938)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2479273),LessThanOrEqual(d_date_sk,2479303)),And(GreaterThanOrEqual(d_date_sk,2479639),LessThanOrEqual(d_date_sk,2479669))),Or(And(GreaterThanOrEqual(d_date_sk,2480004),LessThanOrEqual(d_date_sk,2480034)),And(GreaterThanOrEqual(d_date_sk,2480369),LessThanOrEqual(d_date_sk,2480399)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2480734),LessThanOrEqual(d_date_sk,2480764)),And(GreaterThanOrEqual(d_date_sk,2481100),LessThanOrEqual(d_date_sk,2481130))),And(GreaterThanOrEqual(d_date_sk,2481465),LessThanOrEqual(d_date_sk,2481495)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2481830),LessThanOrEqual(d_date_sk,2481860)),And(GreaterThanOrEqual(d_date_sk,2482195),LessThanOrEqual(d_date_sk,2482225))),And(GreaterThanOrEqual(d_date_sk,2482561),LessThanOrEqual(d_date_sk,2482591))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2482926),LessThanOrEqual(d_date_sk,2482956)),And(GreaterThanOrEqual(d_date_sk,2483291),LessThanOrEqual(d_date_sk,2483321))),And(GreaterThanOrEqual(d_date_sk,2483656),LessThanOrEqual(d_date_sk,2483686))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2484022),LessThanOrEqual(d_date_sk,2484052)),And(GreaterThanOrEqual(d_date_sk,2484387),LessThanOrEqual(d_date_sk,2484417))),And(GreaterThanOrEqual(d_date_sk,2484752),LessThanOrEqual(d_date_sk,2484782))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2485117),LessThanOrEqual(d_date_sk,2485147)),And(GreaterThanOrEqual(d_date_sk,2485483),LessThanOrEqual(d_date_sk,2485513))),And(GreaterThanOrEqual(d_date_sk,2485848),LessThanOrEqual(d_date_sk,2485878)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2486213),LessThanOrEqual(d_date_sk,2486243)),And(GreaterThanOrEqual(d_date_sk,2486578),LessThanOrEqual(d_date_sk,2486608))),And(GreaterThanOrEqual(d_date_sk,2486944),LessThanOrEqual(d_date_sk,2486974))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2487309),LessThanOrEqual(d_date_sk,2487339)),And(GreaterThanOrEqual(d_date_sk,2487674),LessThanOrEqual(d_date_sk,2487704))),And(GreaterThanOrEqual(d_date_sk,2488039),LessThanOrEqual(d_date_sk,2488069))))))))), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3/explain.txt
index 6f5ff301fbeac..63739540ea8cc 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q3/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)

(1) Scan parquet default.date_dim
Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), EqualTo(d_moy,12),
Or(Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2415355),LessThanOrEqual(d_date_sk,2415385)),And(GreaterThanOrEqual(d_date_sk,2415720),LessThanOrEqual(d_date_sk,2415750))),Or(And(GreaterThanOrEqual(d_date_sk,2416085),LessThanOrEqual(d_date_sk,2416115)),And(GreaterThanOrEqual(d_date_sk,2416450),LessThanOrEqual(d_date_sk,2416480)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2416816),LessThanOrEqual(d_date_sk,2416846)),And(GreaterThanOrEqual(d_date_sk,2417181),LessThanOrEqual(d_date_sk,2417211))),And(GreaterThanOrEqual(d_date_sk,2417546),LessThanOrEqual(d_date_sk,2417576)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2417911),LessThanOrEqual(d_date_sk,2417941)),And(GreaterThanOrEqual(d_date_sk,2418277),LessThanOrEqual(d_date_sk,2418307))),And(GreaterThanOrEqual(d_date_sk,2418642),LessThanOrEqual(d_date_sk,2418672))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2419007),LessThanOrEqual(d_date_sk,2419037)),And(GreaterThanOrEqual(d_date_sk,2419372),LessThanOrEqual(d_date_sk,2419402))),And(GreaterThanOrEqual(d_date_sk,2419738),LessThanOrEqual(d_date_sk,2419768))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2420103),LessThanOrEqual(d_date_sk,2420133)),And(GreaterThanOrEqual(d_date_sk,2420468),LessThanOrEqual(d_date_sk,2420498))),And(GreaterThanOrEqual(d_date_sk,2420833),LessThanOrEqual(d_date_sk,2420863))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2421199),LessThanOrEqual(d_date_sk,2421229)),And(GreaterThanOrEqual(d_date_sk,2421564),LessThanOrEqual(d_date_sk,2421594))),And(GreaterThanOrEqual(d_date_sk,2421929),LessThanOrEqual(d_date_sk,2421959)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2422294),LessThanOrEqual(d_date_sk,2422324)),And(GreaterThanOrEqual(d_date_sk,2422660),LessThanOrEqual(d_date_sk,2422690))),And(GreaterThanOrEqual(d_date_sk,2423025),LessThanOrEqual(d_date_sk,2423055))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2423390),LessThanOrEqual(d_date_sk,2423420)),And(GreaterThanOrEqual(d_date_sk,2423755),LessThanOrEqual(d_date_sk,2423785))),And(GreaterThanOrEqual(d_date_sk,2424121),LessThanOrEqual(d_date_sk,2424151)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2424486),LessThanOrEqual(d_date_sk,2424516)),And(GreaterThanOrEqual(d_date_sk,2424851),LessThanOrEqual(d_date_sk,2424881))),Or(And(GreaterThanOrEqual(d_date_sk,2425216),LessThanOrEqual(d_date_sk,2425246)),And(GreaterThanOrEqual(d_date_sk,2425582),LessThanOrEqual(d_date_sk,2425612)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2425947),LessThanOrEqual(d_date_sk,2425977)),And(GreaterThanOrEqual(d_date_sk,2426312),LessThanOrEqual(d_date_sk,2426342))),And(GreaterThanOrEqual(d_date_sk,2426677),LessThanOrEqual(d_date_sk,2426707)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2427043),LessThanOrEqual(d_date_sk,2427073)),And(GreaterThanOrEqual(d_date_sk,2427408),LessThanOrEqual(d_date_sk,2427438))),And(GreaterThanOrEqual(d_date_sk,2427773),LessThanOrEqual(d_date_sk,2427803))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2428138),LessThanOrEqual(d_date_sk,2428168)),And(GreaterThanOrEqual(d_date_sk,2428504),LessThanOrEqual(d_date_sk,2428534))),And(GreaterThanOrEqual(d_date_sk,2428869),LessThanOrEqual(d_date_sk,2428899))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2429234),LessThanOrEqual(d_date_sk,2429264)),And(GreaterThanOrEqual(d_date_sk,2429599),LessThanOrEqual(d_date_sk,2429629))),And(GreaterThanOrEqual(d_date_sk,2429965),LessThanOrEqual(d_date_sk,2429995))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2430330),LessThanOrEqual(d_date_sk,2430360)),And(GreaterThanOrEqual(d_date_sk,2430695),LessThanOrEqual(d_date_sk,2430725))),And(GreaterThanOrEqual(d
_date_sk,2431060),LessThanOrEqual(d_date_sk,2431090)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2431426),LessThanOrEqual(d_date_sk,2431456)),And(GreaterThanOrEqual(d_date_sk,2431791),LessThanOrEqual(d_date_sk,2431821))),And(GreaterThanOrEqual(d_date_sk,2432156),LessThanOrEqual(d_date_sk,2432186))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2432521),LessThanOrEqual(d_date_sk,2432551)),And(GreaterThanOrEqual(d_date_sk,2432887),LessThanOrEqual(d_date_sk,2432917))),And(GreaterThanOrEqual(d_date_sk,2433252),LessThanOrEqual(d_date_sk,2433282))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2433617),LessThanOrEqual(d_date_sk,2433647)),And(GreaterThanOrEqual(d_date_sk,2433982),LessThanOrEqual(d_date_sk,2434012))),Or(And(GreaterThanOrEqual(d_date_sk,2434348),LessThanOrEqual(d_date_sk,2434378)),And(GreaterThanOrEqual(d_date_sk,2434713),LessThanOrEqual(d_date_sk,2434743)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2435078),LessThanOrEqual(d_date_sk,2435108)),And(GreaterThanOrEqual(d_date_sk,2435443),LessThanOrEqual(d_date_sk,2435473))),And(GreaterThanOrEqual(d_date_sk,2435809),LessThanOrEqual(d_date_sk,2435839)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2436174),LessThanOrEqual(d_date_sk,2436204)),And(GreaterThanOrEqual(d_date_sk,2436539),LessThanOrEqual(d_date_sk,2436569))),And(GreaterThanOrEqual(d_date_sk,2436904),LessThanOrEqual(d_date_sk,2436934))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2437270),LessThanOrEqual(d_date_sk,2437300)),And(GreaterThanOrEqual(d_date_sk,2437635),LessThanOrEqual(d_date_sk,2437665))),And(GreaterThanOrEqual(d_date_sk,2438000),LessThanOrEqual(d_date_sk,2438030))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2438365),LessThanOrEqual(d_date_sk,2438395)),And(GreaterThanOrEqual(d_date_sk,2438731),LessThanOrEqual(d_date_sk,2438761))),And(GreaterThanOrEqual(d_date_sk,2439096),LessThanOrEqual(d_date_sk,2439126))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2439461),LessThanOrEqual(d_date_sk,2439491)),And(GreaterThanOrEqual(d_date_sk,2439826),LessThanOrEqual(d_date_sk,2439856))),And(GreaterThanOrEqual(d_date_sk,2440192),LessThanOrEqual(d_date_sk,2440222)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2440557),LessThanOrEqual(d_date_sk,2440587)),And(GreaterThanOrEqual(d_date_sk,2440922),LessThanOrEqual(d_date_sk,2440952))),And(GreaterThanOrEqual(d_date_sk,2441287),LessThanOrEqual(d_date_sk,2441317))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2441653),LessThanOrEqual(d_date_sk,2441683)),And(GreaterThanOrEqual(d_date_sk,2442018),LessThanOrEqual(d_date_sk,2442048))),And(GreaterThanOrEqual(d_date_sk,2442383),LessThanOrEqual(d_date_sk,2442413)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2442748),LessThanOrEqual(d_date_sk,2442778)),And(GreaterThanOrEqual(d_date_sk,2443114),LessThanOrEqual(d_date_sk,2443144))),Or(And(GreaterThanOrEqual(d_date_sk,2443479),LessThanOrEqual(d_date_sk,2443509)),And(GreaterThanOrEqual(d_date_sk,2443844),LessThanOrEqual(d_date_sk,2443874)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2444209),LessThanOrEqual(d_date_sk,2444239)),And(GreaterThanOrEqual(d_date_sk,2444575),LessThanOrEqual(d_date_sk,2444605))),And(GreaterThanOrEqual(d_date_sk,2444940),LessThanOrEqual(d_date_sk,2444970)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2445305),LessThanOrEqual(d_date_sk,2445335)),And(GreaterThanOrEqual(d_date_sk,2445670),LessThanOrEqual(d_date_sk,2445700))),And(GreaterThanOrEqual(d_date_sk,2446036),LessThanOrEqual(d_date_sk,2446066))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2446401),LessThanOrEqual(d_date_sk,2446431)),And(GreaterThanOrEqual(d_date_sk,2446766),LessThanOrEq
ual(d_date_sk,2446796))),And(GreaterThanOrEqual(d_date_sk,2447131),LessThanOrEqual(d_date_sk,2447161))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2447497),LessThanOrEqual(d_date_sk,2447527)),And(GreaterThanOrEqual(d_date_sk,2447862),LessThanOrEqual(d_date_sk,2447892))),And(GreaterThanOrEqual(d_date_sk,2448227),LessThanOrEqual(d_date_sk,2448257))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2448592),LessThanOrEqual(d_date_sk,2448622)),And(GreaterThanOrEqual(d_date_sk,2448958),LessThanOrEqual(d_date_sk,2448988))),And(GreaterThanOrEqual(d_date_sk,2449323),LessThanOrEqual(d_date_sk,2449353)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2449688),LessThanOrEqual(d_date_sk,2449718)),And(GreaterThanOrEqual(d_date_sk,2450053),LessThanOrEqual(d_date_sk,2450083))),And(GreaterThanOrEqual(d_date_sk,2450419),LessThanOrEqual(d_date_sk,2450449))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2450784),LessThanOrEqual(d_date_sk,2450814)),And(GreaterThanOrEqual(d_date_sk,2451149),LessThanOrEqual(d_date_sk,2451179))),And(GreaterThanOrEqual(d_date_sk,2451514),LessThanOrEqual(d_date_sk,2451544)))))))),Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2451880),LessThanOrEqual(d_date_sk,2451910)),And(GreaterThanOrEqual(d_date_sk,2452245),LessThanOrEqual(d_date_sk,2452275))),Or(And(GreaterThanOrEqual(d_date_sk,2452610),LessThanOrEqual(d_date_sk,2452640)),And(GreaterThanOrEqual(d_date_sk,2452975),LessThanOrEqual(d_date_sk,2453005)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2453341),LessThanOrEqual(d_date_sk,2453371)),And(GreaterThanOrEqual(d_date_sk,2453706),LessThanOrEqual(d_date_sk,2453736))),And(GreaterThanOrEqual(d_date_sk,2454071),LessThanOrEqual(d_date_sk,2454101)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2454436),LessThanOrEqual(d_date_sk,2454466)),And(GreaterThanOrEqual(d_date_sk,2454802),LessThanOrEqual(d_date_sk,2454832))),And(GreaterThanOrEqual(d_date_sk,2455167),LessThanOrEqual(d_date_sk,2455197))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2455532),LessThanOrEqual(d_date_sk,2455562)),And(GreaterThanOrEqual(d_date_sk,2455897),LessThanOrEqual(d_date_sk,2455927))),And(GreaterThanOrEqual(d_date_sk,2456263),LessThanOrEqual(d_date_sk,2456293))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2456628),LessThanOrEqual(d_date_sk,2456658)),And(GreaterThanOrEqual(d_date_sk,2456993),LessThanOrEqual(d_date_sk,2457023))),And(GreaterThanOrEqual(d_date_sk,2457358),LessThanOrEqual(d_date_sk,2457388))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2457724),LessThanOrEqual(d_date_sk,2457754)),And(GreaterThanOrEqual(d_date_sk,2458089),LessThanOrEqual(d_date_sk,2458119))),And(GreaterThanOrEqual(d_date_sk,2458454),LessThanOrEqual(d_date_sk,2458484)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2458819),LessThanOrEqual(d_date_sk,2458849)),And(GreaterThanOrEqual(d_date_sk,2459185),LessThanOrEqual(d_date_sk,2459215))),And(GreaterThanOrEqual(d_date_sk,2459550),LessThanOrEqual(d_date_sk,2459580))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2459915),LessThanOrEqual(d_date_sk,2459945)),And(GreaterThanOrEqual(d_date_sk,2460280),LessThanOrEqual(d_date_sk,2460310))),And(GreaterThanOrEqual(d_date_sk,2460646),LessThanOrEqual(d_date_sk,2460676)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2461011),LessThanOrEqual(d_date_sk,2461041)),And(GreaterThanOrEqual(d_date_sk,2461376),LessThanOrEqual(d_date_sk,2461406))),Or(And(GreaterThanOrEqual(d_date_sk,2461741),LessThanOrEqual(d_date_sk,2461771)),And(GreaterThanOrEqual(d_date_sk,2462107),LessThanOrEqual(d_date_sk,2462137)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2462472),LessThanOrEqual(d_date_sk,2462502
)),And(GreaterThanOrEqual(d_date_sk,2462837),LessThanOrEqual(d_date_sk,2462867))),And(GreaterThanOrEqual(d_date_sk,2463202),LessThanOrEqual(d_date_sk,2463232)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2463568),LessThanOrEqual(d_date_sk,2463598)),And(GreaterThanOrEqual(d_date_sk,2463933),LessThanOrEqual(d_date_sk,2463963))),And(GreaterThanOrEqual(d_date_sk,2464298),LessThanOrEqual(d_date_sk,2464328))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2464663),LessThanOrEqual(d_date_sk,2464693)),And(GreaterThanOrEqual(d_date_sk,2465029),LessThanOrEqual(d_date_sk,2465059))),And(GreaterThanOrEqual(d_date_sk,2465394),LessThanOrEqual(d_date_sk,2465424))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2465759),LessThanOrEqual(d_date_sk,2465789)),And(GreaterThanOrEqual(d_date_sk,2466124),LessThanOrEqual(d_date_sk,2466154))),And(GreaterThanOrEqual(d_date_sk,2466490),LessThanOrEqual(d_date_sk,2466520))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2466855),LessThanOrEqual(d_date_sk,2466885)),And(GreaterThanOrEqual(d_date_sk,2467220),LessThanOrEqual(d_date_sk,2467250))),And(GreaterThanOrEqual(d_date_sk,2467585),LessThanOrEqual(d_date_sk,2467615)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2467951),LessThanOrEqual(d_date_sk,2467981)),And(GreaterThanOrEqual(d_date_sk,2468316),LessThanOrEqual(d_date_sk,2468346))),And(GreaterThanOrEqual(d_date_sk,2468681),LessThanOrEqual(d_date_sk,2468711))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2469046),LessThanOrEqual(d_date_sk,2469076)),And(GreaterThanOrEqual(d_date_sk,2469412),LessThanOrEqual(d_date_sk,2469442))),And(GreaterThanOrEqual(d_date_sk,2469777),LessThanOrEqual(d_date_sk,2469807))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2470142),LessThanOrEqual(d_date_sk,2470172)),And(GreaterThanOrEqual(d_date_sk,2470507),LessThanOrEqual(d_date_sk,2470537))),Or(And(GreaterThanOrEqual(d_date_sk,2470873),LessThanOrEqual(d_date_sk,2470903)),And(GreaterThanOrEqual(d_date_sk,2471238),LessThanOrEqual(d_date_sk,2471268)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2471603),LessThanOrEqual(d_date_sk,2471633)),And(GreaterThanOrEqual(d_date_sk,2471968),LessThanOrEqual(d_date_sk,2471998))),And(GreaterThanOrEqual(d_date_sk,2472334),LessThanOrEqual(d_date_sk,2472364)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2472699),LessThanOrEqual(d_date_sk,2472729)),And(GreaterThanOrEqual(d_date_sk,2473064),LessThanOrEqual(d_date_sk,2473094))),And(GreaterThanOrEqual(d_date_sk,2473429),LessThanOrEqual(d_date_sk,2473459))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2473795),LessThanOrEqual(d_date_sk,2473825)),And(GreaterThanOrEqual(d_date_sk,2474160),LessThanOrEqual(d_date_sk,2474190))),And(GreaterThanOrEqual(d_date_sk,2474525),LessThanOrEqual(d_date_sk,2474555))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2474890),LessThanOrEqual(d_date_sk,2474920)),And(GreaterThanOrEqual(d_date_sk,2475256),LessThanOrEqual(d_date_sk,2475286))),And(GreaterThanOrEqual(d_date_sk,2475621),LessThanOrEqual(d_date_sk,2475651))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2475986),LessThanOrEqual(d_date_sk,2476016)),And(GreaterThanOrEqual(d_date_sk,2476351),LessThanOrEqual(d_date_sk,2476381))),And(GreaterThanOrEqual(d_date_sk,2476717),LessThanOrEqual(d_date_sk,2476747)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2477082),LessThanOrEqual(d_date_sk,2477112)),And(GreaterThanOrEqual(d_date_sk,2477447),LessThanOrEqual(d_date_sk,2477477))),And(GreaterThanOrEqual(d_date_sk,2477812),LessThanOrEqual(d_date_sk,2477842))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2478178),LessThanOrEqual(d_date_sk,2478208)),And(GreaterThanOrEqual(d_date_sk,2
478543),LessThanOrEqual(d_date_sk,2478573))),And(GreaterThanOrEqual(d_date_sk,2478908),LessThanOrEqual(d_date_sk,2478938)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2479273),LessThanOrEqual(d_date_sk,2479303)),And(GreaterThanOrEqual(d_date_sk,2479639),LessThanOrEqual(d_date_sk,2479669))),Or(And(GreaterThanOrEqual(d_date_sk,2480004),LessThanOrEqual(d_date_sk,2480034)),And(GreaterThanOrEqual(d_date_sk,2480369),LessThanOrEqual(d_date_sk,2480399)))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2480734),LessThanOrEqual(d_date_sk,2480764)),And(GreaterThanOrEqual(d_date_sk,2481100),LessThanOrEqual(d_date_sk,2481130))),And(GreaterThanOrEqual(d_date_sk,2481465),LessThanOrEqual(d_date_sk,2481495)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2481830),LessThanOrEqual(d_date_sk,2481860)),And(GreaterThanOrEqual(d_date_sk,2482195),LessThanOrEqual(d_date_sk,2482225))),And(GreaterThanOrEqual(d_date_sk,2482561),LessThanOrEqual(d_date_sk,2482591))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2482926),LessThanOrEqual(d_date_sk,2482956)),And(GreaterThanOrEqual(d_date_sk,2483291),LessThanOrEqual(d_date_sk,2483321))),And(GreaterThanOrEqual(d_date_sk,2483656),LessThanOrEqual(d_date_sk,2483686))))),Or(Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2484022),LessThanOrEqual(d_date_sk,2484052)),And(GreaterThanOrEqual(d_date_sk,2484387),LessThanOrEqual(d_date_sk,2484417))),And(GreaterThanOrEqual(d_date_sk,2484752),LessThanOrEqual(d_date_sk,2484782))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2485117),LessThanOrEqual(d_date_sk,2485147)),And(GreaterThanOrEqual(d_date_sk,2485483),LessThanOrEqual(d_date_sk,2485513))),And(GreaterThanOrEqual(d_date_sk,2485848),LessThanOrEqual(d_date_sk,2485878)))),Or(Or(Or(And(GreaterThanOrEqual(d_date_sk,2486213),LessThanOrEqual(d_date_sk,2486243)),And(GreaterThanOrEqual(d_date_sk,2486578),LessThanOrEqual(d_date_sk,2486608))),And(GreaterThanOrEqual(d_date_sk,2486944),LessThanOrEqual(d_date_sk,2486974))),Or(Or(And(GreaterThanOrEqual(d_date_sk,2487309),LessThanOrEqual(d_date_sk,2487339)),And(GreaterThanOrEqual(d_date_sk,2487674),LessThanOrEqual(d_date_sk,2487704))),And(GreaterThanOrEqual(d_date_sk,2488039),LessThanOrEqual(d_date_sk,2488069))))))))), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3]

(5) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_net_profit#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters:
[Or(Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2415355),LessThanOrEqual(ss_sold_date_sk,2415385)),And(GreaterThanOrEqual(ss_sold_date_sk,2415720),LessThanOrEqual(ss_sold_date_sk,2415750))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2416085),LessThanOrEqual(ss_sold_date_sk,2416115)),And(GreaterThanOrEqual(ss_sold_date_sk,2416450),LessThanOrEqual(ss_sold_date_sk,2416480)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2416816),LessThanOrEqual(ss_sold_date_sk,2416846)),And(GreaterThanOrEqual(ss_sold_date_sk,2417181),LessThanOrEqual(ss_sold_date_sk,2417211))),And(GreaterThanOrEqual(ss_sold_date_sk,2417546),LessThanOrEqual(ss_sold_date_sk,2417576)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2417911),LessThanOrEqual(ss_sold_date_sk,2417941)),And(GreaterThanOrEqual(ss_sold_date_sk,2418277),LessThanOrEqual(ss_sold_date_sk,2418307))),And(GreaterThanOrEqual(ss_sold_date_sk,2418642),LessThanOrEqual(ss_sold_date_sk,2418672))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2419007),LessThanOrEqual(ss_sold_date_sk,2419037)),And(GreaterThanOrEqual(ss_sold_date_sk,2419372),LessThanOrEqual(ss_sold_date_sk,2419402))),And(GreaterThanOrEqual(ss_sold_date_sk,2419738),LessThanOrEqual(ss_sold_date_sk,2419768))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2420103),LessThanOrEqual(ss_sold_date_sk,2420133)),And(GreaterThanOrEqual(ss_sold_date_sk,2420468),LessThanOrEqual(ss_sold_date_sk,2420498))),And(GreaterThanOrEqual(ss_sold_date_sk,2420833),LessThanOrEqual(ss_sold_date_sk,2420863))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2421199),LessThanOrEqual(ss_sold_date_sk,2421229)),And(GreaterThanOrEqual(ss_sold_date_sk,2421564),LessThanOrEqual(ss_sold_date_sk,2421594))),And(GreaterThanOrEqual(ss_sold_date_sk,2421929),LessThanOrEqual(ss_sold_date_sk,2421959)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2422294),LessThanOrEqual(ss_sold_date_sk,2422324)),And(GreaterThanOrEqual(ss_sold_date_sk,2422660),LessThanOrEqual(ss_sold_date_sk,2422690))),And(GreaterThanOrEqual(ss_sold_date_sk,2423025),LessThanOrEqual(ss_sold_date_sk,2423055))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2423390),LessThanOrEqual(ss_sold_date_sk,2423420)),And(GreaterThanOrEqual(ss_sold_date_sk,2423755),LessThanOrEqual(ss_sold_date_sk,2423785))),And(GreaterThanOrEqual(ss_sold_date_sk,2424121),LessThanOrEqual(ss_sold_date_sk,2424151)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2424486),LessThanOrEqual(ss_sold_date_sk,2424516)),And(GreaterThanOrEqual(ss_sold_date_sk,2424851),LessThanOrEqual(ss_sold_date_sk,2424881))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2425216),LessThanOrEqual(ss_sold_date_sk,2425246)),And(GreaterThanOrEqual(ss_sold_date_sk,2425582),LessThanOrEqual(ss_sold_date_sk,2425612)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2425947),LessThanOrEqual(ss_sold_date_sk,2425977)),And(GreaterThanOrEqual(ss_sold_date_sk,2426312),LessThanOrEqual(ss_sold_date_sk,2426342))),And(GreaterThanOrEqual(ss_sold_date_sk,2426677),LessThanOrEqual(ss_sold_date_sk,2426707)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2427043),LessThanOrEqual(ss_sold_date_sk,2427073)),And(GreaterThanOrEqual(ss_sold_date_sk,2427408),LessThanOrEqual(ss_sold_date_sk,2427438))),And(GreaterThanOrEqual(ss_sold_date_sk,2427773),LessThanOrEqual(ss_sold_date_sk,2427803))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2428138),LessThanOrEqual(ss_sold_date_sk,2428168)),And(GreaterThanOrEqual(ss_sold_date_sk,2428504),LessThanOrEqual(ss_sold_date_sk,2428534))),And(GreaterThanOrEqual(ss_sold_date_sk,2428869),LessThanOrEqual(ss_sold
_date_sk,2428899))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2429234),LessThanOrEqual(ss_sold_date_sk,2429264)),And(GreaterThanOrEqual(ss_sold_date_sk,2429599),LessThanOrEqual(ss_sold_date_sk,2429629))),And(GreaterThanOrEqual(ss_sold_date_sk,2429965),LessThanOrEqual(ss_sold_date_sk,2429995))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2430330),LessThanOrEqual(ss_sold_date_sk,2430360)),And(GreaterThanOrEqual(ss_sold_date_sk,2430695),LessThanOrEqual(ss_sold_date_sk,2430725))),And(GreaterThanOrEqual(ss_sold_date_sk,2431060),LessThanOrEqual(ss_sold_date_sk,2431090)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2431426),LessThanOrEqual(ss_sold_date_sk,2431456)),And(GreaterThanOrEqual(ss_sold_date_sk,2431791),LessThanOrEqual(ss_sold_date_sk,2431821))),And(GreaterThanOrEqual(ss_sold_date_sk,2432156),LessThanOrEqual(ss_sold_date_sk,2432186))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2432521),LessThanOrEqual(ss_sold_date_sk,2432551)),And(GreaterThanOrEqual(ss_sold_date_sk,2432887),LessThanOrEqual(ss_sold_date_sk,2432917))),And(GreaterThanOrEqual(ss_sold_date_sk,2433252),LessThanOrEqual(ss_sold_date_sk,2433282))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2433617),LessThanOrEqual(ss_sold_date_sk,2433647)),And(GreaterThanOrEqual(ss_sold_date_sk,2433982),LessThanOrEqual(ss_sold_date_sk,2434012))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2434348),LessThanOrEqual(ss_sold_date_sk,2434378)),And(GreaterThanOrEqual(ss_sold_date_sk,2434713),LessThanOrEqual(ss_sold_date_sk,2434743)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2435078),LessThanOrEqual(ss_sold_date_sk,2435108)),And(GreaterThanOrEqual(ss_sold_date_sk,2435443),LessThanOrEqual(ss_sold_date_sk,2435473))),And(GreaterThanOrEqual(ss_sold_date_sk,2435809),LessThanOrEqual(ss_sold_date_sk,2435839)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2436174),LessThanOrEqual(ss_sold_date_sk,2436204)),And(GreaterThanOrEqual(ss_sold_date_sk,2436539),LessThanOrEqual(ss_sold_date_sk,2436569))),And(GreaterThanOrEqual(ss_sold_date_sk,2436904),LessThanOrEqual(ss_sold_date_sk,2436934))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2437270),LessThanOrEqual(ss_sold_date_sk,2437300)),And(GreaterThanOrEqual(ss_sold_date_sk,2437635),LessThanOrEqual(ss_sold_date_sk,2437665))),And(GreaterThanOrEqual(ss_sold_date_sk,2438000),LessThanOrEqual(ss_sold_date_sk,2438030))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2438365),LessThanOrEqual(ss_sold_date_sk,2438395)),And(GreaterThanOrEqual(ss_sold_date_sk,2438731),LessThanOrEqual(ss_sold_date_sk,2438761))),And(GreaterThanOrEqual(ss_sold_date_sk,2439096),LessThanOrEqual(ss_sold_date_sk,2439126))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2439461),LessThanOrEqual(ss_sold_date_sk,2439491)),And(GreaterThanOrEqual(ss_sold_date_sk,2439826),LessThanOrEqual(ss_sold_date_sk,2439856))),And(GreaterThanOrEqual(ss_sold_date_sk,2440192),LessThanOrEqual(ss_sold_date_sk,2440222)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2440557),LessThanOrEqual(ss_sold_date_sk,2440587)),And(GreaterThanOrEqual(ss_sold_date_sk,2440922),LessThanOrEqual(ss_sold_date_sk,2440952))),And(GreaterThanOrEqual(ss_sold_date_sk,2441287),LessThanOrEqual(ss_sold_date_sk,2441317))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2441653),LessThanOrEqual(ss_sold_date_sk,2441683)),And(GreaterThanOrEqual(ss_sold_date_sk,2442018),LessThanOrEqual(ss_sold_date_sk,2442048))),And(GreaterThanOrEqual(ss_sold_date_sk,2442383),LessThanOrEqual(ss_sold_date_sk,2442413)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2442
748),LessThanOrEqual(ss_sold_date_sk,2442778)),And(GreaterThanOrEqual(ss_sold_date_sk,2443114),LessThanOrEqual(ss_sold_date_sk,2443144))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2443479),LessThanOrEqual(ss_sold_date_sk,2443509)),And(GreaterThanOrEqual(ss_sold_date_sk,2443844),LessThanOrEqual(ss_sold_date_sk,2443874)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2444209),LessThanOrEqual(ss_sold_date_sk,2444239)),And(GreaterThanOrEqual(ss_sold_date_sk,2444575),LessThanOrEqual(ss_sold_date_sk,2444605))),And(GreaterThanOrEqual(ss_sold_date_sk,2444940),LessThanOrEqual(ss_sold_date_sk,2444970)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2445305),LessThanOrEqual(ss_sold_date_sk,2445335)),And(GreaterThanOrEqual(ss_sold_date_sk,2445670),LessThanOrEqual(ss_sold_date_sk,2445700))),And(GreaterThanOrEqual(ss_sold_date_sk,2446036),LessThanOrEqual(ss_sold_date_sk,2446066))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2446401),LessThanOrEqual(ss_sold_date_sk,2446431)),And(GreaterThanOrEqual(ss_sold_date_sk,2446766),LessThanOrEqual(ss_sold_date_sk,2446796))),And(GreaterThanOrEqual(ss_sold_date_sk,2447131),LessThanOrEqual(ss_sold_date_sk,2447161))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2447497),LessThanOrEqual(ss_sold_date_sk,2447527)),And(GreaterThanOrEqual(ss_sold_date_sk,2447862),LessThanOrEqual(ss_sold_date_sk,2447892))),And(GreaterThanOrEqual(ss_sold_date_sk,2448227),LessThanOrEqual(ss_sold_date_sk,2448257))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2448592),LessThanOrEqual(ss_sold_date_sk,2448622)),And(GreaterThanOrEqual(ss_sold_date_sk,2448958),LessThanOrEqual(ss_sold_date_sk,2448988))),And(GreaterThanOrEqual(ss_sold_date_sk,2449323),LessThanOrEqual(ss_sold_date_sk,2449353)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2449688),LessThanOrEqual(ss_sold_date_sk,2449718)),And(GreaterThanOrEqual(ss_sold_date_sk,2450053),LessThanOrEqual(ss_sold_date_sk,2450083))),And(GreaterThanOrEqual(ss_sold_date_sk,2450419),LessThanOrEqual(ss_sold_date_sk,2450449))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2450784),LessThanOrEqual(ss_sold_date_sk,2450814)),And(GreaterThanOrEqual(ss_sold_date_sk,2451149),LessThanOrEqual(ss_sold_date_sk,2451179))),And(GreaterThanOrEqual(ss_sold_date_sk,2451514),LessThanOrEqual(ss_sold_date_sk,2451544)))))))),Or(Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2451880),LessThanOrEqual(ss_sold_date_sk,2451910)),And(GreaterThanOrEqual(ss_sold_date_sk,2452245),LessThanOrEqual(ss_sold_date_sk,2452275))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2452610),LessThanOrEqual(ss_sold_date_sk,2452640)),And(GreaterThanOrEqual(ss_sold_date_sk,2452975),LessThanOrEqual(ss_sold_date_sk,2453005)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2453341),LessThanOrEqual(ss_sold_date_sk,2453371)),And(GreaterThanOrEqual(ss_sold_date_sk,2453706),LessThanOrEqual(ss_sold_date_sk,2453736))),And(GreaterThanOrEqual(ss_sold_date_sk,2454071),LessThanOrEqual(ss_sold_date_sk,2454101)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2454436),LessThanOrEqual(ss_sold_date_sk,2454466)),And(GreaterThanOrEqual(ss_sold_date_sk,2454802),LessThanOrEqual(ss_sold_date_sk,2454832))),And(GreaterThanOrEqual(ss_sold_date_sk,2455167),LessThanOrEqual(ss_sold_date_sk,2455197))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2455532),LessThanOrEqual(ss_sold_date_sk,2455562)),And(GreaterThanOrEqual(ss_sold_date_sk,2455897),LessThanOrEqual(ss_sold_date_sk,2455927))),And(GreaterThanOrEqual(ss_sold_date_sk,2456263),LessThanOrEqual(ss_sold_date_sk,2456293))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_
sold_date_sk,2456628),LessThanOrEqual(ss_sold_date_sk,2456658)),And(GreaterThanOrEqual(ss_sold_date_sk,2456993),LessThanOrEqual(ss_sold_date_sk,2457023))),And(GreaterThanOrEqual(ss_sold_date_sk,2457358),LessThanOrEqual(ss_sold_date_sk,2457388))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2457724),LessThanOrEqual(ss_sold_date_sk,2457754)),And(GreaterThanOrEqual(ss_sold_date_sk,2458089),LessThanOrEqual(ss_sold_date_sk,2458119))),And(GreaterThanOrEqual(ss_sold_date_sk,2458454),LessThanOrEqual(ss_sold_date_sk,2458484)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2458819),LessThanOrEqual(ss_sold_date_sk,2458849)),And(GreaterThanOrEqual(ss_sold_date_sk,2459185),LessThanOrEqual(ss_sold_date_sk,2459215))),And(GreaterThanOrEqual(ss_sold_date_sk,2459550),LessThanOrEqual(ss_sold_date_sk,2459580))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2459915),LessThanOrEqual(ss_sold_date_sk,2459945)),And(GreaterThanOrEqual(ss_sold_date_sk,2460280),LessThanOrEqual(ss_sold_date_sk,2460310))),And(GreaterThanOrEqual(ss_sold_date_sk,2460646),LessThanOrEqual(ss_sold_date_sk,2460676)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2461011),LessThanOrEqual(ss_sold_date_sk,2461041)),And(GreaterThanOrEqual(ss_sold_date_sk,2461376),LessThanOrEqual(ss_sold_date_sk,2461406))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2461741),LessThanOrEqual(ss_sold_date_sk,2461771)),And(GreaterThanOrEqual(ss_sold_date_sk,2462107),LessThanOrEqual(ss_sold_date_sk,2462137)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2462472),LessThanOrEqual(ss_sold_date_sk,2462502)),And(GreaterThanOrEqual(ss_sold_date_sk,2462837),LessThanOrEqual(ss_sold_date_sk,2462867))),And(GreaterThanOrEqual(ss_sold_date_sk,2463202),LessThanOrEqual(ss_sold_date_sk,2463232)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2463568),LessThanOrEqual(ss_sold_date_sk,2463598)),And(GreaterThanOrEqual(ss_sold_date_sk,2463933),LessThanOrEqual(ss_sold_date_sk,2463963))),And(GreaterThanOrEqual(ss_sold_date_sk,2464298),LessThanOrEqual(ss_sold_date_sk,2464328))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2464663),LessThanOrEqual(ss_sold_date_sk,2464693)),And(GreaterThanOrEqual(ss_sold_date_sk,2465029),LessThanOrEqual(ss_sold_date_sk,2465059))),And(GreaterThanOrEqual(ss_sold_date_sk,2465394),LessThanOrEqual(ss_sold_date_sk,2465424))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2465759),LessThanOrEqual(ss_sold_date_sk,2465789)),And(GreaterThanOrEqual(ss_sold_date_sk,2466124),LessThanOrEqual(ss_sold_date_sk,2466154))),And(GreaterThanOrEqual(ss_sold_date_sk,2466490),LessThanOrEqual(ss_sold_date_sk,2466520))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2466855),LessThanOrEqual(ss_sold_date_sk,2466885)),And(GreaterThanOrEqual(ss_sold_date_sk,2467220),LessThanOrEqual(ss_sold_date_sk,2467250))),And(GreaterThanOrEqual(ss_sold_date_sk,2467585),LessThanOrEqual(ss_sold_date_sk,2467615)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2467951),LessThanOrEqual(ss_sold_date_sk,2467981)),And(GreaterThanOrEqual(ss_sold_date_sk,2468316),LessThanOrEqual(ss_sold_date_sk,2468346))),And(GreaterThanOrEqual(ss_sold_date_sk,2468681),LessThanOrEqual(ss_sold_date_sk,2468711))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2469046),LessThanOrEqual(ss_sold_date_sk,2469076)),And(GreaterThanOrEqual(ss_sold_date_sk,2469412),LessThanOrEqual(ss_sold_date_sk,2469442))),And(GreaterThanOrEqual(ss_sold_date_sk,2469777),LessThanOrEqual(ss_sold_date_sk,2469807))))))),Or(Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2470142),LessThanOrEqual(ss_sold_date_sk,2470172)),And(GreaterTh
anOrEqual(ss_sold_date_sk,2470507),LessThanOrEqual(ss_sold_date_sk,2470537))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2470873),LessThanOrEqual(ss_sold_date_sk,2470903)),And(GreaterThanOrEqual(ss_sold_date_sk,2471238),LessThanOrEqual(ss_sold_date_sk,2471268)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2471603),LessThanOrEqual(ss_sold_date_sk,2471633)),And(GreaterThanOrEqual(ss_sold_date_sk,2471968),LessThanOrEqual(ss_sold_date_sk,2471998))),And(GreaterThanOrEqual(ss_sold_date_sk,2472334),LessThanOrEqual(ss_sold_date_sk,2472364)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2472699),LessThanOrEqual(ss_sold_date_sk,2472729)),And(GreaterThanOrEqual(ss_sold_date_sk,2473064),LessThanOrEqual(ss_sold_date_sk,2473094))),And(GreaterThanOrEqual(ss_sold_date_sk,2473429),LessThanOrEqual(ss_sold_date_sk,2473459))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2473795),LessThanOrEqual(ss_sold_date_sk,2473825)),And(GreaterThanOrEqual(ss_sold_date_sk,2474160),LessThanOrEqual(ss_sold_date_sk,2474190))),And(GreaterThanOrEqual(ss_sold_date_sk,2474525),LessThanOrEqual(ss_sold_date_sk,2474555))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2474890),LessThanOrEqual(ss_sold_date_sk,2474920)),And(GreaterThanOrEqual(ss_sold_date_sk,2475256),LessThanOrEqual(ss_sold_date_sk,2475286))),And(GreaterThanOrEqual(ss_sold_date_sk,2475621),LessThanOrEqual(ss_sold_date_sk,2475651))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2475986),LessThanOrEqual(ss_sold_date_sk,2476016)),And(GreaterThanOrEqual(ss_sold_date_sk,2476351),LessThanOrEqual(ss_sold_date_sk,2476381))),And(GreaterThanOrEqual(ss_sold_date_sk,2476717),LessThanOrEqual(ss_sold_date_sk,2476747)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2477082),LessThanOrEqual(ss_sold_date_sk,2477112)),And(GreaterThanOrEqual(ss_sold_date_sk,2477447),LessThanOrEqual(ss_sold_date_sk,2477477))),And(GreaterThanOrEqual(ss_sold_date_sk,2477812),LessThanOrEqual(ss_sold_date_sk,2477842))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2478178),LessThanOrEqual(ss_sold_date_sk,2478208)),And(GreaterThanOrEqual(ss_sold_date_sk,2478543),LessThanOrEqual(ss_sold_date_sk,2478573))),And(GreaterThanOrEqual(ss_sold_date_sk,2478908),LessThanOrEqual(ss_sold_date_sk,2478938)))))),Or(Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2479273),LessThanOrEqual(ss_sold_date_sk,2479303)),And(GreaterThanOrEqual(ss_sold_date_sk,2479639),LessThanOrEqual(ss_sold_date_sk,2479669))),Or(And(GreaterThanOrEqual(ss_sold_date_sk,2480004),LessThanOrEqual(ss_sold_date_sk,2480034)),And(GreaterThanOrEqual(ss_sold_date_sk,2480369),LessThanOrEqual(ss_sold_date_sk,2480399)))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2480734),LessThanOrEqual(ss_sold_date_sk,2480764)),And(GreaterThanOrEqual(ss_sold_date_sk,2481100),LessThanOrEqual(ss_sold_date_sk,2481130))),And(GreaterThanOrEqual(ss_sold_date_sk,2481465),LessThanOrEqual(ss_sold_date_sk,2481495)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2481830),LessThanOrEqual(ss_sold_date_sk,2481860)),And(GreaterThanOrEqual(ss_sold_date_sk,2482195),LessThanOrEqual(ss_sold_date_sk,2482225))),And(GreaterThanOrEqual(ss_sold_date_sk,2482561),LessThanOrEqual(ss_sold_date_sk,2482591))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2482926),LessThanOrEqual(ss_sold_date_sk,2482956)),And(GreaterThanOrEqual(ss_sold_date_sk,2483291),LessThanOrEqual(ss_sold_date_sk,2483321))),And(GreaterThanOrEqual(ss_sold_date_sk,2483656),LessThanOrEqual(ss_sold_date_sk,2483686))))),Or(Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2484022),LessThanOrEqual(ss_sold_date_sk,2484052)),And(
GreaterThanOrEqual(ss_sold_date_sk,2484387),LessThanOrEqual(ss_sold_date_sk,2484417))),And(GreaterThanOrEqual(ss_sold_date_sk,2484752),LessThanOrEqual(ss_sold_date_sk,2484782))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2485117),LessThanOrEqual(ss_sold_date_sk,2485147)),And(GreaterThanOrEqual(ss_sold_date_sk,2485483),LessThanOrEqual(ss_sold_date_sk,2485513))),And(GreaterThanOrEqual(ss_sold_date_sk,2485848),LessThanOrEqual(ss_sold_date_sk,2485878)))),Or(Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2486213),LessThanOrEqual(ss_sold_date_sk,2486243)),And(GreaterThanOrEqual(ss_sold_date_sk,2486578),LessThanOrEqual(ss_sold_date_sk,2486608))),And(GreaterThanOrEqual(ss_sold_date_sk,2486944),LessThanOrEqual(ss_sold_date_sk,2486974))),Or(Or(And(GreaterThanOrEqual(ss_sold_date_sk,2487309),LessThanOrEqual(ss_sold_date_sk,2487339)),And(GreaterThanOrEqual(ss_sold_date_sk,2487674),LessThanOrEqual(ss_sold_date_sk,2487704))),And(GreaterThanOrEqual(ss_sold_date_sk,2488039),LessThanOrEqual(ss_sold_date_sk,2488069))))))))), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#4, ss_item_sk#5, ss_net_profi (11) Scan parquet default.item Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manufact_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,436), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt index 71928db20bcea..ac1fca4f67a02 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt @@ -43,7 +43,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450816), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -57,7 +57,7 @@ Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2450816) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_dom#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(And(GreaterThanOrEqual(d_dom,1),LessThanOrEqual(d_dom,3)),And(GreaterThanOrEqual(d_dom,25),LessThanOrEqual(d_dom,28))), In(d_year, [1998,1999,2000]), GreaterThanOrEqual(d_date_sk,2450816), LessThanOrEqual(d_date_sk,2451910), IsNotNull(d_date_sk)] ReadSchema: struct @@ -88,7 +88,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s (11) Scan parquet default.store Output 
[2]: [s_store_sk#10, s_county#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_county, [Saginaw County,Sumner County,Appanoose County,Daviess County,Fairfield County,Raleigh County,Ziebach County,Williamson County]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -119,7 +119,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, (18) Scan parquet default.household_demographics Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -180,7 +180,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (31) Scan parquet default.customer Output [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt index 4da22519a1466..898d37403d6a0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt @@ -40,7 +40,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450816), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -54,7 +54,7 @@ Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2450816) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_dom#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(And(GreaterThanOrEqual(d_dom,1),LessThanOrEqual(d_dom,3)),And(GreaterThanOrEqual(d_dom,25),LessThanOrEqual(d_dom,28))), In(d_year, [1998,1999,2000]), GreaterThanOrEqual(d_date_sk,2450816), LessThanOrEqual(d_date_sk,2451910), IsNotNull(d_date_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s (11) Scan parquet 
default.store Output [2]: [s_store_sk#10, s_county#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_county, [Saginaw County,Sumner County,Appanoose County,Daviess County,Fairfield County,Raleigh County,Ziebach County,Williamson County]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, (18) Scan parquet default.household_demographics Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -169,7 +169,7 @@ Condition : ((cnt#22 >= 15) AND (cnt#22 <= 20)) (29) Scan parquet default.customer Output [5]: [c_customer_sk#23, c_salutation#24, c_first_name#25, c_last_name#26, c_preferred_cust_flag#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42.sf100/explain.txt index 5f93b5077a921..3feb5c7308cf5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42.sf100/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2451149), IsNotNull(d_date_sk), LessThanOrEqual(d_date_sk,2451179)] ReadSchema: struct @@ -47,7 +47,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451149), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales (11) Scan parquet default.item Output [4]: [i_item_sk#8, i_category_id#9, i_category#10, i_manager_id#11] Batched: true 
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42/explain.txt index 092b388c03b5c..86ab1688b8243 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q42/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), LessThanOrEqual(d_date_sk,2451179), GreaterThanOrEqual(d_date_sk,2451149), IsNotNull(d_date_sk)] ReadSchema: struct @@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3] (5) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451149), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales (11) Scan parquet default.item Output [4]: [i_item_sk#8, i_category_id#9, i_category#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43.sf100/explain.txt index 5e0761f2ebb7b..d53cc17819ccd 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43.sf100/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_day_name#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2450816), LessThanOrEqual(d_date_sk,2451179), IsNotNull(d_date_sk)] ReadSchema: struct @@ -47,7 +47,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet 
default.store_sales Output [3]: [ss_sold_date_sk#5, ss_store_sk#6, ss_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450816), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_day_name#3, ss_sold_date_sk#5, ss_store_sk#6, ss_sale (11) Scan parquet default.store Output [4]: [s_store_sk#8, s_store_id#9, s_store_name#10, s_gmt_offset#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_gmt_offset), EqualTo(s_gmt_offset,-5.00), IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43/explain.txt index c5db98d8ee408..d2ab43b3382f0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q43/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_day_name#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2450816), LessThanOrEqual(d_date_sk,2451179), IsNotNull(d_date_sk)] ReadSchema: struct @@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_day_name#3] (5) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#4, ss_store_sk#5, ss_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450816), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_day_name#3, ss_sold_date_sk#4, ss_store_sk#5, ss_sale (11) Scan parquet default.store Output [4]: [s_store_sk#8, s_store_id#9, s_store_name#10, s_gmt_offset#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_gmt_offset), EqualTo(s_gmt_offset,-5.00), IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46.sf100/explain.txt index 872aae35fcaa1..7e82e34c4c637 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46.sf100/explain.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46.sf100/explain.txt @@ -55,7 +55,7 @@ TakeOrderedAndProject (51) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [In(ss_sold_date_sk, [2451790,2451609,2451294,2451658,2452099,2451482,2451700,2452035,2452274,2451258,2451847,2451714,2451937,2451860,2451601,2451573,2451686,2452008,2451454,2451882,2451832,2452259,2451671,2451903,2451497,2452162,2451322,2451517,2451434,2451273,2451405,2452105,2451924,2452050,2452126,2452203,2451818,2451559,2451853,2451238,2451209,2451357,2451959,2452239,2451608,2452141,2452252,2451623,2451867,2451504,2451910,2452232,2451874,2451581,2451329,2451223,2451783,2452267,2452042,2451895,2451986,2452091,2451693,2451265,2451678,2451825,2451244,2451490,2451287,2451419,2451546,2451245,2451713,2452070,2451189,2451804,2451468,2451525,2451902,2452077,2452161,2451378,2451567,2451931,2451699,2451251,2451840,2452253,2451938,2451510,2452231,2452036,2451616,2451230,2452112,2451846,2451966,2451538,2451819,2452140,2452183,2451496,2451791,2451595,2451574,2451363,2451994,2451917,2451602,2452273,2451237,2451350,2451685,2451259,2451286,2451972,2452224,2451370,2452245,2451643,2451993,2451315,2451301,2451560,2451433,2452225,2451532,2451755,2451854,2451545,2451210,2451587,2451987,2451447,2452197,2451552,2451896,2451679,2452147,2451735,2452022,2451707,2451868,2451398,2451777,2451181,2451503,2451839,2452175,2451441,2452154,2452029,2452196,2451952,2451805,2451965,2451539,2452001,2451833,2451392,2451524,2451461,2452133,2451448,2451307,2451615,2451769,2451412,2451349,2451651,2451763,2451203,2452064,2451980,2451748,2451637,2452182,2451279,2451231,2451734,2451692,2452071,2451336,2451300,2451727,2451630,2452189,2451875,2451973,2451328,2452084,2451399,2451944,2452204,2451385,2451776,2451384,2451272,2451812,2451749,2451566,2451182,2451945,2451420,2451930,2452057,2451756,2451644,2451314,2451364,2452007,2451798,2451475,2452015,2451440,2452000,2451588,2452148,2451195,2452217,2451371,2452176,2451531,2452134,2452211,2451462,2451188,2451741,2452119,2451342,2451580,2451672,2451889,2451280,2451406,2451293,2451217,2452049,2452106,2451321,2451335,2451483,2452260,2451657,2451979,2451518,2451629,2451728,2451923,2451861,2451951,2452246,2451455,2451356,2451224,2452210,2452021,2451427,2451202,2452098,2452168,2451553,2451391,2451706,2452155,2451196,2451770,2452127,2451762,2452078,2451958,2451721,2451665,2452120,2451252,2452085,2452092,2451476,2452218,2452169,2451797,2451650,2451881,2451511,2451469,2451888,2452043,2452266,2451664,2452014,2451343,2452056,2452190,2452063,2451636,2451742,2451811,2451720,2451308,2451489,2451413,2451216,2451594,2452238,2451784,2451426,2451622,2451916,2452113,2451909,2451266,2451826,2451377,2452028]), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -69,7 +69,7 @@ Condition : (((((ss_sold_date_sk#1 INSET (2451790,2451609,2451294,2451658,245209 (4) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_dow#11] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_dow, [6,0]), In(d_year, [1999,2000,2001]), In(d_date_sk, [2451790,2451609,2451294,2451658,2452099,2451482,2451700,2452035,2452274,2451258,2451847,2451714,2451937,2451860,2451601,2451573,2451686,2452008,2451454,2451882,2451832,2452259,2451671,2451903,2451497,2452162,2451322,2451517,2451434,2451273,2451405,2452105,2451924,2452050,2452126,2452203,2451818,2451559,2451853,2451238,2451209,2451357,2451959,2452239,2451608,2452141,2452252,2451623,2451867,2451504,2451910,2452232,2451874,2451581,2451329,2451223,2451783,2452267,2452042,2451895,2451986,2452091,2451693,2451265,2451678,2451825,2451244,2451490,2451287,2451419,2451546,2451245,2451713,2452070,2451189,2451804,2451468,2451525,2451902,2452077,2452161,2451378,2451567,2451931,2451699,2451251,2451840,2452253,2451938,2451510,2452231,2452036,2451616,2451230,2452112,2451846,2451966,2451538,2451819,2452140,2452183,2451496,2451791,2451595,2451574,2451363,2451994,2451917,2451602,2452273,2451237,2451350,2451685,2451259,2451286,2451972,2452224,2451370,2452245,2451643,2451993,2451315,2451301,2451560,2451433,2452225,2451532,2451755,2451854,2451545,2451210,2451587,2451987,2451447,2452197,2451552,2451896,2451679,2452147,2451735,2452022,2451707,2451868,2451398,2451777,2451181,2451503,2451839,2452175,2451441,2452154,2452029,2452196,2451952,2451805,2451965,2451539,2452001,2451833,2451392,2451524,2451461,2452133,2451448,2451307,2451615,2451769,2451412,2451349,2451651,2451763,2451203,2452064,2451980,2451748,2451637,2452182,2451279,2451231,2451734,2451692,2452071,2451336,2451300,2451727,2451630,2452189,2451875,2451973,2451328,2452084,2451399,2451944,2452204,2451385,2451776,2451384,2451272,2451812,2451749,2451566,2451182,2451945,2451420,2451930,2452057,2451756,2451644,2451314,2451364,2452007,2451798,2451475,2452015,2451440,2452000,2451588,2452148,2451195,2452217,2451371,2452176,2451531,2452134,2452211,2451462,2451188,2451741,2452119,2451342,2451580,2451672,2451889,2451280,2451406,2451293,2451217,2452049,2452106,2451321,2451335,2451483,2452260,2451657,2451979,2451518,2451629,2451728,2451923,2451861,2451951,2452246,2451455,2451356,2451224,2452210,2452021,2451427,2451202,2452098,2452168,2451553,2451391,2451706,2452155,2451196,2451770,2452127,2451762,2452078,2451958,2451721,2451665,2452120,2451252,2452085,2452092,2451476,2452218,2452169,2451797,2451650,2451881,2451511,2451469,2451888,2452043,2452266,2451664,2452014,2451343,2452056,2452190,2452063,2451636,2451742,2451811,2451720,2451308,2451489,2451413,2451216,2451594,2452238,2451784,2451426,2451622,2451916,2452113,2451909,2451266,2451826,2451377,2452028]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -100,7 +100,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss (11) Scan parquet default.store Output [2]: [s_store_sk#13, s_city#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_city, [Midway,Concord,Spring Hill,Brownsville,Greenville]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic (18) Scan parquet default.household_demographics Output [3]: [hd_demo_sk#16, 
hd_dep_count#17, hd_vehicle_count#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [Or(EqualTo(hd_dep_count,5),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -170,7 +170,7 @@ Arguments: [ss_addr_sk#4 ASC NULLS FIRST], false, 0 (27) Scan parquet default.customer_address Output [2]: [ca_address_sk#21, ca_city#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)] ReadSchema: struct @@ -223,7 +223,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (38) Scan parquet default.customer Output [4]: [c_customer_sk#34, c_current_addr_sk#35, c_first_name#36, c_last_name#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46/explain.txt index 8c2e3c0153844..1bf701ed689f9 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q46/explain.txt @@ -47,7 +47,7 @@ TakeOrderedAndProject (43) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [In(ss_sold_date_sk, 
[2451790,2451609,2451294,2451658,2452099,2451482,2451700,2452035,2452274,2451258,2451847,2451714,2451937,2451860,2451601,2451573,2451686,2452008,2451454,2451882,2451832,2452259,2451671,2451903,2451497,2452162,2451322,2451517,2451434,2451273,2451405,2452105,2451924,2452050,2452126,2452203,2451818,2451559,2451853,2451238,2451209,2451357,2451959,2452239,2451608,2452141,2452252,2451623,2451867,2451504,2451910,2452232,2451874,2451581,2451329,2451223,2451783,2452267,2452042,2451895,2451986,2452091,2451693,2451265,2451678,2451825,2451244,2451490,2451287,2451419,2451546,2451245,2451713,2452070,2451189,2451804,2451468,2451525,2451902,2452077,2452161,2451378,2451567,2451931,2451699,2451251,2451840,2452253,2451938,2451510,2452231,2452036,2451616,2451230,2452112,2451846,2451966,2451538,2451819,2452140,2452183,2451496,2451791,2451595,2451574,2451363,2451994,2451917,2451602,2452273,2451237,2451350,2451685,2451259,2451286,2451972,2452224,2451370,2452245,2451643,2451993,2451315,2451301,2451560,2451433,2452225,2451532,2451755,2451854,2451545,2451210,2451587,2451987,2451447,2452197,2451552,2451896,2451679,2452147,2451735,2452022,2451707,2451868,2451398,2451777,2451181,2451503,2451839,2452175,2451441,2452154,2452029,2452196,2451952,2451805,2451965,2451539,2452001,2451833,2451392,2451524,2451461,2452133,2451448,2451307,2451615,2451769,2451412,2451349,2451651,2451763,2451203,2452064,2451980,2451748,2451637,2452182,2451279,2451231,2451734,2451692,2452071,2451336,2451300,2451727,2451630,2452189,2451875,2451973,2451328,2452084,2451399,2451944,2452204,2451385,2451776,2451384,2451272,2451812,2451749,2451566,2451182,2451945,2451420,2451930,2452057,2451756,2451644,2451314,2451364,2452007,2451798,2451475,2452015,2451440,2452000,2451588,2452148,2451195,2452217,2451371,2452176,2451531,2452134,2452211,2451462,2451188,2451741,2452119,2451342,2451580,2451672,2451889,2451280,2451406,2451293,2451217,2452049,2452106,2451321,2451335,2451483,2452260,2451657,2451979,2451518,2451629,2451728,2451923,2451861,2451951,2452246,2451455,2451356,2451224,2452210,2452021,2451427,2451202,2452098,2452168,2451553,2451391,2451706,2452155,2451196,2451770,2452127,2451762,2452078,2451958,2451721,2451665,2452120,2451252,2452085,2452092,2451476,2452218,2452169,2451797,2451650,2451881,2451511,2451469,2451888,2452043,2452266,2451664,2452014,2451343,2452056,2452190,2452063,2451636,2451742,2451811,2451720,2451308,2451489,2451413,2451216,2451594,2452238,2451784,2451426,2451622,2451916,2452113,2451909,2451266,2451826,2451377,2452028]), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -61,7 +61,7 @@ Condition : (((((ss_sold_date_sk#1 INSET (2451790,2451609,2451294,2451658,245209 (4) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_dow#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_dow, [6,0]), In(d_year, [1999,2000,2001]), In(d_date_sk, 
[2451790,2451609,2451294,2451658,2452099,2451482,2451700,2452035,2452274,2451258,2451847,2451714,2451937,2451860,2451601,2451573,2451686,2452008,2451454,2451882,2451832,2452259,2451671,2451903,2451497,2452162,2451322,2451517,2451434,2451273,2451405,2452105,2451924,2452050,2452126,2452203,2451818,2451559,2451853,2451238,2451209,2451357,2451959,2452239,2451608,2452141,2452252,2451623,2451867,2451504,2451910,2452232,2451874,2451581,2451329,2451223,2451783,2452267,2452042,2451895,2451986,2452091,2451693,2451265,2451678,2451825,2451244,2451490,2451287,2451419,2451546,2451245,2451713,2452070,2451189,2451804,2451468,2451525,2451902,2452077,2452161,2451378,2451567,2451931,2451699,2451251,2451840,2452253,2451938,2451510,2452231,2452036,2451616,2451230,2452112,2451846,2451966,2451538,2451819,2452140,2452183,2451496,2451791,2451595,2451574,2451363,2451994,2451917,2451602,2452273,2451237,2451350,2451685,2451259,2451286,2451972,2452224,2451370,2452245,2451643,2451993,2451315,2451301,2451560,2451433,2452225,2451532,2451755,2451854,2451545,2451210,2451587,2451987,2451447,2452197,2451552,2451896,2451679,2452147,2451735,2452022,2451707,2451868,2451398,2451777,2451181,2451503,2451839,2452175,2451441,2452154,2452029,2452196,2451952,2451805,2451965,2451539,2452001,2451833,2451392,2451524,2451461,2452133,2451448,2451307,2451615,2451769,2451412,2451349,2451651,2451763,2451203,2452064,2451980,2451748,2451637,2452182,2451279,2451231,2451734,2451692,2452071,2451336,2451300,2451727,2451630,2452189,2451875,2451973,2451328,2452084,2451399,2451944,2452204,2451385,2451776,2451384,2451272,2451812,2451749,2451566,2451182,2451945,2451420,2451930,2452057,2451756,2451644,2451314,2451364,2452007,2451798,2451475,2452015,2451440,2452000,2451588,2452148,2451195,2452217,2451371,2452176,2451531,2452134,2452211,2451462,2451188,2451741,2452119,2451342,2451580,2451672,2451889,2451280,2451406,2451293,2451217,2452049,2452106,2451321,2451335,2451483,2452260,2451657,2451979,2451518,2451629,2451728,2451923,2451861,2451951,2452246,2451455,2451356,2451224,2452210,2452021,2451427,2451202,2452098,2452168,2451553,2451391,2451706,2452155,2451196,2451770,2452127,2451762,2452078,2451958,2451721,2451665,2452120,2451252,2452085,2452092,2451476,2452218,2452169,2451797,2451650,2451881,2451511,2451469,2451888,2452043,2452266,2451664,2452014,2451343,2452056,2452190,2452063,2451636,2451742,2451811,2451720,2451308,2451489,2451413,2451216,2451594,2452238,2451784,2451426,2451622,2451916,2452113,2451909,2451266,2451826,2451377,2452028]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -92,7 +92,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss (11) Scan parquet default.store Output [2]: [s_store_sk#13, s_city#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [In(s_city, [Midway,Concord,Spring Hill,Brownsville,Greenville]), IsNotNull(s_store_sk)] ReadSchema: struct @@ -123,7 +123,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic (18) Scan parquet default.household_demographics Output [3]: [hd_demo_sk#16, hd_dep_count#17, hd_vehicle_count#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/household_demographics] +Location [not included in 
comparison]/{warehouse_dir}/household_demographics] PushedFilters: [Or(EqualTo(hd_dep_count,5),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -154,7 +154,7 @@ Input [7]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_ticket_number#6, s (25) Scan parquet default.customer_address Output [2]: [ca_address_sk#20, ca_city#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)] ReadSchema: struct @@ -199,7 +199,7 @@ Results [5]: [ss_ticket_number#6, ss_customer_sk#2, ca_city#21 AS bought_city#30 (34) Scan parquet default.customer Output [4]: [c_customer_sk#33, c_current_addr_sk#34, c_first_name#35, c_last_name#36] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52.sf100/explain.txt index 66cb0ccfe6e72..bb606f6860124 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52.sf100/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2451149), IsNotNull(d_date_sk), LessThanOrEqual(d_date_sk,2451179)] ReadSchema: struct @@ -47,7 +47,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451149), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales (11) Scan parquet default.item Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52/explain.txt index e1bf75575e040..bc7bc63475fd2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q52/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), LessThanOrEqual(d_date_sk,2451179), GreaterThanOrEqual(d_date_sk,2451149), IsNotNull(d_date_sk)] ReadSchema: struct @@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3] (5) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451149), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales (11) Scan parquet default.item Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53.sf100/explain.txt index 1f17140c0ef1d..237ad5594b860 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53.sf100/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,reference,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -58,7 +58,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#11, ss_item_sk#12, ss_store_sk#13, ss_sales_price#14] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2452275), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manufact_id#5, ss_sold_date_sk#11, ss_item_sk#12, ss_ (11) Scan parquet default.store Output [1]: [s_store_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -108,7 +108,7 @@ Input [5]: [i_manufact_id#5, ss_sold_date_sk#11, ss_store_sk#13, ss_sales_price# (17) Scan parquet default.date_dim Output [3]: [d_date_sk#17, d_month_seq#18, d_qoy#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_month_seq, [1222,1215,1223,1217,1214,1219,1213,1218,1220,1221,1216,1212]), LessThanOrEqual(d_date_sk,2452275), GreaterThanOrEqual(d_date_sk,2451911), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53/explain.txt index 656a81b8529b6..c2c2ddd8cc406 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q53/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,reference,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -54,7 +54,7 @@ Input [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manufact_id#5] (5) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#10, ss_item_sk#11, ss_store_sk#12, ss_sales_price#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2452275), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manufact_id#5, ss_sold_date_sk#10, ss_item_sk#11, ss_ (11) Scan parquet default.date_dim Output [3]: 
[d_date_sk#15, d_month_seq#16, d_qoy#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [In(d_month_seq, [1222,1215,1223,1217,1214,1219,1213,1218,1220,1221,1216,1212]), GreaterThanOrEqual(d_date_sk,2451911), LessThanOrEqual(d_date_sk,2452275), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -112,7 +112,7 @@ Input [6]: [i_manufact_id#5, ss_sold_date_sk#10, ss_store_sk#12, ss_sales_price#
 (18) Scan parquet default.store
 Output [1]: [s_store_sk#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55.sf100/explain.txt
index fcffe468011ba..6d9a28f187916 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55.sf100/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
 (1) Scan parquet default.date_dim
 Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2001), GreaterThanOrEqual(d_date_sk,2452215), LessThanOrEqual(d_date_sk,2452244), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -47,7 +47,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2452215), LessThanOrEqual(ss_sold_date_sk,2452244), IsNotNull(ss_item_sk)]
 ReadSchema: struct
@@ -70,7 +70,7 @@ Input [4]: [d_date_sk#1, ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7]
 (11) Scan parquet default.item
 Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,48), IsNotNull(i_item_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55/explain.txt
index fbff0e718872f..49ecb64c4234b 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q55/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
 (1) Scan parquet default.date_dim
 Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2001), GreaterThanOrEqual(d_date_sk,2452215), LessThanOrEqual(d_date_sk,2452244), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3]
 (5) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2452215), LessThanOrEqual(ss_sold_date_sk,2452244), IsNotNull(ss_item_sk)]
 ReadSchema: struct
@@ -70,7 +70,7 @@ Input [4]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6]
 (11) Scan parquet default.item
 Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,48), IsNotNull(i_item_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59.sf100/explain.txt
index baab17932fa4d..d525df46c8a4a 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59.sf100/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -69,7 +69,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#4, d_week_seq#5, d_day_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
 ReadSchema: struct
@@ -114,7 +114,7 @@ Results [9]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (13) Scan parquet default.store
 Output [3]: [s_store_sk#37, s_store_id#38, s_store_name#39]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct
@@ -141,7 +141,7 @@ Input [12]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, tue_sales#
 (19) Scan parquet default.date_dim
 Output [2]: [d_month_seq#41, d_week_seq#42]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1185), LessThanOrEqual(d_month_seq,1196), IsNotNull(d_week_seq)]
 ReadSchema: struct
@@ -172,7 +172,7 @@ Input [11]: [d_week_seq#5, sun_sales#30, mon_sales#31, tue_sales#32, wed_sales#3
 (26) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -216,7 +216,7 @@ Results [8]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (35) Scan parquet default.store
 Output [2]: [s_store_sk#37, s_store_id#38]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct
@@ -243,7 +243,7 @@ Input [10]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, wed_sales#
 (41) Scan parquet default.date_dim
 Output [2]: [d_month_seq#74, d_week_seq#75]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1197), LessThanOrEqual(d_month_seq,1208), IsNotNull(d_week_seq)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59/explain.txt
index a6c1cd0876c52..d525df46c8a4a 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q59/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -69,7 +69,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#4, d_week_seq#5, d_day_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
 ReadSchema: struct
@@ -114,7 +114,7 @@ Results [9]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (13) Scan parquet default.store
 Output [3]: [s_store_sk#37, s_store_id#38, s_store_name#39]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct
@@ -141,7 +141,7 @@ Input [12]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, tue_sales#
 (19) Scan parquet default.date_dim
 Output [2]: [d_month_seq#41, d_week_seq#42]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1185), LessThanOrEqual(d_month_seq,1196), IsNotNull(d_week_seq)]
 ReadSchema: struct
@@ -172,7 +172,7 @@ Input [11]: [d_week_seq#5, sun_sales#30, mon_sales#31, tue_sales#32, wed_sales#3
 (26) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -216,7 +216,7 @@ Results [8]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (35) Scan parquet default.store
 Output [2]: [s_store_sk#37, s_store_id#38]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct
@@ -243,7 +243,7 @@ Input [10]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, wed_sales#
 (41) Scan parquet default.date_dim
 Output [2]: [d_month_seq#74, d_week_seq#75]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1197), LessThanOrEqual(d_month_seq,1208), IsNotNull(d_week_seq)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63.sf100/explain.txt
index 77fb5364a7f96..543c374aa784f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63.sf100/explain.txt
@@ -36,7 +36,7 @@ TakeOrderedAndProject (32)
 (1) Scan parquet default.item
 Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manager_id#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,reference,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)]
 ReadSchema: struct
@@ -58,7 +58,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#11, ss_item_sk#12, ss_store_sk#13, ss_sales_price#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2452123), LessThanOrEqual(ss_sold_date_sk,2452487), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manager_id#5, ss_sold_date_sk#11, ss_item_sk#12, ss_s
 (11) Scan parquet default.store
 Output [1]: [s_store_sk#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -108,7 +108,7 @@ Input [5]: [i_manager_id#5, ss_sold_date_sk#11, ss_store_sk#13, ss_sales_price#1
 (17) Scan parquet default.date_dim
 Output [3]: [d_date_sk#17, d_month_seq#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [In(d_month_seq, [1222,1228,1223,1227,1219,1226,1224,1225,1230,1220,1221,1229]), LessThanOrEqual(d_date_sk,2452487), GreaterThanOrEqual(d_date_sk,2452123), IsNotNull(d_date_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63/explain.txt
index cb483711ee02e..f8faa8cc6a4a6 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q63/explain.txt
@@ -36,7 +36,7 @@ TakeOrderedAndProject (32)
 (1) Scan parquet default.item
 Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manager_id#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,reference,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)]
 ReadSchema: struct
@@ -54,7 +54,7 @@ Input [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manager_id#5]
 (5) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#10, ss_item_sk#11, ss_store_sk#12, ss_sales_price#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2452123), LessThanOrEqual(ss_sold_date_sk,2452487), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manager_id#5, ss_sold_date_sk#10, ss_item_sk#11, ss_s
 (11) Scan parquet default.date_dim
 Output [3]: [d_date_sk#15, d_month_seq#16, d_moy#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [In(d_month_seq, [1222,1228,1223,1227,1219,1226,1224,1225,1230,1220,1221,1229]), LessThanOrEqual(d_date_sk,2452487), GreaterThanOrEqual(d_date_sk,2452123), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -112,7 +112,7 @@ Input [6]: [i_manager_id#5, ss_sold_date_sk#10, ss_store_sk#12, ss_sales_price#1
 (18) Scan parquet default.store
 Output [1]: [s_store_sk#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65.sf100/explain.txt
index 425f7981e384e..6601207c6587b 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65.sf100/explain.txt
@@ -46,7 +46,7 @@ TakeOrderedAndProject (42)
 (1) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2452275), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct
@@ -60,7 +60,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451911))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#5, d_month_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), GreaterThanOrEqual(d_date_sk,2451911), LessThanOrEqual(d_date_sk,2452275), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -113,7 +113,7 @@ Condition : isnotnull(revenue#12)
 (15) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#13, ss_item_sk#14, ss_store_sk#15, ss_sales_price#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2452275), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -192,7 +192,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (31) Scan parquet default.store
 Output [2]: [s_store_sk#31, s_store_name#32]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -219,7 +219,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (37) Scan parquet default.item
 Output [5]: [i_item_sk#34, i_item_desc#35, i_current_price#36, i_wholesale_cost#37, i_brand#38]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65/explain.txt
index a84b8320f05ae..cabd4e1f2ac3c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q65/explain.txt
@@ -46,7 +46,7 @@ TakeOrderedAndProject (42)
 (1) Scan parquet default.store
 Output [2]: [s_store_sk#1, s_store_name#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -60,7 +60,7 @@ Condition : isnotnull(s_store_sk#1)
 (4) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#3, ss_item_sk#4, ss_store_sk#5, ss_sales_price#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2452275), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct
@@ -74,7 +74,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#3) AND (ss_sold_date_sk#3 >= 2451911))
 (7) Scan parquet default.date_dim
 Output [2]: [d_date_sk#7, d_month_seq#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), GreaterThanOrEqual(d_date_sk,2451911), LessThanOrEqual(d_date_sk,2452275), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -140,7 +140,7 @@ Input [5]: [s_store_sk#1, s_store_name#2, ss_store_sk#5, ss_item_sk#4, revenue#1
 (21) Scan parquet default.item
 Output [5]: [i_item_sk#16, i_item_desc#17, i_current_price#18, i_wholesale_cost#19, i_brand#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct
@@ -167,7 +167,7 @@ Input [9]: [s_store_name#2, ss_store_sk#5, ss_item_sk#4, revenue#14, i_item_sk#1
 (27) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#22, ss_item_sk#23, ss_store_sk#24, ss_sales_price#25]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2452275), IsNotNull(ss_store_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68.sf100/explain.txt
index 31eaa3bf925a8..52641b6986146 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68.sf100/explain.txt
@@ -56,7 +56,7 @@ TakeOrderedAndProject (52)
 (1) Scan parquet default.store_sales
 Output [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_ext_sales_price#7, ss_ext_list_price#8, ss_ext_tax#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [In(ss_sold_date_sk, [2451790,2451180,2452216,2451454,2452184,2451485,2451850,2451514,2452062,2451270,2452123,2451758,2451971,2451546,2451942,2451393,2451667,2451453,2452215,2451819,2451331,2451577,2451911,2452245,2451301,2451545,2451605,2451943,2451851,2451181,2452154,2451820,2452001,2451362,2451392,2451240,2452032,2451637,2451484,2452124,2451300,2451727,2452093,2451759,2451698,2451332,2451606,2451666,2451912,2452185,2451211,2451361,2452031,2451212,2451880,2451789,2451423,2451576,2451728,2452246,2452155,2452092,2451881,2451970,2451697,2452063,2451271,2451636,2451515,2451424,2451239,2452002]), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct
@@ -70,7 +70,7 @@ Condition : (((((ss_sold_date_sk#1 INSET (2451790,2451180,2452216,2451454,245218
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#10, d_year#11, d_dom#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1999,2000,2001]), In(d_date_sk, [2451790,2451180,2452216,2451454,2452184,2451485,2451850,2451514,2452062,2451270,2452123,2451758,2451971,2451546,2451942,2451393,2451667,2451453,2452215,2451819,2451331,2451577,2451911,2452245,2451301,2451545,2451605,2451943,2451851,2451181,2452154,2451820,2452001,2451362,2451392,2451240,2452032,2451637,2451484,2452124,2451300,2451727,2452093,2451759,2451698,2451332,2451606,2451666,2451912,2452185,2451211,2451361,2452031,2451212,2451880,2451789,2451423,2451576,2451728,2452246,2452155,2452092,2451881,2451970,2451697,2452063,2451271,2451636,2451515,2451424,2451239,2452002]), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -101,7 +101,7 @@ Input [10]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#14, s_city#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_city, [Midway,Fairview]), IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -132,7 +132,7 @@ Input [9]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic
 (18) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#17, hd_dep_count#18, hd_vehicle_count#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(EqualTo(hd_dep_count,5),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)]
 ReadSchema: struct
@@ -167,7 +167,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint))
 (26) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#22, ca_city#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
 ReadSchema: struct
@@ -216,7 +216,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
 (36) Scan parquet default.customer
 Output [4]: [c_customer_sk#39, c_current_addr_sk#40, c_first_name#41, c_last_name#42]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct
@@ -255,7 +255,7 @@ Arguments: [c_current_addr_sk#40 ASC NULLS FIRST], false, 0
 (45) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#22, ca_city#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68/explain.txt
index 37e6d860b808a..2963d89a06806 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q68/explain.txt
@@ -47,7 +47,7 @@ TakeOrderedAndProject (43)
 (1) Scan parquet default.store_sales
 Output [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_ext_sales_price#7, ss_ext_list_price#8, ss_ext_tax#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [In(ss_sold_date_sk, [2451790,2451180,2452216,2451454,2452184,2451485,2451850,2451514,2452062,2451270,2452123,2451758,2451971,2451546,2451942,2451393,2451667,2451453,2452215,2451819,2451331,2451577,2451911,2452245,2451301,2451545,2451605,2451943,2451851,2451181,2452154,2451820,2452001,2451362,2451392,2451240,2452032,2451637,2451484,2452124,2451300,2451727,2452093,2451759,2451698,2451332,2451606,2451666,2451912,2452185,2451211,2451361,2452031,2451212,2451880,2451789,2451423,2451576,2451728,2452246,2452155,2452092,2451881,2451970,2451697,2452063,2451271,2451636,2451515,2451424,2451239,2452002]), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct
@@ -61,7 +61,7 @@ Condition : (((((ss_sold_date_sk#1 INSET (2451790,2451180,2452216,2451454,245218
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#10, d_year#11, d_dom#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1999,2000,2001]), In(d_date_sk, [2451790,2451180,2452216,2451454,2452184,2451485,2451850,2451514,2452062,2451270,2452123,2451758,2451971,2451546,2451942,2451393,2451667,2451453,2452215,2451819,2451331,2451577,2451911,2452245,2451301,2451545,2451605,2451943,2451851,2451181,2452154,2451820,2452001,2451362,2451392,2451240,2452032,2451637,2451484,2452124,2451300,2451727,2452093,2451759,2451698,2451332,2451606,2451666,2451912,2452185,2451211,2451361,2452031,2451212,2451880,2451789,2451423,2451576,2451728,2452246,2452155,2452092,2451881,2451970,2451697,2452063,2451271,2451636,2451515,2451424,2451239,2452002]), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -92,7 +92,7 @@ Input [10]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#14, s_city#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_city, [Midway,Fairview]), IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -123,7 +123,7 @@ Input [9]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic
 (18) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#17, hd_dep_count#18, hd_vehicle_count#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(EqualTo(hd_dep_count,5),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)]
 ReadSchema: struct
@@ -154,7 +154,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_ticket_number#6, s
 (25) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#21, ca_city#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
 ReadSchema: struct
@@ -199,7 +199,7 @@ Results [6]: [ss_ticket_number#6, ss_customer_sk#2, ca_city#22 AS bought_city#34
 (34) Scan parquet default.customer
 Output [4]: [c_customer_sk#38, c_current_addr_sk#39, c_first_name#40, c_last_name#41]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt
index fb14ae2c59517..b20ba979588d9 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
 (1) Scan parquet default.date_dim
 Output [2]: [d_date_sk#1, d_year#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2450815), LessThanOrEqual(d_date_sk,2451179), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -60,7 +60,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.store_sales
 Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450815), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_cdemo_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)]
 ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_prom
 (11) Scan parquet default.promotion
 Output [3]: [p_promo_sk#12, p_channel_email#13, p_channel_event#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [Or(EqualTo(p_channel_email,N),EqualTo(p_channel_event,N)), IsNotNull(p_promo_sk)]
 ReadSchema: struct
@@ -114,7 +114,7 @@ Input [8]: [ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_p
 (18) Scan parquet default.customer_demographics
 Output [4]: [cd_demo_sk#16, cd_gender#17, cd_marital_status#18, cd_education_status#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_education_status), IsNotNull(cd_gender), IsNotNull(cd_marital_status), EqualTo(cd_gender,F), EqualTo(cd_marital_status,W), EqualTo(cd_education_status,Primary), IsNotNull(cd_demo_sk)]
 ReadSchema: struct
@@ -145,7 +145,7 @@ Input [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sale
 (25) Scan parquet default.item
 Output [2]: [i_item_sk#21, i_item_id#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7/explain.txt
index 5a22d85a44049..b8afa28a5f796 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
 (1) Scan parquet default.store_sales
 Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450815), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_cdemo_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)]
 ReadSchema: struct
@@ -52,7 +52,7 @@ Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2450815)
 (4) Scan parquet default.customer_demographics
 Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_marital_status), IsNotNull(cd_education_status), IsNotNull(cd_gender), EqualTo(cd_gender,F), EqualTo(cd_marital_status,W), EqualTo(cd_education_status,Primary), IsNotNull(cd_demo_sk)]
 ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_qu
 (11) Scan parquet default.date_dim
 Output [2]: [d_date_sk#14, d_year#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), LessThanOrEqual(d_date_sk,2451179), GreaterThanOrEqual(d_date_sk,2450815), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -114,7 +114,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_promo_sk#4, ss_quantity#5, ss_li
 (18) Scan parquet default.item
 Output [2]: [i_item_sk#17, i_item_id#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct
@@ -141,7 +141,7 @@ Input [8]: [ss_item_sk#2, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sale
 (24) Scan parquet default.promotion
 Output [3]: [p_promo_sk#20, p_channel_email#21, p_channel_event#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [Or(EqualTo(p_channel_email,N),EqualTo(p_channel_event,N)), IsNotNull(p_promo_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt
index ac585d84232d0..25da173c8ecde 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt
@@ -40,7 +40,7 @@
 (1) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [In(ss_sold_date_sk, [2451790,2451119,2451180,2451454,2450874,2450906,2450967,2451485,2451850,2451514,2451270,2451758,2451028,2451546,2450997,2450996,2451393,2451667,2451453,2451819,2450905,2451331,2451577,2451089,2451301,2451545,2451605,2451851,2451181,2451149,2451820,2451362,2451392,2451240,2450935,2451637,2451484,2451058,2451300,2451727,2451759,2450815,2451698,2451150,2451332,2451606,2451666,2451211,2450846,2450875,2450966,2450936,2451361,2451212,2451880,2451059,2451789,2451423,2451576,2450816,2451088,2451728,2451027,2451120,2451881,2451697,2450847,2451271,2451636,2451515,2451424,2451239]), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct
@@ -54,7 +54,7 @@ Condition : ((((ss_sold_date_sk#1 INSET (2451790,2451119,2451180,2451454,2450874
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#6, d_year#7, d_dom#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1998,1999,2000]), In(d_date_sk, [2451790,2451119,2451180,2451454,2450874,2450906,2450967,2451485,2451850,2451514,2451270,2451758,2451028,2451546,2450997,2450996,2451393,2451667,2451453,2451819,2450905,2451331,2451577,2451089,2451301,2451545,2451605,2451851,2451181,2451149,2451820,2451362,2451392,2451240,2450935,2451637,2451484,2451058,2451300,2451727,2451759,2450815,2451698,2451150,2451332,2451606,2451666,2451211,2450846,2450875,2450966,2450936,2451361,2451212,2451880,2451059,2451789,2451423,2451576,2450816,2451088,2451728,2451027,2451120,2451881,2451697,2450847,2451271,2451636,2451515,2451424,2451239]), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -85,7 +85,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#10, s_county#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_county, [Fairfield County,Ziebach County,Bronx County,Barrow County]), IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -116,7 +116,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5,
 (18) Scan parquet default.household_demographics
 Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)]
 ReadSchema: struct
@@ -173,7 +173,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint))
 (30) Scan parquet default.customer
 Output [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt
index 7fb1049935f19..e420b656c3ad0 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt
@@ -40,7 +40,7 @@
 (1) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [In(ss_sold_date_sk, [2451790,2451119,2451180,2451454,2450874,2450906,2450967,2451485,2451850,2451514,2451270,2451758,2451028,2451546,2450997,2450996,2451393,2451667,2451453,2451819,2450905,2451331,2451577,2451089,2451301,2451545,2451605,2451851,2451181,2451149,2451820,2451362,2451392,2451240,2450935,2451637,2451484,2451058,2451300,2451727,2451759,2450815,2451698,2451150,2451332,2451606,2451666,2451211,2450846,2450875,2450966,2450936,2451361,2451212,2451880,2451059,2451789,2451423,2451576,2450816,2451088,2451728,2451027,2451120,2451881,2451697,2450847,2451271,2451636,2451515,2451424,2451239]), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct
@@ -54,7 +54,7 @@ Condition : ((((ss_sold_date_sk#1 INSET (2451790,2451119,2451180,2451454,2450874
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#6, d_year#7, d_dom#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1998,1999,2000]), In(d_date_sk, [2451790,2451119,2451180,2451454,2450874,2450906,2450967,2451485,2451850,2451514,2451270,2451758,2451028,2451546,2450997,2450996,2451393,2451667,2451453,2451819,2450905,2451331,2451577,2451089,2451301,2451545,2451605,2451851,2451181,2451149,2451820,2451362,2451392,2451240,2450935,2451637,2451484,2451058,2451300,2451727,2451759,2450815,2451698,2451150,2451332,2451606,2451666,2451211,2450846,2450875,2450966,2450936,2451361,2451212,2451880,2451059,2451789,2451423,2451576,2450816,2451088,2451728,2451027,2451120,2451881,2451697,2450847,2451271,2451636,2451515,2451424,2451239]), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -85,7 +85,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#10, s_county#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_county, [Fairfield County,Ziebach County,Bronx County,Barrow County]), IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -116,7 +116,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5,
 (18) Scan parquet default.household_demographics
 Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)]
 ReadSchema: struct
@@ -169,7 +169,7 @@ Condition : ((cnt#22 >= 1) AND (cnt#22 <= 5))
 (29) Scan parquet default.customer
 Output [5]: [c_customer_sk#23, c_salutation#24, c_first_name#25, c_last_name#26, c_preferred_cust_flag#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79.sf100/explain.txt
index cbe3432b9d2bb..034d9fd0344b0 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79.sf100/explain.txt
@@ -41,7 +41,7 @@ TakeOrderedAndProject (37)
 (1) Scan parquet default.store_sales
 Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450819), LessThanOrEqual(ss_sold_date_sk,2451904), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct
@@ -55,7 +55,7 @@ Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2450819)
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#9, d_year#10, d_dow#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dow), EqualTo(d_dow,1), In(d_year, [1998,1999,2000]), GreaterThanOrEqual(d_date_sk,2450819), LessThanOrEqual(d_date_sk,2451904), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -86,7 +86,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss
 (11) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#13, hd_dep_count#14, hd_vehicle_count#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(EqualTo(hd_dep_count,8),GreaterThan(hd_vehicle_count,0)), IsNotNull(hd_demo_sk)]
 ReadSchema: struct
@@ -117,7 +117,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic
 (18) Scan parquet default.store
 Output [3]: [s_store_sk#17, s_number_employees#18, s_city#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_number_employees), GreaterThanOrEqual(s_number_employees,200), LessThanOrEqual(s_number_employees,295), IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -174,7 +174,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
 (30) Scan parquet default.customer
 Output [3]: [c_customer_sk#31, c_first_name#32, c_last_name#33]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79/explain.txt
index e1c2116bf8d19..b4eb617bd231f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q79/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
 (1) Scan parquet default.store_sales
 Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450819), LessThanOrEqual(ss_sold_date_sk,2451904), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct
@@ -52,7 +52,7 @@ Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2450819)
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#9, d_year#10, d_dow#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dow), EqualTo(d_dow,1), In(d_year, [1998,1999,2000]), GreaterThanOrEqual(d_date_sk,2450819), LessThanOrEqual(d_date_sk,2451904), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss
 (11) Scan parquet default.store
 Output [3]: [s_store_sk#13, s_number_employees#14, s_city#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_number_employees), GreaterThanOrEqual(s_number_employees,200), LessThanOrEqual(s_number_employees,295), IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -114,7 +114,7 @@ Input [9]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic
 (18) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#17, hd_dep_count#18, hd_vehicle_count#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(EqualTo(hd_dep_count,8),GreaterThan(hd_vehicle_count,0)), IsNotNull(hd_demo_sk)]
 ReadSchema: struct
@@ -163,7 +163,7 @@ Results [5]: [ss_ticket_number#6, ss_customer_sk#2, s_city#15, MakeDecimal(sum(U
 (28) Scan parquet default.customer
 Output [3]: [c_customer_sk#30, c_first_name#31, c_last_name#32]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89.sf100/explain.txt
index d610aa7854bcb..a338144d26446 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89.sf100/explain.txt
@@ -35,7 +35,7 @@ TakeOrderedAndProject (31)
 (1) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -49,7 +49,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451545))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), LessThanOrEqual(d_date_sk,2451910), GreaterThanOrEqual(d_date_sk,2451545), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -80,7 +80,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4, d_
 (11) Scan parquet default.store
 Output [3]: [s_store_sk#9, s_store_name#10, s_company_name#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
@@ -107,7 +107,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#3, ss_sales_price#4, d_moy#7, s_store_sk#9
 (17) Scan parquet default.item
 Output [4]: [i_item_sk#13, i_brand#14, i_class#15, i_category#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [Or(And(In(i_category, [Home,Books,Electronics]),In(i_class, [wallpaper,parenting,musical])),And(In(i_category, [Shoes,Jewelry,Men]),In(i_class, [womens,birdal,pants]))), IsNotNull(i_item_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89/explain.txt
index 46e18398e24a2..e90ec6f52eda0 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q89/explain.txt
@@ -35,7 +35,7 @@ TakeOrderedAndProject (31)
 (1) Scan parquet default.item
 Output [4]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [Or(And(In(i_category, [Home,Books,Electronics]),In(i_class, [wallpaper,parenting,musical])),And(In(i_category, [Shoes,Jewelry,Men]),In(i_class, [womens,birdal,pants]))), IsNotNull(i_item_sk)]
 ReadSchema: struct
@@ -49,7 +49,7 @@ Condition : (((i_category#4 IN (Home,Books,Electronics) AND i_class#3 IN (wallpa
 (4) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#5, ss_item_sk#6, ss_store_sk#7, ss_sales_price#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct
@@ -76,7 +76,7 @@ Input [8]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, ss_sold_date_sk#5,
 (10) Scan parquet default.date_dim
 Output [3]: [d_date_sk#10, d_year#11, d_moy#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), GreaterThanOrEqual(d_date_sk,2451545), LessThanOrEqual(d_date_sk,2451910), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -107,7 +107,7 @@ Input [8]: [i_brand#2, i_class#3, i_category#4, ss_sold_date_sk#5, ss_store_sk#7
 (17) Scan parquet default.store
 Output [3]: [s_store_sk#14, s_store_name#15, s_company_name#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98.sf100/explain.txt
index 62e06c90b0015..47842568c1bc7 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98.sf100/explain.txt
@@ -33,7 +33,7 @@
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2451941), IsNotNull(ss_item_sk)]
 ReadSchema: struct
@@ -47,7 +47,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451911))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_date#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2001-01-01), LessThanOrEqual(d_date,2001-01-31), GreaterThanOrEqual(d_date_sk,2451911), LessThanOrEqual(d_date_sk,2451941), IsNotNull(d_date_sk)]
 ReadSchema: struct
@@ -86,7 +86,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
 (13) Scan parquet default.item
 Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [In(i_category, [Jewelry,Sports,Books]), IsNotNull(i_item_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98/explain.txt
index d5d8d64149346..dd52252978b50 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q98/explain.txt
@@ -30,7 +30,7 @@
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451911), LessThanOrEqual(ss_sold_date_sk,2451941), IsNotNull(ss_item_sk)]
 ReadSchema: struct
@@ -44,7 +44,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451911))
 (4) Scan parquet default.item
 Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [In(i_category, [Jewelry,Sports,Books]), IsNotNull(i_item_sk)]
 ReadSchema: struct
@@ -71,7 +71,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4,
 (10) Scan parquet default.date_dim
 Output [2]: [d_date_sk#11, d_date#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2001-01-01), LessThanOrEqual(d_date,2001-01-31), GreaterThanOrEqual(d_date_sk,2451911), LessThanOrEqual(d_date_sk,2451941), IsNotNull(d_date_sk)]
 ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max.sf100/explain.txt
index aba9a769a0b6b..c6004b0d3b6a8 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max.sf100/explain.txt
@@ -12,7 +12,7 @@
 (1) Scan parquet default.store_sales
 Output [9]: [ss_sold_date_sk#1, ss_sold_time_sk#2, ss_item_sk#3, ss_customer_sk#4, ss_cdemo_sk#5, ss_hdemo_sk#6, ss_addr_sk#7, ss_store_sk#8, ss_promo_sk#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
ReadSchema: struct (2) ColumnarToRow [codegen id : 1] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max/explain.txt index f763b06dd842b..c6004b0d3b6a8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/ss_max/explain.txt @@ -12,7 +12,7 @@ (1) Scan parquet default.store_sales Output [9]: [ss_sold_date_sk#1, ss_sold_time_sk#2, ss_item_sk#3, ss_customer_sk#4, ss_cdemo_sk#5, ss_hdemo_sk#6, ss_addr_sk#7, ss_store_sk#8, ss_promo_sk#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSModifiedPlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1.sf100/explain.txt index 5caf95bee5481..ae136e8e008a7 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1.sf100/explain.txt @@ -51,7 +51,7 @@ TakeOrderedAndProject (47) (1) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#1, sr_customer_sk#2, sr_store_sk#3, sr_return_amt#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk), IsNotNull(sr_customer_sk)] ReadSchema: struct @@ -65,7 +65,7 @@ Condition : ((isnotnull(sr_returned_date_sk#1) AND isnotnull(sr_store_sk#3)) AND (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_year#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -118,7 +118,7 @@ Condition : isnotnull(ctr_total_return#14) (15) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#1, sr_customer_sk#2, sr_store_sk#3, sr_return_amt#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -197,7 +197,7 @@ Input [5]: [ctr_customer_sk#12, ctr_store_sk#13, ctr_total_return#14, (CAST(avg( (31) Scan parquet default.store Output [2]: [s_store_sk#28, s_state#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -236,7 +236,7 @@ Arguments: 
[ctr_customer_sk#12 ASC NULLS FIRST], false, 0 (40) Scan parquet default.customer Output [2]: [c_customer_sk#32, c_customer_id#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1/explain.txt index 756c752dd7040..8b11a662f9719 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q1/explain.txt @@ -48,7 +48,7 @@ TakeOrderedAndProject (44) (1) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#1, sr_customer_sk#2, sr_store_sk#3, sr_return_amt#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk), IsNotNull(sr_customer_sk)] ReadSchema: struct @@ -62,7 +62,7 @@ Condition : ((isnotnull(sr_returned_date_sk#1) AND isnotnull(sr_store_sk#3)) AND (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_year#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -115,7 +115,7 @@ Condition : isnotnull(ctr_total_return#14) (15) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#1, sr_customer_sk#2, sr_store_sk#3, sr_return_amt#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -194,7 +194,7 @@ Input [5]: [ctr_customer_sk#12, ctr_store_sk#13, ctr_total_return#14, (CAST(avg( (31) Scan parquet default.store Output [2]: [s_store_sk#28, s_state#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -225,7 +225,7 @@ Input [3]: [ctr_customer_sk#12, ctr_store_sk#13, s_store_sk#28] (38) Scan parquet default.customer Output [2]: [c_customer_sk#31, c_customer_id#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10.sf100/explain.txt index 
72f9339134e87..5bd420ff1a820 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10.sf100/explain.txt @@ -62,7 +62,7 @@ TakeOrderedAndProject (58) (1) Scan parquet default.customer Output [3]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -84,7 +84,7 @@ Arguments: [c_customer_sk#3 ASC NULLS FIRST], false, 0 (6) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#7, ss_customer_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -98,7 +98,7 @@ Condition : isnotnull(ss_sold_date_sk#7) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_moy#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2002), GreaterThanOrEqual(d_moy,1), LessThanOrEqual(d_moy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -142,7 +142,7 @@ Join condition: None (19) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#14, ws_bill_customer_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -181,7 +181,7 @@ Join condition: None (28) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#17, cs_ship_customer_sk#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -228,7 +228,7 @@ Input [5]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5, exists#2 (39) Scan parquet default.customer_address Output [2]: [ca_address_sk#20, ca_county#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_county, [Rush County,Toole County,Jefferson County,Dona Ana County,La Porte County]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -267,7 +267,7 @@ Arguments: [c_current_cdemo_sk#4 ASC NULLS FIRST], false, 0 (48) Scan parquet default.customer_demographics Output [9]: [cd_demo_sk#24, cd_gender#25, cd_marital_status#26, cd_education_status#27, cd_purchase_estimate#28, cd_credit_rating#29, cd_dep_count#30, cd_dep_employed_count#31, 
cd_dep_college_count#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10/explain.txt index f9e871077f684..56c1a25a07c5c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q10/explain.txt @@ -54,7 +54,7 @@ TakeOrderedAndProject (50) (1) Scan parquet default.customer Output [3]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -68,7 +68,7 @@ Condition : (isnotnull(c_current_addr_sk#5) AND isnotnull(c_current_cdemo_sk#4)) (4) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#6, ss_customer_sk#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -82,7 +82,7 @@ Condition : isnotnull(ss_sold_date_sk#6) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_year#9, d_moy#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,2002), GreaterThanOrEqual(d_moy,1), LessThanOrEqual(d_moy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -122,7 +122,7 @@ Join condition: None (16) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#13, ws_bill_customer_sk#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Join condition: None (24) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#16, cs_ship_customer_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -200,7 +200,7 @@ Input [5]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5, exists#2 (34) Scan parquet default.customer_address Output [2]: [ca_address_sk#19, ca_county#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in 
comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_county, [Rush County,Toole County,Jefferson County,Dona Ana County,La Porte County]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -231,7 +231,7 @@ Input [3]: [c_current_cdemo_sk#4, c_current_addr_sk#5, ca_address_sk#19] (41) Scan parquet default.customer_demographics Output [9]: [cd_demo_sk#22, cd_gender#23, cd_marital_status#24, cd_education_status#25, cd_purchase_estimate#26, cd_credit_rating#27, cd_dep_count#28, cd_dep_employed_count#29, cd_dep_college_count#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12.sf100/explain.txt index d62e42ea554fe..b4dd8173664b6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12.sf100/explain.txt @@ -31,7 +31,7 @@ TakeOrderedAndProject (27) (1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -45,7 +45,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_date#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)] ReadSchema: struct @@ -84,7 +84,7 @@ Arguments: [ws_item_sk#2 ASC NULLS FIRST], false, 0 (13) Scan parquet default.item Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12/explain.txt index 63dabf5c122e6..f786839e83e9c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q12/explain.txt @@ -28,7 +28,7 @@ TakeOrderedAndProject (24) (1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -42,7 +42,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1)) (4) Scan parquet default.item Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)] ReadSchema: struct @@ -69,7 +69,7 @@ Input [9]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_sales_price#3, i_item_sk#4, (10) Scan parquet default.date_dim Output [2]: [d_date_sk#11, d_date#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt index e7cb5071d561f..586abbd8f3fef 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt @@ -42,7 +42,7 @@ (1) Scan parquet default.customer_demographics Output [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))] ReadSchema: struct @@ -60,7 +60,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint) (5) Scan parquet default.store_sales Output [10]: [ss_sold_date_sk#5, ss_cdemo_sk#6, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_hdemo_sk), Or(Or(And(GreaterThanOrEqual(ss_net_profit,100.00),LessThanOrEqual(ss_net_profit,200.00)),And(GreaterThanOrEqual(ss_net_profit,150.00),LessThanOrEqual(ss_net_profit,300.00))),And(GreaterThanOrEqual(ss_net_profit,50.00),LessThanOrEqual(ss_net_profit,250.00))), 
Or(Or(And(GreaterThanOrEqual(ss_sales_price,100.00),LessThanOrEqual(ss_sales_price,150.00)),And(GreaterThanOrEqual(ss_sales_price,50.00),LessThanOrEqual(ss_sales_price,100.00))),And(GreaterThanOrEqual(ss_sales_price,150.00),LessThanOrEqual(ss_sales_price,200.00)))] ReadSchema: struct @@ -83,7 +83,7 @@ Input [13]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3, ss_sold_d (10) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#15, hd_dep_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_demo_sk), Or(Or(EqualTo(hd_dep_count,3),EqualTo(hd_dep_count,1)),EqualTo(hd_dep_count,1))] ReadSchema: struct @@ -110,7 +110,7 @@ Input [13]: [cd_marital_status#2, cd_education_status#3, ss_sold_date_sk#5, ss_h (16) Scan parquet default.date_dim Output [2]: [d_date_sk#18, d_year#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -141,7 +141,7 @@ Input [8]: [ss_sold_date_sk#5, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_e (23) Scan parquet default.store Output [1]: [s_store_sk#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -168,7 +168,7 @@ Input [7]: [ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_ext_sales_price#12, (29) Scan parquet default.customer_address Output [3]: [ca_address_sk#23, ca_state#24, ca_country#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [TX,OH]),In(ca_state, [OR,NM,KY])),In(ca_state, [VA,TX,MS]))] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13/explain.txt index 010e711f30bb0..d0c4e0e4934e5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13/explain.txt @@ -42,7 +42,7 @@ (1) Scan parquet default.store_sales Output [10]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_hdemo_sk), 
Or(Or(And(GreaterThanOrEqual(ss_net_profit,100.00),LessThanOrEqual(ss_net_profit,200.00)),And(GreaterThanOrEqual(ss_net_profit,150.00),LessThanOrEqual(ss_net_profit,300.00))),And(GreaterThanOrEqual(ss_net_profit,50.00),LessThanOrEqual(ss_net_profit,250.00))), Or(Or(And(GreaterThanOrEqual(ss_sales_price,100.00),LessThanOrEqual(ss_sales_price,150.00)),And(GreaterThanOrEqual(ss_sales_price,50.00),LessThanOrEqual(ss_sales_price,100.00))),And(GreaterThanOrEqual(ss_sales_price,150.00),LessThanOrEqual(ss_sales_price,200.00)))]
ReadSchema: struct
@@ -56,7 +56,7 @@ Condition : ((((((isnotnull(ss_store_sk#5) AND isnotnull(ss_addr_sk#4)) AND isno
(4) Scan parquet default.store
Output [1]: [s_store_sk#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -83,7 +83,7 @@ Input [11]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_s
(10) Scan parquet default.customer_address
Output [3]: [ca_address_sk#13, ca_state#14, ca_country#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [TX,OH]),In(ca_state, [OR,NM,KY])),In(ca_state, [VA,TX,MS]))]
ReadSchema: struct
@@ -114,7 +114,7 @@ Input [11]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_q
(17) Scan parquet default.date_dim
Output [2]: [d_date_sk#17, d_year#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -145,7 +145,7 @@ Input [8]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_quantity#6, ss_s
(24) Scan parquet default.customer_demographics
Output [3]: [cd_demo_sk#20, cd_marital_status#21, cd_education_status#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))]
ReadSchema: struct
@@ -172,7 +172,7 @@ Input [9]: [ss_cdemo_sk#2, ss_hdemo_sk#3, ss_quantity#6, ss_sales_price#7, ss_ex
(30) Scan parquet default.household_demographics
Output [2]: [hd_demo_sk#24, hd_dep_count#25]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [IsNotNull(hd_demo_sk), Or(Or(EqualTo(hd_dep_count,3),EqualTo(hd_dep_count,1)),EqualTo(hd_dep_count,1))]
ReadSchema: struct
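A note on the `Location` rewrite repeated throughout these golden files: the plan-stability harness strips the machine-specific warehouse path (here `file:/Users/yi.wu/IdeaProjects/spark/...`) before comparing plans, so the approved `explain.txt` files stay identical across developer machines and CI. Below is a minimal sketch of that kind of normalization; the object and method names and the exact regex are illustrative assumptions, not necessarily the literal code in this patch:

    object PlanNormalization {
      // Replace everything from "Location" up to (and including) the
      // suite-specific warehouse directory with a stable placeholder.
      // `clsName` stands for the suite class name embedded in the local
      // path, e.g. "org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite".
      def normalizeLocation(explain: String, clsName: String): String =
        explain.replaceAll(
          s"Location.*$clsName/",
          "Location [not included in comparison]/{warehouse_dir}/")
    }

Because only the prefix up to the warehouse directory is replaced, the closing `]` of the original `InMemoryFileIndex [...]` survives the substitution, which is why every rewritten line in these hunks ends with a bare `]` after the table name.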
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt
index c03db659679ce..49cdab6de1e4f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt
@@ -135,7 +135,7 @@ TakeOrderedAndProject (131)
(1) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -157,7 +157,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(6) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_class_id), IsNotNull(i_brand_id), IsNotNull(i_category_id)]
ReadSchema: struct
@@ -171,7 +171,7 @@ Condition : ((isnotnull(i_class_id#8) AND isnotnull(i_brand_id#7)) AND isnotnull
(9) Scan parquet default.store_sales
Output [2]: [ss_sold_date_sk#1, ss_item_sk#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -185,7 +185,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(12) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_year#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -216,7 +216,7 @@ Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, d_date_sk#10]
(19) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category_id), IsNotNull(i_brand_id), IsNotNull(i_class_id)]
ReadSchema: struct
@@ -251,7 +251,7 @@ Arguments: [coalesce(brand_id#14, 0) ASC NULLS FIRST, isnull(brand_id#14) ASC NU
(27) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#18, cs_item_sk#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: 
struct @@ -277,7 +277,7 @@ Input [3]: [cs_sold_date_sk#18, cs_item_sk#19, d_date_sk#10] (33) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -317,7 +317,7 @@ Join condition: None (42) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#22, ws_item_sk#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -426,7 +426,7 @@ Join condition: None (65) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,11), IsNotNull(d_date_sk)] ReadSchema: struct @@ -457,7 +457,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, d_d (72) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -530,7 +530,7 @@ Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, n (88) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#18, cs_item_sk#19, cs_quantity#48, cs_list_price#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -614,7 +614,7 @@ Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, n (107) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#22, ws_item_sk#23, ws_quantity#64, ws_list_price#65] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -757,7 +757,7 @@ Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquer (132) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -771,7 
+771,7 @@ Condition : isnotnull(ss_sold_date_sk#1) (135) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -802,7 +802,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] (142) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -828,7 +828,7 @@ Input [4]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49, d_date_sk#10] (148) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#22, ws_quantity#64, ws_list_price#65] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt index c96b1c502a15f..5332be30cb775 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt @@ -119,7 +119,7 @@ TakeOrderedAndProject (115) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -133,7 +133,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_class_id), IsNotNull(i_brand_id), IsNotNull(i_category_id)] ReadSchema: struct @@ -147,7 +147,7 @@ Condition : ((isnotnull(i_class_id#7) AND isnotnull(i_brand_id#6)) AND isnotnull (7) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#1, ss_item_sk#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -161,7 +161,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (10) 
Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category_id), IsNotNull(i_brand_id), IsNotNull(i_class_id)] ReadSchema: struct @@ -188,7 +188,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, i_item_sk#5, i_brand_id#6, i_class_ (16) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -219,7 +219,7 @@ Input [5]: [ss_sold_date_sk#1, i_brand_id#6, i_class_id#7, i_category_id#8, d_da (23) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#16, cs_item_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -233,7 +233,7 @@ Condition : (isnotnull(cs_item_sk#17) AND isnotnull(cs_sold_date_sk#16)) (26) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -281,7 +281,7 @@ Join condition: None (37) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#20, ws_item_sk#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -382,7 +382,7 @@ Join condition: None (58) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -417,7 +417,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, i_i (66) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,11), IsNotNull(d_date_sk)] ReadSchema: struct @@ -474,7 +474,7 @@ Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, n (78) Scan parquet default.catalog_sales Output [4]: 
[cs_sold_date_sk#16, cs_item_sk#17, cs_quantity#45, cs_list_price#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -546,7 +546,7 @@ Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, n (94) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#20, ws_item_sk#21, ws_quantity#60, ws_list_price#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -677,7 +677,7 @@ Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquer (116) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -691,7 +691,7 @@ Condition : isnotnull(ss_sold_date_sk#1) (119) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -722,7 +722,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] (126) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -748,7 +748,7 @@ Input [4]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46, d_date_sk#10] (132) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#20, ws_quantity#60, ws_list_price#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt index 8c27f7ddce018..6d39c12011180 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt @@ -114,7 +114,7 @@ TakeOrderedAndProject (110) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] 
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -136,7 +136,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(6) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_class_id), IsNotNull(i_brand_id), IsNotNull(i_category_id)]
ReadSchema: struct
@@ -150,7 +150,7 @@ Condition : ((isnotnull(i_class_id#8) AND isnotnull(i_brand_id#7)) AND isnotnull
(9) Scan parquet default.store_sales
Output [2]: [ss_sold_date_sk#1, ss_item_sk#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -164,7 +164,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(12) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_year#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -195,7 +195,7 @@ Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, d_date_sk#10]
(19) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)]
ReadSchema: struct
@@ -230,7 +230,7 @@ Arguments: [coalesce(brand_id#14, 0) ASC NULLS FIRST, isnull(brand_id#14) ASC NU
(27) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#18, cs_item_sk#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -256,7 +256,7 @@ Input [3]: [cs_sold_date_sk#18, cs_item_sk#19, d_date_sk#10]
(33) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -296,7 +296,7 @@ Join condition: None
(42) Scan parquet default.web_sales
Output [2]: [ws_sold_date_sk#22, ws_item_sk#23]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct
@@ -405,7 +405,7 @@ Join condition: None
(65) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_week_seq#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -436,7 +436,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, d_d
(72) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)]
ReadSchema: struct
@@ -528,7 +528,7 @@ Join condition: None
(93) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_week_seq#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -641,7 +641,7 @@ Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquer
(111) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -655,7 +655,7 @@ Condition : isnotnull(ss_sold_date_sk#1)
(114) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_year#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -686,7 +686,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10]
(121) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#18, cs_quantity#74, cs_list_price#75]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -712,7 +712,7 @@ Input [4]: [cs_sold_date_sk#18, cs_quantity#74, cs_list_price#75, d_date_sk#10]
(127) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#22, ws_quantity#78, ws_list_price#79]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk)]
ReadSchema: struct
@@ -765,7 +765,7 @@ Subquery:2 Hosting operator id = 67 Hosting Expression = Subquery scalar-subquer
(137) Scan parquet default.date_dim
Output [4]: [d_week_seq#29, d_year#11, d_moy#89, d_dom#90]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), IsNotNull(d_dom), EqualTo(d_year,2000), EqualTo(d_moy,12), EqualTo(d_dom,11)]
ReadSchema: struct
@@ -792,7 +792,7 @@ Subquery:4 Hosting operator id = 95 Hosting Expression = Subquery scalar-subquer
(141) Scan parquet default.date_dim
Output [4]: [d_week_seq#29, d_year#11, d_moy#89, d_dom#90]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), IsNotNull(d_dom), EqualTo(d_year,1999), EqualTo(d_moy,12), EqualTo(d_dom,11)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt
index 54e984da09306..dfe1892d80d61 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt
@@ -104,7 +104,7 @@ TakeOrderedAndProject (100)
(1) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -118,7 +118,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(4) Scan parquet default.item
Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_class_id), IsNotNull(i_brand_id), IsNotNull(i_category_id)]
ReadSchema: struct
@@ -132,7 +132,7 @@ Condition : ((isnotnull(i_class_id#7) AND isnotnull(i_brand_id#6)) AND isnotnull
(7) Scan parquet default.store_sales
Output [2]: [ss_sold_date_sk#1, ss_item_sk#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -146,7 +146,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(10) Scan parquet default.item
Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)]
ReadSchema: struct
@@ -173,7 +173,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, i_item_sk#5, i_brand_id#6, i_class_
(16) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_year#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -204,7 +204,7 @@ Input [5]: [ss_sold_date_sk#1, i_brand_id#6, i_class_id#7, i_category_id#8, d_da
(23) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#16, cs_item_sk#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -218,7 +218,7 @@ Condition : (isnotnull(cs_item_sk#17) AND isnotnull(cs_sold_date_sk#16))
(26) Scan parquet default.item
Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -266,7 +266,7 @@ Join condition: None
(37) Scan parquet default.web_sales
Output [2]: [ws_sold_date_sk#20, ws_item_sk#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct
@@ -367,7 +367,7 @@ Join condition: None
(58) Scan parquet default.item
Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_class_id), IsNotNull(i_category_id), IsNotNull(i_brand_id)]
ReadSchema: struct
@@ -402,7 +402,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, i_i
(66) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_week_seq#28]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -459,7 +459,7 @@ Input [7]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, n
(78) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -493,7 +493,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, i_i
(86) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_week_seq#28]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -594,7 +594,7 @@ Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquer
(101) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk)]
ReadSchema: struct
@@ -608,7 +608,7 @@ Condition : isnotnull(ss_sold_date_sk#1)
(104) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_year#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -639,7 +639,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10]
(111) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#16, cs_quantity#71, cs_list_price#72]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -665,7 +665,7 @@ Input [4]: [cs_sold_date_sk#16, cs_quantity#71, cs_list_price#72, d_date_sk#10]
(117) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#20, ws_quantity#75, ws_list_price#76]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk)]
ReadSchema: struct
@@ -718,7 +718,7 @@ Subquery:2 Hosting operator id = 68 Hosting Expression = Subquery scalar-subquer
(127) Scan parquet default.date_dim
Output [4]: [d_week_seq#28, d_year#11, d_moy#86, d_dom#87]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), IsNotNull(d_dom), EqualTo(d_year,2000), EqualTo(d_moy,12), EqualTo(d_dom,11)]
ReadSchema: struct
@@ -745,7 +745,7 @@ Subquery:4 Hosting operator id = 88 Hosting Expression = Subquery scalar-subquer
(131) Scan parquet default.date_dim
Output [4]: [d_week_seq#28, d_year#11, d_moy#86, d_dom#87]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), IsNotNull(d_dom), EqualTo(d_year,1999), EqualTo(d_moy,12), EqualTo(d_dom,11)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15.sf100/explain.txt
index 997fe4f5bfce5..5aa95486f8209 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15.sf100/explain.txt
@@ -36,7 +36,7 @@ TakeOrderedAndProject (32)
(1) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -50,7 +50,7 @@ Condition : (isnotnull(cs_bill_customer_sk#2) AND isnotnull(cs_sold_date_sk#1))
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#4, d_year#5, d_qoy#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -89,7 +89,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0
(13) Scan parquet default.customer
Output [2]: [c_customer_sk#9, c_current_addr_sk#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -111,7 +111,7 @@ Arguments: [c_current_addr_sk#10 ASC NULLS FIRST], false, 0
(18) Scan parquet default.customer_address
Output [3]: [ca_address_sk#12, ca_state#13, ca_zip#14]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15/explain.txt
index 009db105d2cb0..4dc0abf9a5484 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q15/explain.txt
@@ -30,7 +30,7 @@ TakeOrderedAndProject (26)
(1) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -44,7 +44,7 @@ Condition : (isnotnull(cs_bill_customer_sk#2) AND isnotnull(cs_sold_date_sk#1))
(4) Scan parquet default.customer
Output [2]: [c_customer_sk#4, c_current_addr_sk#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -71,7 +71,7 @@ Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_sales_price#3, c_custom
(10) Scan parquet default.customer_address
Output [3]: [ca_address_sk#7, ca_state#8, ca_zip#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk)]
ReadSchema: struct
@@ -98,7 +98,7 @@ Input [6]: [cs_sold_date_sk#1, cs_sales_price#3, c_current_addr_sk#5, ca_address
(16) Scan parquet default.date_dim
Output [3]: [d_date_sk#11, d_year#12, d_qoy#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt
index 66bf2dc518751..509fb0133095b 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt
@@ -48,7 +48,7 @@ TakeOrderedAndProject (44)
(1) Scan parquet default.catalog_sales
Output [7]: [cs_ship_date_sk#1, cs_ship_addr_sk#2, cs_call_center_sk#3, cs_warehouse_sk#4, cs_order_number#5, cs_ext_ship_cost#6, cs_net_profit#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_ship_date_sk), IsNotNull(cs_ship_addr_sk), IsNotNull(cs_call_center_sk)]
ReadSchema: struct
@@ -70,7 +70,7 @@ Arguments: [cs_order_number#5 ASC NULLS FIRST], false, 0
(6) Scan parquet default.catalog_sales
Output [2]: [cs_warehouse_sk#4, cs_order_number#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
ReadSchema: struct
(7) ColumnarToRow [codegen id : 3]
@@ -100,7 +100,7 @@ Input [7]: [cs_ship_date_sk#1, cs_ship_addr_sk#2, cs_call_center_sk#3, cs_wareho
(13) Scan parquet default.catalog_returns
Output [1]: [cr_order_number#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
ReadSchema: struct
(14) ColumnarToRow [codegen id : 6]
@@ -122,7 +122,7 @@ Join condition: None
(18) Scan parquet default.customer_address
Output [2]: [ca_address_sk#14, ca_state#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)]
ReadSchema: struct
@@ -153,7 +153,7 @@ Input [7]: [cs_ship_date_sk#1, cs_ship_addr_sk#2, cs_call_center_sk#3, cs_order_
(25) Scan parquet default.call_center
Output [2]: [cc_call_center_sk#17, cc_county#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
PushedFilters: [IsNotNull(cc_county), EqualTo(cc_county,Williamson County), IsNotNull(cc_call_center_sk)]
ReadSchema: struct
@@ -184,7 +184,7 @@ Input [6]: [cs_ship_date_sk#1, cs_call_center_sk#3, cs_order_number#5, cs_ext_sh
(32) Scan parquet default.date_dim
Output [2]: [d_date_sk#20, d_date#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2002-02-01), LessThanOrEqual(d_date,2002-04-02), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt
index ed45f7de91759..2ae939cfe41f3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt
@@ -45,7 +45,7 @@ TakeOrderedAndProject (41)
(1) Scan parquet default.catalog_sales
Output [7]: [cs_ship_date_sk#1, cs_ship_addr_sk#2, cs_call_center_sk#3, cs_warehouse_sk#4, cs_order_number#5, cs_ext_ship_cost#6, cs_net_profit#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_ship_date_sk), IsNotNull(cs_ship_addr_sk), IsNotNull(cs_call_center_sk)]
ReadSchema: struct
@@ -59,7 +59,7 @@ Condition : ((isnotnull(cs_ship_date_sk#1) AND isnotnull(cs_ship_addr_sk#2)) AND
(4) Scan parquet default.catalog_sales
Output [2]: [cs_warehouse_sk#4, cs_order_number#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
ReadSchema: struct
(5) ColumnarToRow [codegen id : 1]
@@ -85,7 +85,7 @@ Input [7]: [cs_ship_date_sk#1, cs_ship_addr_sk#2, cs_call_center_sk#3, cs_wareho
(10) Scan parquet default.catalog_returns
Output [1]: [cr_order_number#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
ReadSchema: struct
(11) ColumnarToRow [codegen id : 2]
@@ -103,7 +103,7 @@ Join condition: None
(14) Scan parquet default.date_dim
Output [2]: [d_date_sk#13, d_date#14]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2002-02-01), LessThanOrEqual(d_date,2002-04-02), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -134,7 +134,7 @@ Input [7]: [cs_ship_date_sk#1, cs_ship_addr_sk#2, cs_call_center_sk#3, cs_order_
(21) Scan parquet default.customer_address
Output [2]: [ca_address_sk#16, ca_state#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)]
ReadSchema: struct
@@ -165,7 +165,7 @@ Input [6]: [cs_ship_addr_sk#2, cs_call_center_sk#3, cs_order_number#5, cs_ext_sh
(28) Scan parquet default.call_center
Output [2]: [cc_call_center_sk#19, cc_county#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
PushedFilters: [IsNotNull(cc_county), EqualTo(cc_county,Williamson County), IsNotNull(cc_call_center_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt
index 36b0e72d2d0fd..df221f16db96f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt
@@ -61,7 +61,7 @@ TakeOrderedAndProject (57)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, ss_quantity#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -75,7 +75,7 @@ Condition : ((((isnotnull(ss_customer_sk#3) AND isnotnull(ss_item_sk#2)) AND isn
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_quarter_name#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_quarter_name), EqualTo(d_quarter_name,2001Q1), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -106,7 +106,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss
(11) Scan parquet default.store
Output [2]: [s_store_sk#10, s_state#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -141,7 +141,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(19) Scan parquet default.item
Output [3]: [i_item_sk#14, i_item_id#15, i_item_desc#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -180,7 +180,7 @@ Arguments: [cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST, cast(ss_item_sk#
(28) Scan parquet default.date_dim
Output [2]: [d_date_sk#19, d_quarter_name#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [In(d_quarter_name, [2001Q1,2001Q2,2001Q3]), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -202,7 +202,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(33) Scan parquet default.store_returns
Output [5]: [sr_returned_date_sk#22, sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
ReadSchema: struct
@@ -250,7 +250,7 @@ Arguments: [sr_customer_sk#24 ASC NULLS FIRST, sr_item_sk#23 ASC NULLS FIRST], f
(44) Scan parquet default.catalog_sales
Output [4]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17/explain.txt
index fddd2bb6fbde7..e04b67a75d62f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17/explain.txt
@@ -52,7 +52,7 @@ TakeOrderedAndProject (48)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, ss_quantity#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -66,7 +66,7 @@ Condition : ((((isnotnull(ss_item_sk#2) AND isnotnull(ss_customer_sk#3)) AND isn
(4) Scan parquet default.store_returns
Output [5]: [sr_returned_date_sk#7, sr_item_sk#8, sr_customer_sk#9, sr_ticket_number#10, sr_return_quantity#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
ReadSchema: struct
@@ -93,7 +93,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, s
(10) Scan parquet default.catalog_sales
Output [4]: [cs_sold_date_sk#13, cs_bill_customer_sk#14, cs_item_sk#15, cs_quantity#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -120,7 +120,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_r
(16) Scan parquet default.date_dim
Output [2]: [d_date_sk#18, d_quarter_name#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_quarter_name), EqualTo(d_quarter_name,2001Q1), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -151,7 +151,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_re
(23) Scan parquet default.date_dim
Output [2]: [d_date_sk#21, d_quarter_name#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [In(d_quarter_name, [2001Q1,2001Q2,2001Q3]), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -194,7 +194,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_return_quantity#11, c
(33) Scan parquet default.store
Output [2]: [s_store_sk#25, s_state#26]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -221,7 +221,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_return_quantity#11, c
(39) Scan parquet default.item
Output [3]: [i_item_sk#28, i_item_id#29, i_item_desc#30]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt
index 646c5240fd09e..516f782057631 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt
@@ -57,7 +57,7 @@ TakeOrderedAndProject (53)
(1) Scan parquet default.catalog_sales
Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)]
ReadSchema: struct
@@ -71,7 +71,7 @@ Condition : (((isnotnull(cs_bill_cdemo_sk#3) AND isnotnull(cs_bill_customer_sk#2
(4) Scan parquet default.customer_demographics
Output [4]: [cd_demo_sk#10, cd_gender#11, cd_education_status#12, cd_dep_count#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,F), EqualTo(cd_education_status,Unknown), IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -102,7 +102,7 @@ Input [11]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_it
(11) Scan parquet default.date_dim
Output [2]: [d_date_sk#15, d_year#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -133,7 +133,7 @@ Input [10]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity
(18) Scan parquet default.item
Output [2]: [i_item_sk#18, i_item_id#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -168,7 +168,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0
(26) Scan parquet default.customer
Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [In(c_birth_month, [1,6,8,9,12,2]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -186,7 +186,7 @@ Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bir
(30) Scan parquet default.customer_address
Output [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [In(ca_state, [MS,IN,ND,OK,NM,VA]), IsNotNull(ca_address_sk)]
ReadSchema: struct
@@ -221,7 +221,7 @@ Arguments: [c_current_cdemo_sk#23 ASC NULLS FIRST], false, 0
(38) Scan parquet default.customer_demographics
Output [1]: [cd_demo_sk#33]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18/explain.txt
index 5ba71337ccacb..3b213efa6347d 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18/explain.txt
@@ -51,7 +51,7 @@ TakeOrderedAndProject (47)
(1) Scan parquet default.catalog_sales
Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)]
ReadSchema: struct
@@ -65,7 +65,7 @@ Condition : (((isnotnull(cs_bill_cdemo_sk#3) AND isnotnull(cs_bill_customer_sk#2
(4) Scan parquet default.customer_demographics
Output [4]: [cd_demo_sk#10, cd_gender#11, cd_education_status#12, cd_dep_count#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,F), EqualTo(cd_education_status,Unknown), IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -96,7 +96,7 @@ Input [11]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_it
(11) Scan parquet default.customer
Output [5]: [c_customer_sk#15, c_current_cdemo_sk#16, c_current_addr_sk#17, c_birth_month#18, c_birth_year#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [In(c_birth_month, [1,6,8,9,12,2]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -127,7 +127,7 @@ Input [13]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity
(18) Scan parquet default.customer_demographics
Output [1]: [cd_demo_sk#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -154,7 +154,7 @@ Input [12]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs
(24) Scan parquet default.customer_address
Output [4]: [ca_address_sk#23, ca_county#24, ca_state#25, ca_country#26]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [In(ca_state, [MS,IN,ND,OK,NM,VA]), IsNotNull(ca_address_sk)]
ReadSchema: struct
@@ -181,7 +181,7 @@ Input [14]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs
(30) Scan parquet default.date_dim
Output [2]: [d_date_sk#28, d_year#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -212,7 +212,7 @@ Input [13]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs
(37) Scan parquet default.item
Output [2]: [i_item_sk#31, i_item_id#32]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt
index 2799fd0fbd634..0fbe0ccef6d13 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt
@@ -49,7 +49,7 @@ TakeOrderedAndProject (45)
(1) Scan parquet default.item
Output [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, i_manager_id#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,8), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -71,7 +71,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(6) Scan parquet default.store_sales
Output [5]: [ss_sold_date_sk#8, ss_item_sk#9, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -94,7 +94,7 @@ Input [10]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5
(11) Scan parquet default.date_dim
Output [3]: [d_date_sk#13, d_year#14, d_moy#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1998), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -125,7 +125,7 @@ Input [9]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_sold_date
(18) Scan parquet default.store
Output [2]: [s_store_sk#17, s_zip#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_zip), IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -160,7 +160,7 @@ Arguments: [ss_customer_sk#10 ASC NULLS FIRST], false, 0
(26) Scan parquet default.customer_address
Output [2]: [ca_address_sk#21, ca_zip#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip)]
ReadSchema: struct
@@ -182,7 +182,7 @@ Arguments: [ca_address_sk#21 ASC NULLS FIRST], false, 0
(31) Scan parquet default.customer
Output [2]: [c_customer_sk#24, c_current_addr_sk#25]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19/explain.txt
index 1b9e0d10dfe90..cb75374a8ddb2 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19/explain.txt
@@ -43,7 +43,7 @@ TakeOrderedAndProject (39)
(1) Scan parquet default.date_dim
Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1998), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -61,7 +61,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3]
(5) Scan parquet default.store_sales
Output [5]: [ss_sold_date_sk#4, ss_item_sk#5, ss_customer_sk#6, ss_store_sk#7, ss_ext_sales_price#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -88,7 +88,7 @@ Input [6]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_customer_sk#6, ss_s
(11) Scan parquet default.item
Output [6]: [i_item_sk#10, i_brand_id#11, i_brand#12, i_manufact_id#13, i_manufact#14, i_manager_id#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,8), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -119,7 +119,7 @@ Input [9]: [ss_item_sk#5, ss_customer_sk#6, ss_store_sk#7, ss_ext_sales_price#8,
(18) Scan parquet default.customer
Output [2]: [c_customer_sk#17, c_current_addr_sk#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -146,7 +146,7 @@ Input [9]: [ss_customer_sk#6, ss_store_sk#7, ss_ext_sales_price#8, i_brand_id#11
(24) Scan parquet default.customer_address
Output [2]: [ca_address_sk#20, ca_zip#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip)]
ReadSchema: struct
@@ -173,7 +173,7 @@ Input [9]: [ss_store_sk#7, ss_ext_sales_price#8, i_brand_id#11, i_brand#12, i_ma
(30) Scan parquet default.store
Output [2]: [s_store_sk#23, s_zip#24]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_zip), IsNotNull(s_store_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt
index a11f0125eee9b..fe5966bb4dfb3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt
@@ -46,7 +46,7 @@
(1) Scan parquet default.web_sales
Output [2]: [ws_sold_date_sk#1, ws_ext_sales_price#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk)]
ReadSchema: struct
@@ -64,7 +64,7 @@ Input [2]: [ws_sold_date_sk#1, ws_ext_sales_price#2]
(5) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#5, cs_ext_sales_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -84,7 +84,7 @@ Input [2]: [cs_sold_date_sk#5, cs_ext_sales_price#6]
(10) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_week_seq#10, d_day_name#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
ReadSchema: struct
@@ -129,7 +129,7 @@ Results [8]: [d_week_seq#10, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name
(19) Scan parquet default.date_dim
Output [2]: [d_week_seq#42, d_year#43]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_week_seq)]
ReadSchema: struct
@@ -178,7 +178,7 @@ Results [8]: [d_week_seq#10, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name
(30) Scan parquet default.date_dim
Output [2]: [d_week_seq#68, d_year#69]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_week_seq)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2/explain.txt
index d944c21c2efb6..7eff75a0220a4 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2/explain.txt
@@ -43,7 +43,7 @@
(1) Scan parquet default.web_sales
Output [2]: [ws_sold_date_sk#1, ws_ext_sales_price#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk)]
ReadSchema: struct
@@ -61,7 +61,7 @@ Input [2]: [ws_sold_date_sk#1, ws_ext_sales_price#2]
(5) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#5, cs_ext_sales_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -81,7 +81,7 @@ Input [2]: [cs_sold_date_sk#5, cs_ext_sales_price#6]
(10) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_week_seq#10, d_day_name#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
ReadSchema: struct
@@ -126,7 +126,7 @@ Results [8]: [d_week_seq#10, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name
(19) Scan parquet default.date_dim
Output [2]: [d_week_seq#42, d_year#43]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_week_seq)]
ReadSchema: struct
@@ -167,7 +167,7 @@ Results [8]: [d_week_seq#10, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name
(28) Scan parquet default.date_dim
Output [2]: [d_week_seq#67, d_year#68]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_week_seq)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20.sf100/explain.txt
index 92ac79f525fb5..2ef3660bc7ba4 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20.sf100/explain.txt
@@ -31,7 +31,7 @@ TakeOrderedAndProject (27)
(1) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -45,7 +45,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1))
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#4, d_date#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -84,7 +84,7 @@ Arguments: [cs_item_sk#2 ASC NULLS FIRST], false, 0
(13) Scan parquet default.item
Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20/explain.txt
index 3260eafea1b04..4234fba2b5a14 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q20/explain.txt
@@ -28,7 +28,7 @@ TakeOrderedAndProject (24)
(1) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -42,7 +42,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1))
(4) Scan parquet default.item
Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -69,7 +69,7 @@ Input [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_sales_price#3, i_item_sk#4,
(10) Scan parquet default.date_dim
Output [2]: [d_date_sk#11, d_date#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt
index 2a540094ddcd5..9de369f611d0e 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt
@@ -31,7 +31,7 @@ TakeOrderedAndProject (27)
(1) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_warehouse_sk), IsNotNull(inv_item_sk), IsNotNull(inv_date_sk)]
ReadSchema: struct
@@ -45,7 +45,7 @@ Condition : ((isnotnull(inv_warehouse_sk#3) AND isnotnull(inv_item_sk#2)) AND is
(4) Scan parquet default.item
Output [3]: [i_item_sk#5, i_item_id#6, i_current_price#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,0.99), LessThanOrEqual(i_current_price,1.49), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -76,7 +76,7 @@ Input [6]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha
(11) Scan parquet default.date_dim
Output [2]: [d_date_sk#9, d_date#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-02-10), LessThanOrEqual(d_date,2000-04-10), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -103,7 +103,7 @@ Input [6]: [inv_date_sk#1, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_id
(17) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#12, w_warehouse_name#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt
index 67d479c0d6a33..788d1affde1b8 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt
@@ -31,7 +31,7 @@ TakeOrderedAndProject (27)
(1) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_warehouse_sk), IsNotNull(inv_item_sk), IsNotNull(inv_date_sk)]
ReadSchema: struct
@@ -45,7 +45,7 @@ Condition : ((isnotnull(inv_warehouse_sk#3) AND isnotnull(inv_item_sk#2)) AND is
(4) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#5, w_warehouse_name#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct
@@ -72,7 +72,7 @@ Input [6]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha
(10) Scan parquet default.item
Output [3]: [i_item_sk#8, i_item_id#9, i_current_price#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,0.99), LessThanOrEqual(i_current_price,1.49), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -103,7 +103,7 @@ Input [6]: [inv_date_sk#1, inv_item_sk#2, inv_quantity_on_hand#4, w_warehouse_na
(17) Scan parquet default.date_dim
Output [2]: [d_date_sk#12, d_date#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-02-10), LessThanOrEqual(d_date,2000-04-10), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22.sf100/explain.txt
index e0c0319ef8ce5..a405f658951d3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22.sf100/explain.txt
@@ -34,7 +34,7 @@ TakeOrderedAndProject (30)
(1) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_date_sk), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk)]
ReadSchema: struct
@@ -48,7 +48,7 @@ Condition : ((isnotnull(inv_date_sk#1) AND isnotnull(inv_item_sk#2)) AND isnotnu
(4) Scan parquet default.warehouse
Output [1]: [w_warehouse_sk#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct
@@ -75,7 +75,7 @@ Input [5]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha
(10) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_month_seq#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -114,7 +114,7 @@ Arguments: [inv_item_sk#2 ASC NULLS FIRST], false, 0
(19) Scan parquet default.item
Output [5]: [i_item_sk#11, i_brand#12, i_class#13, i_category#14, i_product_name#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22/explain.txt
index 7ebdeb2615751..6aae0b0c8dc0c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q22/explain.txt
@@ -31,7 +31,7 @@ TakeOrderedAndProject (27)
(1) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_date_sk), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk)]
ReadSchema: struct
@@ -45,7 +45,7 @@ Condition : ((isnotnull(inv_date_sk#1) AND isnotnull(inv_item_sk#2)) AND isnotnu
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#5, d_month_seq#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -76,7 +76,7 @@ Input [5]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha
(11) Scan parquet default.item
Output [5]: [i_item_sk#8, i_brand#9, i_class#10, i_category#11, i_product_name#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -103,7 +103,7 @@ Input [8]: [inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_sk
(17) Scan parquet default.warehouse
Output [1]: [w_warehouse_sk#14]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt index 702901b354289..c5988072f758d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt @@ -96,7 +96,7 @@ CollectLimit (92) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity#4, cs_list_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -118,7 +118,7 @@ Arguments: [cs_item_sk#3 ASC NULLS FIRST], false, 0 (6) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#7, ss_item_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -132,7 +132,7 @@ Condition : (isnotnull(ss_sold_date_sk#7) AND isnotnull(ss_item_sk#8)) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_date#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -171,7 +171,7 @@ Arguments: [ss_item_sk#8 ASC NULLS FIRST], false, 0 (18) Scan parquet default.item Output [2]: [i_item_sk#14, i_item_desc#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -249,7 +249,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 (35) Scan parquet default.store_sales Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -271,7 +271,7 @@ Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 (40) Scan parquet default.customer Output [1]: [c_customer_sk#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -337,7 +337,7 @@ Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_pri (54) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#11, d_moy#39] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location 
[not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct @@ -368,7 +368,7 @@ Input [4]: [cs_sold_date_sk#1, cs_quantity#4, cs_list_price#5, d_date_sk#9] (61) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -537,7 +537,7 @@ Subquery:1 Hosting operator id = 49 Hosting Expression = Subquery scalar-subquer (93) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -551,7 +551,7 @@ Condition : (isnotnull(ss_customer_sk#25) AND isnotnull(ss_sold_date_sk#7)) (96) Scan parquet default.date_dim Output [2]: [d_date_sk#9, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -590,7 +590,7 @@ Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 (105) Scan parquet default.customer Output [1]: [c_customer_sk#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt index 37c10d8acd77b..6d2b5b0013d8f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt @@ -76,7 +76,7 @@ CollectLimit (72) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity#4, cs_list_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -90,7 +90,7 @@ Condition : isnotnull(cs_sold_date_sk#1) (4) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#6, ss_item_sk#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), 
IsNotNull(ss_item_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Condition : (isnotnull(ss_sold_date_sk#6) AND isnotnull(ss_item_sk#7)) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_date#9, d_year#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -135,7 +135,7 @@ Input [4]: [ss_sold_date_sk#6, ss_item_sk#7, d_date_sk#8, d_date#9] (14) Scan parquet default.item Output [2]: [i_item_sk#12, i_item_desc#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -201,7 +201,7 @@ Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity# (28) Scan parquet default.store_sales Output [3]: [ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -215,7 +215,7 @@ Condition : isnotnull(ss_customer_sk#23) (31) Scan parquet default.customer Output [1]: [c_customer_sk#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -281,7 +281,7 @@ Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_pri (45) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_year#10, d_moy#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct @@ -312,7 +312,7 @@ Input [4]: [cs_sold_date_sk#1, cs_quantity#4, cs_list_price#5, d_date_sk#8] (52) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -432,7 +432,7 @@ Subquery:1 Hosting operator id = 40 Hosting Expression = Subquery scalar-subquer (73) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -446,7 +446,7 
@@ Condition : (isnotnull(ss_customer_sk#23) AND isnotnull(ss_sold_date_sk#6)) (76) Scan parquet default.customer Output [1]: [c_customer_sk#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -473,7 +473,7 @@ Input [5]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price (82) Scan parquet default.date_dim Output [2]: [d_date_sk#8, d_year#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt index 6039d13e74edb..51b85142f37ff 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt @@ -134,7 +134,7 @@ TakeOrderedAndProject (130) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity#4, cs_list_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -156,7 +156,7 @@ Arguments: [cs_item_sk#3 ASC NULLS FIRST], false, 0 (6) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#7, ss_item_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -170,7 +170,7 @@ Condition : (isnotnull(ss_sold_date_sk#7) AND isnotnull(ss_item_sk#8)) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_date#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -209,7 +209,7 @@ Arguments: [ss_item_sk#8 ASC NULLS FIRST], false, 0 (18) Scan parquet default.item Output [2]: [i_item_sk#14, i_item_desc#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -287,7 +287,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 (35) Scan parquet default.store_sales Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] Batched: true -Location: 
InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -309,7 +309,7 @@ Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 (40) Scan parquet default.customer Output [1]: [c_customer_sk#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -371,7 +371,7 @@ Join condition: None (53) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#11, d_moy#39] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct @@ -402,7 +402,7 @@ Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_pri (60) Scan parquet default.customer Output [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -509,7 +509,7 @@ Results [3]: [c_last_name#42, c_first_name#41, sum(CheckOverflow((promote_precis (83) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -748,7 +748,7 @@ Subquery:1 Hosting operator id = 49 Hosting Expression = Subquery scalar-subquer (131) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -762,7 +762,7 @@ Condition : (isnotnull(ss_customer_sk#25) AND isnotnull(ss_sold_date_sk#7)) (134) Scan parquet default.date_dim Output [2]: [d_date_sk#9, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -801,7 +801,7 @@ Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 (143) Scan parquet default.customer Output [1]: [c_customer_sk#29] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b/explain.txt index 61e4b21189a86..b5213786c93bc 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b/explain.txt @@ -101,7 +101,7 @@ TakeOrderedAndProject (97) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity#4, cs_list_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -115,7 +115,7 @@ Condition : (isnotnull(cs_bill_customer_sk#2) AND isnotnull(cs_sold_date_sk#1)) (4) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#6, ss_item_sk#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -129,7 +129,7 @@ Condition : (isnotnull(ss_sold_date_sk#6) AND isnotnull(ss_item_sk#7)) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_date#9, d_year#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -160,7 +160,7 @@ Input [4]: [ss_sold_date_sk#6, ss_item_sk#7, d_date_sk#8, d_date#9] (14) Scan parquet default.item Output [2]: [i_item_sk#12, i_item_desc#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -226,7 +226,7 @@ Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity# (28) Scan parquet default.store_sales Output [3]: [ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -240,7 +240,7 @@ Condition : isnotnull(ss_customer_sk#23) (31) Scan parquet default.customer Output [1]: [c_customer_sk#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: 
struct @@ -302,7 +302,7 @@ Join condition: None (44) Scan parquet default.customer Output [3]: [c_customer_sk#26, c_first_name#38, c_last_name#39] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -356,7 +356,7 @@ Input [7]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_pri (56) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_year#10, d_moy#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct @@ -405,7 +405,7 @@ Results [3]: [c_last_name#39, c_first_name#38, sum(CheckOverflow((promote_precis (66) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#52, ws_item_sk#53, ws_bill_customer_sk#54, ws_quantity#55, ws_list_price#56] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -458,7 +458,7 @@ Join condition: None (78) Scan parquet default.customer Output [3]: [c_customer_sk#26, c_first_name#38, c_last_name#39] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -575,7 +575,7 @@ Subquery:1 Hosting operator id = 40 Hosting Expression = Subquery scalar-subquer (98) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -589,7 +589,7 @@ Condition : (isnotnull(ss_customer_sk#23) AND isnotnull(ss_sold_date_sk#6)) (101) Scan parquet default.customer Output [1]: [c_customer_sk#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -616,7 +616,7 @@ Input [5]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price (107) Scan parquet default.date_dim Output [2]: [d_date_sk#8, d_year#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt index d53db33027ed1..d1e4f3e242fc5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt @@ -52,7 +52,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -66,7 +66,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (4) Scan parquet default.item Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_color), EqualTo(i_color,pale), IsNotNull(i_item_sk)] ReadSchema: struct @@ -101,7 +101,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (12) Scan parquet default.customer Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -132,7 +132,7 @@ Input [14]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, (19) Scan parquet default.store Output [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -154,7 +154,7 @@ Arguments: HashedRelationBroadcastMode(List(input[3, string, true]),false), [id= (24) Scan parquet default.customer_address Output [3]: [ca_state#25, ca_zip#26, ca_country#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_zip), IsNotNull(ca_country)] ReadSchema: struct @@ -198,7 +198,7 @@ Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (34) Scan parquet default.store_returns Output [2]: [sr_item_sk#30, sr_ticket_number#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in 
comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -330,7 +330,7 @@ Subquery:1 Hosting operator id = 47 Hosting Expression = Subquery scalar-subquer (49) Scan parquet default.store Output [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -352,7 +352,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (54) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -383,7 +383,7 @@ Arguments: [ss_item_sk#1 ASC NULLS FIRST], false, 0 (61) Scan parquet default.item Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -422,7 +422,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (70) Scan parquet default.customer Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -461,7 +461,7 @@ Arguments: [s_zip#23 ASC NULLS FIRST, c_birth_country#17 ASC NULLS FIRST], false (79) Scan parquet default.customer_address Output [3]: [ca_state#25, ca_zip#26, ca_country#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_country), IsNotNull(ca_zip)] ReadSchema: struct @@ -500,7 +500,7 @@ Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (88) Scan parquet default.store_returns Output [2]: [sr_item_sk#30, sr_ticket_number#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a/explain.txt index 09942dc6d5009..88ea132e59e74 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a/explain.txt @@ -46,7 +46,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -60,7 +60,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#6, sr_ticket_number#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -87,7 +87,7 @@ Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, s (10) Scan parquet default.store Output [5]: [s_store_sk#9, s_store_name#10, s_market_id#11, s_state#12, s_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -118,7 +118,7 @@ Input [8]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_net_paid#5, s_stor (17) Scan parquet default.item Output [6]: [i_item_sk#15, i_current_price#16, i_size#17, i_color#18, i_units#19, i_manager_id#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_color), EqualTo(i_color,pale), IsNotNull(i_item_sk)] ReadSchema: struct @@ -145,7 +145,7 @@ Input [12]: [ss_item_sk#1, ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_s (23) Scan parquet default.customer Output [4]: [c_customer_sk#22, c_first_name#23, c_last_name#24, c_birth_country#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -172,7 +172,7 @@ Input [14]: [ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_state#12, s_zip (29) Scan parquet default.customer_address Output [3]: [ca_state#27, ca_zip#28, ca_country#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_zip), IsNotNull(ca_country)] ReadSchema: struct @@ -288,7 +288,7 @@ 
Subquery:1 Hosting operator id = 41 Hosting Expression = Subquery scalar-subquer (43) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -302,7 +302,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (46) Scan parquet default.store_returns Output [2]: [sr_item_sk#6, sr_ticket_number#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -329,7 +329,7 @@ Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, s (52) Scan parquet default.store Output [5]: [s_store_sk#9, s_store_name#10, s_market_id#11, s_state#12, s_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -360,7 +360,7 @@ Input [8]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_net_paid#5, s_stor (59) Scan parquet default.item Output [6]: [i_item_sk#15, i_current_price#16, i_size#17, i_color#18, i_units#19, i_manager_id#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -387,7 +387,7 @@ Input [12]: [ss_item_sk#1, ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_s (65) Scan parquet default.customer Output [4]: [c_customer_sk#22, c_first_name#23, c_last_name#24, c_birth_country#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -414,7 +414,7 @@ Input [14]: [ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_state#12, s_zip (71) Scan parquet default.customer_address Output [3]: [ca_state#27, ca_zip#28, ca_country#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_country), IsNotNull(ca_zip)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt index 1c7950dbaa396..7d0932e3268fb 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt @@ -52,7 +52,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -66,7 +66,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (4) Scan parquet default.item Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_color), EqualTo(i_color,chiffon), IsNotNull(i_item_sk)] ReadSchema: struct @@ -101,7 +101,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (12) Scan parquet default.customer Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -132,7 +132,7 @@ Input [14]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, (19) Scan parquet default.store Output [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -154,7 +154,7 @@ Arguments: HashedRelationBroadcastMode(List(input[3, string, true]),false), [id= (24) Scan parquet default.customer_address Output [3]: [ca_state#25, ca_zip#26, ca_country#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_zip), IsNotNull(ca_country)] ReadSchema: struct @@ -198,7 +198,7 @@ Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (34) Scan parquet default.store_returns Output [2]: [sr_item_sk#30, sr_ticket_number#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -330,7 +330,7 @@ Subquery:1 Hosting operator id = 47 Hosting Expression = Subquery scalar-subquer (49) Scan 
parquet default.store Output [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -352,7 +352,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (54) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -383,7 +383,7 @@ Arguments: [ss_item_sk#1 ASC NULLS FIRST], false, 0 (61) Scan parquet default.item Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -422,7 +422,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (70) Scan parquet default.customer Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -461,7 +461,7 @@ Arguments: [s_zip#23 ASC NULLS FIRST, c_birth_country#17 ASC NULLS FIRST], false (79) Scan parquet default.customer_address Output [3]: [ca_state#25, ca_zip#26, ca_country#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_country), IsNotNull(ca_zip)] ReadSchema: struct @@ -500,7 +500,7 @@ Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (88) Scan parquet default.store_returns Output [2]: [sr_item_sk#30, sr_ticket_number#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b/explain.txt index aa1cd3e86f29a..08fb812d3f0e5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b/explain.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b/explain.txt @@ -46,7 +46,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -60,7 +60,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#6, sr_ticket_number#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -87,7 +87,7 @@ Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, s (10) Scan parquet default.store Output [5]: [s_store_sk#9, s_store_name#10, s_market_id#11, s_state#12, s_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -118,7 +118,7 @@ Input [8]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_net_paid#5, s_stor (17) Scan parquet default.item Output [6]: [i_item_sk#15, i_current_price#16, i_size#17, i_color#18, i_units#19, i_manager_id#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_color), EqualTo(i_color,chiffon), IsNotNull(i_item_sk)] ReadSchema: struct @@ -145,7 +145,7 @@ Input [12]: [ss_item_sk#1, ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_s (23) Scan parquet default.customer Output [4]: [c_customer_sk#22, c_first_name#23, c_last_name#24, c_birth_country#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] ReadSchema: struct @@ -172,7 +172,7 @@ Input [14]: [ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_state#12, s_zip (29) Scan parquet default.customer_address Output [3]: [ca_state#27, ca_zip#28, ca_country#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_zip), IsNotNull(ca_country)] ReadSchema: struct @@ -288,7 +288,7 @@ Subquery:1 Hosting operator id = 41 Hosting Expression = Subquery scalar-subquer (43) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true 
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)]
ReadSchema: struct
@@ -302,7 +302,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is
(46) Scan parquet default.store_returns
Output [2]: [sr_item_sk#6, sr_ticket_number#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)]
ReadSchema: struct
@@ -329,7 +329,7 @@ Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, s
(52) Scan parquet default.store
Output [5]: [s_store_sk#9, s_store_name#10, s_market_id#11, s_state#12, s_zip#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)]
ReadSchema: struct
@@ -360,7 +360,7 @@ Input [8]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_net_paid#5, s_stor
(59) Scan parquet default.item
Output [6]: [i_item_sk#15, i_current_price#16, i_size#17, i_color#18, i_units#19, i_manager_id#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -387,7 +387,7 @@ Input [12]: [ss_item_sk#1, ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_s
(65) Scan parquet default.customer
Output [4]: [c_customer_sk#22, c_first_name#23, c_last_name#24, c_birth_country#25]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)]
ReadSchema: struct
@@ -414,7 +414,7 @@ Input [14]: [ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_state#12, s_zip
(71) Scan parquet default.customer_address
Output [3]: [ca_state#27, ca_zip#28, ca_country#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_country), IsNotNull(ca_zip)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt
index f9d1aa308b108..1e703b42648ab 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt
@@ -61,7 +61,7 @@ TakeOrderedAndProject (57)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, ss_net_profit#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -75,7 +75,7 @@ Condition : ((((isnotnull(ss_customer_sk#3) AND isnotnull(ss_item_sk#2)) AND isn
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#7, d_year#8, d_moy#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,4), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -106,7 +106,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss
(11) Scan parquet default.store
Output [3]: [s_store_sk#11, s_store_id#12, s_store_name#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -141,7 +141,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(19) Scan parquet default.item
Output [3]: [i_item_sk#16, i_item_id#17, i_item_desc#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -180,7 +180,7 @@ Arguments: [cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST, cast(ss_item_sk#
(28) Scan parquet default.date_dim
Output [3]: [d_date_sk#21, d_year#22, d_moy#23]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,10), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -202,7 +202,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(33) Scan parquet default.store_returns
Output [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
ReadSchema: struct
@@ -250,7 +250,7 @@ Arguments: [sr_customer_sk#27 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST], f
(44) Scan parquet default.catalog_sales
Output [4]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25/explain.txt
index b749a37913efb..8acd8414fb1d3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25/explain.txt
@@ -52,7 +52,7 @@ TakeOrderedAndProject (48)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, ss_net_profit#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -66,7 +66,7 @@ Condition : ((((isnotnull(ss_item_sk#2) AND isnotnull(ss_customer_sk#3)) AND isn
(4) Scan parquet default.store_returns
Output [5]: [sr_returned_date_sk#7, sr_item_sk#8, sr_customer_sk#9, sr_ticket_number#10, sr_net_loss#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
ReadSchema: struct
@@ -93,7 +93,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, s
(10) Scan parquet default.catalog_sales
Output [4]: [cs_sold_date_sk#13, cs_bill_customer_sk#14, cs_item_sk#15, cs_net_profit#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -120,7 +120,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_net_profit#6, sr
(16) Scan parquet default.date_dim
Output [3]: [d_date_sk#18, d_year#19, d_moy#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,4), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -151,7 +151,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_net_profit#6, sr_
(23) Scan parquet default.date_dim
Output [3]: [d_date_sk#22, d_year#23, d_moy#24]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,10), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -194,7 +194,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#4, ss_net_profit#6, sr_net_loss#11, cs_sol
(33) Scan parquet default.store
Output [3]: [s_store_sk#27, s_store_id#28, s_store_name#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -221,7 +221,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_net_profit#6, sr_net_loss#11, cs_net
(39) Scan parquet default.item
Output [3]: [i_item_sk#31, i_item_id#32, i_item_desc#33]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26.sf100/explain.txt
index 671ce981abf6c..b6917a75fe493 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26.sf100/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
(1) Scan parquet default.catalog_sales
Output [8]: [cs_sold_date_sk#1, cs_bill_cdemo_sk#2, cs_item_sk#3, cs_promo_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_promo_sk)]
ReadSchema: struct
@@ -52,7 +52,7 @@ Condition : (((isnotnull(cs_bill_cdemo_sk#2) AND isnotnull(cs_sold_date_sk#1)) A
(4) Scan parquet default.customer_demographics
Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), IsNotNull(cd_marital_status), EqualTo(cd_gender,M), EqualTo(cd_marital_status,S), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [cs_sold_date_sk#1, cs_bill_cdemo_sk#2, cs_item_sk#3, cs_promo_sk#4,
(11) Scan parquet default.promotion
Output [3]: [p_promo_sk#14, p_channel_email#15, p_channel_event#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
PushedFilters: [Or(EqualTo(p_channel_email,N),EqualTo(p_channel_event,N)), IsNotNull(p_promo_sk)]
ReadSchema: struct
@@ -114,7 +114,7 @@ Input [8]: [cs_sold_date_sk#1, cs_item_sk#3, cs_promo_sk#4, cs_quantity#5, cs_li
(18) Scan parquet default.date_dim
Output [2]: [d_date_sk#18, d_year#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -145,7 +145,7 @@ Input [7]: [cs_sold_date_sk#1, cs_item_sk#3, cs_quantity#5, cs_list_price#6, cs_
(25) Scan parquet default.item
Output [2]: [i_item_sk#21, i_item_id#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26/explain.txt
index fb76c3804a462..2ad3da8e77a14 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q26/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
(1) Scan parquet default.catalog_sales
Output [8]: [cs_sold_date_sk#1, cs_bill_cdemo_sk#2, cs_item_sk#3, cs_promo_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_promo_sk)]
ReadSchema: struct
@@ -52,7 +52,7 @@ Condition : (((isnotnull(cs_bill_cdemo_sk#2) AND isnotnull(cs_sold_date_sk#1)) A
(4) Scan parquet default.customer_demographics
Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_marital_status), IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,M), EqualTo(cd_marital_status,S), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [cs_sold_date_sk#1, cs_bill_cdemo_sk#2, cs_item_sk#3, cs_promo_sk#4,
(11) Scan parquet default.date_dim
Output [2]: [d_date_sk#14, d_year#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -114,7 +114,7 @@ Input [8]: [cs_sold_date_sk#1, cs_item_sk#3, cs_promo_sk#4, cs_quantity#5, cs_li
(18) Scan parquet default.item
Output [2]: [i_item_sk#17, i_item_id#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -141,7 +141,7 @@ Input [8]: [cs_item_sk#3, cs_promo_sk#4, cs_quantity#5, cs_list_price#6, cs_sale
(24) Scan parquet default.promotion
Output [3]: [p_promo_sk#20, p_channel_email#21, p_channel_event#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
PushedFilters: [Or(EqualTo(p_channel_email,N),EqualTo(p_channel_event,N)), IsNotNull(p_promo_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27.sf100/explain.txt
index 07e50e7a65c57..8fc22b44b7ca5 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27.sf100/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
(1) Scan parquet default.store_sales
Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct
@@ -52,7 +52,7 @@ Condition : (((isnotnull(ss_cdemo_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND is
(4) Scan parquet default.customer_demographics
Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), IsNotNull(cd_marital_status), EqualTo(cd_gender,M), EqualTo(cd_marital_status,S), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_qu
(11) Scan parquet default.date_dim
Output [2]: [d_date_sk#14, d_year#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -114,7 +114,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li
(18) Scan parquet default.store
Output [2]: [s_store_sk#17, s_state#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -141,7 +141,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale
(24) Scan parquet default.item
Output [2]: [i_item_sk#20, i_item_id#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27/explain.txt
index 8258588e4546a..b14b1847940b4 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q27/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
(1) Scan parquet default.store_sales
Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct
@@ -52,7 +52,7 @@ Condition : (((isnotnull(ss_cdemo_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND is
(4) Scan parquet default.customer_demographics
Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_marital_status), IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,M), EqualTo(cd_marital_status,S), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)]
ReadSchema: struct
@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_qu
(11) Scan parquet default.date_dim
Output [2]: [d_date_sk#14, d_year#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -114,7 +114,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li
(18) Scan parquet default.store
Output [2]: [s_store_sk#17, s_state#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -141,7 +141,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale
(24) Scan parquet default.item
Output [2]: [i_item_sk#20, i_item_id#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt
index bc499294a413e..9788040bbe6de 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt
@@ -75,7 +75,7 @@ CollectLimit (71)
(1) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,0), LessThanOrEqual(ss_quantity,5)]
ReadSchema: struct
@@ -129,7 +129,7 @@ Results [3]: [cast((avg(UnscaledValue(ss_list_price#3))#5 / 100.0) as decimal(11
(11) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,6), LessThanOrEqual(ss_quantity,10)]
ReadSchema: struct
@@ -190,7 +190,7 @@ Join condition: None
(23) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,11), LessThanOrEqual(ss_quantity,15)]
ReadSchema: struct
@@ -251,7 +251,7 @@ Join condition: None
(35) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,16), LessThanOrEqual(ss_quantity,20)]
ReadSchema: struct
@@ -312,7 +312,7 @@ Join condition: None
(47) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,25)]
ReadSchema: struct
@@ -373,7 +373,7 @@ Join condition: None
(59) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,26), LessThanOrEqual(ss_quantity,30)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt
index 4169644f231c8..9788040bbe6de 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt
@@ -75,7 +75,7 @@ CollectLimit (71)
(1) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,0), LessThanOrEqual(ss_quantity,5)]
ReadSchema: struct
@@ -129,7 +129,7 @@ Results [3]: [cast((avg(UnscaledValue(ss_list_price#3))#5 / 100.0) as decimal(11
(11) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,6), LessThanOrEqual(ss_quantity,10)]
ReadSchema: struct
@@ -190,7 +190,7 @@ Join condition: None
(23) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,11), LessThanOrEqual(ss_quantity,15)]
ReadSchema: struct
@@ -251,7 +251,7 @@ Join condition: None
(35) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,16), LessThanOrEqual(ss_quantity,20)]
ReadSchema: struct
@@ -312,7 +312,7 @@ Join condition: None
(47) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,25)]
ReadSchema: struct
@@ -373,7 +373,7 @@ Join condition: None
(59) Scan parquet default.store_sales
Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,26), LessThanOrEqual(ss_quantity,30)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt
index 7625d9dd683e9..31d05221a981c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt
@@ -65,7 +65,7 @@ TakeOrderedAndProject (61)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, ss_quantity#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -79,7 +79,7 @@ Condition : ((((isnotnull(ss_customer_sk#3) AND isnotnull(ss_item_sk#2)) AND isn
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#7, d_year#8, d_moy#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,9), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -110,7 +110,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss
(11) Scan parquet default.store
Output [3]: [s_store_sk#11, s_store_id#12, s_store_name#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -145,7 +145,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(19) Scan parquet default.item
Output [3]: [i_item_sk#16, i_item_id#17, i_item_desc#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -184,7 +184,7 @@ Arguments: [cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST, cast(ss_item_sk#
(28) Scan parquet default.date_dim
Output [3]: [d_date_sk#21, d_year#22, d_moy#23]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), GreaterThanOrEqual(d_moy,9), LessThanOrEqual(d_moy,12), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -206,7 +206,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(33) Scan parquet default.store_returns
Output [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
ReadSchema: struct
@@ -254,7 +254,7 @@ Arguments: [sr_customer_sk#27 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST], f
(44) Scan parquet default.catalog_sales
Output [4]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_quantity#35]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -268,7 +268,7 @@ Condition : ((isnotnull(cs_bill_customer_sk#33) AND isnotnull(cs_item_sk#34)) AN
(47) Scan parquet default.date_dim
Output [2]: [d_date_sk#36, d_year#37]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29/explain.txt
index 9de2335c2d493..a45526688a30a 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29/explain.txt
@@ -56,7 +56,7 @@ TakeOrderedAndProject (52)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, ss_quantity#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct
@@ -70,7 +70,7 @@ Condition : ((((isnotnull(ss_item_sk#2) AND isnotnull(ss_customer_sk#3)) AND isn
(4) Scan parquet default.store_returns
Output [5]: [sr_returned_date_sk#7, sr_item_sk#8, sr_customer_sk#9, sr_ticket_number#10, sr_return_quantity#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
ReadSchema: struct
@@ -97,7 +97,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, s
(10) Scan parquet default.catalog_sales
Output [4]: [cs_sold_date_sk#13, cs_bill_customer_sk#14, cs_item_sk#15, cs_quantity#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
@@ -124,7 +124,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_r
(16) Scan parquet default.date_dim
Output [3]: [d_date_sk#18, d_year#19, d_moy#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,9), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -155,7 +155,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_re
(23) Scan parquet default.date_dim
Output [3]: [d_date_sk#22, d_year#23, d_moy#24]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), GreaterThanOrEqual(d_moy,9), LessThanOrEqual(d_moy,12), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -186,7 +186,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_returned_date_sk#7, s
(30) Scan parquet default.date_dim
Output [2]: [d_date_sk#26, d_year#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -217,7 +217,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_return_quantity#11, c
(37) Scan parquet default.store
Output [3]: [s_store_sk#29, s_store_id#30, s_store_name#31]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct
@@ -244,7 +244,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#6, sr_return_quantity#11, c
(43) Scan parquet default.item
Output [3]: [i_item_sk#33, i_item_id#34, i_item_desc#35]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3.sf100/explain.txt
index 6ebca702beb34..afe7c682b55e8 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3.sf100/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct
@@ -39,7 +39,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2))
(4) Scan parquet default.item
Output [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manufact_id#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,128), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -70,7 +70,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4,
(11) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_year#10, d_moy#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), EqualTo(d_moy,11), IsNotNull(d_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3/explain.txt
index 3dccfeaf633a8..bec06d146863d 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q3/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
(1) Scan parquet default.date_dim
Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), EqualTo(d_moy,11), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3]
(5) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct
@@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales
(11) Scan parquet default.item
Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manufact_id#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,128), IsNotNull(i_item_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30.sf100/explain.txt
index 636d2569b0575..c338e5ce6f5d5 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30.sf100/explain.txt
@@ -63,7 +63,7 @@ TakeOrderedAndProject (59)
(1) Scan parquet default.web_returns
Output [4]: [wr_returned_date_sk#1, wr_returning_customer_sk#2, wr_returning_addr_sk#3, wr_return_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_returning_addr_sk), IsNotNull(wr_returning_customer_sk)]
ReadSchema: struct
@@ -77,7 +77,7 @@ Condition : ((isnotnull(wr_returned_date_sk#1) AND isnotnull(wr_returning_addr_s
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#5, d_year#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -116,7 +116,7 @@ Arguments: [wr_returning_addr_sk#3 ASC NULLS FIRST], false, 0
(13) Scan parquet default.customer_address
Output [2]: [ca_address_sk#9, ca_state#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state)]
ReadSchema: struct
@@ -169,7 +169,7 @@ Condition : isnotnull(ctr_total_return#18)
(24) Scan parquet default.web_returns
Output [4]: [wr_returned_date_sk#1, wr_returning_customer_sk#2, wr_returning_addr_sk#3, wr_return_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_returning_addr_sk)]
ReadSchema: struct
@@ -272,7 +272,7 @@ Input [5]: [ctr_customer_sk#16, ctr_state#17, ctr_total_return#18, (CAST(avg(ctr
(46) Scan parquet default.customer
Output [14]: [c_customer_sk#33, c_customer_id#34, c_current_addr_sk#35, c_salutation#36, c_first_name#37, c_last_name#38, c_preferred_cust_flag#39, c_birth_day#40, c_birth_month#41, c_birth_year#42, c_birth_country#43, c_login#44, c_email_address#45, c_last_review_date#46]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -286,7 +286,7 @@ Condition : (isnotnull(c_customer_sk#33) AND isnotnull(c_current_addr_sk#35))
(49) Scan parquet default.customer_address
Output [2]: [ca_address_sk#9, ca_state#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30/explain.txt
index 8b899ff2cb2da..fffcc5ca30173 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q30/explain.txt
@@ -57,7 +57,7 @@ TakeOrderedAndProject (53)
(1) Scan parquet default.web_returns
Output [4]: [wr_returned_date_sk#1, wr_returning_customer_sk#2, wr_returning_addr_sk#3, wr_return_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_returning_addr_sk), IsNotNull(wr_returning_customer_sk)]
ReadSchema: struct
@@ -71,7 +71,7 @@ Condition : ((isnotnull(wr_returned_date_sk#1) AND isnotnull(wr_returning_addr_s
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#5, d_year#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -102,7 +102,7 @@ Input [5]: [wr_returned_date_sk#1, wr_returning_customer_sk#2, wr_returning_addr
(11) Scan parquet default.customer_address
Output [2]: [ca_address_sk#8, ca_state#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state)]
ReadSchema: struct
@@ -151,7 +151,7 @@ Condition : isnotnull(ctr_total_return#17)
(21) Scan parquet default.web_returns
Output [4]: [wr_returned_date_sk#1, wr_returning_customer_sk#2, wr_returning_addr_sk#3, wr_return_amt#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_returning_addr_sk)]
ReadSchema: struct
@@ -242,7 +242,7 @@ Input [5]: [ctr_customer_sk#15, ctr_state#16, ctr_total_return#17, (CAST(avg(ctr
(40) Scan parquet default.customer
Output [14]: [c_customer_sk#31, c_customer_id#32, c_current_addr_sk#33, c_salutation#34, c_first_name#35, c_last_name#36, c_preferred_cust_flag#37, c_birth_day#38, c_birth_month#39, c_birth_year#40, c_birth_country#41, c_login#42, c_email_address#43, c_last_review_date#44]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct
@@ -269,7 +269,7 @@ Input [16]: [ctr_customer_sk#15, ctr_total_return#17, c_customer_sk#31, c_custom
(46) Scan parquet default.customer_address
Output [2]: [ca_address_sk#8, ca_state#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt
index 919bf08024030..d3b013660ba28 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt
@@ -123,7 +123,7 @@
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk)]
ReadSchema: struct
@@ -137,7 +137,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2))
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#4, d_year#5, d_qoy#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,3), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -172,7 +172,7 @@ Arguments: [ss_addr_sk#2 ASC NULLS FIRST], false, 0
(12) Scan parquet default.customer_address
Output [2]: [ca_address_sk#9, ca_county#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_county)]
ReadSchema: struct
@@ -221,7 +221,7 @@ Results [2]: [ca_county#10, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3))
(22) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk)]
ReadSchema: struct
@@ -235,7 +235,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2))
(25) Scan parquet default.date_dim
Output [3]: [d_date_sk#17, d_year#18, d_qoy#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -317,7 +317,7 @@ Input [4]: [ca_county#10, store_sales#16, ca_county#23, store_sales#28]
(43) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk)]
ReadSchema: struct
@@ -331,7 +331,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2))
(46) Scan parquet default.date_dim
Output [3]: [d_date_sk#30, d_year#31, d_qoy#32]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,1), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -413,7 +413,7 @@ Input [6]: [store_sales#16, ca_county#23, store_sales#28, ca_county#36, d_year#3
(64) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk)]
ReadSchema: struct
@@ -481,7 +481,7 @@ Results [2]: [ca_county#51, MakeDecimal(sum(UnscaledValue(ws_ext_sales_price#45)
(79) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk)]
ReadSchema: struct
@@ -562,7 +562,7 @@ Input [4]: [ca_county#51, web_sales#56, ca_county#62, web_sales#67]
(97) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31/explain.txt
index 1b3d2d7cc2f0b..0ed0929fa882c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31/explain.txt
@@ -103,7 +103,7 @@
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk)]
ReadSchema: struct
@@ -117,7 +117,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2))
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#4, d_year#5, d_qoy#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,1), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -144,7 +144,7 @@ Input [6]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3, d_date_sk#4,
(10) Scan parquet default.customer_address
Output [2]: [ca_address_sk#8, ca_county#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_county)]
ReadSchema: struct
@@ -189,7 +189,7 @@ Results [3]: [ca_county#9, d_year#5, MakeDecimal(sum(UnscaledValue(ss_ext_sales_
(19) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk)]
ReadSchema: struct
@@ -203,7 +203,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2))
(22) Scan parquet default.date_dim
Output [3]: [d_date_sk#16, d_year#17, d_qoy#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -269,7 +269,7 @@ Join condition: None
(36) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_addr_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk)]
ReadSchema: struct
@@ -283,7 +283,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2))
(39) Scan parquet default.date_dim
Output [3]: [d_date_sk#28, d_year#29, d_qoy#30]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,3), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -353,7 +353,7 @@ Input [7]: [ca_county#9, d_year#5, store_sales#15, ca_county#21, store_sales#26,
(54) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#40, ws_bill_addr_sk#41, ws_ext_sales_price#42]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk)]
ReadSchema: struct
@@ -418,7 +418,7 @@ Join condition: None
(68) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#40, ws_bill_addr_sk#41, ws_ext_sales_price#42]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk)]
ReadSchema: struct
@@ -487,7 +487,7 @@ Input [9]: [ca_county#9, d_year#5, store_sales#15, store_sales#26, store_sales#3
(83) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#40, ws_bill_addr_sk#41, ws_ext_sales_price#42]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32.sf100/explain.txt
index d08f3d6ede6f0..7affdeabd6f7f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32.sf100/explain.txt
@@ -35,7 +35,7 @@ CollectLimit (31)
(1) Scan parquet default.item
Output [2]: [i_item_sk#1, i_manufact_id#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,977), IsNotNull(i_item_sk)]
ReadSchema: struct
@@ -57,7 +57,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(6) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#4, cs_item_sk#5, cs_ext_discount_amt#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)]
ReadSchema: struct
@@ -71,7 +71,7 @@ Condition : (isnotnull(cs_sold_date_sk#4) AND isnotnull(cs_item_sk#5))
(9) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_date#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-01-27), LessThanOrEqual(d_date,2000-04-26), IsNotNull(d_date_sk)]
ReadSchema: struct
@@ -137,7 +137,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(23) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#4, cs_item_sk#5, cs_ext_discount_amt#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_ext_discount_amt), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32/explain.txt
index a4589f00b84a3..27f93fd7a9807 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q32/explain.txt @@ -35,7 +35,7 @@ CollectLimit (31) (1) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_discount_amt#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_ext_discount_amt), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -49,7 +49,7 @@ Condition : ((isnotnull(cs_item_sk#2) AND isnotnull(cs_ext_discount_amt#3)) AND (4) Scan parquet default.item Output [2]: [i_item_sk#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,977), IsNotNull(i_item_sk)] ReadSchema: struct @@ -80,7 +80,7 @@ Input [4]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_discount_amt#3, i_item_sk#4] (11) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_discount_amt#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -94,7 +94,7 @@ Condition : (isnotnull(cs_sold_date_sk#1) AND isnotnull(cs_item_sk#2)) (14) Scan parquet default.date_dim Output [2]: [d_date_sk#7, d_date#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-01-27), LessThanOrEqual(d_date,2000-04-26), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt index 044f552befdd8..8185680b58670 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt @@ -71,7 +71,7 @@ TakeOrderedAndProject (67) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#3)) AND isno (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] 
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,5), IsNotNull(d_date_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, (11) Scan parquet default.item Output [2]: [i_item_sk#9, i_manufact_id#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -130,7 +130,7 @@ Condition : isnotnull(i_item_sk#9) (14) Scan parquet default.item Output [2]: [i_category#11, i_manufact_id#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Electronics)] ReadSchema: struct @@ -170,7 +170,7 @@ Input [5]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, i_item_sk#9, i_man (23) Scan parquet default.customer_address Output [2]: [ca_address_sk#15, ca_gmt_offset#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -219,7 +219,7 @@ Results [2]: [i_manufact_id#10, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price (33) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_addr_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -287,7 +287,7 @@ Results [2]: [i_manufact_id#10, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price (48) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk), IsNotNull(ws_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33/explain.txt index 78227141b3a2a..8d1558a01cfde 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33/explain.txt @@ -71,7 +71,7 @@ TakeOrderedAndProject (67) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location 
[not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#3)) AND isno (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,5), IsNotNull(d_date_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, (11) Scan parquet default.customer_address Output [2]: [ca_address_sk#9, ca_gmt_offset#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -147,7 +147,7 @@ Input [4]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, ca_address_sk#9] (18) Scan parquet default.item Output [2]: [i_item_sk#12, i_manufact_id#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -161,7 +161,7 @@ Condition : isnotnull(i_item_sk#12) (21) Scan parquet default.item Output [2]: [i_category#14, i_manufact_id#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Electronics)] ReadSchema: struct @@ -219,7 +219,7 @@ Results [2]: [i_manufact_id#13, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price (33) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_addr_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -287,7 +287,7 @@ Results [2]: [i_manufact_id#13, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price (48) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk), IsNotNull(ws_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt index 
b17257a890db2..17bb0e7e71d27 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt @@ -43,7 +43,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -57,7 +57,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#4)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_dom#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(And(GreaterThanOrEqual(d_dom,1),LessThanOrEqual(d_dom,3)),And(GreaterThanOrEqual(d_dom,25),LessThanOrEqual(d_dom,28))), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -88,7 +88,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s (11) Scan parquet default.store Output [2]: [s_store_sk#10, s_county#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_county), EqualTo(s_county,Williamson County), IsNotNull(s_store_sk)] ReadSchema: struct @@ -119,7 +119,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, (18) Scan parquet default.household_demographics Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -180,7 +180,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (31) Scan parquet default.customer Output [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt index 3183f43c67433..18f465caea20d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt @@ -40,7 
+40,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -54,7 +54,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#4)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_dom#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(And(GreaterThanOrEqual(d_dom,1),LessThanOrEqual(d_dom,3)),And(GreaterThanOrEqual(d_dom,25),LessThanOrEqual(d_dom,28))), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s (11) Scan parquet default.store Output [2]: [s_store_sk#10, s_county#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_county), EqualTo(s_county,Williamson County), IsNotNull(s_store_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, (18) Scan parquet default.household_demographics Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -169,7 +169,7 @@ Condition : ((cnt#22 >= 15) AND (cnt#22 <= 20)) (29) Scan parquet default.customer Output [5]: [c_customer_sk#23, c_salutation#24, c_first_name#25, c_last_name#26, c_preferred_cust_flag#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35.sf100/explain.txt index 527e77d7a7afc..01f6c35a2178d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35.sf100/explain.txt @@ -64,7 +64,7 @@ TakeOrderedAndProject (60) (1) Scan parquet default.customer Output [3]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -86,7 +86,7 @@ Arguments: [c_customer_sk#3 ASC NULLS FIRST], false, 0 (6) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#7, ss_customer_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -100,7 +100,7 @@ Condition : isnotnull(ss_sold_date_sk#7) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_qoy#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_qoy), EqualTo(d_year,2002), LessThan(d_qoy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -144,7 +144,7 @@ Join condition: None (19) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#14, ws_bill_customer_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -183,7 +183,7 @@ Join condition: None (28) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#17, cs_ship_customer_sk#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -238,7 +238,7 @@ Arguments: [c_current_addr_sk#5 ASC NULLS FIRST], false, 0 (41) Scan parquet default.customer_address Output [2]: [ca_address_sk#21, ca_state#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -277,7 +277,7 @@ Arguments: [c_current_cdemo_sk#4 ASC NULLS FIRST], false, 0 (50) Scan parquet default.customer_demographics Output [6]: [cd_demo_sk#25, cd_gender#26, cd_marital_status#27, cd_dep_count#28, cd_dep_employed_count#29, cd_dep_college_count#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35/explain.txt index cb958fdb8abf4..5370b6872abf6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35/explain.txt 
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q35/explain.txt @@ -53,7 +53,7 @@ TakeOrderedAndProject (49) (1) Scan parquet default.customer Output [3]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -67,7 +67,7 @@ Condition : (isnotnull(c_current_addr_sk#5) AND isnotnull(c_current_cdemo_sk#4)) (4) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#6, ss_customer_sk#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Condition : isnotnull(ss_sold_date_sk#6) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_year#9, d_qoy#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_qoy), EqualTo(d_year,2002), LessThan(d_qoy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -121,7 +121,7 @@ Join condition: None (16) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#13, ws_bill_customer_sk#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -156,7 +156,7 @@ Join condition: None (24) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#16, cs_ship_customer_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -199,7 +199,7 @@ Input [5]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5, exists#2 (34) Scan parquet default.customer_address Output [2]: [ca_address_sk#19, ca_state#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -226,7 +226,7 @@ Input [4]: [c_current_cdemo_sk#4, c_current_addr_sk#5, ca_address_sk#19, ca_stat (40) Scan parquet default.customer_demographics Output [6]: [cd_demo_sk#22, cd_gender#23, cd_marital_status#24, cd_dep_count#25, cd_dep_employed_count#26, cd_dep_college_count#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36.sf100/explain.txt index 195f45feeba50..d6dfb6e7785c1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36.sf100/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -50,7 +50,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) AND isno (4) Scan parquet default.date_dim Output [2]: [d_date_sk#6, d_year#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4 (11) Scan parquet default.store Output [2]: [s_store_sk#9, s_state#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -112,7 +112,7 @@ Input [5]: [ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5, (18) Scan parquet default.item Output [3]: [i_item_sk#12, i_class#13, i_category#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36/explain.txt index 1f86dd127f4b6..73174b7351002 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q36/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -50,7 +50,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) AND isno (4) Scan parquet default.date_dim Output [2]: [d_date_sk#6, d_year#7] 
Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4 (11) Scan parquet default.item Output [3]: [i_item_sk#9, i_class#10, i_category#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -108,7 +108,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5, (17) Scan parquet default.store Output [2]: [s_store_sk#13, s_state#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37.sf100/explain.txt index fc783877fa6cf..a6b76c7618123 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37.sf100/explain.txt @@ -35,7 +35,7 @@ TakeOrderedAndProject (31) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,68.00), In(i_manufact_id, [677,940,694,808]), IsNotNull(i_item_sk)] ReadSchema: struct @@ -57,7 +57,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.inventory Output [3]: [inv_date_sk#7, inv_item_sk#8, inv_quantity_on_hand#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_quantity_on_hand), GreaterThanOrEqual(inv_quantity_on_hand,100), LessThanOrEqual(inv_quantity_on_hand,500), IsNotNull(inv_item_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -84,7 +84,7 @@ Input [6]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, inv_date (12) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_date#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-02-01), LessThanOrEqual(d_date,2000-04-01), IsNotNull(d_date_sk)] ReadSchema: struct @@ -123,7 +123,7 @@ Arguments: [i_item_sk#1 ASC NULLS 
FIRST], false, 0 (21) Scan parquet default.catalog_sales Output [1]: [cs_item_sk#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37/explain.txt index ccfd03ea05be7..896d6c571b404 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q37/explain.txt @@ -32,7 +32,7 @@ TakeOrderedAndProject (28) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,68.00), In(i_manufact_id, [677,940,694,808]), IsNotNull(i_item_sk)] ReadSchema: struct @@ -50,7 +50,7 @@ Input [5]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, i_manufa (5) Scan parquet default.inventory Output [3]: [inv_date_sk#6, inv_item_sk#7, inv_quantity_on_hand#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_quantity_on_hand), GreaterThanOrEqual(inv_quantity_on_hand,100), LessThanOrEqual(inv_quantity_on_hand,500), IsNotNull(inv_item_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, inv_date (12) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_date#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-02-01), LessThanOrEqual(d_date,2000-04-01), IsNotNull(d_date_sk)] ReadSchema: struct @@ -112,7 +112,7 @@ Input [6]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, inv_date (19) Scan parquet default.catalog_sales Output [1]: [cs_item_sk#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt index 552f0ee332789..92b9c26825e51 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt @@ -72,7 +72,7 @@ CollectLimit (68) (1) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#1, 
ss_customer_sk#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -86,7 +86,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_customer_sk#2)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#3, d_date#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -125,7 +125,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (13) Scan parquet default.customer Output [3]: [c_customer_sk#8, c_first_name#9, c_last_name#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -164,7 +164,7 @@ Arguments: [coalesce(c_last_name#10, ) ASC NULLS FIRST, isnull(c_last_name#10) A (22) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#13, cs_bill_customer_sk#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)] ReadSchema: struct @@ -245,7 +245,7 @@ Join condition: None (40) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#23, ws_bill_customer_sk#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt index 397b36be79de8..09ab60c7cf651 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt @@ -59,7 +59,7 @@ CollectLimit (55) (1) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#1, ss_customer_sk#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -73,7 +73,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_customer_sk#2)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#3, d_date#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Input [4]: [ss_sold_date_sk#1, ss_customer_sk#2, d_date_sk#3, d_date#4] (11) Scan parquet default.customer Output [3]: [c_customer_sk#7, c_first_name#8, c_last_name#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [5]: [ss_customer_sk#2, d_date#4, c_customer_sk#7, c_first_name#8, c_last_ (17) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#11, cs_bill_customer_sk#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)] ReadSchema: struct @@ -196,7 +196,7 @@ Join condition: None (31) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#20, ws_bill_customer_sk#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a.sf100/explain.txt index 1b3539cf1fd05..3cf87e9bf2eaf 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a.sf100/explain.txt @@ -59,7 +59,7 @@ (1) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -73,7 +73,7 @@ Condition : ((isnotnull(inv_item_sk#2) AND isnotnull(inv_warehouse_sk#3)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,1), IsNotNull(d_date_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Input [6]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha (11) Scan parquet default.item Output [1]: [i_item_sk#9] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [5]: [inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4, d_moy#7, (17) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#11, w_warehouse_name#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -192,7 +192,7 @@ Arguments: [i_item_sk#9 ASC NULLS FIRST, w_warehouse_sk#11 ASC NULLS FIRST], fal (30) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -206,7 +206,7 @@ Condition : ((isnotnull(inv_item_sk#2) AND isnotnull(inv_warehouse_sk#3)) AND is (33) Scan parquet default.date_dim Output [3]: [d_date_sk#31, d_year#32, d_moy#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a/explain.txt index d4b0a075d18bc..b2cc849c603c8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39a/explain.txt @@ -56,7 +56,7 @@ (1) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Condition : ((isnotnull(inv_item_sk#2) AND isnotnull(inv_warehouse_sk#3)) AND is (4) Scan parquet default.item Output [1]: [i_item_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -97,7 +97,7 @@ Input [5]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha (10) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#7, w_warehouse_name#8] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -124,7 +124,7 @@ Input [6]: [inv_date_sk#1, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_sk (16) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,1), IsNotNull(d_date_sk)] ReadSchema: struct @@ -181,7 +181,7 @@ Input [5]: [w_warehouse_sk#7, i_item_sk#5, d_moy#12, stdev#27, mean#28] (28) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -219,7 +219,7 @@ Input [6]: [inv_date_sk#1, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_sk (37) Scan parquet default.date_dim Output [3]: [d_date_sk#33, d_year#34, d_moy#35] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b.sf100/explain.txt index 61b613f52891f..b73801a52bb37 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b.sf100/explain.txt @@ -59,7 +59,7 @@ (1) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -73,7 +73,7 @@ Condition : ((isnotnull(inv_item_sk#2) AND isnotnull(inv_warehouse_sk#3)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,1), IsNotNull(d_date_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Input [6]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha (11) Scan parquet default.item Output [1]: [i_item_sk#9] 
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -131,7 +131,7 @@ Input [5]: [inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4, d_moy#7,
(17) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#11, w_warehouse_name#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct

@@ -192,7 +192,7 @@ Arguments: [i_item_sk#9 ASC NULLS FIRST, w_warehouse_sk#11 ASC NULLS FIRST], fal
(30) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)]
ReadSchema: struct

@@ -206,7 +206,7 @@ Condition : ((isnotnull(inv_item_sk#2) AND isnotnull(inv_warehouse_sk#3)) AND is
(33) Scan parquet default.date_dim
Output [3]: [d_date_sk#31, d_year#32, d_moy#33]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,2), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b/explain.txt
index 5a963825eab79..92c2d5ed4700b 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q39b/explain.txt
@@ -56,7 +56,7 @@
(1) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)]
ReadSchema: struct

@@ -70,7 +70,7 @@ Condition : ((isnotnull(inv_item_sk#2) AND isnotnull(inv_warehouse_sk#3)) AND is
(4) Scan parquet default.item
Output [1]: [i_item_sk#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -97,7 +97,7 @@ Input [5]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha
(10) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#7, w_warehouse_name#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct

@@ -124,7 +124,7 @@ Input [6]: [inv_date_sk#1, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_sk
(16) Scan parquet default.date_dim
Output [3]: [d_date_sk#10, d_year#11, d_moy#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,1), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -181,7 +181,7 @@ Input [5]: [w_warehouse_sk#7, i_item_sk#5, d_moy#12, stdev#27, mean#28]
(28) Scan parquet default.inventory
Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
PushedFilters: [IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)]
ReadSchema: struct

@@ -219,7 +219,7 @@ Input [6]: [inv_date_sk#1, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_sk
(37) Scan parquet default.date_dim
Output [3]: [d_date_sk#33, d_year#34, d_moy#35]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,2), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4.sf100/explain.txt
index 5ec3d359386b4..fb5e991043bf8 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4.sf100/explain.txt
@@ -130,7 +130,7 @@ TakeOrderedAndProject (126)
(1) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_ext_discount_amt#3, ss_ext_sales_price#4, ss_ext_wholesale_cost#5, ss_ext_list_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -144,7 +144,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1))
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_year#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -179,7 +179,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
(12) Scan parquet default.customer
Output [8]: [c_customer_sk#11, c_customer_id#12, c_first_name#13, c_last_name#14, c_preferred_cust_flag#15, c_birth_country#16, c_login#17, c_email_address#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -240,7 +240,7 @@ Arguments: [customer_id#26 ASC NULLS FIRST], false, 0
(25) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_ext_discount_amt#3, ss_ext_sales_price#4, ss_ext_wholesale_cost#5, ss_ext_list_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -254,7 +254,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1))
(28) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_year#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -336,7 +336,7 @@ Join condition: None
(46) Scan parquet default.catalog_sales
Output [6]: [cs_sold_date_sk#46, cs_bill_customer_sk#47, cs_ext_discount_amt#48, cs_ext_sales_price#49, cs_ext_wholesale_cost#50, cs_ext_list_price#51]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -429,7 +429,7 @@ Input [12]: [customer_id#26, year_total#27, customer_id#37, customer_first_name#
(67) Scan parquet default.catalog_sales
Output [6]: [cs_sold_date_sk#46, cs_bill_customer_sk#47, cs_ext_discount_amt#48, cs_ext_sales_price#49, cs_ext_wholesale_cost#50, cs_ext_list_price#51]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -514,7 +514,7 @@ Input [13]: [customer_id#26, year_total#27, customer_id#37, customer_first_name#
(86) Scan parquet default.web_sales
Output [6]: [ws_sold_date_sk#74, ws_bill_customer_sk#75, ws_ext_discount_amt#76, ws_ext_sales_price#77, ws_ext_wholesale_cost#78, ws_ext_list_price#79]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -607,7 +607,7 @@ Input [12]: [customer_id#26, customer_id#37, customer_first_name#38, customer_la
(107) Scan parquet default.web_sales
Output [6]: [ws_sold_date_sk#74, ws_bill_customer_sk#75, ws_ext_discount_amt#76, ws_ext_sales_price#77, ws_ext_wholesale_cost#78, ws_ext_list_price#79]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4/explain.txt
index a58e27871b94e..79a7abdcff5db 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q4/explain.txt
@@ -111,7 +111,7 @@ TakeOrderedAndProject (107)
(1) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -125,7 +125,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
(4) Scan parquet default.store_sales
Output [6]: [ss_sold_date_sk#9, ss_customer_sk#10, ss_ext_discount_amt#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_ext_list_price#14]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -152,7 +152,7 @@ Input [14]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_
(10) Scan parquet default.date_dim
Output [2]: [d_date_sk#16, d_year#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -201,7 +201,7 @@ Condition : (isnotnull(year_total#26) AND (year_total#26 > 0.000000))
(20) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -227,7 +227,7 @@ Input [14]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_
(26) Scan parquet default.date_dim
Output [2]: [d_date_sk#16, d_year#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -281,7 +281,7 @@ Join condition: None
(37) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -295,7 +295,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
(40) Scan parquet default.catalog_sales
Output [6]: [cs_sold_date_sk#43, cs_bill_customer_sk#44, cs_ext_discount_amt#45, cs_ext_sales_price#46, cs_ext_wholesale_cost#47, cs_ext_list_price#48]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -373,7 +373,7 @@ Input [12]: [customer_id#25, year_total#26, customer_id#34, customer_first_name#
(57) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -442,7 +442,7 @@ Input [13]: [customer_id#25, year_total#26, customer_id#34, customer_first_name#
(72) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -456,7 +456,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
(75) Scan parquet default.web_sales
Output [6]: [ws_sold_date_sk#70, ws_bill_customer_sk#71, ws_ext_discount_amt#72, ws_ext_sales_price#73, ws_ext_wholesale_cost#74, ws_ext_list_price#75]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -534,7 +534,7 @@ Input [12]: [customer_id#25, customer_id#34, customer_first_name#35, customer_la
(92) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40.sf100/explain.txt
index a23b64f179db5..c55f264a9d858 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40.sf100/explain.txt
@@ -39,7 +39,7 @@ TakeOrderedAndProject (35)
(1) Scan parquet default.catalog_sales
Output [5]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_item_sk#3, cs_order_number#4, cs_sales_price#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -61,7 +61,7 @@ Arguments: [cs_order_number#4 ASC NULLS FIRST, cs_item_sk#3 ASC NULLS FIRST], fa
(6) Scan parquet default.catalog_returns
Output [3]: [cr_item_sk#7, cr_order_number#8, cr_refunded_cash#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)]
ReadSchema: struct

@@ -92,7 +92,7 @@ Input [8]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_item_sk#3, cs_order_number#
(13) Scan parquet default.item
Output [3]: [i_item_sk#11, i_item_id#12, i_current_price#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,0.99), LessThanOrEqual(i_current_price,1.49), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -123,7 +123,7 @@ Input [7]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_item_sk#3, cs_sales_price#5
(20) Scan parquet default.date_dim
Output [2]: [d_date_sk#15, d_date#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-02-10), LessThanOrEqual(d_date,2000-04-10), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -150,7 +150,7 @@ Input [7]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_sales_price#5, cr_refunded_
(26) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#18, w_state#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40/explain.txt
index cf2f3f895781e..21046922327d3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q40/explain.txt
@@ -36,7 +36,7 @@ TakeOrderedAndProject (32)
(1) Scan parquet default.catalog_sales
Output [5]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_item_sk#3, cs_order_number#4, cs_sales_price#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -50,7 +50,7 @@ Condition : ((isnotnull(cs_warehouse_sk#2) AND isnotnull(cs_item_sk#3)) AND isno
(4) Scan parquet default.catalog_returns
Output [3]: [cr_item_sk#6, cr_order_number#7, cr_refunded_cash#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Input [8]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_item_sk#3, cs_order_number#
(10) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#10, w_state#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct

@@ -104,7 +104,7 @@ Input [7]: [cs_sold_date_sk#1, cs_warehouse_sk#2, cs_item_sk#3, cs_sales_price#5
(16) Scan parquet default.item
Output [3]: [i_item_sk#13, i_item_id#14, i_current_price#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,0.99), LessThanOrEqual(i_current_price,1.49), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -135,7 +135,7 @@ Input [7]: [cs_sold_date_sk#1, cs_item_sk#3, cs_sales_price#5, cr_refunded_cash#
(23) Scan parquet default.date_dim
Output [2]: [d_date_sk#17, d_date#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-02-10), LessThanOrEqual(d_date,2000-04-10), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt
index 7581089a6014c..c5eb50e25d82c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt
@@ -24,7 +24,7 @@ TakeOrderedAndProject (20)
(1) Scan parquet default.item
Output [3]: [i_manufact_id#1, i_manufact#2, i_product_name#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), GreaterThanOrEqual(i_manufact_id,738), LessThanOrEqual(i_manufact_id,778), IsNotNull(i_manufact)]
ReadSchema: struct

@@ -42,7 +42,7 @@ Input [3]: [i_manufact_id#1, i_manufact#2, i_product_name#3]
(5) Scan parquet default.item
Output [5]: [i_category#4, i_manufact#2, i_size#5, i_color#6, i_units#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [Or(Or(And(EqualTo(i_category,Women),Or(And(And(Or(EqualTo(i_color,powder),EqualTo(i_color,khaki)),Or(EqualTo(i_units,Ounce),EqualTo(i_units,Oz))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large))),And(And(Or(EqualTo(i_color,brown),EqualTo(i_color,honeydew)),Or(EqualTo(i_units,Bunch),EqualTo(i_units,Ton))),Or(EqualTo(i_size,N/A),EqualTo(i_size,small))))),And(EqualTo(i_category,Men),Or(And(And(Or(EqualTo(i_color,floral),EqualTo(i_color,deep)),Or(EqualTo(i_units,N/A),EqualTo(i_units,Dozen))),Or(EqualTo(i_size,petite),EqualTo(i_size,large))),And(And(Or(EqualTo(i_color,light),EqualTo(i_color,cornflower)),Or(EqualTo(i_units,Box),EqualTo(i_units,Pound))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large)))))),Or(And(EqualTo(i_category,Women),Or(And(And(Or(EqualTo(i_color,midnight),EqualTo(i_color,snow)),Or(EqualTo(i_units,Pallet),EqualTo(i_units,Gross))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large))),And(And(Or(EqualTo(i_color,cyan),EqualTo(i_color,papaya)),Or(EqualTo(i_units,Cup),EqualTo(i_units,Dram))),Or(EqualTo(i_size,N/A),EqualTo(i_size,small))))),And(EqualTo(i_category,Men),Or(And(And(Or(EqualTo(i_color,orange),EqualTo(i_color,frosted)),Or(EqualTo(i_units,Each),EqualTo(i_units,Tbl))),Or(EqualTo(i_size,petite),EqualTo(i_size,large))),And(And(Or(EqualTo(i_color,forest),EqualTo(i_color,ghost)),Or(EqualTo(i_units,Lb),EqualTo(i_units,Bundle))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large))))))), IsNotNull(i_manufact)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt
index 9357ba4edb9d5..c5eb50e25d82c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt
@@ -24,7 +24,7 @@ TakeOrderedAndProject (20)
(1) Scan parquet default.item
Output [3]: [i_manufact_id#1, i_manufact#2, i_product_name#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), GreaterThanOrEqual(i_manufact_id,738), LessThanOrEqual(i_manufact_id,778), IsNotNull(i_manufact)]
ReadSchema: struct

@@ -42,7 +42,7 @@ Input [3]: [i_manufact_id#1, i_manufact#2, i_product_name#3]
(5) Scan parquet default.item
Output [5]: [i_category#4, i_manufact#2, i_size#5, i_color#6, i_units#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [Or(Or(And(EqualTo(i_category,Women),Or(And(And(Or(EqualTo(i_color,powder),EqualTo(i_color,khaki)),Or(EqualTo(i_units,Ounce),EqualTo(i_units,Oz))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large))),And(And(Or(EqualTo(i_color,brown),EqualTo(i_color,honeydew)),Or(EqualTo(i_units,Bunch),EqualTo(i_units,Ton))),Or(EqualTo(i_size,N/A),EqualTo(i_size,small))))),And(EqualTo(i_category,Men),Or(And(And(Or(EqualTo(i_color,floral),EqualTo(i_color,deep)),Or(EqualTo(i_units,N/A),EqualTo(i_units,Dozen))),Or(EqualTo(i_size,petite),EqualTo(i_size,large))),And(And(Or(EqualTo(i_color,light),EqualTo(i_color,cornflower)),Or(EqualTo(i_units,Box),EqualTo(i_units,Pound))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large)))))),Or(And(EqualTo(i_category,Women),Or(And(And(Or(EqualTo(i_color,midnight),EqualTo(i_color,snow)),Or(EqualTo(i_units,Pallet),EqualTo(i_units,Gross))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large))),And(And(Or(EqualTo(i_color,cyan),EqualTo(i_color,papaya)),Or(EqualTo(i_units,Cup),EqualTo(i_units,Dram))),Or(EqualTo(i_size,N/A),EqualTo(i_size,small))))),And(EqualTo(i_category,Men),Or(And(And(Or(EqualTo(i_color,orange),EqualTo(i_color,frosted)),Or(EqualTo(i_units,Each),EqualTo(i_units,Tbl))),Or(EqualTo(i_size,petite),EqualTo(i_size,large))),And(And(Or(EqualTo(i_color,forest),EqualTo(i_color,ghost)),Or(EqualTo(i_units,Lb),EqualTo(i_units,Bundle))),Or(EqualTo(i_size,medium),EqualTo(i_size,extra large))))))), IsNotNull(i_manufact)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42.sf100/explain.txt
index 5f7bfb6280592..dfd840a823327 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42.sf100/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct

@@ -39,7 +39,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2))
(4) Scan parquet default.item
Output [4]: [i_item_sk#4, i_category_id#5, i_category#6, i_manager_id#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -70,7 +70,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4,
(11) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_year#10, d_moy#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42/explain.txt
index 92ac42b19d74f..f7732f3c867e4 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q42/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
(1) Scan parquet default.date_dim
Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3]
(5) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
ReadSchema: struct

@@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales
(11) Scan parquet default.item
Output [4]: [i_item_sk#8, i_category_id#9, i_category#10, i_manager_id#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43.sf100/explain.txt
index e83fdf2d7a348..952873e60626e 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43.sf100/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
(1) Scan parquet default.date_dim
Output [3]: [d_date_sk#1, d_year#2, d_day_name#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -47,7 +47,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(6) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#5, ss_store_sk#6, ss_sales_price#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct

@@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_day_name#3, ss_sold_date_sk#5, ss_store_sk#6, ss_sale
(11) Scan parquet default.store
Output [4]: [s_store_sk#8, s_store_id#9, s_store_name#10, s_gmt_offset#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_gmt_offset), EqualTo(s_gmt_offset,-5.00), IsNotNull(s_store_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43/explain.txt
index 760cc9a4a0ac0..8f3ef7fee3410 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q43/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
(1) Scan parquet default.date_dim
Output [3]: [d_date_sk#1, d_year#2, d_day_name#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_day_name#3]
(5) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#4, ss_store_sk#5, ss_sales_price#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct

@@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_day_name#3, ss_sold_date_sk#4, ss_store_sk#5, ss_sale
(11) Scan parquet default.store
Output [4]: [s_store_sk#8, s_store_id#9, s_store_name#10, s_gmt_offset#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_gmt_offset), EqualTo(s_gmt_offset,-5.00), IsNotNull(s_store_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44.sf100/explain.txt
index 685946db4c185..096bd45f06bca 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44.sf100/explain.txt
@@ -40,7 +40,7 @@ TakeOrderedAndProject (36)
(1) Scan parquet default.store_sales
Output [3]: [ss_item_sk#1, ss_store_sk#2, ss_net_profit#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_store_sk), EqualTo(ss_store_sk,4)]
ReadSchema: struct

@@ -155,7 +155,7 @@ Input [4]: [item_sk#10, rnk#16, item_sk#20, rnk#24]
(27) Scan parquet default.item
Output [2]: [i_item_sk#26, i_product_name#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -210,7 +210,7 @@ Subquery:1 Hosting operator id = 8 Hosting Expression = Subquery scalar-subquery
(37) Scan parquet default.store_sales
Output [3]: [ss_addr_sk#33, ss_store_sk#2, ss_net_profit#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_store_sk), EqualTo(ss_store_sk,4), IsNull(ss_addr_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44/explain.txt
index 5a3b2c4dd7843..096bd45f06bca 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q44/explain.txt
@@ -40,7 +40,7 @@ TakeOrderedAndProject (36)
(1) Scan parquet default.store_sales
Output [3]: [ss_item_sk#1, ss_store_sk#2, ss_net_profit#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_store_sk), EqualTo(ss_store_sk,4)]
ReadSchema: struct

@@ -155,7 +155,7 @@ Input [4]: [item_sk#10, rnk#16, item_sk#20, rnk#24]
(27) Scan parquet default.item
Output [2]: [i_item_sk#26, i_product_name#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -210,7 +210,7 @@ Subquery:1 Hosting operator id = 8 Hosting Expression = Subquery scalar-subquery
(37) Scan parquet default.store_sales
Output [3]: [ss_addr_sk#33, ss_store_sk#2, ss_net_profit#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_store_sk), EqualTo(ss_store_sk,4), IsNull(ss_addr_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt
index ab8f08566f79e..0232d56ab7481 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt
@@ -50,7 +50,7 @@ TakeOrderedAndProject (46)
(1) Scan parquet default.web_sales
Output [4]: [ws_sold_date_sk#2, ws_item_sk#3, ws_bill_customer_sk#4, ws_sales_price#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)]
ReadSchema: struct

@@ -64,7 +64,7 @@ Condition : ((isnotnull(ws_bill_customer_sk#4) AND isnotnull(ws_sold_date_sk#2))
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#6, d_year#7, d_qoy#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -95,7 +95,7 @@ Input [5]: [ws_sold_date_sk#2, ws_item_sk#3, ws_bill_customer_sk#4, ws_sales_pri
(11) Scan parquet default.item
Output [2]: [i_item_sk#10, i_item_id#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -130,7 +130,7 @@ Arguments: [ws_bill_customer_sk#4 ASC NULLS FIRST], false, 0
(19) Scan parquet default.customer_address
Output [3]: [ca_address_sk#14, ca_city#15, ca_zip#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -152,7 +152,7 @@ Arguments: [ca_address_sk#14 ASC NULLS FIRST], false, 0
(24) Scan parquet default.customer
Output [2]: [c_customer_sk#18, c_current_addr_sk#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct

@@ -200,7 +200,7 @@ Input [6]: [ws_bill_customer_sk#4, ws_sales_price#5, i_item_id#11, ca_city#15, c
(35) Scan parquet default.item
Output [2]: [i_item_sk#10, i_item_id#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_item_sk, [2,3,5,7,11,13,17,19,23,29])]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45/explain.txt
index 91a98423d049e..f556e9b0d86b9 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45/explain.txt
@@ -44,7 +44,7 @@ TakeOrderedAndProject (40)
(1) Scan parquet default.web_sales
Output [4]: [ws_sold_date_sk#2, ws_item_sk#3, ws_bill_customer_sk#4, ws_sales_price#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)]
ReadSchema: struct

@@ -58,7 +58,7 @@ Condition : ((isnotnull(ws_bill_customer_sk#4) AND isnotnull(ws_sold_date_sk#2))
(4) Scan parquet default.customer
Output [2]: [c_customer_sk#6, c_current_addr_sk#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct

@@ -85,7 +85,7 @@ Input [6]: [ws_sold_date_sk#2, ws_item_sk#3, ws_bill_customer_sk#4, ws_sales_pri
(10) Scan parquet default.customer_address
Output [3]: [ca_address_sk#9, ca_city#10, ca_zip#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -112,7 +112,7 @@ Input [7]: [ws_sold_date_sk#2, ws_item_sk#3, ws_sales_price#5, c_current_addr_sk
(16) Scan parquet default.date_dim
Output [3]: [d_date_sk#13, d_year#14, d_qoy#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -143,7 +143,7 @@ Input [6]: [ws_sold_date_sk#2, ws_item_sk#3, ws_sales_price#5, ca_city#10, ca_zi
(23) Scan parquet default.item
Output [2]: [i_item_sk#17, i_item_id#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -170,7 +170,7 @@ Input [6]: [ws_item_sk#3, ws_sales_price#5, ca_city#10, ca_zip#11, i_item_sk#17,
(29) Scan parquet default.item
Output [2]: [i_item_sk#17, i_item_id#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_item_sk, [2,3,5,7,11,13,17,19,23,29])]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46.sf100/explain.txt
index a5120c1fe1c27..d4c3d754860b1 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46.sf100/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
(1) Scan parquet default.customer
Output [4]: [c_customer_sk#1, c_current_addr_sk#2, c_first_name#3, c_last_name#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Arguments: [c_current_addr_sk#2 ASC NULLS FIRST], false, 0
(6) Scan parquet default.customer_address
Output [2]: [ca_address_sk#6, ca_city#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
ReadSchema: struct

@@ -116,7 +116,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0
(15) Scan parquet default.store_sales
Output [8]: [ss_sold_date_sk#10, ss_customer_sk#11, ss_hdemo_sk#12, ss_addr_sk#13, ss_store_sk#14, ss_ticket_number#15, ss_coupon_amt#16, ss_net_profit#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)]
ReadSchema: struct

@@ -130,7 +130,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#10) AND isnotnull(ss_store_sk#14)) AND
(18) Scan parquet default.date_dim
Output [3]: [d_date_sk#18, d_year#19, d_dow#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [In(d_dow, [6,0]), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -161,7 +161,7 @@ Input [9]: [ss_sold_date_sk#10, ss_customer_sk#11, ss_hdemo_sk#12, ss_addr_sk#13
(25) Scan parquet default.store
Output [2]: [s_store_sk#22, s_city#23]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [In(s_city, [Fairview,Midway]), IsNotNull(s_store_sk)]
ReadSchema: struct

@@ -192,7 +192,7 @@ Input [8]: [ss_customer_sk#11, ss_hdemo_sk#12, ss_addr_sk#13, ss_store_sk#14, ss
(32) Scan parquet default.household_demographics
Output [3]: [hd_demo_sk#25, hd_dep_count#26, hd_vehicle_count#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [Or(EqualTo(hd_dep_count,4),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46/explain.txt
index b2c3231d872e6..a6a3c3c463d27 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q46/explain.txt
@@ -47,7 +47,7 @@ TakeOrderedAndProject (43)
(1) Scan parquet default.store_sales
Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)]
ReadSchema: struct

@@ -61,7 +61,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#5)) AND i
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_year#10, d_dow#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [In(d_dow, [6,0]), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -92,7 +92,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss
(11) Scan parquet default.store
Output [2]: [s_store_sk#13, s_city#14]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [In(s_city, [Fairview,Midway]), IsNotNull(s_store_sk)]
ReadSchema: struct

@@ -123,7 +123,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic
(18) Scan parquet default.household_demographics
Output [3]: [hd_demo_sk#16, hd_dep_count#17, hd_vehicle_count#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [Or(EqualTo(hd_dep_count,4),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)]
ReadSchema: struct

@@ -154,7 +154,7 @@ Input [7]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_ticket_number#6, s
(25) Scan parquet default.customer_address
Output [2]: [ca_address_sk#20, ca_city#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
ReadSchema: struct

@@ -199,7 +199,7 @@ Results [5]: [ss_ticket_number#6, ss_customer_sk#2, ca_city#21 AS bought_city#30
(34) Scan parquet default.customer
Output [4]: [c_customer_sk#33, c_current_addr_sk#34, c_first_name#35, c_last_name#36]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47.sf100/explain.txt
index 563e94e6a0950..70261f5d29bd5 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47.sf100/explain.txt
@@ -62,7 +62,7 @@ TakeOrderedAndProject (58)
(1) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct

@@ -76,7 +76,7 @@ Condition : ((isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) AND isno
(4) Scan parquet default.date_dim
Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -103,8 +103,8 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4, d_
(10) Scan parquet default.store
Output [3]: [s_store_sk#9, s_store_name#10, s_company_name#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
-PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_name), IsNotNull(s_company_name)]
+Location [not included in comparison]/{warehouse_dir}/store]
+PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_company_name), IsNotNull(s_store_name)]
ReadSchema: struct

(11) ColumnarToRow [codegen id : 2]

@@ -112,7 +112,7 @@ Input [3]: [s_store_sk#9, s_store_name#10, s_company_name#11]
(12) Filter [codegen id : 2]
Input [3]: [s_store_sk#9, s_store_name#10, s_company_name#11]
-Condition : ((isnotnull(s_store_sk#9) AND isnotnull(s_store_name#10)) AND isnotnull(s_company_name#11))
+Condition : ((isnotnull(s_store_sk#9) AND isnotnull(s_company_name#11)) AND isnotnull(s_store_name#10))

(13) BroadcastExchange
Input [3]: [s_store_sk#9, s_store_name#10, s_company_name#11]

@@ -138,7 +138,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(18) Scan parquet default.item
Output [3]: [i_item_sk#14, i_brand#15, i_category#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category), IsNotNull(i_brand)]
ReadSchema: struct

@@ -214,7 +214,7 @@ Arguments: [rank(d_year#6, d_moy#7) windowspecdefinition(i_category#16, i_brand#
(35) Filter [codegen id : 12]
Input [9]: [i_category#16, i_brand#15, s_store_name#10, s_company_name#11, d_year#6, d_moy#7, sum_sales#22, avg_monthly_sales#25, rn#27]
-Condition : (((((isnotnull(avg_monthly_sales#25) AND isnotnull(d_year#6)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#25 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#25 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#22 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#27))
+Condition : (((((isnotnull(d_year#6) AND isnotnull(avg_monthly_sales#25)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#25 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#25 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#22 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#27))

(36) Exchange
Input [9]: [i_category#16, i_brand#15, s_store_name#10, s_company_name#11, d_year#6, d_moy#7, sum_sales#22, avg_monthly_sales#25, rn#27]

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47/explain.txt
index 7c0a66d64972b..478e717998aac 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q47/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
(1) Scan parquet default.item
Output [3]: [i_item_sk#1, i_brand#2, i_category#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category), IsNotNull(i_brand)]
ReadSchema: struct

@@ -69,7 +69,7 @@ Condition : ((isnotnull(i_item_sk#1) AND isnotnull(i_category#3)) AND isnotnull(
(4) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#4, ss_item_sk#5, ss_store_sk#6, ss_sales_price#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct

@@ -96,7 +96,7 @@ Input [7]: [i_item_sk#1, i_brand#2, i_category#3, ss_sold_date_sk#4, ss_item_sk#
(10) Scan parquet default.date_dim
Output [3]: [d_date_sk#9, d_year#10, d_moy#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -123,8 +123,8 @@ Input [8]: [i_brand#2, i_category#3, ss_sold_date_sk#4, ss_store_sk#6, ss_sales_
(16) Scan parquet default.store
Output [3]: [s_store_sk#13, s_store_name#14, s_company_name#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
-PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_name), IsNotNull(s_company_name)]
+Location [not included in comparison]/{warehouse_dir}/store]
+PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_company_name), IsNotNull(s_store_name)]
ReadSchema: struct

(17) ColumnarToRow [codegen id : 3]

@@ -132,7 +132,7 @@ Input [3]: [s_store_sk#13, s_store_name#14, s_company_name#15]
(18) Filter [codegen id : 3]
Input [3]: [s_store_sk#13, s_store_name#14, s_company_name#15]
-Condition : ((isnotnull(s_store_sk#13) AND isnotnull(s_store_name#14)) AND isnotnull(s_company_name#15))
+Condition : ((isnotnull(s_store_sk#13) AND isnotnull(s_company_name#15)) AND isnotnull(s_store_name#14))

(19) BroadcastExchange
Input [3]: [s_store_sk#13, s_store_name#14, s_company_name#15]

@@ -195,7 +195,7 @@ Arguments: [rank(d_year#10, d_moy#11) windowspecdefinition(i_category#3, i_brand
(32) Filter [codegen id : 23]
Input [9]: [i_category#3, i_brand#2, s_store_name#14, s_company_name#15, d_year#10, d_moy#11, sum_sales#21, avg_monthly_sales#24, rn#26]
-Condition : (((((isnotnull(d_year#10) AND isnotnull(avg_monthly_sales#24)) AND (d_year#10 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26))
+Condition : (((((isnotnull(avg_monthly_sales#24) AND isnotnull(d_year#10)) AND (d_year#10 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26))

(33) ReusedExchange [Reuses operator id: 23]
Output [7]: [i_category#27, i_brand#28, s_store_name#29, s_company_name#30, d_year#31, d_moy#32, sum#33]

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48.sf100/explain.txt
index 8f547d5b3c846..f317bcdf13844 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48.sf100/explain.txt
@@ -36,7 +36,7 @@
(1) Scan parquet default.store_sales
Output [7]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_addr_sk#3, ss_store_sk#4, ss_quantity#5, ss_sales_price#6, ss_net_profit#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_sold_date_sk), Or(Or(And(GreaterThanOrEqual(ss_sales_price,100.00),LessThanOrEqual(ss_sales_price,150.00)),And(GreaterThanOrEqual(ss_sales_price,50.00),LessThanOrEqual(ss_sales_price,100.00))),And(GreaterThanOrEqual(ss_sales_price,150.00),LessThanOrEqual(ss_sales_price,200.00))), Or(Or(And(GreaterThanOrEqual(ss_net_profit,0.00),LessThanOrEqual(ss_net_profit,2000.00)),And(GreaterThanOrEqual(ss_net_profit,150.00),LessThanOrEqual(ss_net_profit,3000.00))),And(GreaterThanOrEqual(ss_net_profit,50.00),LessThanOrEqual(ss_net_profit,25000.00)))]
ReadSchema: struct

@@ -50,7 +50,7 @@ Condition : (((((isnotnull(ss_store_sk#4) AND isnotnull(ss_cdemo_sk#2)) AND isno
(4) Scan parquet default.store
Output [1]: [s_store_sk#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Input [8]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_addr_sk#3, ss_store_sk#4, ss_qu
(10) Scan parquet default.customer_demographics
Output [3]: [cd_demo_sk#10, cd_marital_status#11, cd_education_status#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,4 yr Degree)),And(EqualTo(cd_marital_status,D),EqualTo(cd_education_status,2 yr Degree))),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College)))]
ReadSchema: struct

@@ -104,7 +104,7 @@ Input [9]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_addr_sk#3, ss_quantity#5, ss_sa
(16) Scan parquet default.customer_address
Output [3]: [ca_address_sk#14, ca_state#15, ca_country#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [CO,OH,TX]),In(ca_state, [OR,MN,KY])),In(ca_state, [VA,CA,MS]))]
ReadSchema: struct

@@ -135,7 +135,7 @@ Input [6]: [ss_sold_date_sk#1, ss_addr_sk#3, ss_quantity#5, ss_net_profit#7, ca_
(23) Scan parquet default.date_dim
Output [2]: [d_date_sk#18, d_year#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48/explain.txt
index 79fadac22c93c..f317bcdf13844 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q48/explain.txt
@@ -36,7 +36,7 @@
(1) Scan parquet default.store_sales
Output [7]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_addr_sk#3, ss_store_sk#4, ss_quantity#5, ss_sales_price#6, ss_net_profit#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_sold_date_sk), Or(Or(And(GreaterThanOrEqual(ss_sales_price,100.00),LessThanOrEqual(ss_sales_price,150.00)),And(GreaterThanOrEqual(ss_sales_price,50.00),LessThanOrEqual(ss_sales_price,100.00))),And(GreaterThanOrEqual(ss_sales_price,150.00),LessThanOrEqual(ss_sales_price,200.00))), Or(Or(And(GreaterThanOrEqual(ss_net_profit,0.00),LessThanOrEqual(ss_net_profit,2000.00)),And(GreaterThanOrEqual(ss_net_profit,150.00),LessThanOrEqual(ss_net_profit,3000.00))),And(GreaterThanOrEqual(ss_net_profit,50.00),LessThanOrEqual(ss_net_profit,25000.00)))]
ReadSchema: struct

@@ -50,7 +50,7 @@ Condition : (((((isnotnull(ss_store_sk#4) AND isnotnull(ss_cdemo_sk#2)) AND isno
(4) Scan parquet default.store
Output [1]: [s_store_sk#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Input [8]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_addr_sk#3, ss_store_sk#4, ss_qu
(10) Scan parquet default.customer_demographics
Output [3]: [cd_demo_sk#10, cd_marital_status#11, cd_education_status#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,4 yr Degree)),And(EqualTo(cd_marital_status,D),EqualTo(cd_education_status,2 yr Degree))),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College)))]
ReadSchema: struct

@@ -104,7 +104,7 @@ Input [9]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_addr_sk#3, ss_quantity#5, ss_sa
(16) Scan parquet default.customer_address
Output [3]: [ca_address_sk#14, ca_state#15, ca_country#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [CO,OH,TX]),In(ca_state, [OR,MN,KY])),In(ca_state, [VA,CA,MS]))]
ReadSchema: struct

@@ -135,7 +135,7 @@ Input [6]: [ss_sold_date_sk#1, ss_addr_sk#3, ss_quantity#5, ss_net_profit#7, ca_
(23) Scan parquet default.date_dim
Output [2]: [d_date_sk#18, d_year#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49.sf100/explain.txt
index ea0845178bd87..2303cae3725e5 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49.sf100/explain.txt
@@ -91,8 +91,8 @@ TakeOrderedAndProject (87)
(1) Scan parquet default.web_sales
Output [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
-PushedFilters: [IsNotNull(ws_net_profit), IsNotNull(ws_net_paid), IsNotNull(ws_quantity), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_order_number), IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
+PushedFilters: [IsNotNull(ws_quantity), IsNotNull(ws_net_paid), IsNotNull(ws_net_profit), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_order_number), IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

(2) ColumnarToRow [codegen id : 2]

@@ -100,7 +100,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3,
ws_quantity#4, w (3) Filter [codegen id : 2] Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] -Condition : ((((((((isnotnull(ws_net_profit#6) AND isnotnull(ws_net_paid#5)) AND isnotnull(ws_quantity#4)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_sold_date_sk#1)) +Condition : ((((((((isnotnull(ws_quantity#4) AND isnotnull(ws_net_paid#5)) AND isnotnull(ws_net_profit#6)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_sold_date_sk#1)) (4) Project [codegen id : 2] Output [5]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5] @@ -109,7 +109,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (5) Scan parquet default.date_dim Output [3]: [d_date_sk#7, d_year#8, d_moy#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,12), IsNotNull(d_date_sk)] ReadSchema: struct @@ -148,8 +148,8 @@ Arguments: [cast(ws_order_number#3 as bigint) ASC NULLS FIRST, cast(ws_item_sk#2 (14) Scan parquet default.web_returns Output [4]: [wr_item_sk#12, wr_order_number#13, wr_return_quantity#14, wr_return_amt#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns] -PushedFilters: [IsNotNull(wr_return_amt), GreaterThan(wr_return_amt,10000.00), IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] +Location [not included in comparison]/{warehouse_dir}/web_returns] +PushedFilters: [IsNotNull(wr_return_amt), GreaterThan(wr_return_amt,10000.00), IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct (15) ColumnarToRow [codegen id : 4] @@ -157,7 +157,7 @@ Input [4]: [wr_item_sk#12, wr_order_number#13, wr_return_quantity#14, wr_return_ (16) Filter [codegen id : 4] Input [4]: [wr_item_sk#12, wr_order_number#13, wr_return_quantity#14, wr_return_amt#15] -Condition : (((isnotnull(wr_return_amt#15) AND (wr_return_amt#15 > 10000.00)) AND isnotnull(wr_order_number#13)) AND isnotnull(wr_item_sk#12)) +Condition : (((isnotnull(wr_return_amt#15) AND (wr_return_amt#15 > 10000.00)) AND isnotnull(wr_item_sk#12)) AND isnotnull(wr_order_number#13)) (17) Exchange Input [4]: [wr_item_sk#12, wr_order_number#13, wr_return_quantity#14, wr_return_amt#15] @@ -225,8 +225,8 @@ Input [5]: [item#34, return_ratio#35, currency_ratio#36, return_rank#38, currenc (31) Scan parquet default.catalog_sales Output [6]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#44, cs_net_paid#45, cs_net_profit#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] -PushedFilters: [IsNotNull(cs_net_profit), IsNotNull(cs_quantity), IsNotNull(cs_net_paid), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_item_sk), IsNotNull(cs_order_number), IsNotNull(cs_sold_date_sk)] +Location [not included in 
comparison]/{warehouse_dir}/catalog_sales] +PushedFilters: [IsNotNull(cs_net_profit), IsNotNull(cs_net_paid), IsNotNull(cs_quantity), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_item_sk), IsNotNull(cs_order_number), IsNotNull(cs_sold_date_sk)] ReadSchema: struct (32) ColumnarToRow [codegen id : 12] @@ -234,7 +234,7 @@ Input [6]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#4 (33) Filter [codegen id : 12] Input [6]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#44, cs_net_paid#45, cs_net_profit#46] -Condition : ((((((((isnotnull(cs_net_profit#46) AND isnotnull(cs_quantity#44)) AND isnotnull(cs_net_paid#45)) AND (cs_net_profit#46 > 1.00)) AND (cs_net_paid#45 > 0.00)) AND (cs_quantity#44 > 0)) AND isnotnull(cs_item_sk#42)) AND isnotnull(cs_order_number#43)) AND isnotnull(cs_sold_date_sk#41)) +Condition : ((((((((isnotnull(cs_net_profit#46) AND isnotnull(cs_net_paid#45)) AND isnotnull(cs_quantity#44)) AND (cs_net_profit#46 > 1.00)) AND (cs_net_paid#45 > 0.00)) AND (cs_quantity#44 > 0)) AND isnotnull(cs_item_sk#42)) AND isnotnull(cs_order_number#43)) AND isnotnull(cs_sold_date_sk#41)) (34) Project [codegen id : 12] Output [5]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#44, cs_net_paid#45] @@ -263,7 +263,7 @@ Arguments: [cs_order_number#43 ASC NULLS FIRST, cs_item_sk#42 ASC NULLS FIRST], (40) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#48, cr_order_number#49, cr_return_quantity#50, cr_return_amount#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_return_amount), GreaterThan(cr_return_amount,10000.00), IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -340,8 +340,8 @@ Input [5]: [item#70, return_ratio#71, currency_ratio#72, return_rank#74, currenc (57) Scan parquet default.store_sales Output [6]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity#80, ss_net_paid#81, ss_net_profit#82] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] -PushedFilters: [IsNotNull(ss_net_profit), IsNotNull(ss_quantity), IsNotNull(ss_net_paid), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_net_profit), IsNotNull(ss_net_paid), IsNotNull(ss_quantity), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk)] ReadSchema: struct (58) ColumnarToRow [codegen id : 22] @@ -349,7 +349,7 @@ Input [6]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity# (59) Filter [codegen id : 22] Input [6]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity#80, ss_net_paid#81, ss_net_profit#82] -Condition : ((((((((isnotnull(ss_net_profit#82) AND isnotnull(ss_quantity#80)) AND isnotnull(ss_net_paid#81)) AND (ss_net_profit#82 > 1.00)) AND (ss_net_paid#81 > 0.00)) AND (ss_quantity#80 > 0)) AND isnotnull(ss_item_sk#78)) AND 
isnotnull(ss_ticket_number#79)) AND isnotnull(ss_sold_date_sk#77)) +Condition : ((((((((isnotnull(ss_net_profit#82) AND isnotnull(ss_net_paid#81)) AND isnotnull(ss_quantity#80)) AND (ss_net_profit#82 > 1.00)) AND (ss_net_paid#81 > 0.00)) AND (ss_quantity#80 > 0)) AND isnotnull(ss_item_sk#78)) AND isnotnull(ss_ticket_number#79)) AND isnotnull(ss_sold_date_sk#77)) (60) Project [codegen id : 22] Output [5]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity#80, ss_net_paid#81] @@ -378,7 +378,7 @@ Arguments: [cast(ss_ticket_number#79 as bigint) ASC NULLS FIRST, cast(ss_item_sk (66) Scan parquet default.store_returns Output [4]: [sr_item_sk#84, sr_ticket_number#85, sr_return_quantity#86, sr_return_amt#87] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_return_amt), GreaterThan(sr_return_amt,10000.00), IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49/explain.txt index 883661759e561..909041abb5862 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q49/explain.txt @@ -82,8 +82,8 @@ TakeOrderedAndProject (78) (1) Scan parquet default.web_sales Output [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] -PushedFilters: [IsNotNull(ws_quantity), IsNotNull(ws_net_profit), IsNotNull(ws_net_paid), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/web_sales] +PushedFilters: [IsNotNull(ws_net_paid), IsNotNull(ws_quantity), IsNotNull(ws_net_profit), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_sold_date_sk)] ReadSchema: struct (2) ColumnarToRow [codegen id : 3] @@ -91,7 +91,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (3) Filter [codegen id : 3] Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] -Condition : ((((((((isnotnull(ws_quantity#4) AND isnotnull(ws_net_profit#6)) AND isnotnull(ws_net_paid#5)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_sold_date_sk#1)) +Condition : ((((((((isnotnull(ws_net_paid#5) AND isnotnull(ws_quantity#4)) AND isnotnull(ws_net_profit#6)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_sold_date_sk#1)) (4) Project [codegen id : 3] Output [5]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5] @@ -100,7 +100,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, 
ws_order_number#3, ws_quantity#4, w (5) Scan parquet default.web_returns Output [4]: [wr_item_sk#7, wr_order_number#8, wr_return_quantity#9, wr_return_amt#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_return_amt), GreaterThan(wr_return_amt,10000.00), IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct @@ -127,7 +127,7 @@ Input [9]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (11) Scan parquet default.date_dim Output [3]: [d_date_sk#12, d_year#13, d_moy#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,12), IsNotNull(d_date_sk)] ReadSchema: struct @@ -204,7 +204,7 @@ Input [5]: [item#33, return_ratio#34, currency_ratio#35, return_rank#37, currenc (28) Scan parquet default.catalog_sales Output [6]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#43, cs_net_paid#44, cs_net_profit#45] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_net_paid), IsNotNull(cs_net_profit), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_item_sk), IsNotNull(cs_order_number), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -222,8 +222,8 @@ Input [6]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#4 (32) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#46, cr_order_number#47, cr_return_quantity#48, cr_return_amount#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns] -PushedFilters: [IsNotNull(cr_return_amount), GreaterThan(cr_return_amount,10000.00), IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] +PushedFilters: [IsNotNull(cr_return_amount), GreaterThan(cr_return_amount,10000.00), IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct (33) ColumnarToRow [codegen id : 8] @@ -231,7 +231,7 @@ Input [4]: [cr_item_sk#46, cr_order_number#47, cr_return_quantity#48, cr_return_ (34) Filter [codegen id : 8] Input [4]: [cr_item_sk#46, cr_order_number#47, cr_return_quantity#48, cr_return_amount#49] -Condition : (((isnotnull(cr_return_amount#49) AND (cr_return_amount#49 > 10000.00)) AND isnotnull(cr_order_number#47)) AND isnotnull(cr_item_sk#46)) +Condition : (((isnotnull(cr_return_amount#49) AND (cr_return_amount#49 > 10000.00)) AND isnotnull(cr_item_sk#46)) AND isnotnull(cr_order_number#47)) (35) BroadcastExchange Input [4]: [cr_item_sk#46, cr_order_number#47, cr_return_quantity#48, cr_return_amount#49] @@ -307,8 +307,8 @@ Input [5]: [item#68, return_ratio#69, currency_ratio#70, return_rank#72, currenc (51) Scan parquet default.store_sales Output [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity#78, 
ss_net_paid#79, ss_net_profit#80] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] -PushedFilters: [IsNotNull(ss_net_profit), IsNotNull(ss_quantity), IsNotNull(ss_net_paid), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_net_paid), IsNotNull(ss_quantity), IsNotNull(ss_net_profit), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk)] ReadSchema: struct (52) ColumnarToRow [codegen id : 17] @@ -316,7 +316,7 @@ Input [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity# (53) Filter [codegen id : 17] Input [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity#78, ss_net_paid#79, ss_net_profit#80] -Condition : ((((((((isnotnull(ss_net_profit#80) AND isnotnull(ss_quantity#78)) AND isnotnull(ss_net_paid#79)) AND (ss_net_profit#80 > 1.00)) AND (ss_net_paid#79 > 0.00)) AND (ss_quantity#78 > 0)) AND isnotnull(ss_ticket_number#77)) AND isnotnull(ss_item_sk#76)) AND isnotnull(ss_sold_date_sk#75)) +Condition : ((((((((isnotnull(ss_net_paid#79) AND isnotnull(ss_quantity#78)) AND isnotnull(ss_net_profit#80)) AND (ss_net_profit#80 > 1.00)) AND (ss_net_paid#79 > 0.00)) AND (ss_quantity#78 > 0)) AND isnotnull(ss_item_sk#76)) AND isnotnull(ss_ticket_number#77)) AND isnotnull(ss_sold_date_sk#75)) (54) Project [codegen id : 17] Output [5]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity#78, ss_net_paid#79] @@ -325,8 +325,8 @@ Input [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity# (55) Scan parquet default.store_returns Output [4]: [sr_item_sk#81, sr_ticket_number#82, sr_return_quantity#83, sr_return_amt#84] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] -PushedFilters: [IsNotNull(sr_return_amt), GreaterThan(sr_return_amt,10000.00), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] +Location [not included in comparison]/{warehouse_dir}/store_returns] +PushedFilters: [IsNotNull(sr_return_amt), GreaterThan(sr_return_amt,10000.00), IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct (56) ColumnarToRow [codegen id : 15] @@ -334,7 +334,7 @@ Input [4]: [sr_item_sk#81, sr_ticket_number#82, sr_return_quantity#83, sr_return (57) Filter [codegen id : 15] Input [4]: [sr_item_sk#81, sr_ticket_number#82, sr_return_quantity#83, sr_return_amt#84] -Condition : (((isnotnull(sr_return_amt#84) AND (sr_return_amt#84 > 10000.00)) AND isnotnull(sr_item_sk#81)) AND isnotnull(sr_ticket_number#82)) +Condition : (((isnotnull(sr_return_amt#84) AND (sr_return_amt#84 > 10000.00)) AND isnotnull(sr_ticket_number#82)) AND isnotnull(sr_item_sk#81)) (58) BroadcastExchange Input [4]: [sr_item_sk#81, sr_ticket_number#82, sr_return_quantity#83, sr_return_amt#84] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt index 034301643add7..55bd25c501294 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt @@ -85,7 +85,7 @@ TakeOrderedAndProject (81) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] @@ -102,7 +102,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (5) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_loss#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -122,7 +122,7 @@ Input [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_los (10) Scan parquet default.date_dim Output [2]: [d_date_sk#21, d_date#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-06), IsNotNull(d_date_sk)] ReadSchema: struct @@ -153,7 +153,7 @@ Input [7]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_lo (17) Scan parquet default.store Output [2]: [s_store_sk#24, s_store_id#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -198,7 +198,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(sales_price#7))#36,17,2) AS sales#40 (26) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs_net_profit#48] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk)] ReadSchema: struct @@ -216,7 +216,7 @@ Input [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs (30) Scan parquet default.catalog_returns Output [4]: [cr_returned_date_sk#55, cr_catalog_page_sk#56, cr_return_amount#57, cr_net_loss#58] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_catalog_page_sk)] ReadSchema: struct @@ -248,7 +248,7 @@ Input [7]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, ne (38) Scan parquet 
default.catalog_page Output [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_page] +Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct @@ -293,7 +293,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(sales_price#51))#77,17,2) AS sales#8 (47) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net_profit#89] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -311,7 +311,7 @@ Input [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net (51) Scan parquet default.web_returns Output [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk)] ReadSchema: struct @@ -333,7 +333,7 @@ Arguments: [wr_item_sk#97 ASC NULLS FIRST, wr_order_number#98 ASC NULLS FIRST], (56) Scan parquet default.web_sales Output [3]: [ws_item_sk#102, ws_web_site_sk#87, ws_order_number#103] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -378,7 +378,7 @@ Input [7]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_am (67) Scan parquet default.web_site Output [2]: [web_site_sk#111, web_site_id#112] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5/explain.txt index cbe5ed4a5b6aa..15f0cda0b5f9f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5/explain.txt @@ -82,7 +82,7 @@ TakeOrderedAndProject (78) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] @@ -99,7 +99,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (5) Scan parquet default.store_returns Output [4]: 
[sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_loss#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -119,7 +119,7 @@ Input [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_los (10) Scan parquet default.date_dim Output [2]: [d_date_sk#21, d_date#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-06), IsNotNull(d_date_sk)] ReadSchema: struct @@ -150,7 +150,7 @@ Input [7]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_lo (17) Scan parquet default.store Output [2]: [s_store_sk#24, s_store_id#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -195,7 +195,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(sales_price#7))#36,17,2) AS sales#40 (26) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs_net_profit#48] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk)] ReadSchema: struct @@ -213,7 +213,7 @@ Input [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs (30) Scan parquet default.catalog_returns Output [4]: [cr_returned_date_sk#55, cr_catalog_page_sk#56, cr_return_amount#57, cr_net_loss#58] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_catalog_page_sk)] ReadSchema: struct @@ -245,7 +245,7 @@ Input [7]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, ne (38) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_page] +Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct @@ -290,7 +290,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(sales_price#51))#77,17,2) AS sales#8 (47) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net_profit#89] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] 
PushedFilters: [IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -308,7 +308,7 @@ Input [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net (51) Scan parquet default.web_returns Output [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk)] ReadSchema: struct @@ -322,7 +322,7 @@ Condition : isnotnull(wr_returned_date_sk#96) (54) Scan parquet default.web_sales Output [3]: [ws_item_sk#101, ws_web_site_sk#87, ws_order_number#102] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -363,7 +363,7 @@ Input [7]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_am (64) Scan parquet default.web_site Output [2]: [web_site_sk#110, web_site_id#111] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt index 6327f03620f61..5438ebb4c0910 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt @@ -39,7 +39,7 @@ TakeOrderedAndProject (35) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -53,7 +53,7 @@ Condition : ((((isnotnull(ss_customer_sk#3) AND isnotnull(ss_item_sk#2)) AND isn (4) Scan parquet default.date_dim Output [1]: [d_date_sk#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -80,7 +80,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss (10) Scan parquet default.store Output [11]: [s_store_sk#8, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -115,7 +115,7 @@ Arguments: [cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (18) Scan parquet default.date_dim Output [3]: [d_date_sk#21, d_year#22, d_moy#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,8), IsNotNull(d_date_sk)] ReadSchema: struct @@ -137,7 +137,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (23) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt index 66e6608ce4a31..eac796a4eff48 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -50,7 +50,7 @@ Condition : ((((isnotnull(ss_item_sk#2) AND isnotnull(ss_customer_sk#3)) AND isn (4) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#6, sr_item_sk#7, sr_customer_sk#8, sr_ticket_number#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)] ReadSchema: struct @@ -77,7 +77,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss (10) Scan parquet default.store Output [11]: [s_store_sk#11, s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in 
comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Input [14]: [ss_sold_date_sk#1, ss_store_sk#4, sr_returned_date_sk#6, s_store_sk (16) Scan parquet default.date_dim Output [1]: [d_date_sk#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [13]: [ss_sold_date_sk#1, sr_returned_date_sk#6, s_store_name#12, s_compan (22) Scan parquet default.date_dim Output [3]: [d_date_sk#25, d_year#26, d_moy#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,8), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51.sf100/explain.txt index 6163643706c5d..b391d90a36b9b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51.sf100/explain.txt @@ -45,7 +45,7 @@ TakeOrderedAndProject (41) (1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -59,7 +59,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#4, d_date#5, d_month_seq#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -132,7 +132,7 @@ Arguments: [item_sk#12 ASC NULLS FIRST, d_date#5 ASC NULLS FIRST], false, 0 (20) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#17, ss_item_sk#18, ss_sales_price#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51/explain.txt index df98f23cd3db6..b391d90a36b9b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q51/explain.txt @@ -45,7 +45,7 @@ TakeOrderedAndProject (41) 
(1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -59,7 +59,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#4, d_date#5, d_month_seq#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -132,7 +132,7 @@ Arguments: [item_sk#12 ASC NULLS FIRST, d_date#5 ASC NULLS FIRST], false, 0 (20) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#17, ss_item_sk#18, ss_sales_price#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt index 0475abd0b38cf..d7a8c103285cb 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -39,7 +39,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) (4) Scan parquet default.item Output [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4, (11) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_moy#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52/explain.txt index 71d181aed2940..47235253f571a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52/explain.txt @@ -25,7 +25,7 @@ TakeOrderedAndProject (21) (1) Scan parquet default.date_dim Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3] (5) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales (11) Scan parquet default.item Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53.sf100/explain.txt index d08a3f5cda86b..f8a6d7de0c4ea 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53.sf100/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,reference,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -58,7 +58,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#11, ss_item_sk#12, ss_store_sk#13, ss_sales_price#14] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manufact_id#5, ss_sold_date_sk#11, ss_item_sk#12, ss_ (11) Scan parquet default.store Output [1]: [s_store_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -108,7 +108,7 @@ Input [5]: [i_manufact_id#5, ss_sold_date_sk#11, ss_store_sk#13, ss_sales_price# (17) Scan parquet default.date_dim Output [3]: [d_date_sk#17, d_month_seq#18, d_qoy#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_month_seq, [1200,1211,1205,1201,1206,1210,1207,1202,1209,1203,1208,1204]), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53/explain.txt index f230f2f140edf..249f74e73da2b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q53/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manufact_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,reference,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -54,7 +54,7 @@ Input [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manufact_id#5] (5) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#10, ss_item_sk#11, ss_store_sk#12, ss_sales_price#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manufact_id#5, ss_sold_date_sk#10, ss_item_sk#11, ss_ (11) Scan parquet default.date_dim Output [3]: [d_date_sk#15, d_month_seq#16, d_qoy#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] 
 PushedFilters: [In(d_month_seq, [1200,1211,1205,1201,1206,1210,1207,1202,1209,1203,1208,1204]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -112,7 +112,7 @@ Input [6]: [i_manufact_id#5, ss_sold_date_sk#10, ss_store_sk#12, ss_sales_price#
 (18) Scan parquet default.store
 Output [1]: [s_store_sk#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt
index 9ccda17a031c3..bdaf52e928d2e 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt
@@ -75,7 +75,7 @@ TakeOrderedAndProject (71)
 (1) Scan parquet default.catalog_sales
 Output [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)]
 ReadSchema: struct

@@ -93,7 +93,7 @@ Input [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3]
 (5) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)]
 ReadSchema: struct

@@ -113,7 +113,7 @@ Input [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9]
 (10) Scan parquet default.item
 Output [3]: [i_item_sk#13, i_class#14, i_category#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category), IsNotNull(i_class), EqualTo(i_category,Women), EqualTo(i_class,maternity), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -144,7 +144,7 @@ Input [4]: [sold_date_sk#4, customer_sk#5, item_sk#6, i_item_sk#13]
 (17) Scan parquet default.date_dim
 Output [3]: [d_date_sk#17, d_year#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -183,7 +183,7 @@ Arguments: [customer_sk#5 ASC NULLS FIRST], false, 0
 (26) Scan parquet default.customer
 Output [2]: [c_customer_sk#22, c_current_addr_sk#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

@@ -232,7 +232,7 @@ Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0
 (36) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -271,7 +271,7 @@ Arguments: [c_current_addr_sk#23 ASC NULLS FIRST], false, 0
 (45) Scan parquet default.customer_address
 Output [3]: [ca_address_sk#30, ca_county#31, ca_state#32]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state), IsNotNull(ca_county)]
 ReadSchema: struct

@@ -302,7 +302,7 @@ Input [7]: [c_customer_sk#22, c_current_addr_sk#23, ss_sold_date_sk#25, ss_ext_s
 (52) Scan parquet default.store
 Output [2]: [s_county#34, s_state#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_state), IsNotNull(s_county)]
 ReadSchema: struct

@@ -329,7 +329,7 @@ Input [7]: [c_customer_sk#22, ss_sold_date_sk#25, ss_ext_sales_price#27, ca_coun
 (58) Scan parquet default.date_dim
 Output [2]: [d_date_sk#17, d_month_seq#37]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -412,7 +412,7 @@ Subquery:1 Hosting operator id = 60 Hosting Expression = Subquery scalar-subquer
 (72) Scan parquet default.date_dim
 Output [3]: [d_month_seq#37, d_year#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,12)]
 ReadSchema: struct

@@ -458,7 +458,7 @@ Subquery:2 Hosting operator id = 60 Hosting Expression = Subquery scalar-subquer
 (79) Scan parquet default.date_dim
 Output [3]: [d_month_seq#37, d_year#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,12)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54/explain.txt
index f1c0861f61508..b7c9373a8b4ec 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54/explain.txt
@@ -68,7 +68,7 @@ TakeOrderedAndProject (64)
 (1) Scan parquet default.catalog_sales
 Output [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)]
 ReadSchema: struct

@@ -86,7 +86,7 @@ Input [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3]
 (5) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)]
 ReadSchema: struct

@@ -106,7 +106,7 @@ Input [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9]
 (10) Scan parquet default.item
 Output [3]: [i_item_sk#13, i_class#14, i_category#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category), IsNotNull(i_class), EqualTo(i_category,Women), EqualTo(i_class,maternity), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -137,7 +137,7 @@ Input [4]: [sold_date_sk#4, customer_sk#5, item_sk#6, i_item_sk#13]
 (17) Scan parquet default.date_dim
 Output [3]: [d_date_sk#17, d_year#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -168,7 +168,7 @@ Input [3]: [sold_date_sk#4, customer_sk#5, d_date_sk#17]
 (24) Scan parquet default.customer
 Output [2]: [c_customer_sk#21, c_current_addr_sk#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

@@ -213,7 +213,7 @@ Results [2]: [c_customer_sk#21, c_current_addr_sk#22]
 (33) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -240,7 +240,7 @@ Input [5]: [c_customer_sk#21, c_current_addr_sk#22, ss_sold_date_sk#25, ss_custo
 (39) Scan parquet default.customer_address
 Output [3]: [ca_address_sk#29, ca_county#30, ca_state#31]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_county), IsNotNull(ca_state)]
 ReadSchema: struct

@@ -267,7 +267,7 @@ Input [7]: [c_customer_sk#21, c_current_addr_sk#22, ss_sold_date_sk#25, ss_ext_s
 (45) Scan parquet default.store
 Output [2]: [s_county#33, s_state#34]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_state), IsNotNull(s_county)]
 ReadSchema: struct

@@ -294,7 +294,7 @@ Input [7]: [c_customer_sk#21, ss_sold_date_sk#25, ss_ext_sales_price#27, ca_coun
 (51) Scan parquet default.date_dim
 Output [2]: [d_date_sk#17, d_month_seq#36]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -377,7 +377,7 @@ Subquery:1 Hosting operator id = 53 Hosting Expression = Subquery scalar-subquer
 (65) Scan parquet default.date_dim
 Output [3]: [d_month_seq#36, d_year#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,12)]
 ReadSchema: struct

@@ -423,7 +423,7 @@ Subquery:2 Hosting operator id = 53 Hosting Expression = Subquery scalar-subquer
 (72) Scan parquet default.date_dim
 Output [3]: [d_month_seq#36, d_year#18, d_moy#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,12)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt
index 4a964a54cc37f..a1257cd292e48 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -39,7 +39,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2))
 (4) Scan parquet default.item
 Output [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,28), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -70,7 +70,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4,
 (11) Scan parquet default.date_dim
 Output [3]: [d_date_sk#9, d_year#10, d_moy#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55/explain.txt
index 8662e923d9af5..99c79d2040691 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55/explain.txt
@@ -25,7 +25,7 @@ TakeOrderedAndProject (21)
 (1) Scan parquet default.date_dim
 Output [3]: [d_date_sk#1, d_year#2, d_moy#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -43,7 +43,7 @@ Input [3]: [d_date_sk#1, d_year#2, d_moy#3]
 (5) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -70,7 +70,7 @@ Input [4]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_ext_sales_price#6]
 (11) Scan parquet default.item
 Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,28), IsNotNull(i_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56.sf100/explain.txt
index 480c3ecda13f3..d0d64721100c3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56.sf100/explain.txt
@@ -71,7 +71,7 @@ TakeOrderedAndProject (67)
 (1) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#3)) AND isno
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,2), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4,
 (11) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#9, ca_gmt_offset#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -147,7 +147,7 @@ Input [4]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, ca_address_sk#9]
 (18) Scan parquet default.item
 Output [2]: [i_item_sk#12, i_item_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -161,7 +161,7 @@ Condition : isnotnull(i_item_sk#12)
 (21) Scan parquet default.item
 Output [2]: [i_item_id#13, i_color#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [In(i_color, [slate,blanched,burnished])]
 ReadSchema: struct

@@ -219,7 +219,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))
 (33) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_addr_sk), IsNotNull(cs_item_sk)]
 ReadSchema: struct

@@ -287,7 +287,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#26)
 (48) Scan parquet default.web_sales
 Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk), IsNotNull(ws_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56/explain.txt
index ba02fc0bfcfd8..d0d64721100c3 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q56/explain.txt
@@ -71,7 +71,7 @@ TakeOrderedAndProject (67)
 (1) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#3)) AND isno
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,2), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4,
 (11) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#9, ca_gmt_offset#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -147,7 +147,7 @@ Input [4]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, ca_address_sk#9]
 (18) Scan parquet default.item
 Output [2]: [i_item_sk#12, i_item_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -161,7 +161,7 @@ Condition : isnotnull(i_item_sk#12)
 (21) Scan parquet default.item
 Output [2]: [i_item_id#13, i_color#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [In(i_color, [slate,blanched,burnished])]
 ReadSchema: struct

@@ -219,7 +219,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))
 (33) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_addr_sk), IsNotNull(cs_item_sk)]
 ReadSchema: struct

@@ -287,7 +287,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#26)
 (48) Scan parquet default.web_sales
 Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk), IsNotNull(ws_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57.sf100/explain.txt
index a5bd18b0d822f..9ed3db41b547c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57.sf100/explain.txt
@@ -62,7 +62,7 @@ TakeOrderedAndProject (58)
 (1) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#1, cs_call_center_sk#2, cs_item_sk#3, cs_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_call_center_sk)]
 ReadSchema: struct

@@ -76,7 +76,7 @@ Condition : ((isnotnull(cs_item_sk#3) AND isnotnull(cs_sold_date_sk#1)) AND isno
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -103,7 +103,7 @@ Input [7]: [cs_sold_date_sk#1, cs_call_center_sk#2, cs_item_sk#3, cs_sales_price
 (10) Scan parquet default.call_center
 Output [2]: [cc_call_center_sk#9, cc_name#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
 PushedFilters: [IsNotNull(cc_call_center_sk), IsNotNull(cc_name)]
 ReadSchema: struct

@@ -138,7 +138,7 @@ Arguments: [cs_item_sk#3 ASC NULLS FIRST], false, 0
 (18) Scan parquet default.item
 Output [3]: [i_item_sk#13, i_brand#14, i_category#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category), IsNotNull(i_brand)]
 ReadSchema: struct

@@ -214,7 +214,7 @@ Arguments: [rank(d_year#6, d_moy#7) windowspecdefinition(i_category#15, i_brand#

 (35) Filter [codegen id : 12]
 Input [8]: [i_category#15, i_brand#14, cc_name#10, d_year#6, d_moy#7, sum_sales#21, avg_monthly_sales#24, rn#26]
-Condition : (((((isnotnull(avg_monthly_sales#24) AND isnotnull(d_year#6)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26))
+Condition : (((((isnotnull(d_year#6) AND isnotnull(avg_monthly_sales#24)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26))

 (36) Exchange
 Input [8]: [i_category#15, i_brand#14, cc_name#10, d_year#6, d_moy#7, sum_sales#21, avg_monthly_sales#24, rn#26]

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57/explain.txt
index 5c8c93cee8107..4e1123185c96c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q57/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
 (1) Scan parquet default.item
 Output [3]: [i_item_sk#1, i_brand#2, i_category#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category), IsNotNull(i_brand)]
 ReadSchema: struct

@@ -69,7 +69,7 @@ Condition : ((isnotnull(i_item_sk#1) AND isnotnull(i_category#3)) AND isnotnull(
 (4) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#4, cs_call_center_sk#5, cs_item_sk#6, cs_sales_price#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_call_center_sk)]
 ReadSchema: struct

@@ -96,7 +96,7 @@ Input [7]: [i_item_sk#1, i_brand#2, i_category#3, cs_sold_date_sk#4, cs_call_cen
 (10) Scan parquet default.date_dim
 Output [3]: [d_date_sk#9, d_year#10, d_moy#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -123,7 +123,7 @@ Input [8]: [i_brand#2, i_category#3, cs_sold_date_sk#4, cs_call_center_sk#5, cs_
 (16) Scan parquet default.call_center
 Output [2]: [cc_call_center_sk#13, cc_name#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
 PushedFilters: [IsNotNull(cc_call_center_sk), IsNotNull(cc_name)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58.sf100/explain.txt
index 467aa26dac4e5..8498e4397271f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58.sf100/explain.txt
@@ -83,7 +83,7 @@ TakeOrderedAndProject (79)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -97,7 +97,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_date#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -111,7 +111,7 @@ Condition : isnotnull(d_date_sk#4)
 (7) Scan parquet default.date_dim
 Output [2]: [d_date#5, d_week_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -155,7 +155,7 @@ Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, d_date_sk#4]
 (17) Scan parquet default.item
 Output [2]: [i_item_sk#12, i_item_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_item_id)]
 ReadSchema: struct

@@ -204,7 +204,7 @@ Condition : isnotnull(ss_item_rev#20)
 (27) Scan parquet default.catalog_sales
 Output [3]: [cs_sold_date_sk#21, cs_item_sk#22, cs_ext_sales_price#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
 ReadSchema: struct

@@ -218,7 +218,7 @@ Condition : (isnotnull(cs_item_sk#22) AND isnotnull(cs_sold_date_sk#21))
 (30) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_date#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -232,7 +232,7 @@ Condition : isnotnull(d_date_sk#4)
 (33) Scan parquet default.date_dim
 Output [2]: [d_date#5, d_week_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -323,7 +323,7 @@ Input [4]: [item_id#19, ss_item_rev#20, item_id#31, cs_item_rev#32]
 (53) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#34, ws_item_sk#35, ws_ext_sales_price#36]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

@@ -337,7 +337,7 @@ Condition : (isnotnull(ws_item_sk#35) AND isnotnull(ws_sold_date_sk#34))
 (56) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_date#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -351,7 +351,7 @@ Condition : isnotnull(d_date_sk#4)
 (59) Scan parquet default.date_dim
 Output [2]: [d_date#5, d_week_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -455,7 +455,7 @@ Subquery:1 Hosting operator id = 9 Hosting Expression = Subquery scalar-subquery
 (80) Scan parquet default.date_dim
 Output [2]: [d_date#5, d_week_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), EqualTo(d_date,2000-01-03)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58/explain.txt
index f587499d7d21a..40a6836aedae6 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q58/explain.txt
@@ -83,7 +83,7 @@ TakeOrderedAndProject (79)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -97,7 +97,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
 (4) Scan parquet default.item
 Output [2]: [i_item_sk#4, i_item_id#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_item_id)]
 ReadSchema: struct

@@ -124,7 +124,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4,
 (10) Scan parquet default.date_dim
 Output [2]: [d_date_sk#7, d_date#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -138,7 +138,7 @@ Condition : isnotnull(d_date_sk#7)
 (13) Scan parquet default.date_dim
 Output [2]: [d_date#8, d_week_seq#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -204,7 +204,7 @@ Condition : isnotnull(ss_item_rev#20)
 (27) Scan parquet default.catalog_sales
 Output [3]: [cs_sold_date_sk#21, cs_item_sk#22, cs_ext_sales_price#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
 ReadSchema: struct

@@ -230,7 +230,7 @@ Input [5]: [cs_sold_date_sk#21, cs_item_sk#22, cs_ext_sales_price#23, i_item_sk#
 (33) Scan parquet default.date_dim
 Output [2]: [d_date_sk#7, d_date#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -244,7 +244,7 @@ Condition : isnotnull(d_date_sk#7)
 (36) Scan parquet default.date_dim
 Output [2]: [d_date#8, d_week_seq#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -323,7 +323,7 @@ Input [4]: [item_id#19, ss_item_rev#20, item_id#31, cs_item_rev#32]
 (53) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#34, ws_item_sk#35, ws_ext_sales_price#36]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

@@ -349,7 +349,7 @@ Input [5]: [ws_sold_date_sk#34, ws_item_sk#35, ws_ext_sales_price#36, i_item_sk#
 (59) Scan parquet default.date_dim
 Output [2]: [d_date_sk#7, d_date#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -363,7 +363,7 @@ Condition : isnotnull(d_date_sk#7)
 (62) Scan parquet default.date_dim
 Output [2]: [d_date#8, d_week_seq#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -455,7 +455,7 @@ Subquery:1 Hosting operator id = 15 Hosting Expression = Subquery scalar-subquer
 (80) Scan parquet default.date_dim
 Output [2]: [d_date#8, d_week_seq#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), EqualTo(d_date,2000-01-03)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59.sf100/explain.txt
index 84dfe09b7e67d..6edd0e4b0c159 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59.sf100/explain.txt
@@ -48,7 +48,7 @@ TakeOrderedAndProject (44)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -62,7 +62,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#4, d_week_seq#5, d_day_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -107,7 +107,7 @@ Results [9]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (13) Scan parquet default.store
 Output [3]: [s_store_sk#37, s_store_id#38, s_store_name#39]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct

@@ -134,7 +134,7 @@ Input [12]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, tue_sales#
 (19) Scan parquet default.date_dim
 Output [2]: [d_month_seq#41, d_week_seq#42]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -175,7 +175,7 @@ Results [9]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (28) Scan parquet default.store
 Output [2]: [s_store_sk#37, s_store_id#38]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct

@@ -202,7 +202,7 @@ Input [11]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, tue_sales#
 (34) Scan parquet default.date_dim
 Output [2]: [d_month_seq#69, d_week_seq#70]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1224), LessThanOrEqual(d_month_seq,1235), IsNotNull(d_week_seq)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59/explain.txt
index a7c8053ca4713..6edd0e4b0c159 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q59/explain.txt
@@ -48,7 +48,7 @@ TakeOrderedAndProject (44)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_sales_price#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -62,7 +62,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#4, d_week_seq#5, d_day_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -107,7 +107,7 @@ Results [9]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (13) Scan parquet default.store
 Output [3]: [s_store_sk#37, s_store_id#38, s_store_name#39]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct

@@ -134,7 +134,7 @@ Input [12]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, tue_sales#
 (19) Scan parquet default.date_dim
 Output [2]: [d_month_seq#41, d_week_seq#42]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -175,7 +175,7 @@ Results [9]: [d_week_seq#5, ss_store_sk#2, MakeDecimal(sum(UnscaledValue(CASE WH
 (28) Scan parquet default.store
 Output [2]: [s_store_sk#37, s_store_id#38]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_id)]
 ReadSchema: struct

@@ -202,7 +202,7 @@ Input [11]: [d_week_seq#5, ss_store_sk#2, sun_sales#30, mon_sales#31, tue_sales#
 (34) Scan parquet default.date_dim
 Output [2]: [d_month_seq#69, d_week_seq#70]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1224), LessThanOrEqual(d_month_seq,1235), IsNotNull(d_week_seq)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt
index 2c003cb15cc3b..511e1b46cd7a7 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt
@@ -54,7 +54,7 @@ TakeOrderedAndProject (50)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -68,7 +68,7 @@ Condition : ((isnotnull(ss_customer_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -99,7 +99,7 @@ Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, d_date_sk#4]
 (11) Scan parquet default.item
 Output [3]: [i_item_sk#9, i_current_price#10, i_category#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_current_price), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -113,7 +113,7 @@ Condition : (isnotnull(i_current_price#10) AND isnotnull(i_item_sk#9))
 (14) Scan parquet default.item
 Output [2]: [i_current_price#10, i_category#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category)]
 ReadSchema: struct

@@ -183,7 +183,7 @@ Arguments: [ss_customer_sk#3 ASC NULLS FIRST], false, 0
 (29) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#23, ca_state#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -205,7 +205,7 @@ Arguments: [ca_address_sk#23 ASC NULLS FIRST], false, 0
 (34) Scan parquet default.customer
 Output [2]: [c_customer_sk#26, c_current_addr_sk#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_customer_sk)]
 ReadSchema: struct

@@ -295,7 +295,7 @@ Subquery:1 Hosting operator id = 6 Hosting Expression = Subquery scalar-subquery
 (51) Scan parquet default.date_dim
 Output [3]: [d_month_seq#5, d_year#37, d_moy#38]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,1)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6/explain.txt
index a3007b7efa680..822d24b2f5939 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6/explain.txt
@@ -48,7 +48,7 @@ TakeOrderedAndProject (44)
 (1) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#1, ca_state#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -62,7 +62,7 @@ Condition : isnotnull(ca_address_sk#1)
 (4) Scan parquet default.customer
 Output [2]: [c_customer_sk#3, c_current_addr_sk#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_customer_sk)]
 ReadSchema: struct

@@ -89,7 +89,7 @@ Input [4]: [ca_address_sk#1, ca_state#2, c_customer_sk#3, c_current_addr_sk#4]
 (10) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#6, ss_item_sk#7, ss_customer_sk#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Input [5]: [ca_state#2, c_customer_sk#3, ss_sold_date_sk#6, ss_item_sk#7, ss_cus
 (16) Scan parquet default.date_dim
 Output [2]: [d_date_sk#10, d_month_seq#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -147,7 +147,7 @@ Input [4]: [ca_state#2, ss_sold_date_sk#6, ss_item_sk#7, d_date_sk#10]
 (23) Scan parquet default.item
 Output [3]: [i_item_sk#15, i_current_price#16, i_category#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_current_price), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -161,7 +161,7 @@ Condition : (isnotnull(i_current_price#16) AND isnotnull(i_item_sk#15))
 (26) Scan parquet default.item
 Output [2]: [i_current_price#16, i_category#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category)]
 ReadSchema: struct

@@ -265,7 +265,7 @@ Subquery:1 Hosting operator id = 18 Hosting Expression = Subquery scalar-subquer
 (45) Scan parquet default.date_dim
 Output [3]: [d_month_seq#11, d_year#35, d_moy#36]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,1)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60.sf100/explain.txt
index cec4923c48198..f838f8f1a18af 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60.sf100/explain.txt
@@ -71,7 +71,7 @@ TakeOrderedAndProject (67)
 (1) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#3)) AND isno
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,9), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4,
 (11) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#9, ca_gmt_offset#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -147,7 +147,7 @@ Input [4]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, ca_address_sk#9]
 (18) Scan parquet default.item
 Output [2]: [i_item_sk#12, i_item_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -161,7 +161,7 @@ Condition : isnotnull(i_item_sk#12)
 (21) Scan parquet default.item
 Output [2]: [i_item_id#13, i_category#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Music)]
 ReadSchema: struct

@@ -219,7 +219,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))
 (33) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_addr_sk), IsNotNull(cs_item_sk)]
 ReadSchema: struct

@@ -287,7 +287,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#26)
 (48) Scan parquet default.web_sales
 Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk), IsNotNull(ws_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60/explain.txt
index 8e05106d68911..f838f8f1a18af 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q60/explain.txt
@@ -71,7 +71,7 @@ TakeOrderedAndProject (67)
 (1) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#3)) AND isno
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#5, d_year#6, d_moy#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,9), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4,
 (11) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#9, ca_gmt_offset#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -147,7 +147,7 @@ Input [4]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, ca_address_sk#9]
 (18) Scan parquet default.item
 Output [2]: [i_item_sk#12, i_item_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -161,7 +161,7 @@ Condition : isnotnull(i_item_sk#12)
 (21) Scan parquet default.item
 Output [2]: [i_item_id#13, i_category#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Music)]
 ReadSchema: struct

@@ -219,7 +219,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))
 (33) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_addr_sk), IsNotNull(cs_item_sk)]
 ReadSchema: struct

@@ -287,7 +287,7 @@ Results [2]: [i_item_id#13, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#26)
 (48) Scan parquet default.web_sales
 Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_addr_sk), IsNotNull(ws_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt
index acc767cba6ff5..e616934bbd073 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt
@@ -79,7 +79,7 @@ TakeOrderedAndProject (75)
 (1) Scan parquet default.store_sales
 Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_promo_sk#5, ss_ext_sales_price#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -93,7 +93,7 @@ Condition : ((((isnotnull(ss_store_sk#4) AND isnotnull(ss_promo_sk#5)) AND isnot
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#7, d_year#8, d_moy#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,11), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -124,7 +124,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss
 (11) Scan parquet default.item
 Output [2]: [i_item_sk#11, i_category#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Jewelry), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -155,7 +155,7 @@ Input [6]: [ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_promo_sk#5, ss_ext
 (18) Scan parquet default.promotion
 Output [4]: [p_promo_sk#14, p_channel_dmail#15, p_channel_email#16, p_channel_tv#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [Or(Or(EqualTo(p_channel_dmail,Y),EqualTo(p_channel_email,Y)),EqualTo(p_channel_tv,Y)), IsNotNull(p_promo_sk)]
 ReadSchema: struct

@@ -186,7 +186,7 @@ Input [5]: [ss_customer_sk#3, ss_store_sk#4, ss_promo_sk#5, ss_ext_sales_price#6
 (25) Scan parquet default.store
 Output [2]: [s_store_sk#19, s_gmt_offset#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_gmt_offset), EqualTo(s_gmt_offset,-5.00), IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -217,7 +217,7 @@ Input [4]: [ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6, s_store_sk#19
 (32) Scan parquet default.customer
 Output [2]: [c_customer_sk#22, c_current_addr_sk#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

@@ -231,7 +231,7 @@ Condition : (isnotnull(c_customer_sk#22) AND isnotnull(c_current_addr_sk#23))
 (35) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#24, ca_gmt_offset#25]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -296,7 +296,7 @@ Output [1]: [d_date_sk#7]
 (49) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -346,7 +346,7 @@ Output [1]: [ca_address_sk#24]
 (61) Scan parquet default.customer
 Output [2]: [c_customer_sk#22,
c_current_addr_sk#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt index 05fffeeec65c9..f56f48726c4ad 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt @@ -76,7 +76,7 @@ TakeOrderedAndProject (72) (1) Scan parquet default.store_sales Output [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_promo_sk#5, ss_ext_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -90,7 +90,7 @@ Condition : ((((isnotnull(ss_store_sk#4) AND isnotnull(ss_promo_sk#5)) AND isnot (4) Scan parquet default.store Output [2]: [s_store_sk#7, s_gmt_offset#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_gmt_offset), EqualTo(s_gmt_offset,-5.00), IsNotNull(s_store_sk)] ReadSchema: struct @@ -121,7 +121,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss (11) Scan parquet default.promotion Output [4]: [p_promo_sk#10, p_channel_dmail#11, p_channel_email#12, p_channel_tv#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [Or(Or(EqualTo(p_channel_dmail,Y),EqualTo(p_channel_email,Y)),EqualTo(p_channel_tv,Y)), IsNotNull(p_promo_sk)] ReadSchema: struct @@ -152,7 +152,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_promo_sk#5, ss (18) Scan parquet default.date_dim Output [3]: [d_date_sk#15, d_year#16, d_moy#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,11), IsNotNull(d_date_sk)] ReadSchema: struct @@ -183,7 +183,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ext_sales_pric (25) Scan parquet default.customer Output [2]: [c_customer_sk#19, c_current_addr_sk#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -210,7 +210,7 @@ Input 
[5]: [ss_item_sk#2, ss_customer_sk#3, ss_ext_sales_price#6, c_customer_sk# (31) Scan parquet default.customer_address Output [2]: [ca_address_sk#22, ca_gmt_offset#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -241,7 +241,7 @@ Input [4]: [ss_item_sk#2, ss_ext_sales_price#6, c_current_addr_sk#20, ca_address (38) Scan parquet default.item Output [2]: [i_item_sk#25, i_category#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Jewelry), IsNotNull(i_item_sk)] ReadSchema: struct @@ -290,7 +290,7 @@ Results [1]: [MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#6))#31,17,2) AS p (48) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt index d2553a2c58c62..e9a2b7a375b01 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.date_dim Output [2]: [d_date_sk#1, d_month_seq#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -58,7 +58,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode_sk#7, ws_warehouse_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_ship_mode_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_ship_date_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [d_date_sk#1, ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, (11) Scan parquet default.web_site Output [2]: [web_site_sk#9, web_name#10] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct @@ -108,7 +108,7 @@ Input [7]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode (17) Scan parquet default.ship_mode Output [2]: [sm_ship_mode_sk#12, sm_type#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/ship_mode] +Location [not included in comparison]/{warehouse_dir}/ship_mode] PushedFilters: [IsNotNull(sm_ship_mode_sk)] ReadSchema: struct @@ -135,7 +135,7 @@ Input [7]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_ship_mode_sk#7, ws_warehous (23) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#15, w_warehouse_name#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt index c06918906c77d..05ce467c349a3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_ship_mode_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_ship_date_sk)] ReadSchema: struct @@ -50,7 +50,7 @@ Condition : (((isnotnull(ws_warehouse_sk#5) AND isnotnull(ws_ship_mode_sk#4)) AN (4) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#6, w_warehouse_name#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -77,7 +77,7 @@ Input [7]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode (10) Scan parquet default.ship_mode Output [2]: [sm_ship_mode_sk#9, sm_type#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/ship_mode] +Location [not included in comparison]/{warehouse_dir}/ship_mode] PushedFilters: [IsNotNull(sm_ship_mode_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Input [7]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode (16) Scan parquet default.web_site Output [2]: [web_site_sk#12, web_name#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_site] +Location [not included in 
comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [7]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, w_warehouse_ (22) Scan parquet default.date_dim Output [2]: [d_date_sk#15, d_month_seq#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63.sf100/explain.txt index 76c468560a951..7de8d8780ad42 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63.sf100/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manager_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,refernece,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -58,7 +58,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#11, ss_item_sk#12, ss_store_sk#13, ss_sales_price#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manager_id#5, ss_sold_date_sk#11, ss_item_sk#12, ss_s (11) Scan parquet default.store Output [1]: [s_store_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -108,7 +108,7 @@ Input [5]: [i_manager_id#5, ss_sold_date_sk#11, ss_store_sk#13, ss_sales_price#1 (17) Scan parquet default.date_dim Output [3]: [d_date_sk#17, d_month_seq#18, d_moy#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_month_seq, [1200,1211,1205,1201,1206,1210,1207,1202,1209,1203,1208,1204]), IsNotNull(d_date_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63/explain.txt index 733b3e5b05756..284a9203a03f6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q63/explain.txt @@ -36,7 +36,7 @@ TakeOrderedAndProject (32) (1) Scan parquet default.item Output [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manager_id#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(And(In(i_category, [Books,Children,Electronics]),In(i_class, [personal,portable,refernece,self-help])),In(i_brand, [scholaramalgamalg #6,scholaramalgamalg #7,exportiunivamalg #8,scholaramalgamalg #8])),And(And(In(i_category, [Women,Music,Men]),In(i_class, [accessories,classical,fragrances,pants])),In(i_brand, [amalgimporto #9,edu packscholar #9,exportiimporto #9,importoamalg #9]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -54,7 +54,7 @@ Input [5]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, i_manager_id#5] (5) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#10, ss_item_sk#11, ss_store_sk#12, ss_sales_price#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_manager_id#5, ss_sold_date_sk#10, ss_item_sk#11, ss_s (11) Scan parquet default.date_dim Output [3]: [d_date_sk#15, d_month_seq#16, d_moy#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_month_seq, [1200,1211,1205,1201,1206,1210,1207,1202,1209,1203,1208,1204]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -112,7 +112,7 @@ Input [6]: [i_manager_id#5, ss_sold_date_sk#10, ss_store_sk#12, ss_sales_price#1 (18) Scan parquet default.store Output [1]: [s_store_sk#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64.sf100/explain.txt index 10f238ebd2ad7..d0faf9b521e7c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64.sf100/explain.txt @@ -213,7 +213,7 @@ (1) Scan parquet default.store_sales Output [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_store_sk#7, ss_promo_sk#8, ss_ticket_number#9, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt#12] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk)] ReadSchema: struct @@ -235,7 +235,7 @@ Arguments: [cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number# (6) Scan parquet default.store_returns Output [2]: [sr_item_sk#14, sr_ticket_number#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -266,7 +266,7 @@ Input [14]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (13) Scan parquet default.catalog_sales Output [3]: [cs_item_sk#17, cs_order_number#18, cs_ext_list_price#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_order_number)] ReadSchema: struct @@ -288,7 +288,7 @@ Arguments: [cs_item_sk#17 ASC NULLS FIRST, cs_order_number#18 ASC NULLS FIRST], (18) Scan parquet default.catalog_returns Output [5]: [cr_item_sk#21, cr_order_number#22, cr_refunded_cash#23, cr_reversed_charge#24, cr_store_credit#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -358,7 +358,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (33) Scan parquet default.date_dim Output [2]: [d_date_sk#39, d_year#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk)] ReadSchema: struct @@ -385,7 +385,7 @@ Input [13]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (39) Scan parquet default.store Output [3]: [s_store_sk#42, s_store_name#43, s_zip#44] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_name), IsNotNull(s_zip)] ReadSchema: struct @@ -420,7 +420,7 @@ Arguments: [ss_customer_sk#3 ASC NULLS FIRST], false, 0 (47) Scan parquet default.customer Output [6]: [c_customer_sk#47, c_current_cdemo_sk#48, c_current_hdemo_sk#49, c_current_addr_sk#50, c_first_shipto_date_sk#51, c_first_sales_date_sk#52] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_first_sales_date_sk), IsNotNull(c_first_shipto_date_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -451,7 +451,7 @@ Input [18]: [ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_ad (54) Scan parquet default.date_dim Output [2]: [d_date_sk#54, d_year#55] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -498,7 +498,7 @@ Arguments: [ss_cdemo_sk#4 ASC NULLS FIRST], false, 0 (65) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#60, cd_marital_status#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status)] ReadSchema: struct @@ -553,7 +553,7 @@ Input [18]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (78) Scan parquet default.promotion Output [1]: [p_promo_sk#66] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_promo_sk)] ReadSchema: struct @@ -580,7 +580,7 @@ Input [15]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (84) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#68, hd_income_band_sk#69] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)] ReadSchema: struct @@ -627,7 +627,7 @@ Arguments: [ss_addr_sk#6 ASC NULLS FIRST], false, 0 (95) Scan parquet default.customer_address Output [5]: [ca_address_sk#74, ca_street_number#75, ca_street_name#76, ca_city#77, ca_zip#78] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -682,7 +682,7 @@ Input [21]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (108) Scan parquet default.income_band Output [1]: [ib_income_band_sk#86] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/income_band] +Location [not included in comparison]/{warehouse_dir}/income_band] PushedFilters: [IsNotNull(ib_income_band_sk)] ReadSchema: struct @@ -721,7 +721,7 @@ Input [19]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (117) 
Scan parquet default.item Output [4]: [i_item_sk#89, i_current_price#90, i_color#91, i_product_name#92] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), In(i_color, [purple,burlywood,indian,spring,floral,medium]), GreaterThanOrEqual(i_current_price,64.00), IsNotNull(i_item_sk)] ReadSchema: struct @@ -813,7 +813,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (138) Scan parquet default.date_dim Output [2]: [d_date_sk#39, d_year#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64/explain.txt index 0f0e48d035cbf..5b6dac01866b7 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q64/explain.txt @@ -174,7 +174,7 @@ (1) Scan parquet default.store_sales Output [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_store_sk#7, ss_promo_sk#8, ss_ticket_number#9, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk)] ReadSchema: struct @@ -188,7 +188,7 @@ Condition : ((((((((isnotnull(ss_item_sk#2) AND isnotnull(ss_ticket_number#9)) A (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#13, sr_ticket_number#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -215,7 +215,7 @@ Input [14]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (10) Scan parquet default.catalog_sales Output [3]: [cs_item_sk#16, cs_order_number#17, cs_ext_list_price#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_order_number)] ReadSchema: struct @@ -229,7 +229,7 @@ Condition : (isnotnull(cs_item_sk#16) AND isnotnull(cs_order_number#17)) (13) Scan parquet default.catalog_returns Output [5]: [cr_item_sk#19, cr_order_number#20, cr_refunded_cash#21, cr_reversed_charge#22, cr_store_credit#23] Batched: true -Location: 
InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -295,7 +295,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (27) Scan parquet default.date_dim Output [2]: [d_date_sk#37, d_year#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk)] ReadSchema: struct @@ -322,7 +322,7 @@ Input [13]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (33) Scan parquet default.store Output [3]: [s_store_sk#40, s_store_name#41, s_zip#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_zip), IsNotNull(s_store_name)] ReadSchema: struct @@ -349,7 +349,7 @@ Input [14]: [ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_ad (39) Scan parquet default.customer Output [6]: [c_customer_sk#44, c_current_cdemo_sk#45, c_current_hdemo_sk#46, c_current_addr_sk#47, c_first_shipto_date_sk#48, c_first_sales_date_sk#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_first_sales_date_sk), IsNotNull(c_first_shipto_date_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -376,7 +376,7 @@ Input [18]: [ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_ad (45) Scan parquet default.date_dim Output [2]: [d_date_sk#51, d_year#52] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -415,7 +415,7 @@ Input [18]: [ss_item_sk#2, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_ (54) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#56, cd_marital_status#57] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status)] ReadSchema: struct @@ -454,7 +454,7 @@ Input [18]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (63) Scan parquet default.promotion Output [1]: [p_promo_sk#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_promo_sk)] ReadSchema: struct @@ -481,7 +481,7 @@ Input 
[15]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (69) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#63, hd_income_band_sk#64] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)] ReadSchema: struct @@ -520,7 +520,7 @@ Input [15]: [ss_item_sk#2, ss_addr_sk#6, ss_wholesale_cost#10, ss_list_price#11, (78) Scan parquet default.customer_address Output [5]: [ca_address_sk#68, ca_street_number#69, ca_street_name#70, ca_city#71, ca_zip#72] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -559,7 +559,7 @@ Input [21]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (87) Scan parquet default.income_band Output [1]: [ib_income_band_sk#79] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/income_band] +Location [not included in comparison]/{warehouse_dir}/income_band] PushedFilters: [IsNotNull(ib_income_band_sk)] ReadSchema: struct @@ -598,7 +598,7 @@ Input [19]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (96) Scan parquet default.item Output [4]: [i_item_sk#82, i_current_price#83, i_color#84, i_product_name#85] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), In(i_color, [purple,burlywood,indian,spring,floral,medium]), GreaterThanOrEqual(i_current_price,64.00), IsNotNull(i_item_sk)] ReadSchema: struct @@ -647,7 +647,7 @@ Results [17]: [i_product_name#85 AS product_name#100, i_item_sk#82 AS item_sk#10 (106) Scan parquet default.store_sales Output [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_store_sk#7, ss_promo_sk#8, ss_ticket_number#9, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk)] ReadSchema: struct @@ -685,7 +685,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (115) Scan parquet default.date_dim Output [2]: [d_date_sk#37, d_year#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65.sf100/explain.txt index badf00877da8e..24a142c70d774 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65.sf100/explain.txt @@ -49,7 +49,7 @@ TakeOrderedAndProject (45) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -63,7 +63,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND isn (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_month_seq#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1176), LessThanOrEqual(d_month_seq,1187), IsNotNull(d_date_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Condition : isnotnull(revenue#12) (15) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#13, ss_item_sk#14, ss_store_sk#15, ss_sales_price#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -191,7 +191,7 @@ Input [5]: [ss_store_sk#3, ss_item_sk#2, revenue#12, ss_store_sk#15, ave#28] (30) Scan parquet default.store Output [2]: [s_store_sk#30, s_store_name#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -226,7 +226,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0 (38) Scan parquet default.item Output [5]: [i_item_sk#34, i_item_desc#35, i_current_price#36, i_wholesale_cost#37, i_brand#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65/explain.txt index 6e6e6a00c4d78..ab87816b8980b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q65/explain.txt @@ -46,7 +46,7 @@ TakeOrderedAndProject (42) (1) Scan parquet default.store Output [2]: [s_store_sk#1, s_store_name#2] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -60,7 +60,7 @@ Condition : isnotnull(s_store_sk#1) (4) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#3, ss_item_sk#4, ss_store_sk#5, ss_sales_price#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -74,7 +74,7 @@ Condition : ((isnotnull(ss_sold_date_sk#3) AND isnotnull(ss_store_sk#5)) AND isn (7) Scan parquet default.date_dim Output [2]: [d_date_sk#7, d_month_seq#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1176), LessThanOrEqual(d_month_seq,1187), IsNotNull(d_date_sk)] ReadSchema: struct @@ -140,7 +140,7 @@ Input [5]: [s_store_sk#1, s_store_name#2, ss_store_sk#5, ss_item_sk#4, revenue#1 (21) Scan parquet default.item Output [5]: [i_item_sk#16, i_item_desc#17, i_current_price#18, i_wholesale_cost#19, i_brand#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -167,7 +167,7 @@ Input [9]: [s_store_name#2, ss_store_sk#5, ss_item_sk#4, revenue#14, i_item_sk#1 (27) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#22, ss_item_sk#23, ss_store_sk#24, ss_sales_price#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt index 6eead0c581c17..4b863587b08d9 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt @@ -59,7 +59,7 @@ TakeOrderedAndProject (55) (1) Scan parquet default.ship_mode Output [2]: [sm_ship_mode_sk#1, sm_carrier#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/ship_mode] +Location [not included in comparison]/{warehouse_dir}/ship_mode] PushedFilters: [In(sm_carrier, [DHL,BARIAN]), IsNotNull(sm_ship_mode_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_ship_mode_sk#6, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] Batched: true 
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_ship_mode_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Input [8]: [sm_ship_mode_sk#1, ws_sold_date_sk#4, ws_sold_time_sk#5, ws_ship_mod (11) Scan parquet default.time_dim Output [2]: [t_time_sk#11, t_time#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_time), GreaterThanOrEqual(t_time,30838), LessThanOrEqual(t_time,59638), IsNotNull(t_time_sk)] ReadSchema: struct @@ -135,7 +135,7 @@ Input [7]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_warehouse_sk#7, ws_quantity (18) Scan parquet default.date_dim Output [3]: [d_date_sk#14, d_year#15, d_moy#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -162,7 +162,7 @@ Input [8]: [ws_sold_date_sk#4, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_pr (24) Scan parquet default.warehouse Output [7]: [w_warehouse_sk#18, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -210,7 +210,7 @@ Output [1]: [sm_ship_mode_sk#1] (34) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_sold_time_sk), IsNotNull(cs_ship_mode_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66/explain.txt index 48ee7fa1c8aa7..fc18efd3d32c1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66/explain.txt @@ -59,7 +59,7 @@ TakeOrderedAndProject (55) (1) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: 
[IsNotNull(ws_warehouse_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_ship_mode_sk)] ReadSchema: struct @@ -73,7 +73,7 @@ Condition : (((isnotnull(ws_warehouse_sk#4) AND isnotnull(ws_sold_date_sk#1)) AN (4) Scan parquet default.warehouse Output [7]: [w_warehouse_sk#8, w_warehouse_name#9, w_warehouse_sq_ft#10, w_city#11, w_county#12, w_state#13, w_country#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -100,7 +100,7 @@ Input [14]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_warehou (10) Scan parquet default.date_dim Output [3]: [d_date_sk#16, d_year#17, d_moy#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -127,7 +127,7 @@ Input [15]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_quantit (16) Scan parquet default.time_dim Output [2]: [t_time_sk#20, t_time#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_time), GreaterThanOrEqual(t_time,30838), LessThanOrEqual(t_time,59638), IsNotNull(t_time_sk)] ReadSchema: struct @@ -158,7 +158,7 @@ Input [14]: [ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_quantity#5, ws_ext_sales_p (23) Scan parquet default.ship_mode Output [2]: [sm_ship_mode_sk#23, sm_carrier#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/ship_mode] +Location [not included in comparison]/{warehouse_dir}/ship_mode] PushedFilters: [In(sm_carrier, [DHL,BARIAN]), IsNotNull(sm_ship_mode_sk)] ReadSchema: struct @@ -207,7 +207,7 @@ Results [32]: [w_warehouse_name#9, w_warehouse_sq_ft#10, w_city#11, w_county#12, (33) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_sold_time_sk), IsNotNull(cs_ship_mode_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67.sf100/explain.txt index 06890e80266c9..0ba0d4c16f75a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67.sf100/explain.txt @@ -38,7 +38,7 @@ TakeOrderedAndProject (34) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sales_price#5] 
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -52,7 +52,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND isn
 (4) Scan parquet default.date_dim
 Output [5]: [d_date_sk#6, d_month_seq#7, d_year#8, d_moy#9, d_qoy#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sa
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#12, s_store_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -118,7 +118,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
 (19) Scan parquet default.item
 Output [5]: [i_item_sk#16, i_brand#17, i_class#18, i_category#19, i_product_name#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67/explain.txt
index 21070074a3111..ae133938ba32f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q67/explain.txt
@@ -35,7 +35,7 @@ TakeOrderedAndProject (31)
 (1) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sales_price#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)]
 ReadSchema: struct

@@ -49,7 +49,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND isn
 (4) Scan parquet default.date_dim
 Output [5]: [d_date_sk#6, d_month_seq#7, d_year#8, d_moy#9, d_qoy#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -80,7 +80,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sa
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#12, s_store_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -107,7 +107,7 @@ Input [9]: [ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sales_price#5, d_year
 (17) Scan parquet default.item
 Output [5]: [i_item_sk#15, i_brand#16, i_class#17, i_category#18, i_product_name#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68.sf100/explain.txt
index ef8dc7fc917e7..b3dc146e26e38 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68.sf100/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
 (1) Scan parquet default.customer
 Output [4]: [c_customer_sk#1, c_current_addr_sk#2, c_first_name#3, c_last_name#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

@@ -77,7 +77,7 @@ Arguments: [c_current_addr_sk#2 ASC NULLS FIRST], false, 0
 (6) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#6, ca_city#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0
 (15) Scan parquet default.store_sales
 Output [9]: [ss_sold_date_sk#10, ss_customer_sk#11, ss_hdemo_sk#12, ss_addr_sk#13, ss_store_sk#14, ss_ticket_number#15, ss_ext_sales_price#16, ss_ext_list_price#17, ss_ext_tax#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct

@@ -130,7 +130,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#10) AND isnotnull(ss_store_sk#14)) AND
 (18) Scan parquet default.date_dim
 Output [3]: [d_date_sk#19, d_year#20, d_dom#21]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -161,7 +161,7 @@ Input [10]: [ss_sold_date_sk#10, ss_customer_sk#11, ss_hdemo_sk#12, ss_addr_sk#1
 (25) Scan parquet default.store
 Output [2]: [s_store_sk#23, s_city#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_city, [Midway,Fairview]), IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -192,7 +192,7 @@ Input [9]: [ss_customer_sk#11, ss_hdemo_sk#12, ss_addr_sk#13, ss_store_sk#14, ss
 (32) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#26, hd_dep_count#27, hd_vehicle_count#28]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(EqualTo(hd_dep_count,4),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68/explain.txt
index 98af6d9af75b8..d0c618bdb6f23 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q68/explain.txt
@@ -47,7 +47,7 @@ TakeOrderedAndProject (43)
 (1) Scan parquet default.store_sales
 Output [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_ext_sales_price#7, ss_ext_list_price#8, ss_ext_tax#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct

@@ -61,7 +61,7 @@ Condition : ((((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#5)) AND i
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#10, d_year#11, d_dom#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -92,7 +92,7 @@ Input [10]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#14, s_city#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_city, [Midway,Fairview]), IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -123,7 +123,7 @@ Input [9]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic
 (18) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#17, hd_dep_count#18, hd_vehicle_count#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(EqualTo(hd_dep_count,4),EqualTo(hd_vehicle_count,3)), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

@@ -154,7 +154,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_ticket_number#6, s
 (25) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#21, ca_city#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_city)]
 ReadSchema: struct

@@ -199,7 +199,7 @@ Results [6]: [ss_ticket_number#6, ss_customer_sk#2, ca_city#22 AS bought_city#34
 (34) Scan parquet default.customer
 Output [4]: [c_customer_sk#38, c_current_addr_sk#39, c_first_name#40, c_last_name#41]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69.sf100/explain.txt
index e54aecbc37c22..10867c980b22a 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69.sf100/explain.txt
@@ -58,7 +58,7 @@ TakeOrderedAndProject (54)
 (1) Scan parquet default.customer
 Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)]
 ReadSchema: struct

@@ -80,7 +80,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0
 (6) Scan parquet default.store_sales
 Output [2]: [ss_sold_date_sk#5, ss_customer_sk#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -94,7 +94,7 @@ Condition : isnotnull(ss_sold_date_sk#5)
 (9) Scan parquet default.date_dim
 Output [3]: [d_date_sk#7, d_year#8, d_moy#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,6), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -138,7 +138,7 @@ Join condition: None
 (19) Scan parquet default.web_sales
 Output [2]: [ws_sold_date_sk#12, ws_bill_customer_sk#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

@@ -177,7 +177,7 @@ Join condition: None
 (28) Scan parquet default.catalog_sales
 Output [2]: [cs_sold_date_sk#15, cs_ship_customer_sk#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk)]
 ReadSchema: struct

@@ -220,7 +220,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
 (38) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#18, ca_state#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [In(ca_state, [KY,GA,NM]), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -255,7 +255,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (46) Scan parquet default.customer_demographics
 Output [6]: [cd_demo_sk#22, cd_gender#23, cd_marital_status#24, cd_education_status#25, cd_purchase_estimate#26, cd_credit_rating#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_demo_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69/explain.txt
index a5448b18bd4d8..ca9400b9875ef 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q69/explain.txt
@@ -53,7 +53,7 @@ TakeOrderedAndProject (49)
 (1) Scan parquet default.customer
 Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)]
 ReadSchema: struct

@@ -67,7 +67,7 @@ Condition : (isnotnull(c_current_addr_sk#3) AND isnotnull(c_current_cdemo_sk#2))
 (4) Scan parquet default.store_sales
 Output [2]: [ss_sold_date_sk#4, ss_customer_sk#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -81,7 +81,7 @@ Condition : isnotnull(ss_sold_date_sk#4)
 (7) Scan parquet default.date_dim
 Output [3]: [d_date_sk#6, d_year#7, d_moy#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,2001), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,6), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -121,7 +121,7 @@ Join condition: None
 (16) Scan parquet default.web_sales
 Output [2]: [ws_sold_date_sk#11, ws_bill_customer_sk#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

@@ -156,7 +156,7 @@ Join condition: None
 (24) Scan parquet default.catalog_sales
 Output [2]: [cs_sold_date_sk#14, cs_ship_customer_sk#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk)]
 ReadSchema: struct

@@ -195,7 +195,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
 (33) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#17, ca_state#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [In(ca_state, [KY,GA,NM]), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -226,7 +226,7 @@ Input [3]: [c_current_cdemo_sk#2, c_current_addr_sk#3, ca_address_sk#17]
 (40) Scan parquet default.customer_demographics
 Output [6]: [cd_demo_sk#20, cd_gender#21, cd_marital_status#22, cd_education_status#23, cd_purchase_estimate#24, cd_credit_rating#25]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_demo_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7.sf100/explain.txt
index d497558b628d7..3fed561c34984 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7.sf100/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
 (1) Scan parquet default.store_sales
 Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)]
 ReadSchema: struct

@@ -52,7 +52,7 @@ Condition : (((isnotnull(ss_cdemo_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND is
 (4) Scan parquet default.customer_demographics
 Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), IsNotNull(cd_marital_status), EqualTo(cd_gender,M), EqualTo(cd_marital_status,S), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_qu
 (11) Scan parquet default.promotion
 Output [3]: [p_promo_sk#14, p_channel_email#15, p_channel_event#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [Or(EqualTo(p_channel_email,N),EqualTo(p_channel_event,N)), IsNotNull(p_promo_sk)]
 ReadSchema: struct

@@ -114,7 +114,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_promo_sk#4, ss_quantity#5, ss_li
 (18) Scan parquet default.date_dim
 Output [2]: [d_date_sk#18, d_year#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -145,7 +145,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_
 (25) Scan parquet default.item
 Output [2]: [i_item_sk#21, i_item_id#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7/explain.txt
index 34ec2e61198f6..998c6b2b02218 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q7/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
 (1) Scan parquet default.store_sales
 Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)]
 ReadSchema: struct

@@ -52,7 +52,7 @@ Condition : (((isnotnull(ss_cdemo_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND is
 (4) Scan parquet default.customer_demographics
 Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_marital_status), IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,M), EqualTo(cd_marital_status,S), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_qu
 (11) Scan parquet default.date_dim
 Output [2]: [d_date_sk#14, d_year#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -114,7 +114,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_promo_sk#4, ss_quantity#5, ss_li
 (18) Scan parquet default.item
 Output [2]: [i_item_sk#17, i_item_id#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -141,7 +141,7 @@ Input [8]: [ss_item_sk#2, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sale
 (24) Scan parquet default.promotion
 Output [3]: [p_promo_sk#20, p_channel_email#21, p_channel_event#22]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [Or(EqualTo(p_channel_email,N),EqualTo(p_channel_event,N)), IsNotNull(p_promo_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70.sf100/explain.txt
index abca0d859fd17..00f691230ff69 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70.sf100/explain.txt
@@ -51,7 +51,7 @@ TakeOrderedAndProject (47)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -65,7 +65,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -96,7 +96,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4]
 (11) Scan parquet default.store
 Output [3]: [s_store_sk#7, s_county#8, s_state#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -110,7 +110,7 @@ Condition : isnotnull(s_store_sk#7)
 (14) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -136,7 +136,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4]
 (20) Scan parquet default.store
 Output [2]: [s_store_sk#7, s_state#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70/explain.txt
index 2e6b9ebdd0226..05b533aa65a63 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q70/explain.txt
@@ -51,7 +51,7 @@ TakeOrderedAndProject (47)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -65,7 +65,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -96,7 +96,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4]
 (11) Scan parquet default.store
 Output [3]: [s_store_sk#7, s_county#8, s_state#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -110,7 +110,7 @@ Condition : isnotnull(s_store_sk#7)
 (14) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -124,7 +124,7 @@ Condition : (isnotnull(ss_store_sk#2) AND isnotnull(ss_sold_date_sk#1))
 (17) Scan parquet default.store
 Output [2]: [s_store_sk#7, s_state#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71.sf100/explain.txt
index 3513c8f42a2e1..9471377a18a19 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71.sf100/explain.txt
@@ -46,7 +46,7 @@
 (1) Scan parquet default.item
 Output [4]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manager_id#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -68,7 +68,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.web_sales
 Output [4]: [ws_sold_date_sk#6, ws_sold_time_sk#7, ws_item_sk#8, ws_ext_sales_price#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk), IsNotNull(ws_sold_time_sk)]
 ReadSchema: struct

@@ -82,7 +82,7 @@ Condition : ((isnotnull(ws_sold_date_sk#6) AND isnotnull(ws_item_sk#8)) AND isno
 (9) Scan parquet default.date_dim
 Output [3]: [d_date_sk#10, d_year#11, d_moy#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -113,7 +113,7 @@ Input [5]: [ws_sold_date_sk#6, ws_sold_time_sk#7, ws_item_sk#8, ws_ext_sales_pri
 (16) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#17, cs_sold_time_sk#18, cs_item_sk#19, cs_ext_sales_price#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_time_sk)]
 ReadSchema: struct

@@ -139,7 +139,7 @@ Input [5]: [cs_sold_date_sk#17, cs_sold_time_sk#18, cs_item_sk#19, cs_ext_sales_
 (22) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#24, ss_sold_time_sk#25, ss_item_sk#26, ss_ext_sales_price#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_sold_time_sk)]
 ReadSchema: struct

@@ -176,7 +176,7 @@ Input [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, ext_price#14, sold_item_sk#15,
 (31) Scan parquet default.time_dim
 Output [4]: [t_time_sk#31, t_hour#32, t_minute#33, t_meal_time#34]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
 PushedFilters: [Or(EqualTo(t_meal_time,breakfast),EqualTo(t_meal_time,dinner)), IsNotNull(t_time_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71/explain.txt
index 11046e192f86a..9471377a18a19 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q71/explain.txt
@@ -46,7 +46,7 @@
 (1) Scan parquet default.item
 Output [4]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manager_id#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -68,7 +68,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.web_sales
 Output [4]: [ws_sold_date_sk#6, ws_sold_time_sk#7, ws_item_sk#8, ws_ext_sales_price#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk), IsNotNull(ws_sold_time_sk)]
 ReadSchema: struct

@@ -82,7 +82,7 @@ Condition : ((isnotnull(ws_sold_date_sk#6) AND isnotnull(ws_item_sk#8)) AND isno
 (9) Scan parquet default.date_dim
 Output [3]: [d_date_sk#10, d_year#11, d_moy#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -113,7 +113,7 @@ Input [5]: [ws_sold_date_sk#6, ws_sold_time_sk#7, ws_item_sk#8, ws_ext_sales_pri
 (16) Scan parquet default.catalog_sales
 Output [4]: [cs_sold_date_sk#17, cs_sold_time_sk#18, cs_item_sk#19, cs_ext_sales_price#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_time_sk)]
 ReadSchema: struct

@@ -139,7 +139,7 @@ Input [5]: [cs_sold_date_sk#17, cs_sold_time_sk#18, cs_item_sk#19, cs_ext_sales_
 (22) Scan parquet default.store_sales
 Output [4]: [ss_sold_date_sk#24, ss_sold_time_sk#25, ss_item_sk#26, ss_ext_sales_price#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_sold_time_sk)]
 ReadSchema: struct

@@ -176,7 +176,7 @@ Input [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, ext_price#14, sold_item_sk#15,
 (31) Scan parquet default.time_dim
 Output [4]: [t_time_sk#31, t_hour#32, t_minute#33, t_meal_time#34]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
 PushedFilters: [Or(EqualTo(t_meal_time,breakfast),EqualTo(t_meal_time,dinner)), IsNotNull(t_time_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt
index 50422e7949201..7662bba3282c2 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt
@@ -83,7 +83,7 @@ TakeOrderedAndProject (79)
 (1) Scan parquet default.household_demographics
 Output [2]: [hd_demo_sk#1, hd_buy_potential#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,>10000), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

@@ -105,7 +105,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.catalog_sales
 Output [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)]
 ReadSchema: struct

@@ -128,7 +128,7 @@ Input [9]: [hd_demo_sk#1, cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk
 (11) Scan parquet default.customer_demographics
 Output [2]: [cd_demo_sk#12, cd_marital_status#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_marital_status), EqualTo(cd_marital_status,D), IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -159,7 +159,7 @@ Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_item_sk
 (18) Scan parquet default.date_dim
 Output [2]: [d_date_sk#15, d_date#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_date)]
 ReadSchema: struct

@@ -194,7 +194,7 @@ Arguments: [cs_item_sk#8 ASC NULLS FIRST], false, 0
 (26) Scan parquet default.item
 Output [2]: [i_item_sk#19, i_item_desc#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -225,7 +225,7 @@ Input [8]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10,
 (33) Scan parquet default.date_dim
 Output [2]: [d_date_sk#22, d_week_seq#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -239,7 +239,7 @@ Condition : (isnotnull(d_date_sk#22) AND isnotnull(d_week_seq#23))
 (36) Scan parquet default.date_dim
 Output [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk), IsNotNull(d_week_seq), IsNotNull(d_date)]
 ReadSchema: struct

@@ -291,7 +291,7 @@ Arguments: [cs_item_sk#8 ASC NULLS FIRST, d_date_sk#22 ASC NULLS FIRST], false,
 (48) Scan parquet default.warehouse
 Output [2]: [w_warehouse_sk#31, w_warehouse_name#32]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
 PushedFilters: [IsNotNull(w_warehouse_sk)]
 ReadSchema: struct

@@ -309,7 +309,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)
 (52) Scan parquet default.inventory
 Output [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
 PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)]
 ReadSchema: struct

@@ -349,7 +349,7 @@ Input [11]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_
 (61) Scan parquet default.promotion
 Output [1]: [p_promo_sk#39]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [IsNotNull(p_promo_sk)]
 ReadSchema: struct

@@ -384,7 +384,7 @@ Arguments: [cs_item_sk#8 ASC NULLS FIRST, cs_order_number#10 ASC NULLS FIRST], f
 (69) Scan parquet default.catalog_returns
 Output [2]: [cr_item_sk#42, cr_order_number#43]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72/explain.txt
index 539ad1474749b..9477ce20210bf 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72/explain.txt
@@ -74,7 +74,7 @@ TakeOrderedAndProject (70)
 (1) Scan parquet default.catalog_sales
 Output [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)]
 ReadSchema: struct

@@ -88,7 +88,7 @@ Condition : (((((isnotnull(cs_quantity#8) AND isnotnull(cs_item_sk#5)) AND isnot
 (4) Scan parquet default.inventory
 Output [4]: [inv_date_sk#9, inv_item_sk#10, inv_warehouse_sk#11, inv_quantity_on_hand#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
 PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)]
 ReadSchema: struct

@@ -115,7 +115,7 @@ Input [12]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h
 (10) Scan parquet default.warehouse
 Output [2]: [w_warehouse_sk#14, w_warehouse_name#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
 PushedFilters: [IsNotNull(w_warehouse_sk)]
 ReadSchema: struct

@@ -142,7 +142,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h
 (16) Scan parquet default.item
 Output [2]: [i_item_sk#17, i_item_desc#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -169,7 +169,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h
 (22) Scan parquet default.customer_demographics
 Output [2]: [cd_demo_sk#20, cd_marital_status#21]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_marital_status), EqualTo(cd_marital_status,D), IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -200,7 +200,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_hdemo_sk#4, cs_item_s
 (29) Scan parquet default.household_demographics
 Output [2]: [hd_demo_sk#23, hd_buy_potential#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,>10000), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

@@ -231,7 +231,7 @@ Input [10]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_hdemo_sk#4, cs_item_s
 (36) Scan parquet default.date_dim
 Output [4]: [d_date_sk#26, d_date#27, d_week_seq#28, d_year#29]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk), IsNotNull(d_week_seq), IsNotNull(d_date)]
 ReadSchema: struct

@@ -262,7 +262,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6,
 (43) Scan parquet default.date_dim
 Output [2]: [d_date_sk#31, d_week_seq#32]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_week_seq)]
 ReadSchema: struct

@@ -289,7 +289,7 @@ Input [11]: [cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7,
 (49) Scan parquet default.date_dim
 Output [2]: [d_date_sk#34, d_date#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_date)]
 ReadSchema: struct

@@ -316,7 +316,7 @@ Input [10]: [cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7,
 (55) Scan parquet default.promotion
 Output [1]: [p_promo_sk#37]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [IsNotNull(p_promo_sk)]
 ReadSchema: struct

@@ -343,7 +343,7 @@ Input [7]: [cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, w_warehouse_name#15,
 (61) Scan parquet default.catalog_returns
 Output [2]: [cr_item_sk#39, cr_order_number#40]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt
index 426d31e6ea9a6..4af604ca3f65f 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt
@@ -43,7 +43,7 @@
 (1) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct

@@ -57,7 +57,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#4)) AND is
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#6, d_year#7, d_dom#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -88,7 +88,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#10, s_county#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_county, [Williamson County,Franklin Parish,Bronx County,Orange County]), IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -119,7 +119,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5,
 (18) Scan parquet default.household_demographics
 Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

@@ -180,7 +180,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
 (31) Scan parquet default.customer
 Output [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt
index 65454a045649f..f4565c3edb172 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt
@@ -40,7 +40,7 @@
 (1) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct

@@ -54,7 +54,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#4)) AND is
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#6, d_year#7, d_dom#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_dom), GreaterThanOrEqual(d_dom,1), LessThanOrEqual(d_dom,2), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#10, s_county#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [In(s_county, [Williamson County,Franklin Parish,Bronx County,Orange County]), IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -116,7 +116,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5,
 (18) Scan parquet default.household_demographics
 Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

@@ -169,7 +169,7 @@ Condition : ((cnt#22 >= 1) AND (cnt#22 <= 5))
 (29) Scan parquet default.customer
 Output [5]: [c_customer_sk#23, c_salutation#24, c_first_name#25, c_last_name#26, c_preferred_cust_flag#27]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74.sf100/explain.txt
index 70b237e0cbdd5..7b55fa470c616 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74.sf100/explain.txt
@@ -90,7 +90,7 @@ TakeOrderedAndProject (86)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_net_paid#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -104,7 +104,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_year#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), In(d_year, [2001,2002]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -139,7 +139,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
 (12) Scan parquet default.customer
 Output [4]: [c_customer_sk#8, c_customer_id#9, c_first_name#10, c_last_name#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
 ReadSchema: struct

@@ -200,7 +200,7 @@ Arguments: [customer_id#17 ASC NULLS FIRST], false, 0
 (25) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_net_paid#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -214,7 +214,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1))
 (28) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_year#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), In(d_year, [2001,2002]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -296,7 +296,7 @@ Join condition: None
 (46) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#31, ws_bill_customer_sk#32, ws_net_paid#33]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

@@ -389,7 +389,7 @@ Input [8]: [customer_id#17, year_total#18, customer_id#26, customer_first_name#2
 (67) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#31, ws_bill_customer_sk#32, ws_net_paid#33]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74/explain.txt
index 800b3a88c0d73..4c9bad96e2027 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q74/explain.txt
@@ -76,7 +76,7 @@ TakeOrderedAndProject (72)
 (1) Scan parquet default.customer
 Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
 ReadSchema: struct

@@ -90,7 +90,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
 (4) Scan parquet default.store_sales
 Output [3]: [ss_sold_date_sk#5, ss_customer_sk#6, ss_net_paid#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
 ReadSchema: struct

@@ -117,7 +117,7 @@ Input [7]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, ss_
 (10) Scan parquet default.date_dim
 Output [2]: [d_date_sk#9, d_year#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), In(d_year, [2001,2002]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -166,7 +166,7 @@ Condition : (isnotnull(year_total#17) AND (year_total#17 > 0.00))
 (20) Scan parquet default.customer
 Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
 ReadSchema: struct

@@ -192,7 +192,7 @@ Input [7]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, ss_
 (26) Scan parquet default.date_dim
 Output [2]: [d_date_sk#9, d_year#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), In(d_year, [2001,2002]), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -246,7 +246,7 @@ Join condition: None
 (37) Scan parquet default.customer
 Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
 ReadSchema: struct

@@ -260,7 +260,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
 (40) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#28, ws_bill_customer_sk#29, ws_net_paid#30]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
 ReadSchema: struct

@@ -338,7 +338,7 @@ Input [8]: [customer_id#16, year_total#17, customer_id#23, customer_first_name#2
 (57) Scan parquet default.customer
 Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt
index f797b6c7ed087..f5129e36b4a23 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt
@@ -142,7 +142,7 @@ TakeOrderedAndProject (138)
 (1) Scan parquet default.catalog_sales
 Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)]
 ReadSchema: struct

@@ -156,7 +156,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1))
 (4) Scan parquet default.item
 Output [6]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_category#10, i_manufact_id#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Books), IsNotNull(i_item_sk), IsNotNull(i_manufact_id), IsNotNull(i_brand_id), IsNotNull(i_class_id), IsNotNull(i_category_id)]
 ReadSchema: struct

@@ -187,7 +187,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4,
 (11) Scan parquet default.date_dim
 Output [2]: [d_date_sk#13, d_year#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -222,7 +222,7 @@ Arguments: [cs_order_number#3 ASC NULLS FIRST, cs_item_sk#2 ASC NULLS FIRST], fa
 (19) Scan parquet default.catalog_returns
 Output [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)]
 ReadSchema: struct

@@ -253,7 +253,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#
 (26) Scan parquet default.store_sales
 Output [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -299,7 +299,7 @@ Arguments: [cast(ss_ticket_number#26 as bigint) ASC NULLS FIRST, cast(ss_item_sk (37) Scan parquet default.store_returns Output [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -350,7 +350,7 @@ Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact (48) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -396,7 +396,7 @@ Arguments: [cast(ws_order_number#40 as bigint) ASC NULLS FIRST, cast(ws_item_sk# (59) Scan parquet default.web_returns Output [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct @@ -473,7 +473,7 @@ Arguments: [i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_catego (75) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -499,7 +499,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (81) Scan parquet default.date_dim Output [2]: [d_date_sk#67, d_year#68] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -550,7 +550,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (93) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -632,7 +632,7 @@ Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, 
i_manuf (112) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt index 2c829e45de716..afdfb4f691a7b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt @@ -121,7 +121,7 @@ TakeOrderedAndProject (117) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -135,7 +135,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) (4) Scan parquet default.item Output [6]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_category#10, i_manufact_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Books), IsNotNull(i_item_sk), IsNotNull(i_manufact_id), IsNotNull(i_category_id), IsNotNull(i_brand_id), IsNotNull(i_class_id)] ReadSchema: struct @@ -166,7 +166,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (11) Scan parquet default.date_dim Output [2]: [d_date_sk#13, d_year#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)] ReadSchema: struct @@ -193,7 +193,7 @@ Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (17) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct @@ -220,7 +220,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (23) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location 
[not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -258,7 +258,7 @@ Input [11]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity (32) Scan parquet default.store_returns Output [4]: [sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -305,7 +305,7 @@ Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact (42) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -343,7 +343,7 @@ Input [11]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity# (51) Scan parquet default.web_returns Output [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct @@ -408,7 +408,7 @@ Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact (64) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -434,7 +434,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (70) Scan parquet default.date_dim Output [2]: [d_date_sk#63, d_year#64] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -473,7 +473,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (79) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -543,7 +543,7 @@ Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, 
i_manuf (95) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76.sf100/explain.txt index c3c6951d9d083..10e9cbba93b13 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76.sf100/explain.txt @@ -48,7 +48,7 @@ TakeOrderedAndProject (44) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNull(ss_store_sk), IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -62,7 +62,7 @@ Condition : ((isnull(ss_store_sk#3) AND isnotnull(ss_item_sk#2)) AND isnotnull(s (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_qoy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -89,7 +89,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4 (10) Scan parquet default.item Output [2]: [i_item_sk#9, i_category#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, d_year#6, d_qoy#7 (16) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#15, ws_item_sk#16, ws_ship_customer_sk#17, ws_ext_sales_price#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNull(ws_ship_customer_sk), IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -134,7 +134,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint) (20) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_qoy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -161,7 +161,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (26) Scan parquet default.item 
Output [2]: [i_item_sk#9, i_category#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -184,7 +184,7 @@ Input [7]: [ws_item_sk#16, ws_ship_customer_sk#17, ws_ext_sales_price#18, d_year (31) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#24, cs_ship_addr_sk#25, cs_item_sk#26, cs_ext_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNull(cs_ship_addr_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76/explain.txt index b4a279af23889..917e2b028106c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q76/explain.txt @@ -42,7 +42,7 @@ TakeOrderedAndProject (38) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNull(ss_store_sk), IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -56,7 +56,7 @@ Condition : ((isnull(ss_store_sk#3) AND isnotnull(ss_item_sk#2)) AND isnotnull(s (4) Scan parquet default.item Output [2]: [i_item_sk#5, i_category#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -83,7 +83,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4 (10) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_year#9, d_qoy#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -110,7 +110,7 @@ Input [7]: [ss_sold_date_sk#1, ss_store_sk#3, ss_ext_sales_price#4, i_category#6 (16) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#15, ws_item_sk#16, ws_ship_customer_sk#17, ws_ext_sales_price#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNull(ws_ship_customer_sk), IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -148,7 +148,7 @@ Input [7]: [ws_sold_date_sk#15, ws_ship_customer_sk#17, ws_ext_sales_price#18, i (25) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#22, cs_ship_addr_sk#23, cs_item_sk#24, 
cs_ext_sales_price#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNull(cs_ship_addr_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77.sf100/explain.txt index 560903bb9eeab..e6b88348e4d68 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77.sf100/explain.txt @@ -95,7 +95,7 @@ TakeOrderedAndProject (91) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -109,7 +109,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_date#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-03), LessThanOrEqual(d_date,2000-09-02), IsNotNull(d_date_sk)] ReadSchema: struct @@ -140,7 +140,7 @@ Input [5]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (11) Scan parquet default.store Output [1]: [s_store_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -185,7 +185,7 @@ Results [3]: [s_store_sk#8, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3)) (20) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#19, sr_store_sk#20, sr_return_amt#21, sr_net_loss#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -254,7 +254,7 @@ Input [6]: [s_store_sk#8, sales#17, profit#18, s_store_sk#23, returns#31, profit (35) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#38, cs_call_center_sk#39, cs_ext_sales_price#40, cs_net_profit#41] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -298,7 +298,7 @@ Results [3]: [cs_call_center_sk#39, MakeDecimal(sum(UnscaledValue(cs_ext_sales_p (44) Scan parquet 
default.catalog_returns Output [3]: [cr_returned_date_sk#51, cr_return_amount#52, cr_net_loss#53] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk)] ReadSchema: struct @@ -353,7 +353,7 @@ Input [5]: [cs_call_center_sk#39, sales#49, profit#50, returns#61, profit_loss#6 (56) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net_profit#70] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -379,7 +379,7 @@ Input [5]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net (62) Scan parquet default.web_page Output [1]: [wp_web_page_sk#71] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_page] +Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_web_page_sk)] ReadSchema: struct @@ -424,7 +424,7 @@ Results [3]: [wp_web_page_sk#71, MakeDecimal(sum(UnscaledValue(ws_ext_sales_pric (71) Scan parquet default.web_returns Output [4]: [wr_returned_date_sk#82, wr_web_page_sk#83, wr_return_amt#84, wr_net_loss#85] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_web_page_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77/explain.txt index 75f4fb6640dee..c232055ba8c34 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q77/explain.txt @@ -95,7 +95,7 @@ TakeOrderedAndProject (91) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -109,7 +109,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_date#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-03), LessThanOrEqual(d_date,2000-09-02), IsNotNull(d_date_sk)] ReadSchema: struct @@ -140,7 +140,7 @@ Input [5]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, 
ss_net_profi (11) Scan parquet default.store Output [1]: [s_store_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -185,7 +185,7 @@ Results [3]: [s_store_sk#8, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3)) (20) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#19, sr_store_sk#20, sr_return_amt#21, sr_net_loss#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -254,7 +254,7 @@ Input [6]: [s_store_sk#8, sales#17, profit#18, s_store_sk#23, returns#31, profit (35) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#38, cs_call_center_sk#39, cs_ext_sales_price#40, cs_net_profit#41] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -302,7 +302,7 @@ Arguments: IdentityBroadcastMode, [id=#51] (45) Scan parquet default.catalog_returns Output [3]: [cr_returned_date_sk#52, cr_return_amount#53, cr_net_loss#54] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk)] ReadSchema: struct @@ -353,7 +353,7 @@ Input [5]: [cs_call_center_sk#39, sales#49, profit#50, returns#62, profit_loss#6 (56) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net_profit#70] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -379,7 +379,7 @@ Input [5]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net (62) Scan parquet default.web_page Output [1]: [wp_web_page_sk#71] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_page] +Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_web_page_sk)] ReadSchema: struct @@ -424,7 +424,7 @@ Results [3]: [wp_web_page_sk#71, MakeDecimal(sum(UnscaledValue(ws_ext_sales_pric (71) Scan parquet default.web_returns Output [4]: [wr_returned_date_sk#82, wr_web_page_sk#83, wr_return_amt#84, wr_net_loss#85] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_web_page_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78.sf100/explain.txt index 98a04d3b64d21..633284b9d2200 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78.sf100/explain.txt @@ -74,7 +74,7 @@ TakeOrderedAndProject (70) (1) Scan parquet default.store_sales Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#4, ss_quantity#5, ss_wholesale_cost#6, ss_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -96,7 +96,7 @@ Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (6) Scan parquet default.store_returns Output [2]: [sr_item_sk#9, sr_ticket_number#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number# (14) Scan parquet default.date_dim Output [2]: [d_date_sk#12, d_year#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -180,7 +180,7 @@ Arguments: [ss_sold_year#25 ASC NULLS FIRST, ss_item_sk#2 ASC NULLS FIRST, ss_cu (24) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_order_number#32, cs_quantity#33, cs_wholesale_cost#34, cs_sales_price#35] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_bill_customer_sk)] ReadSchema: struct @@ -202,7 +202,7 @@ Arguments: [cs_order_number#32 ASC NULLS FIRST, cs_item_sk#31 ASC NULLS FIRST], (29) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#37, cr_order_number#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct @@ -284,7 +284,7 @@ Input [12]: [ss_sold_year#25, ss_item_sk#2, ss_customer_sk#3, ss_qty#26, ss_wc#2 (47) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#55, ws_item_sk#56, ws_bill_customer_sk#57, ws_order_number#58, ws_quantity#59, ws_wholesale_cost#60, ws_sales_price#61] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk), IsNotNull(ws_item_sk)] ReadSchema: struct @@ -306,7 +306,7 @@ Arguments: [cast(ws_order_number#58 as bigint) ASC NULLS FIRST, cast(ws_item_sk# (52) Scan parquet default.web_returns Output [2]: [wr_item_sk#63, wr_order_number#64] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78/explain.txt index 0d20fd0422f06..a881456ea09f8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q78/explain.txt @@ -64,7 +64,7 @@ TakeOrderedAndProject (60) (1) Scan parquet default.store_sales Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#4, ss_quantity#5, ss_wholesale_cost#6, ss_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -78,7 +78,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) AND isno (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#8, sr_ticket_number#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -109,7 +109,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number# (11) Scan parquet default.date_dim Output [2]: [d_date_sk#11, d_year#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -154,7 +154,7 @@ Results [6]: [d_year#12 AS ss_sold_year#24, ss_item_sk#2, ss_customer_sk#3, sum( (20) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#28, ws_item_sk#29, ws_bill_customer_sk#30, ws_order_number#31, ws_quantity#32, ws_wholesale_cost#33, ws_sales_price#34] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk), IsNotNull(ws_bill_customer_sk)] ReadSchema: struct @@ -168,7 +168,7 @@ Condition : ((isnotnull(ws_sold_date_sk#28) AND 
isnotnull(ws_item_sk#29)) AND is (23) Scan parquet default.web_returns Output [2]: [wr_item_sk#35, wr_order_number#36] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct @@ -246,7 +246,7 @@ Input [12]: [ss_sold_year#24, ss_item_sk#2, ss_customer_sk#3, ss_qty#25, ss_wc#2 (40) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#54, cs_bill_customer_sk#55, cs_item_sk#56, cs_order_number#57, cs_quantity#58, cs_wholesale_cost#59, cs_sales_price#60] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_bill_customer_sk)] ReadSchema: struct @@ -260,7 +260,7 @@ Condition : ((isnotnull(cs_sold_date_sk#54) AND isnotnull(cs_item_sk#56)) AND is (43) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#61, cr_order_number#62] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79.sf100/explain.txt index 4f39bba8e00d8..2a3d68ee8b62e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79.sf100/explain.txt @@ -41,7 +41,7 @@ TakeOrderedAndProject (37) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -55,7 +55,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#5)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_dow#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_dow), EqualTo(d_dow,1), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -86,7 +86,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss (11) Scan parquet default.household_demographics Output [3]: [hd_demo_sk#13, hd_dep_count#14, hd_vehicle_count#15] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [Or(EqualTo(hd_dep_count,6),GreaterThan(hd_vehicle_count,2)), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -117,7 +117,7 @@ Input [8]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic (18) Scan parquet default.store Output [3]: [s_store_sk#17, s_number_employees#18, s_city#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_number_employees), GreaterThanOrEqual(s_number_employees,200), LessThanOrEqual(s_number_employees,295), IsNotNull(s_store_sk)] ReadSchema: struct @@ -174,7 +174,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (30) Scan parquet default.customer Output [3]: [c_customer_sk#31, c_first_name#32, c_last_name#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79/explain.txt index c5a159ac59b87..63cde726803b9 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q79/explain.txt @@ -38,7 +38,7 @@ TakeOrderedAndProject (34) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_ticket_number#6, ss_coupon_amt#7, ss_net_profit#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -52,7 +52,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#5)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_dow#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_dow), EqualTo(d_dow,1), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -83,7 +83,7 @@ Input [9]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss (11) Scan parquet default.store Output [3]: [s_store_sk#13, s_number_employees#14, s_city#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_number_employees), GreaterThanOrEqual(s_number_employees,200), LessThanOrEqual(s_number_employees,295), IsNotNull(s_store_sk)] 
ReadSchema: struct @@ -114,7 +114,7 @@ Input [9]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_tic (18) Scan parquet default.household_demographics Output [3]: [hd_demo_sk#17, hd_dep_count#18, hd_vehicle_count#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [Or(EqualTo(hd_dep_count,6),GreaterThan(hd_vehicle_count,2)), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -163,7 +163,7 @@ Results [5]: [ss_ticket_number#6, ss_customer_sk#2, s_city#15, MakeDecimal(sum(U (28) Scan parquet default.customer Output [3]: [c_customer_sk#30, c_first_name#31, c_last_name#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8.sf100/explain.txt index df5b8ec4d66db..8249e85adf218 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8.sf100/explain.txt @@ -57,7 +57,7 @@ TakeOrderedAndProject (53) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -71,7 +71,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#4, d_year#5, d_qoy#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,1998), IsNotNull(d_date_sk)] ReadSchema: struct @@ -102,7 +102,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4] (11) Scan parquet default.store Output [3]: [s_store_sk#8, s_store_name#9, s_zip#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -137,7 +137,7 @@ Arguments: [substr(s_zip#10, 1, 2) ASC NULLS FIRST], false, 0 (19) Scan parquet default.customer_address Output [1]: [ca_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] ReadSchema: struct (20) ColumnarToRow [codegen id : 11] @@ -150,7 +150,7 @@ Condition : 
(substr(ca_zip#13, 1, 5) INSET (56910,69952,63792,39371,74351,11101, (22) Scan parquet default.customer_address Output [2]: [ca_address_sk#14, ca_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -172,7 +172,7 @@ Arguments: [ca_address_sk#14 ASC NULLS FIRST], false, 0 (27) Scan parquet default.customer Output [2]: [c_current_addr_sk#16, c_preferred_cust_flag#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_preferred_cust_flag), EqualTo(c_preferred_cust_flag,Y), IsNotNull(c_current_addr_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8/explain.txt index aed20da055b40..e9f9a92a0b766 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q8/explain.txt @@ -51,7 +51,7 @@ TakeOrderedAndProject (47) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -65,7 +65,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#4, d_year#5, d_qoy#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,1998), IsNotNull(d_date_sk)] ReadSchema: struct @@ -96,7 +96,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4] (11) Scan parquet default.store Output [3]: [s_store_sk#8, s_store_name#9, s_zip#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -123,7 +123,7 @@ Input [5]: [ss_store_sk#2, ss_net_profit#3, s_store_sk#8, s_store_name#9, s_zip# (17) Scan parquet default.customer_address Output [1]: [ca_zip#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] ReadSchema: struct (18) ColumnarToRow [codegen id : 6] @@ -136,7 +136,7 @@ Condition : (substr(ca_zip#12, 1, 5) INSET (56910,69952,63792,39371,74351,11101, (20) Scan parquet default.customer_address Output [2]: [ca_address_sk#13, 
ca_zip#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -150,7 +150,7 @@ Condition : isnotnull(ca_address_sk#13)
 (23) Scan parquet default.customer
 Output [2]: [c_current_addr_sk#14, c_preferred_cust_flag#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_preferred_cust_flag), EqualTo(c_preferred_cust_flag,Y), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt
index 8fbaac0eb8fb7..057d786afbcdd 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt
@@ -112,7 +112,7 @@ TakeOrderedAndProject (108)
 (1) Scan parquet default.store_sales
 Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ticket_number#5, ss_ext_sales_price#6, ss_net_profit#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)]
 ReadSchema: struct

@@ -134,7 +134,7 @@ Arguments: [cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number#
 (6) Scan parquet default.store_returns
 Output [4]: [sr_item_sk#9, sr_ticket_number#10, sr_return_amt#11, sr_net_loss#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
 PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)]
 ReadSchema: struct

@@ -165,7 +165,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_t
 (13) Scan parquet default.item
 Output [2]: [i_item_sk#14, i_current_price#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_current_price), GreaterThan(i_current_price,50.00), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -196,7 +196,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ex
 (20) Scan parquet default.date_dim
 Output [2]: [d_date_sk#17, d_date#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-22), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -227,7 +227,7 @@ Input [8]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#
 (27) Scan parquet default.promotion
 Output [2]: [p_promo_sk#20, p_channel_tv#21]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)]
 ReadSchema: struct

@@ -258,7 +258,7 @@ Input [7]: [ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7,
 (34) Scan parquet default.store
 Output [2]: [s_store_sk#23, s_store_id#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -303,7 +303,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#6))#37,17,2) AS s
 (43) Scan parquet default.catalog_sales
 Output [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_item_sk#47, cs_promo_sk#48, cs_order_number#49, cs_ext_sales_price#50, cs_net_profit#51]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk), IsNotNull(cs_item_sk), IsNotNull(cs_promo_sk)]
 ReadSchema: struct

@@ -325,7 +325,7 @@ Arguments: [cs_item_sk#47 ASC NULLS FIRST, cs_order_number#49 ASC NULLS FIRST],
 (48) Scan parquet default.catalog_returns
 Output [4]: [cr_item_sk#53, cr_order_number#54, cr_return_amount#55, cr_net_loss#56]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)]
 ReadSchema: struct

@@ -392,7 +392,7 @@ Input [7]: [cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net
 (64) Scan parquet default.catalog_page
 Output [2]: [cp_catalog_page_sk#58, cp_catalog_page_id#59]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_page]
+Location [not included in comparison]/{warehouse_dir}/catalog_page]
 PushedFilters: [IsNotNull(cp_catalog_page_sk)]
 ReadSchema: struct

@@ -437,7 +437,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#50))#72,17,2) AS
 (73) Scan parquet default.web_sales
 Output [7]: [ws_sold_date_sk#80, ws_item_sk#81, ws_web_site_sk#82, ws_promo_sk#83, ws_order_number#84, ws_ext_sales_price#85, ws_net_profit#86]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_item_sk), IsNotNull(ws_promo_sk)]
 ReadSchema: struct

@@ -459,7 +459,7 @@ Arguments: [cast(ws_item_sk#81 as bigint) ASC NULLS FIRST, cast(ws_order_number#
 (78) Scan parquet default.web_returns
 Output [4]: [wr_item_sk#88, wr_order_number#89, wr_return_amt#90, wr_net_loss#91]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
 PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number)]
 ReadSchema: struct

@@ -526,7 +526,7 @@ Input [7]: [ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_pro
 (94) Scan parquet default.web_site
 Output [2]: [web_site_sk#93, web_site_id#94]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_site]
+Location [not included in comparison]/{warehouse_dir}/web_site]
 PushedFilters: [IsNotNull(web_site_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80/explain.txt
index b835dec02cbea..36b045bfd9129 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80/explain.txt
@@ -103,7 +103,7 @@ TakeOrderedAndProject (99)
 (1) Scan parquet default.store_sales
 Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ticket_number#5, ss_ext_sales_price#6, ss_net_profit#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)]
 ReadSchema: struct

@@ -117,7 +117,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND is
 (4) Scan parquet default.store_returns
 Output [4]: [sr_item_sk#8, sr_ticket_number#9, sr_return_amt#10, sr_net_loss#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
 PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)]
 ReadSchema: struct

@@ -144,7 +144,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_t
 (10) Scan parquet default.date_dim
 Output [2]: [d_date_sk#13, d_date#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-22), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -175,7 +175,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ex
 (17) Scan parquet default.store
 Output [2]: [s_store_sk#16, s_store_id#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -202,7 +202,7 @@ Input [9]: [ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss
 (23) Scan parquet default.item
 Output [2]: [i_item_sk#19, i_current_price#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_current_price), GreaterThan(i_current_price,50.00), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -233,7 +233,7 @@ Input [8]: [ss_item_sk#2, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7,
 (30) Scan parquet default.promotion
 Output [2]: [p_promo_sk#22, p_channel_tv#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/promotion]
+Location [not included in comparison]/{warehouse_dir}/promotion]
 PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)]
 ReadSchema: struct

@@ -282,7 +282,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#6))#36,17,2) AS s
 (40) Scan parquet default.catalog_sales
 Output [7]: [cs_sold_date_sk#44, cs_catalog_page_sk#45, cs_item_sk#46, cs_promo_sk#47, cs_order_number#48, cs_ext_sales_price#49, cs_net_profit#50]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk), IsNotNull(cs_item_sk), IsNotNull(cs_promo_sk)]
 ReadSchema: struct

@@ -296,7 +296,7 @@ Condition : (((isnotnull(cs_sold_date_sk#44) AND isnotnull(cs_catalog_page_sk#45
 (43) Scan parquet default.catalog_returns
 Output [4]: [cr_item_sk#51, cr_order_number#52, cr_return_amount#53, cr_net_loss#54]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)]
 ReadSchema: struct

@@ -335,7 +335,7 @@ Input [9]: [cs_sold_date_sk#44, cs_catalog_page_sk#45, cs_item_sk#46, cs_promo_s
 (52) Scan parquet default.catalog_page
 Output [2]: [cp_catalog_page_sk#56, cp_catalog_page_id#57]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_page]
+Location [not included in comparison]/{warehouse_dir}/catalog_page]
 PushedFilters: [IsNotNull(cp_catalog_page_sk)]
 ReadSchema: struct

@@ -404,7 +404,7 @@ Results [5]: [MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#49))#70,17,2) AS
 (67) Scan parquet default.web_sales
 Output [7]: [ws_sold_date_sk#78, ws_item_sk#79, ws_web_site_sk#80, ws_promo_sk#81, ws_order_number#82, ws_ext_sales_price#83, ws_net_profit#84]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_item_sk), IsNotNull(ws_promo_sk)]
 ReadSchema: struct

@@ -418,7 +418,7 @@ Condition : (((isnotnull(ws_sold_date_sk#78) AND isnotnull(ws_web_site_sk#80)) A
 (70) Scan parquet default.web_returns
 Output [4]: [wr_item_sk#85, wr_order_number#86, wr_return_amt#87, wr_net_loss#88]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
 PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number)]
 ReadSchema: struct

@@ -457,7 +457,7 @@ Input [9]: [ws_sold_date_sk#78, ws_item_sk#79, ws_web_site_sk#80, ws_promo_sk#81
 (79) Scan parquet default.web_site
 Output [2]: [web_site_sk#90, web_site_id#91]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_site]
+Location [not included in comparison]/{warehouse_dir}/web_site]
 PushedFilters: [IsNotNull(web_site_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt
index d85e361512b17..6e757528a3e68 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt
@@ -65,7 +65,7 @@ TakeOrderedAndProject (61)
 (1) Scan parquet default.customer
 Output [6]: [c_customer_sk#1, c_customer_id#2, c_current_addr_sk#3, c_salutation#4, c_first_name#5, c_last_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

@@ -79,7 +79,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_current_addr_sk#3))
 (4) Scan parquet default.customer_address
 Output [12]: [ca_address_sk#7, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -114,7 +114,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0
 (12) Scan parquet default.catalog_returns
 Output [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk), IsNotNull(cr_returning_customer_sk)]
 ReadSchema: struct

@@ -128,7 +128,7 @@ Condition : ((isnotnull(cr_returned_date_sk#21) AND isnotnull(cr_returning_addr_
 (15) Scan parquet default.date_dim
 Output [2]: [d_date_sk#25, d_year#26]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -167,7 +167,7 @@ Arguments: [cr_returning_addr_sk#23 ASC NULLS FIRST], false, 0
 (24) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#7, ca_state#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state)]
 ReadSchema: struct

@@ -237,7 +237,7 @@ Input [19]: [c_customer_sk#1, c_customer_id#2, c_salutation#4, c_first_name#5, c
 (39) Scan parquet default.catalog_returns
 Output [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81/explain.txt
index 956a51096d2d3..2530fe9e8af95 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81/explain.txt
@@ -56,7 +56,7 @@ TakeOrderedAndProject (52)
 (1) Scan parquet default.catalog_returns
 Output [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk), IsNotNull(cr_returning_customer_sk)]
 ReadSchema: struct

@@ -70,7 +70,7 @@ Condition : ((isnotnull(cr_returned_date_sk#1) AND isnotnull(cr_returning_addr_s
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#5, d_year#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -101,7 +101,7 @@ Input [5]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr
 (11) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#8, ca_state#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state)]
 ReadSchema: struct

@@ -150,7 +150,7 @@ Condition : isnotnull(ctr_total_return#17)
 (21) Scan parquet default.catalog_returns
 Output [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk)]
 ReadSchema: struct

@@ -241,7 +241,7 @@ Input [5]: [ctr_customer_sk#15, ctr_state#16, ctr_total_return#17, (CAST(avg(ctr
 (40) Scan parquet default.customer
 Output [6]: [c_customer_sk#31, c_customer_id#32, c_current_addr_sk#33, c_salutation#34, c_first_name#35, c_last_name#36]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)]
 ReadSchema: struct

@@ -268,7 +268,7 @@ Input [8]: [ctr_customer_sk#15, ctr_total_return#17, c_customer_sk#31, c_custome
 (46) Scan parquet default.customer_address
 Output [12]: [ca_address_sk#8, ca_street_number#38, ca_street_name#39, ca_street_type#40, ca_suite_number#41, ca_city#42, ca_county#43, ca_state#9, ca_zip#44, ca_country#45, ca_gmt_offset#46, ca_location_type#47]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82.sf100/explain.txt
index c814e20e826e3..4f1a4a4a8a127 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82.sf100/explain.txt
@@ -35,7 +35,7 @@ TakeOrderedAndProject (31)
 (1) Scan parquet default.item
 Output [5]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, i_manufact_id#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,62.00), In(i_manufact_id, [129,270,821,423]), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -57,7 +57,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.inventory
 Output [3]: [inv_date_sk#7, inv_item_sk#8, inv_quantity_on_hand#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
 PushedFilters: [IsNotNull(inv_quantity_on_hand), GreaterThanOrEqual(inv_quantity_on_hand,100), LessThanOrEqual(inv_quantity_on_hand,500), IsNotNull(inv_item_sk), IsNotNull(inv_date_sk)]
 ReadSchema: struct

@@ -84,7 +84,7 @@ Input [6]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, inv_date
 (12) Scan parquet default.date_dim
 Output [2]: [d_date_sk#10, d_date#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-05-25), LessThanOrEqual(d_date,2000-07-24), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -123,7 +123,7 @@ Arguments: [i_item_sk#1 ASC NULLS FIRST], false, 0
 (21) Scan parquet default.store_sales
 Output [1]: [ss_item_sk#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82/explain.txt
index d717c8f635828..c7240792c9d0b 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q82/explain.txt
@@ -32,7 +32,7 @@ TakeOrderedAndProject (28)
 (1) Scan parquet default.item
 Output [5]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, i_manufact_id#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_current_price), GreaterThanOrEqual(i_current_price,62.00), In(i_manufact_id, [129,270,821,423]), IsNotNull(i_item_sk)]
 ReadSchema: struct

@@ -50,7 +50,7 @@ Input [5]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, i_manufa
 (5) Scan parquet default.inventory
 Output [3]: [inv_date_sk#6, inv_item_sk#7, inv_quantity_on_hand#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/inventory]
+Location [not included in comparison]/{warehouse_dir}/inventory]
 PushedFilters: [IsNotNull(inv_quantity_on_hand), GreaterThanOrEqual(inv_quantity_on_hand,100), LessThanOrEqual(inv_quantity_on_hand,500), IsNotNull(inv_item_sk), IsNotNull(inv_date_sk)]
 ReadSchema: struct

@@ -81,7 +81,7 @@ Input [6]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, inv_date
 (12) Scan parquet default.date_dim
 Output [2]: [d_date_sk#10, d_date#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-05-25), LessThanOrEqual(d_date,2000-07-24), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -112,7 +112,7 @@ Input [6]: [i_item_sk#1, i_item_id#2, i_item_desc#3, i_current_price#4, inv_date
 (19) Scan parquet default.store_sales
 Output [1]: [ss_item_sk#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83.sf100/explain.txt
index 86a1fade375f2..c41efb82d904c 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83.sf100/explain.txt
@@ -65,7 +65,7 @@ TakeOrderedAndProject (61)
 (1) Scan parquet default.store_returns
 Output [3]: [sr_returned_date_sk#1, sr_item_sk#2, sr_return_quantity#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
 PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
 ReadSchema: struct

@@ -79,7 +79,7 @@ Condition : (isnotnull(sr_item_sk#2) AND isnotnull(sr_returned_date_sk#1))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_date#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -93,7 +93,7 @@ Condition : isnotnull(d_date_sk#4)
 (7) Scan parquet default.date_dim
 Output [2]: [d_date#5, d_week_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 ReadSchema: struct

 (8) ColumnarToRow [codegen id : 2]
@@ -102,7 +102,7 @@ Input [2]: [d_date#5, d_week_seq#6]
 (9) Scan parquet default.date_dim
 Output [2]: [d_date#5, d_week_seq#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 ReadSchema: struct

 (10) ColumnarToRow [codegen id : 1]
@@ -158,7 +158,7 @@ Input [4]: [sr_returned_date_sk#1, sr_item_sk#2, sr_return_quantity#3, d_date_sk
 (22) Scan parquet default.item
 Output [2]: [i_item_sk#12, i_item_id#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_item_id)]
 ReadSchema: struct

@@ -203,7 +203,7 @@ Results [2]: [i_item_id#13 AS item_id#19, sum(cast(sr_return_quantity#3 as bigin
 (31) Scan parquet default.catalog_returns
 Output [3]: [cr_returned_date_sk#21, cr_item_sk#22, cr_return_quantity#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_returned_date_sk)]
 ReadSchema: struct

@@ -272,7 +272,7 @@ Input [4]: [item_id#19, sr_item_qty#20, item_id#28, cr_item_qty#29]
 (46) Scan parquet default.web_returns
 Output [3]: [wr_returned_date_sk#31, wr_item_sk#32, wr_return_quantity#33]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
 PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_returned_date_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83/explain.txt
index 39fc20878d286..0d44a01466dd9 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q83/explain.txt
@@ -65,7 +65,7 @@ TakeOrderedAndProject (61)
 (1) Scan parquet default.store_returns
 Output [3]: [sr_returned_date_sk#1, sr_item_sk#2, sr_return_quantity#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
 PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_returned_date_sk)]
 ReadSchema: struct

@@ -79,7 +79,7 @@ Condition : (isnotnull(sr_item_sk#2) AND isnotnull(sr_returned_date_sk#1))
 (4) Scan parquet default.item
 Output [2]: [i_item_sk#4, i_item_id#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_item_id)]
 ReadSchema: struct

@@ -106,7 +106,7 @@ Input [5]: [sr_returned_date_sk#1, sr_item_sk#2, sr_return_quantity#3, i_item_sk
 (10) Scan parquet default.date_dim
 Output [2]: [d_date_sk#7, d_date#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -120,7 +120,7 @@ Condition : isnotnull(d_date_sk#7)
 (13) Scan parquet default.date_dim
 Output [2]: [d_date#8, d_week_seq#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 ReadSchema: struct

 (14) ColumnarToRow [codegen id : 3]
@@ -129,7 +129,7 @@ Input [2]: [d_date#8, d_week_seq#9]
 (15) Scan parquet default.date_dim
 Output [2]: [d_date#8, d_week_seq#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 ReadSchema: struct

 (16) ColumnarToRow [codegen id : 2]
@@ -203,7 +203,7 @@ Results [2]: [i_item_id#5 AS item_id#19, sum(cast(sr_return_quantity#3 as bigint
 (31) Scan parquet default.catalog_returns
 Output [3]: [cr_returned_date_sk#21, cr_item_sk#22, cr_return_quantity#23]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
 PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_returned_date_sk)]
 ReadSchema: struct

@@ -272,7 +272,7 @@ Input [4]: [item_id#19, sr_item_qty#20, item_id#28, cr_item_qty#29]
 (46) Scan parquet default.web_returns
 Output [3]: [wr_returned_date_sk#31, wr_item_sk#32, wr_return_quantity#33]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
 PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_returned_date_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt
index 4109009213406..ae0b996ec28be 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt
@@ -40,7 +40,7 @@ TakeOrderedAndProject (36)
 (1) Scan parquet default.customer
 Output [6]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_current_addr_sk#4, c_first_name#5, c_last_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk)]
 ReadSchema: struct

@@ -54,7 +54,7 @@ Condition : ((isnotnull(c_current_addr_sk#4) AND isnotnull(c_current_cdemo_sk#2)
 (4) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#7, ca_city#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_city), EqualTo(ca_city,Edgewood), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Input [7]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_curre
 (11) Scan parquet default.income_band
 Output [3]: [ib_income_band_sk#10, ib_lower_bound#11, ib_upper_bound#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/income_band]
+Location [not included in comparison]/{warehouse_dir}/income_band]
 PushedFilters: [IsNotNull(ib_lower_bound), IsNotNull(ib_upper_bound), GreaterThanOrEqual(ib_lower_bound,38128), LessThanOrEqual(ib_upper_bound,88128), IsNotNull(ib_income_band_sk)]
 ReadSchema: struct

@@ -107,7 +107,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (16) Scan parquet default.household_demographics
 Output [2]: [hd_demo_sk#14, hd_income_band_sk#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)]
 ReadSchema: struct

@@ -147,7 +147,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint))
 (25) Scan parquet default.customer_demographics
 Output [1]: [cd_demo_sk#18]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -174,7 +174,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[3, int, true] as bigint))
 (31) Scan parquet default.store_returns
 Output [1]: [sr_cdemo_sk#20]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
 PushedFilters: [IsNotNull(sr_cdemo_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84/explain.txt
index bd581e4738788..9e5fb4386ba93 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84/explain.txt
@@ -40,7 +40,7 @@ TakeOrderedAndProject (36)
 (1) Scan parquet default.customer
 Output [6]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_current_addr_sk#4, c_first_name#5, c_last_name#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk)]
 ReadSchema: struct

@@ -54,7 +54,7 @@ Condition : ((isnotnull(c_current_addr_sk#4) AND isnotnull(c_current_cdemo_sk#2)
 (4) Scan parquet default.customer_address
 Output [2]: [ca_address_sk#7, ca_city#8]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_city), EqualTo(ca_city,Edgewood), IsNotNull(ca_address_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Input [7]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_curre
 (11) Scan parquet default.customer_demographics
 Output [1]: [cd_demo_sk#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -112,7 +112,7 @@ Input [6]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_first
 (17) Scan parquet default.household_demographics
 Output [2]: [hd_demo_sk#12, hd_income_band_sk#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)]
 ReadSchema: struct

@@ -139,7 +139,7 @@ Input [7]: [c_customer_id#1, c_current_hdemo_sk#3, c_first_name#5, c_last_name#6
 (23) Scan parquet default.income_band
 Output [3]: [ib_income_band_sk#15, ib_lower_bound#16, ib_upper_bound#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/income_band]
+Location [not included in comparison]/{warehouse_dir}/income_band]
 PushedFilters: [IsNotNull(ib_lower_bound), IsNotNull(ib_upper_bound), GreaterThanOrEqual(ib_lower_bound,38128), LessThanOrEqual(ib_upper_bound,88128), IsNotNull(ib_income_band_sk)]
 ReadSchema: struct

@@ -170,7 +170,7 @@ Input [6]: [c_customer_id#1, c_first_name#5, c_last_name#6, cd_demo_sk#10, hd_in
 (30) Scan parquet default.store_returns
 Output [1]: [sr_cdemo_sk#19]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
 PushedFilters: [IsNotNull(sr_cdemo_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt
index 86d82ddb52e1a..064acdf824282 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt
@@ -61,7 +61,7 @@ TakeOrderedAndProject (57)
 (1) Scan parquet default.date_dim
 Output [2]: [d_date_sk#1, d_year#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -83,7 +83,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
 (6) Scan parquet default.web_sales
 Output [7]: [ws_sold_date_sk#4, ws_item_sk#5, ws_web_page_sk#6, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_page_sk), IsNotNull(ws_sold_date_sk), Or(Or(And(GreaterThanOrEqual(ws_sales_price,100.00),LessThanOrEqual(ws_sales_price,150.00)),And(GreaterThanOrEqual(ws_sales_price,50.00),LessThanOrEqual(ws_sales_price,100.00))),And(GreaterThanOrEqual(ws_sales_price,150.00),LessThanOrEqual(ws_sales_price,200.00))), Or(Or(And(GreaterThanOrEqual(ws_net_profit,100.00),LessThanOrEqual(ws_net_profit,200.00)),And(GreaterThanOrEqual(ws_net_profit,150.00),LessThanOrEqual(ws_net_profit,300.00))),And(GreaterThanOrEqual(ws_net_profit,50.00),LessThanOrEqual(ws_net_profit,250.00)))]
 ReadSchema: struct

@@ -97,7 +97,7 @@ Condition : (((((isnotnull(ws_item_sk#5) AND isnotnull(ws_order_number#7)) AND i
 (9) Scan parquet default.web_page
 Output [1]: [wp_web_page_sk#11]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_page]
+Location [not included in comparison]/{warehouse_dir}/web_page]
 PushedFilters: [IsNotNull(wp_web_page_sk)]
 ReadSchema: struct

@@ -132,7 +132,7 @@ Arguments: [cast(ws_item_sk#5 as bigint) ASC NULLS FIRST, cast(ws_order_number#7
 (17) Scan parquet default.web_returns
 Output [8]: [wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
 PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number), IsNotNull(wr_refunded_cdemo_sk), IsNotNull(wr_returning_cdemo_sk), IsNotNull(wr_refunded_addr_sk), IsNotNull(wr_reason_sk)]
 ReadSchema: struct

@@ -172,7 +172,7 @@ Input [11]: [d_date_sk#1, ws_sold_date_sk#4, ws_quantity#8, ws_sales_price#9, ws
 (26) Scan parquet default.reason
 Output [2]: [r_reason_sk#23, r_reason_desc#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/reason]
+Location [not included in comparison]/{warehouse_dir}/reason]
 PushedFilters: [IsNotNull(r_reason_sk)]
 ReadSchema: struct

@@ -199,7 +199,7 @@ Input [11]: [ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdem
 (32) Scan parquet default.customer_address
 Output [3]: [ca_address_sk#26, ca_state#27, ca_country#28]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [IN,OH,NJ]),In(ca_state, [WI,CT,KY])),In(ca_state, [LA,IA,AR]))]
 ReadSchema: struct

@@ -238,7 +238,7 @@ Arguments: [wr_refunded_cdemo_sk#15 ASC NULLS FIRST, wr_returning_cdemo_sk#17 AS
 (41) Scan parquet default.customer_demographics
 Output [3]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_education_status), IsNotNull(cd_marital_status), IsNotNull(cd_demo_sk)]
 ReadSchema: struct

@@ -252,7 +252,7 @@ Condition : ((isnotnull(cd_education_status#33) AND isnotnull(cd_marital_status#
 (44) Scan parquet default.customer_demographics
 Output [3]: [cd_demo_sk#34, cd_marital_status#35, cd_education_status#36]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_education_status), IsNotNull(cd_marital_status), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85/explain.txt
index 94567de54317f..48a810d472642 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85/explain.txt
@@ -55,7 +55,7 @@ TakeOrderedAndProject (51)
 (1) Scan parquet default.web_sales
 Output [7]: [ws_sold_date_sk#1, ws_item_sk#2, ws_web_page_sk#3, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_page_sk), IsNotNull(ws_sold_date_sk), Or(Or(And(GreaterThanOrEqual(ws_sales_price,100.00),LessThanOrEqual(ws_sales_price,150.00)),And(GreaterThanOrEqual(ws_sales_price,50.00),LessThanOrEqual(ws_sales_price,100.00))),And(GreaterThanOrEqual(ws_sales_price,150.00),LessThanOrEqual(ws_sales_price,200.00))), Or(Or(And(GreaterThanOrEqual(ws_net_profit,100.00),LessThanOrEqual(ws_net_profit,200.00)),And(GreaterThanOrEqual(ws_net_profit,150.00),LessThanOrEqual(ws_net_profit,300.00))),And(GreaterThanOrEqual(ws_net_profit,50.00),LessThanOrEqual(ws_net_profit,250.00)))]
 ReadSchema: struct

@@ -69,7 +69,7 @@ Condition : (((((isnotnull(ws_item_sk#2) AND isnotnull(ws_order_number#4)) AND i
 (4) Scan parquet default.web_returns
 Output [8]: [wr_item_sk#8, wr_refunded_cdemo_sk#9, wr_refunded_addr_sk#10, wr_returning_cdemo_sk#11, wr_reason_sk#12, wr_order_number#13, wr_fee#14, wr_refunded_cash#15]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
 PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number), IsNotNull(wr_refunded_cdemo_sk), IsNotNull(wr_returning_cdemo_sk), IsNotNull(wr_refunded_addr_sk), IsNotNull(wr_reason_sk)]
 ReadSchema: struct

@@ -96,7 +96,7 @@ Input [15]: [ws_sold_date_sk#1, ws_item_sk#2, ws_web_page_sk#3, ws_order_number#
 (10) Scan parquet default.web_page
 Output [1]: [wp_web_page_sk#17]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_page]
+Location [not included in comparison]/{warehouse_dir}/web_page]
 PushedFilters: [IsNotNull(wp_web_page_sk)]
 ReadSchema: struct

@@ -123,7 +123,7 @@ Input [12]: [ws_sold_date_sk#1, ws_web_page_sk#3, ws_quantity#5, ws_sales_price#
 (16) Scan parquet default.customer_demographics
 Output [3]: [cd_demo_sk#19, cd_marital_status#20, cd_education_status#21]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status), IsNotNull(cd_education_status), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))]
 ReadSchema: struct

@@ -150,7 +150,7 @@ Input [13]: [ws_sold_date_sk#1, ws_quantity#5, ws_sales_price#6, ws_net_profit#7
 (22) Scan parquet default.customer_demographics
 Output [3]: [cd_demo_sk#23, cd_marital_status#24, cd_education_status#25]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
 PushedFilters: [IsNotNull(cd_education_status), IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status)]
 ReadSchema: struct

@@ -177,7 +177,7 @@ Input [13]: [ws_sold_date_sk#1, ws_quantity#5, ws_net_profit#7, wr_refunded_addr
 (28) Scan parquet default.customer_address
 Output [3]: [ca_address_sk#27, ca_state#28, ca_country#29]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
 PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [IN,OH,NJ]),In(ca_state, [WI,CT,KY])),In(ca_state, [LA,IA,AR]))]
 ReadSchema: struct

@@ -208,7 +208,7 @@ Input [9]: [ws_sold_date_sk#1, ws_quantity#5, ws_net_profit#7, wr_refunded_addr_
 (35) Scan parquet default.date_dim
 Output [2]: [d_date_sk#31, d_year#32]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -239,7 +239,7 @@ Input [6]: [ws_sold_date_sk#1, ws_quantity#5, wr_reason_sk#12, wr_fee#14, wr_ref
 (42) Scan parquet default.reason
 Output [2]: [r_reason_sk#34, r_reason_desc#35]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/reason]
+Location [not included in comparison]/{warehouse_dir}/reason]
 PushedFilters: [IsNotNull(r_reason_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86.sf100/explain.txt
index af394e3d93d65..20ae4d244dcd8 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86.sf100/explain.txt
@@ -29,7 +29,7 @@ TakeOrderedAndProject (25)
 (1) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)]
 ReadSchema: struct

@@ -43,7 +43,7 @@ Condition : (isnotnull(ws_sold_date_sk#1) AND isnotnull(ws_item_sk#2))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -74,7 +74,7 @@ Input [4]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3, d_date_sk#4]
 (11) Scan parquet default.item
 Output [3]: [i_item_sk#7, i_class#8, i_category#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86/explain.txt
index 712444ca3a9a5..20ae4d244dcd8 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q86/explain.txt
@@ -29,7 +29,7 @@ TakeOrderedAndProject (25)
 (1) Scan parquet default.web_sales
 Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)]
 ReadSchema: struct

@@ -43,7 +43,7 @@ Condition : (isnotnull(ws_sold_date_sk#1) AND isnotnull(ws_item_sk#2))
 (4) Scan parquet default.date_dim
 Output [2]: [d_date_sk#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -74,7 +74,7 @@ Input [4]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3, d_date_sk#4]
 (11) Scan parquet default.item
 Output [3]: [i_item_sk#7, i_class#8, i_category#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
 PushedFilters: [IsNotNull(i_item_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87.sf100/explain.txt
index cb0aa8eb0bd8b..377bd36f520eb 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87.sf100/explain.txt
@@ -71,7 +71,7 @@
 (1) Scan parquet default.store_sales
 Output [2]: [ss_sold_date_sk#1, ss_customer_sk#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct

@@ -85,7 +85,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_customer_sk#2))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#3, d_date#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -124,7 +124,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
 (13) Scan parquet default.customer
 Output [3]: [c_customer_sk#8, c_first_name#9, c_last_name#10]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct

@@ -163,7 +163,7 @@ Arguments: [coalesce(c_last_name#10, ) ASC NULLS FIRST, isnull(c_last_name#10) A
 (22) Scan parquet default.catalog_sales
 Output [2]: [cs_sold_date_sk#13, cs_bill_customer_sk#14]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)]
 ReadSchema: struct

@@ -244,7 +244,7 @@ Join condition: None
 (40) Scan parquet default.web_sales
 Output [2]: [ws_sold_date_sk#23, ws_bill_customer_sk#24]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87/explain.txt
index 57f97a686c31a..3d59a670b7e8b 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q87/explain.txt
@@ -58,7 +58,7 @@
 (1) Scan parquet default.store_sales
 Output [2]: [ss_sold_date_sk#1, ss_customer_sk#2]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk)]
 ReadSchema: struct

@@ -72,7 +72,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_customer_sk#2))
 (4) Scan parquet default.date_dim
 Output [3]: [d_date_sk#3, d_date#4, d_month_seq#5]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
 PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
 ReadSchema: struct

@@ -103,7 +103,7 @@ Input [4]: [ss_sold_date_sk#1, ss_customer_sk#2, d_date_sk#3, d_date#4]
 (11) Scan parquet default.customer
 Output [3]: [c_customer_sk#7, c_first_name#8, c_last_name#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
 PushedFilters: [IsNotNull(c_customer_sk)]
 ReadSchema: struct

@@ -130,7 +130,7 @@ Input [5]: [ss_customer_sk#2, d_date#4, c_customer_sk#7, c_first_name#8, c_last_
 (17) Scan parquet default.catalog_sales
 Output [2]: [cs_sold_date_sk#11, cs_bill_customer_sk#12]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
 PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)]
 ReadSchema: struct

@@ -195,7 +195,7 @@ Join condition: None
 (31) Scan parquet default.web_sales
 Output [2]: [ws_sold_date_sk#20, ws_bill_customer_sk#21]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
 PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk)]
 ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88.sf100/explain.txt
index 1ec80c2abe08d..562f73c509ee7 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88.sf100/explain.txt
@@ -178,7 +178,7 @@ BroadcastNestedLoopJoin Inner BuildRight (174)
 (1) Scan parquet default.store_sales
 Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -192,7 +192,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn
 (4) Scan parquet default.time_dim
 Output [3]: [t_time_sk#4, t_hour#5, t_minute#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
 PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,8), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)]
 ReadSchema: struct

@@ -223,7 +223,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, t_time_sk#4]
 (11) Scan parquet default.store
 Output [2]: [s_store_sk#8, s_store_name#9]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
 PushedFilters: [IsNotNull(s_store_name), EqualTo(s_store_name,ese), IsNotNull(s_store_sk)]
 ReadSchema: struct

@@ -254,7 +254,7 @@ Input [3]: [ss_hdemo_sk#2, ss_store_sk#3, s_store_sk#8]
 (18) Scan parquet default.household_demographics
 Output [3]: [hd_demo_sk#11, hd_dep_count#12, hd_vehicle_count#13]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
 PushedFilters: [Or(Or(And(EqualTo(hd_dep_count,4),LessThanOrEqual(hd_vehicle_count,6)),And(EqualTo(hd_dep_count,2),LessThanOrEqual(hd_vehicle_count,4))),And(EqualTo(hd_dep_count,0),LessThanOrEqual(hd_vehicle_count,2))), IsNotNull(hd_demo_sk)]
 ReadSchema: struct

@@ -303,7 +303,7 @@ Results [1]: [count(1)#18 AS h8_30_to_9#19]
 (28) Scan parquet default.store_sales
 Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -317,7 +317,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn
 (31) Scan parquet default.time_dim
 Output [3]: [t_time_sk#4, t_hour#5, t_minute#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
 PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,9), LessThan(t_minute,30), IsNotNull(t_time_sk)]
 ReadSchema: struct

@@ -397,7 +397,7 @@ Join condition: None
 (49) Scan parquet default.store_sales
 Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -411,7 +411,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn
 (52) Scan parquet default.time_dim
 Output [3]: [t_time_sk#4, t_hour#5, t_minute#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
 PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,9), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)]
 ReadSchema: struct

@@ -491,7 +491,7 @@ Join condition: None
 (70) Scan parquet default.store_sales
 Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
 PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)]
 ReadSchema: struct

@@ -505,7 +505,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn
 (73) Scan parquet default.time_dim
 Output [3]: [t_time_sk#4, t_hour#5, t_minute#6]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
 PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,10), LessThan(t_minute,30), IsNotNull(t_time_sk)]
 ReadSchema: struct

@@ -585,7 +585,7 @@ Join condition: None
 (91) Scan parquet default.store_sales
 Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
 Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in
comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -599,7 +599,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn (94) Scan parquet default.time_dim Output [3]: [t_time_sk#4, t_hour#5, t_minute#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,10), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -679,7 +679,7 @@ Join condition: None (112) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -693,7 +693,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn (115) Scan parquet default.time_dim Output [3]: [t_time_sk#4, t_hour#5, t_minute#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,11), LessThan(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -773,7 +773,7 @@ Join condition: None (133) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -787,7 +787,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn (136) Scan parquet default.time_dim Output [3]: [t_time_sk#4, t_hour#5, t_minute#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,11), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -867,7 +867,7 @@ Join condition: None (154) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -881,7 +881,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn (157) Scan parquet default.time_dim Output [3]: [t_time_sk#4, t_hour#5, t_minute#6] 
Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,12), LessThan(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88/explain.txt index 4b9064aff5f0c..22297e02e9b27 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q88/explain.txt @@ -178,7 +178,7 @@ BroadcastNestedLoopJoin Inner BuildRight (174) (1) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -192,7 +192,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn (4) Scan parquet default.household_demographics Output [3]: [hd_demo_sk#4, hd_dep_count#5, hd_vehicle_count#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [Or(Or(And(EqualTo(hd_dep_count,4),LessThanOrEqual(hd_vehicle_count,6)),And(EqualTo(hd_dep_count,2),LessThanOrEqual(hd_vehicle_count,4))),And(EqualTo(hd_dep_count,0),LessThanOrEqual(hd_vehicle_count,2))), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -223,7 +223,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (11) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,8), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -254,7 +254,7 @@ Input [3]: [ss_sold_time_sk#1, ss_store_sk#3, t_time_sk#8] (18) Scan parquet default.store Output [2]: [s_store_sk#12, s_store_name#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_name), EqualTo(s_store_name,ese), IsNotNull(s_store_sk)] ReadSchema: struct @@ -303,7 +303,7 @@ Results [1]: [count(1)#18 AS h8_30_to_9#19] (28) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), 
IsNotNull(ss_store_sk)] ReadSchema: struct @@ -329,7 +329,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (34) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,9), LessThan(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -397,7 +397,7 @@ Join condition: None (49) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -423,7 +423,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (55) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,9), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -491,7 +491,7 @@ Join condition: None (70) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -517,7 +517,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (76) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,10), LessThan(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -585,7 +585,7 @@ Join condition: None (91) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -611,7 +611,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (97) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in 
comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,10), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -679,7 +679,7 @@ Join condition: None (112) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -705,7 +705,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (118) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,11), LessThan(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -773,7 +773,7 @@ Join condition: None (133) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -799,7 +799,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (139) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,11), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct @@ -867,7 +867,7 @@ Join condition: None (154) Scan parquet default.store_sales Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -893,7 +893,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4] (160) Scan parquet default.time_dim Output [3]: [t_time_sk#8, t_hour#9, t_minute#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,12), LessThan(t_minute,30), IsNotNull(t_time_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89.sf100/explain.txt index 1bc6a409b1b84..0d47735a1138d 
100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89.sf100/explain.txt @@ -35,7 +35,7 @@ TakeOrderedAndProject (31) (1) Scan parquet default.item Output [4]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(In(i_category, [Books,Electronics,Sports]),In(i_class, [computers,stereo,football])),And(In(i_category, [Men,Jewelry,Women]),In(i_class, [shirts,birdal,dresses]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -53,7 +53,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint) (5) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#6, ss_item_sk#7, ss_store_sk#8, ss_sales_price#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -76,7 +76,7 @@ Input [8]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, ss_sold_date_sk#6, (10) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk)] ReadSchema: struct @@ -107,7 +107,7 @@ Input [8]: [i_brand#2, i_class#3, i_category#4, ss_sold_date_sk#6, ss_store_sk#8 (17) Scan parquet default.store Output [3]: [s_store_sk#14, s_store_name#15, s_company_name#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89/explain.txt index 29c357117d279..8dca84461a4f6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q89/explain.txt @@ -35,7 +35,7 @@ TakeOrderedAndProject (31) (1) Scan parquet default.item Output [4]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [Or(And(In(i_category, [Books,Electronics,Sports]),In(i_class, [computers,stereo,football])),And(In(i_category, [Men,Jewelry,Women]),In(i_class, [shirts,birdal,dresses]))), IsNotNull(i_item_sk)] ReadSchema: struct @@ -49,7 +49,7 @@ Condition : (((i_category#4 IN (Books,Electronics,Sports) AND i_class#3 IN (comp (4) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#5, 
ss_item_sk#6, ss_store_sk#7, ss_sales_price#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -76,7 +76,7 @@ Input [8]: [i_item_sk#1, i_brand#2, i_class#3, i_category#4, ss_sold_date_sk#5, (10) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk)] ReadSchema: struct @@ -107,7 +107,7 @@ Input [8]: [i_brand#2, i_class#3, i_category#4, ss_sold_date_sk#5, ss_store_sk#7 (17) Scan parquet default.store Output [3]: [s_store_sk#14, s_store_name#15, s_company_name#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9.sf100/explain.txt index d5d360c15d657..55f7977b231d5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9.sf100/explain.txt @@ -8,7 +8,7 @@ (1) Scan parquet default.reason Output [1]: [r_reason_sk#1] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/reason] +Location [not included in comparison]/{warehouse_dir}/reason] PushedFilters: [IsNotNull(r_reason_sk), EqualTo(r_reason_sk,1)] ReadSchema: struct @@ -38,7 +38,7 @@ Subquery:1 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (5) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,1), LessThanOrEqual(ss_quantity,20)] ReadSchema: struct @@ -84,7 +84,7 @@ Subquery:2 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (12) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,1), LessThanOrEqual(ss_quantity,20)] ReadSchema: struct @@ -130,7 +130,7 @@ Subquery:3 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (19) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,1), LessThanOrEqual(ss_quantity,20)] ReadSchema: struct @@ -176,7 +176,7 @@ Subquery:4 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (26) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,40)] ReadSchema: struct @@ -222,7 +222,7 @@ Subquery:5 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (33) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,40)] ReadSchema: struct @@ -268,7 +268,7 @@ Subquery:6 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (40) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,40)] ReadSchema: struct @@ -314,7 +314,7 @@ Subquery:7 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (47) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,41), LessThanOrEqual(ss_quantity,60)] ReadSchema: struct @@ -360,7 +360,7 @@ Subquery:8 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (54) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,41), LessThanOrEqual(ss_quantity,60)] ReadSchema: struct @@ -406,7 +406,7 @@ Subquery:9 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (61) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: 
[IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,41), LessThanOrEqual(ss_quantity,60)] ReadSchema: struct @@ -452,7 +452,7 @@ Subquery:10 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (68) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,61), LessThanOrEqual(ss_quantity,80)] ReadSchema: struct @@ -498,7 +498,7 @@ Subquery:11 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (75) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,61), LessThanOrEqual(ss_quantity,80)] ReadSchema: struct @@ -544,7 +544,7 @@ Subquery:12 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (82) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,61), LessThanOrEqual(ss_quantity,80)] ReadSchema: struct @@ -590,7 +590,7 @@ Subquery:13 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (89) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,81), LessThanOrEqual(ss_quantity,100)] ReadSchema: struct @@ -636,7 +636,7 @@ Subquery:14 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (96) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,81), LessThanOrEqual(ss_quantity,100)] ReadSchema: struct @@ -682,7 +682,7 @@ Subquery:15 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (103) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,81), LessThanOrEqual(ss_quantity,100)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9/explain.txt index 2448680c053c1..55f7977b231d5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q9/explain.txt @@ -8,7 +8,7 @@ (1) Scan parquet default.reason Output [1]: [r_reason_sk#1] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/reason] +Location [not included in comparison]/{warehouse_dir}/reason] PushedFilters: [IsNotNull(r_reason_sk), EqualTo(r_reason_sk,1)] ReadSchema: struct @@ -38,7 +38,7 @@ Subquery:1 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (5) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,1), LessThanOrEqual(ss_quantity,20)] ReadSchema: struct @@ -84,7 +84,7 @@ Subquery:2 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (12) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,1), LessThanOrEqual(ss_quantity,20)] ReadSchema: struct @@ -130,7 +130,7 @@ Subquery:3 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (19) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,1), LessThanOrEqual(ss_quantity,20)] ReadSchema: struct @@ -176,7 +176,7 @@ Subquery:4 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (26) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,40)] ReadSchema: struct @@ -222,7 +222,7 @@ Subquery:5 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (33) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,40)] ReadSchema: struct @@ -268,7 +268,7 @@ Subquery:6 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (40) Scan parquet default.store_sales 
Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,40)] ReadSchema: struct @@ -314,7 +314,7 @@ Subquery:7 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (47) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,41), LessThanOrEqual(ss_quantity,60)] ReadSchema: struct @@ -360,7 +360,7 @@ Subquery:8 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (54) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,41), LessThanOrEqual(ss_quantity,60)] ReadSchema: struct @@ -406,7 +406,7 @@ Subquery:9 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquery (61) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,41), LessThanOrEqual(ss_quantity,60)] ReadSchema: struct @@ -452,7 +452,7 @@ Subquery:10 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (68) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,61), LessThanOrEqual(ss_quantity,80)] ReadSchema: struct @@ -498,7 +498,7 @@ Subquery:11 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (75) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,61), LessThanOrEqual(ss_quantity,80)] ReadSchema: struct @@ -544,7 +544,7 @@ Subquery:12 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (82) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in 
comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,61), LessThanOrEqual(ss_quantity,80)] ReadSchema: struct @@ -590,7 +590,7 @@ Subquery:13 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (89) Scan parquet default.store_sales Output [1]: [ss_quantity#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,81), LessThanOrEqual(ss_quantity,100)] ReadSchema: struct @@ -636,7 +636,7 @@ Subquery:14 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (96) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_ext_discount_amt#43] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,81), LessThanOrEqual(ss_quantity,100)] ReadSchema: struct @@ -682,7 +682,7 @@ Subquery:15 Hosting operator id = 4 Hosting Expression = Subquery scalar-subquer (103) Scan parquet default.store_sales Output [2]: [ss_quantity#37, ss_net_paid#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,81), LessThanOrEqual(ss_quantity,100)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt index 9d1c956ebc271..3f787bfb99b67 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt @@ -54,7 +54,7 @@ TakeOrderedAndProject (50) (1) Scan parquet default.web_sales Output [3]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_ship_hdemo_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -68,7 +68,7 @@ Condition : ((isnotnull(ws_ship_hdemo_sk#2) AND isnotnull(ws_sold_time_sk#1)) AN (4) Scan parquet default.web_page Output [2]: [wp_web_page_sk#4, wp_char_count#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_page] +Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_char_count), GreaterThanOrEqual(wp_char_count,5000), LessThanOrEqual(wp_char_count,5200), IsNotNull(wp_web_page_sk)] ReadSchema: struct @@ -99,7 +99,7 @@ Input [4]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3, wp_web_page (11) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#7, hd_dep_count#8] Batched: true 
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_dep_count), EqualTo(hd_dep_count,6), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -130,7 +130,7 @@ Input [3]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, hd_demo_sk#7] (18) Scan parquet default.time_dim Output [2]: [t_time_sk#10, t_hour#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), GreaterThanOrEqual(t_hour,8), LessThanOrEqual(t_hour,9), IsNotNull(t_time_sk)] ReadSchema: struct @@ -179,7 +179,7 @@ Results [1]: [count(1)#16 AS amc#17] (28) Scan parquet default.web_sales Output [3]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_ship_hdemo_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -217,7 +217,7 @@ Input [3]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, hd_demo_sk#7] (37) Scan parquet default.time_dim Output [2]: [t_time_sk#10, t_hour#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), GreaterThanOrEqual(t_hour,19), LessThanOrEqual(t_hour,20), IsNotNull(t_time_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt index 332aab8796bd1..550bf89ce3b99 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt @@ -54,7 +54,7 @@ TakeOrderedAndProject (50) (1) Scan parquet default.web_sales Output [3]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_ship_hdemo_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -68,7 +68,7 @@ Condition : ((isnotnull(ws_ship_hdemo_sk#2) AND isnotnull(ws_sold_time_sk#1)) AN (4) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#4, hd_dep_count#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_dep_count), EqualTo(hd_dep_count,6), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -99,7 +99,7 @@ Input [4]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3, hd_demo_sk# (11) Scan parquet default.time_dim 
Output [2]: [t_time_sk#7, t_hour#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), GreaterThanOrEqual(t_hour,8), LessThanOrEqual(t_hour,9), IsNotNull(t_time_sk)] ReadSchema: struct @@ -130,7 +130,7 @@ Input [3]: [ws_sold_time_sk#1, ws_web_page_sk#3, t_time_sk#7] (18) Scan parquet default.web_page Output [2]: [wp_web_page_sk#10, wp_char_count#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_page] +Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_char_count), GreaterThanOrEqual(wp_char_count,5000), LessThanOrEqual(wp_char_count,5200), IsNotNull(wp_web_page_sk)] ReadSchema: struct @@ -179,7 +179,7 @@ Results [1]: [count(1)#16 AS amc#17] (28) Scan parquet default.web_sales Output [3]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_ship_hdemo_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -205,7 +205,7 @@ Input [4]: [ws_sold_time_sk#1, ws_ship_hdemo_sk#2, ws_web_page_sk#3, hd_demo_sk# (34) Scan parquet default.time_dim Output [2]: [t_time_sk#7, t_hour#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim] +Location [not included in comparison]/{warehouse_dir}/time_dim] PushedFilters: [IsNotNull(t_hour), GreaterThanOrEqual(t_hour,19), LessThanOrEqual(t_hour,20), IsNotNull(t_time_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt index fc53c0218645f..69b02557c4750 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt @@ -51,7 +51,7 @@ (1) Scan parquet default.customer_demographics Output [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Unknown)),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,Advanced Degree))), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -69,7 +69,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint) (5) Scan parquet default.customer Output [4]: [c_customer_sk#5, c_current_cdemo_sk#6, c_current_hdemo_sk#7, c_current_addr_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: 
[IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk)]
ReadSchema: struct

@@ -92,7 +92,7 @@ Input [7]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3, c_customer
(10) Scan parquet default.household_demographics
Output [2]: [hd_demo_sk#9, hd_buy_potential#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [IsNotNull(hd_buy_potential), StringStartsWith(hd_buy_potential,Unknown), IsNotNull(hd_demo_sk)]
ReadSchema: struct

@@ -123,7 +123,7 @@ Input [6]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_curre
(17) Scan parquet default.customer_address
Output [2]: [ca_address_sk#12, ca_gmt_offset#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-7.00), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -154,7 +154,7 @@ Input [5]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_curre
(24) Scan parquet default.date_dim
Output [3]: [d_date_sk#15, d_year#16, d_moy#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,11), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -176,7 +176,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(29) Scan parquet default.catalog_returns
Output [4]: [cr_returned_date_sk#19, cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
PushedFilters: [IsNotNull(cr_call_center_sk), IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_customer_sk)]
ReadSchema: struct

@@ -212,7 +212,7 @@ Input [6]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, cr_retu
(37) Scan parquet default.call_center
Output [4]: [cc_call_center_sk#24, cc_call_center_id#25, cc_name#26, cc_manager#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
PushedFilters: [IsNotNull(cc_call_center_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91/explain.txt
index 003c7bd5a9835..1956baf782417 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91/explain.txt
@@ -51,7 +51,7 @@
(1) Scan parquet default.call_center
Output [4]: [cc_call_center_sk#1, cc_call_center_id#2, cc_name#3, cc_manager#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
PushedFilters: [IsNotNull(cc_call_center_sk)]
ReadSchema: struct

@@ -65,7 +65,7 @@ Condition : isnotnull(cc_call_center_sk#1)
(4) Scan parquet default.catalog_returns
Output [4]: [cr_returned_date_sk#5, cr_returning_customer_sk#6, cr_call_center_sk#7, cr_net_loss#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_returns]
+Location [not included in comparison]/{warehouse_dir}/catalog_returns]
PushedFilters: [IsNotNull(cr_call_center_sk), IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_customer_sk)]
ReadSchema: struct

@@ -92,7 +92,7 @@ Input [8]: [cc_call_center_sk#1, cc_call_center_id#2, cc_name#3, cc_manager#4, c
(10) Scan parquet default.date_dim
Output [3]: [d_date_sk#10, d_year#11, d_moy#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,11), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -123,7 +123,7 @@ Input [7]: [cc_call_center_id#2, cc_name#3, cc_manager#4, cr_returned_date_sk#5,
(17) Scan parquet default.customer
Output [4]: [c_customer_sk#14, c_current_cdemo_sk#15, c_current_hdemo_sk#16, c_current_addr_sk#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk)]
ReadSchema: struct

@@ -150,7 +150,7 @@ Input [9]: [cc_call_center_id#2, cc_name#3, cc_manager#4, cr_returning_customer_
(23) Scan parquet default.customer_address
Output [2]: [ca_address_sk#19, ca_gmt_offset#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-7.00), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -181,7 +181,7 @@ Input [8]: [cc_call_center_id#2, cc_name#3, cc_manager#4, cr_net_loss#8, c_curre
(30) Scan parquet default.customer_demographics
Output [3]: [cd_demo_sk#22, cd_marital_status#23, cd_education_status#24]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Unknown)),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,Advanced Degree))), IsNotNull(cd_demo_sk)]
ReadSchema: struct

@@ -208,7 +208,7 @@ Input [9]: [cc_call_center_id#2, cc_name#3, cc_manager#4, cr_net_loss#8, c_curre
(36) Scan parquet default.household_demographics
Output [2]: [hd_demo_sk#26, hd_buy_potential#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [IsNotNull(hd_buy_potential), StringStartsWith(hd_buy_potential,Unknown), IsNotNull(hd_demo_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt
index 3a8f6f316837b..dc4665185b014 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
(1) Scan parquet default.item
Output [2]: [i_item_sk#1, i_manufact_id#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,350), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -60,7 +60,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(6) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#4, ws_item_sk#5, ws_ext_discount_amt#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)]
ReadSchema: struct

@@ -74,7 +74,7 @@ Condition : (isnotnull(ws_sold_date_sk#4) AND isnotnull(ws_item_sk#5))
(9) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_date#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-01-27), LessThanOrEqual(d_date,2000-04-26), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -140,7 +140,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(23) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#4, ws_item_sk#5, ws_ext_discount_amt#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_ext_discount_amt), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt
index bcda3d7ad72d1..b17a48db8baac 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt
@@ -38,7 +38,7 @@ TakeOrderedAndProject (34)
(1) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_discount_amt#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_ext_discount_amt), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -52,7 +52,7 @@ Condition : ((isnotnull(ws_item_sk#2) AND isnotnull(ws_ext_discount_amt#3)) AND
(4) Scan parquet default.item
Output [2]: [i_item_sk#4, i_manufact_id#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_manufact_id), EqualTo(i_manufact_id,350), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -83,7 +83,7 @@ Input [4]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_discount_amt#3, i_item_sk#4]
(11) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_discount_amt#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)]
ReadSchema: struct

@@ -97,7 +97,7 @@ Condition : (isnotnull(ws_sold_date_sk#1) AND isnotnull(ws_item_sk#2))
(14) Scan parquet default.date_dim
Output [2]: [d_date_sk#7, d_date#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-01-27), LessThanOrEqual(d_date,2000-04-26), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93.sf100/explain.txt
index b71f2ab6ae3c1..5b4635e0bd67a 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93.sf100/explain.txt
@@ -26,7 +26,7 @@ TakeOrderedAndProject (22)
(1) Scan parquet default.store_returns
Output [4]: [sr_item_sk#1, sr_reason_sk#2, sr_ticket_number#3, sr_return_quantity#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_reason_sk)]
ReadSchema: struct

@@ -40,7 +40,7 @@ Condition : ((isnotnull(sr_item_sk#1) AND isnotnull(sr_ticket_number#3)) AND isn
(4) Scan parquet default.reason
Output [2]: [r_reason_sk#5, r_reason_desc#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/reason]
+Location [not included in comparison]/{warehouse_dir}/reason]
PushedFilters: [IsNotNull(r_reason_desc), EqualTo(r_reason_desc,reason 28), IsNotNull(r_reason_sk)]
ReadSchema: struct

@@ -79,7 +79,7 @@ Arguments: [sr_item_sk#1 ASC NULLS FIRST, sr_ticket_number#3 ASC NULLS FIRST], f
(13) Scan parquet default.store_sales
Output [5]: [ss_item_sk#9, ss_customer_sk#10, ss_ticket_number#11, ss_quantity#12, ss_sales_price#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
ReadSchema: struct

(14) ColumnarToRow [codegen id : 4]
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93/explain.txt
index 496c2ab591de4..620aa672770e1 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q93/explain.txt
@@ -23,7 +23,7 @@ TakeOrderedAndProject (19)
(1) Scan parquet default.store_sales
Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#3, ss_quantity#4, ss_sales_price#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
ReadSchema: struct

(2) ColumnarToRow [codegen id : 3]
@@ -32,7 +32,7 @@ Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#3, ss_quantity#4, s
(3) Scan parquet default.store_returns
Output [4]: [sr_item_sk#6, sr_reason_sk#7, sr_ticket_number#8, sr_return_quantity#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_returns]
+Location [not included in comparison]/{warehouse_dir}/store_returns]
PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_reason_sk)]
ReadSchema: struct

@@ -59,7 +59,7 @@ Input [9]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#3, ss_quantity#4, s
(9) Scan parquet default.reason
Output [2]: [r_reason_sk#11, r_reason_desc#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/reason]
+Location [not included in comparison]/{warehouse_dir}/reason]
PushedFilters: [IsNotNull(r_reason_desc), EqualTo(r_reason_desc,reason 28), IsNotNull(r_reason_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt
index aed3635e09bfe..7720d9dee4170 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt
@@ -51,7 +51,7 @@ TakeOrderedAndProject (47)
(1) Scan parquet default.web_sales
Output [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_warehouse_sk#4, ws_order_number#5, ws_ext_ship_cost#6, ws_net_profit#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_ship_date_sk), IsNotNull(ws_ship_addr_sk), IsNotNull(ws_web_site_sk)]
ReadSchema: struct

@@ -73,7 +73,7 @@ Arguments: [ws_order_number#5 ASC NULLS FIRST], false, 0
(6) Scan parquet default.web_sales
Output [2]: [ws_warehouse_sk#4, ws_order_number#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
ReadSchema: struct

(7) ColumnarToRow [codegen id : 3]
@@ -111,7 +111,7 @@ Arguments: [cast(ws_order_number#5 as bigint) ASC NULLS FIRST], false, 0
(15) Scan parquet default.web_returns
Output [1]: [wr_order_number#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
ReadSchema: struct

(16) ColumnarToRow [codegen id : 7]
@@ -133,7 +133,7 @@ Join condition: None
(20) Scan parquet default.customer_address
Output [2]: [ca_address_sk#15, ca_state#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,IL), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -164,7 +164,7 @@ Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_num
(27) Scan parquet default.web_site
Output [2]: [web_site_sk#18, web_company_name#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_site]
+Location [not included in comparison]/{warehouse_dir}/web_site]
PushedFilters: [IsNotNull(web_company_name), EqualTo(web_company_name,pri), IsNotNull(web_site_sk)]
ReadSchema: struct

@@ -195,7 +195,7 @@ Input [6]: [ws_ship_date_sk#1, ws_web_site_sk#3, ws_order_number#5, ws_ext_ship_
(34) Scan parquet default.date_dim
Output [2]: [d_date_sk#21, d_date#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-01), LessThanOrEqual(d_date,1999-04-02), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt
index 3a7c91dc09301..a94e74f66b201 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt
@@ -45,7 +45,7 @@ TakeOrderedAndProject (41)
(1) Scan parquet default.web_sales
Output [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_warehouse_sk#4, ws_order_number#5, ws_ext_ship_cost#6, ws_net_profit#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_ship_date_sk), IsNotNull(ws_ship_addr_sk), IsNotNull(ws_web_site_sk)]
ReadSchema: struct

@@ -59,7 +59,7 @@ Condition : ((isnotnull(ws_ship_date_sk#1) AND isnotnull(ws_ship_addr_sk#2)) AND
(4) Scan parquet default.web_sales
Output [2]: [ws_warehouse_sk#4, ws_order_number#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
ReadSchema: struct

(5) ColumnarToRow [codegen id : 1]
@@ -85,7 +85,7 @@ Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_warehouse
(10) Scan parquet default.web_returns
Output [1]: [wr_order_number#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
ReadSchema: struct

(11) ColumnarToRow [codegen id : 2]
@@ -103,7 +103,7 @@ Join condition: None
(14) Scan parquet default.date_dim
Output [2]: [d_date_sk#13, d_date#14]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-01), LessThanOrEqual(d_date,1999-04-02), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -134,7 +134,7 @@ Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_num
(21) Scan parquet default.customer_address
Output [2]: [ca_address_sk#16, ca_state#17]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,IL), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -165,7 +165,7 @@ Input [6]: [ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#5, ws_ext_ship_
(28) Scan parquet default.web_site
Output [2]: [web_site_sk#19, web_company_name#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_site]
+Location [not included in comparison]/{warehouse_dir}/web_site]
PushedFilters: [IsNotNull(web_company_name), EqualTo(web_company_name,pri), IsNotNull(web_site_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt
index c2baa5ba23ffc..eae118d46245d 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt
@@ -67,7 +67,7 @@ TakeOrderedAndProject (63)
(1) Scan parquet default.web_sales
Output [6]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_ship_date_sk), IsNotNull(ws_ship_addr_sk), IsNotNull(ws_web_site_sk)]
ReadSchema: struct

@@ -89,7 +89,7 @@ Arguments: [ws_order_number#4 ASC NULLS FIRST], false, 0
(6) Scan parquet default.web_sales
Output [2]: [ws_warehouse_sk#8, ws_order_number#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_order_number), IsNotNull(ws_warehouse_sk)]
ReadSchema: struct

@@ -179,7 +179,7 @@ Arguments: [cast(ws_order_number#4 as bigint) ASC NULLS FIRST], false, 0
(28) Scan parquet default.web_returns
Output [1]: [wr_order_number#18]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
PushedFilters: [IsNotNull(wr_order_number)]
ReadSchema: struct

@@ -215,7 +215,7 @@ Join condition: None
(36) Scan parquet default.customer_address
Output [2]: [ca_address_sk#20, ca_state#21]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,IL), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -246,7 +246,7 @@ Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_num
(43) Scan parquet default.web_site
Output [2]: [web_site_sk#23, web_company_name#24]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/web_site]
+Location [not included in comparison]/{warehouse_dir}/web_site]
PushedFilters: [IsNotNull(web_company_name), EqualTo(web_company_name,pri), IsNotNull(web_site_sk)]
ReadSchema: struct

@@ -277,7 +277,7 @@ Input [6]: [ws_ship_date_sk#1, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_
(50) Scan parquet default.date_dim
Output [2]: [d_date_sk#26, d_date#27]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-01), LessThanOrEqual(d_date,1999-04-02), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt
index 5b232d915efdd..3a24e83aff256 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt
@@ -60,7 +60,7 @@ TakeOrderedAndProject (56)
(1) Scan parquet default.web_sales
Output [6]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_ship_date_sk), IsNotNull(ws_ship_addr_sk), IsNotNull(ws_web_site_sk)]
ReadSchema: struct

@@ -74,7 +74,7 @@ Condition : ((isnotnull(ws_ship_date_sk#1) AND isnotnull(ws_ship_addr_sk#2)) AND
(4) Scan parquet default.web_sales
Output [2]: [ws_warehouse_sk#7, ws_order_number#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_order_number), IsNotNull(ws_warehouse_sk)]
ReadSchema: struct

@@ -88,7 +88,7 @@ Condition : (isnotnull(ws_order_number#4) AND isnotnull(ws_warehouse_sk#7))
(7) Scan parquet default.web_sales
Output [2]: [ws_warehouse_sk#8, ws_order_number#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_order_number), IsNotNull(ws_warehouse_sk)]
ReadSchema: struct

@@ -124,7 +124,7 @@ Join condition: None
(15) Scan parquet default.web_returns
Output [1]: [wr_order_number#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_returns]
+Location [not included in comparison]/{warehouse_dir}/web_returns]
PushedFilters: [IsNotNull(wr_order_number)]
ReadSchema: struct

@@ -138,7 +138,7 @@ Condition : isnotnull(wr_order_number#13)
(18) Scan parquet default.web_sales
Output [2]: [ws_warehouse_sk#7, ws_order_number#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_order_number), IsNotNull(ws_warehouse_sk)]
ReadSchema: struct

@@ -186,7 +186,7 @@ Join condition: None
(29) Scan parquet default.date_dim
Output [2]: [d_date_sk#18, d_date#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-01), LessThanOrEqual(d_date,1999-04-02), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -217,7 +217,7 @@ Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_num
(36) Scan parquet default.customer_address
Output [2]: [ca_address_sk#21, ca_state#22]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,IL), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -248,7 +248,7 @@ Input [6]: [ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_
(43) Scan parquet default.web_site
Output [2]: [web_site_sk#24, web_company_name#25]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/web_site]
+Location [not included in comparison]/{warehouse_dir}/web_site]
PushedFilters: [IsNotNull(web_company_name), EqualTo(web_company_name,pri), IsNotNull(web_site_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt
index 53a1642b95700..d00029f985471 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt
@@ -32,7 +32,7 @@ TakeOrderedAndProject (28)
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct

@@ -46,7 +46,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn
(4) Scan parquet default.time_dim
Output [3]: [t_time_sk#4, t_hour#5, t_minute#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,20), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, t_time_sk#4]
(11) Scan parquet default.store
Output [2]: [s_store_sk#8, s_store_name#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_name), EqualTo(s_store_name,ese), IsNotNull(s_store_sk)]
ReadSchema: struct

@@ -108,7 +108,7 @@ Input [3]: [ss_hdemo_sk#2, ss_store_sk#3, s_store_sk#8]
(18) Scan parquet default.household_demographics
Output [2]: [hd_demo_sk#11, hd_dep_count#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [IsNotNull(hd_dep_count), EqualTo(hd_dep_count,7), IsNotNull(hd_demo_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt
index a2c549bdb3444..3561eff8f57ef 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt
@@ -32,7 +32,7 @@ TakeOrderedAndProject (28)
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_hdemo_sk), IsNotNull(ss_sold_time_sk), IsNotNull(ss_store_sk)]
ReadSchema: struct

@@ -46,7 +46,7 @@ Condition : ((isnotnull(ss_hdemo_sk#2) AND isnotnull(ss_sold_time_sk#1)) AND isn
(4) Scan parquet default.household_demographics
Output [2]: [hd_demo_sk#4, hd_dep_count#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/household_demographics]
+Location [not included in comparison]/{warehouse_dir}/household_demographics]
PushedFilters: [IsNotNull(hd_dep_count), EqualTo(hd_dep_count,7), IsNotNull(hd_demo_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Input [4]: [ss_sold_time_sk#1, ss_hdemo_sk#2, ss_store_sk#3, hd_demo_sk#4]
(11) Scan parquet default.time_dim
Output [3]: [t_time_sk#7, t_hour#8, t_minute#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/time_dim]
+Location [not included in comparison]/{warehouse_dir}/time_dim]
PushedFilters: [IsNotNull(t_hour), IsNotNull(t_minute), EqualTo(t_hour,20), GreaterThanOrEqual(t_minute,30), IsNotNull(t_time_sk)]
ReadSchema: struct

@@ -108,7 +108,7 @@ Input [3]: [ss_sold_time_sk#1, ss_store_sk#3, t_time_sk#7]
(18) Scan parquet default.store
Output [2]: [s_store_sk#11, s_store_name#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store]
+Location [not included in comparison]/{warehouse_dir}/store]
PushedFilters: [IsNotNull(s_store_name), EqualTo(s_store_name,ese), IsNotNull(s_store_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt
index cf04505c74a34..0a2e88b5bc160 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt
@@ -34,7 +34,7 @@ CollectLimit (30)
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -48,7 +48,7 @@ Condition : isnotnull(ss_sold_date_sk#1)
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#4, d_month_seq#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -101,7 +101,7 @@ Arguments: [customer_sk#8 ASC NULLS FIRST, item_sk#9 ASC NULLS FIRST], false, 0
(15) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#10, cs_bill_customer_sk#11, cs_item_sk#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt
index 82ab0df435670..0a2e88b5bc160 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt
@@ -34,7 +34,7 @@ CollectLimit (30)
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -48,7 +48,7 @@ Condition : isnotnull(ss_sold_date_sk#1)
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#4, d_month_seq#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -101,7 +101,7 @@ Arguments: [customer_sk#8 ASC NULLS FIRST, item_sk#9 ASC NULLS FIRST], false, 0
(15) Scan parquet default.catalog_sales
Output [3]: [cs_sold_date_sk#10, cs_bill_customer_sk#11, cs_item_sk#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98.sf100/explain.txt
index bbbec343e1f05..30dabdd2d5523 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98.sf100/explain.txt
@@ -33,7 +33,7 @@
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -47,7 +47,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#4, d_date#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -86,7 +86,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(13) Scan parquet default.item
Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98/explain.txt
index db2cc37d26cfc..11519207de0ec 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q98/explain.txt
@@ -30,7 +30,7 @@
(1) Scan parquet default.store_sales
Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -44,7 +44,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(4) Scan parquet default.item
Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -71,7 +71,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4,
(10) Scan parquet default.date_dim
Output [2]: [d_date_sk#11, d_date#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt
index 67946af47b28a..c547e7af5d790 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt
@@ -36,7 +36,7 @@ TakeOrderedAndProject (32)
(1) Scan parquet default.date_dim
Output [2]: [d_date_sk#1, d_month_seq#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -58,7 +58,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(6) Scan parquet default.catalog_sales
Output [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_ship_mode_sk), IsNotNull(cs_call_center_sk), IsNotNull(cs_ship_date_sk)]
ReadSchema: struct

@@ -81,7 +81,7 @@ Input [6]: [d_date_sk#1, cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk
(11) Scan parquet default.ship_mode
Output [2]: [sm_ship_mode_sk#9, sm_type#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/ship_mode]
+Location [not included in comparison]/{warehouse_dir}/ship_mode]
PushedFilters: [IsNotNull(sm_ship_mode_sk)]
ReadSchema: struct

@@ -108,7 +108,7 @@ Input [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_m
(17) Scan parquet default.call_center
Output [2]: [cc_call_center_sk#12, cc_name#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
PushedFilters: [IsNotNull(cc_call_center_sk)]
ReadSchema: struct

@@ -135,7 +135,7 @@ Input [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_wareho
(23) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#15, w_warehouse_name#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilityWithStatsSuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt
index 48aa878fe8d6d..595cb2984ab75 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt
@@ -36,7 +36,7 @@ TakeOrderedAndProject (32)
(1) Scan parquet default.catalog_sales
Output [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_ship_mode_sk), IsNotNull(cs_call_center_sk), IsNotNull(cs_ship_date_sk)]
ReadSchema: struct

@@ -50,7 +50,7 @@ Condition : (((isnotnull(cs_warehouse_sk#5) AND isnotnull(cs_ship_mode_sk#4)) AN
(4) Scan parquet default.warehouse
Output [2]: [w_warehouse_sk#6, w_warehouse_name#7]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/warehouse]
+Location [not included in comparison]/{warehouse_dir}/warehouse]
PushedFilters: [IsNotNull(w_warehouse_sk)]
ReadSchema: struct

@@ -77,7 +77,7 @@ Input [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_m
(10) Scan parquet default.ship_mode
Output [2]: [sm_ship_mode_sk#9, sm_type#10]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/ship_mode]
+Location [not included in comparison]/{warehouse_dir}/ship_mode]
PushedFilters: [IsNotNull(sm_ship_mode_sk)]
ReadSchema: struct

@@ -104,7 +104,7 @@ Input [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_m
(16) Scan parquet default.call_center
Output [2]: [cc_call_center_sk#12, cc_name#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/call_center]
+Location [not included in comparison]/{warehouse_dir}/call_center]
PushedFilters: [IsNotNull(cc_call_center_sk)]
ReadSchema: struct

@@ -131,7 +131,7 @@ Input [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, w_warehou
(22) Scan parquet default.date_dim
Output [2]: [d_date_sk#15, d_month_seq#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV1_4_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a.sf100/explain.txt
index 26797aa2de40e..f0aa032e0f2b6 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a.sf100/explain.txt
@@ -56,7 +56,7 @@ TakeOrderedAndProject (52)
(1) Scan parquet default.customer
Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)]
ReadSchema: struct

@@ -78,7 +78,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0
(6) Scan parquet default.store_sales
Output [2]: [ss_sold_date_sk#5, ss_customer_sk#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -92,7 +92,7 @@ Condition : isnotnull(ss_sold_date_sk#5)
(9) Scan parquet default.date_dim
Output [3]: [d_date_sk#7, d_year#8, d_moy#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,2002), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,7), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -136,7 +136,7 @@ Join condition: None
(19) Scan parquet default.web_sales
Output [2]: [ws_sold_date_sk#12, ws_bill_customer_sk#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -162,7 +162,7 @@ Input [3]: [ws_sold_date_sk#12, ws_bill_customer_sk#13, d_date_sk#7]
(25) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#15, cs_ship_customer_sk#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -207,7 +207,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
(36) Scan parquet default.customer_address
Output [2]: [ca_address_sk#19, ca_county#20]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [In(ca_county, [Walker County,Richland County,Gaines County,Douglas County,Dona Ana County]), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -242,7 +242,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint))
(44) Scan parquet default.customer_demographics
Output [9]: [cd_demo_sk#23, cd_gender#24, cd_marital_status#25, cd_education_status#26, cd_purchase_estimate#27, cd_credit_rating#28, cd_dep_count#29, cd_dep_employed_count#30, cd_dep_college_count#31]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a/explain.txt
index aba866b2117a1..182e040762cdb 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q10a/explain.txt
@@ -52,7 +52,7 @@ TakeOrderedAndProject (48)
(1) Scan parquet default.customer
Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)]
ReadSchema: struct

@@ -66,7 +66,7 @@ Condition : (isnotnull(c_current_addr_sk#3) AND isnotnull(c_current_cdemo_sk#2))
(4) Scan parquet default.store_sales
Output [2]: [ss_sold_date_sk#4, ss_customer_sk#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -80,7 +80,7 @@ Condition : isnotnull(ss_sold_date_sk#4)
(7) Scan parquet default.date_dim
Output [3]: [d_date_sk#6, d_year#7, d_moy#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,2002), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,7), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -120,7 +120,7 @@ Join condition: None
(16) Scan parquet default.web_sales
Output [2]: [ws_sold_date_sk#11, ws_bill_customer_sk#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -146,7 +146,7 @@ Input [3]: [ws_sold_date_sk#11, ws_bill_customer_sk#12, d_date_sk#6]
(22) Scan parquet default.catalog_sales
Output [2]: [cs_sold_date_sk#14, cs_ship_customer_sk#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales]
+Location [not included in comparison]/{warehouse_dir}/catalog_sales]
PushedFilters: [IsNotNull(cs_sold_date_sk)]
ReadSchema: struct

@@ -187,7 +187,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3]
(32) Scan parquet default.customer_address
Output [2]: [ca_address_sk#18, ca_county#19]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address]
+Location [not included in comparison]/{warehouse_dir}/customer_address]
PushedFilters: [In(ca_county, [Walker County,Richland County,Gaines County,Douglas County,Dona Ana County]), IsNotNull(ca_address_sk)]
ReadSchema: struct

@@ -218,7 +218,7 @@ Input [3]: [c_current_cdemo_sk#2, c_current_addr_sk#3, ca_address_sk#18]
(39) Scan parquet default.customer_demographics
Output [9]: [cd_demo_sk#21, cd_gender#22, cd_marital_status#23, cd_education_status#24, cd_purchase_estimate#25, cd_credit_rating#26, cd_dep_count#27, cd_dep_employed_count#28, cd_dep_college_count#29]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics]
+Location [not included in comparison]/{warehouse_dir}/customer_demographics]
PushedFilters: [IsNotNull(cd_demo_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11.sf100/explain.txt
index 9349e4629a28f..ab502fd0933f0 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11.sf100/explain.txt
@@ -90,7 +90,7 @@ TakeOrderedAndProject (86)
(1) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_ext_discount_amt#3, ss_ext_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -104,7 +104,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1))
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#5, d_year#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -139,7 +139,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0
(12) Scan parquet default.customer
Output [8]: [c_customer_sk#9, c_customer_id#10, c_first_name#11, c_last_name#12, c_preferred_cust_flag#13, c_birth_country#14, c_login#15, c_email_address#16]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -200,7 +200,7 @@ Arguments: [customer_id#22 ASC NULLS FIRST], false, 0
(25) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_ext_discount_amt#3, ss_ext_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -214,7 +214,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1))
(28) Scan parquet default.date_dim
Output [2]: [d_date_sk#5, d_year#6]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -296,7 +296,7 @@ Join condition: None
(46) Scan parquet default.web_sales
Output [4]: [ws_sold_date_sk#37, ws_bill_customer_sk#38, ws_ext_discount_amt#39, ws_ext_list_price#40]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -389,7 +389,7 @@ Input [9]: [customer_id#22, year_total#23, customer_id#31, customer_first_name#3
(67) Scan parquet default.web_sales
Output [4]: [ws_sold_date_sk#37, ws_bill_customer_sk#38, ws_ext_discount_amt#39, ws_ext_list_price#40]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11/explain.txt
index 15e3a1ec9706f..fc0b38368e006 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q11/explain.txt
@@ -76,7 +76,7 @@ TakeOrderedAndProject (72)
(1) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -90,7 +90,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
(4) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#9, ss_customer_sk#10, ss_ext_discount_amt#11, ss_ext_list_price#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -117,7 +117,7 @@ Input [12]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_
(10) Scan parquet default.date_dim
Output [2]: [d_date_sk#14, d_year#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -166,7 +166,7 @@ Condition : (isnotnull(year_total#22) AND (year_total#22 > 0.00))
(20) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -192,7 +192,7 @@ Input [12]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_
(26) Scan parquet default.date_dim
Output [2]: [d_date_sk#14, d_year#15]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -246,7 +246,7 @@ Join condition: None
(37) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

@@ -260,7 +260,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2))
(40) Scan parquet default.web_sales
Output [4]: [ws_sold_date_sk#34, ws_bill_customer_sk#35, ws_ext_discount_amt#36, ws_ext_list_price#37]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -338,7 +338,7 @@ Input [9]: [customer_id#21, year_total#22, customer_id#28, customer_first_name#2
(57) Scan parquet default.customer
Output [8]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, c_preferred_cust_flag#5, c_birth_country#6, c_login#7, c_email_address#8]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer]
+Location [not included in comparison]/{warehouse_dir}/customer]
PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12.sf100/explain.txt
index 94a43d84cc7e7..c1bf12b7c2c5a 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12.sf100/explain.txt
@@ -31,7 +31,7 @@ TakeOrderedAndProject (27)
(1) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -45,7 +45,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1))
(4) Scan parquet default.date_dim
Output [2]: [d_date_sk#4, d_date#5]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -84,7 +84,7 @@ Arguments: [ws_item_sk#2 ASC NULLS FIRST], false, 0
(13) Scan parquet default.item
Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12/explain.txt
index 86262bb562644..9a82a58af5774 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q12/explain.txt
@@ -28,7 +28,7 @@ TakeOrderedAndProject (24)
(1) Scan parquet default.web_sales
Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_sales_price#3]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales]
+Location [not included in comparison]/{warehouse_dir}/web_sales]
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)]
ReadSchema: struct

@@ -42,7 +42,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1))
(4) Scan parquet default.item
Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)]
ReadSchema: struct

@@ -69,7 +69,7 @@ Input [9]: [ws_sold_date_sk#1, ws_item_sk#2, ws_ext_sales_price#3, i_item_sk#4,
(10) Scan parquet default.date_dim
Output [2]: [d_date_sk#11, d_date#12]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)]
ReadSchema: struct

diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt
index 5282470abdc5f..583f88961a836 100644
--- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt
+++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt
@@ -114,7 +114,7 @@ TakeOrderedAndProject (110)
(1) Scan parquet default.store_sales
Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -136,7 +136,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0
(6) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item]
+Location [not included in comparison]/{warehouse_dir}/item]
PushedFilters: [IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)]
ReadSchema: struct

@@ -150,7 +150,7 @@ Condition : ((isnotnull(i_brand_id#7) AND isnotnull(i_category_id#9)) AND isnotn
(9) Scan parquet default.store_sales
Output [2]: [ss_sold_date_sk#1, ss_item_sk#2]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales]
+Location [not included in comparison]/{warehouse_dir}/store_sales]
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)]
ReadSchema: struct

@@ -164,7 +164,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1))
(12) Scan parquet default.date_dim
Output [2]: [d_date_sk#10, d_year#11]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim]
+Location [not included in comparison]/{warehouse_dir}/date_dim]
PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)]
ReadSchema: struct

@@ -195,7 +195,7 @@ Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, d_date_sk#10]
(19) Scan parquet default.item
Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9]
Batched: true
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category_id), IsNotNull(i_class_id), IsNotNull(i_brand_id)] ReadSchema: struct @@ -230,7 +230,7 @@ Arguments: [coalesce(brand_id#14, 0) ASC NULLS FIRST, isnull(brand_id#14) ASC NU (27) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#18, cs_item_sk#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -256,7 +256,7 @@ Input [3]: [cs_sold_date_sk#18, cs_item_sk#19, d_date_sk#10] (33) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -296,7 +296,7 @@ Join condition: None (42) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#22, ws_item_sk#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -405,7 +405,7 @@ Join condition: None (65) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_week_seq#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -436,7 +436,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, d_d (72) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category_id), IsNotNull(i_class_id), IsNotNull(i_brand_id)] ReadSchema: struct @@ -528,7 +528,7 @@ Join condition: None (93) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_week_seq#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -641,7 +641,7 @@ Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquer (111) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -655,7 +655,7 @@ Condition : isnotnull(ss_sold_date_sk#1) (114) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -686,7 +686,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] (121) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#18, cs_quantity#74, cs_list_price#75] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -712,7 +712,7 @@ Input [4]: [cs_sold_date_sk#18, cs_quantity#74, cs_list_price#75, d_date_sk#10] (127) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#22, ws_quantity#78, ws_list_price#79] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -765,7 +765,7 @@ Subquery:2 Hosting operator id = 67 Hosting Expression = Subquery scalar-subquer (137) Scan parquet default.date_dim Output [4]: [d_week_seq#29, d_year#11, d_moy#89, d_dom#90] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_dom), IsNotNull(d_year), EqualTo(d_year,1999), EqualTo(d_moy,12), EqualTo(d_dom,16)] ReadSchema: struct @@ -792,7 +792,7 @@ Subquery:4 Hosting operator id = 95 Hosting Expression = Subquery scalar-subquer (141) Scan parquet default.date_dim Output [4]: [d_week_seq#29, d_year#11, d_moy#89, d_dom#90] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_moy), IsNotNull(d_dom), IsNotNull(d_year), EqualTo(d_year,1998), EqualTo(d_moy,12), EqualTo(d_dom,16)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt index 9ee58f9b3d604..983e96981c031 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt @@ -104,7 +104,7 @@ TakeOrderedAndProject (100) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] 
Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -118,7 +118,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_class_id), IsNotNull(i_category_id), IsNotNull(i_brand_id)] ReadSchema: struct @@ -132,7 +132,7 @@ Condition : ((isnotnull(i_class_id#7) AND isnotnull(i_category_id#8)) AND isnotn (7) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#1, ss_item_sk#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -146,7 +146,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (10) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)] ReadSchema: struct @@ -173,7 +173,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, i_item_sk#5, i_brand_id#6, i_class_ (16) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -204,7 +204,7 @@ Input [5]: [ss_sold_date_sk#1, i_brand_id#6, i_class_id#7, i_category_id#8, d_da (23) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#16, cs_item_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -218,7 +218,7 @@ Condition : (isnotnull(cs_item_sk#17) AND isnotnull(cs_sold_date_sk#16)) (26) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -266,7 +266,7 @@ Join condition: None (37) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#20, 
ws_item_sk#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -367,7 +367,7 @@ Join condition: None (58) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_class_id), IsNotNull(i_category_id), IsNotNull(i_brand_id)] ReadSchema: struct @@ -402,7 +402,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, i_i (66) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_week_seq#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -459,7 +459,7 @@ Input [7]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, n (78) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -493,7 +493,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, i_i (86) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_week_seq#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -594,7 +594,7 @@ Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquer (101) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -608,7 +608,7 @@ Condition : isnotnull(ss_sold_date_sk#1) (104) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -639,7 +639,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] (111) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#16, cs_quantity#71, 
cs_list_price#72] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -665,7 +665,7 @@ Input [4]: [cs_sold_date_sk#16, cs_quantity#71, cs_list_price#72, d_date_sk#10] (117) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#20, ws_quantity#75, ws_list_price#76] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -718,7 +718,7 @@ Subquery:2 Hosting operator id = 68 Hosting Expression = Subquery scalar-subquer (127) Scan parquet default.date_dim Output [4]: [d_week_seq#28, d_year#11, d_moy#86, d_dom#87] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_dom), IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,1999), EqualTo(d_moy,12), EqualTo(d_dom,16)] ReadSchema: struct @@ -745,7 +745,7 @@ Subquery:4 Hosting operator id = 88 Hosting Expression = Subquery scalar-subquer (131) Scan parquet default.date_dim Output [4]: [d_week_seq#28, d_year#11, d_moy#86, d_dom#87] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_dom), IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_year,1998), EqualTo(d_moy,12), EqualTo(d_dom,16)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt index 705abacb4f572..0a76360d004ca 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt @@ -226,7 +226,7 @@ TakeOrderedAndProject (222) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -248,7 +248,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0 (6) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)] ReadSchema: struct @@ -262,7 +262,7 @@ Condition : ((isnotnull(i_brand_id#7) AND isnotnull(i_category_id#9)) AND isnotn (9) Scan parquet 
default.store_sales Output [2]: [ss_sold_date_sk#1, ss_item_sk#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -276,7 +276,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (12) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -307,7 +307,7 @@ Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, d_date_sk#10] (19) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_class_id)] ReadSchema: struct @@ -342,7 +342,7 @@ Arguments: [coalesce(brand_id#14, 0) ASC NULLS FIRST, isnull(brand_id#14) ASC NU (27) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#18, cs_item_sk#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -368,7 +368,7 @@ Input [3]: [cs_sold_date_sk#18, cs_item_sk#19, d_date_sk#10] (33) Scan parquet default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -408,7 +408,7 @@ Join condition: None (42) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#22, ws_item_sk#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -517,7 +517,7 @@ Join condition: None (65) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,11), IsNotNull(d_date_sk)] ReadSchema: struct @@ -548,7 +548,7 @@ Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, d_d (72) Scan parquet 
default.item Output [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -621,7 +621,7 @@ Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, n (88) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#18, cs_item_sk#19, cs_quantity#48, cs_list_price#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -705,7 +705,7 @@ Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, n (107) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#22, ws_item_sk#23, ws_quantity#64, ws_list_price#65] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -1296,7 +1296,7 @@ Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquer (223) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -1310,7 +1310,7 @@ Condition : isnotnull(ss_sold_date_sk#1) (226) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -1341,7 +1341,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] (233) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -1355,7 +1355,7 @@ Condition : isnotnull(cs_sold_date_sk#18) (236) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ 
-1386,7 +1386,7 @@ Input [4]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49, d_date_sk#10] (243) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#22, ws_quantity#64, ws_list_price#65] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt index 24f9a69287dc9..e5afd3736c60c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt @@ -210,7 +210,7 @@ TakeOrderedAndProject (206) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -224,7 +224,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_class_id), IsNotNull(i_category_id), IsNotNull(i_brand_id)] ReadSchema: struct @@ -238,7 +238,7 @@ Condition : ((isnotnull(i_class_id#7) AND isnotnull(i_category_id#8)) AND isnotn (7) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#1, ss_item_sk#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -252,7 +252,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (10) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand_id), IsNotNull(i_class_id), IsNotNull(i_category_id)] ReadSchema: struct @@ -279,7 +279,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, i_item_sk#5, i_brand_id#6, i_class_ (16) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -310,7 +310,7 @@ Input 
[5]: [ss_sold_date_sk#1, i_brand_id#6, i_class_id#7, i_category_id#8, d_da (23) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#16, cs_item_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -324,7 +324,7 @@ Condition : (isnotnull(cs_item_sk#17) AND isnotnull(cs_sold_date_sk#16)) (26) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -372,7 +372,7 @@ Join condition: None (37) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#20, ws_item_sk#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -473,7 +473,7 @@ Join condition: None (58) Scan parquet default.item Output [4]: [i_item_sk#5, i_brand_id#6, i_class_id#7, i_category_id#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -508,7 +508,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4, i_i (66) Scan parquet default.date_dim Output [3]: [d_date_sk#10, d_year#11, d_moy#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,11), IsNotNull(d_date_sk)] ReadSchema: struct @@ -565,7 +565,7 @@ Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, n (78) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#16, cs_item_sk#17, cs_quantity#45, cs_list_price#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -637,7 +637,7 @@ Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, n (94) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#20, ws_item_sk#21, ws_quantity#60, ws_list_price#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -1216,7 +1216,7 @@ Subquery:1 Hosting operator id = 76 Hosting Expression = 
Subquery scalar-subquer (207) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -1230,7 +1230,7 @@ Condition : isnotnull(ss_sold_date_sk#1) (210) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -1261,7 +1261,7 @@ Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] (217) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -1275,7 +1275,7 @@ Condition : isnotnull(cs_sold_date_sk#16) (220) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -1306,7 +1306,7 @@ Input [4]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46, d_date_sk#10] (227) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#20, ws_quantity#60, ws_list_price#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt index ef8c5ccae050f..2d76deefcaa36 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt @@ -164,7 +164,7 @@ TakeOrderedAndProject (160) (1) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ 
-178,7 +178,7 @@ Condition : (((isnotnull(cs_bill_cdemo_sk#3) AND isnotnull(cs_bill_customer_sk#2 (4) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#10, cd_gender#11, cd_education_status#12, cd_dep_count#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,M), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -209,7 +209,7 @@ Input [11]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_it (11) Scan parquet default.date_dim Output [2]: [d_date_sk#15, d_year#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -240,7 +240,7 @@ Input [10]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity (18) Scan parquet default.item Output [2]: [i_item_sk#18, i_item_id#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -275,7 +275,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 (26) Scan parquet default.customer Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [9,5,12,4,1,10]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -293,7 +293,7 @@ Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bir (30) Scan parquet default.customer_address Output [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -328,7 +328,7 @@ Arguments: [c_current_cdemo_sk#23 ASC NULLS FIRST], false, 0 (38) Scan parquet default.customer_demographics Output [1]: [cd_demo_sk#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -401,7 +401,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 (54) Scan parquet default.customer Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] Batched: 
true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [9,5,12,4,1,10]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -419,7 +419,7 @@ Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bir (58) Scan parquet default.customer_address Output [3]: [ca_address_sk#27, ca_state#29, ca_country#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -512,7 +512,7 @@ Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 (79) Scan parquet default.customer Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [9,5,12,4,1,10]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -530,7 +530,7 @@ Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bir (83) Scan parquet default.customer_address Output [3]: [ca_address_sk#27, ca_state#29, ca_country#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -620,7 +620,7 @@ Results [11]: [i_item_id#19, ca_country#30, null AS ca_state#174, null AS county (103) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -658,7 +658,7 @@ Input [10]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity (112) Scan parquet default.customer Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [9,5,12,4,1,10]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -676,7 +676,7 @@ Input [5]: [c_customer_sk#22, 
c_current_cdemo_sk#23, c_current_addr_sk#24, c_bir (116) Scan parquet default.customer_address Output [2]: [ca_address_sk#27, ca_state#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -711,7 +711,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint)) (124) Scan parquet default.customer_demographics Output [1]: [cd_demo_sk#185] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -777,7 +777,7 @@ Results [11]: [i_item_id#19, null AS ca_country#223, null AS ca_state#224, null (138) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -815,7 +815,7 @@ Input [10]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity (147) Scan parquet default.item Output [1]: [i_item_sk#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a/explain.txt index b7e9b4857929e..b9a00214c3a1b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a/explain.txt @@ -161,7 +161,7 @@ TakeOrderedAndProject (157) (1) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -175,7 +175,7 @@ Condition : (((isnotnull(cs_bill_cdemo_sk#3) AND isnotnull(cs_bill_customer_sk#2 (4) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#10, cd_gender#11, cd_education_status#12, cd_dep_count#13] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_gender), IsNotNull(cd_education_status), EqualTo(cd_gender,M), EqualTo(cd_education_status,College), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -206,7 +206,7 @@ Input [11]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_it (11) Scan parquet default.customer Output [5]: [c_customer_sk#15, c_current_cdemo_sk#16, c_current_addr_sk#17, c_birth_month#18, c_birth_year#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [9,5,12,4,1,10]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -237,7 +237,7 @@ Input [13]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity (18) Scan parquet default.customer_demographics Output [1]: [cd_demo_sk#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -264,7 +264,7 @@ Input [12]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (24) Scan parquet default.customer_address Output [4]: [ca_address_sk#23, ca_county#24, ca_state#25, ca_country#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -291,7 +291,7 @@ Input [14]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (30) Scan parquet default.date_dim Output [2]: [d_date_sk#28, d_year#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -322,7 +322,7 @@ Input [13]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (37) Scan parquet default.item Output [2]: [i_item_sk#31, i_item_id#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -367,7 +367,7 @@ Results [11]: [i_item_id#32, ca_country#26, ca_state#25, ca_county#24, avg(agg1# (46) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in 
comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -417,7 +417,7 @@ Input [12]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (58) Scan parquet default.customer_address Output [3]: [ca_address_sk#23, ca_state#25, ca_country#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -486,7 +486,7 @@ Results [11]: [i_item_id#32, ca_country#26, ca_state#25, null AS county#122, avg (73) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -536,7 +536,7 @@ Input [12]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (85) Scan parquet default.customer_address Output [3]: [ca_address_sk#23, ca_state#25, ca_country#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -609,7 +609,7 @@ Results [11]: [i_item_id#32, ca_country#26, null AS ca_state#168, null AS county (101) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -659,7 +659,7 @@ Input [12]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (113) Scan parquet default.customer_address Output [2]: [ca_address_sk#23, ca_state#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct @@ -732,7 +732,7 @@ Results [11]: [i_item_id#32, null AS ca_country#215, null AS ca_state#216, null (129) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] Batched: 
true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_customer_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk)] ReadSchema: struct @@ -806,7 +806,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs (147) Scan parquet default.item Output [1]: [i_item_sk#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20.sf100/explain.txt index 9bb210f7f01db..15cde1a45a99b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20.sf100/explain.txt @@ -31,7 +31,7 @@ TakeOrderedAndProject (27) (1) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -45,7 +45,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_date#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)] ReadSchema: struct @@ -84,7 +84,7 @@ Arguments: [cs_item_sk#2 ASC NULLS FIRST], false, 0 (13) Scan parquet default.item Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20/explain.txt index b3ffeacc48faf..bd684664fdb34 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q20/explain.txt @@ -28,7 +28,7 @@ TakeOrderedAndProject (24) (1) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] 
+Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -42,7 +42,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) (4) Scan parquet default.item Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)] ReadSchema: struct @@ -69,7 +69,7 @@ Input [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_ext_sales_price#3, i_item_sk#4, (10) Scan parquet default.date_dim Output [2]: [d_date_sk#11, d_date#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22.sf100/explain.txt index 811539b96fced..3efe02a377d09 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22.sf100/explain.txt @@ -32,7 +32,7 @@ TakeOrderedAndProject (28) (1) Scan parquet default.inventory Output [3]: [inv_date_sk#1, inv_item_sk#2, inv_quantity_on_hand#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_date_sk), IsNotNull(inv_item_sk)] ReadSchema: struct @@ -46,7 +46,7 @@ Condition : (isnotnull(inv_date_sk#1) AND isnotnull(inv_item_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Arguments: [inv_item_sk#2 ASC NULLS FIRST], false, 0 (13) Scan parquet default.item Output [5]: [i_item_sk#8, i_brand#9, i_class#10, i_category#11, i_product_name#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [7]: [inv_item_sk#2, inv_quantity_on_hand#3, i_item_sk#8, i_brand#9, i_cla (20) Scan parquet default.warehouse Output: [] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/warehouse] +Location [not included in 
comparison]/{warehouse_dir}/warehouse] ReadSchema: struct<> (21) ColumnarToRow [codegen id : 7] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22/explain.txt index 03cc2a5b182dc..ad83edec0ad33 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22/explain.txt @@ -29,7 +29,7 @@ TakeOrderedAndProject (25) (1) Scan parquet default.inventory Output [3]: [inv_date_sk#1, inv_item_sk#2, inv_quantity_on_hand#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_date_sk), IsNotNull(inv_item_sk)] ReadSchema: struct @@ -43,7 +43,7 @@ Condition : (isnotnull(inv_date_sk#1) AND isnotnull(inv_item_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] ReadSchema: struct @@ -74,7 +74,7 @@ Input [4]: [inv_date_sk#1, inv_item_sk#2, inv_quantity_on_hand#3, d_date_sk#4] (11) Scan parquet default.item Output [5]: [i_item_sk#7, i_brand#8, i_class#9, i_category#10, i_product_name#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -101,7 +101,7 @@ Input [7]: [inv_item_sk#2, inv_quantity_on_hand#3, i_item_sk#7, i_brand#8, i_cla (17) Scan parquet default.warehouse Output: [] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] ReadSchema: struct<> (18) ColumnarToRow [codegen id : 4] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a.sf100/explain.txt index 415c62a070bab..0234a65ac06a5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a.sf100/explain.txt @@ -56,7 +56,7 @@ TakeOrderedAndProject (52) (1) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_date_sk), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Condition : ((isnotnull(inv_date_sk#1) AND isnotnull(inv_item_sk#2)) AND isnotnu (4) Scan parquet default.warehouse Output [1]: 
[w_warehouse_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -97,7 +97,7 @@ Input [5]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha (10) Scan parquet default.date_dim Output [2]: [d_date_sk#7, d_month_seq#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -136,7 +136,7 @@ Arguments: [inv_item_sk#2 ASC NULLS FIRST], false, 0 (19) Scan parquet default.item Output [5]: [i_item_sk#11, i_brand#12, i_class#13, i_category#14, i_product_name#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a/explain.txt index 340c432c78489..2a1ca82e1f263 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q22a/explain.txt @@ -53,7 +53,7 @@ TakeOrderedAndProject (49) (1) Scan parquet default.inventory Output [4]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_date_sk), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk)] ReadSchema: struct @@ -67,7 +67,7 @@ Condition : ((isnotnull(inv_date_sk#1) AND isnotnull(inv_item_sk#2)) AND isnotnu (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_month_seq#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -98,7 +98,7 @@ Input [5]: [inv_date_sk#1, inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_ha (11) Scan parquet default.item Output [5]: [i_item_sk#8, i_brand#9, i_class#10, i_category#11, i_product_name#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -125,7 +125,7 @@ Input [8]: [inv_item_sk#2, inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_sk (17) Scan parquet default.warehouse Output [1]: [w_warehouse_sk#14] Batched: true -Location: 
InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24.sf100/explain.txt index 8f01636da7200..ddcdbcd1237b2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24.sf100/explain.txt @@ -51,7 +51,7 @@ (1) Scan parquet default.store Output [5]: [s_store_sk#1, s_store_name#2, s_market_id#3, s_state#4, s_zip#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -73,7 +73,7 @@ Arguments: HashedRelationBroadcastMode(List(input[3, string, true]),false), [id= (6) Scan parquet default.customer_address Output [4]: [ca_address_sk#7, ca_state#8, ca_zip#9, ca_country#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_country), IsNotNull(ca_zip)] ReadSchema: struct @@ -100,7 +100,7 @@ Arguments: HashedRelationBroadcastMode(List(input[3, int, true], upper(input[5, (12) Scan parquet default.customer Output [5]: [c_customer_sk#12, c_current_addr_sk#13, c_first_name#14, c_last_name#15, c_birth_country#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -127,7 +127,7 @@ Arguments: HashedRelationBroadcastMode(List((shiftleft(cast(input[0, int, true] (18) Scan parquet default.store_sales Output [5]: [ss_item_sk#18, ss_customer_sk#19, ss_store_sk#20, ss_ticket_number#21, ss_net_paid#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -150,7 +150,7 @@ Input [12]: [s_store_sk#1, s_store_name#2, s_state#4, ca_state#8, c_customer_sk# (23) Scan parquet default.item Output [6]: [i_item_sk#23, i_current_price#24, i_size#25, i_color#26, i_units#27, i_manager_id#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_color), EqualTo(i_color,pale), IsNotNull(i_item_sk)] ReadSchema: struct @@ -185,7 
+185,7 @@ Arguments: [cast(ss_item_sk#18 as bigint) ASC NULLS FIRST, cast(ss_ticket_number (31) Scan parquet default.store_returns Output [2]: [sr_item_sk#31, sr_ticket_number#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -319,7 +319,7 @@ Subquery:1 Hosting operator id = 44 Hosting Expression = Subquery scalar-subquer (48) Scan parquet default.store Output [5]: [s_store_sk#1, s_store_name#2, s_market_id#3, s_state#4, s_zip#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -341,7 +341,7 @@ Arguments: HashedRelationBroadcastMode(List(input[3, string, true]),false), [id= (53) Scan parquet default.customer_address Output [4]: [ca_address_sk#7, ca_state#8, ca_zip#9, ca_country#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_country), IsNotNull(ca_zip)] ReadSchema: struct @@ -368,7 +368,7 @@ Arguments: HashedRelationBroadcastMode(List(input[3, int, true], upper(input[5, (59) Scan parquet default.customer Output [5]: [c_customer_sk#12, c_current_addr_sk#13, c_first_name#14, c_last_name#15, c_birth_country#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -395,7 +395,7 @@ Arguments: HashedRelationBroadcastMode(List((shiftleft(cast(input[0, int, true] (65) Scan parquet default.store_sales Output [5]: [ss_item_sk#18, ss_customer_sk#19, ss_store_sk#20, ss_ticket_number#21, ss_net_paid#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -426,7 +426,7 @@ Arguments: [ss_item_sk#18 ASC NULLS FIRST], false, 0 (72) Scan parquet default.item Output [6]: [i_item_sk#23, i_current_price#24, i_size#25, i_color#26, i_units#27, i_manager_id#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -465,7 +465,7 @@ Arguments: [cast(ss_item_sk#18 as bigint) ASC NULLS FIRST, cast(ss_ticket_number (81) Scan parquet default.store_returns Output [2]: [sr_item_sk#31, 
sr_ticket_number#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24/explain.txt index 58d40545d046b..870a1c24979c6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q24/explain.txt @@ -48,7 +48,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -62,7 +62,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#6, sr_ticket_number#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -89,7 +89,7 @@ Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, s (10) Scan parquet default.store Output [5]: [s_store_sk#9, s_store_name#10, s_market_id#11, s_state#12, s_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -120,7 +120,7 @@ Input [8]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_net_paid#5, s_stor (17) Scan parquet default.item Output [6]: [i_item_sk#15, i_current_price#16, i_size#17, i_color#18, i_units#19, i_manager_id#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_color), EqualTo(i_color,pale), IsNotNull(i_item_sk)] ReadSchema: struct @@ -147,7 +147,7 @@ Input [12]: [ss_item_sk#1, ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_s (23) Scan parquet default.customer Output [5]: [c_customer_sk#22, c_current_addr_sk#23, c_first_name#24, c_last_name#25, c_birth_country#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -174,7 +174,7 @@ Input [15]: 
[ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_state#12, s_zip (29) Scan parquet default.customer_address Output [4]: [ca_address_sk#28, ca_state#29, ca_zip#30, ca_country#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip), IsNotNull(ca_country)] ReadSchema: struct @@ -298,7 +298,7 @@ Subquery:1 Hosting operator id = 41 Hosting Expression = Subquery scalar-subquer (45) Scan parquet default.store_sales Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -312,7 +312,7 @@ Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND is (48) Scan parquet default.store_returns Output [2]: [sr_item_sk#6, sr_ticket_number#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -339,7 +339,7 @@ Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, s (54) Scan parquet default.store Output [5]: [s_store_sk#9, s_store_name#10, s_market_id#11, s_state#12, s_zip#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct @@ -370,7 +370,7 @@ Input [8]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_net_paid#5, s_stor (61) Scan parquet default.item Output [6]: [i_item_sk#15, i_current_price#16, i_size#17, i_color#18, i_units#19, i_manager_id#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -397,7 +397,7 @@ Input [12]: [ss_item_sk#1, ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_s (67) Scan parquet default.customer Output [5]: [c_customer_sk#22, c_current_addr_sk#23, c_first_name#24, c_last_name#25, c_birth_country#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -424,7 +424,7 @@ Input [15]: [ss_customer_sk#2, ss_net_paid#5, s_store_name#10, s_state#12, s_zip (73) Scan parquet default.customer_address Output [4]: [ca_address_sk#28, ca_state#29, ca_zip#30, ca_country#31] Batched: true 
-Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip), IsNotNull(ca_country)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a.sf100/explain.txt index 0ebc809387dc6..fc684d188d714 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a.sf100/explain.txt @@ -81,7 +81,7 @@ TakeOrderedAndProject (77) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -95,7 +95,7 @@ Condition : (((isnotnull(ss_cdemo_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND is (4) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, cd_education_status#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_education_status), IsNotNull(cd_gender), IsNotNull(cd_marital_status), EqualTo(cd_gender,F), EqualTo(cd_marital_status,W), EqualTo(cd_education_status,Primary), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -126,7 +126,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_qu (11) Scan parquet default.date_dim Output [2]: [d_date_sk#14, d_year#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), IsNotNull(d_date_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li (18) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -184,7 +184,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale (24) Scan parquet default.item Output [2]: [i_item_sk#20, i_item_id#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: 
[IsNotNull(i_item_sk)] ReadSchema: struct @@ -229,7 +229,7 @@ Results [7]: [i_item_id#21, s_state#18, 0 AS g_state#48, avg(cast(agg1#23 as big (33) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -255,7 +255,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_qu (39) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -328,7 +328,7 @@ Results [7]: [i_item_id#21, null AS s_state#75, 1 AS g_state#76, avg(cast(agg1#2 (55) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -378,7 +378,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_ (67) Scan parquet default.item Output [1]: [i_item_sk#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a/explain.txt index 2d6deabcf64a4..8ff35202e0f18 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q27a/explain.txt @@ -81,7 +81,7 @@ TakeOrderedAndProject (77) (1) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -95,7 +95,7 @@ Condition : (((isnotnull(ss_cdemo_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND is (4) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#9, cd_gender#10, cd_marital_status#11, 
cd_education_status#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_education_status), IsNotNull(cd_marital_status), IsNotNull(cd_gender), EqualTo(cd_gender,F), EqualTo(cd_marital_status,W), EqualTo(cd_education_status,Primary), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -126,7 +126,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_qu (11) Scan parquet default.date_dim Output [2]: [d_date_sk#14, d_year#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), IsNotNull(d_date_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li (18) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -184,7 +184,7 @@ Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale (24) Scan parquet default.item Output [2]: [i_item_sk#20, i_item_id#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -229,7 +229,7 @@ Results [7]: [i_item_id#21, s_state#18, 0 AS g_state#48, avg(cast(agg1#23 as big (33) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -267,7 +267,7 @@ Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_li (42) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -328,7 +328,7 @@ Results [7]: [i_item_id#21, null AS s_state#75, 1 AS g_state#76, avg(cast(agg1#2 (55) Scan parquet default.store_sales Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_cdemo_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -378,7 +378,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sale (67) Scan parquet default.item Output [1]: [i_item_sk#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt index 45e1768b05c2a..c7b8685b64bea 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt @@ -43,7 +43,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -57,7 +57,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#4)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_dom#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(And(GreaterThanOrEqual(d_dom,1),LessThanOrEqual(d_dom,3)),And(GreaterThanOrEqual(d_dom,25),LessThanOrEqual(d_dom,28))), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -88,7 +88,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s (11) Scan parquet default.store Output [2]: [s_store_sk#10, s_county#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_county), EqualTo(s_county,Williamson County), IsNotNull(s_store_sk)] ReadSchema: struct @@ -119,7 +119,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, (18) Scan parquet default.household_demographics Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), 
IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -180,7 +180,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (31) Scan parquet default.customer Output [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt index d6dcdb2ecdb8d..01b5f46bd5dd4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt @@ -40,7 +40,7 @@ (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -54,7 +54,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#4)) AND is (4) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_dom#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(And(GreaterThanOrEqual(d_dom,1),LessThanOrEqual(d_dom,3)),And(GreaterThanOrEqual(d_dom,25),LessThanOrEqual(d_dom,28))), In(d_year, [1999,2000,2001]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Input [6]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, s (11) Scan parquet default.store Output [2]: [s_store_sk#10, s_county#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_county), EqualTo(s_county,Williamson County), IsNotNull(s_store_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, (18) Scan parquet default.household_demographics Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -169,7 +169,7 @@ Condition : ((cnt#22 >= 15) AND (cnt#22 <= 20)) (29) Scan parquet default.customer Output [5]: [c_customer_sk#23, c_salutation#24, c_first_name#25, c_last_name#26, c_preferred_cust_flag#27] Batched: true -Location: 
InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35.sf100/explain.txt index 25b48af7f658f..d38804d4f0422 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35.sf100/explain.txt @@ -64,7 +64,7 @@ TakeOrderedAndProject (60) (1) Scan parquet default.customer Output [3]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -86,7 +86,7 @@ Arguments: [c_customer_sk#3 ASC NULLS FIRST], false, 0 (6) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#7, ss_customer_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -100,7 +100,7 @@ Condition : isnotnull(ss_sold_date_sk#7) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_qoy#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_qoy), EqualTo(d_year,2002), LessThan(d_qoy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -144,7 +144,7 @@ Join condition: None (19) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#14, ws_bill_customer_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -183,7 +183,7 @@ Join condition: None (28) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#17, cs_ship_customer_sk#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -238,7 +238,7 @@ Arguments: [c_current_addr_sk#5 ASC NULLS FIRST], false, 0 (41) Scan parquet default.customer_address Output [2]: [ca_address_sk#21, ca_state#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ 
-277,7 +277,7 @@ Arguments: [c_current_cdemo_sk#4 ASC NULLS FIRST], false, 0 (50) Scan parquet default.customer_demographics Output [6]: [cd_demo_sk#25, cd_gender#26, cd_marital_status#27, cd_dep_count#28, cd_dep_employed_count#29, cd_dep_college_count#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35/explain.txt index a6341c55f0457..e091e512afba8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35/explain.txt @@ -53,7 +53,7 @@ TakeOrderedAndProject (49) (1) Scan parquet default.customer Output [3]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -67,7 +67,7 @@ Condition : (isnotnull(c_current_addr_sk#5) AND isnotnull(c_current_cdemo_sk#4)) (4) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#6, ss_customer_sk#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -81,7 +81,7 @@ Condition : isnotnull(ss_sold_date_sk#6) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#8, d_year#9, d_qoy#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_qoy), EqualTo(d_year,2002), LessThan(d_qoy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -121,7 +121,7 @@ Join condition: None (16) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#13, ws_bill_customer_sk#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -156,7 +156,7 @@ Join condition: None (24) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#16, cs_ship_customer_sk#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -199,7 +199,7 @@ Input [5]: [c_customer_sk#3, c_current_cdemo_sk#4, c_current_addr_sk#5, exists#2 (34) Scan parquet default.customer_address Output [2]: [ca_address_sk#19, ca_state#20] Batched: true -Location: 
InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -226,7 +226,7 @@ Input [4]: [c_current_cdemo_sk#4, c_current_addr_sk#5, ca_address_sk#19, ca_stat (40) Scan parquet default.customer_demographics Output [6]: [cd_demo_sk#22, cd_gender#23, cd_marital_status#24, cd_dep_count#25, cd_dep_employed_count#26, cd_dep_college_count#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a.sf100/explain.txt index 9e668c7015769..b08d2174e5974 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a.sf100/explain.txt @@ -61,7 +61,7 @@ TakeOrderedAndProject (57) (1) Scan parquet default.customer Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -83,7 +83,7 @@ Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0 (6) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#5, ss_customer_sk#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -97,7 +97,7 @@ Condition : isnotnull(ss_sold_date_sk#5) (9) Scan parquet default.date_dim Output [3]: [d_date_sk#7, d_year#8, d_qoy#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_qoy), EqualTo(d_year,1999), LessThan(d_qoy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -141,7 +141,7 @@ Join condition: None (19) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#12, ws_bill_customer_sk#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -167,7 +167,7 @@ Input [3]: [ws_sold_date_sk#12, ws_bill_customer_sk#13, d_date_sk#7] (25) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#15, cs_ship_customer_sk#16] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -220,7 +220,7 @@ Arguments: [c_current_addr_sk#3 ASC NULLS FIRST], false, 0 (38) Scan parquet default.customer_address Output [2]: [ca_address_sk#20, ca_state#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -259,7 +259,7 @@ Arguments: [c_current_cdemo_sk#2 ASC NULLS FIRST], false, 0 (47) Scan parquet default.customer_demographics Output [6]: [cd_demo_sk#24, cd_gender#25, cd_marital_status#26, cd_dep_count#27, cd_dep_employed_count#28, cd_dep_college_count#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a/explain.txt index 69ad4ba7290bf..847b5cf66c3e0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q35a/explain.txt @@ -51,7 +51,7 @@ TakeOrderedAndProject (47) (1) Scan parquet default.customer Output [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk)] ReadSchema: struct @@ -65,7 +65,7 @@ Condition : (isnotnull(c_current_addr_sk#3) AND isnotnull(c_current_cdemo_sk#2)) (4) Scan parquet default.store_sales Output [2]: [ss_sold_date_sk#4, ss_customer_sk#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -79,7 +79,7 @@ Condition : isnotnull(ss_sold_date_sk#4) (7) Scan parquet default.date_dim Output [3]: [d_date_sk#6, d_year#7, d_qoy#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_qoy), EqualTo(d_year,1999), LessThan(d_qoy,4), IsNotNull(d_date_sk)] ReadSchema: struct @@ -119,7 +119,7 @@ Join condition: None (16) Scan parquet default.web_sales Output [2]: [ws_sold_date_sk#11, ws_bill_customer_sk#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in 
comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -145,7 +145,7 @@ Input [3]: [ws_sold_date_sk#11, ws_bill_customer_sk#12, d_date_sk#6] (22) Scan parquet default.catalog_sales Output [2]: [cs_sold_date_sk#14, cs_ship_customer_sk#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -186,7 +186,7 @@ Input [3]: [c_customer_sk#1, c_current_cdemo_sk#2, c_current_addr_sk#3] (32) Scan parquet default.customer_address Output [2]: [ca_address_sk#18, ca_state#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -213,7 +213,7 @@ Input [4]: [c_current_cdemo_sk#2, c_current_addr_sk#3, ca_address_sk#18, ca_stat (38) Scan parquet default.customer_demographics Output [6]: [cd_demo_sk#21, cd_gender#22, cd_marital_status#23, cd_dep_count#24, cd_dep_employed_count#25, cd_dep_college_count#26] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt index 9d1194dcd7550..107343f091fb2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt @@ -53,7 +53,7 @@ TakeOrderedAndProject (49) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -67,7 +67,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) AND isno (4) Scan parquet default.date_dim Output [2]: [d_date_sk#6, d_year#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -98,7 +98,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4 (11) Scan parquet default.store Output [2]: [s_store_sk#9, s_state#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in 
comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct @@ -129,7 +129,7 @@ Input [5]: [ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5, (18) Scan parquet default.item Output [3]: [i_item_sk#12, i_class#13, i_category#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt index 5d1e7206bdef0..0d6dfa6f90a86 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt @@ -53,7 +53,7 @@ TakeOrderedAndProject (49) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -67,7 +67,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) AND isno (4) Scan parquet default.date_dim Output [2]: [d_date_sk#6, d_year#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -98,7 +98,7 @@ Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4 (11) Scan parquet default.item Output [3]: [i_item_sk#9, i_class#10, i_category#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -125,7 +125,7 @@ Input [7]: [ss_item_sk#2, ss_store_sk#3, ss_ext_sales_price#4, ss_net_profit#5, (17) Scan parquet default.store Output [2]: [s_store_sk#13, s_state#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_state), EqualTo(s_state,TN), IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47.sf100/explain.txt index 7151c1cb4db4d..f078ca8cd68d3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47.sf100/explain.txt @@ -62,7 +62,7 @@ TakeOrderedAndProject (58) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, 
ss_item_sk#2, ss_store_sk#3, ss_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -76,7 +76,7 @@ Condition : ((isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) AND isno (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)] ReadSchema: struct @@ -103,7 +103,7 @@ Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_sales_price#4, d_ (10) Scan parquet default.store Output [3]: [s_store_sk#9, s_store_name#10, s_company_name#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_company_name), IsNotNull(s_store_name)] ReadSchema: struct @@ -138,7 +138,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0 (18) Scan parquet default.item Output [3]: [i_item_sk#14, i_brand#15, i_category#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand), IsNotNull(i_category)] ReadSchema: struct @@ -214,7 +214,7 @@ Arguments: [rank(d_year#6, d_moy#7) windowspecdefinition(i_category#16, i_brand# (35) Filter [codegen id : 12] Input [9]: [i_category#16, i_brand#15, s_store_name#10, s_company_name#11, d_year#6, d_moy#7, sum_sales#22, avg_monthly_sales#25, rn#27] -Condition : (((((isnotnull(avg_monthly_sales#25) AND isnotnull(d_year#6)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#25 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#25 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#22 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#27)) +Condition : (((((isnotnull(d_year#6) AND isnotnull(avg_monthly_sales#25)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#25 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#25 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#22 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#25 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#27)) (36) Exchange Input [9]: [i_category#16, i_brand#15, s_store_name#10, s_company_name#11, d_year#6, d_moy#7, sum_sales#22, avg_monthly_sales#25, rn#27] diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47/explain.txt index 769051bfa32c9..7e007a9d138c7 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q47/explain.txt @@ -55,7 +55,7 @@ TakeOrderedAndProject (51) (1) Scan parquet default.item Output [3]: [i_item_sk#1, i_brand#2, i_category#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category), IsNotNull(i_brand)] ReadSchema: struct @@ -69,7 +69,7 @@ Condition : ((isnotnull(i_item_sk#1) AND isnotnull(i_category#3)) AND isnotnull( (4) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#4, ss_item_sk#5, ss_store_sk#6, ss_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -96,7 +96,7 @@ Input [7]: [i_item_sk#1, i_brand#2, i_category#3, ss_sold_date_sk#4, ss_item_sk# (10) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_moy#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)] ReadSchema: struct @@ -123,7 +123,7 @@ Input [8]: [i_brand#2, i_category#3, ss_sold_date_sk#4, ss_store_sk#6, ss_sales_ (16) Scan parquet default.store Output [3]: [s_store_sk#13, s_store_name#14, s_company_name#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_store_name), IsNotNull(s_company_name)] ReadSchema: struct @@ -195,7 +195,7 @@ Arguments: [rank(d_year#10, d_moy#11) windowspecdefinition(i_category#3, i_brand (32) Filter [codegen id : 23] Input [9]: [i_category#3, i_brand#2, s_store_name#14, s_company_name#15, d_year#10, d_moy#11, sum_sales#21, avg_monthly_sales#24, rn#26] -Condition : (((((isnotnull(d_year#10) AND isnotnull(avg_monthly_sales#24)) AND (d_year#10 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26)) +Condition : (((((isnotnull(avg_monthly_sales#24) AND isnotnull(d_year#10)) AND (d_year#10 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN 
CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26)) (33) ReusedExchange [Reuses operator id: 23] Output [7]: [i_category#27, i_brand#28, s_store_name#29, s_company_name#30, d_year#31, d_moy#32, sum#33] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49.sf100/explain.txt index 0662a51d8f9cd..31810073746e7 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49.sf100/explain.txt @@ -91,8 +91,8 @@ TakeOrderedAndProject (87) (1) Scan parquet default.web_sales Output [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] -PushedFilters: [IsNotNull(ws_quantity), IsNotNull(ws_net_paid), IsNotNull(ws_net_profit), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/web_sales] +PushedFilters: [IsNotNull(ws_net_profit), IsNotNull(ws_net_paid), IsNotNull(ws_quantity), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_order_number), IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct (2) ColumnarToRow [codegen id : 2] @@ -100,7 +100,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (3) Filter [codegen id : 2] Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] -Condition : ((((((((isnotnull(ws_quantity#4) AND isnotnull(ws_net_paid#5)) AND isnotnull(ws_net_profit#6)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_sold_date_sk#1)) +Condition : ((((((((isnotnull(ws_net_profit#6) AND isnotnull(ws_net_paid#5)) AND isnotnull(ws_quantity#4)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_sold_date_sk#1)) (4) Project [codegen id : 2] Output [5]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5] @@ -109,7 +109,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (5) Scan parquet default.date_dim Output [3]: [d_date_sk#7, d_year#8, d_moy#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,12), IsNotNull(d_date_sk)] ReadSchema: struct @@ -148,7 +148,7 @@ Arguments: [cast(ws_order_number#3 as bigint) ASC NULLS FIRST, cast(ws_item_sk#2 (14) Scan 
parquet default.web_returns Output [4]: [wr_item_sk#12, wr_order_number#13, wr_return_quantity#14, wr_return_amt#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_return_amt), GreaterThan(wr_return_amt,10000.00), IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct @@ -225,8 +225,8 @@ Input [5]: [item#34, return_ratio#35, currency_ratio#36, return_rank#38, currenc (31) Scan parquet default.catalog_sales Output [6]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#44, cs_net_paid#45, cs_net_profit#46] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] -PushedFilters: [IsNotNull(cs_net_paid), IsNotNull(cs_quantity), IsNotNull(cs_net_profit), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_item_sk), IsNotNull(cs_order_number), IsNotNull(cs_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] +PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_net_paid), IsNotNull(cs_net_profit), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_item_sk), IsNotNull(cs_order_number), IsNotNull(cs_sold_date_sk)] ReadSchema: struct (32) ColumnarToRow [codegen id : 12] @@ -234,7 +234,7 @@ Input [6]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#4 (33) Filter [codegen id : 12] Input [6]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#44, cs_net_paid#45, cs_net_profit#46] -Condition : ((((((((isnotnull(cs_net_paid#45) AND isnotnull(cs_quantity#44)) AND isnotnull(cs_net_profit#46)) AND (cs_net_profit#46 > 1.00)) AND (cs_net_paid#45 > 0.00)) AND (cs_quantity#44 > 0)) AND isnotnull(cs_item_sk#42)) AND isnotnull(cs_order_number#43)) AND isnotnull(cs_sold_date_sk#41)) +Condition : ((((((((isnotnull(cs_quantity#44) AND isnotnull(cs_net_paid#45)) AND isnotnull(cs_net_profit#46)) AND (cs_net_profit#46 > 1.00)) AND (cs_net_paid#45 > 0.00)) AND (cs_quantity#44 > 0)) AND isnotnull(cs_item_sk#42)) AND isnotnull(cs_order_number#43)) AND isnotnull(cs_sold_date_sk#41)) (34) Project [codegen id : 12] Output [5]: [cs_sold_date_sk#41, cs_item_sk#42, cs_order_number#43, cs_quantity#44, cs_net_paid#45] @@ -263,8 +263,8 @@ Arguments: [cs_order_number#43 ASC NULLS FIRST, cs_item_sk#42 ASC NULLS FIRST], (40) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#48, cr_order_number#49, cr_return_quantity#50, cr_return_amount#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] -PushedFilters: [IsNotNull(cr_return_amount), GreaterThan(cr_return_amount,10000.00), IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] +PushedFilters: [IsNotNull(cr_return_amount), GreaterThan(cr_return_amount,10000.00), IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct (41) ColumnarToRow [codegen id : 14] @@ -272,7 +272,7 @@ Input [4]: [cr_item_sk#48, cr_order_number#49, cr_return_quantity#50, cr_return_ (42) Filter [codegen id : 14] Input [4]: [cr_item_sk#48, 
cr_order_number#49, cr_return_quantity#50, cr_return_amount#51] -Condition : (((isnotnull(cr_return_amount#51) AND (cr_return_amount#51 > 10000.00)) AND isnotnull(cr_item_sk#48)) AND isnotnull(cr_order_number#49)) +Condition : (((isnotnull(cr_return_amount#51) AND (cr_return_amount#51 > 10000.00)) AND isnotnull(cr_order_number#49)) AND isnotnull(cr_item_sk#48)) (43) Exchange Input [4]: [cr_item_sk#48, cr_order_number#49, cr_return_quantity#50, cr_return_amount#51] @@ -340,8 +340,8 @@ Input [5]: [item#70, return_ratio#71, currency_ratio#72, return_rank#74, currenc (57) Scan parquet default.store_sales Output [6]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity#80, ss_net_paid#81, ss_net_profit#82] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] -PushedFilters: [IsNotNull(ss_net_profit), IsNotNull(ss_quantity), IsNotNull(ss_net_paid), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_quantity), IsNotNull(ss_net_paid), IsNotNull(ss_net_profit), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct (58) ColumnarToRow [codegen id : 22] @@ -349,7 +349,7 @@ Input [6]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity# (59) Filter [codegen id : 22] Input [6]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity#80, ss_net_paid#81, ss_net_profit#82] -Condition : ((((((((isnotnull(ss_net_profit#82) AND isnotnull(ss_quantity#80)) AND isnotnull(ss_net_paid#81)) AND (ss_net_profit#82 > 1.00)) AND (ss_net_paid#81 > 0.00)) AND (ss_quantity#80 > 0)) AND isnotnull(ss_ticket_number#79)) AND isnotnull(ss_item_sk#78)) AND isnotnull(ss_sold_date_sk#77)) +Condition : ((((((((isnotnull(ss_quantity#80) AND isnotnull(ss_net_paid#81)) AND isnotnull(ss_net_profit#82)) AND (ss_net_profit#82 > 1.00)) AND (ss_net_paid#81 > 0.00)) AND (ss_quantity#80 > 0)) AND isnotnull(ss_ticket_number#79)) AND isnotnull(ss_item_sk#78)) AND isnotnull(ss_sold_date_sk#77)) (60) Project [codegen id : 22] Output [5]: [ss_sold_date_sk#77, ss_item_sk#78, ss_ticket_number#79, ss_quantity#80, ss_net_paid#81] @@ -378,8 +378,8 @@ Arguments: [cast(ss_ticket_number#79 as bigint) ASC NULLS FIRST, cast(ss_item_sk (66) Scan parquet default.store_returns Output [4]: [sr_item_sk#84, sr_ticket_number#85, sr_return_quantity#86, sr_return_amt#87] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] -PushedFilters: [IsNotNull(sr_return_amt), GreaterThan(sr_return_amt,10000.00), IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] +Location [not included in comparison]/{warehouse_dir}/store_returns] +PushedFilters: [IsNotNull(sr_return_amt), GreaterThan(sr_return_amt,10000.00), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct (67) ColumnarToRow [codegen id : 24] @@ -387,7 +387,7 @@ Input [4]: [sr_item_sk#84, sr_ticket_number#85, sr_return_quantity#86, sr_return (68) Filter [codegen id : 24] Input [4]: [sr_item_sk#84, sr_ticket_number#85, sr_return_quantity#86, sr_return_amt#87] 
-Condition : (((isnotnull(sr_return_amt#87) AND (sr_return_amt#87 > 10000.00)) AND isnotnull(sr_ticket_number#85)) AND isnotnull(sr_item_sk#84)) +Condition : (((isnotnull(sr_return_amt#87) AND (sr_return_amt#87 > 10000.00)) AND isnotnull(sr_item_sk#84)) AND isnotnull(sr_ticket_number#85)) (69) Exchange Input [4]: [sr_item_sk#84, sr_ticket_number#85, sr_return_quantity#86, sr_return_amt#87] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49/explain.txt index 0ce48667d73a1..0f5821b13b73b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q49/explain.txt @@ -82,8 +82,8 @@ TakeOrderedAndProject (78) (1) Scan parquet default.web_sales Output [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] -PushedFilters: [IsNotNull(ws_net_paid), IsNotNull(ws_quantity), IsNotNull(ws_net_profit), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_order_number), IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/web_sales] +PushedFilters: [IsNotNull(ws_net_profit), IsNotNull(ws_net_paid), IsNotNull(ws_quantity), GreaterThan(ws_net_profit,1.00), GreaterThan(ws_net_paid,0.00), GreaterThan(ws_quantity,0), IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_sold_date_sk)] ReadSchema: struct (2) ColumnarToRow [codegen id : 3] @@ -91,7 +91,7 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (3) Filter [codegen id : 3] Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5, ws_net_profit#6] -Condition : ((((((((isnotnull(ws_net_paid#5) AND isnotnull(ws_quantity#4)) AND isnotnull(ws_net_profit#6)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_sold_date_sk#1)) +Condition : ((((((((isnotnull(ws_net_profit#6) AND isnotnull(ws_net_paid#5)) AND isnotnull(ws_quantity#4)) AND (ws_net_profit#6 > 1.00)) AND (ws_net_paid#5 > 0.00)) AND (ws_quantity#4 > 0)) AND isnotnull(ws_item_sk#2)) AND isnotnull(ws_order_number#3)) AND isnotnull(ws_sold_date_sk#1)) (4) Project [codegen id : 3] Output [5]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, ws_net_paid#5] @@ -100,8 +100,8 @@ Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (5) Scan parquet default.web_returns Output [4]: [wr_item_sk#7, wr_order_number#8, wr_return_quantity#9, wr_return_amt#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_returns] -PushedFilters: [IsNotNull(wr_return_amt), GreaterThan(wr_return_amt,10000.00), IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] +Location [not included in comparison]/{warehouse_dir}/web_returns] +PushedFilters: [IsNotNull(wr_return_amt), GreaterThan(wr_return_amt,10000.00), IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct (6) ColumnarToRow [codegen id : 1] @@ -109,7 +109,7 @@ Input [4]: 
[wr_item_sk#7, wr_order_number#8, wr_return_quantity#9, wr_return_amt (7) Filter [codegen id : 1] Input [4]: [wr_item_sk#7, wr_order_number#8, wr_return_quantity#9, wr_return_amt#10] -Condition : (((isnotnull(wr_return_amt#10) AND (wr_return_amt#10 > 10000.00)) AND isnotnull(wr_order_number#8)) AND isnotnull(wr_item_sk#7)) +Condition : (((isnotnull(wr_return_amt#10) AND (wr_return_amt#10 > 10000.00)) AND isnotnull(wr_item_sk#7)) AND isnotnull(wr_order_number#8)) (8) BroadcastExchange Input [4]: [wr_item_sk#7, wr_order_number#8, wr_return_quantity#9, wr_return_amt#10] @@ -127,7 +127,7 @@ Input [9]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#3, ws_quantity#4, w (11) Scan parquet default.date_dim Output [3]: [d_date_sk#12, d_year#13, d_moy#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,12), IsNotNull(d_date_sk)] ReadSchema: struct @@ -204,8 +204,8 @@ Input [5]: [item#33, return_ratio#34, currency_ratio#35, return_rank#37, currenc (28) Scan parquet default.catalog_sales Output [6]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#43, cs_net_paid#44, cs_net_profit#45] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] -PushedFilters: [IsNotNull(cs_net_paid), IsNotNull(cs_quantity), IsNotNull(cs_net_profit), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_order_number), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] +PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_net_paid), IsNotNull(cs_net_profit), GreaterThan(cs_net_profit,1.00), GreaterThan(cs_net_paid,0.00), GreaterThan(cs_quantity,0), IsNotNull(cs_item_sk), IsNotNull(cs_order_number), IsNotNull(cs_sold_date_sk)] ReadSchema: struct (29) ColumnarToRow [codegen id : 10] @@ -213,7 +213,7 @@ Input [6]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#4 (30) Filter [codegen id : 10] Input [6]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#43, cs_net_paid#44, cs_net_profit#45] -Condition : ((((((((isnotnull(cs_net_paid#44) AND isnotnull(cs_quantity#43)) AND isnotnull(cs_net_profit#45)) AND (cs_net_profit#45 > 1.00)) AND (cs_net_paid#44 > 0.00)) AND (cs_quantity#43 > 0)) AND isnotnull(cs_order_number#42)) AND isnotnull(cs_item_sk#41)) AND isnotnull(cs_sold_date_sk#40)) +Condition : ((((((((isnotnull(cs_quantity#43) AND isnotnull(cs_net_paid#44)) AND isnotnull(cs_net_profit#45)) AND (cs_net_profit#45 > 1.00)) AND (cs_net_paid#44 > 0.00)) AND (cs_quantity#43 > 0)) AND isnotnull(cs_item_sk#41)) AND isnotnull(cs_order_number#42)) AND isnotnull(cs_sold_date_sk#40)) (31) Project [codegen id : 10] Output [5]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#43, cs_net_paid#44] @@ -222,7 +222,7 @@ Input [6]: [cs_sold_date_sk#40, cs_item_sk#41, cs_order_number#42, cs_quantity#4 (32) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#46, cr_order_number#47, cr_return_quantity#48, cr_return_amount#49] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_return_amount), GreaterThan(cr_return_amount,10000.00), IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -307,8 +307,8 @@ Input [5]: [item#68, return_ratio#69, currency_ratio#70, return_rank#72, currenc (51) Scan parquet default.store_sales Output [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity#78, ss_net_paid#79, ss_net_profit#80] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] -PushedFilters: [IsNotNull(ss_quantity), IsNotNull(ss_net_paid), IsNotNull(ss_net_profit), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk)] +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_net_paid), IsNotNull(ss_net_profit), IsNotNull(ss_quantity), GreaterThan(ss_net_profit,1.00), GreaterThan(ss_net_paid,0.00), GreaterThan(ss_quantity,0), IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct (52) ColumnarToRow [codegen id : 17] @@ -316,7 +316,7 @@ Input [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity# (53) Filter [codegen id : 17] Input [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity#78, ss_net_paid#79, ss_net_profit#80] -Condition : ((((((((isnotnull(ss_quantity#78) AND isnotnull(ss_net_paid#79)) AND isnotnull(ss_net_profit#80)) AND (ss_net_profit#80 > 1.00)) AND (ss_net_paid#79 > 0.00)) AND (ss_quantity#78 > 0)) AND isnotnull(ss_item_sk#76)) AND isnotnull(ss_ticket_number#77)) AND isnotnull(ss_sold_date_sk#75)) +Condition : ((((((((isnotnull(ss_net_paid#79) AND isnotnull(ss_net_profit#80)) AND isnotnull(ss_quantity#78)) AND (ss_net_profit#80 > 1.00)) AND (ss_net_paid#79 > 0.00)) AND (ss_quantity#78 > 0)) AND isnotnull(ss_ticket_number#77)) AND isnotnull(ss_item_sk#76)) AND isnotnull(ss_sold_date_sk#75)) (54) Project [codegen id : 17] Output [5]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity#78, ss_net_paid#79] @@ -325,7 +325,7 @@ Input [6]: [ss_sold_date_sk#75, ss_item_sk#76, ss_ticket_number#77, ss_quantity# (55) Scan parquet default.store_returns Output [4]: [sr_item_sk#81, sr_ticket_number#82, sr_return_quantity#83, sr_return_amt#84] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_return_amt), GreaterThan(sr_return_amt,10000.00), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a.sf100/explain.txt index 409051a7856a1..6243d3234d187 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a.sf100/explain.txt @@ -84,7 +84,7 @@ TakeOrderedAndProject (80) (1) Scan parquet default.web_sales 
Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -98,7 +98,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#4, d_date#5, d_month_seq#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -233,7 +233,7 @@ Arguments: [item_sk#12 ASC NULLS FIRST, d_date#5 ASC NULLS FIRST], false, 0 (34) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#29, ss_item_sk#30, ss_sales_price#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a/explain.txt index 88500a4c2a834..386b2da5528c7 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q51a/explain.txt @@ -81,7 +81,7 @@ TakeOrderedAndProject (77) (1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -95,7 +95,7 @@ Condition : (isnotnull(ws_item_sk#2) AND isnotnull(ws_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [3]: [d_date_sk#4, d_date#5, d_month_seq#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -222,7 +222,7 @@ Arguments: [item_sk#12 ASC NULLS FIRST, d_date#5 ASC NULLS FIRST], false, 0 (32) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#29, ss_item_sk#30, ss_sales_price#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57.sf100/explain.txt index f506aebd2cb78..105de3e077e6a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57.sf100/explain.txt @@ -62,7 +62,7 @@ TakeOrderedAndProject (58) (1) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#1, cs_call_center_sk#2, cs_item_sk#3, cs_sales_price#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_call_center_sk)] ReadSchema: struct @@ -76,7 +76,7 @@ Condition : ((isnotnull(cs_item_sk#3) AND isnotnull(cs_sold_date_sk#1)) AND isno (4) Scan parquet default.date_dim Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)] ReadSchema: struct @@ -103,7 +103,7 @@ Input [7]: [cs_sold_date_sk#1, cs_call_center_sk#2, cs_item_sk#3, cs_sales_price (10) Scan parquet default.call_center Output [2]: [cc_call_center_sk#9, cc_name#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/call_center] +Location [not included in comparison]/{warehouse_dir}/call_center] PushedFilters: [IsNotNull(cc_call_center_sk), IsNotNull(cc_name)] ReadSchema: struct @@ -138,7 +138,7 @@ Arguments: [cs_item_sk#3 ASC NULLS FIRST], false, 0 (18) Scan parquet default.item Output [3]: [i_item_sk#13, i_brand#14, i_category#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_brand), IsNotNull(i_category)] ReadSchema: struct @@ -214,7 +214,7 @@ Arguments: [rank(d_year#6, d_moy#7) windowspecdefinition(i_category#15, i_brand# (35) Filter [codegen id : 12] Input [8]: [i_category#15, i_brand#14, cc_name#10, d_year#6, d_moy#7, sum_sales#21, avg_monthly_sales#24, rn#26] -Condition : (((((isnotnull(avg_monthly_sales#24) AND isnotnull(d_year#6)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26)) +Condition : (((((isnotnull(d_year#6) AND isnotnull(avg_monthly_sales#24)) AND (d_year#6 = 1999)) AND (avg_monthly_sales#24 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#24 > 0.000000) THEN 
CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#21 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#24 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#26)) (36) Exchange Input [8]: [i_category#15, i_brand#14, cc_name#10, d_year#6, d_moy#7, sum_sales#21, avg_monthly_sales#24, rn#26] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57/explain.txt index 1ec955a59b3ca..524ddbc102c11 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q57/explain.txt @@ -55,7 +55,7 @@ TakeOrderedAndProject (51) (1) Scan parquet default.item Output [3]: [i_item_sk#1, i_brand#2, i_category#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk), IsNotNull(i_category), IsNotNull(i_brand)] ReadSchema: struct @@ -69,7 +69,7 @@ Condition : ((isnotnull(i_item_sk#1) AND isnotnull(i_category#3)) AND isnotnull( (4) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#4, cs_call_center_sk#5, cs_item_sk#6, cs_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_call_center_sk)] ReadSchema: struct @@ -96,7 +96,7 @@ Input [7]: [i_item_sk#1, i_brand#2, i_category#3, cs_sold_date_sk#4, cs_call_cen (10) Scan parquet default.date_dim Output [3]: [d_date_sk#9, d_year#10, d_moy#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [Or(Or(EqualTo(d_year,1999),And(EqualTo(d_year,1998),EqualTo(d_moy,12))),And(EqualTo(d_year,2000),EqualTo(d_moy,1))), IsNotNull(d_date_sk)] ReadSchema: struct @@ -123,7 +123,7 @@ Input [8]: [i_brand#2, i_category#3, cs_sold_date_sk#4, cs_call_center_sk#5, cs_ (16) Scan parquet default.call_center Output [2]: [cc_call_center_sk#13, cc_name#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/call_center] +Location [not included in comparison]/{warehouse_dir}/call_center] PushedFilters: [IsNotNull(cc_call_center_sk), IsNotNull(cc_name)] ReadSchema: struct @@ -195,7 +195,7 @@ Arguments: [rank(d_year#10, d_moy#11) windowspecdefinition(i_category#3, i_brand (32) Filter [codegen id : 23] Input [8]: [i_category#3, i_brand#2, cc_name#14, d_year#10, d_moy#11, sum_sales#20, avg_monthly_sales#23, rn#25] -Condition : (((((isnotnull(d_year#10) AND isnotnull(avg_monthly_sales#23)) AND (d_year#10 = 1999)) AND (avg_monthly_sales#23 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#23 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#20 as 
decimal(22,6))) - promote_precision(cast(avg_monthly_sales#23 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#23 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#25)) +Condition : (((((isnotnull(avg_monthly_sales#23) AND isnotnull(d_year#10)) AND (d_year#10 = 1999)) AND (avg_monthly_sales#23 > 0.000000)) AND (CASE WHEN (avg_monthly_sales#23 > 0.000000) THEN CheckOverflow((promote_precision(abs(CheckOverflow((promote_precision(cast(sum_sales#20 as decimal(22,6))) - promote_precision(cast(avg_monthly_sales#23 as decimal(22,6)))), DecimalType(22,6), true))) / promote_precision(cast(avg_monthly_sales#23 as decimal(22,6)))), DecimalType(38,16), true) ELSE null END > 0.1000000000000000)) AND isnotnull(rn#25)) (33) ReusedExchange [Reuses operator id: 23] Output [6]: [i_category#26, i_brand#27, cc_name#28, d_year#29, d_moy#30, sum#31] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt index 77a7e2a00c4f4..471d38c89e601 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt @@ -102,7 +102,7 @@ TakeOrderedAndProject (98) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] @@ -119,7 +119,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (5) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_loss#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -139,7 +139,7 @@ Input [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_los (10) Scan parquet default.date_dim Output [2]: [d_date_sk#21, d_date#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-08-18), IsNotNull(d_date_sk)] ReadSchema: struct @@ -170,7 +170,7 @@ Input [7]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_lo (17) Scan parquet default.store Output [2]: [s_store_sk#24, s_store_id#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -215,7 +215,7 @@ Results [5]: [store channel AS channel#40, concat(store, s_store_id#25) AS id#41 (26) Scan 
parquet default.catalog_sales Output [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs_net_profit#48] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk)] ReadSchema: struct @@ -233,7 +233,7 @@ Input [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs (30) Scan parquet default.catalog_returns Output [4]: [cr_returned_date_sk#55, cr_catalog_page_sk#56, cr_return_amount#57, cr_net_loss#58] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_catalog_page_sk)] ReadSchema: struct @@ -265,7 +265,7 @@ Input [7]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, ne (38) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_page] +Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct @@ -310,7 +310,7 @@ Results [5]: [catalog channel AS channel#81, concat(catalog_page, cp_catalog_pag (47) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net_profit#89] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -328,7 +328,7 @@ Input [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net (51) Scan parquet default.web_returns Output [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk)] ReadSchema: struct @@ -350,7 +350,7 @@ Arguments: [wr_item_sk#97 ASC NULLS FIRST, wr_order_number#98 ASC NULLS FIRST], (56) Scan parquet default.web_sales Output [3]: [ws_item_sk#102, ws_web_site_sk#87, ws_order_number#103] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -395,7 +395,7 @@ Input [7]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_am (67) Scan parquet default.web_site Output [2]: [web_site_sk#111, web_site_id#112] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt index 62bbb6547080a..fa2435de73e02 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt @@ -99,7 +99,7 @@ TakeOrderedAndProject (95) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] @@ -116,7 +116,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (5) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_loss#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -136,7 +136,7 @@ Input [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_los (10) Scan parquet default.date_dim Output [2]: [d_date_sk#21, d_date#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-08-18), IsNotNull(d_date_sk)] ReadSchema: struct @@ -167,7 +167,7 @@ Input [7]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_lo (17) Scan parquet default.store Output [2]: [s_store_sk#24, s_store_id#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -212,7 +212,7 @@ Results [5]: [store channel AS channel#40, concat(store, s_store_id#25) AS id#41 (26) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs_net_profit#48] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk)] ReadSchema: struct @@ -230,7 +230,7 @@ Input [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs (30) Scan parquet default.catalog_returns Output [4]: [cr_returned_date_sk#55, cr_catalog_page_sk#56, cr_return_amount#57, cr_net_loss#58] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_catalog_page_sk)] ReadSchema: struct @@ -262,7 +262,7 @@ Input [7]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, ne (38) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_page] +Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct @@ -307,7 +307,7 @@ Results [5]: [catalog channel AS channel#81, concat(catalog_page, cp_catalog_pag (47) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net_profit#89] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -325,7 +325,7 @@ Input [4]: [ws_sold_date_sk#86, ws_web_site_sk#87, ws_ext_sales_price#88, ws_net (51) Scan parquet default.web_returns Output [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk)] ReadSchema: struct @@ -339,7 +339,7 @@ Condition : isnotnull(wr_returned_date_sk#96) (54) Scan parquet default.web_sales Output [3]: [ws_item_sk#101, ws_web_site_sk#87, ws_order_number#102] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_site_sk)] ReadSchema: struct @@ -380,7 +380,7 @@ Input [7]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_am (64) Scan parquet default.web_site Output [2]: [web_site_sk#110, web_site_id#111] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt index fe826bf02784e..ab246a3449557 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt @@ -54,7 +54,7 @@ TakeOrderedAndProject (50) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -68,7 +68,7 @@ Condition : ((isnotnull(ss_customer_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -99,7 +99,7 @@ Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, d_date_sk#4] (11) Scan parquet default.item Output [3]: [i_item_sk#9, i_current_price#10, i_category#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), IsNotNull(i_item_sk)] ReadSchema: struct @@ -113,7 +113,7 @@ Condition : (isnotnull(i_current_price#10) AND isnotnull(i_item_sk#9)) (14) Scan parquet default.item Output [2]: [i_current_price#10, i_category#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category)] ReadSchema: struct @@ -183,7 +183,7 @@ Arguments: [ss_customer_sk#3 ASC NULLS FIRST], false, 0 (29) Scan parquet default.customer_address Output [2]: [ca_address_sk#23, ca_state#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -205,7 +205,7 @@ Arguments: [ca_address_sk#23 ASC NULLS FIRST], false, 0 (34) Scan parquet default.customer Output [2]: [c_customer_sk#26, c_current_addr_sk#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_customer_sk)] ReadSchema: struct @@ -295,7 +295,7 @@ Subquery:1 Hosting operator id = 6 Hosting Expression = Subquery scalar-subquery (51) Scan parquet default.date_dim Output [3]: [d_month_seq#5, d_year#37, d_moy#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,1)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6/explain.txt index 4a892fbdb59b6..bbcb16861e0be 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6/explain.txt @@ -48,7 +48,7 @@ TakeOrderedAndProject (44) (1) Scan parquet default.customer_address Output [2]: [ca_address_sk#1, ca_state#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -62,7 +62,7 @@ Condition : isnotnull(ca_address_sk#1) (4) Scan parquet default.customer Output [2]: [c_customer_sk#3, c_current_addr_sk#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_current_addr_sk), IsNotNull(c_customer_sk)] ReadSchema: struct @@ -89,7 +89,7 @@ Input [4]: [ca_address_sk#1, ca_state#2, c_customer_sk#3, c_current_addr_sk#4] (10) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#6, ss_item_sk#7, ss_customer_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -116,7 +116,7 @@ Input [5]: [ca_state#2, c_customer_sk#3, ss_sold_date_sk#6, ss_item_sk#7, ss_cus (16) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_month_seq#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -147,7 +147,7 @@ Input [4]: [ca_state#2, ss_sold_date_sk#6, ss_item_sk#7, d_date_sk#10] (23) Scan parquet default.item Output [3]: [i_item_sk#15, i_current_price#16, i_category#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), IsNotNull(i_item_sk)] ReadSchema: struct @@ -161,7 +161,7 @@ Condition : (isnotnull(i_current_price#16) AND isnotnull(i_item_sk#15)) (26) Scan parquet default.item Output [2]: [i_current_price#16, i_category#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category)] ReadSchema: struct @@ -265,7 +265,7 @@ Subquery:1 Hosting operator id = 18 Hosting Expression = Subquery scalar-subquer (45) Scan parquet default.date_dim Output [3]: [d_month_seq#11, d_year#35, d_moy#36] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), 
EqualTo(d_moy,1)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64.sf100/explain.txt index 900f61c34e2bc..744936e3d80d0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64.sf100/explain.txt @@ -213,7 +213,7 @@ (1) Scan parquet default.store_sales Output [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_store_sk#7, ss_promo_sk#8, ss_ticket_number#9, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk)] ReadSchema: struct @@ -235,7 +235,7 @@ Arguments: [cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number# (6) Scan parquet default.store_returns Output [2]: [sr_item_sk#14, sr_ticket_number#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -266,7 +266,7 @@ Input [14]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (13) Scan parquet default.catalog_sales Output [3]: [cs_item_sk#17, cs_order_number#18, cs_ext_list_price#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_order_number)] ReadSchema: struct @@ -288,7 +288,7 @@ Arguments: [cs_item_sk#17 ASC NULLS FIRST, cs_order_number#18 ASC NULLS FIRST], (18) Scan parquet default.catalog_returns Output [5]: [cr_item_sk#21, cr_order_number#22, cr_refunded_cash#23, cr_reversed_charge#24, cr_store_credit#25] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -358,7 +358,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (33) Scan parquet default.date_dim Output [2]: [d_date_sk#39, d_year#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk)] ReadSchema: struct @@ -385,7 +385,7 @@ Input [13]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (39) Scan parquet 
default.store Output [3]: [s_store_sk#42, s_store_name#43, s_zip#44] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_zip), IsNotNull(s_store_name)] ReadSchema: struct @@ -420,7 +420,7 @@ Arguments: [ss_customer_sk#3 ASC NULLS FIRST], false, 0 (47) Scan parquet default.customer Output [6]: [c_customer_sk#47, c_current_cdemo_sk#48, c_current_hdemo_sk#49, c_current_addr_sk#50, c_first_shipto_date_sk#51, c_first_sales_date_sk#52] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_first_sales_date_sk), IsNotNull(c_first_shipto_date_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -451,7 +451,7 @@ Input [18]: [ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_ad (54) Scan parquet default.date_dim Output [2]: [d_date_sk#54, d_year#55] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -498,7 +498,7 @@ Arguments: [ss_cdemo_sk#4 ASC NULLS FIRST], false, 0 (65) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#60, cd_marital_status#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status)] ReadSchema: struct @@ -553,7 +553,7 @@ Input [18]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (78) Scan parquet default.promotion Output [1]: [p_promo_sk#66] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_promo_sk)] ReadSchema: struct @@ -580,7 +580,7 @@ Input [15]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (84) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#68, hd_income_band_sk#69] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)] ReadSchema: struct @@ -627,7 +627,7 @@ Arguments: [ss_addr_sk#6 ASC NULLS FIRST], false, 0 (95) Scan parquet default.customer_address Output [5]: [ca_address_sk#74, ca_street_number#75, ca_street_name#76, ca_city#77, ca_zip#78] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_address] +Location [not 
included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -682,7 +682,7 @@ Input [21]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (108) Scan parquet default.income_band Output [1]: [ib_income_band_sk#86] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/income_band] +Location [not included in comparison]/{warehouse_dir}/income_band] PushedFilters: [IsNotNull(ib_income_band_sk)] ReadSchema: struct @@ -721,7 +721,7 @@ Input [19]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (117) Scan parquet default.item Output [4]: [i_item_sk#89, i_current_price#90, i_color#91, i_product_name#92] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), In(i_color, [purple,burlywood,indian,spring,floral,medium]), GreaterThanOrEqual(i_current_price,64.00), IsNotNull(i_item_sk)] ReadSchema: struct @@ -813,7 +813,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (138) Scan parquet default.date_dim Output [2]: [d_date_sk#39, d_year#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64/explain.txt index 55967382e42cd..ae65d8f33b996 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q64/explain.txt @@ -174,7 +174,7 @@ (1) Scan parquet default.store_sales Output [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_store_sk#7, ss_promo_sk#8, ss_ticket_number#9, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk)] ReadSchema: struct @@ -188,7 +188,7 @@ Condition : ((((((((isnotnull(ss_item_sk#2) AND isnotnull(ss_ticket_number#9)) A (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#13, sr_ticket_number#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -215,7 +215,7 @@ Input [14]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (10) 
Scan parquet default.catalog_sales Output [3]: [cs_item_sk#16, cs_order_number#17, cs_ext_list_price#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_order_number)] ReadSchema: struct @@ -229,7 +229,7 @@ Condition : (isnotnull(cs_item_sk#16) AND isnotnull(cs_order_number#17)) (13) Scan parquet default.catalog_returns Output [5]: [cr_item_sk#19, cr_order_number#20, cr_refunded_cash#21, cr_reversed_charge#22, cr_store_credit#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -295,7 +295,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (27) Scan parquet default.date_dim Output [2]: [d_date_sk#37, d_year#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk)] ReadSchema: struct @@ -322,7 +322,7 @@ Input [13]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (33) Scan parquet default.store Output [3]: [s_store_sk#40, s_store_name#41, s_zip#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk), IsNotNull(s_zip), IsNotNull(s_store_name)] ReadSchema: struct @@ -349,7 +349,7 @@ Input [14]: [ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_ad (39) Scan parquet default.customer Output [6]: [c_customer_sk#44, c_current_cdemo_sk#45, c_current_hdemo_sk#46, c_current_addr_sk#47, c_first_shipto_date_sk#48, c_first_sales_date_sk#49] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_first_sales_date_sk), IsNotNull(c_first_shipto_date_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct @@ -376,7 +376,7 @@ Input [18]: [ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_ad (45) Scan parquet default.date_dim Output [2]: [d_date_sk#51, d_year#52] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk)] ReadSchema: struct @@ -415,7 +415,7 @@ Input [18]: [ss_item_sk#2, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_ (54) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#56, cd_marital_status#57] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status)] ReadSchema: struct @@ -454,7 +454,7 @@ Input [18]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (63) Scan parquet default.promotion Output [1]: [p_promo_sk#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_promo_sk)] ReadSchema: struct @@ -481,7 +481,7 @@ Input [15]: [ss_item_sk#2, ss_hdemo_sk#5, ss_addr_sk#6, ss_promo_sk#8, ss_wholes (69) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#63, hd_income_band_sk#64] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)] ReadSchema: struct @@ -520,7 +520,7 @@ Input [15]: [ss_item_sk#2, ss_addr_sk#6, ss_wholesale_cost#10, ss_list_price#11, (78) Scan parquet default.customer_address Output [5]: [ca_address_sk#68, ca_street_number#69, ca_street_name#70, ca_city#71, ca_zip#72] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_address] +Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk)] ReadSchema: struct @@ -559,7 +559,7 @@ Input [21]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (87) Scan parquet default.income_band Output [1]: [ib_income_band_sk#79] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/income_band] +Location [not included in comparison]/{warehouse_dir}/income_band] PushedFilters: [IsNotNull(ib_income_band_sk)] ReadSchema: struct @@ -598,7 +598,7 @@ Input [19]: [ss_item_sk#2, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt (96) Scan parquet default.item Output [4]: [i_item_sk#82, i_current_price#83, i_color#84, i_product_name#85] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), In(i_color, [purple,burlywood,indian,spring,floral,medium]), GreaterThanOrEqual(i_current_price,64.00), IsNotNull(i_item_sk)] ReadSchema: struct @@ -647,7 +647,7 @@ Results [17]: [i_product_name#85 AS product_name#100, i_item_sk#82 AS item_sk#10 (106) Scan parquet default.store_sales Output [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, ss_hdemo_sk#5, ss_addr_sk#6, ss_store_sk#7, ss_promo_sk#8, ss_ticket_number#9, ss_wholesale_cost#10, ss_list_price#11, ss_coupon_amt#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] 
PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_ticket_number), IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_promo_sk), IsNotNull(ss_hdemo_sk), IsNotNull(ss_addr_sk)] ReadSchema: struct @@ -685,7 +685,7 @@ Input [12]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_cdemo_sk#4, s (115) Scan parquet default.date_dim Output [2]: [d_date_sk#37, d_year#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a.sf100/explain.txt index 40d9b1e2cbd08..3d0d49fff876a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a.sf100/explain.txt @@ -78,7 +78,7 @@ TakeOrderedAndProject (74) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -92,7 +92,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND isn (4) Scan parquet default.date_dim Output [5]: [d_date_sk#6, d_month_seq#7, d_year#8, d_moy#9, d_qoy#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -123,7 +123,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sa (11) Scan parquet default.store Output [2]: [s_store_sk#12, s_store_id#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -158,7 +158,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0 (19) Scan parquet default.item Output [5]: [i_item_sk#16, i_brand#17, i_class#18, i_category#19, i_product_name#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a/explain.txt index f1bd844bb6973..38a768bd3dec0 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q67a/explain.txt @@ -75,7 +75,7 @@ TakeOrderedAndProject (71) (1) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -89,7 +89,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND isn (4) Scan parquet default.date_dim Output [5]: [d_date_sk#6, d_month_seq#7, d_year#8, d_moy#9, d_qoy#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -120,7 +120,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sa (11) Scan parquet default.store Output [2]: [s_store_sk#12, s_store_id#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -147,7 +147,7 @@ Input [9]: [ss_item_sk#2, ss_store_sk#3, ss_quantity#4, ss_sales_price#5, d_year (17) Scan parquet default.item Output [5]: [i_item_sk#15, i_brand#16, i_class#17, i_category#18, i_product_name#19] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt index 8ffe0713c70ae..628ca0ad4711c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt @@ -68,7 +68,7 @@ TakeOrderedAndProject (64) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -82,7 +82,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in 
comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -113,7 +113,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4] (11) Scan parquet default.store Output [3]: [s_store_sk#7, s_county#8, s_state#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -127,7 +127,7 @@ Condition : isnotnull(s_store_sk#7) (14) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -153,7 +153,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4] (20) Scan parquet default.store Output [2]: [s_store_sk#7, s_state#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt index 6fc2c3e2b48a1..705d1b3f91342 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt @@ -68,7 +68,7 @@ TakeOrderedAndProject (64) (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -82,7 +82,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -113,7 +113,7 @@ Input [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3, d_date_sk#4] (11) Scan parquet default.store Output [3]: [s_store_sk#7, s_county#8, s_state#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -127,7 +127,7 @@ Condition : 
isnotnull(s_store_sk#7) (14) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_store_sk#2, ss_net_profit#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -141,7 +141,7 @@ Condition : (isnotnull(ss_store_sk#2) AND isnotnull(ss_sold_date_sk#1)) (17) Scan parquet default.store Output [2]: [s_store_sk#7, s_state#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt index da6ded3e2e6c9..1e10cb8da5d09 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt @@ -83,7 +83,7 @@ TakeOrderedAndProject (79) (1) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#1, hd_buy_potential#2] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,1001-5000), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -105,7 +105,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)) (6) Scan parquet default.catalog_sales Output [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)] ReadSchema: struct @@ -128,7 +128,7 @@ Input [9]: [hd_demo_sk#1, cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk (11) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#12, cd_marital_status#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_marital_status), EqualTo(cd_marital_status,M), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -159,7 +159,7 @@ Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_item_sk (18) Scan parquet default.date_dim Output [2]: [d_date_sk#15, d_date#16] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not 
included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_date)] ReadSchema: struct @@ -194,7 +194,7 @@ Arguments: [cs_item_sk#8 ASC NULLS FIRST], false, 0 (26) Scan parquet default.item Output [2]: [i_item_sk#19, i_item_desc#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -225,7 +225,7 @@ Input [8]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, (33) Scan parquet default.date_dim Output [2]: [d_date_sk#22, d_week_seq#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -239,7 +239,7 @@ Condition : (isnotnull(d_week_seq#23) AND isnotnull(d_date_sk#22)) (36) Scan parquet default.date_dim Output [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk), IsNotNull(d_week_seq), IsNotNull(d_date)] ReadSchema: struct @@ -291,7 +291,7 @@ Arguments: [cs_item_sk#8 ASC NULLS FIRST, d_date_sk#22 ASC NULLS FIRST], false, (48) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#31, w_warehouse_name#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -309,7 +309,7 @@ Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint) (52) Scan parquet default.inventory Output [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -349,7 +349,7 @@ Input [11]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_ (61) Scan parquet default.promotion Output [1]: [p_promo_sk#39] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_promo_sk)] ReadSchema: struct @@ -384,7 +384,7 @@ Arguments: [cs_item_sk#8 ASC NULLS FIRST, cs_order_number#10 ASC NULLS FIRST], f (69) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#42, cr_order_number#43] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72/explain.txt index d5b836aeb8004..0c5c4aecec1b4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72/explain.txt @@ -74,7 +74,7 @@ TakeOrderedAndProject (70) (1) Scan parquet default.catalog_sales Output [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)] ReadSchema: struct @@ -88,7 +88,7 @@ Condition : (((((isnotnull(cs_quantity#8) AND isnotnull(cs_item_sk#5)) AND isnot (4) Scan parquet default.inventory Output [4]: [inv_date_sk#9, inv_item_sk#10, inv_warehouse_sk#11, inv_quantity_on_hand#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/inventory] +Location [not included in comparison]/{warehouse_dir}/inventory] PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] ReadSchema: struct @@ -115,7 +115,7 @@ Input [12]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h (10) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#14, w_warehouse_name#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/warehouse] +Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct @@ -142,7 +142,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h (16) Scan parquet default.item Output [2]: [i_item_sk#17, i_item_desc#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct @@ -169,7 +169,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h (22) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#20, cd_marital_status#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer_demographics] +Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_marital_status), EqualTo(cd_marital_status,M), IsNotNull(cd_demo_sk)] ReadSchema: struct @@ -200,7 +200,7 @@ Input [11]: [cs_sold_date_sk#1, 
cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_h (29) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#23, hd_buy_potential#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/household_demographics] +Location [not included in comparison]/{warehouse_dir}/household_demographics] PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,1001-5000), IsNotNull(hd_demo_sk)] ReadSchema: struct @@ -231,7 +231,7 @@ Input [10]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_hdemo_sk#4, cs_item_s (36) Scan parquet default.date_dim Output [4]: [d_date_sk#26, d_date#27, d_week_seq#28, d_year#29] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk), IsNotNull(d_week_seq), IsNotNull(d_date)] ReadSchema: struct @@ -262,7 +262,7 @@ Input [11]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, (43) Scan parquet default.date_dim Output [2]: [d_date_sk#31, d_week_seq#32] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] ReadSchema: struct @@ -289,7 +289,7 @@ Input [11]: [cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, (49) Scan parquet default.date_dim Output [2]: [d_date_sk#34, d_date#35] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date_sk), IsNotNull(d_date)] ReadSchema: struct @@ -316,7 +316,7 @@ Input [10]: [cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, (55) Scan parquet default.promotion Output [1]: [p_promo_sk#37] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_promo_sk)] ReadSchema: struct @@ -343,7 +343,7 @@ Input [7]: [cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, w_warehouse_name#15, (61) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#39, cr_order_number#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74.sf100/explain.txt index c295082463335..2d4b595efeff6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74.sf100/explain.txt @@ -90,7 +90,7 @@ TakeOrderedAndProject (86) (1) Scan parquet 
default.store_sales Output [3]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_net_paid#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -104,7 +104,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_year#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), In(d_year, [2001,2002]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -139,7 +139,7 @@ Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 (12) Scan parquet default.customer Output [4]: [c_customer_sk#8, c_customer_id#9, c_first_name#10, c_last_name#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)] ReadSchema: struct @@ -200,7 +200,7 @@ Arguments: [customer_id#17 ASC NULLS FIRST], false, 0 (25) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_customer_sk#2, ss_net_paid#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -214,7 +214,7 @@ Condition : (isnotnull(ss_customer_sk#2) AND isnotnull(ss_sold_date_sk#1)) (28) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_year#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), In(d_year, [2001,2002]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -296,7 +296,7 @@ Join condition: None (46) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#31, ws_bill_customer_sk#32, ws_net_paid#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -389,7 +389,7 @@ Input [8]: [customer_id#17, year_total#18, customer_id#26, customer_first_name#2 (67) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#31, ws_bill_customer_sk#32, ws_net_paid#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74/explain.txt index 53d52dd20bfbe..f58fb1343a186 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q74/explain.txt @@ -76,7 +76,7 @@ TakeOrderedAndProject (72) (1) Scan parquet default.customer Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)] ReadSchema: struct @@ -90,7 +90,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2)) (4) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#5, ss_customer_sk#6, ss_net_paid#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -117,7 +117,7 @@ Input [7]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, ss_ (10) Scan parquet default.date_dim Output [2]: [d_date_sk#9, d_year#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), In(d_year, [2001,2002]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -166,7 +166,7 @@ Condition : (isnotnull(year_total#17) AND (year_total#17 > 0.00)) (20) Scan parquet default.customer Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)] ReadSchema: struct @@ -192,7 +192,7 @@ Input [7]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4, ss_ (26) Scan parquet default.date_dim Output [2]: [d_date_sk#9, d_year#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), In(d_year, [2001,2002]), IsNotNull(d_date_sk)] ReadSchema: struct @@ -246,7 +246,7 @@ Join condition: None (37) Scan parquet default.customer Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)] ReadSchema: struct @@ -260,7 +260,7 @@ Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_customer_id#2)) (40) Scan parquet 
default.web_sales Output [3]: [ws_sold_date_sk#28, ws_bill_customer_sk#29, ws_net_paid#30] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -338,7 +338,7 @@ Input [8]: [customer_id#16, year_total#17, customer_id#23, customer_first_name#2 (57) Scan parquet default.customer Output [4]: [c_customer_sk#1, c_customer_id#2, c_first_name#3, c_last_name#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/customer] +Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_customer_id)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt index 414674df8a5a1..f947a62f173a6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt @@ -142,7 +142,7 @@ TakeOrderedAndProject (138) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -156,7 +156,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) (4) Scan parquet default.item Output [6]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_category#10, i_manufact_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Books), IsNotNull(i_item_sk), IsNotNull(i_class_id), IsNotNull(i_brand_id), IsNotNull(i_category_id), IsNotNull(i_manufact_id)] ReadSchema: struct @@ -187,7 +187,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (11) Scan parquet default.date_dim Output [2]: [d_date_sk#13, d_year#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)] ReadSchema: struct @@ -222,7 +222,7 @@ Arguments: [cs_order_number#3 ASC NULLS FIRST, cs_item_sk#2 ASC NULLS FIRST], fa (19) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in 
comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -253,7 +253,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (26) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -299,7 +299,7 @@ Arguments: [cast(ss_ticket_number#26 as bigint) ASC NULLS FIRST, cast(ss_item_sk (37) Scan parquet default.store_returns Output [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -350,7 +350,7 @@ Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact (48) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -396,7 +396,7 @@ Arguments: [cast(ws_order_number#40 as bigint) ASC NULLS FIRST, cast(ws_item_sk# (59) Scan parquet default.web_returns Output [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct @@ -473,7 +473,7 @@ Arguments: [i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_catego (75) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -499,7 +499,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (81) Scan parquet default.date_dim Output [2]: [d_date_sk#67, d_year#68] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -550,7 +550,7 @@ Input [13]: [cs_item_sk#2, 
cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (93) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -632,7 +632,7 @@ Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manuf (112) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt index 7306dc2be142e..1653191ed3ab8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt @@ -121,7 +121,7 @@ TakeOrderedAndProject (117) (1) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -135,7 +135,7 @@ Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) (4) Scan parquet default.item Output [6]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_category#10, i_manufact_id#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Books), IsNotNull(i_item_sk), IsNotNull(i_category_id), IsNotNull(i_brand_id), IsNotNull(i_class_id), IsNotNull(i_manufact_id)] ReadSchema: struct @@ -166,7 +166,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (11) Scan parquet default.date_dim Output [2]: [d_date_sk#13, d_year#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_date_sk)] ReadSchema: struct @@ -193,7 +193,7 @@ Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (17) Scan parquet default.catalog_returns Output [4]: [cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct @@ -220,7 +220,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (23) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -258,7 +258,7 @@ Input [11]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity (32) Scan parquet default.store_returns Output [4]: [sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -305,7 +305,7 @@ Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact (42) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct @@ -343,7 +343,7 @@ Input [11]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity# (51) Scan parquet default.web_returns Output [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct @@ -408,7 +408,7 @@ Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact (64) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -434,7 +434,7 @@ Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (70) Scan parquet default.date_dim Output [2]: [d_date_sk#63, d_year#64] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), 
EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct @@ -473,7 +473,7 @@ Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price# (79) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -543,7 +543,7 @@ Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manuf (95) Scan parquet default.web_sales Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt index dfeee524d5e06..ac49cc0548c08 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt @@ -112,7 +112,7 @@ TakeOrderedAndProject (108) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -126,7 +126,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_date#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-09-03), IsNotNull(d_date_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Input [5]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (11) Scan parquet default.store Output [1]: [s_store_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -202,7 +202,7 @@ Results [3]: [s_store_sk#8, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3)) (20) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#19, sr_store_sk#20, sr_return_amt#21, sr_net_loss#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] 
+Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -271,7 +271,7 @@ Input [6]: [s_store_sk#8, sales#17, profit#18, s_store_sk#23, returns#31, profit (35) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#38, cs_call_center_sk#39, cs_ext_sales_price#40, cs_net_profit#41] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -315,7 +315,7 @@ Results [3]: [cs_call_center_sk#39, MakeDecimal(sum(UnscaledValue(cs_ext_sales_p (44) Scan parquet default.catalog_returns Output [3]: [cr_returned_date_sk#51, cr_return_amount#52, cr_net_loss#53] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk)] ReadSchema: struct @@ -370,7 +370,7 @@ Input [5]: [cs_call_center_sk#39, sales#49, profit#50, returns#61, profit_loss#6 (56) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net_profit#70] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -396,7 +396,7 @@ Input [5]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net (62) Scan parquet default.web_page Output [1]: [wp_web_page_sk#71] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_page] +Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_web_page_sk)] ReadSchema: struct @@ -441,7 +441,7 @@ Results [3]: [wp_web_page_sk#71, MakeDecimal(sum(UnscaledValue(ws_ext_sales_pric (71) Scan parquet default.web_returns Output [4]: [wr_returned_date_sk#82, wr_web_page_sk#83, wr_return_amt#84, wr_net_loss#85] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_web_page_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt index 75ed1713c2628..c18698ebc5b45 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt @@ -112,7 +112,7 @@ TakeOrderedAndProject (108) (1) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profit#4] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk)] ReadSchema: struct @@ -126,7 +126,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#5, d_date#6] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-09-03), IsNotNull(d_date_sk)] ReadSchema: struct @@ -157,7 +157,7 @@ Input [5]: [ss_sold_date_sk#1, ss_store_sk#2, ss_ext_sales_price#3, ss_net_profi (11) Scan parquet default.store Output [1]: [s_store_sk#8] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -202,7 +202,7 @@ Results [3]: [s_store_sk#8, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3)) (20) Scan parquet default.store_returns Output [4]: [sr_returned_date_sk#19, sr_store_sk#20, sr_return_amt#21, sr_net_loss#22] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_returned_date_sk), IsNotNull(sr_store_sk)] ReadSchema: struct @@ -271,7 +271,7 @@ Input [6]: [s_store_sk#8, sales#17, profit#18, s_store_sk#23, returns#31, profit (35) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#38, cs_call_center_sk#39, cs_ext_sales_price#40, cs_net_profit#41] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct @@ -319,7 +319,7 @@ Arguments: IdentityBroadcastMode, [id=#51] (45) Scan parquet default.catalog_returns Output [3]: [cr_returned_date_sk#52, cr_return_amount#53, cr_net_loss#54] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_returned_date_sk)] ReadSchema: struct @@ -370,7 +370,7 @@ Input [5]: [cs_call_center_sk#39, sales#49, profit#50, returns#62, profit_loss#6 (56) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net_profit#70] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_page_sk)] ReadSchema: struct @@ -396,7 +396,7 @@ Input [5]: [ws_sold_date_sk#67, ws_web_page_sk#68, ws_ext_sales_price#69, ws_net (62) Scan parquet default.web_page 
Output [1]: [wp_web_page_sk#71] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_page] +Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_web_page_sk)] ReadSchema: struct @@ -441,7 +441,7 @@ Results [3]: [wp_web_page_sk#71, MakeDecimal(sum(UnscaledValue(ws_ext_sales_pric (71) Scan parquet default.web_returns Output [4]: [wr_returned_date_sk#82, wr_web_page_sk#83, wr_return_amt#84, wr_net_loss#85] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_returned_date_sk), IsNotNull(wr_web_page_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78.sf100/explain.txt index dc2975b51f0bb..9c328f4d3d49a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78.sf100/explain.txt @@ -74,7 +74,7 @@ TakeOrderedAndProject (70) (1) Scan parquet default.store_sales Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#4, ss_quantity#5, ss_wholesale_cost#6, ss_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk)] ReadSchema: struct @@ -96,7 +96,7 @@ Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk# (6) Scan parquet default.store_returns Output [2]: [sr_item_sk#9, sr_ticket_number#10] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -131,7 +131,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number# (14) Scan parquet default.date_dim Output [2]: [d_date_sk#12, d_year#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -180,7 +180,7 @@ Arguments: [ss_sold_year#25 ASC NULLS FIRST, ss_item_sk#2 ASC NULLS FIRST, ss_cu (24) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_order_number#32, cs_quantity#33, cs_wholesale_cost#34, cs_sales_price#35] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), 
IsNotNull(cs_bill_customer_sk)] ReadSchema: struct @@ -202,7 +202,7 @@ Arguments: [cs_order_number#32 ASC NULLS FIRST, cs_item_sk#31 ASC NULLS FIRST], (29) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#37, cr_order_number#38] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct @@ -284,7 +284,7 @@ Input [12]: [ss_sold_year#25, ss_item_sk#2, ss_customer_sk#3, ss_qty#26, ss_wc#2 (47) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#55, ws_item_sk#56, ws_bill_customer_sk#57, ws_order_number#58, ws_quantity#59, ws_wholesale_cost#60, ws_sales_price#61] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk), IsNotNull(ws_item_sk)] ReadSchema: struct @@ -306,7 +306,7 @@ Arguments: [cast(ws_order_number#58 as bigint) ASC NULLS FIRST, cast(ws_item_sk# (52) Scan parquet default.web_returns Output [2]: [wr_item_sk#63, wr_order_number#64] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78/explain.txt index d9a62f16e0475..11f3971bb6855 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q78/explain.txt @@ -64,7 +64,7 @@ TakeOrderedAndProject (60) (1) Scan parquet default.store_sales Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#4, ss_quantity#5, ss_wholesale_cost#6, ss_sales_price#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk)] ReadSchema: struct @@ -78,7 +78,7 @@ Condition : ((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) AND isno (4) Scan parquet default.store_returns Output [2]: [sr_item_sk#8, sr_ticket_number#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk)] ReadSchema: struct @@ -109,7 +109,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number# (11) Scan parquet default.date_dim Output [2]: [d_date_sk#11, d_year#12] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct @@ -154,7 +154,7 @@ Results [6]: [d_year#12 AS ss_sold_year#24, ss_item_sk#2, ss_customer_sk#3, sum( (20) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#28, ws_item_sk#29, ws_bill_customer_sk#30, ws_order_number#31, ws_quantity#32, ws_wholesale_cost#33, ws_sales_price#34] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_bill_customer_sk), IsNotNull(ws_item_sk)] ReadSchema: struct @@ -168,7 +168,7 @@ Condition : ((isnotnull(ws_sold_date_sk#28) AND isnotnull(ws_bill_customer_sk#30 (23) Scan parquet default.web_returns Output [2]: [wr_item_sk#35, wr_order_number#36] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct @@ -246,7 +246,7 @@ Input [12]: [ss_sold_year#24, ss_item_sk#2, ss_customer_sk#3, ss_qty#25, ss_wc#2 (40) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#54, cs_bill_customer_sk#55, cs_item_sk#56, cs_order_number#57, cs_quantity#58, cs_wholesale_cost#59, cs_sales_price#60] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_item_sk), IsNotNull(cs_bill_customer_sk)] ReadSchema: struct @@ -260,7 +260,7 @@ Condition : ((isnotnull(cs_sold_date_sk#54) AND isnotnull(cs_item_sk#56)) AND is (43) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#61, cr_order_number#62] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_order_number), IsNotNull(cr_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt index 7eead39d2d1d4..e6210f4a26281 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt @@ -129,7 +129,7 @@ TakeOrderedAndProject (125) (1) Scan parquet default.store_sales Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ticket_number#5, ss_ext_sales_price#6, ss_net_profit#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), 
IsNotNull(ss_store_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)] ReadSchema: struct @@ -151,7 +151,7 @@ Arguments: [cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number# (6) Scan parquet default.store_returns Output [4]: [sr_item_sk#9, sr_ticket_number#10, sr_return_amt#11, sr_net_loss#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -182,7 +182,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_t (13) Scan parquet default.item Output [2]: [i_item_sk#14, i_current_price#15] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), GreaterThan(i_current_price,50.00), IsNotNull(i_item_sk)] ReadSchema: struct @@ -213,7 +213,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ex (20) Scan parquet default.date_dim Output [2]: [d_date_sk#17, d_date#18] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-09-03), IsNotNull(d_date_sk)] ReadSchema: struct @@ -244,7 +244,7 @@ Input [8]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price# (27) Scan parquet default.promotion Output [2]: [p_promo_sk#20, p_channel_tv#21] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)] ReadSchema: struct @@ -275,7 +275,7 @@ Input [7]: [ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, (34) Scan parquet default.store Output [2]: [s_store_sk#23, s_store_id#24] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -320,7 +320,7 @@ Results [5]: [store channel AS channel#40, concat(store, s_store_id#24) AS id#41 (43) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_item_sk#47, cs_promo_sk#48, cs_order_number#49, cs_ext_sales_price#50, cs_net_profit#51] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk), IsNotNull(cs_item_sk), IsNotNull(cs_promo_sk)] ReadSchema: struct @@ -342,7 +342,7 @@ Arguments: [cs_item_sk#47 ASC NULLS FIRST, cs_order_number#49 ASC NULLS FIRST], (48) Scan parquet 
default.catalog_returns Output [4]: [cr_item_sk#53, cr_order_number#54, cr_return_amount#55, cr_net_loss#56] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -409,7 +409,7 @@ Input [7]: [cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net (64) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#58, cp_catalog_page_id#59] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/catalog_page] +Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct @@ -454,7 +454,7 @@ Results [5]: [catalog channel AS channel#75, concat(catalog_page, cp_catalog_pag (73) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#80, ws_item_sk#81, ws_web_site_sk#82, ws_promo_sk#83, ws_order_number#84, ws_ext_sales_price#85, ws_net_profit#86] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_item_sk), IsNotNull(ws_promo_sk)] ReadSchema: struct @@ -476,7 +476,7 @@ Arguments: [cast(ws_item_sk#81 as bigint) ASC NULLS FIRST, cast(ws_order_number# (78) Scan parquet default.web_returns Output [4]: [wr_item_sk#88, wr_order_number#89, wr_return_amt#90, wr_net_loss#91] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct @@ -543,7 +543,7 @@ Input [7]: [ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_pro (94) Scan parquet default.web_site Output [2]: [web_site_sk#93, web_site_id#94] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt index 9f29340a501cb..ddfdeadcf8eb3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt @@ -120,7 +120,7 @@ TakeOrderedAndProject (116) (1) Scan parquet default.store_sales Output [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ticket_number#5, ss_ext_sales_price#6, ss_net_profit#7] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: 
[IsNotNull(ss_sold_date_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)] ReadSchema: struct @@ -134,7 +134,7 @@ Condition : (((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND is (4) Scan parquet default.store_returns Output [4]: [sr_item_sk#8, sr_ticket_number#9, sr_return_amt#10, sr_net_loss#11] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_returns] +Location [not included in comparison]/{warehouse_dir}/store_returns] PushedFilters: [IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number)] ReadSchema: struct @@ -161,7 +161,7 @@ Input [11]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_t (10) Scan parquet default.date_dim Output [2]: [d_date_sk#13, d_date#14] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-09-03), IsNotNull(d_date_sk)] ReadSchema: struct @@ -192,7 +192,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ex (17) Scan parquet default.store Output [2]: [s_store_sk#16, s_store_id#17] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store] +Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_store_sk)] ReadSchema: struct @@ -219,7 +219,7 @@ Input [9]: [ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss (23) Scan parquet default.item Output [2]: [i_item_sk#19, i_current_price#20] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), GreaterThan(i_current_price,50.00), IsNotNull(i_item_sk)] ReadSchema: struct @@ -250,7 +250,7 @@ Input [8]: [ss_item_sk#2, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, (30) Scan parquet default.promotion Output [2]: [p_promo_sk#22, p_channel_tv#23] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/promotion] +Location [not included in comparison]/{warehouse_dir}/promotion] PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)] ReadSchema: struct @@ -299,7 +299,7 @@ Results [5]: [store channel AS channel#39, concat(store, s_store_id#17) AS id#40 (40) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#44, cs_catalog_page_sk#45, cs_item_sk#46, cs_promo_sk#47, cs_order_number#48, cs_ext_sales_price#49, cs_net_profit#50] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_sales] +Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk), IsNotNull(cs_catalog_page_sk), IsNotNull(cs_item_sk), IsNotNull(cs_promo_sk)] ReadSchema: struct @@ -313,7 +313,7 @@ Condition : (((isnotnull(cs_sold_date_sk#44) AND isnotnull(cs_catalog_page_sk#45 (43) Scan parquet default.catalog_returns Output [4]: 
[cr_item_sk#51, cr_order_number#52, cr_return_amount#53, cr_net_loss#54] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_returns] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] PushedFilters: [IsNotNull(cr_item_sk), IsNotNull(cr_order_number)] ReadSchema: struct @@ -352,7 +352,7 @@ Input [9]: [cs_sold_date_sk#44, cs_catalog_page_sk#45, cs_item_sk#46, cs_promo_s (52) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#56, cp_catalog_page_id#57] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/catalog_page] +Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct @@ -421,7 +421,7 @@ Results [5]: [catalog channel AS channel#73, concat(catalog_page, cp_catalog_pag (67) Scan parquet default.web_sales Output [7]: [ws_sold_date_sk#78, ws_item_sk#79, ws_web_site_sk#80, ws_promo_sk#81, ws_order_number#82, ws_ext_sales_price#83, ws_net_profit#84] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_item_sk), IsNotNull(ws_promo_sk)] ReadSchema: struct @@ -435,7 +435,7 @@ Condition : (((isnotnull(ws_sold_date_sk#78) AND isnotnull(ws_web_site_sk#80)) A (70) Scan parquet default.web_returns Output [4]: [wr_item_sk#85, wr_order_number#86, wr_return_amt#87, wr_net_loss#88] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_returns] +Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number)] ReadSchema: struct @@ -474,7 +474,7 @@ Input [9]: [ws_sold_date_sk#78, ws_item_sk#79, ws_web_site_sk#80, ws_promo_sk#81 (79) Scan parquet default.web_site Output [2]: [web_site_sk#90, web_site_id#91] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_site] +Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt index d331a2b0a2a7f..f61c214640e33 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt @@ -46,7 +46,7 @@ TakeOrderedAndProject (42) (1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)] ReadSchema: struct @@ -60,7 +60,7 @@ Condition : (isnotnull(ws_sold_date_sk#1) AND 
isnotnull(ws_item_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -91,7 +91,7 @@ Input [4]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3, d_date_sk#4] (11) Scan parquet default.item Output [3]: [i_item_sk#7, i_class#8, i_category#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt index c1c10c53bce82..f61c214640e33 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt @@ -46,7 +46,7 @@ TakeOrderedAndProject (42) (1) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/web_sales] +Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk), IsNotNull(ws_item_sk)] ReadSchema: struct @@ -60,7 +60,7 @@ Condition : (isnotnull(ws_sold_date_sk#1) AND isnotnull(ws_item_sk#2)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_month_seq#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1212), LessThanOrEqual(d_month_seq,1223), IsNotNull(d_date_sk)] ReadSchema: struct @@ -91,7 +91,7 @@ Input [4]: [ws_sold_date_sk#1, ws_item_sk#2, ws_net_paid#3, d_date_sk#4] (11) Scan parquet default.item Output [3]: [i_item_sk#7, i_class#8, i_category#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98.sf100/explain.txt index b9542957f9f0a..6fa7b04a3b463 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98.sf100/explain.txt @@ -32,7 +32,7 @@ (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex 
[file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -46,7 +46,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Scan parquet default.date_dim Output [2]: [d_date_sk#4, d_date#5] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)] ReadSchema: struct @@ -85,7 +85,7 @@ Arguments: [ss_item_sk#2 ASC NULLS FIRST], false, 0 (13) Scan parquet default.item Output [6]: [i_item_sk#8, i_item_id#9, i_item_desc#10, i_current_price#11, i_class#12, i_category#13] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilityWithStatsSuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)] ReadSchema: struct diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98/explain.txt index 017f79da89705..45cd61a0556cb 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q98/explain.txt @@ -29,7 +29,7 @@ (1) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/store_sales] +Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct @@ -43,7 +43,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Scan parquet default.item Output [6]: [i_item_sk#4, i_item_id#5, i_item_desc#6, i_current_price#7, i_class#8, i_category#9] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/item] +Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [In(i_category, [Sports,Books,Home]), IsNotNull(i_item_sk)] ReadSchema: struct @@ -70,7 +70,7 @@ Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4, (10) Scan parquet default.date_dim Output [2]: [d_date_sk#11, d_date#12] Batched: true -Location: InMemoryFileIndex [file:/Users/yi.wu/IdeaProjects/spark/sql/core/spark-warehouse/org.apache.spark.sql.TPCDSV2_7_PlanStabilitySuite/date_dim] +Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-22), LessThanOrEqual(d_date,1999-03-24), IsNotNull(d_date_sk)] ReadSchema: struct From 5c077f05805bda1d0db3476ebe32624034d4066c Mon Sep 17 00:00:00 2001 From: Udbhav30 Date: Tue, 25 Aug 2020 23:38:43 -0700 Subject: [PATCH 40/54] [SPARK-32481][CORE][SQL] Support truncate table to move data to trash ### What changes were 
proposed in this pull request? Instead of deleting the data outright, we can move it to the trash. Based on the configuration provided by the user, it will later be deleted permanently from the trash. ### Why are the changes needed? Instead of directly deleting the data, we can give users the flexibility to move it to the trash first and delete it permanently afterwards. ### Does this PR introduce _any_ user-facing change? Yes. After `TRUNCATE TABLE`, the data is no longer deleted permanently right away: it is first moved to the trash and, once the configured retention time has passed, deleted permanently. ### How was this patch tested? New unit tests added. Closes #29387 from Udbhav30/tuncateTrash. Authored-by: Udbhav30 Signed-off-by: Dongjoon Hyun --- .../scala/org/apache/spark/util/Utils.scala | 23 +++++- .../apache/spark/sql/internal/SQLConf.scala | 13 ++++ .../spark/sql/execution/command/tables.scala | 4 +- .../sql/execution/command/DDLSuite.scala | 72 +++++++++++++++++++ 4 files changed, 110 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala index 35d60bb514405..a336c1260d344 100644 --- a/core/src/main/scala/org/apache/spark/util/Utils.scala +++ b/core/src/main/scala/org/apache/spark/util/Utils.scala @@ -50,7 +50,7 @@ import com.google.common.net.InetAddresses import org.apache.commons.codec.binary.Hex import org.apache.commons.lang3.SystemUtils import org.apache.hadoop.conf.Configuration -import org.apache.hadoop.fs.{FileSystem, FileUtil, Path} +import org.apache.hadoop.fs.{FileSystem, FileUtil, Path, Trash} import org.apache.hadoop.io.compress.{CompressionCodecFactory, SplittableCompressionCodec} import org.apache.hadoop.security.UserGroupInformation import org.apache.hadoop.yarn.conf.YarnConfiguration @@ -269,6 +269,27 @@ private[spark] object Utils extends Logging { file.setExecutable(true, true) } + /** + * Move data to trash if 'spark.sql.truncate.trash.enabled' is true + */ + def moveToTrashIfEnabled( + fs: FileSystem, + partitionPath: Path, + isTrashEnabled: Boolean, + hadoopConf: Configuration): Boolean = { + if (isTrashEnabled) { + logDebug(s"will move data ${partitionPath.toString} to trash") + val isSuccess = Trash.moveToAppropriateTrash(fs, partitionPath, hadoopConf) + if (!isSuccess) { + logWarning(s"Failed to move data ${partitionPath.toString} to trash") + return fs.delete(partitionPath, true) + } + isSuccess + } else { + fs.delete(partitionPath, true) + } + } + /** * Create a directory given the abstract pathname * @return true, if the directory is successfully created; otherwise, return false. diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index 3e82b8e12df02..c9db7b1e8960a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -2722,6 +2722,17 @@ object SQLConf { .booleanConf .createWithDefault(false) + val TRUNCATE_TRASH_ENABLED = + buildConf("spark.sql.truncate.trash.enabled") + .doc("This configuration decides, when truncating a table, whether data files will be " + + "moved to the trash directory or deleted permanently. The trash retention time is " + + "controlled by fs.trash.interval, and by default, the server-side configuration value " + + "takes precedence over the client-side one. 
Note that if fs.trash.interval is non-positive, " + + "this will be a no-op and log a warning message.") + .version("3.1.0") + .booleanConf + .createWithDefault(false) + /** * Holds information about keys that have been deprecated. * @@ -3334,6 +3345,8 @@ class SQLConf extends Serializable with Logging { def legacyPathOptionBehavior: Boolean = getConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR) + def truncateTrashEnabled: Boolean = getConf(SQLConf.TRUNCATE_TRASH_ENABLED) + /** ********************** SQLConf functionality methods ************ */ /** Set Spark SQL configuration properties. */ diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 7aebdddf1d59c..7aebdd7e57293 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -48,6 +48,7 @@ import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetDataSourceV2 import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} import org.apache.spark.sql.types._ import org.apache.spark.sql.util.SchemaUtils +import org.apache.spark.util.Utils /** * A command to create a table with the same definition of the given existing table. @@ -489,6 +490,7 @@ case class TruncateTableCommand( } val hadoopConf = spark.sessionState.newHadoopConf() val ignorePermissionAcl = SQLConf.get.truncateTableIgnorePermissionAcl + val isTrashEnabled = SQLConf.get.truncateTrashEnabled locations.foreach { location => if (location.isDefined) { val path = new Path(location.get) @@ -513,7 +515,7 @@ case class TruncateTableCommand( } } - fs.delete(path, true) + Utils.moveToTrashIfEnabled(fs, path, isTrashEnabled, hadoopConf) // We should keep original permission/acl of the path. // For owner/group, only super-user can set it, for example on HDFS. 
Because diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index 17857a6ce173d..d6d58a833dd7d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -3101,6 +3101,78 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { assert(spark.sessionState.catalog.isRegisteredFunction(rand)) } } + + test("SPARK-32481 Move data to trash on truncate table if enabled") { + val trashIntervalKey = "fs.trash.interval" + withTable("tab1") { + withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "true") { + sql("CREATE TABLE tab1 (col INT) USING parquet") + sql("INSERT INTO tab1 SELECT 1") + // scalastyle:off hadoopconfiguration + val hadoopConf = spark.sparkContext.hadoopConfiguration + // scalastyle:on hadoopconfiguration + val originalValue = hadoopConf.get(trashIntervalKey, "0") + val tablePath = new Path(spark.sessionState.catalog + .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) + + val fs = tablePath.getFileSystem(hadoopConf) + val trashRoot = fs.getTrashRoot(tablePath) + assert(!fs.exists(trashRoot)) + try { + hadoopConf.set(trashIntervalKey, "5") + sql("TRUNCATE TABLE tab1") + } finally { + hadoopConf.set(trashIntervalKey, originalValue) + } + assert(fs.exists(trashRoot)) + fs.delete(trashRoot, true) + } + } + } + + test("SPARK-32481 delete data permanently on truncate table if trash interval is non-positive") { + val trashIntervalKey = "fs.trash.interval" + withTable("tab1") { + withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "true") { + sql("CREATE TABLE tab1 (col INT) USING parquet") + sql("INSERT INTO tab1 SELECT 1") + // scalastyle:off hadoopconfiguration + val hadoopConf = spark.sparkContext.hadoopConfiguration + // scalastyle:on hadoopconfiguration + val originalValue = hadoopConf.get(trashIntervalKey, "0") + val tablePath = new Path(spark.sessionState.catalog + .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) + + val fs = tablePath.getFileSystem(hadoopConf) + val trashRoot = fs.getTrashRoot(tablePath) + assert(!fs.exists(trashRoot)) + try { + hadoopConf.set(trashIntervalKey, "0") + sql("TRUNCATE TABLE tab1") + } finally { + hadoopConf.set(trashIntervalKey, originalValue) + } + assert(!fs.exists(trashRoot)) + } + } + } + + test("SPARK-32481 Do not move data to trash on truncate table if disabled") { + withTable("tab1") { + withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "false") { + sql("CREATE TABLE tab1 (col INT) USING parquet") + sql("INSERT INTO tab1 SELECT 1") + val hadoopConf = spark.sessionState.newHadoopConf() + val tablePath = new Path(spark.sessionState.catalog + .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) + + val fs = tablePath.getFileSystem(hadoopConf) + val trashRoot = fs.getTrashRoot(tablePath) + sql("TRUNCATE TABLE tab1") + assert(!fs.exists(trashRoot)) + } + } + } } object FakeLocalFsFileSystem { From a8b568800e64f6a163da28e5e53441f84355df14 Mon Sep 17 00:00:00 2001 From: Yuming Wang Date: Wed, 26 Aug 2020 06:57:43 +0000 Subject: [PATCH 41/54] [SPARK-32659][SQL] Fix the data issue when pruning DPP on non-atomic type ### What changes were proposed in this pull request? Use `InSet` expression to fix data issue when pruning DPP on non-atomic type. 
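The root cause is visible in the `InSubqueryExec` diff below: for struct-typed keys, the old code kept the collected subquery rows whole (`rows.toArray`) instead of extracting their single column, so the runtime probe compared the struct value produced by the join key against the one-column row wrapping it, and membership always failed. Switching to an extracted `Set[Any]` probed through `InSet` fixes both the unwrapping and the null handling. A minimal plain-Scala analogy of the mismatch (the `WrappedRow` class and the `Seq[Long]` stand-ins below are illustrative, not Spark internals):

```scala
// WrappedRow stands in for a one-column result row collected from the
// subquery; a struct value is modeled as a Seq[Long].
case class WrappedRow(field: Any)

object InSetAnalogy extends App {
  val collected = Seq(WrappedRow(Seq(0L)), WrappedRow(Seq(1L)))
  val probe: Any = Seq(0L) // evaluating the join key yields the struct value itself

  // Old behavior for struct keys: keep the rows whole, then probe with the value.
  val oldResult: Array[Any] = collected.toArray
  println(oldResult.contains(probe)) // false: WrappedRow(Seq(0)) != Seq(0)

  // Fixed behavior: always unwrap the single column and probe a Set, as InSet does.
  val newResult: Set[Any] = collected.map(_.field).toSet
  println(newResult.contains(probe)) // true
}
```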
For example, the following reproduces the bug: ```scala spark.range(1000) .select(col("id"), col("id").as("k")) .write .partitionBy("k") .format("parquet") .mode("overwrite") .saveAsTable("df1"); spark.range(100) .select(col("id"), col("id").as("k")) .write .partitionBy("k") .format("parquet") .mode("overwrite") .saveAsTable("df2") spark.sql("set spark.sql.optimizer.dynamicPartitionPruning.fallbackFilterRatio=2") spark.sql("set spark.sql.optimizer.dynamicPartitionPruning.reuseBroadcastOnly=false") spark.sql("SELECT df1.id, df2.k FROM df1 JOIN df2 ON struct(df1.k) = struct(df2.k) AND df2.id < 2").show ``` It should return two records, but it returns an empty result. ### Why are the changes needed? Fix a data correctness issue. ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? Added a new unit test. Closes #29475 from wangyum/SPARK-32659. Authored-by: Yuming Wang Signed-off-by: Wenchen Fan --- .../apache/spark/sql/execution/subquery.scala | 21 +++----- .../sql/DynamicPartitionPruningSuite.scala | 51 ++++++++++++++++++- 2 files changed, 57 insertions(+), 15 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala index c2270c57eb941..9d15c76faa1b5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala @@ -114,9 +114,10 @@ case class InSubqueryExec( child: Expression, plan: BaseSubqueryExec, exprId: ExprId, - private var resultBroadcast: Broadcast[Array[Any]] = null) extends ExecSubqueryExpression { + private var resultBroadcast: Broadcast[Set[Any]] = null) extends ExecSubqueryExpression { - @transient private var result: Array[Any] = _ + @transient private var result: Set[Any] = _ + @transient private lazy val inSet = InSet(child, result) override def dataType: DataType = BooleanType override def children: Seq[Expression] = child :: Nil @@ -131,14 +132,11 @@ case class InSubqueryExec( def updateResult(): Unit = { val rows = plan.executeCollect() - result = child.dataType match { - case _: StructType => rows.toArray - case _ => rows.map(_.get(0, child.dataType)) - } + result = rows.map(_.get(0, child.dataType)).toSet resultBroadcast = plan.sqlContext.sparkContext.broadcast(result) } - def values(): Option[Array[Any]] = Option(resultBroadcast).map(_.value) + def values(): Option[Set[Any]] = Option(resultBroadcast).map(_.value) private def prepareResult(): Unit = { require(resultBroadcast != null, s"$this has not finished") @@ -149,17 +147,12 @@ case class InSubqueryExec( override def eval(input: InternalRow): Any = { prepareResult() - val v = child.eval(input) - if (v == null) { - null - } else { - result.contains(v) - } + inSet.eval(input) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { prepareResult() - InSet(child, result.toSet).doGenCode(ctx, ev) + inSet.doGenCode(ctx, ev) } override lazy val canonicalized: InSubqueryExec = { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala index 8b3e9286cb9e2..032135fbf43cd 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql import org.scalatest.GivenWhenThen -import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression} 
+import org.apache.spark.sql.catalyst.expressions.{CodegenObjectFactoryMode, DynamicPruningExpression, Expression} import org.apache.spark.sql.catalyst.plans.ExistenceJoin import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, AdaptiveSparkPlanHelper} @@ -1309,6 +1309,55 @@ abstract class DynamicPartitionPruningSuiteBase ) } } + + test("SPARK-32659: Fix the data issue when pruning DPP on non-atomic type") { + withSQLConf( + SQLConf.DYNAMIC_PARTITION_PRUNING_FALLBACK_FILTER_RATIO.key -> "2", // Make sure insert DPP + SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false") { + withTable("df1", "df2") { + spark.range(1000) + .select(col("id"), col("id").as("k")) + .write + .partitionBy("k") + .format(tableFormat) + .mode("overwrite") + .saveAsTable("df1") + + spark.range(100) + .select(col("id"), col("id").as("k")) + .write + .partitionBy("k") + .format(tableFormat) + .mode("overwrite") + .saveAsTable("df2") + + Seq(CodegenObjectFactoryMode.NO_CODEGEN, + CodegenObjectFactoryMode.CODEGEN_ONLY).foreach { mode => + Seq(true, false).foreach { pruning => + withSQLConf( + SQLConf.CODEGEN_FACTORY_MODE.key -> mode.toString, + SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> s"$pruning") { + val df = sql( + """ + |SELECT df1.id, df2.k + |FROM df1 + | JOIN df2 + | ON struct(df1.k) = struct(df2.k) + | AND df2.id < 2 + |""".stripMargin) + if (pruning) { + checkPartitionPruningPredicate(df, true, false) + } else { + checkPartitionPruningPredicate(df, false, false) + } + + checkAnswer(df, Row(0, 0) :: Row(1, 1) :: Nil) + } + } + } + } + } + } } class DynamicPartitionPruningSuiteAEOff extends DynamicPartitionPruningSuiteBase { From d3304268d3046116d39ec3d54a8e319dce188f36 Mon Sep 17 00:00:00 2001 From: unirt Date: Wed, 26 Aug 2020 10:34:49 -0700 Subject: [PATCH 42/54] [MINOR][PYTHON] Fix typo in a docstring of RDD.toDF ### What changes were proposed in this pull request? Fixes a typo in the docstring of `toDF` ### Why are the changes needed? The third argument of `toDF` is actually `sampleRatio`. Related discussion: https://github.com/apache/spark/pull/12746#discussion-diff-62704834 ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? This patch doesn't affect any logic, so existing tests should cover it. Closes #29551 from unirt/minor_fix_docs. Authored-by: unirt Signed-off-by: Dongjoon Hyun --- python/pyspark/sql/session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py index bba51791ec9a4..8ca6e41a9b940 100644 --- a/python/pyspark/sql/session.py +++ b/python/pyspark/sql/session.py @@ -43,7 +43,7 @@ def toDF(self, schema=None, sampleRatio=None): This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)`` :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns - :param samplingRatio: the sample ratio of rows used for inferring + :param sampleRatio: the sample ratio of rows used for inferring :return: a DataFrame >>> rdd.toDF().collect() From 2dee4352a0a3578818f1e364ee324fc012fbb91d Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Wed, 26 Aug 2020 11:24:35 -0700 Subject: [PATCH 43/54] Revert "[SPARK-32481][CORE][SQL] Support truncate table to move data to trash" This reverts commit 5c077f05805bda1d0db3476ebe32624034d4066c. 
--- .../scala/org/apache/spark/util/Utils.scala | 23 +----- .../apache/spark/sql/internal/SQLConf.scala | 13 ---- .../spark/sql/execution/command/tables.scala | 4 +- .../sql/execution/command/DDLSuite.scala | 72 ------------------- 4 files changed, 2 insertions(+), 110 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala index a336c1260d344..35d60bb514405 100644 --- a/core/src/main/scala/org/apache/spark/util/Utils.scala +++ b/core/src/main/scala/org/apache/spark/util/Utils.scala @@ -50,7 +50,7 @@ import com.google.common.net.InetAddresses import org.apache.commons.codec.binary.Hex import org.apache.commons.lang3.SystemUtils import org.apache.hadoop.conf.Configuration -import org.apache.hadoop.fs.{FileSystem, FileUtil, Path, Trash} +import org.apache.hadoop.fs.{FileSystem, FileUtil, Path} import org.apache.hadoop.io.compress.{CompressionCodecFactory, SplittableCompressionCodec} import org.apache.hadoop.security.UserGroupInformation import org.apache.hadoop.yarn.conf.YarnConfiguration @@ -269,27 +269,6 @@ private[spark] object Utils extends Logging { file.setExecutable(true, true) } - /** - * Move data to trash if 'spark.sql.truncate.trash.enabled' is true - */ - def moveToTrashIfEnabled( - fs: FileSystem, - partitionPath: Path, - isTrashEnabled: Boolean, - hadoopConf: Configuration): Boolean = { - if (isTrashEnabled) { - logDebug(s"will move data ${partitionPath.toString} to trash") - val isSuccess = Trash.moveToAppropriateTrash(fs, partitionPath, hadoopConf) - if (!isSuccess) { - logWarning(s"Failed to move data ${partitionPath.toString} to trash") - return fs.delete(partitionPath, true) - } - isSuccess - } else { - fs.delete(partitionPath, true) - } - } - /** * Create a directory given the abstract pathname * @return true, if the directory is successfully created; otherwise, return false. diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index c9db7b1e8960a..3e82b8e12df02 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -2722,17 +2722,6 @@ object SQLConf { .booleanConf .createWithDefault(false) - val TRUNCATE_TRASH_ENABLED = - buildConf("spark.sql.truncate.trash.enabled") - .doc("This configuration decides when truncating table, whether data files will be moved " + - "to trash directory or deleted permanently. The trash retention time is controlled by " + - "fs.trash.interval, and in default, the server side configuration value takes " + - "precedence over the client-side one. Note that if fs.trash.interval is non-positive, " + - "this will be a no-op and log a warning message.") - .version("3.1.0") - .booleanConf - .createWithDefault(false) - /** * Holds information about keys that have been deprecated. * @@ -3345,8 +3334,6 @@ class SQLConf extends Serializable with Logging { def legacyPathOptionBehavior: Boolean = getConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR) - def truncateTrashEnabled: Boolean = getConf(SQLConf.TRUNCATE_TRASH_ENABLED) - /** ********************** SQLConf functionality methods ************ */ /** Set Spark SQL configuration properties. 
*/ diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 7aebdd7e57293..7aebdddf1d59c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -48,7 +48,6 @@ import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetDataSourceV2 import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} import org.apache.spark.sql.types._ import org.apache.spark.sql.util.SchemaUtils -import org.apache.spark.util.Utils /** * A command to create a table with the same definition of the given existing table. @@ -490,7 +489,6 @@ case class TruncateTableCommand( } val hadoopConf = spark.sessionState.newHadoopConf() val ignorePermissionAcl = SQLConf.get.truncateTableIgnorePermissionAcl - val isTrashEnabled = SQLConf.get.truncateTrashEnabled locations.foreach { location => if (location.isDefined) { val path = new Path(location.get) @@ -515,7 +513,7 @@ case class TruncateTableCommand( } } - Utils.moveToTrashIfEnabled(fs, path, isTrashEnabled, hadoopConf) + fs.delete(path, true) // We should keep original permission/acl of the path. // For owner/group, only super-user can set it, for example on HDFS. Because diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index d6d58a833dd7d..17857a6ce173d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -3101,78 +3101,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { assert(spark.sessionState.catalog.isRegisteredFunction(rand)) } } - - test("SPARK-32481 Move data to trash on truncate table if enabled") { - val trashIntervalKey = "fs.trash.interval" - withTable("tab1") { - withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "true") { - sql("CREATE TABLE tab1 (col INT) USING parquet") - sql("INSERT INTO tab1 SELECT 1") - // scalastyle:off hadoopconfiguration - val hadoopConf = spark.sparkContext.hadoopConfiguration - // scalastyle:on hadoopconfiguration - val originalValue = hadoopConf.get(trashIntervalKey, "0") - val tablePath = new Path(spark.sessionState.catalog - .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) - - val fs = tablePath.getFileSystem(hadoopConf) - val trashRoot = fs.getTrashRoot(tablePath) - assert(!fs.exists(trashRoot)) - try { - hadoopConf.set(trashIntervalKey, "5") - sql("TRUNCATE TABLE tab1") - } finally { - hadoopConf.set(trashIntervalKey, originalValue) - } - assert(fs.exists(trashRoot)) - fs.delete(trashRoot, true) - } - } - } - - test("SPARK-32481 delete data permanently on truncate table if trash interval is non-positive") { - val trashIntervalKey = "fs.trash.interval" - withTable("tab1") { - withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "true") { - sql("CREATE TABLE tab1 (col INT) USING parquet") - sql("INSERT INTO tab1 SELECT 1") - // scalastyle:off hadoopconfiguration - val hadoopConf = spark.sparkContext.hadoopConfiguration - // scalastyle:on hadoopconfiguration - val originalValue = hadoopConf.get(trashIntervalKey, "0") - val tablePath = new Path(spark.sessionState.catalog - .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) - - val fs = tablePath.getFileSystem(hadoopConf) - val trashRoot = 
fs.getTrashRoot(tablePath) - assert(!fs.exists(trashRoot)) - try { - hadoopConf.set(trashIntervalKey, "0") - sql("TRUNCATE TABLE tab1") - } finally { - hadoopConf.set(trashIntervalKey, originalValue) - } - assert(!fs.exists(trashRoot)) - } - } - } - - test("SPARK-32481 Do not move data to trash on truncate table if disabled") { - withTable("tab1") { - withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "false") { - sql("CREATE TABLE tab1 (col INT) USING parquet") - sql("INSERT INTO tab1 SELECT 1") - val hadoopConf = spark.sessionState.newHadoopConf() - val tablePath = new Path(spark.sessionState.catalog - .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) - - val fs = tablePath.getFileSystem(hadoopConf) - val trashRoot = fs.getTrashRoot(tablePath) - sql("TRUNCATE TABLE tab1") - assert(!fs.exists(trashRoot)) - } - } - } } object FakeLocalFsFileSystem { From b786f31a42180523b0baa8113e26b2ddee445498 Mon Sep 17 00:00:00 2001 From: Devesh Agrawal Date: Wed, 26 Aug 2020 15:16:47 -0700 Subject: [PATCH 44/54] [SPARK-32643][CORE][K8S] Consolidate decommissioning state in the TaskSchedulerImpl realm ### What changes were proposed in this pull request? The decommissioning state is a bit fragmented across two places in the TaskSchedulerImpl: https://github.com/apache/spark/pull/29014/ stored the incoming decommission info messages in TaskSchedulerImpl.executorsPendingDecommission, while https://github.com/apache/spark/pull/28619/ stored just the executor end time in the map TaskSetManager.tidToExecutorKillTimeMapping (which in turn is contained in TaskSchedulerImpl). While the two states are not really overlapping, it's a bit of a code hygiene concern to keep this state in two places. With https://github.com/apache/spark/pull/29422, TaskSchedulerImpl is emerging as the place where all decommissioning bookkeeping is kept within the driver. So this PR consolidates the information in _tidToExecutorKillTimeMapping_ into _executorsPendingDecommission_ (a simplified sketch of the resulting merge rule appears below). However, in order to do so, we need to stop keeping the raw ExecutorDecommissionInfo messages and instead keep a new class, ExecutorDecommissionState. This decoupling allows the RPC message class ExecutorDecommissionInfo to evolve independently from the bookkeeping class ExecutorDecommissionState. ### Why are the changes needed? This is just a code cleanup. These two features were added independently, and it's time to consolidate their state for good hygiene. ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? Existing tests. Closes #29452 from agrawaldevesh/consolidate_decom_state. 
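A simplified, standalone sketch of the merge rule this patch introduces (condensed from the `executorDecommission` change in the diff below; not the full `TaskSchedulerImpl` code): repeated decommission messages for one executor keep the original start time, and `isHostDecommissioned` can only flip from false to true, since only the cluster manager sends host-level decommission messages.

```scala
// Condensed from the patch: how an incoming decommission message is merged
// into the per-executor state kept by the scheduler.
case class ExecutorDecommissionState(startTime: Long, isHostDecommissioned: Boolean)

def mergeDecommissionState(
    old: Option[ExecutorDecommissionState],
    nowMillis: Long,
    msgSaysHostDecommissioned: Boolean): ExecutorDecommissionState = old match {
  case None =>
    // First message for this executor: record when decommissioning commenced.
    ExecutorDecommissionState(nowMillis, msgSaysHostDecommissioned)
  case Some(state) if !state.isHostDecommissioned && msgSaysHostDecommissioned =>
    // The cluster manager is authoritative about host decommission: flip the
    // flag but keep the original start time.
    state.copy(isHostDecommissioned = true)
  case Some(state) =>
    state // otherwise the existing state wins
}
```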
Authored-by: Devesh Agrawal Signed-off-by: Holden Karau --- .../spark/ExecutorAllocationClient.scala | 2 +- .../apache/spark/scheduler/DAGScheduler.scala | 2 +- .../scheduler/ExecutorDecommissionInfo.scala | 14 ++- .../spark/scheduler/TaskScheduler.scala | 2 +- .../spark/scheduler/TaskSchedulerImpl.scala | 52 +++++--- .../spark/scheduler/TaskSetManager.scala | 34 +++--- .../spark/scheduler/DAGSchedulerSuite.scala | 8 +- .../ExternalClusterManagerSuite.scala | 4 +- .../scheduler/TaskSchedulerImplSuite.scala | 112 ++++++++++++------ .../spark/scheduler/TaskSetManagerSuite.scala | 33 ++++-- 10 files changed, 167 insertions(+), 96 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala index 079340a358acf..ce47f3fd32203 100644 --- a/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala +++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala @@ -88,7 +88,7 @@ private[spark] trait ExecutorAllocationClient { * Default implementation delegates to kill, scheduler must override * if it supports graceful decommissioning. * - * @param executorsAndDecominfo identifiers of executors & decom info. + * @param executorsAndDecomInfo identifiers of executors & decom info. * @param adjustTargetNumExecutors whether the target number of executors will be adjusted down * after these executors have been decommissioned. * @return the ids of the executors acknowledged by the cluster manager to be removed. diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala index ae0387e09cc6b..18cd2410c1e4c 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala @@ -1825,7 +1825,7 @@ private[spark] class DAGScheduler( if (bmAddress != null) { val externalShuffleServiceEnabled = env.blockManager.externalShuffleServiceEnabled val isHostDecommissioned = taskScheduler - .getExecutorDecommissionInfo(bmAddress.executorId) + .getExecutorDecommissionState(bmAddress.executorId) .exists(_.isHostDecommissioned) // Shuffle output of all executors on host `bmAddress.host` may be lost if: diff --git a/core/src/main/scala/org/apache/spark/scheduler/ExecutorDecommissionInfo.scala b/core/src/main/scala/org/apache/spark/scheduler/ExecutorDecommissionInfo.scala index a82b5d38afe9f..48ae879a518ce 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/ExecutorDecommissionInfo.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/ExecutorDecommissionInfo.scala @@ -18,7 +18,7 @@ package org.apache.spark.scheduler /** - * Provides more detail when an executor is being decommissioned. + * Message providing more detail when an executor is being decommissioned. * @param message Human readable reason for why the decommissioning is happening. * @param isHostDecommissioned Whether the host (aka the `node` or `worker` in other places) is * being decommissioned too. Used to infer if the shuffle data might @@ -26,3 +26,15 @@ package org.apache.spark.scheduler */ private[spark] case class ExecutorDecommissionInfo(message: String, isHostDecommissioned: Boolean) + +/** + * State related to decommissioning that is kept by the TaskSchedulerImpl. This state is derived + * from the info message above but it is kept distinct to allow the state to evolve independently + * from the message. 
+ */ +case class ExecutorDecommissionState( + // Timestamp the decommissioning commenced as per the Driver's clock, + // to estimate when the executor might eventually be lost if EXECUTOR_DECOMMISSION_KILL_INTERVAL + // is configured. + startTime: Long, + isHostDecommissioned: Boolean) diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala index 1101d0616d2bf..0fa80bbafdedd 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala @@ -106,7 +106,7 @@ private[spark] trait TaskScheduler { /** * If an executor is decommissioned, return its corresponding decommission info */ - def getExecutorDecommissionInfo(executorId: String): Option[ExecutorDecommissionInfo] + def getExecutorDecommissionState(executorId: String): Option[ExecutorDecommissionState] /** * Process a lost executor diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala index db6797cbbe308..d446638107690 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala @@ -141,7 +141,7 @@ private[spark] class TaskSchedulerImpl( // We add executors here when we first get decommission notification for them. Executors can // continue to run even after being asked to decommission, but they will eventually exit. - val executorsPendingDecommission = new HashMap[String, ExecutorDecommissionInfo] + val executorsPendingDecommission = new HashMap[String, ExecutorDecommissionState] // When they exit and we know of that via heartbeat failure, we will add them to this cache. // This cache is consulted to know if a fetch failure is because a source executor was @@ -152,7 +152,7 @@ private[spark] class TaskSchedulerImpl( .ticker(new Ticker{ override def read(): Long = TimeUnit.MILLISECONDS.toNanos(clock.getTimeMillis()) }) - .build[String, ExecutorDecommissionInfo]() + .build[String, ExecutorDecommissionState]() .asMap() def runningTasksByExecutors: Map[String, Int] = synchronized { @@ -293,7 +293,7 @@ private[spark] class TaskSchedulerImpl( private[scheduler] def createTaskSetManager( taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = { - new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt) + new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock) } override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = synchronized { @@ -922,22 +922,36 @@ private[spark] class TaskSchedulerImpl( synchronized { // Don't bother noting decommissioning for executors that we don't know about if (executorIdToHost.contains(executorId)) { - // The scheduler can get multiple decommission updates from multiple sources, - // and some of those can have isHostDecommissioned false. 
We merge them such that - // if we heard isHostDecommissioned ever true, then we keep that one since it is - // most likely coming from the cluster manager and thus authoritative - val oldDecomInfo = executorsPendingDecommission.get(executorId) - if (!oldDecomInfo.exists(_.isHostDecommissioned)) { - executorsPendingDecommission(executorId) = decommissionInfo + val oldDecomStateOpt = executorsPendingDecommission.get(executorId) + val newDecomState = if (oldDecomStateOpt.isEmpty) { + // This is the first time we are hearing of decommissioning this executor, + // so create a brand new state. + ExecutorDecommissionState( + clock.getTimeMillis(), + decommissionInfo.isHostDecommissioned) + } else { + val oldDecomState = oldDecomStateOpt.get + if (!oldDecomState.isHostDecommissioned && decommissionInfo.isHostDecommissioned) { + // Only the cluster manager is allowed to send decommission messages with + // isHostDecommissioned set. So the new decommissionInfo is from the cluster + // manager and is thus authoritative. Flip isHostDecommissioned to true but keep the old + // decommission start time. + ExecutorDecommissionState( + oldDecomState.startTime, + isHostDecommissioned = true) + } else { + oldDecomState + } } + executorsPendingDecommission(executorId) = newDecomState } } rootPool.executorDecommission(executorId) backend.reviveOffers() } - override def getExecutorDecommissionInfo(executorId: String) - : Option[ExecutorDecommissionInfo] = synchronized { + override def getExecutorDecommissionState(executorId: String) + : Option[ExecutorDecommissionState] = synchronized { executorsPendingDecommission .get(executorId) .orElse(Option(decommissionedExecutorsRemoved.get(executorId))) @@ -948,14 +962,14 @@ private[spark] class TaskSchedulerImpl( val reason = givenReason match { // Handle executor process loss due to decommissioning case ExecutorProcessLost(message, origWorkerLost, origCausedByApp) => - val executorDecommissionInfo = getExecutorDecommissionInfo(executorId) + val executorDecommissionState = getExecutorDecommissionState(executorId) ExecutorProcessLost( message, // Also mark the worker lost if we know that the host was decommissioned - origWorkerLost || executorDecommissionInfo.exists(_.isHostDecommissioned), + origWorkerLost || executorDecommissionState.exists(_.isHostDecommissioned), // Executor loss is certainly not caused by app if we knew that this executor is being // decommissioned - causedByApp = executorDecommissionInfo.isEmpty && origCausedByApp) + causedByApp = executorDecommissionState.isEmpty && origCausedByApp) case e => e } @@ -1047,8 +1061,8 @@ private[spark] class TaskSchedulerImpl( } - val decomInfo = executorsPendingDecommission.remove(executorId) - decomInfo.foreach(decommissionedExecutorsRemoved.put(executorId, _)) + val decomState = executorsPendingDecommission.remove(executorId) + decomState.foreach(decommissionedExecutorsRemoved.put(executorId, _)) if (reason != LossReasonPending) { executorIdToHost -= executorId @@ -1085,12 +1099,12 @@ private[spark] class TaskSchedulerImpl( // exposed for test protected final def isExecutorDecommissioned(execId: String): Boolean = - getExecutorDecommissionInfo(execId).nonEmpty + getExecutorDecommissionState(execId).isDefined // exposed for test protected final def isHostDecommissioned(host: String): Boolean = { hostToExecutors.get(host).exists { executors => - executors.exists(e => getExecutorDecommissionInfo(e).exists(_.isHostDecommissioned)) + executors.exists(e => 
getExecutorDecommissionState(e).exists(_.isHostDecommissioned)) } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala index 3a779d125e30d..ff0387602273d 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala @@ -169,7 +169,6 @@ private[spark] class TaskSetManager( // Task index, start and finish time for each task attempt (indexed by task ID) private[scheduler] val taskInfos = new HashMap[Long, TaskInfo] - private[scheduler] val tidToExecutorKillTimeMapping = new HashMap[Long, Long] // Use a MedianHeap to record durations of successful tasks so we know when to launch // speculative tasks. This is only used when speculation is enabled, to avoid the overhead @@ -943,7 +942,6 @@ private[spark] class TaskSetManager( /** If the given task ID is in the set of running tasks, removes it. */ def removeRunningTask(tid: Long): Unit = { - tidToExecutorKillTimeMapping.remove(tid) if (runningTasksSet.remove(tid) && parent != null) { parent.decreaseRunningTasks(1) } @@ -1054,15 +1052,21 @@ private[spark] class TaskSetManager( logDebug("Task length threshold for speculation: " + threshold) for (tid <- runningTasksSet) { var speculated = checkAndSubmitSpeculatableTask(tid, time, threshold) - if (!speculated && tidToExecutorKillTimeMapping.contains(tid)) { - // Check whether this task will finish before the exectorKillTime assuming - // it will take medianDuration overall. If this task cannot finish within - // executorKillInterval, then this task is a candidate for speculation - val taskEndTimeBasedOnMedianDuration = taskInfos(tid).launchTime + medianDuration - val canExceedDeadline = tidToExecutorKillTimeMapping(tid) < - taskEndTimeBasedOnMedianDuration - if (canExceedDeadline) { - speculated = checkAndSubmitSpeculatableTask(tid, time, 0) + if (!speculated && executorDecommissionKillInterval.isDefined) { + val taskInfo = taskInfos(tid) + val decomState = sched.getExecutorDecommissionState(taskInfo.executorId) + if (decomState.isDefined) { + // Check if this task might finish after this executor is decommissioned. + // We estimate the task's finish time by using the median task duration. + // Whereas the time when the executor might be decommissioned is estimated using the + // config executorDecommissionKillInterval. If the task is going to finish after + // decommissioning, then we will eagerly speculate the task. 
+ val taskEndTimeBasedOnMedianDuration = taskInfos(tid).launchTime + medianDuration + val executorDecomTime = decomState.get.startTime + executorDecommissionKillInterval.get + val canExceedDeadline = executorDecomTime < taskEndTimeBasedOnMedianDuration + if (canExceedDeadline) { + speculated = checkAndSubmitSpeculatableTask(tid, time, 0) + } } } foundTasks |= speculated @@ -1123,14 +1127,6 @@ private[spark] class TaskSetManager( def executorDecommission(execId: String): Unit = { recomputeLocality() - if (speculationEnabled) { - executorDecommissionKillInterval.foreach { interval => - val executorKillTime = clock.getTimeMillis() + interval - runningTasksSet.filter(taskInfos(_).executorId == execId).foreach { tid => - tidToExecutorKillTimeMapping(tid) = executorKillTime - } - } - } } def recomputeLocality(): Unit = { diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala index c829006923c4f..a7f8affee918c 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala @@ -178,8 +178,8 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi override def executorDecommission( executorId: String, decommissionInfo: ExecutorDecommissionInfo): Unit = {} - override def getExecutorDecommissionInfo( - executorId: String): Option[ExecutorDecommissionInfo] = None + override def getExecutorDecommissionState( + executorId: String): Option[ExecutorDecommissionState] = None } /** @@ -787,8 +787,8 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi override def executorDecommission( executorId: String, decommissionInfo: ExecutorDecommissionInfo): Unit = {} - override def getExecutorDecommissionInfo( - executorId: String): Option[ExecutorDecommissionInfo] = None + override def getExecutorDecommissionState( + executorId: String): Option[ExecutorDecommissionState] = None } val noKillScheduler = new DAGScheduler( sc, diff --git a/core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala index 07d88672290fc..08191d09a9f2d 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala @@ -101,6 +101,6 @@ private class DummyTaskScheduler extends TaskScheduler { override def executorDecommission( executorId: String, decommissionInfo: ExecutorDecommissionInfo): Unit = {} - override def getExecutorDecommissionInfo( - executorId: String): Option[ExecutorDecommissionInfo] = None + override def getExecutorDecommissionState( + executorId: String): Option[ExecutorDecommissionState] = None } diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala index 1c8d799db877e..26c9d9130e56a 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala @@ -88,15 +88,10 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B } def setupSchedulerWithMaster(master: String, confs: (String, String)*): TaskSchedulerImpl = { - setupSchedulerWithMasterAndClock(master, new SystemClock, confs: _*) - } - - def 
setupSchedulerWithMasterAndClock(master: String, clock: Clock, confs: (String, String)*): - TaskSchedulerImpl = { val conf = new SparkConf().setMaster(master).setAppName("TaskSchedulerImplSuite") confs.foreach { case (k, v) => conf.set(k, v) } sc = new SparkContext(conf) - taskScheduler = new TaskSchedulerImpl(sc, sc.conf.get(config.TASK_MAX_FAILURES), clock = clock) + taskScheduler = new TaskSchedulerImpl(sc, sc.conf.get(config.TASK_MAX_FAILURES)) setupHelper() } @@ -1834,66 +1829,111 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(2 == taskDescriptions.head.resources(GPU).addresses.size) } - private def setupSchedulerForDecommissionTests(clock: Clock): TaskSchedulerImpl = { - val taskScheduler = setupSchedulerWithMasterAndClock( - s"local[2]", - clock, - config.CPUS_PER_TASK.key -> 1.toString) - taskScheduler.submitTasks(FakeTask.createTaskSet(2)) - val multiCoreWorkerOffers = IndexedSeq(WorkerOffer("executor0", "host0", 1), - WorkerOffer("executor1", "host1", 1)) - val taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten - assert(taskDescriptions.map(_.executorId).sorted === Seq("executor0", "executor1")) + private def setupSchedulerForDecommissionTests(clock: Clock, numTasks: Int): TaskSchedulerImpl = { + // one task per host + val numHosts = numTasks + val conf = new SparkConf() + .setMaster(s"local[$numHosts]") + .setAppName("TaskSchedulerImplSuite") + .set(config.CPUS_PER_TASK.key, "1") + sc = new SparkContext(conf) + val maxTaskFailures = sc.conf.get(config.TASK_MAX_FAILURES) + taskScheduler = new TaskSchedulerImpl(sc, maxTaskFailures, clock = clock) { + override def createTaskSetManager(taskSet: TaskSet, maxFailures: Int): TaskSetManager = { + val tsm = super.createTaskSetManager(taskSet, maxFailures) + // we need to create a spied tsm so that we can see the copies running + val tsmSpy = spy(tsm) + stageToMockTaskSetManager(taskSet.stageId) = tsmSpy + tsmSpy + } + } + setupHelper() + // Spawn the tasks on different executors/hosts + taskScheduler.submitTasks(FakeTask.createTaskSet(numTasks)) + for (i <- 0 until numTasks) { + val executorId = s"executor$i" + val taskDescriptions = taskScheduler.resourceOffers(IndexedSeq(WorkerOffer( + executorId, s"host$i", 1))).flatten + assert(taskDescriptions.size === 1) + assert(taskDescriptions(0).executorId == executorId) + assert(taskDescriptions(0).index === i) + } taskScheduler } - test("scheduler should keep the decommission info where host was decommissioned") { - val scheduler = setupSchedulerForDecommissionTests(new SystemClock) - + test("scheduler should keep the decommission state where host was decommissioned") { + val clock = new ManualClock(10000L) + val scheduler = setupSchedulerForDecommissionTests(clock, 2) + val oldTime = clock.getTimeMillis() scheduler.executorDecommission("executor0", ExecutorDecommissionInfo("0", false)) scheduler.executorDecommission("executor1", ExecutorDecommissionInfo("1", true)) + + clock.advance(3000L) scheduler.executorDecommission("executor0", ExecutorDecommissionInfo("0 new", false)) scheduler.executorDecommission("executor1", ExecutorDecommissionInfo("1 new", false)) - assert(scheduler.getExecutorDecommissionInfo("executor0") - === Some(ExecutorDecommissionInfo("0 new", false))) - assert(scheduler.getExecutorDecommissionInfo("executor1") - === Some(ExecutorDecommissionInfo("1", true))) - assert(scheduler.getExecutorDecommissionInfo("executor2").isEmpty) + assert(scheduler.getExecutorDecommissionState("executor0") + === 
Some(ExecutorDecommissionState(oldTime, false))) + assert(scheduler.getExecutorDecommissionState("executor1") + === Some(ExecutorDecommissionState(oldTime, true))) + assert(scheduler.getExecutorDecommissionState("executor2").isEmpty) } - test("scheduler should eventually purge removed and decommissioned executors") { + test("test full decommissioning flow") { val clock = new ManualClock(10000L) - val scheduler = setupSchedulerForDecommissionTests(clock) + val scheduler = setupSchedulerForDecommissionTests(clock, 2) + val manager = stageToMockTaskSetManager(0) + // The task started should be running. + assert(manager.copiesRunning.take(2) === Array(1, 1)) // executor 0 is decommissioned after loosing - assert(scheduler.getExecutorDecommissionInfo("executor0").isEmpty) + assert(scheduler.getExecutorDecommissionState("executor0").isEmpty) scheduler.executorLost("executor0", ExecutorExited(0, false, "normal")) - assert(scheduler.getExecutorDecommissionInfo("executor0").isEmpty) + assert(scheduler.getExecutorDecommissionState("executor0").isEmpty) scheduler.executorDecommission("executor0", ExecutorDecommissionInfo("", false)) - assert(scheduler.getExecutorDecommissionInfo("executor0").isEmpty) + assert(scheduler.getExecutorDecommissionState("executor0").isEmpty) + + // 0th task just died above + assert(manager.copiesRunning.take(2) === Array(0, 1)) assert(scheduler.executorsPendingDecommission.isEmpty) clock.advance(5000) + // executor1 hasn't been decommissioned yet + assert(scheduler.getExecutorDecommissionState("executor1").isEmpty) + // executor 1 is decommissioned before loosing - assert(scheduler.getExecutorDecommissionInfo("executor1").isEmpty) scheduler.executorDecommission("executor1", ExecutorDecommissionInfo("", false)) - assert(scheduler.getExecutorDecommissionInfo("executor1").isDefined) + assert(scheduler.getExecutorDecommissionState("executor1").isDefined) clock.advance(2000) + + // executor1 is eventually lost scheduler.executorLost("executor1", ExecutorExited(0, false, "normal")) assert(scheduler.decommissionedExecutorsRemoved.size === 1) assert(scheduler.executorsPendingDecommission.isEmpty) + // So now both the tasks are no longer running + assert(manager.copiesRunning.take(2) === Array(0, 0)) clock.advance(2000) - // It hasn't been 60 seconds yet before removal - assert(scheduler.getExecutorDecommissionInfo("executor1").isDefined) + + // Decommission state should hang around a bit after removal ... + assert(scheduler.getExecutorDecommissionState("executor1").isDefined) scheduler.executorDecommission("executor1", ExecutorDecommissionInfo("", false)) clock.advance(2000) assert(scheduler.decommissionedExecutorsRemoved.size === 1) - assert(scheduler.getExecutorDecommissionInfo("executor1").isDefined) - clock.advance(301000) - assert(scheduler.getExecutorDecommissionInfo("executor1").isEmpty) + assert(scheduler.getExecutorDecommissionState("executor1").isDefined) + + // The default timeout for expiry is 300k milliseconds (5 minutes) which completes now, + // and the executor1's decommission state should finally be purged. 
+ clock.advance(300000) + assert(scheduler.getExecutorDecommissionState("executor1").isEmpty) assert(scheduler.decommissionedExecutorsRemoved.isEmpty) + + // Now give it some resources and both tasks should be rerun + val taskDescriptions = taskScheduler.resourceOffers(IndexedSeq( + WorkerOffer("executor2", "host2", 1), WorkerOffer("executor3", "host3", 1))).flatten + assert(taskDescriptions.size === 2) + assert(taskDescriptions.map(_.index).sorted == Seq(0, 1)) + assert(manager.copiesRunning.take(2) === Array(1, 1)) } /** diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala index c6f8fa5b3eb81..86d4e92df723b 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala @@ -41,7 +41,7 @@ import org.apache.spark.resource.TestResourceIDs._ import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend import org.apache.spark.serializer.SerializerInstance import org.apache.spark.storage.BlockManagerId -import org.apache.spark.util.{AccumulatorV2, ManualClock} +import org.apache.spark.util.{AccumulatorV2, Clock, ManualClock, SystemClock} class FakeDAGScheduler(sc: SparkContext, taskScheduler: FakeTaskScheduler) extends DAGScheduler(sc) { @@ -109,8 +109,11 @@ object FakeRackUtil { * a list of "live" executors and their hostnames for isExecutorAlive and hasExecutorsAliveOnHost * to work, and these are required for locality in TaskSetManager. */ -class FakeTaskScheduler(sc: SparkContext, liveExecutors: (String, String)* /* execId, host */) - extends TaskSchedulerImpl(sc) +class FakeTaskScheduler( + sc: SparkContext, + clock: Clock, + liveExecutors: (String, String)* /* execId, host */) + extends TaskSchedulerImpl(sc, sc.conf.get(config.TASK_MAX_FAILURES), clock = clock) { val startedTasks = new ArrayBuffer[Long] val endedTasks = new mutable.HashMap[Long, TaskEndReason] @@ -120,6 +123,10 @@ class FakeTaskScheduler(sc: SparkContext, liveExecutors: (String, String)* /* ex val executors = new mutable.HashMap[String, String] + def this(sc: SparkContext, liveExecutors: (String, String)*) = { + this(sc, new SystemClock, liveExecutors: _*) + } + // this must be initialized before addExecutor override val defaultRackValue: Option[String] = Some("default") for ((execId, host) <- liveExecutors) { @@ -1974,14 +1981,16 @@ class TaskSetManagerSuite test("SPARK-21040: Check speculative tasks are launched when an executor is decommissioned" + " and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL") { sc = new SparkContext("local", "test") - sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"), ("exec3", "host3")) + val clock = new ManualClock() + sched = new FakeTaskScheduler(sc, clock, + ("exec1", "host1"), ("exec2", "host2"), ("exec3", "host3")) + sched.backend = mock(classOf[SchedulerBackend]) val taskSet = FakeTask.createTaskSet(4) sc.conf.set(config.SPECULATION_ENABLED, true) sc.conf.set(config.SPECULATION_MULTIPLIER, 1.5) sc.conf.set(config.SPECULATION_QUANTILE, 0.5) sc.conf.set(config.EXECUTOR_DECOMMISSION_KILL_INTERVAL.key, "5s") - val clock = new ManualClock() - val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock) + val manager = sched.createTaskSetManager(taskSet, MAX_TASK_FAILURES) val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task => task.metrics.internalAccums } @@ 
-2017,13 +2026,13 @@ class TaskSetManagerSuite assert(!manager.checkSpeculatableTasks(0)) assert(sched.speculativeTasks.toSet === Set()) - // decommission exec-2. All tasks running on exec-2 (i.e. TASK 2,3) will be added to - // executorDecommissionSpeculationTriggerTimeoutOpt + // decommission exec-2. All tasks running on exec-2 (i.e. TASK 2,3) will be now + // checked if they should be speculated. // (TASK 2 -> 15, TASK 3 -> 15) - manager.executorDecommission("exec2") - assert(manager.tidToExecutorKillTimeMapping.keySet === Set(2, 3)) - assert(manager.tidToExecutorKillTimeMapping(2) === 15*1000) - assert(manager.tidToExecutorKillTimeMapping(3) === 15*1000) + sched.executorDecommission("exec2", ExecutorDecommissionInfo("decom", + isHostDecommissioned = false)) + assert(sched.getExecutorDecommissionState("exec2").map(_.startTime) === + Some(clock.getTimeMillis())) assert(manager.checkSpeculatableTasks(0)) // TASK 2 started at t=0s, so it can still finish before t=15s (Median task runtime = 10s) From baaa756deee536a06956d38d92ce81764a1aca54 Mon Sep 17 00:00:00 2001 From: Terry Kim Date: Thu, 27 Aug 2020 06:21:04 +0000 Subject: [PATCH 45/54] [SPARK-32516][SQL][FOLLOWUP] 'path' option cannot coexist with path parameter for DataFrameWriter.save(), DataStreamReader.load() and DataStreamWriter.start() ### What changes were proposed in this pull request? This is a follow-up PR to #29328 to apply the same constraint, that the `path` option cannot coexist with a path parameter, to `DataFrameWriter.save()`, `DataStreamReader.load()` and `DataStreamWriter.start()`. ### Why are the changes needed? The current behavior silently overwrites the `path` option if a path parameter is passed to `DataFrameWriter.save()`, `DataStreamReader.load()` and `DataStreamWriter.start()`. For example, ``` Seq(1).toDF.write.option("path", "/tmp/path1").parquet("/tmp/path2") ``` will write the result to `/tmp/path2`. ### Does this PR introduce _any_ user-facing change? Yes. If the `path` option coexists with a path parameter to any of the above methods, an `AnalysisException` is thrown: ``` scala> Seq(1).toDF.write.option("path", "/tmp/path1").parquet("/tmp/path2") org.apache.spark.sql.AnalysisException: There is a 'path' option set and save() is called with a path parameter. Either remove the path option, or call save() without the parameter. To ignore this check, set 'spark.sql.legacy.pathOptionBehavior.enabled' to 'true'.; ``` The user can restore the previous behavior by setting `spark.sql.legacy.pathOptionBehavior.enabled` to `true`. ### How was this patch tested? Added new tests. Closes #29543 from imback82/path_option. Authored-by: Terry Kim Signed-off-by: Wenchen Fan --- docs/sql-migration-guide.md | 2 +- python/pyspark/sql/tests/test_streaming.py | 19 ++++--- .../apache/spark/sql/internal/SQLConf.scala | 5 +- .../apache/spark/sql/DataFrameReader.scala | 3 +- .../apache/spark/sql/DataFrameWriter.scala | 7 +++ .../sql/streaming/DataStreamReader.scala | 7 +++ .../sql/streaming/DataStreamWriter.scala | 7 +++ .../test/DataStreamReaderWriterSuite.scala | 51 ++++++++++++++++++- .../sql/test/DataFrameReaderWriterSuite.scala | 33 ++++++++++-- 9 files changed, 119 insertions(+), 15 deletions(-) diff --git a/docs/sql-migration-guide.md b/docs/sql-migration-guide.md index 3b66694556af9..0e03eac410985 100644 --- a/docs/sql-migration-guide.md +++ b/docs/sql-migration-guide.md @@ -38,7 +38,7 @@ license: | - In Spark 3.1, when `spark.sql.ansi.enabled` is false, Spark always returns null if the sum of decimal type column overflows. 
In Spark 3.0 or earlier, in the case, the sum of decimal type column may return null or incorrect result, or even fails at runtime (depending on the actual query plan execution). - - In Spark 3.1, when loading a dataframe, `path` or `paths` option cannot coexist with `load()`'s path parameters. For example, `spark.read.format("csv").option("path", "/tmp").load("/tmp2")` or `spark.read.option("path", "/tmp").csv("/tmp2")` will throw `org.apache.spark.sql.AnalysisException`. In Spark version 3.0 and below, `path` option is overwritten if one path parameter is passed to `load()`, or `path` option is added to the overall paths if multiple path parameters are passed to `load()`. To restore the behavior before Spark 3.1, you can set `spark.sql.legacy.pathOptionBehavior.enabled` to `true`. + - In Spark 3.1, `path` option cannot coexist when the following methods are called with path parameter(s): `DataFrameReader.load()`, `DataFrameWriter.save()`, `DataStreamReader.load()`, or `DataStreamWriter.start()`. In addition, `paths` option cannot coexist for `DataFrameReader.load()`. For example, `spark.read.format("csv").option("path", "/tmp").load("/tmp2")` or `spark.read.option("path", "/tmp").csv("/tmp2")` will throw `org.apache.spark.sql.AnalysisException`. In Spark version 3.0 and below, `path` option is overwritten if one path parameter is passed to above methods; `path` option is added to the overall paths if multiple path parameters are passed to `DataFrameReader.load()`. To restore the behavior before Spark 3.1, you can set `spark.sql.legacy.pathOptionBehavior.enabled` to `true`. ## Upgrading from Spark SQL 3.0 to 3.0.1 diff --git a/python/pyspark/sql/tests/test_streaming.py b/python/pyspark/sql/tests/test_streaming.py index caac67d7efdf3..34ff92b323c73 100644 --- a/python/pyspark/sql/tests/test_streaming.py +++ b/python/pyspark/sql/tests/test_streaming.py @@ -68,9 +68,12 @@ def test_stream_read_options(self): def test_stream_read_options_overwrite(self): bad_schema = StructType([StructField("test", IntegerType(), False)]) schema = StructType([StructField("data", StringType(), False)]) - df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \ - .schema(bad_schema)\ - .load(path='python/test_support/sql/streaming', schema=schema, format='text') + # SPARK-32516 disables the overwrite behavior by default. + with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}): + df = self.spark.readStream.format('csv')\ + .option('path', 'python/test_support/sql/fake')\ + .schema(bad_schema)\ + .load(path='python/test_support/sql/streaming', schema=schema, format='text') self.assertTrue(df.isStreaming) self.assertEqual(df.schema.simpleString(), "struct") @@ -110,10 +113,12 @@ def test_stream_save_options_overwrite(self): chk = os.path.join(tmpPath, 'chk') fake1 = os.path.join(tmpPath, 'fake1') fake2 = os.path.join(tmpPath, 'fake2') - q = df.writeStream.option('checkpointLocation', fake1)\ - .format('memory').option('path', fake2) \ - .queryName('fake_query').outputMode('append') \ - .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk) + # SPARK-32516 disables the overwrite behavior by default. 
+ with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}): + q = df.writeStream.option('checkpointLocation', fake1)\ + .format('memory').option('path', fake2) \ + .queryName('fake_query').outputMode('append') \ + .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk) try: self.assertEqual(q.name, 'this_query') diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index 3e82b8e12df02..47cd3c7d62a72 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -2716,8 +2716,9 @@ object SQLConf { buildConf("spark.sql.legacy.pathOptionBehavior.enabled") .internal() .doc("When true, \"path\" option is overwritten if one path parameter is passed to " + - "DataFramerReader.load(), or \"path\" option is added to the overall paths if multiple " + - "path parameters are passed to DataFramerReader.load()") + "DataFrameReader.load(), DataFrameWriter.save(), DataStreamReader.load(), or " + + "DataStreamWriter.start(). Also, \"path\" option is added to the overall paths if " + + "multiple path parameters are passed to DataFrameReader.load()") .version("3.1.0") .booleanConf .createWithDefault(false) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala index 8d3a7eea05c77..5ffff20853180 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala @@ -255,7 +255,8 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging { (extraOptions.contains("path") || extraOptions.contains("paths")) && paths.nonEmpty) { throw new AnalysisException("There is a 'path' or 'paths' option set and load() is called " + "with path parameters. Either remove the path option if it's the same as the path " + - "parameter, or add it to the load() parameter if you do want to read multiple paths.") + "parameter, or add it to the load() parameter if you do want to read multiple paths. 
" + + s"To ignore this check, set '${SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key}' to 'true'.") } val updatedPaths = if (!legacyPathOptionBehavior && paths.length == 1) { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala index f463166a9f268..2da8814d66aea 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala @@ -35,6 +35,7 @@ import org.apache.spark.sql.execution.SQLExecution import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.{CreateTable, DataSource, DataSourceUtils, LogicalRelation} import org.apache.spark.sql.execution.datasources.v2._ +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode import org.apache.spark.sql.sources.BaseRelation import org.apache.spark.sql.types.StructType @@ -284,6 +285,12 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) { * @since 1.4.0 */ def save(path: String): Unit = { + if (!df.sparkSession.sessionState.conf.legacyPathOptionBehavior && + extraOptions.contains("path") && path.nonEmpty) { + throw new AnalysisException("There is a 'path' option set and save() is called with a path " + + "parameter. Either remove the path option, or call save() without the parameter. " + + s"To ignore this check, set '${SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key}' to 'true'.") + } this.extraOptions += ("path" -> path) save() } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala index 2b0db4381c6e4..6122b96c9a0bc 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala @@ -30,6 +30,7 @@ import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Utils, FileDataSourceV2} import org.apache.spark.sql.execution.streaming.{StreamingRelation, StreamingRelationV2} +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.sources.StreamSourceProvider import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.CaseInsensitiveStringMap @@ -239,6 +240,12 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo * @since 2.0.0 */ def load(path: String): DataFrame = { + if (!sparkSession.sessionState.conf.legacyPathOptionBehavior && + extraOptions.contains("path") && path.nonEmpty) { + throw new AnalysisException("There is a 'path' option set and load() is called with a path" + + "parameter. Either remove the path option, or call load() without the parameter. 
" + + s"To ignore this check, set '${SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key}' to 'true'.") + } option("path", path).load() } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala index 1d0ca4d9453a5..45250c50a970e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala @@ -33,6 +33,7 @@ import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Utils, FileDataSourceV2} import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.sources._ +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.util.CaseInsensitiveStringMap /** @@ -266,6 +267,12 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) { * @since 2.0.0 */ def start(path: String): StreamingQuery = { + if (!df.sparkSession.sessionState.conf.legacyPathOptionBehavior && + extraOptions.contains("path") && path.nonEmpty) { + throw new AnalysisException("There is a 'path' option set and start() is called with a " + + "path parameter. Either remove the path option, or call start() without the parameter. " + + s"To ignore this check, set '${SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key}' to 'true'.") + } option("path", path).start() } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala index f9fc540c2ab80..8f34106d3d8f5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala @@ -109,6 +109,7 @@ class DefaultSource extends StreamSourceProvider with StreamSinkProvider { } class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { + import testImplicits._ private def newMetadataDir = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath @@ -435,7 +436,6 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { } private def testMemorySinkCheckpointRecovery(chkLoc: String, provideInWriter: Boolean): Unit = { - import testImplicits._ val ms = new MemoryStream[Int](0, sqlContext) val df = ms.toDF().toDF("a") val tableName = "test" @@ -703,4 +703,53 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { queries.foreach(_.stop()) } } + + test("SPARK-32516: 'path' cannot coexist with load()'s path parameter") { + def verifyLoadFails(f: => DataFrame): Unit = { + val e = intercept[AnalysisException](f) + assert(e.getMessage.contains( + "Either remove the path option, or call load() without the parameter")) + } + + verifyLoadFails(spark.readStream.option("path", "tmp1").parquet("tmp2")) + verifyLoadFails(spark.readStream.option("path", "tmp1").format("parquet").load("tmp2")) + + withClue("SPARK-32516: legacy behavior") { + withSQLConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key -> "true") { + spark.readStream + .format("org.apache.spark.sql.streaming.test") + .option("path", "tmp1") + .load("tmp2") + // The legacy behavior overwrites the path option. 
+ assert(LastOptions.parameters("path") == "tmp2") + } + } + } + + test("SPARK-32516: 'path' cannot coexist with start()'s path parameter") { + val df = spark.readStream + .format("org.apache.spark.sql.streaming.test") + .load("tmp1") + + val e = intercept[AnalysisException] { + df.writeStream + .format("org.apache.spark.sql.streaming.test") + .option("path", "tmp2") + .start("tmp3") + .stop() + } + assert(e.getMessage.contains( + "Either remove the path option, or call start() without the parameter")) + + withClue("SPARK-32516: legacy behavior") { + withSQLConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key -> "true") { + spark.readStream + .format("org.apache.spark.sql.streaming.test") + .option("path", "tmp4") + .load("tmp5") + // The legacy behavior overwrites the path option. + assert(LastOptions.parameters("path") == "tmp5") + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala index 85036ac8476fe..c84d361024309 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala @@ -224,7 +224,7 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSparkSession with assert(LastOptions.parameters("opt3") == "3") } - test("SPARK-32364: later option should override earlier options") { + test("SPARK-32364: later option should override earlier options for load()") { spark.read .format("org.apache.spark.sql.test") .option("paTh", "1") @@ -249,15 +249,29 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSparkSession with } } - test("SPARK-32364: path argument of save function should override all existing options") { + test("SPARK-32364: later option should override earlier options for save()") { Seq(1).toDF.write .format("org.apache.spark.sql.test") .option("paTh", "1") .option("PATH", "2") .option("Path", "3") .option("patH", "4") - .save("5") + .option("path", "5") + .save() assert(LastOptions.parameters("path") == "5") + + withClue("SPARK-32516: legacy path option behavior") { + withSQLConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key -> "true") { + Seq(1).toDF.write + .format("org.apache.spark.sql.test") + .option("paTh", "1") + .option("PATH", "2") + .option("Path", "3") + .option("patH", "4") + .save("5") + assert(LastOptions.parameters("path") == "5") + } + } } test("pass partitionBy as options") { @@ -1157,4 +1171,17 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSparkSession with } } } + + test("SPARK-32516: 'path' option cannot coexist with save()'s path parameter") { + def verifyLoadFails(f: => Unit): Unit = { + val e = intercept[AnalysisException](f) + assert(e.getMessage.contains( + "Either remove the path option, or call save() without the parameter")) + } + + val df = Seq(1).toDF + val path = "tmp" + verifyLoadFails(df.write.option("path", path).parquet(path)) + verifyLoadFails(df.write.option("path", path).format("parquet").save(path)) + } } From eb379766f406fc1f91821f9109bacff7f3403fc3 Mon Sep 17 00:00:00 2001 From: "xuewei.linxuewei" Date: Thu, 27 Aug 2020 06:24:42 +0000 Subject: [PATCH 46/54] [SPARK-32705][SQL] Fix serialization issue for EmptyHashedRelation ### What changes were proposed in this pull request? 
Currently, EmptyHashedRelation and HashedRelationWithAllNullKeys are plain objects, which causes a Java deserialization exception like the following: ``` 20/08/26 11:13:30 WARN [task-result-getter-2] TaskSetManager: Lost task 34.0 in stage 57.0 (TID 18076, emr-worker-5.cluster-183257, executor 18): java.io.InvalidClassException: org.apache.spark.sql.execution.joins.EmptyHashedRelation$; no valid constructor at java.io.ObjectStreamClass$ExceptionInfo.newInvalidClassException(ObjectStreamClass.java:169) at java.io.ObjectStreamClass.checkDeserialize(ObjectStreamClass.java:874) at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2042) at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1572) at java.io.ObjectInputStream.readObject(ObjectInputStream.java:430) at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:76) at org.apache.spark.broadcast.TorrentBroadcast$.$anonfun$unBlockifyObject$4(TorrentBroadcast.scala:328) ``` This PR includes: * Using case objects instead, to fix the serialization issue. * Changing EmptyHashedRelation not to extend NullAwareHashedRelation, since it is already used in other non-NAAJ joins. ### Why are the changes needed? Without this fix, BHJ fails when the build side is empty, and BHJ (NAAJ) fails when the build side contains null partition keys. ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? * Existing UTs. * Ran the entire TPCDS suite for E2E coverage. Closes #29547 from leanken/leanken-SPARK-32705. Authored-by: xuewei.linxuewei Signed-off-by: Wenchen Fan --- .../sql/execution/joins/HashedRelation.scala | 85 +++++++++---------- 1 file changed, 38 insertions(+), 47 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala index a2e062d880524..89836f6de641a 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala @@ -71,7 +71,9 @@ private[execution] sealed trait HashedRelation extends KnownSizeEstimation { * * Returns null if there is no matched rows. */ - def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] + def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = { + throw new UnsupportedOperationException + } /** * Returns key index and matched single row. * * Returns null if there is no matched rows. */ - def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex + def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = { + throw new UnsupportedOperationException + } /** * Returns an iterator for keys index and rows of InternalRow type. */ - def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] + def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = { + throw new UnsupportedOperationException + } /** * Returns the maximum number of allowed keys index. */ - def maxNumKeysIndex: Int + def maxNumKeysIndex: Int = { + throw new UnsupportedOperationException + } /** * Returns true iff all the keys are unique.
@@ -1065,34 +1073,20 @@ private[joins] object LongHashedRelation { } /** - * Common trait with dummy implementation for NAAJ special HashedRelation - * EmptyHashedRelation - * HashedRelationWithAllNullKeys + * A special HashedRelation indicating that it's built from an empty input: Iterator[InternalRow]. + * get & getValue will return null just like + * empty LongHashedRelation or empty UnsafeHashedRelation does. */ -trait NullAwareHashedRelation extends HashedRelation with Externalizable { - override def get(key: InternalRow): Iterator[InternalRow] = { - throw new UnsupportedOperationException - } - - override def getValue(key: InternalRow): InternalRow = { - throw new UnsupportedOperationException - } +case object EmptyHashedRelation extends HashedRelation { + override def get(key: Long): Iterator[InternalRow] = null - override def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = { - throw new UnsupportedOperationException - } + override def get(key: InternalRow): Iterator[InternalRow] = null - override def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = { - throw new UnsupportedOperationException - } + override def getValue(key: Long): InternalRow = null - override def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = { - throw new UnsupportedOperationException - } + override def getValue(key: InternalRow): InternalRow = null - override def maxNumKeysIndex: Int = { - throw new UnsupportedOperationException - } + override def asReadOnlyCopy(): EmptyHashedRelation.type = this override def keyIsUnique: Boolean = true @@ -1102,36 +1096,33 @@ trait NullAwareHashedRelation extends HashedRelation with Externalizable { override def close(): Unit = {} - override def writeExternal(out: ObjectOutput): Unit = {} - - override def readExternal(in: ObjectInput): Unit = {} - override def estimatedSize: Long = 0 } /** - * A special HashedRelation indicates it built from a empty input:Iterator[InternalRow]. - * get & getValue will return null just like - * empty LongHashedRelation or empty UnsafeHashedRelation does. + * A special HashedRelation indicating that it's built from a non-empty input: Iterator[InternalRow] + * whose keys are all null. */ -object EmptyHashedRelation extends NullAwareHashedRelation { - override def get(key: Long): Iterator[InternalRow] = null +case object HashedRelationWithAllNullKeys extends HashedRelation { + override def get(key: InternalRow): Iterator[InternalRow] = { + throw new UnsupportedOperationException + } - override def get(key: InternalRow): Iterator[InternalRow] = null + override def getValue(key: InternalRow): InternalRow = { + throw new UnsupportedOperationException + } - override def getValue(key: Long): InternalRow = null + override def asReadOnlyCopy(): HashedRelationWithAllNullKeys.type = this - override def getValue(key: InternalRow): InternalRow = null + override def keyIsUnique: Boolean = true - override def asReadOnlyCopy(): EmptyHashedRelation.type = this -} + override def keys(): Iterator[InternalRow] = { + throw new UnsupportedOperationException + } -/** - * A special HashedRelation indicates it built from a non-empty input:Iterator[InternalRow], - * which contains all null columns key. - */ -object HashedRelationWithAllNullKeys extends NullAwareHashedRelation { - override def asReadOnlyCopy(): HashedRelationWithAllNullKeys.type = this + override def close(): Unit = {} + + override def estimatedSize: Long = 0 } /** The HashedRelationBroadcastMode requires that rows are broadcasted as a HashedRelation.
*/ From f14f3742e0c98dd306abf02e93d2f10d89bc423f Mon Sep 17 00:00:00 2001 From: Kent Yao Date: Thu, 27 Aug 2020 06:52:34 +0000 Subject: [PATCH 47/54] [SPARK-32696][SQL][TEST-HIVE1.2][TEST-HADOOP2.7] Get columns operation should handle interval column properly ### What changes were proposed in this pull request? This PR lets JDBC clients identify Spark interval columns properly. ### Why are the changes needed? JDBC users can query interval values through the Thrift server and create views with interval columns, e.g. ```sql CREATE global temp view view1 as select interval 1 day as i; ``` but when they ask for the details of the columns of view1, they will fail with `Unrecognized type name: INTERVAL`: ``` Caused by: java.lang.IllegalArgumentException: Unrecognized type name: INTERVAL at org.apache.hadoop.hive.serde2.thrift.Type.getType(Type.java:170) at org.apache.spark.sql.hive.thriftserver.ThriftserverShimUtils$.toJavaSQLType(ThriftserverShimUtils.scala:53) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.$anonfun$addToRowSet$1(SparkGetColumnsOperation.scala:157) at scala.collection.Iterator.foreach(Iterator.scala:941) at scala.collection.Iterator.foreach$(Iterator.scala:941) at scala.collection.AbstractIterator.foreach(Iterator.scala:1429) at scala.collection.IterableLike.foreach(IterableLike.scala:74) at scala.collection.IterableLike.foreach$(IterableLike.scala:73) at org.apache.spark.sql.types.StructType.foreach(StructType.scala:102) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.addToRowSet(SparkGetColumnsOperation.scala:149) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.$anonfun$runInternal$6(SparkGetColumnsOperation.scala:113) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.$anonfun$runInternal$6$adapted(SparkGetColumnsOperation.scala:112) at scala.Option.foreach(Option.scala:407) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.$anonfun$runInternal$5(SparkGetColumnsOperation.scala:112) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.$anonfun$runInternal$5$adapted(SparkGetColumnsOperation.scala:111) at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) at org.apache.spark.sql.hive.thriftserver.SparkGetColumnsOperation.runInternal(SparkGetColumnsOperation.scala:111) ... 34 more ``` ### Does this PR introduce _any_ user-facing change? YES, #### before ![image](https://user-images.githubusercontent.com/8326978/91162239-6cd1ec80-e6fe-11ea-8c2c-914ddb325c4e.png) #### after ![image](https://user-images.githubusercontent.com/8326978/91162025-1a90cb80-e6fe-11ea-94c4-03a6f2ec296b.png) ### How was this patch tested? New tests.
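For illustration, a minimal JDBC client check of the fixed behavior could look like the sketch below; the host, port, credentials, and view name are hypothetical, and it assumes a Spark Thrift server running with this patch and the Hive JDBC driver on the classpath:

```scala
import java.sql.{DriverManager, Types}

object IntervalColumnCheck {
  def main(args: Array[String]): Unit = {
    // Connect to a (hypothetical) local Spark Thrift server via the Hive JDBC driver.
    val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "user", "")
    try {
      // Create a view with an interval column, as in the example above.
      conn.createStatement().execute(
        "CREATE GLOBAL TEMP VIEW view1 AS SELECT interval 1 day AS i")
      // Before this patch, getColumns failed with "Unrecognized type name: INTERVAL";
      // with it, the interval column is reported as java.sql.Types.OTHER.
      val rs = conn.getMetaData.getColumns(null, "global_temp", "view1", "i")
      while (rs.next()) {
        assert(rs.getInt("DATA_TYPE") == Types.OTHER)
        println(s"${rs.getString("COLUMN_NAME")}: ${rs.getString("TYPE_NAME")}")
      }
    } finally {
      conn.close()
    }
  }
}
```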
Closes #29539 from yaooqinn/SPARK-32696. Authored-by: Kent Yao Signed-off-by: Wenchen Fan --- .../SparkGetColumnsOperation.scala | 28 ++++++++++++++++-- .../HiveThriftServer2Suites.scala | 17 +++++++++++ .../SparkMetadataOperationSuite.scala | 29 ++++++++++++++++++- .../thriftserver/ThriftserverShimUtils.scala | 2 -- .../thriftserver/ThriftserverShimUtils.scala | 2 -- 5 files changed, 70 insertions(+), 8 deletions(-) diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala index 069517acd68cc..0a46c837183e8 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala @@ -31,7 +31,6 @@ import org.apache.spark.internal.Logging import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.SessionCatalog -import org.apache.spark.sql.hive.thriftserver.ThriftserverShimUtils.toJavaSQLType import org.apache.spark.sql.types._ /** @@ -131,7 +130,8 @@ private[hive] class SparkGetColumnsOperation( * For array, map, string, and binaries, the column size is variable, return null as unknown. */ private def getColumnSize(typ: DataType): Option[Int] = typ match { - case dt @ (BooleanType | _: NumericType | DateType | TimestampType) => Some(dt.defaultSize) + case dt @ (BooleanType | _: NumericType | DateType | TimestampType | CalendarIntervalType) => + Some(dt.defaultSize) case StructType(fields) => val sizeArr = fields.map(f => getColumnSize(f.dataType)) if (sizeArr.contains(None)) { @@ -164,6 +164,28 @@ private[hive] class SparkGetColumnsOperation( case _ => None } + private def toJavaSQLType(typ: DataType): Integer = typ match { + case NullType => java.sql.Types.NULL + case BooleanType => java.sql.Types.BOOLEAN + case ByteType => java.sql.Types.TINYINT + case ShortType => java.sql.Types.SMALLINT + case IntegerType => java.sql.Types.INTEGER + case LongType => java.sql.Types.BIGINT + case FloatType => java.sql.Types.FLOAT + case DoubleType => java.sql.Types.DOUBLE + case _: DecimalType => java.sql.Types.DECIMAL + case StringType => java.sql.Types.VARCHAR + case BinaryType => java.sql.Types.BINARY + case DateType => java.sql.Types.DATE + case TimestampType => java.sql.Types.TIMESTAMP + case _: ArrayType => java.sql.Types.ARRAY + case _: MapType => java.sql.Types.JAVA_OBJECT + case _: StructType => java.sql.Types.STRUCT + // Hive's year-month and day-time intervals map to java.sql.Types.OTHER + case _: CalendarIntervalType => java.sql.Types.OTHER + case _ => throw new IllegalArgumentException(s"Unrecognized type name: ${typ.sql}") + } + private def addToRowSet( columnPattern: Pattern, dbName: String, @@ -177,7 +199,7 @@ private[hive] class SparkGetColumnsOperation( dbName, // TABLE_SCHEM tableName, // TABLE_NAME column.name, // COLUMN_NAME - toJavaSQLType(column.dataType.sql).asInstanceOf[AnyRef], // DATA_TYPE + toJavaSQLType(column.dataType), // DATA_TYPE column.dataType.sql, // TYPE_NAME getColumnSize(column.dataType).map(_.asInstanceOf[AnyRef]).orNull, // COLUMN_SIZE null, // BUFFER_LENGTH, unused diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala index
3fd46dc82f03f..ad0f97cae3f8e 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala @@ -686,6 +686,23 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { } } + test("Query Intervals in VIEWs through thrift server") { + val viewName1 = "view_interval_1" + val viewName2 = "view_interval_2" + val ddl1 = s"CREATE GLOBAL TEMP VIEW $viewName1 AS SELECT INTERVAL 1 DAY AS i" + val ddl2 = s"CREATE TEMP VIEW $viewName2 as select * from global_temp.$viewName1" + withJdbcStatement(viewName1, viewName2) { statement => + statement.executeQuery(ddl1) + statement.executeQuery(ddl2) + val rs = statement.executeQuery(s"SELECT v1.i as a, v2.i as b FROM global_temp.$viewName1" + + s" v1 join $viewName2 v2 on date_part('DAY', v1.i) = date_part('DAY', v2.i)") + while (rs.next()) { + assert(rs.getString("a") === "1 days") + assert(rs.getString("b") === "1 days") + } + } + } + test("ThriftCLIService FetchResults FETCH_FIRST, FETCH_NEXT, FETCH_PRIOR") { def checkResult(rows: RowSet, start: Long, end: Long): Unit = { assert(rows.getStartOffset() == start) diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala index 5df337044480e..e8932491d138c 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.thriftserver import java.sql.{DatabaseMetaData, ResultSet} -import org.apache.spark.sql.types.{ArrayType, BinaryType, BooleanType, DecimalType, DoubleType, FloatType, IntegerType, MapType, NumericType, StringType, StructType, TimestampType} +import org.apache.spark.sql.types.{ArrayType, BinaryType, BooleanType, CalendarIntervalType, DecimalType, DoubleType, FloatType, IntegerType, MapType, NumericType, StringType, StructType, TimestampType} class SparkMetadataOperationSuite extends HiveThriftJdbcTest { @@ -333,4 +333,31 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { assert(pos === 17, "all columns should have been verified") } } + + test("get columns operation should handle interval column properly") { + val viewName = "view_interval" + val ddl = s"CREATE GLOBAL TEMP VIEW $viewName as select interval 1 day as i" + + withJdbcStatement(viewName) { statement => + statement.execute(ddl) + val data = statement.getConnection.getMetaData + val rowSet = data.getColumns("", "global_temp", viewName, null) + while (rowSet.next()) { + assert(rowSet.getString("TABLE_CAT") === null) + assert(rowSet.getString("TABLE_SCHEM") === "global_temp") + assert(rowSet.getString("TABLE_NAME") === viewName) + assert(rowSet.getString("COLUMN_NAME") === "i") + assert(rowSet.getInt("DATA_TYPE") === java.sql.Types.OTHER) + assert(rowSet.getString("TYPE_NAME").equalsIgnoreCase(CalendarIntervalType.sql)) + assert(rowSet.getInt("COLUMN_SIZE") === CalendarIntervalType.defaultSize) + assert(rowSet.getInt("DECIMAL_DIGITS") === 0) + assert(rowSet.getInt("NUM_PREC_RADIX") === 0) + assert(rowSet.getInt("NULLABLE") === 0) + assert(rowSet.getString("REMARKS") === "") + assert(rowSet.getInt("ORDINAL_POSITION") === 0) + 
assert(rowSet.getString("IS_NULLABLE") === "YES") + assert(rowSet.getString("IS_AUTO_INCREMENT") === "NO") + } + } + } } diff --git a/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala b/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala index fbfc698ecb4bf..ceb74473332f4 100644 --- a/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala +++ b/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala @@ -49,8 +49,6 @@ private[thriftserver] object ThriftserverShimUtils { RowSetFactory.create(getResultSetSchema, getProtocolVersion) } - private[thriftserver] def toJavaSQLType(s: String): Int = Type.getType(s).toJavaSQLType - private[thriftserver] def supportedType(): Seq[Type] = { Seq(NULL_TYPE, BOOLEAN_TYPE, STRING_TYPE, BINARY_TYPE, TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, diff --git a/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala b/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala index 850382fe2bfd7..1f9fd6338ab93 100644 --- a/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala +++ b/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala @@ -50,8 +50,6 @@ private[thriftserver] object ThriftserverShimUtils { RowSetFactory.create(getResultSetSchema, getProtocolVersion, false) } - private[thriftserver] def toJavaSQLType(s: String): Int = Type.getType(s).toJavaSQLType - private[thriftserver] def supportedType(): Seq[Type] = { Seq(NULL_TYPE, BOOLEAN_TYPE, STRING_TYPE, BINARY_TYPE, TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, From ed51a7f083936e9214f27837ba788c766e1e599c Mon Sep 17 00:00:00 2001 From: Dale Clarke Date: Thu, 27 Aug 2020 09:03:39 -0500 Subject: [PATCH 48/54] [SPARK-30654] Bootstrap4 docs upgrade ### What changes were proposed in this pull request? We are using an older version of Bootstrap (v. 2.1.0) for the online documentation site. Bootstrap 2.x was moved to EOL in Aug 2013 and Bootstrap 3.x was moved to EOL in July 2019 (https://github.com/twbs/release). Older versions of Bootstrap are also getting flagged in security scans for various CVEs: https://snyk.io/vuln/SNYK-JS-BOOTSTRAP-72889 https://snyk.io/vuln/SNYK-JS-BOOTSTRAP-173700 https://snyk.io/vuln/npm:bootstrap:20180529 https://snyk.io/vuln/npm:bootstrap:20160627 I haven't validated each CVE, but it would probably be good practice to resolve any potential issues and get on a supported release. The bad news is that there have been quite a few changes between Bootstrap 2 and Bootstrap 4. I've tried updating the library, refactoring/tweaking the CSS and JS to maintain a similar appearance and functionality, and testing the documentation. This is a fairly large change so I'm sure additional testing and fixes will be needed. ### How was this patch tested? This has been manually tested, but as there is a lot of documentation it is possible issues were missed. Additional testing and feedback is welcomed. If it appears a whole section was missed let me know and I'll take a pass at addressing that section. Closes #27369 from clarkead/bootstrap4-docs-upgrade. 
Authored-by: Dale Clarke Signed-off-by: Sean Owen --- docs/_layouts/global.html | 128 +- docs/css/bootstrap-responsive.css | 1040 ---- docs/css/bootstrap-responsive.min.css | 9 - docs/css/bootstrap.css | 5624 -------------------- docs/css/bootstrap.min.css | 14 +- docs/css/bootstrap.min.css.map | 1 + docs/css/main.css | 150 +- docs/js/main.js | 34 +- docs/js/vendor/bootstrap.bundle.min.js | 7 + docs/js/vendor/bootstrap.bundle.min.js.map | 1 + docs/js/vendor/bootstrap.js | 2027 ------- docs/js/vendor/bootstrap.min.js | 6 - 12 files changed, 231 insertions(+), 8810 deletions(-) delete mode 100644 docs/css/bootstrap-responsive.css delete mode 100644 docs/css/bootstrap-responsive.min.css delete mode 100644 docs/css/bootstrap.css create mode 100644 docs/css/bootstrap.min.css.map create mode 100644 docs/js/vendor/bootstrap.bundle.min.js create mode 100644 docs/js/vendor/bootstrap.bundle.min.js.map delete mode 100755 docs/js/vendor/bootstrap.js delete mode 100755 docs/js/vendor/bootstrap.min.js diff --git a/docs/_layouts/global.html b/docs/_layouts/global.html index d05ac6bbe129d..09f7018262a0b 100755 --- a/docs/_layouts/global.html +++ b/docs/_layouts/global.html @@ -25,7 +25,6 @@ } - @@ -55,75 +54,82 @@ -