Skip to content

Commit

Permalink
Merge remote-tracking branch 'apache/master' into newCodeGen
Browse files Browse the repository at this point in the history
Conflicts:
	sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
  • Loading branch information
marmbrus committed Jul 30, 2014
2 parents f34122d + 86534d0 commit 96ef82c
Show file tree
Hide file tree
Showing 5 changed files with 144 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ class Analyzer(catalog: Catalog, registry: FunctionRegistry, caseSensitive: Bool
object UnresolvedHavingClauseAttributes extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case filter @ Filter(havingCondition, aggregate @ Aggregate(_, originalAggExprs, _))
if !filter.resolved && aggregate.resolved && containsAggregate(havingCondition) => {
if aggregate.resolved && containsAggregate(havingCondition) => {
val evaluatedCondition = Alias(havingCondition, "havingCondition")()
val aggExprsWithHaving = evaluatedCondition +: originalAggExprs

Expand Down
4 changes: 4 additions & 0 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import java.util.Properties
import scala.collection.JavaConverters._

object SQLConf {
val COMPRESS_CACHED = "spark.sql.inMemoryColumnarStorage.compressed"
val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold"
val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes"
val AUTO_CONVERT_JOIN_SIZE = "spark.sql.auto.convert.join.size"
Expand Down Expand Up @@ -52,6 +53,9 @@ trait SQLConf {
/** ************************ Spark SQL Params/Hints ******************* */
// TODO: refactor so that these hints accessors don't pollute the name space of SQLContext?

/** When true, tables cached using the in-memory columnar caching will be compressed. */
private[spark] def useCompression: Boolean = get(COMPRESS_CACHED, "false").toBoolean

/** Number of partitions to use for shuffle operators. */
private[spark] def numShufflePartitions: Int = get(SHUFFLE_PARTITIONS, "200").toInt

Expand Down
2 changes: 0 additions & 2 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
Original file line number Diff line number Diff line change
Expand Up @@ -193,8 +193,6 @@ class SQLContext(@transient val sparkContext: SparkContext)
currentTable.logicalPlan

case _ =>
val useCompression =
sparkContext.conf.getBoolean("spark.sql.inMemoryColumnarStorage.compressed", false)
InMemoryRelation(useCompression, executePlan(currentTable).executedPlan)
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
0
5
12
15
18
24
26
35
37
42
51
58
67
70
72
76
83
84
90
95
97
98
100
103
104
113
118
119
120
125
128
129
134
137
138
146
149
152
164
165
167
169
172
174
175
176
179
187
191
193
195
197
199
200
203
205
207
208
209
213
216
217
219
221
223
224
229
230
233
237
238
239
242
255
256
265
272
273
277
278
280
281
282
288
298
307
309
311
316
317
318
321
322
325
327
331
333
342
344
348
353
367
369
382
384
395
396
397
399
401
403
404
406
409
413
414
417
424
429
430
431
438
439
454
458
459
462
463
466
468
469
478
480
489
492
498
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@ class HiveQuerySuite extends HiveComparisonTest {
createQueryTest("case else null",
"""SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else null end FROM src LIMIT 1""")

createQueryTest("having no references",
"SELECT key FROM src GROUP BY key HAVING COUNT(*) > 1")

createQueryTest("boolean = number",
"""
|SELECT
Expand Down

0 comments on commit 96ef82c

Please sign in to comment.