Skip to content

Commit

Permalink
Remove unnecessary imports and fix a test
Browse files Browse the repository at this point in the history
  • Loading branch information
osopardo1 committed Dec 19, 2024
1 parent 595b966 commit 8ed7393
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 7 deletions.
3 changes: 0 additions & 3 deletions src/main/scala/io/qbeast/internal/rules/SampleRule.scala
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,8 @@
*/
package io.qbeast.internal.rules

import io.qbeast.core.model.QbeastOptions
import io.qbeast.core.model.Weight
import io.qbeast.core.model.WeightRange
import io.qbeast.spark.index.DefaultFileIndex
import io.qbeast.spark.internal.expressions.QbeastMurmur3Hash
import io.qbeast.spark.internal.rules.QbeastRelation
import io.qbeast.IndexedColumns
Expand All @@ -32,7 +30,6 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.logical.Project
import org.apache.spark.sql.catalyst.plans.logical.Sample
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.HadoopFsRelation
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.SparkSession

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -451,11 +451,11 @@ class TransformerIndexingTest

it should "be able to change Transformer type at will" in withQbeastContextSparkAndTmpDir {
(spark, tmpDir) =>
val data = loadTestData(spark).sample(0.1)
val dataFirst = spark.range(1).toDF("price")
val data = spark.range(2, 10).toDF("price")

// Creates a LinearTransformer with IdentityTransformation
data
.limit(1)
.write
dataFirst.write
.mode("append")
.format("qbeast")
.option("columnsToIndex", "price")
Expand Down

0 comments on commit 8ed7393

Please sign in to comment.