[SPARK-22799][ML] Bucketizer should throw exception if single- and multi-column params are both set #19993

Closed · wants to merge 19 commits · Changes from 11 commits
36 changes: 13 additions & 23 deletions mllib/src/main/scala/org/apache/spark/ml/feature/Bucketizer.scala
@@ -34,9 +34,9 @@ import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
 /**
  * `Bucketizer` maps a column of continuous features to a column of feature buckets. Since 2.3.0,
  * `Bucketizer` can map multiple columns at once by setting the `inputCols` parameter. Note that
- * when both the `inputCol` and `inputCols` parameters are set, a log warning will be printed and
- * only `inputCol` will take effect, while `inputCols` will be ignored. The `splits` parameter is
- * only used for single column usage, and `splitsArray` is for multiple columns.
+ * when both the `inputCol` and `inputCols` parameters are set, an Exception will be thrown. The
+ * `splits` parameter is only used for single column usage, and `splitsArray` is for multiple
+ * columns.
  */
 @Since("1.4.0")
 final class Bucketizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
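
To make the behavior change concrete, here is a minimal sketch of the new failure mode (assuming a local SparkSession; the toy DataFrame mirrors the one used in BucketizerSuite below):

import org.apache.spark.ml.feature.Bucketizer
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[1]").appName("bucketizer-sketch").getOrCreate()
import spark.implicits._

val df = Seq((0.5, 0.3), (0.5, -0.4)).toDF("feature1", "feature2")

// Setting both the single-column and multi-column input params is now an error.
val bucketizer = new Bucketizer()
  .setInputCol("feature1")
  .setOutputCol("result")
  .setSplits(Array(-0.5, 0.0, 0.5))
  .setInputCols(Array("feature1", "feature2"))

try {
  // transformSchema now throws instead of logging a warning and ignoring `inputCols`.
  bucketizer.transform(df)
} catch {
  case e: IllegalArgumentException =>
    println(e.getMessage) // `inputCols` and `inputCol` cannot both be set.
}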
@@ -134,28 +134,11 @@ final class Bucketizer @Since("1.4.0") (@Since("1.4.0") override val uid: String
@Since("2.3.0")
def setOutputCols(value: Array[String]): this.type = set(outputCols, value)

/**
* Determines whether this `Bucketizer` is going to map multiple columns. If and only if
* `inputCols` is set, it will map multiple columns. Otherwise, it just maps a column specified
* by `inputCol`. A warning will be printed if both are set.
*/
private[feature] def isBucketizeMultipleColumns(): Boolean = {
if (isSet(inputCols) && isSet(inputCol)) {
logWarning("Both `inputCol` and `inputCols` are set, we ignore `inputCols` and this " +
"`Bucketizer` only map one column specified by `inputCol`")
false
} else if (isSet(inputCols)) {
true
} else {
false
}
}

@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
val transformedSchema = transformSchema(dataset.schema)

val (inputColumns, outputColumns) = if (isBucketizeMultipleColumns()) {
val (inputColumns, outputColumns) = if (isSet(inputCols)) {
($(inputCols).toSeq, $(outputCols).toSeq)
} else {
(Seq($(inputCol)), Seq($(outputCol)))
@@ -170,7 +153,7 @@ final class Bucketizer @Since("1.4.0") (@Since("1.4.0") override val uid: String
       }
     }

-    val seqOfSplits = if (isBucketizeMultipleColumns()) {
+    val seqOfSplits = if (isSet(inputCols)) {
       $(splitsArray).toSeq
     } else {
       Seq($(splits))
@@ -201,7 +184,14 @@ final class Bucketizer @Since("1.4.0") (@Since("1.4.0") override val uid: String

@Since("1.4.0")
override def transformSchema(schema: StructType): StructType = {
if (isBucketizeMultipleColumns()) {
ParamValidators.checkMultiColumnParams(this)
if (isSet(inputCol) && isSet(splitsArray)) {
ParamValidators.raiseIncompatibleParamsException("inputCol", "splitsArray")
}
if (isSet(inputCols) && isSet(splits)) {
ParamValidators.raiseIncompatibleParamsException("inputCols", "splits")
}
if (isSet(inputCols)) {
var transformedSchema = schema
$(inputCols).zip($(outputCols)).zipWithIndex.map { case ((inputCol, outputCol), idx) =>
SchemaUtils.checkNumericType(transformedSchema, inputCol)
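And the supported multi-column path, for contrast (a sketch continuing the session and df from the example above; each input column gets its own splits via setSplitsArray):

val multiCol = new Bucketizer()
  .setInputCols(Array("feature1", "feature2"))
  .setOutputCols(Array("result1", "result2"))
  .setSplitsArray(Array(
    Array(-0.5, 0.0, 0.5),
    Array(Double.NegativeInfinity, 0.0, Double.PositiveInfinity)))

// Each input column is bucketized with its own splits; mixing e.g.
// `inputCols` with `splits` is rejected by the checks added above.
multiCol.transform(df).show()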
22 changes: 22 additions & 0 deletions mllib/src/main/scala/org/apache/spark/ml/param/params.scala
@@ -31,6 +31,7 @@ import org.json4s.jackson.JsonMethods._
 import org.apache.spark.SparkException
 import org.apache.spark.annotation.{DeveloperApi, Since}
 import org.apache.spark.ml.linalg.{JsonMatrixConverter, JsonVectorConverter, Matrix, Vector}
+import org.apache.spark.ml.param.shared._
 import org.apache.spark.ml.util.Identifiable

 /**
@@ -249,6 +250,27 @@ object ParamValidators {
   def arrayLengthGt[T](lowerBound: Double): Array[T] => Boolean = { (value: Array[T]) =>
     value.length > lowerBound
   }

+  /**
+   * Checks that `inputCol` and `inputCols` are not both set, and likewise that `outputCol` and
+   * `outputCols` are not both set. If either pair is set simultaneously, an
+   * `IllegalArgumentException` is raised.
+   *
+   * @param model the `Params` instance to check
+   */
+  private[spark] def checkMultiColumnParams(model: Params): Unit = {
+    model match {
+      case m: HasInputCols with HasInputCol if m.isSet(m.inputCols) && m.isSet(m.inputCol) =>
+        raiseIncompatibleParamsException("inputCols", "inputCol")
+      case m: HasOutputCols with HasOutputCol if m.isSet(m.outputCols) && m.isSet(m.outputCol) =>
+        raiseIncompatibleParamsException("outputCols", "outputCol")
+      case _ =>
+    }

Member:
If we need to check other exclusive params, e.g., inputCol and splitsArray or inputCols and splits, why not just have a method like:

def checkExclusiveParams(model: Params, params: String*): Unit = {
  if (params.count(p => model.isSet(model.getParam(p))) > 1) {
    val paramString = params.mkString("`", "`, `", "`")
    throw new IllegalArgumentException(s"$paramString are exclusive, but more than one among them are set.")
  }
}
ParamValidators.checkExclusiveParams(this, "inputCol", "inputCols")
ParamValidators.checkExclusiveParams(this, "outputCol", "outputCols")
ParamValidators.checkExclusiveParams(this, "inputCol", "splitsArray")
ParamValidators.checkExclusiveParams(this, "inputCols", "splits")

Member:
I added this method too in #20146.

Contributor Author:
I think we can use that method once merged, thanks.

Contributor:
I am not sure if #20146 will get merged for 2.3, but I think we must merge this PR for 2.3 because I'd prefer not to have this inconsistency in param error handling between QuantileDiscretizer and Bucketizer. This is a relatively small change, so we can merge it into the branch if we move quickly.

Member:
Based on #20146 (comment) from @WeichenXu123, I think #20146 cannot get merged for 2.3.

Member:
If this method looks good to you, maybe you can just copy it from #20146 to use here.

Contributor Author:
@MLnick @viirya in order to address https://github.com/apache/spark/pull/19993/files#r161682506, I was thinking to leave this method as it is (just renaming it as per @viirya's suggestion) and only add an additionalExclusiveParams: (String, String)* argument to the function. WDYT?

Contributor:
I think @viirya's method is simpler and more general, so why not use it?

Contributor:
@viirya your actual method in #20146 is slightly different (see here). Is that the best version to use?

Member:
@MLnick Yes. I didn't test the method posted here. The model possibly doesn't have the params, so we need to check it with model.hasParam. Please use the method in #20146.
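
For reference, a `hasParam`-guarded variant along those lines might look like this (a sketch reconstructed from the discussion above, not the authoritative code from #20146):

// Intended to live in object ParamValidators, alongside the methods in this diff.
def checkExclusiveParams(model: Params, params: String*): Unit = {
  // Only consider params the model actually has; getParam would throw otherwise.
  val setParams = params.filter(p => model.hasParam(p) && model.isSet(model.getParam(p)))
  if (setParams.size > 1) {
    val paramString = setParams.mkString("`", "`, `", "`")
    throw new IllegalArgumentException(
      s"$paramString are exclusive, but more than one among them are set.")
  }
}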

+  }
+
+  private[spark] def raiseIncompatibleParamsException(
+      paramName1: String,
+      paramName2: String): Unit = {
+    throw new IllegalArgumentException(s"`$paramName1` and `$paramName2` cannot both be set.")
+  }
 }

 // specialize primitive-typed params because Java doesn't recognize scala.Double, scala.Int, ...
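To see the validator end to end, a minimal sketch (ExclusiveParamsDemo is a hypothetical class, not part of this PR; it would have to live under org.apache.spark.ml since the shared param traits and checkMultiColumnParams are package-private):

import org.apache.spark.ml.param.{ParamMap, ParamValidators, Params}
import org.apache.spark.ml.param.shared.{HasInputCol, HasInputCols}
import org.apache.spark.ml.util.Identifiable

class ExclusiveParamsDemo(override val uid: String) extends Params
    with HasInputCol with HasInputCols {
  def this() = this(Identifiable.randomUID("exclusiveParamsDemo"))
  def setInputCol(value: String): this.type = set(inputCol, value)
  def setInputCols(value: Array[String]): this.type = set(inputCols, value)
  override def copy(extra: ParamMap): ExclusiveParamsDemo = defaultCopy(extra)
}

val p = new ExclusiveParamsDemo().setInputCol("a").setInputCols(Array("a", "b"))
// Throws IllegalArgumentException: `inputCols` and `inputCol` cannot both be set.
ParamValidators.checkMultiColumnParams(p)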
mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala
@@ -401,15 +401,9 @@ class BucketizerSuite extends SparkFunSuite with MLlibTestSparkContext with Defa
     }
   }

-  test("Both inputCol and inputCols are set") {
-    val bucket = new Bucketizer()
-      .setInputCol("feature1")
-      .setOutputCol("result")
-      .setSplits(Array(-0.5, 0.0, 0.5))
-      .setInputCols(Array("feature1", "feature2"))
-
-    // When both are set, we ignore `inputCols` and just map the column specified by `inputCol`.
-    assert(bucket.isBucketizeMultipleColumns() == false)
test("assert exception is thrown if both multi-column and single-column params are set") {

Contributor:
We should also test the other exclusive params (input cols and splits params) as per https://github.com/apache/spark/pull/19993/files#r159133936

+    val df = Seq((0.5, 0.3), (0.5, -0.4)).toDF("feature1", "feature2")
+    ParamsSuite.testMultiColumnParams(classOf[Bucketizer], df)
+  }
 }

44 changes: 44 additions & 0 deletions mllib/src/test/scala/org/apache/spark/ml/param/ParamsSuite.scala
@@ -20,8 +20,11 @@ package org.apache.spark.ml.param
 import java.io.{ByteArrayOutputStream, ObjectOutputStream}

 import org.apache.spark.SparkFunSuite
+import org.apache.spark.ml.{Estimator, Transformer}
 import org.apache.spark.ml.linalg.{Vector, Vectors}
+import org.apache.spark.ml.param.shared.{HasInputCol, HasInputCols, HasOutputCol, HasOutputCols}

Contributor:
I don't think these are used any longer?

 import org.apache.spark.ml.util.MyParams
+import org.apache.spark.sql.Dataset

 class ParamsSuite extends SparkFunSuite {

@@ -430,4 +433,45 @@ object ParamsSuite extends SparkFunSuite {
     require(copyReturnType === obj.getClass,
       s"${clazz.getName}.copy should return ${clazz.getName} instead of ${copyReturnType.getName}.")
   }

+  /**
+   * Checks that the class throws an exception when both `inputCols` and `inputCol` are set, and
+   * when both `outputCols` and `outputCol` are set. Each check is performed only when the class
+   * extends both traits of the corresponding pair (`HasInputCols`/`HasInputCol` and
+   * `HasOutputCols`/`HasOutputCol`).
+   *
+   * @param paramsClass The Class to be checked
+   * @param dataset A `Dataset` to use in the tests
+   */
+  def testMultiColumnParams(paramsClass: Class[_ <: Params], dataset: Dataset[_]): Unit = {
+    val cols = dataset.columns
+
+    if (classOf[HasInputCols].isAssignableFrom(paramsClass)
+        && classOf[HasInputCol].isAssignableFrom(paramsClass)) {
+      val model = paramsClass.newInstance()
+      model.set(model.asInstanceOf[HasInputCols].inputCols, cols)
+      model.set(model.asInstanceOf[HasInputCol].inputCol, cols(0))
+      val e = intercept[IllegalArgumentException] {
+        model match {
+          case t: Transformer => t.transform(dataset)
+          case e: Estimator[_] => e.fit(dataset)
+        }
+      }
+      assert(e.getMessage.contains("cannot both be set"))
+    }
+
+    if (classOf[HasOutputCols].isAssignableFrom(paramsClass)
+        && classOf[HasOutputCol].isAssignableFrom(paramsClass)) {
+      val model = paramsClass.newInstance()
+      model.set(model.asInstanceOf[HasOutputCols].outputCols, cols)
+      model.set(model.asInstanceOf[HasOutputCol].outputCol, cols(0))
+      val e = intercept[IllegalArgumentException] {
+        model match {
+          case t: Transformer => t.transform(dataset)
+          case e: Estimator[_] => e.fit(dataset)
+        }
+      }
+      assert(e.getMessage.contains("cannot both be set"))
+    }
+  }
 }
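
Other suites could then reuse the helper the same way BucketizerSuite does, e.g. (hypothetically, assuming QuantileDiscretizer adopts the same exclusive-param checks):

// Hypothetical reuse in QuantileDiscretizerSuite; the helper handles
// Estimators via fit() just as it handles Transformers via transform().
val df = Seq((0.5, 0.3), (0.5, -0.4)).toDF("feature1", "feature2")
ParamsSuite.testMultiColumnParams(classOf[QuantileDiscretizer], df)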