
[SPARK-21622][ML][SPARKR] Support offset in SparkR GLM
## What changes were proposed in this pull request?
Support offset in SparkR GLM (see #16699).
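
As a quick, hedged usage sketch (the data frame and column names below are illustrative only, not part of this patch), the new `offsetCol` argument takes the name of a column whose values enter the linear predictor with a fixed coefficient of 1.0:

```r
library(SparkR)

# `df` is assumed to be an existing SparkDataFrame with a count response `claims`,
# predictors `age` and `region`, and a log-exposure column `logExposure`.
model <- spark.glm(df, claims ~ age + region, family = poisson(),
                   offsetCol = "logExposure")
summary(model)
```

Leaving `offsetCol` unset (or passing an empty string) keeps the previous behavior: all instance offsets are treated as 0.0.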

Author: actuaryzhang <actuaryzhang10@gmail.com>

Closes #18831 from actuaryzhang/sparkROffset.
actuaryzhang authored and Felix Cheung committed Aug 6, 2017
1 parent 74b4784 commit 55aa4da
Showing 3 changed files with 29 additions and 5 deletions.
22 changes: 18 additions & 4 deletions R/pkg/R/mllib_regression.R
@@ -76,6 +76,8 @@ setClass("IsotonicRegressionModel", representation(jobj = "jobj"))
 #'                               "frequencyDesc", "frequencyAsc", "alphabetDesc", and "alphabetAsc".
 #'                               The default value is "frequencyDesc". When the ordering is set to
 #'                               "alphabetDesc", this drops the same category as R when encoding strings.
+#' @param offsetCol the offset column name. If this is not set or empty, we treat all instance offsets
+#'                  as 0.0. The feature specified as offset has a constant coefficient of 1.0.
 #' @param ... additional arguments passed to the method.
 #' @aliases spark.glm,SparkDataFrame,formula-method
 #' @return \code{spark.glm} returns a fitted generalized linear model.
@@ -127,7 +129,8 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"),
           function(data, formula, family = gaussian, tol = 1e-6, maxIter = 25, weightCol = NULL,
                    regParam = 0.0, var.power = 0.0, link.power = 1.0 - var.power,
                    stringIndexerOrderType = c("frequencyDesc", "frequencyAsc",
-                                              "alphabetDesc", "alphabetAsc")) {
+                                              "alphabetDesc", "alphabetAsc"),
+                   offsetCol = NULL) {
 
             stringIndexerOrderType <- match.arg(stringIndexerOrderType)
             if (is.character(family)) {
@@ -159,12 +162,19 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"),
               weightCol <- as.character(weightCol)
             }
 
+            if (!is.null(offsetCol)) {
+              offsetCol <- as.character(offsetCol)
+              if (nchar(offsetCol) == 0) {
+                offsetCol <- NULL
+              }
+            }
+
             # For known families, Gamma is upper-cased
             jobj <- callJStatic("org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper",
                                 "fit", formula, data@sdf, tolower(family$family), family$link,
                                 tol, as.integer(maxIter), weightCol, regParam,
                                 as.double(var.power), as.double(link.power),
-                                stringIndexerOrderType)
+                                stringIndexerOrderType, offsetCol)
             new("GeneralizedLinearRegressionModel", jobj = jobj)
           })
 
@@ -192,6 +202,8 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"),
 #'                               "frequencyDesc", "frequencyAsc", "alphabetDesc", and "alphabetAsc".
 #'                               The default value is "frequencyDesc". When the ordering is set to
 #'                               "alphabetDesc", this drops the same category as R when encoding strings.
+#' @param offsetCol the offset column name. If this is not set or empty, we treat all instance offsets
+#'                  as 0.0. The feature specified as offset has a constant coefficient of 1.0.
 #' @return \code{glm} returns a fitted generalized linear model.
 #' @rdname glm
 #' @export
@@ -209,10 +221,12 @@ setMethod("glm", signature(formula = "formula", family = "ANY", data = "SparkDataFrame"),
           function(formula, family = gaussian, data, epsilon = 1e-6, maxit = 25, weightCol = NULL,
                    var.power = 0.0, link.power = 1.0 - var.power,
                    stringIndexerOrderType = c("frequencyDesc", "frequencyAsc",
-                                              "alphabetDesc", "alphabetAsc")) {
+                                              "alphabetDesc", "alphabetAsc"),
+                   offsetCol = NULL) {
             spark.glm(data, formula, family, tol = epsilon, maxIter = maxit, weightCol = weightCol,
                       var.power = var.power, link.power = link.power,
-                      stringIndexerOrderType = stringIndexerOrderType)
+                      stringIndexerOrderType = stringIndexerOrderType,
+                      offsetCol = offsetCol)
           })
 
 # Returns the summary of a model produced by glm() or spark.glm(), similarly to R's summary().
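The `offsetCol` handling added above normalizes an empty string to `NULL` before calling into the JVM, so an empty offset column name behaves the same as not supplying one. A minimal sketch, assuming a hypothetical SparkDataFrame `df` with columns `y` and `x`:

```r
# Both calls fit the same model: offsetCol = "" is converted to NULL in spark.glm,
# and the Scala wrapper (further below) only calls setOffsetCol for a non-null value.
m1 <- spark.glm(df, y ~ x, family = gaussian())
m2 <- spark.glm(df, y ~ x, family = gaussian(), offsetCol = "")
```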
8 changes: 8 additions & 0 deletions R/pkg/tests/fulltests/test_mllib_regression.R
@@ -173,6 +173,14 @@ test_that("spark.glm summary", {
   expect_equal(stats$df.residual, rStats$df.residual)
   expect_equal(stats$aic, rStats$aic)
 
+  # Test spark.glm works with offset
+  training <- suppressWarnings(createDataFrame(iris))
+  stats <- summary(spark.glm(training, Sepal_Width ~ Sepal_Length + Species,
+                             family = poisson(), offsetCol = "Petal_Length"))
+  rStats <- suppressWarnings(summary(glm(Sepal.Width ~ Sepal.Length + Species,
+                                         data = iris, family = poisson(), offset = iris$Petal.Length)))
+  expect_true(all(abs(rStats$coefficients - stats$coefficients) < 1e-3))
+
   # Test summary works on base GLM models
   baseModel <- stats::glm(Sepal.Width ~ Sepal.Length + Species, data = iris)
   baseSummary <- summary(baseModel)
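The test above uses Petal_Length as an arbitrary offset only to cross-check coefficients against base R's glm. In practice, a common use of an offset with a log-link Poisson model is rate modeling, where log(exposure) is supplied as the offset so it enters the linear predictor with coefficient 1.0. A hedged sketch, assuming a SparkDataFrame `df` with hypothetical columns `events`, `x1`, `x2`, and a positive `exposure`:

```r
# Build a log-exposure column, then fit a rate model via the new offsetCol argument.
df <- withColumn(df, "logExposure", log(df$exposure))
rateModel <- spark.glm(df, events ~ x1 + x2, family = poisson(),
                       offsetCol = "logExposure")
summary(rateModel)
```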
4 changes: 3 additions & 1 deletion mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
@@ -77,7 +77,8 @@ private[r] object GeneralizedLinearRegressionWrapper
       regParam: Double,
       variancePower: Double,
       linkPower: Double,
-      stringIndexerOrderType: String): GeneralizedLinearRegressionWrapper = {
+      stringIndexerOrderType: String,
+      offsetCol: String): GeneralizedLinearRegressionWrapper = {
     // scalastyle:on
     val rFormula = new RFormula().setFormula(formula)
       .setStringIndexerOrderType(stringIndexerOrderType)
@@ -99,6 +100,7 @@ private[r] object GeneralizedLinearRegressionWrapper
       glr.setLink(link)
     }
     if (weightCol != null) glr.setWeightCol(weightCol)
+    if (offsetCol != null) glr.setOffsetCol(offsetCol)
 
     val pipeline = new Pipeline()
       .setStages(Array(rFormulaModel, glr))
