diff --git a/.circleci/config.yml b/.circleci/config.yml index 980bc8ada4b35..d0e66bbcb9195 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,9 +28,6 @@ all-branches-and-tags: &all-branches-and-tags # Step templates step_templates: - restore-build-binaries-cache: &restore-build-binaries-cache - restore_cache: - key: build-binaries-{{ checksum "build/mvn" }}-{{ checksum "build/sbt" }} restore-ivy-cache: &restore-ivy-cache restore_cache: keys: @@ -136,20 +133,11 @@ jobs: - maven-dependency-cache-{{ checksum "pom.xml" }} # Fallback - see https://circleci.com/docs/2.0/configuration-reference/#example-2 - maven-dependency-cache- - # Given the build-maven cache, this is superfluous, but leave it in in case we will want to remove the former - - restore_cache: - keys: - - build-binaries-{{ checksum "build/mvn" }}-{{ checksum "build/sbt" }} - - build-binaries- - run: command: ./build/mvn -DskipTests -Psparkr -Phadoop-palantir install no_output_timeout: 20m # Get sbt to run trivially, ensures its launcher is downloaded under build/ - run: ./build/sbt -h || true - - save_cache: - key: build-binaries-{{ checksum "build/mvn" }}-{{ checksum "build/sbt" }} - paths: - - ./build - save_cache: key: maven-dependency-cache-{{ checksum "pom.xml" }} paths: @@ -165,7 +153,6 @@ jobs: # Failed to execute goal on project spark-assembly_2.11: Could not resolve dependencies for project org.apache.spark:spark-assembly_2.11:pom:2.4.0-SNAPSHOT - restore_cache: key: maven-dependency-cache-{{ checksum "pom.xml" }} - - *restore-build-binaries-cache - run: name: Run style tests command: dev/run-style-tests.py @@ -181,7 +168,6 @@ jobs: # key: build-maven-{{ .Branch }}-{{ .BuildNum }} - restore_cache: key: maven-dependency-cache-{{ checksum "pom.xml" }} - - *restore-build-binaries-cache - run: | dev/run-build-tests.py | tee /tmp/run-build-tests.log - store_artifacts: @@ -206,7 +192,6 @@ jobs: fi - *restore-ivy-cache - *restore-home-sbt-cache - - *restore-build-binaries-cache - run: name: Download all external dependencies for the test configuration (which extends compile) and ensure we update first command: dev/sbt test:externalDependencyClasspath oldDeps/test:externalDependencyClasspath @@ -251,7 +236,6 @@ jobs: - attach_workspace: at: . - *restore-ivy-cache - - *restore-build-binaries-cache - *restore-home-sbt-cache - run: | dev/run-backcompat-tests.py | tee /tmp/run-backcompat-tests.log @@ -305,7 +289,7 @@ jobs: run-scala-tests: <<: *test-defaults # project/CirclePlugin.scala does its own test splitting in SBT based on CIRCLE_NODE_INDEX, CIRCLE_NODE_TOTAL - parallelism: 12 + parallelism: 8 # Spark runs a lot of tests in parallel, we need 16 GB of RAM for this resource_class: xlarge steps: @@ -320,7 +304,6 @@ jobs: - *link-in-build-sbt-cache # --- - *restore-ivy-cache - - *restore-build-binaries-cache - *restore-home-sbt-cache - restore_cache: keys: @@ -407,7 +390,6 @@ jobs: - *checkout-code - restore_cache: key: maven-dependency-cache-{{ checksum "pom.xml" }} - - *restore-build-binaries-cache - run: command: dev/set_version_and_package.sh no_output_timeout: 15m diff --git a/.sbtopts b/.sbtopts new file mode 100644 index 0000000000000..9afbdca6db1c7 --- /dev/null +++ b/.sbtopts @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +-J-Xmx4G +-J-Xss4m diff --git a/build/sbt-launch-lib.bash b/build/sbt-launch-lib.bash index 162bfbf2257c7..423ba3b766e61 100755 --- a/build/sbt-launch-lib.bash +++ b/build/sbt-launch-lib.bash @@ -39,7 +39,7 @@ dlog () { acquire_sbt_jar () { SBT_VERSION=`awk -F "=" '/sbt\.version/ {print $2}' ./project/build.properties` - URL1=https://dl.bintray.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch/${SBT_VERSION}/sbt-launch.jar + URL1=https://repo1.maven.org/maven2/org/scala-sbt/sbt-launch/${SBT_VERSION}/sbt-launch-${SBT_VERSION}.jar JAR=build/sbt-launch-${SBT_VERSION}.jar sbt_jar=$JAR diff --git a/dev/run-tests.py b/dev/run-tests.py index bd8446d6dbe67..9d29e3898233d 100644 --- a/dev/run-tests.py +++ b/dev/run-tests.py @@ -390,7 +390,8 @@ def build_spark_assembly_sbt(extra_profiles, checkstyle=False): if checkstyle: run_java_style_checks(build_profiles) - build_spark_unidoc_sbt(extra_profiles) + # TODO(lmartini): removed because broken, checks generated classes + # build_spark_unidoc_sbt(extra_profiles) def build_apache_spark(build_tool, extra_profiles): diff --git a/project/CirclePlugin.scala b/project/CirclePlugin.scala index 18d153c082dce..0d0aea2276870 100644 --- a/project/CirclePlugin.scala +++ b/project/CirclePlugin.scala @@ -288,8 +288,8 @@ object CirclePlugin extends AutoPlugin { } }, - test := (test, copyTestReportsToCircle) { (test, copy) => - test.doFinally(copy.map(_ => ())) - }.value + test := (test andFinally Def.taskDyn { + copyTestReportsToCircle + }).value )) } diff --git a/project/MimaBuild.scala b/project/MimaBuild.scala index 10c02103aeddb..badcdf34a2ad0 100644 --- a/project/MimaBuild.scala +++ b/project/MimaBuild.scala @@ -22,9 +22,7 @@ import com.typesafe.tools.mima.core._ import com.typesafe.tools.mima.core.MissingClassProblem import com.typesafe.tools.mima.core.MissingTypesProblem import com.typesafe.tools.mima.core.ProblemFilters._ -import com.typesafe.tools.mima.plugin.MimaKeys.{mimaBinaryIssueFilters, mimaPreviousArtifacts} -import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings - +import com.typesafe.tools.mima.plugin.MimaKeys.{mimaBinaryIssueFilters, mimaPreviousArtifacts, mimaFailOnNoPrevious} object MimaBuild { @@ -86,14 +84,17 @@ object MimaBuild { ignoredMembers.flatMap(excludeMember) ++ MimaExcludes.excludes(currentSparkVersion) } - def mimaSettings(sparkHome: File, projectRef: ProjectRef) = { + def mimaSettings(sparkHome: File, projectRef: ProjectRef): Seq[Setting[_]] = { val organization = "org.apache.spark" - val previousSparkVersion = "2.4.0" + val previousSparkVersion = "3.0.0" val project = projectRef.project val fullId = "spark-" + project + "_2.12" - mimaDefaultSettings ++ - Seq(mimaPreviousArtifacts := Set(organization % fullId % previousSparkVersion), - mimaBinaryIssueFilters ++= ignoredABIProblems(sparkHome, version.value)) + + Seq( + mimaFailOnNoPrevious := true, + mimaPreviousArtifacts := Set(organization % fullId % previousSparkVersion), + mimaBinaryIssueFilters 
++= ignoredABIProblems(sparkHome, version.value) + ) } } diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala index 793ebecd965f7..6069b4bf48835 100644 --- a/project/MimaExcludes.scala +++ b/project/MimaExcludes.scala @@ -36,6 +36,44 @@ object MimaExcludes { // Exclude rules for 3.0.x lazy val v30excludes = v24excludes ++ Seq( + //[SPARK-21708][BUILD] Migrate build to sbt 1.x + // mima plugin update caused new incompatibilities to be detected + // core module + // TODO(lmartini): this group was originally on top of 3.1 but applied on 3.0 because we picked the above commit + // on top of 3.0 + ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.shuffle.sort.io.LocalDiskShuffleMapOutputWriter.commitAllPartitions"), + ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.shuffle.api.ShuffleMapOutputWriter.commitAllPartitions"), + ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.shuffle.api.ShuffleMapOutputWriter.commitAllPartitions"), + // mllib module + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionTrainingSummary.totalIterations"), + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.LogisticRegressionTrainingSummary.$init$"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.labels"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.truePositiveRateByLabel"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.falsePositiveRateByLabel"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.precisionByLabel"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.recallByLabel"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.fMeasureByLabel"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.fMeasureByLabel"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.accuracy"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.weightedTruePositiveRate"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.weightedFalsePositiveRate"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.weightedRecall"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.weightedPrecision"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.weightedFMeasure"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.weightedFMeasure"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.roc"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.areaUnderROC"), + 
ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.pr"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.fMeasureByThreshold"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.precisionByThreshold"), + ProblemFilters.exclude[NewMixinForwarderProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.recallByThreshold"), + ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.FMClassifier.trainImpl"), + ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.FMRegressor.trainImpl"), + // TODO(lmartini): Additional excludes not in upstream but unique to palantir fork + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkContext.initializeForcefully"), + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkContext.initializeForcefully"), + ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.broadcast.Broadcast.initializeForcefully"), + // [SPARK-23429][CORE] Add executor memory metrics to heartbeat and expose in executors REST API ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate.apply"), ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate.copy"), diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index cc405134a3109..6392117cae22a 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -28,13 +28,13 @@ import scala.collection.mutable.Stack import sbt._ import sbt.Classpaths.publishTask import sbt.Keys._ -import sbtunidoc.Plugin.UnidocKeys.unidocGenjavadocVersion import com.etsy.sbt.checkstyle.CheckstylePlugin.autoImport._ import com.simplytyped.Antlr4Plugin._ import com.typesafe.sbt.pom.{MavenHelper, PomBuild, SbtPomKeys} import com.typesafe.tools.mima.plugin.MimaKeys import org.scalastyle.sbt.ScalastylePlugin.autoImport._ import org.scalastyle.sbt.Tasks +import sbtassembly.AssemblyPlugin.autoImport._ import spray.revolver.RevolverPlugin._ @@ -84,6 +84,8 @@ object BuildCommons { object SparkBuild extends PomBuild { import BuildCommons._ + import sbtunidoc.GenJavadocPlugin + import sbtunidoc.GenJavadocPlugin.autoImport._ import scala.collection.mutable.Map val projectsMap: Map[String, Seq[Setting[_]]] = Map.empty @@ -121,13 +123,10 @@ object SparkBuild extends PomBuild { override val userPropertiesMap = System.getProperties.asScala.toMap lazy val MavenCompile = config("m2r") extend(Compile) - lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy") + lazy val publishLocalBoth = TaskKey[Unit]("localPublish", "publish local for m2 and ivy", KeyRanks.ATask) - lazy val sparkGenjavadocSettings: Seq[sbt.Def.Setting[_]] = Seq( - libraryDependencies += compilerPlugin( - "com.typesafe.genjavadoc" %% "genjavadoc-plugin" % unidocGenjavadocVersion.value cross CrossVersion.full), + lazy val sparkGenjavadocSettings: Seq[sbt.Def.Setting[_]] = GenJavadocPlugin.projectSettings ++ Seq( scalacOptions ++= Seq( - "-P:genjavadoc:out=" + (target.value / "java"), "-P:genjavadoc:strictVisibility=true" // hide package private types ) ) @@ -172,7 +171,7 @@ object SparkBuild extends PomBuild { val scalaSourceV = Seq(file(scalaSource.in(config).value.getAbsolutePath)) val configV = 
(baseDirectory in ThisBuild).value / scalaStyleOnCompileConfig val configUrlV = scalastyleConfigUrl.in(config).value - val streamsV = streams.in(config).value + val streamsV = (streams.in(config).value: @sbtUnchecked) val failOnErrorV = true val failOnWarningV = false val scalastyleTargetV = scalastyleTarget.in(config).value @@ -219,7 +218,6 @@ object SparkBuild extends PomBuild { javaHome := sys.env.get("JAVA_HOME") .orElse(sys.props.get("java.home").map { p => new File(p).getParentFile().getAbsolutePath() }) .map(file), - incOptions := incOptions.value.withNameHashing(true), publishMavenStyle := true, unidocGenjavadocVersion := "0.15", @@ -235,10 +233,12 @@ object SparkBuild extends PomBuild { ), externalResolvers := resolvers.value, otherResolvers := SbtPomKeys.mvnLocalRepository(dotM2 => Seq(Resolver.file("dotM2", dotM2))).value, - publishLocalConfiguration in MavenCompile := - new PublishConfiguration(None, "dotM2", packagedArtifacts.value, Seq(), ivyLoggingLevel.value), + publishLocalConfiguration in MavenCompile := PublishConfiguration() + .withResolverName("dotM2") + .withArtifacts(packagedArtifacts.value.toVector) + .withLogging(ivyLoggingLevel.value), publishMavenStyle in MavenCompile := true, - publishLocal in MavenCompile := publishTask(publishLocalConfiguration in MavenCompile, deliverLocal).value, + publishLocal in MavenCompile := publishTask(publishLocalConfiguration in MavenCompile).value, publishLocalBoth := Seq(publishLocal in MavenCompile, publishLocal).dependOn.value, javacOptions in (Compile, doc) ++= { @@ -267,6 +267,8 @@ object SparkBuild extends PomBuild { "-sourcepath", (baseDirectory in ThisBuild).value.getAbsolutePath // Required for relative source links in scaladoc ), + SbtPomKeys.profiles := profiles, + // Remove certain packages from Scaladoc scalacOptions in (Compile, doc) := Seq( "-groups", @@ -289,15 +291,16 @@ object SparkBuild extends PomBuild { val out = streams.value def logProblem(l: (=> String) => Unit, f: File, p: xsbti.Problem) = { - l(f.toString + ":" + p.position.line.fold("")(_ + ":") + " " + p.message) + val jmap = new java.util.function.Function[Integer, String]() {override def apply(i: Integer): String = {i.toString}} + l(f.toString + ":" + p.position.line.map[String](jmap.apply).map(_ + ":").orElse("") + " " + p.message) l(p.position.lineContent) l("") } var failed = 0 - analysis.infos.allInfos.foreach { case (k, i) => - i.reportedProblems foreach { p => - val deprecation = p.message.contains("is deprecated") + analysis.asInstanceOf[sbt.internal.inc.Analysis].infos.allInfos.foreach { case (k, i) => + i.getReportedProblems foreach { p => + val deprecation = p.message.contains("deprecated") if (!deprecation) { failed = failed + 1 @@ -319,11 +322,16 @@ object SparkBuild extends PomBuild { } analysis }, + + // disable Mima check for all modules, + // to be enabled in specific ones that have previous artifacts + MimaKeys.mimaFailOnNoPrevious := false, + dependencyOverrides ++= MavenHelper.fromPom { pom => for { dep <- pom.getDependencyManagement.getDependencies.asScala } yield MavenHelper.convertDep(dep) - }.value.toSet + }.value.toSeq ) def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = { @@ -432,7 +440,7 @@ object SparkBuild extends PomBuild { } ))(assembly) - enable(Seq(sparkShell := sparkShell in LocalProject("assembly")))(spark) + enable(Seq(sparkShell := (sparkShell in LocalProject("assembly")).value))(spark) // TODO: move this to its upstream project. 
override def projectDefinitions(baseDirectory: File): Seq[Project] = { @@ -503,12 +511,12 @@ object SparkParallelTestGrouping { testGrouping in Test := { val tests: Seq[TestDefinition] = (definedTests in Test).value val defaultForkOptions = ForkOptions( - bootJars = Nil, javaHome = javaHome.value, - connectInput = connectInput.value, outputStrategy = outputStrategy.value, - runJVMOptions = (javaOptions in Test).value, + bootJars = Vector.empty[java.io.File], workingDirectory = Some(baseDirectory.value), + runJVMOptions = (javaOptions in Test).value.toVector, + connectInput = connectInput.value, envVars = (envVars in Test).value ) tests.groupBy(test => testNameToTestGroup(test.name)).map { case (groupName, groupTests) => @@ -516,7 +524,7 @@ object SparkParallelTestGrouping { if (groupName == DEFAULT_TEST_GROUP) { defaultForkOptions } else { - defaultForkOptions.copy(runJVMOptions = defaultForkOptions.runJVMOptions ++ + defaultForkOptions.withRunJVMOptions(defaultForkOptions.runJVMOptions ++ Seq(s"-Djava.io.tmpdir=${baseDirectory.value}/target/tmp/$groupName")) } } @@ -530,6 +538,7 @@ object SparkParallelTestGrouping { } object Core { + import scala.sys.process.Process lazy val settings = Seq( resourceGenerators in Compile += Def.task { val buildScript = baseDirectory.value + "/../build/spark-build-info" @@ -575,6 +584,7 @@ object DockerIntegrationTests { */ object KubernetesIntegrationTests { import BuildCommons._ + import scala.sys.process.Process val dockerBuild = TaskKey[Unit]("docker-imgs", "Build the docker images for ITs.") val runITs = TaskKey[Unit]("run-its", "Only run ITs, skip image build.") @@ -653,7 +663,9 @@ object ExcludedDependencies { */ object OldDeps { - lazy val project = Project("oldDeps", file("dev"), settings = oldDepsSettings) + lazy val project = Project("oldDeps", file("dev")) + .settings(oldDepsSettings) + .disablePlugins(com.typesafe.sbt.pom.PomReaderPlugin) lazy val allPreviousArtifactKeys = Def.settingDyn[Seq[Set[ModuleID]]] { SparkBuild.mimaProjects @@ -669,7 +681,10 @@ object OldDeps { } object Catalyst { - lazy val settings = antlr4Settings ++ Seq( + import com.simplytyped.Antlr4Plugin + import com.simplytyped.Antlr4Plugin.autoImport._ + + lazy val settings = Antlr4Plugin.projectSettings ++ Seq( antlr4Version in Antlr4 := SbtPomKeys.effectivePom.value.getProperties.get("antlr4.version").asInstanceOf[String], antlr4PackageName in Antlr4 := Some("org.apache.spark.sql.catalyst.parser"), antlr4GenListener in Antlr4 := true, @@ -679,6 +694,9 @@ object Catalyst { } object SQL { + + import sbtavro.SbtAvro.autoImport._ + lazy val settings = Seq( initialCommands in console := """ @@ -700,8 +718,10 @@ object SQL { |import sqlContext.implicits._ |import sqlContext._ """.stripMargin, - cleanupCommands in console := "sc.stop()" + cleanupCommands in console := "sc.stop()", + Test / avroGenerate := (Compile / avroGenerate).value ) + } object Hive { @@ -740,27 +760,27 @@ object Hive { object Assembly { import sbtassembly.AssemblyUtils._ - import sbtassembly.Plugin._ - import AssemblyKeys._ + import sbtassembly.AssemblyPlugin.autoImport._ val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.") - lazy val settings = assemblySettings ++ Seq( + lazy val settings = baseAssemblySettings ++ Seq( test in assembly := {}, hadoopVersion := { sys.props.get("hadoop.version") .getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String]) }, - jarName in assembly := { + assemblyJarName in assembly := { + lazy val 
hdpVersion = hadoopVersion.value if (moduleName.value.contains("streaming-kafka-0-10-assembly") || moduleName.value.contains("streaming-kinesis-asl-assembly")) { s"${moduleName.value}-${version.value}.jar" } else { - s"${moduleName.value}-${version.value}-hadoop${hadoopVersion.value}.jar" + s"${moduleName.value}-${version.value}-hadoop${hdpVersion}.jar" } }, - jarName in (Test, assembly) := s"${moduleName.value}-test-${version.value}.jar", - mergeStrategy in assembly := { + assemblyJarName in (Test, assembly) := s"${moduleName.value}-test-${version.value}.jar", + assemblyMergeStrategy in assembly := { case m if m.toLowerCase(Locale.ROOT).endsWith("manifest.mf") => MergeStrategy.discard case m if m.toLowerCase(Locale.ROOT).matches("meta-inf.*\\.sf$") @@ -775,8 +795,7 @@ object Assembly { } object PySparkAssembly { - import sbtassembly.Plugin._ - import AssemblyKeys._ + import sbtassembly.AssemblyPlugin.autoImport._ import java.util.zip.{ZipOutputStream, ZipEntry} lazy val settings = Seq( @@ -826,8 +845,13 @@ object PySparkAssembly { object Unidoc { import BuildCommons._ - import sbtunidoc.Plugin._ - import UnidocKeys._ + import sbtunidoc.BaseUnidocPlugin + import sbtunidoc.JavaUnidocPlugin + import sbtunidoc.ScalaUnidocPlugin + import sbtunidoc.BaseUnidocPlugin.autoImport._ + import sbtunidoc.GenJavadocPlugin.autoImport._ + import sbtunidoc.JavaUnidocPlugin.autoImport._ + import sbtunidoc.ScalaUnidocPlugin.autoImport._ private def ignoreUndocumentedPackages(packages: Seq[Seq[File]]): Seq[Seq[File]] = { packages @@ -855,6 +879,7 @@ object Unidoc { .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalog/v2/utils"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/hive"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/v2/avro"))) + .map(_.filterNot(_.getCanonicalPath.contains("SSLOptions"))) } private def ignoreClasspaths(classpaths: Seq[Classpath]): Seq[Classpath] = { @@ -865,7 +890,10 @@ object Unidoc { val unidocSourceBase = settingKey[String]("Base URL of source links in Scaladoc.") - lazy val settings = scalaJavaUnidocSettings ++ Seq ( + lazy val settings = BaseUnidocPlugin.projectSettings ++ + ScalaUnidocPlugin.projectSettings ++ + JavaUnidocPlugin.projectSettings ++ + Seq ( publish := {}, unidocProjectFilter in(ScalaUnidoc, unidoc) := diff --git a/project/build.properties b/project/build.properties index 23aa187fb35a7..b1e5e313d853f 100644 --- a/project/build.properties +++ b/project/build.properties @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -sbt.version=0.13.18 +sbt.version=1.3.13 diff --git a/project/plugins.sbt b/project/plugins.sbt index 8eb1c8e92d8ca..b504bf0341f55 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -23,8 +23,7 @@ libraryDependencies += "com.puppycrawl.tools" % "checkstyle" % "8.25" // checkstyle uses guava 23.0. 
libraryDependencies += "com.google.guava" % "guava" % "23.0" -// need to make changes to uptake sbt 1.0 support in "com.eed3si9n" % "sbt-assembly" % "1.14.5" -addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.11.2") +addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.15.0") addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "5.2.4") @@ -32,19 +31,11 @@ addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2") addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "1.0.0") -// SPARK-29560 Only sbt-mima-plugin needs this repo -resolvers += Resolver.url("bintray", - new java.net.URL("https://dl.bintray.com/typesafe/sbt-plugins"))(Resolver.defaultIvyPatterns) -addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.3.0") +addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.8.0") -// sbt 1.0.0 support: https://github.com/AlpineNow/junit_xml_listener/issues/6 -addSbtPlugin("com.alpinenow" % "junit_xml_listener" % "0.5.1") +addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.3") -// need to make changes to uptake sbt 1.0 support in "com.eed3si9n" % "sbt-unidoc" % "0.4.1" -addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.3.3") - -// need to make changes to uptake sbt 1.0 support in "com.cavorite" % "sbt-avro-1-7" % "1.1.2" -addSbtPlugin("com.cavorite" % "sbt-avro" % "0.3.2") +addSbtPlugin("com.cavorite" % "sbt-avro" % "2.1.1") libraryDependencies += "org.apache.avro" % "avro-compiler" % "1.10.1" addSbtPlugin("io.spray" % "sbt-revolver" % "0.9.1") @@ -53,14 +44,6 @@ libraryDependencies += "org.ow2.asm" % "asm" % "7.2" libraryDependencies += "org.ow2.asm" % "asm-commons" % "7.2" -// sbt 1.0.0 support: https://github.com/ihji/sbt-antlr4/issues/14 -addSbtPlugin("com.simplytyped" % "sbt-antlr4" % "0.7.13") - -// Spark uses a custom fork of the sbt-pom-reader plugin which contains a patch to fix issues -// related to test-jar dependencies (https://github.com/sbt/sbt-pom-reader/pull/14). The source for -// this fork is published at https://github.com/JoshRosen/sbt-pom-reader/tree/v1.0.0-spark -// and corresponds to commit b160317fcb0b9d1009635a7c5aa05d0f3be61936 in that repository. -// In the long run, we should try to merge our patch upstream and switch to an upstream version of -// the plugin; this is tracked at SPARK-14401. +addSbtPlugin("com.simplytyped" % "sbt-antlr4" % "0.8.2") -addSbtPlugin("org.spark-project" % "sbt-pom-reader" % "1.0.0-spark") +addSbtPlugin("com.typesafe.sbt" % "sbt-pom-reader" % "2.2.0") diff --git a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala index f9bc499961ad7..a6fee8616df11 100644 --- a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala +++ b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala @@ -24,6 +24,7 @@ import scala.reflect.runtime.universe.runtimeMirror import scala.util.Try import org.clapper.classutil.ClassFinder +import org.objectweb.asm.Opcodes /** * A tool for generating classes to be excluded during binary checking with MIMA. It is expected @@ -146,7 +147,7 @@ object GenerateMIMAIgnore { * and subpackages both from directories and jars present on the classpath. */ private def getClasses(packageName: String): Set[String] = { - val finder = ClassFinder() + val finder = ClassFinder(maybeOverrideAsmVersion = Some(Opcodes.ASM7)) finder .getClasses .map(_.name)