
Commit

Merge branch 'master' of https://github.com/apache/spark into SPARK-1609
witgo committed Apr 26, 2014
2 parents 0640852 + 87cf35c commit 91da0bb
Showing 14 changed files with 307 additions and 242 deletions.
20 changes: 12 additions & 8 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -300,10 +300,17 @@ class SparkContext(config: SparkConf) extends Logging {

// Create and start the scheduler
private[spark] var taskScheduler = SparkContext.createTaskScheduler(this, master)
- taskScheduler.start()
+ @volatile private[spark] var dagScheduler: DAGScheduler = _
+ try {
+   dagScheduler = new DAGScheduler(this)
+ } catch {
+   case e: Exception => throw
+     new SparkException("DAGScheduler cannot be initialized due to %s".format(e.getMessage))
+ }

- @volatile private[spark] var dagScheduler = new DAGScheduler(this)
- dagScheduler.start()
+ // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
+ // constructor
+ taskScheduler.start()

private[spark] val cleaner: Option[ContextCleaner] = {
if (conf.getBoolean("spark.cleaner.referenceTracking", true)) {
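
Why the reordering in this hunk matters: as the new comment notes, DAGScheduler's constructor sets the DAGScheduler reference on the TaskScheduler, so taskScheduler.start() must not run until that back-reference exists. Below is a minimal Scala sketch of the ordering constraint, using hypothetical stub classes (TaskSchedulerStub, DagSchedulerStub) rather than Spark's real ones:

class TaskSchedulerStub {
  // Back-reference that the DAG side fills in from its constructor.
  @volatile var dagScheduler: DagSchedulerStub = null

  def start(): Unit = {
    // Starting before the back-reference is set could lose scheduler events.
    require(dagScheduler != null, "DAGScheduler reference must be set before start()")
    println("task scheduler started")
  }
}

class DagSchedulerStub(taskScheduler: TaskSchedulerStub) {
  taskScheduler.dagScheduler = this // set the back-reference during construction
}

object InitOrder {
  def main(args: Array[String]): Unit = {
    val ts = new TaskSchedulerStub
    new DagSchedulerStub(ts) // construct the DAG side first...
    ts.start()               // ...only then is it safe to start the task side
  }
}

The try/catch in the hunk additionally wraps any constructor failure in a SparkException, so a broken DAGScheduler surfaces as one clear initialization error.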
@@ -1022,8 +1029,8 @@ class SparkContext(config: SparkConf) extends Logging {
partitions: Seq[Int],
allowLocal: Boolean,
resultHandler: (Int, U) => Unit) {
- partitions.foreach{ p =>
-   require(p >= 0 && p < rdd.partitions.size, s"Invalid partition requested: $p")
+ if (dagScheduler == null) {
+   throw new SparkException("SparkContext has been shutdown")
}
val callSite = getCallSite
val cleanedFunc = clean(func)
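
The guard added in this hunk replaces a per-partition range check with a fail-fast shutdown check: once the context has been stopped, its dagScheduler reference is gone, and a null here should surface as a clear error rather than a NullPointerException deeper in the job path. A hedged sketch of the same fail-fast pattern, with a hypothetical ContextStub class standing in for SparkContext:

class ContextStub {
  @volatile private var scheduler: AnyRef = new Object

  def stop(): Unit = {
    scheduler = null // mirrors the context dropping its scheduler on shutdown
  }

  def runJob(): Unit = {
    // Fail fast with a clear message instead of an NPE later in the call chain.
    if (scheduler == null) {
      throw new IllegalStateException("context has been shutdown")
    }
    println("job submitted")
  }
}

object Guard {
  def main(args: Array[String]): Unit = {
    val ctx = new ContextStub
    ctx.runJob() // succeeds while the scheduler reference is live
    ctx.stop()
    try ctx.runJob() catch {
      case e: IllegalStateException => println(s"rejected: ${e.getMessage}")
    }
  }
}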
@@ -1132,9 +1139,6 @@ class SparkContext(config: SparkConf) extends Logging {
resultHandler: (Int, U) => Unit,
resultFunc: => R): SimpleFutureAction[R] =
{
- partitions.foreach{ p =>
-   require(p >= 0 && p < rdd.partitions.size, s"Invalid partition requested: $p")
- }
val cleanF = clean(processPartition)
val callSite = getCallSite
val waiter = dagScheduler.submitJob(
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -1142,9 +1142,9 @@ abstract class RDD[T: ClassTag](
@transient private var doCheckpointCalled = false

/**
- * Performs the checkpointing of this RDD by saving this. It is called by the DAGScheduler
- * after a job using this RDD has completed (therefore the RDD has been materialized and
- * potentially stored in memory). doCheckpoint() is called recursively on the parent RDDs.
+ * Performs the checkpointing of this RDD by saving this. It is called after a job using this RDD
+ * has completed (therefore the RDD has been materialized and potentially stored in memory).
+ * doCheckpoint() is called recursively on the parent RDDs.
*/
private[spark] def doCheckpoint() {
if (!doCheckpointCalled) {
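
The revised scaladoc above describes a run-once recursive traversal: checkpoint this RDD, then call doCheckpoint() on each parent, with doCheckpointCalled guarding against repeat visits when the same ancestor is reachable along several paths. A minimal sketch of that pattern, using a hypothetical NodeStub class rather than Spark's RDD:

class NodeStub(val name: String, val parents: Seq[NodeStub]) {
  @transient private var doCheckpointCalled = false

  def doCheckpoint(): Unit = {
    if (!doCheckpointCalled) {
      doCheckpointCalled = true          // visit each node at most once
      println(s"checkpointing $name")    // stand-in for actually saving the data
      parents.foreach(_.doCheckpoint())  // recurse on the parent nodes
    }
  }
}

object CheckpointWalk {
  def main(args: Array[String]): Unit = {
    val root = new NodeStub("root", Nil)
    val child = new NodeStub("child", Seq(root))
    new NodeStub("leaf", Seq(child, root)).doCheckpoint()
    // "root" prints only once even though it is reachable along two paths.
  }
}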
