Skip to content

Commit

Permalink
Fix tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Sital Kedia committed Mar 29, 2017
1 parent 1e6e88a commit bdaff12
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 10 deletions.
16 changes: 7 additions & 9 deletions core/src/main/scala/org/apache/spark/MapOutputTracker.scala
Original file line number Diff line number Diff line change
Expand Up @@ -426,16 +426,14 @@ private[spark] class MapOutputTrackerMaster(conf: SparkConf,

/** Get the epoch for map output for a shuffle, if it is available */
/**
 * Get the epoch for the map output of a given shuffle, if it is available.
 *
 * @param shuffleId id of the shuffle whose map output is being queried
 * @param mapId index of the map task within that shuffle; negative ids are
 *              rejected up front and yield None
 * @return Some(epoch) recorded for this (shuffleId, mapId) when the shuffle is
 *         registered, its status array is non-null, and the slot for mapId is
 *         populated; None otherwise
 */
def getEpochForMapOutput(shuffleId: Int, mapId: Int): Option[Long] = {
  // Guard first: a negative mapId would throw ArrayIndexOutOfBoundsException below.
  if (mapId < 0) {
    None
  } else {
    for {
      // flatMap through Option(_) so a shuffle registered with a null status
      // array (possible before statuses are populated) yields None instead of
      // an NPE when indexed.
      statusArray <- mapStatuses.get(shuffleId).flatMap(Option(_))
      // Option(...) maps a null slot (map output not yet available) to None.
      mapStatus <- Option(statusArray(mapId))
    } yield epochForMapStatus(shuffleId)(mapId)
  }
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1295,7 +1295,6 @@ class DAGScheduler(
val failedStage = stageIdToStage(task.stageId)
val mapStage = shuffleIdToMapStage(shuffleId)

val epochForMapOutput = mapOutputTracker.getEpochForMapOutput(shuffleId, mapId)
// It is possible that the map output was regenerated by rerun of the stage and the
// fetch failure is being reported for stale map output. In that case, we should just
// ignore the fetch failure and relaunch the task with latest map output info.
Expand Down

0 comments on commit bdaff12

Please sign in to comment.