
Commit 3395ee7
Merge remote-tracking branch 'upstream/master' into SPARK-1149
liguoqiang committed Mar 5, 2014
2 parents: ac006a3 + 0283665

Showing 3 changed files with 9 additions and 1 deletion.
1 change: 1 addition & 0 deletions core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -193,6 +193,7 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
}

/** Alias for reduceByKeyLocally */
@deprecated("Use reduceByKeyLocally", "1.0.0")
def reduceByKeyToDriver(func: (V, V) => V): Map[K, V] = reduceByKeyLocally(func)

/** Count the number of elements for each key, and return the result to the master as a Map. */
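
This change deprecates the `reduceByKeyToDriver` alias in favor of `reduceByKeyLocally`, which merges the values for each key and returns the result to the driver as a map rather than a distributed RDD. A minimal PySpark sketch of the surviving method, assuming a local SparkContext (the deprecation itself applies only to the Scala alias shown above):

```python
from pyspark import SparkContext

sc = SparkContext("local", "reduceByKeyLocally-example")
pairs = sc.parallelize([("a", 1), ("a", 2), ("b", 3)])

# Merges values per key and returns a plain dict on the driver,
# not an RDD.
counts = pairs.reduceByKeyLocally(lambda x, y: x + y)
print(counts)  # e.g. {'a': 3, 'b': 3}
```
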
7 changes: 7 additions & 0 deletions docs/configuration.md
@@ -201,6 +201,13 @@ Apart from these, the following properties are also available, and may be useful
multi-user services.
</td>
</tr>
+<tr>
+  <td>spark.scheduler.revive.interval</td>
+  <td>1000</td>
+  <td>
+    The interval, in milliseconds, at which the scheduler revives worker resource offers to run tasks.
+  </td>
+</tr>
<tr>
<td>spark.reducer.maxMbInFlight</td>
<td>48</td>
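
This hunk documents the new `spark.scheduler.revive.interval` property. A minimal sketch of overriding the default from PySpark, assuming the standard `SparkConf` API (the property name and its 1000 ms default come from the table above):

```python
from pyspark import SparkConf, SparkContext

# Halve the revive interval so pending tasks are offered
# resources more frequently (default is 1000 ms).
conf = (SparkConf()
        .setMaster("local")
        .setAppName("revive-interval-example")
        .set("spark.scheduler.revive.interval", "500"))
sc = SparkContext(conf=conf)
```
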
2 changes: 1 addition & 1 deletion python/pyspark/rdd.py
@@ -163,7 +163,7 @@ def getCheckpointFile(self):

def map(self, f, preservesPartitioning=False):
"""
-        Return a new RDD containing the distinct elements in this RDD.
+        Return a new RDD by applying a function to each element of this RDD.
"""
def func(split, iterator): return imap(f, iterator)
return PipelinedRDD(self, func, preservesPartitioning)
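
The old docstring, apparently copied from `distinct`, misdescribed `map`; the corrected text matches the implementation, which applies `f` to every element via `imap`. A quick usage sketch with a hypothetical local context:

```python
from pyspark import SparkContext

sc = SparkContext("local", "map-example")
rdd = sc.parallelize([1, 2, 2, 3])

# map transforms every element and keeps duplicates;
# deduplication is distinct's job, not map's.
print(rdd.map(lambda x: x * x).collect())  # [1, 4, 4, 9]
```
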
