From 519dab056964dae71309f65bcadee8ec08366284 Mon Sep 17 00:00:00 2001 From: Marcelo Vanzin Date: Tue, 15 Aug 2017 15:27:39 -0700 Subject: [PATCH] Feedback. --- .../spark/deploy/history/FsHistoryProvider.scala | 12 +++++------- docs/monitoring.md | 6 ++++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala b/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala index cae8587bdc1fe..2d33a91b38cd7 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala @@ -56,11 +56,10 @@ import org.apache.spark.util.kvstore._ * * - New attempts are detected in [[checkForLogs]]: the log dir is scanned, and any * entries in the log dir whose modification time is greater than the last scan time - * are considered new or updated. These are replayed to create a new [[FsApplicationAttemptInfo]] - * entry and update or create a matching [[FsApplicationHistoryInfo]] element in the list - * of applications. + * are considered new or updated. These are replayed to create a new attempt info entry + * and update or create a matching application info element in the list of applications. * - Updated attempts are also found in [[checkForLogs]] -- if the attempt's log file has grown, the - * [[FsApplicationAttemptInfo]] is replaced by another one with a larger log size. + * attempt is replaced by another one with a larger log size. * - When [[updateProbe()]] is invoked to check if a loaded [[SparkUI]] * instance is out of date, the log size of the cached instance is checked against the app last * loaded by [[checkForLogs]]. 
@@ -137,10 +136,9 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) val meta = db.getMetadata(classOf[KVStoreMetadata]) if (meta == null) { - db.setMetadata(new KVStoreMetadata(CURRENT_LISTING_VERSION, logDir.toString())) + db.setMetadata(new KVStoreMetadata(CURRENT_LISTING_VERSION, logDir)) db - } else if (meta.version != CURRENT_LISTING_VERSION || - !logDir.toString().equals(meta.logDir)) { + } else if (meta.version != CURRENT_LISTING_VERSION || !logDir.equals(meta.logDir)) { logInfo("Detected mismatched config in existing DB, deleting...") db.close() Utils.deleteRecursively(dbPath) diff --git a/docs/monitoring.md b/docs/monitoring.md index 6bbd3e45be54e..f20cb22dd11e1 100644 --- a/docs/monitoring.md +++ b/docs/monitoring.md @@ -222,9 +222,11 @@ The history server can be configured as follows: spark.history.store.path - /var/lib/spark-history + (none) - Local directory where history server will cache application history data. + Local directory in which to cache application history data. If set, the history + server will store application data on disk instead of keeping it in memory. The data + written to disk will be reused in the event of a history server restart.