From 2f706f14a228400e7fed54a261bd78bdb67237d0 Mon Sep 17 00:00:00 2001
From: Patrick Wendell
Date: Sat, 5 Apr 2014 13:54:41 -0700
Subject: [PATCH] Don't use floats

---
 .../scala/org/apache/spark/Aggregator.scala   |  2 +-
 .../scala/org/apache/spark/Dependency.scala   | 10 +++++-----
 .../scala/org/apache/spark/FutureAction.scala |  4 ++--
 .../scala/org/apache/spark/TaskContext.scala  |  2 +-
 .../apache/spark/executor/TaskMetrics.scala   |  6 +++---
 .../apache/spark/io/CompressionCodec.scala    |  6 +++---
 .../apache/spark/partial/BoundedDouble.scala  |  2 +-
 .../apache/spark/partial/PartialResult.scala  |  2 +-
 .../apache/spark/rdd/AsyncRDDActions.scala    |  2 +-
 .../org/apache/spark/rdd/HadoopRDD.scala      |  2 +-
 .../org/apache/spark/rdd/NewHadoopRDD.scala   |  2 +-
 .../spark/rdd/PartitionPruningRDD.scala       |  4 ++--
 .../org/apache/spark/rdd/ShuffledRDD.scala    |  2 +-
 .../spark/scheduler/SparkListener.scala       | 28 ++++++++++----------
 .../spark/serializer/JavaSerializer.scala     |  2 +-
 .../apache/spark/serializer/Serializer.scala  |  8 ++++----
 .../spark/util/BoundedPriorityQueue.scala     |  2 +-
 .../org/apache/spark/util/MutablePair.scala   |  2 +-
 .../spark/util/random/Pseudorandom.scala      |  2 +-
 .../spark/util/random/RandomSampler.scala     |  6 +++---
 .../org/apache/spark/graphx/package.scala     |  2 +-
 .../org/apache/spark/sql/SQLContext.scala     |  2 +-
 .../org/apache/spark/sql/SchemaRDD.scala      |  8 ++++----
 23 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index 97f4f36bd7c49..32c0e8228705c 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -20,7 +20,7 @@ package org.apache.spark
 import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A set of functions used to aggregate data.
  *
diff --git a/core/src/main/scala/org/apache/spark/Dependency.scala b/core/src/main/scala/org/apache/spark/Dependency.scala
index df5c798d9855c..af51e6d177bf4 100644
--- a/core/src/main/scala/org/apache/spark/Dependency.scala
+++ b/core/src/main/scala/org/apache/spark/Dependency.scala
@@ -21,7 +21,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.serializer.Serializer

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Base class for dependencies.
  */
@@ -29,7 +29,7 @@ abstract class Dependency[T](val rdd: RDD[T]) extends Serializable


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Base class for dependencies where each partition of the parent RDD is used by at most one
  * partition of the child RDD. Narrow dependencies allow for pipelined execution.
@@ -45,7 +45,7 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Represents a dependency on the output of a shuffle stage.
  * @param rdd the parent RDD
@@ -65,7 +65,7 @@ class ShuffleDependency[K, V](

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Represents a one-to-one dependency between partitions of the parent and child RDDs.
  */
 class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
@@ -75,7 +75,7 @@ class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
  * @param rdd the parent RDD
diff --git a/core/src/main/scala/org/apache/spark/FutureAction.scala b/core/src/main/scala/org/apache/spark/FutureAction.scala
index bea19913228fc..392150e682c47 100644
--- a/core/src/main/scala/org/apache/spark/FutureAction.scala
+++ b/core/src/main/scala/org/apache/spark/FutureAction.scala
@@ -25,7 +25,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}

 /**
- * EXPERIMENTAL API
+ * EXPERIMENTAL API
  *
  * A future for the result of an action to support cancellation. This is an extension of the
  * Scala Future interface to support cancellation.
@@ -150,7 +150,7 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:


 /**
- * EXPERIMENTAL API
+ * EXPERIMENTAL API
  *
  * A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
  * takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
diff --git a/core/src/main/scala/org/apache/spark/TaskContext.scala b/core/src/main/scala/org/apache/spark/TaskContext.scala
index ddb4b5ce453ff..f1f5bf5ea8d75 100644
--- a/core/src/main/scala/org/apache/spark/TaskContext.scala
+++ b/core/src/main/scala/org/apache/spark/TaskContext.scala
@@ -22,7 +22,7 @@ import scala.collection.mutable.ArrayBuffer
 import org.apache.spark.executor.TaskMetrics

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Contextual information about a task which can be read or mutated during execution.
  */
diff --git a/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala b/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
index 31c871d6e3db7..7f44886ef569d 100644
--- a/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
+++ b/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
@@ -20,7 +20,7 @@ package org.apache.spark.executor
 import org.apache.spark.storage.{BlockId, BlockStatus}

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Metrics tracked during the execution of a task.
  */
@@ -88,7 +88,7 @@ object TaskMetrics {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Metrics pertaining to shuffle data read in a given task.
  */
@@ -127,7 +127,7 @@ class ShuffleReadMetrics extends Serializable {
 }

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Metrics pertaining to shuffle data written in a given task.
  */
diff --git a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
index d65022d2356fb..cdcbaf4f0e947 100644
--- a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
+++ b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
@@ -25,7 +25,7 @@ import org.xerial.snappy.{SnappyInputStream, SnappyOutputStream}
 import org.apache.spark.SparkConf

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * CompressionCodec allows the customization of choosing different compression implementations
  * to be used in block storage.
@@ -58,7 +58,7 @@ private[spark] object CompressionCodec {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * LZF implementation of [[org.apache.spark.io.CompressionCodec]].
  *
@@ -77,7 +77,7 @@ class LZFCompressionCodec(conf: SparkConf) extends CompressionCodec {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Snappy implementation of [[org.apache.spark.io.CompressionCodec]].
  * Block size can be configured by spark.io.compression.snappy.block.size.
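
The CompressionCodec hunks above describe codecs that are selected and tuned purely through configuration. A minimal, spark-shell-style sketch of what that configuration might look like; the `spark.io.compression.codec` key is an assumption here, while the Snappy block-size key is quoted from the Scaladoc in the hunk above:

    import org.apache.spark.SparkConf

    // Sketch only: pick a codec implementation and tune the Snappy block size.
    // "spark.io.compression.codec" is an assumed property name; the block-size
    // key comes from the Scaladoc above.
    val conf = new SparkConf()
      .setAppName("compression-config-sketch")
      .set("spark.io.compression.codec", "org.apache.spark.io.SnappyCompressionCodec")
      .set("spark.io.compression.snappy.block.size", "32768")
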
diff --git a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
index 0aa79de87bf99..811a771f007e9 100644
--- a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
+++ b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.partial

 /**
- * EXPERIMENTAL API
+ * EXPERIMENTAL API
  *
  * A Double value with error bars and associated confidence.
  */
diff --git a/core/src/main/scala/org/apache/spark/partial/PartialResult.scala b/core/src/main/scala/org/apache/spark/partial/PartialResult.scala
index 2e2591973c665..46095dc69968f 100644
--- a/core/src/main/scala/org/apache/spark/partial/PartialResult.scala
+++ b/core/src/main/scala/org/apache/spark/partial/PartialResult.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.partial

 /**
- * EXPERIMENTAL API
+ * EXPERIMENTAL API
  */
 class PartialResult[R](initialVal: R, isFinal: Boolean) {
   private var finalValue: Option[R] = if (isFinal) Some(initialVal) else None
diff --git a/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala b/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
index 992b67dfff1af..6b6efa1f26893 100644
--- a/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
@@ -26,7 +26,7 @@ import scala.reflect.ClassTag
 import org.apache.spark.{ComplexFutureAction, FutureAction, Logging}

 /**
- * EXPERIMENTAL API
+ * EXPERIMENTAL API
  *
  * A set of asynchronous RDD actions available through an implicit conversion.
  * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 8483c13c989a1..818bfd46150c8 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -70,7 +70,7 @@ private[spark] class HadoopPartition(rddId: Int, idx: Int, @transient s: InputSp
 }

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS,
  * sources in HBase, or S3), using the older MapReduce API (`org.apache.hadoop.mapred`).
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 71a055f05691f..75f82163ef50b 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -36,7 +36,7 @@ class NewHadoopPartition(rddId: Int, val index: Int, @transient rawSplit: InputS
 }

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS,
  * sources in HBase, or S3), using the new MapReduce API (`org.apache.hadoop.mapreduce`).
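
The AsyncRDDActions, FutureAction and PartialResult/BoundedDouble hunks above all describe actions that return either a cancellable future or an approximate result. A hedged, spark-shell-style sketch of how those entry points are typically reached; it assumes a SparkContext named `sc`, as in the shell, and the data and numbers are purely illustrative:

    import org.apache.spark.SparkContext._  // implicit conversion that adds the async actions

    // Illustrative input; any RDD would do.
    val rdd = sc.parallelize(1 to 1000000, 8)

    // Asynchronous action: returns a FutureAction that supports cancellation.
    val pendingCount = rdd.countAsync()
    // pendingCount.cancel()  // the cancellation path described in the FutureAction hunk

    // Approximate action: returns a PartialResult whose value is a BoundedDouble,
    // i.e. a Double with error bars at the requested confidence.
    val approx = rdd.countApprox(500, 0.95)
    println(approx.initialValue)
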
diff --git a/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala
index ff9e9cde4225c..46f44c129ba5a 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala
@@ -46,7 +46,7 @@ private[spark] class PruneDependency[T](rdd: RDD[T], @transient partitionFilterF


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A RDD used to prune RDD partitions/partitions so we can avoid launching tasks on
  * all partitions. An example use case: If we know the RDD is partitioned by range,
@@ -67,7 +67,7 @@ class PartitionPruningRDD[T: ClassTag](


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  */
 object PartitionPruningRDD {
diff --git a/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala
index 166b35d9508c0..c94f67717448b 100644
--- a/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala
@@ -28,7 +28,7 @@ private[spark] class ShuffledRDDPartition(val idx: Int) extends Partition {
 }

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * The resulting RDD from a shuffle (e.g. repartitioning of data).
  * @param prev the parent RDD.
diff --git a/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala b/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
index 13a17f8fd84cf..af882924213de 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
@@ -27,23 +27,23 @@ import org.apache.spark.executor.TaskMetrics
 import org.apache.spark.storage.BlockManagerId
 import org.apache.spark.util.{Distribution, Utils}

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 sealed trait SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerStageSubmitted(stageInfo: StageInfo, properties: Properties = null)
   extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerStageCompleted(stageInfo: StageInfo) extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerTaskStart(stageId: Int, taskInfo: TaskInfo) extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerTaskGettingResult(taskInfo: TaskInfo) extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerTaskEnd(
     stageId: Int,
     taskType: String,
@@ -52,26 +52,26 @@ case class SparkListenerTaskEnd(
     taskMetrics: TaskMetrics)
   extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerJobStart(jobId: Int, stageIds: Seq[Int], properties: Properties = null)
   extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerJobEnd(jobId: Int, jobResult: JobResult) extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerEnvironmentUpdate(environmentDetails: Map[String, Seq[(String, String)]])
   extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerBlockManagerAdded(blockManagerId: BlockManagerId, maxMem: Long)
   extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerBlockManagerRemoved(blockManagerId: BlockManagerId)
   extends SparkListenerEvent

-/** DEVELOPER API - UNSTABLE */
+/** DEVELOPER API - UNSTABLE */
 case class SparkListenerUnpersistRDD(rddId: Int) extends SparkListenerEvent

 /** An event used in the listener to shutdown the listener daemon thread. */
@@ -79,7 +79,7 @@ private[spark] case object SparkListenerShutdown extends SparkListenerEvent


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Interface for listening to events from the Spark scheduler. Note that this is an internal
  * interface which might change in different Spark releases.
@@ -143,7 +143,7 @@ trait SparkListener {
 }

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Simple SparkListener that logs a few summary statistics when each stage completes
  */
diff --git a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
index ee71ea6ec6550..df9b1ab8c8931 100644
--- a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
@@ -94,7 +94,7 @@ private[spark] class JavaSerializerInstance(counterReset: Int) extends Serialize
 }

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A Spark serializer that uses Java's built-in serialization.
  *
diff --git a/core/src/main/scala/org/apache/spark/serializer/Serializer.scala b/core/src/main/scala/org/apache/spark/serializer/Serializer.scala
index 3440fd4aa0ad2..d423d2e61a865 100644
--- a/core/src/main/scala/org/apache/spark/serializer/Serializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/Serializer.scala
@@ -26,7 +26,7 @@ import org.apache.spark.util.{ByteBufferInputStream, NextIterator}
 import org.apache.spark.SparkEnv

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A serializer. Because some serialization libraries are not thread safe, this class is used to
  * create [[org.apache.spark.serializer.SerializerInstance]] objects that do the actual
@@ -55,7 +55,7 @@ object Serializer {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * An instance of a serializer, for use by one thread at a time.
  */
@@ -89,7 +89,7 @@ trait SerializerInstance {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A stream for writing serialized objects.
  */
@@ -108,7 +108,7 @@ trait SerializationStream {


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A stream for reading serialized objects.
  */
diff --git a/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala b/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala
index 97c1ba15f04e9..6423d6948f2d7 100644
--- a/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala
+++ b/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala
@@ -24,7 +24,7 @@ import scala.collection.JavaConverters._
 import scala.collection.generic.Growable

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * Bounded priority queue. This class wraps the original PriorityQueue
  * class and modifies it such that only the top K elements are retained.
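
The SparkListener hunks above enumerate the scheduler events a developer-facing listener can receive, and TaskMetrics (annotated earlier in this patch) rides along with the task-end event. A minimal sketch of one possible listener; the class name and log format are illustrative, and registering it through `SparkContext.addSparkListener` is an assumption:

    import org.apache.spark.scheduler.{SparkListener, SparkListenerStageCompleted, SparkListenerTaskEnd}

    // Sketch of a listener that reacts to two of the events listed in the diff above.
    class StageTimingListener extends SparkListener {
      override def onStageCompleted(stageCompleted: SparkListenerStageCompleted) {
        println("Stage " + stageCompleted.stageInfo.stageId + " completed")
      }

      override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
        val metrics = taskEnd.taskMetrics
        if (metrics != null) {
          println("Task in stage " + taskEnd.stageId + " ran for " + metrics.executorRunTime + " ms")
        }
      }
    }

    // sc.addSparkListener(new StageTimingListener())  // assumed registration call
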
diff --git a/core/src/main/scala/org/apache/spark/util/MutablePair.scala b/core/src/main/scala/org/apache/spark/util/MutablePair.scala
index 27b6ccf18df73..db09d21bd4ec7 100644
--- a/core/src/main/scala/org/apache/spark/util/MutablePair.scala
+++ b/core/src/main/scala/org/apache/spark/util/MutablePair.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.util

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A tuple of 2 elements. This can be used as an alternative to Scala's Tuple2 when we want to
  * minimize object allocation.
diff --git a/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala b/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala
index a0863f41dc302..dac0d7d8a3b3c 100644
--- a/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.util.random

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A class with pseudorandom behavior.
  */
diff --git a/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala b/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
index 1fb2f60a8cd9d..479ca086b979c 100644
--- a/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
@@ -23,7 +23,7 @@ import cern.jet.random.Poisson
 import cern.jet.random.engine.DRand

 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A pseudorandom sampler. It is possible to change the sampled item type. For example, we might
  * want to add weights for stratified sampling or importance sampling. Should only use
@@ -42,7 +42,7 @@ trait RandomSampler[T, U] extends Pseudorandom with Cloneable with Serializable


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A sampler based on Bernoulli trials.
  *
@@ -71,7 +71,7 @@ class BernoulliSampler[T](lb: Double, ub: Double, complement: Boolean = false)


 /**
- * DEVELOPER API - UNSTABLE
+ * DEVELOPER API - UNSTABLE
  *
  * A sampler based on values drawn from Poisson distribution.
  *
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/package.scala b/graphx/src/main/scala/org/apache/spark/graphx/package.scala
index 6f602a59cb5b8..6d0e3cde812b1 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/package.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/package.scala
@@ -20,7 +20,7 @@ package org.apache.spark
 import org.apache.spark.util.collection.OpenHashSet

 /**
- * ALPHA COMPONENT
+ * ALPHA COMPONENT
  *
  * GraphX is a graph processing framework built on top of Spark.
  */
 package object graphx {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index cf3c06acce5b0..bace60f59cd22 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -62,7 +62,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
     new this.QueryExecution { val logical = plan }

   /**
-   * EXPERIMENTAL
+   * EXPERIMENTAL
    *
    * Allows catalyst LogicalPlans to be executed as a SchemaRDD. Note that the LogicalPlan
    * interface is considered internal, and thus not guranteed to be stable. As a result, using
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
index 770cabcb31d13..57a37e138e9ff 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -241,7 +241,7 @@ class SchemaRDD(
       Filter(ScalaUdf(udf, BooleanType, Seq(UnresolvedAttribute(arg1.name))), logicalPlan))

   /**
-   * EXPERIMENTAL
+   * EXPERIMENTAL
    *
    * Filters tuples using a function over a `Dynamic` version of a given Row. DynamicRows use
    * scala's Dynamic trait to emulate an ORM of in a dynamically typed language. Since the type of
@@ -260,7 +260,7 @@ class SchemaRDD(
       Filter(ScalaUdf(dynamicUdf, BooleanType, Seq(WrapDynamic(logicalPlan.output))), logicalPlan))

   /**
-   * EXPERIMENTAL
+   * EXPERIMENTAL
    *
    * Returns a sampled version of the underlying dataset.
    *
@@ -273,7 +273,7 @@ class SchemaRDD(
     new SchemaRDD(sqlContext, Sample(fraction, withReplacement, seed, logicalPlan))

   /**
-   * EXPERIMENTAL
+   * EXPERIMENTAL
    *
    * Applies the given Generator, or table generating function, to this relation.
    *
@@ -298,7 +298,7 @@ class SchemaRDD(
     new SchemaRDD(sqlContext, Generate(generator, join, outer, None, logicalPlan))

   /**
-   * EXPERIMENTAL
+   * EXPERIMENTAL
    *
    * Adds the rows from this RDD to the specified table. Note in a standard [[SQLContext]] there is
    * no notion of persistent tables, and thus queries that contain this operator will fail to