[SPARK-45964][SQL] Remove private sql accessor in XML and JSON package under catalyst package #43856

Closed · wants to merge 1 commit
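The diff is mechanical: every hunk drops a `private[sql]` (in one case `private[xml]`) qualifier so the class or object becomes plain public and can be referenced from outside `org.apache.spark.sql`. For readers less familiar with Scala's package-qualified visibility, here is a minimal sketch of what the modifier does, using toy package names rather than Spark's:

```scala
// Toy packages, for illustration only.
package org.example.sql.catalyst {
  private[sql] object Hidden { val x = 1 } // visible only under org.example.sql
  object Visible { val y = 2 }             // public everywhere
}

package org.example.other {
  object Client {
    val ok = org.example.sql.catalyst.Visible.y // compiles
    // org.example.sql.catalyst.Hidden.x would not compile here:
    // "object Hidden in package catalyst cannot be accessed"
  }
}
```
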
CreateJacksonParser.scala
@@ -29,7 +29,7 @@ import sun.nio.cs.StreamDecoder
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.unsafe.types.UTF8String
 
-private[sql] object CreateJacksonParser extends Serializable {
+object CreateJacksonParser extends Serializable {
   def string(jsonFactory: JsonFactory, record: String): JsonParser = {
     jsonFactory.createParser(record)
   }
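
With the qualifier gone, `CreateJacksonParser` is callable from any package. A minimal usage sketch of the `string` helper shown in the hunk (the JSON literal is illustrative):

```scala
import com.fasterxml.jackson.core.JsonFactory
import org.apache.spark.sql.catalyst.json.CreateJacksonParser

val factory = new JsonFactory()
// Build a Jackson parser over an in-memory JSON record.
val parser = CreateJacksonParser.string(factory, """{"a": 1}""")
try parser.nextToken() // JsonToken.START_OBJECT
finally parser.close()
```
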
JSONOptions.scala
@@ -34,7 +34,7 @@ import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
  *
  * Most of these map directly to Jackson's internal options, specified in [[JsonReadFeature]].
  */
-private[sql] class JSONOptions(
+class JSONOptions(
     @transient val parameters: CaseInsensitiveMap[String],
     defaultTimeZoneId: String,
     defaultColumnNameOfCorruptRecord: String)
@@ -212,7 +212,7 @@ private[sql] class JSONOptions(
   }
 }
 
-private[sql] class JSONOptionsInRead(
+class JSONOptionsInRead(
     @transient override val parameters: CaseInsensitiveMap[String],
     defaultTimeZoneId: String,
     defaultColumnNameOfCorruptRecord: String)
@@ -242,7 +242,7 @@ private[sql] class JSONOptionsInRead(
   }
 }
 
-private[sql] object JSONOptionsInRead {
+object JSONOptionsInRead {
   // The following encodings are not supported in per-line mode (multiLine is false)
   // because they cause problems when reading files with a BOM, which is supposed to
   // be present in files with such encodings. After splitting input files by lines,
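
A sketch of constructing the now-public options classes through the three-argument primary constructor visible in the hunks; the option map and time zone are illustrative:

```scala
import org.apache.spark.sql.catalyst.json.{JSONOptions, JSONOptionsInRead}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap

val params = CaseInsensitiveMap(Map("multiLine" -> "true"))
val writeOptions = new JSONOptions(params, "UTC", "_corrupt_record")
val readOptions  = new JSONOptionsInRead(params, "UTC", "_corrupt_record")
```
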
JacksonGenerator.scala
@@ -37,7 +37,7 @@ import org.apache.spark.util.ArrayImplicits._
  * of map. An exception will be thrown if trying to write out a struct when it is initialized with
  * a `MapType`, and vice versa.
  */
-private[sql] class JacksonGenerator(
+class JacksonGenerator(
     dataType: DataType,
     writer: Writer,
     options: JSONOptions) {
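
The class doc above says the generator writes structs when initialized with a `StructType` and maps when initialized with a `MapType`. A sketch of the struct case, assuming the generator's `write(InternalRow)`/`flush()` methods:

```scala
import java.io.StringWriter

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.json.{JacksonGenerator, JSONOptions}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

val schema = StructType(Seq(StructField("a", IntegerType)))
val options = new JSONOptions(CaseInsensitiveMap(Map.empty[String, String]), "UTC", "_corrupt_record")
val out = new StringWriter()
val gen = new JacksonGenerator(schema, out, options) // initialized with a StructType...
gen.write(InternalRow(1))                            // ...so it writes rows as JSON objects
gen.flush()
// out.toString should now be {"a":1}
```
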
JsonInferSchema.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
 
-private[sql] class JsonInferSchema(options: JSONOptions) extends Serializable with Logging {
+class JsonInferSchema(options: JSONOptions) extends Serializable with Logging {
 
   private val decimalParser = ExprUtils.getDecimalParser(options.locale)
 
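
A sketch of schema inference over a handful of JSON strings, assuming the `infer` overload that takes an `RDD[T]` together with a `(JsonFactory, T) => JsonParser` factory (the shape `CreateJacksonParser.string` provides); `spark` is an existing `SparkSession`:

```scala
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JsonInferSchema, JSONOptions}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap

val options = new JSONOptions(CaseInsensitiveMap(Map.empty[String, String]), "UTC", "_corrupt_record")
val records = spark.sparkContext.parallelize(Seq("""{"a": 1}""", """{"a": 2, "b": "x"}"""))
// Merges per-record schemas, e.g. into struct<a: bigint, b: string>.
val schema = new JsonInferSchema(options).infer(records, CreateJacksonParser.string)
```
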
CreateXmlParser.scala
@@ -29,7 +29,7 @@ import sun.nio.cs.StreamDecoder
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.unsafe.types.UTF8String
 
-private[sql] object CreateXmlParser extends Serializable {
+object CreateXmlParser extends Serializable {
   val filter = new EventFilter {
     override def accept(event: XMLEvent): Boolean =
       // Ignore comments and processing instructions
StaxXmlParser.scala
@@ -587,7 +587,7 @@ class StaxXmlParser(
  *
  * This implementation is loosely based on LineRecordReader in Hadoop.
  */
-private[xml] class XmlTokenizer(
+class XmlTokenizer(
     inputStream: InputStream,
     options: XmlOptions) {
   private val reader = new InputStreamReader(inputStream, Charset.forName(options.charset))
@@ -742,7 +742,7 @@ private[xml] class XmlTokenizer(
   }
 }
 
-private[sql] object StaxXmlParser {
+object StaxXmlParser {
   /**
    * Parses a stream that contains XML strings and turns it into an iterator of tokens.
    */
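
A sketch of driving the tokenizer over an in-memory document. Note the assumptions flagged in the comments: this diff shows neither `XmlTokenizer`'s public methods nor an `XmlOptions` factory, so both are hypothetical here.

```scala
import java.io.ByteArrayInputStream

import org.apache.spark.sql.catalyst.xml.{XmlOptions, XmlTokenizer}

// Assumptions, not confirmed by this diff: the XmlOptions companion offers an
// apply(Map[String, String]) factory, and XmlTokenizer exposes
// next(): Option[String] returning one row fragment per call.
val xml = "<ROOT><ROW><a>1</a></ROW><ROW><a>2</a></ROW></ROOT>"
val in = new ByteArrayInputStream(xml.getBytes("UTF-8"))
val tokenizer = new XmlTokenizer(in, XmlOptions(Map("rowTag" -> "ROW")))
Iterator
  .continually(tokenizer.next())
  .takeWhile(_.isDefined)
  .foreach(fragment => println(fragment.get)) // e.g. <ROW><a>1</a></ROW>
```
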
StaxXmlParserUtils.scala
@@ -24,7 +24,7 @@ import javax.xml.stream.events._
 import scala.annotation.tailrec
 import scala.jdk.CollectionConverters._
 
-private[sql] object StaxXmlParserUtils {
+object StaxXmlParserUtils {
 
   private[sql] val factory: XMLInputFactory = {
     val factory = XMLInputFactory.newInstance()
ValidatorUtil.scala
@@ -31,7 +31,7 @@ import org.apache.spark.internal.Logging
 /**
  * Utilities for working with XSD validation.
  */
-private[sql] object ValidatorUtil extends Logging{
+object ValidatorUtil extends Logging {
   // Parsing XSDs may be slow, so cache them by path:
 
   private val cache = CacheBuilder.newBuilder().softValues().build(
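
The cached-XSD pattern in the comment above is plain Guava: soft values let the JVM evict parsed schemas under memory pressure while hot entries stay cached. A self-contained sketch of that pattern (not Spark's exact code, which may resolve paths differently):

```scala
import java.io.File
import javax.xml.XMLConstants
import javax.xml.transform.stream.StreamSource
import javax.xml.validation.{Schema, SchemaFactory}

import com.google.common.cache.{CacheBuilder, CacheLoader}

// Parse an XSD at most once per path; soft values allow GC under pressure.
val xsdCache = CacheBuilder.newBuilder().softValues().build(
  new CacheLoader[String, Schema]() {
    override def load(path: String): Schema = {
      val factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI)
      factory.newSchema(new StreamSource(new File(path)))
    }
  })

val schema = xsdCache.get("/path/to/schema.xsd") // parsed on first access only
```
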
XmlInferSchema.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.catalyst.util.{DateFormatter, PermissiveMode, Timest
 import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
 import org.apache.spark.sql.types._
 
-private[sql] class XmlInferSchema(options: XmlOptions, caseSensitive: Boolean)
+class XmlInferSchema(options: XmlOptions, caseSensitive: Boolean)
   extends Serializable
   with Logging {
 
XmlOptions.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
 /**
  * Options for the XML data source.
  */
-private[sql] class XmlOptions(
+class XmlOptions(
     val parameters: CaseInsensitiveMap[String],
     defaultTimeZoneId: String,
     defaultColumnNameOfCorruptRecord: String,
@@ -172,7 +172,7 @@ private[sql] class XmlOptions(
   }
 }
 
-private[sql] object XmlOptions extends DataSourceOptions {
+object XmlOptions extends DataSourceOptions {
   val DEFAULT_ATTRIBUTE_PREFIX = "_"
   val DEFAULT_VALUE_TAG = "_VALUE"
   val DEFAULT_ROW_TAG = "ROW"
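
Once public, the companion's defaults are readable from any package; for example:

```scala
import org.apache.spark.sql.catalyst.xml.XmlOptions

XmlOptions.DEFAULT_ATTRIBUTE_PREFIX // "_"
XmlOptions.DEFAULT_VALUE_TAG        // "_VALUE"
XmlOptions.DEFAULT_ROW_TAG          // "ROW"
```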