diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala
index 64f70687e5ec6..6e77cfc948a03 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala
@@ -32,7 +32,6 @@ import org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GRO
 import org.apache.parquet.hadoop._
 
 import org.apache.spark.TaskContext
-import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.internal.{Logging, MDC}
 import org.apache.spark.internal.LogKey.{PATH, SCHEMA}
 import org.apache.spark.sql._
@@ -208,7 +207,6 @@ class ParquetFileFormat
       assert(supportBatch(sparkSession, resultSchema))
     }
 
-    val sparkConf = sparkSession.sparkContext.conf
     (file: PartitionedFile) => {
       assert(file.partitionValues.numFields == partitionSchema.size)