
[SPARK-52704][SQL][FOLLOWUP] Move session state utilities from trait FileFormat to SessionStateHelper #51411
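Summary (from the diff below): this follow-up extracts the protected session-state accessors (sessionState, sqlConf, hadoopConf) out of trait FileFormat into a new reusable trait SessionStateHelper (with a companion object) in org.apache.spark.sql.internal, and updates the call sites (TextBasedFileFormat, CSVFileFormat, JsonFileFormat, XmlFileFormat, FileScan) to the renamed getSqlConf, getHadoopConf, and getSparkConf helpers.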


Closed
FileFormat.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
 import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributes
 import org.apache.spark.sql.errors.QueryExecutionErrors
-import org.apache.spark.sql.internal.{SessionState, SQLConf}
+import org.apache.spark.sql.internal.{SessionStateHelper, SQLConf}
 import org.apache.spark.sql.sources.Filter
 import org.apache.spark.sql.types._
@@ -235,20 +235,6 @@ trait FileFormat {
    */
   def fileConstantMetadataExtractors: Map[String, PartitionedFile => Any] =
     FileFormat.BASE_METADATA_EXTRACTORS
-
-  protected def sessionState(sparkSession: SparkSession): SessionState = {
-    sparkSession.sessionState
-  }
-
-  protected def sqlConf(sparkSession: SparkSession): SQLConf = {
-    sessionState(sparkSession).conf
-  }
-
-  protected def hadoopConf(
-      sparkSession: SparkSession,
-      options: Map[String, String]): Configuration = {
-    sessionState(sparkSession).newHadoopConfWithOptions(options)
-  }
 }
 
 object FileFormat {
@@ -370,15 +356,15 @@ object FileFormat {
 /**
  * The base class file format that is based on text file.
  */
-abstract class TextBasedFileFormat extends FileFormat {
+abstract class TextBasedFileFormat extends FileFormat with SessionStateHelper {
   private var codecFactory: CompressionCodecFactory = _
 
   override def isSplitable(
       sparkSession: SparkSession,
       options: Map[String, String],
       path: Path): Boolean = {
     if (codecFactory == null) {
-      codecFactory = new CompressionCodecFactory(hadoopConf(sparkSession, options))
+      codecFactory = new CompressionCodecFactory(getHadoopConf(sparkSession, options))
     }
     val codec = codecFactory.getCodec(path)
     codec == null || codec.isInstanceOf[SplittableCompressionCodec]
CSVFileFormat.scala
@@ -168,7 +168,7 @@ case class CSVFileFormat() extends TextBasedFileFormat with DataSourceRegister {
   private def getCsvOptions(
       sparkSession: SparkSession,
       options: Map[String, String]): CSVOptions = {
-    val conf = sqlConf(sparkSession)
+    val conf = getSqlConf(sparkSession)
     new CSVOptions(
       options,
       conf.csvColumnPruning,
JsonFileFormat.scala
@@ -136,7 +136,7 @@ case class JsonFileFormat() extends TextBasedFileFormat with DataSourceRegister
       spark: SparkSession,
       options: Map[String, String],
       inRead: Boolean = true): JSONOptions = {
-    val conf = sqlConf(spark)
+    val conf = getSqlConf(spark)
     if (inRead) {
       new JSONOptionsInRead(options, conf.sessionLocalTimeZone, conf.columnNameOfCorruptRecord)
     } else {
FileScan.scala
@@ -30,11 +30,11 @@ import org.apache.spark.sql.catalyst.expressions.{AttributeSet, Expression, Expr
 import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
 import org.apache.spark.sql.catalyst.plans.QueryPlan
 import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributes
-import org.apache.spark.sql.connector.read.{Batch, InputPartition, Scan, Statistics, SupportsReportStatistics}
+import org.apache.spark.sql.connector.read._
 import org.apache.spark.sql.errors.QueryCompilationErrors
 import org.apache.spark.sql.execution.PartitionedFileUtil
 import org.apache.spark.sql.execution.datasources._
-import org.apache.spark.sql.internal.{SessionState, SQLConf}
+import org.apache.spark.sql.internal.{SessionStateHelper, SQLConf}
 import org.apache.spark.sql.internal.connector.SupportsMetadata
 import org.apache.spark.sql.sources.Filter
 import org.apache.spark.sql.types.StructType
@@ -113,10 +113,7 @@ trait FileScan extends Scan
 
   override def hashCode(): Int = getClass.hashCode()
 
-  override def conf: SQLConf = {
-    val sessionState: SessionState = sparkSession.sessionState
-    sessionState.conf
-  }
+  override def conf: SQLConf = SessionStateHelper.getSqlConf(sparkSession)
 
   val maxMetadataValueLength = conf.maxMetadataStringLength
 
@@ -177,7 +174,7 @@ trait FileScan extends Scan
     if (splitFiles.length == 1) {
       val path = splitFiles(0).toPath
       if (!isSplitable(path) && splitFiles(0).length >
-          sparkSession.sparkContext.conf.get(IO_WARNING_LARGEFILETHRESHOLD)) {
+          SessionStateHelper.getSparkConf(sparkSession).get(IO_WARNING_LARGEFILETHRESHOLD)) {
         logWarning(log"Loading one large unsplittable file ${MDC(PATH, path.toString)} with only " +
           log"one partition, the reason is: ${MDC(REASON, getFileUnSplittableReason(path))}")
       }
XmlFileFormat.scala
@@ -42,7 +42,7 @@ case class XmlFileFormat() extends TextBasedFileFormat with DataSourceRegister {
   private def getXmlOptions(
       sparkSession: SparkSession,
       parameters: Map[String, String]): XmlOptions = {
-    val conf = sqlConf(sparkSession)
+    val conf = getSqlConf(sparkSession)
     new XmlOptions(parameters, conf.sessionLocalTimeZone, conf.columnNameOfCorruptRecord, true)
   }
 
SessionStateHelper.scala (new file)
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.internal
+
+import org.apache.hadoop.conf.Configuration
+
+import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.sql.SparkSession
+
+/**
+ * Helper trait to access session state related configurations and utilities.
+ * It also provides type annotations for IDEs to build indexes.
+ */
+trait SessionStateHelper {
+  private def sessionState(sparkSession: SparkSession): SessionState = {
+    sparkSession.sessionState
+  }
+
+  private def sparkContext(sparkSession: SparkSession): SparkContext = {
+    sparkSession.sparkContext
+  }
+
+  def getSparkConf(sparkSession: SparkSession): SparkConf = {
+    sparkContext(sparkSession).conf
+  }
+
+  def getSqlConf(sparkSession: SparkSession): SQLConf = {
+    sessionState(sparkSession).conf
+  }
+
+  def getHadoopConf(
+      sparkSession: SparkSession,
+      options: Map[String, String]): Configuration = {
+    sessionState(sparkSession).newHadoopConfWithOptions(options)
+  }
+}
+
+object SessionStateHelper extends SessionStateHelper
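
For reference, a minimal sketch of how the new trait can be consumed once this patch is applied. MyFormatHelper and its two methods are hypothetical; getSqlConf, getHadoopConf, sessionLocalTimeZone, and the companion-object call pattern come straight from the diff above.

import org.apache.hadoop.conf.Configuration

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.{SessionStateHelper, SQLConf}

// Mixing in the trait exposes the accessors as instance methods,
// as TextBasedFileFormat now does.
class MyFormatHelper extends SessionStateHelper {

  // Read a session-local setting through the shared helper instead of
  // reaching into sparkSession.sessionState directly.
  def timeZone(sparkSession: SparkSession): String = {
    val conf: SQLConf = getSqlConf(sparkSession)
    conf.sessionLocalTimeZone
  }

  // Build a Hadoop Configuration that also carries per-relation options,
  // mirroring TextBasedFileFormat.isSplitable.
  def hadoopConfFor(
      sparkSession: SparkSession,
      options: Map[String, String]): Configuration = {
    getHadoopConf(sparkSession, options)
  }
}

Classes that do not extend the trait can call the companion object instead, which is exactly what FileScan.conf does above: SessionStateHelper.getSqlConf(sparkSession).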