
Commit 2c006d7

mwiewior (Marek Wiewiórka) and Marek Wiewiórka authored
feat: spark 3.4 support (#174)
* Bumping to Spark-3.4.3
* Fixes after upgrade
* Apple Silicon (no-GKL) support

Co-authored-by: Marek Wiewiórka <mwiewior@Mareks-MacBook-Pro.local>
1 parent a49c32b commit 2c006d7

30 files changed (+152, −153 lines)

build.sbt

Lines changed: 11 additions & 10 deletions
@@ -3,7 +3,7 @@ import sbtassembly.AssemblyPlugin.autoImport.ShadeRule
 import scala.util.Properties
 
 name := """sequila"""
-val DEFAULT_SPARK_3_VERSION = "3.2.2"
+val DEFAULT_SPARK_3_VERSION = "3.4.3"
 lazy val sparkVersion = Properties.envOrElse("SPARK_VERSION", DEFAULT_SPARK_3_VERSION)
 
 version := s"${sys.env.getOrElse("VERSION", "0.1.0")}"
@@ -14,7 +14,7 @@ scalaVersion := "2.12.13"
 val isSnapshotVersion = settingKey[Boolean]("Is snapshot")
 isSnapshotVersion := version.value.toLowerCase.contains("snapshot")
 
-val DEFAULT_HADOOP_VERSION = "3.1.2"
+val DEFAULT_HADOOP_VERSION = "3.3.6"
 
 lazy val hadoopVersion = Properties.envOrElse("SPARK_HADOOP_VERSION", DEFAULT_HADOOP_VERSION)
 
@@ -29,8 +29,9 @@ dependencyOverrides += "io.netty" % "netty-transport" % nettyVersion
 dependencyOverrides += "io.netty" % "netty-transport-native-epoll" % nettyVersion
 dependencyOverrides += "io.netty" % "netty-transport-native-unix-common" % nettyVersion
 dependencyOverrides += "com.google.guava" % "guava" % "15.0"
-dependencyOverrides += "org.apache.orc" % "orc-core" % "1.6.9"
-dependencyOverrides += "org.apache.logging.log4j" % "log4j-core" % "2.3"
+//dependencyOverrides += "org.apache.orc" % "orc-core" % "1.7.5"
+//dependencyOverrides += "org.apache.logging.log4j" % "log4j-core" % "2.20.0"
+//dependencyOverrides += "org.scalatest" %% "scalatest" % "3.0.3" % "test"
 
 
 //removing hadoop-bam to used a patched one with support for htsjdk 2.22
@@ -40,10 +41,10 @@ libraryDependencies += "org.apache.spark" %% "spark-core" % sparkVersion
 libraryDependencies += "org.apache.spark" %% "spark-sql" % sparkVersion
 libraryDependencies += "com.github.mrpowers" %% "spark-fast-tests" % "0.21.3"
 libraryDependencies += "com.github.mrpowers" %% "spark-daria" % "0.38.2"
-libraryDependencies += "com.holdenkarau" %% "spark-testing-base" % "3.2.0_1.2.0" % "test" excludeAll ExclusionRule(organization = "javax.servlet") excludeAll (ExclusionRule("org.apache.hadoop"))
-libraryDependencies += "org.bdgenomics.adam" %% "adam-core-spark3" % "0.36.0" excludeAll (ExclusionRule("org.seqdoop"))
-libraryDependencies += "org.bdgenomics.adam" %% "adam-apis-spark3" % "0.36.0" excludeAll (ExclusionRule("org.seqdoop"))
-libraryDependencies += "org.bdgenomics.adam" %% "adam-cli-spark3" % "0.36.0" excludeAll (ExclusionRule("org.seqdoop"))
+libraryDependencies += "com.holdenkarau" %% "spark-testing-base" % "3.4.1_1.4.4" % "test" excludeAll ExclusionRule(organization = "javax.servlet") excludeAll (ExclusionRule("org.apache.hadoop"))
+libraryDependencies += "org.bdgenomics.adam" %% "adam-core-spark3" % "1.0.1" excludeAll (ExclusionRule("org.seqdoop"))
+libraryDependencies += "org.bdgenomics.adam" %% "adam-apis-spark3" % "1.0.1" excludeAll (ExclusionRule("org.seqdoop"))
+libraryDependencies += "org.bdgenomics.adam" %% "adam-cli-spark3" % "1.0.1" excludeAll (ExclusionRule("org.seqdoop"))
 libraryDependencies += "org.scala-lang" % "scala-library" % scalaVersion.value
 libraryDependencies += "org.rogach" %% "scallop" % "3.1.2"
 libraryDependencies += "com.github.samtools" % "htsjdk" % "2.24.1"
@@ -54,9 +55,9 @@ libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.7"
 libraryDependencies += "org.eclipse.jetty" % "jetty-servlet" % "9.3.24.v20180605"
 libraryDependencies += "org.apache.derby" % "derbyclient" % "10.14.2.0"
 //libraryDependencies += "org.disq-bio" % "disq" % "0.3.8" <-disabled since we use patched version of HtsjdkReadsTraversalParameters
-libraryDependencies += "io.projectglow" %% "glow-spark3" % "1.0.1" excludeAll (ExclusionRule("com.github.samtools")) excludeAll (ExclusionRule("org.seqdoop")) //FIXME:: remove togehter with disq
+libraryDependencies += "io.projectglow" %% "glow-spark3" % "2.0.0" excludeAll (ExclusionRule("com.github.samtools")) excludeAll (ExclusionRule("org.seqdoop")) //FIXME:: remove togehter with disq
 libraryDependencies += "com.intel.gkl" % "gkl" % "0.8.8"
-libraryDependencies += "org.openjdk.jol" % "jol-core" % "0.16" % "provided"
+libraryDependencies += "org.openjdk.jol" % "jol-core" % "0.17" % "provided"
 libraryDependencies += "com.github.jsr203hadoop" % "jsr203hadoop" % "1.0.3"
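Note: the two version constants above are only defaults; build.sbt resolves the real versions from the environment, so one codebase can be built against several Spark/Hadoop combinations. A minimal sketch of the selection pattern as it appears in the build (the `SPARK_VERSION=3.4.1` value in the comment is illustrative, not from this commit):

    import scala.util.Properties

    // The environment variable wins, the constant is the fallback,
    // e.g. `SPARK_VERSION=3.4.1 sbt test` builds against Spark 3.4.1.
    val DEFAULT_SPARK_3_VERSION = "3.4.3"
    lazy val sparkVersion = Properties.envOrElse("SPARK_VERSION", DEFAULT_SPARK_3_VERSION)

    val DEFAULT_HADOOP_VERSION = "3.3.6"
    lazy val hadoopVersion = Properties.envOrElse("SPARK_HADOOP_VERSION", DEFAULT_HADOOP_VERSION)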

sbt-cache/ivy/.sbt.ivy.lock

Whitespace-only changes.

src/main/scala/org/biodatageeks/sequila/datasources/BAM/SequilaDataSourceStrategy.scala

Lines changed: 4 additions & 3 deletions
@@ -14,6 +14,7 @@ import org.apache.spark.sql.execution.datasources.DataSourceStrategy.selectFilte
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.encoders.RowEncoder
+import org.apache.spark.sql.execution.datasources.v2.PushedDownOperators
 import org.apache.spark.sql.sources._
 import org.apache.spark.sql.types.{StringType, StructType}
 import org.apache.spark.unsafe.types.UTF8String
@@ -102,7 +103,7 @@ case class SequilaDataSourceStrategy(spark: SparkSession) extends Strategy
         l.output.toStructType,
         Set.empty,
         Set.empty,
-        None,
+        PushedDownOperators(None, None, None, None, Seq.empty, Seq.empty),
         toCatalystRDD(l, baseRelation.buildScan()),
         baseRelation,
         None) :: Nil
@@ -216,7 +217,7 @@ case class SequilaDataSourceStrategy(spark: SparkSession) extends Strategy
       projects.map(_.toAttribute).toStructType,
       Set.empty,
       Set.empty,
-      None,
+      PushedDownOperators(None, None, None, None, Seq.empty, Seq.empty),
       scanBuilder(requestedColumns, candidatePredicates, pushedFilters),
       relation.relation,
       relation.catalogTable.map(_.identifier))
@@ -231,7 +232,7 @@ case class SequilaDataSourceStrategy(spark: SparkSession) extends Strategy
       requestedColumns.toStructType,
       Set.empty,
       Set.empty,
-      None,
+      PushedDownOperators(None, None, None, None, Seq.empty, Seq.empty),
       scanBuilder(requestedColumns, candidatePredicates, pushedFilters),
       relation.relation,
       relation.catalogTable.map(_.identifier))
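Note: the three identical replacements above track a Spark 3.4 API change: `RowDataSourceScanExec` now takes a `PushedDownOperators` bundle describing every DataSource V2 pushdown, where earlier Spark versions took a single `Option`. A sketch of the "nothing pushed down" value used here; the per-field meanings are an assumption based on the Spark 3.4 sources, so verify against your Spark build:

    import org.apache.spark.sql.execution.datasources.v2.PushedDownOperators

    // A minimal sketch, assuming Spark 3.4's field order.
    val noPushdown = PushedDownOperators(
      None,      // pushed aggregation
      None,      // pushed table sample
      None,      // pushed limit
      None,      // pushed offset (offset pushdown is new in Spark 3.4)
      Seq.empty, // pushed top-N sort order
      Seq.empty  // pushed V2 predicates
    )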

src/main/scala/org/biodatageeks/sequila/utvf/ResolveTableValuedFunctionsSeq.scala

Lines changed: 8 additions & 9 deletions
@@ -20,9 +20,8 @@ package org.apache.spark.sql
 
 
 import java.util.Locale
-
-import org.apache.spark.sql.ResolveTableValuedFunctionsSeq.tvf
 import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, TypeCoercion, UnresolvedTableValuedFunction}
+import org.apache.spark.sql.catalyst.catalog.SessionCatalog
 import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Expression}
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
@@ -35,7 +34,7 @@ import org.biodatageeks.sequila.utils.Columns
 /**
  * Rule that resolves table-valued function references.
  */
-object ResolveTableValuedFunctionsSeq extends Rule[LogicalPlan] {
+case class ResolveTableValuedFunctionsSeq(catalog: SessionCatalog) extends Rule[LogicalPlan] {
   /**
    * List of argument names and their types, used to declare a function.
    */
@@ -130,7 +129,7 @@ object ResolveTableValuedFunctionsSeq extends Rule[LogicalPlan] {
 
   override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
     case u: UnresolvedTableValuedFunction if u.functionArgs.forall(_.resolved) =>
-      val resolvedFunc = builtinFunctions.get(u.name.funcName.toLowerCase(Locale.ROOT)) match {
+      val resolvedFunc = builtinFunctions.get(u.name.head.toLowerCase(Locale.ROOT)) match {
         case Some(tvf) =>
           val resolved = tvf.flatMap { case (argList, resolver) =>
             argList.implicitCast(u.functionArgs) match {
@@ -143,12 +142,12 @@ object ResolveTableValuedFunctionsSeq extends Rule[LogicalPlan] {
           resolved.headOption.getOrElse {
             val argTypes = u.functionArgs.map(_.dataType.typeName).mkString(", ")
             u.failAnalysis(
-              s"""error: table-valued function ${u.name.funcName} with alternatives:
+              s"""error: table-valued function ${u.name.head} with alternatives:
                 |${tvf.keys.map(_.toString).toSeq.sorted.map(x => s"  ($x)").mkString("\n")}
-                |cannot be applied to: (${argTypes})""".stripMargin)
+                |cannot be applied to: (${argTypes})""".stripMargin, Map.empty)
           }
         case _ =>
-          u.failAnalysis(s"could not resolve `${u.name.funcName}` to a table-valued function")
+          u.failAnalysis(s"could not resolve `${u.name.head}` to a table-valued function", Map.empty)
       }
 
       // If alias names assigned, add `Project` with the aliases
@@ -157,8 +156,8 @@ object ResolveTableValuedFunctionsSeq extends Rule[LogicalPlan] {
         // Checks if the number of the aliases is equal to expected one
         if (u.output.size != outputAttrs.size) {
           u.failAnalysis(s"Number of given aliases does not match number of output columns. " +
-            s"Function name: ${u.name.funcName}; number of aliases: " +
-            s"${u.output.size}; number of output columns: ${outputAttrs.size}.")
+            s"Function name: ${u.name.head}; number of aliases: " +
+            s"${u.output.size}; number of output columns: ${outputAttrs.size}.", Map.empty)
         }
         val aliases = outputAttrs.zip(u.output).map {
           case (attr, name) => Alias(attr, name.toString())()
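Note: two Spark 3.4 API changes drive this rewrite. `UnresolvedTableValuedFunction.name` became a multi-part identifier (a `Seq[String]`), so the single-part function name is read as `u.name.head` instead of `u.name.funcName`; and `failAnalysis` gained a message-parameters argument, satisfied here with `Map.empty`. A minimal sketch of the new lookup key, with `coverage` as a hypothetical TVF name:

    import java.util.Locale

    // Spark 3.4 models a TVF name as Seq[String]; SeQuiLa's built-in
    // functions are single-part, so the head segment is the lookup key.
    val name: Seq[String] = Seq("coverage") // hypothetical example
    val key = name.head.toLowerCase(Locale.ROOT)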

src/main/scala/org/biodatageeks/sequila/utvf/SeQuiLaAnalyzer.scala

Lines changed: 30 additions & 30 deletions
@@ -3,36 +3,35 @@ package org.apache.spark.sql.catalyst.analysis
 import org.apache.spark.sql.catalyst.analysis.TypeCoercion.typeCoercionRules
 import org.apache.spark.sql.{ResolveTableValuedFunctionsSeq, SparkSession}
 import org.apache.spark.sql.catalyst.catalog.SessionCatalog
+import org.apache.spark.sql.catalyst.expressions.{Attribute, NamedExpression}
 import org.apache.spark.sql.catalyst.optimizer.OptimizeUpdateFields
-import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoStatement, LogicalPlan, Project}
 import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.trees.AlwaysProcess
+import org.apache.spark.sql.catalyst.util.StringUtils
+import org.apache.spark.sql.errors.QueryCompilationErrors
 import org.apache.spark.sql.execution.aggregate.ResolveEncodersInScalaAgg
 import org.apache.spark.sql.execution.analysis.DetectAmbiguousSelfJoin
 import org.apache.spark.sql.execution.command.CommandCheck
-import org.apache.spark.sql.execution.datasources.v2.TableCapabilityCheck
+import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, TableCapabilityCheck}
 import org.apache.spark.sql.execution.datasources.{DataSourceAnalysis, FallBackFileSourceV2, FindDataSourceTable, HiveOnlyCheck, PreReadCheck, PreWriteCheck, PreprocessTableCreation, PreprocessTableInsertion, ResolveSQLOnFile}
 
 
-
 class SeQuiLaAnalyzer(session: SparkSession) extends
-  Analyzer( session.sessionState.analyzer.catalogManager){
+  Analyzer( session.sessionState.analyzer.catalogManager) {
 
   override val conf = session.sessionState.conf
   val catalog = session.sessionState.analyzer.catalogManager.v1SessionCatalog
   override val extendedResolutionRules: Seq[Rule[LogicalPlan]] =
     new FindDataSourceTable(session) +:
-    new ResolveSQLOnFile(session) +:
-    new FallBackFileSourceV2(session) +:
-    new ResolveSessionCatalog(
-      catalogManager) +:
-    ResolveEncodersInScalaAgg+: session.extensions.buildResolutionRules(session)
+      new ResolveSQLOnFile(session) +:
+      new FallBackFileSourceV2(session) +:
+      //FIXME: After upgrade to Spark - 3.4.0, this line is commented out
+      // new ResolveSessionCatalog(
+      //   catalogManager) +:
+      ResolveEncodersInScalaAgg +: session.extensions.buildResolutionRules(session)
 
 
-  override val postHocResolutionRules: Seq[Rule[LogicalPlan]] =
-    DetectAmbiguousSelfJoin +:
-      PreprocessTableCreation(session) +:
-      PreprocessTableInsertion +:
-      DataSourceAnalysis +: session.extensions.buildPostHocResolutionRules(session)
 
   override val extendedCheckRules: Seq[LogicalPlan => Unit] =
     PreWriteCheck +:
@@ -42,9 +41,6 @@ class SeQuiLaAnalyzer(session: SparkSession) extends
     CommandCheck +: session.extensions.buildCheckRules(session)
 
 
-  private val v1SessionCatalog: SessionCatalog = catalogManager.v1SessionCatalog
-
-
   override def batches: Seq[Batch] = Seq(
     Batch("Substitution", fixedPoint,
       // This rule optimizes `UpdateFields` expression chains so looks more like optimization rule.
@@ -53,6 +49,7 @@ class SeQuiLaAnalyzer(session: SparkSession) extends
       // at the beginning of analysis.
      OptimizeUpdateFields,
       CTESubstitution,
+      BindParameters,
      WindowsSubstitution,
       EliminateUnions,
       SubstituteUnresolvedOrdinals),
@@ -63,28 +60,28 @@ class SeQuiLaAnalyzer(session: SparkSession) extends
       ResolveHints.ResolveCoalesceHints),
     Batch("Simple Sanity Check", Once,
       LookupFunctions),
+    Batch("Keep Legacy Outputs", Once,
+      KeepLegacyOutputs),
     Batch("Resolution", fixedPoint,
-      ResolveTableValuedFunctionsSeq ::
-      ResolveNamespace(catalogManager) ::
+      ResolveTableValuedFunctionsSeq(catalog) ::
      new ResolveCatalogs(catalogManager) ::
       ResolveUserSpecifiedColumns ::
       ResolveInsertInto ::
       ResolveRelations ::
-      ResolveTables ::
       ResolvePartitionSpec ::
-      ResolveAlterTableCommands ::
+      ResolveFieldNameAndPosition ::
       AddMetadataColumns ::
       DeduplicateRelations ::
       ResolveReferences ::
+      ResolveLateralColumnAliasReference ::
       ResolveExpressionsWithNamePlaceholders ::
       ResolveDeserializer ::
       ResolveNewInstance ::
       ResolveUpCast ::
       ResolveGroupingAnalytics ::
       ResolvePivot ::
+      ResolveUnpivot ::
       ResolveOrdinalInOrderByAndGroupBy ::
-      ResolveAggAliasInGroupBy ::
-      ResolveMissingReferences ::
       ExtractGenerator ::
       ResolveGenerate ::
       ResolveFunctions ::
@@ -100,19 +97,19 @@ class SeQuiLaAnalyzer(session: SparkSession) extends
       ResolveAggregateFunctions ::
       TimeWindowing ::
       SessionWindowing ::
+      ResolveWindowTime ::
+      ResolveDefaultColumns(ResolveRelations.resolveRelationOrTempView) ::
       ResolveInlineTables ::
-      ResolveHigherOrderFunctions(catalogManager) ::
       ResolveLambdaVariables ::
       ResolveTimeZone ::
       ResolveRandomSeed ::
       ResolveBinaryArithmetic ::
       ResolveUnion ::
+      RewriteDeleteFromTable ::
       typeCoercionRules ++
       Seq(ResolveWithCTE) ++
-      extendedResolutionRules : _*),
+      extendedResolutionRules: _*),
     Batch("Remove TempResolvedColumn", Once, RemoveTempResolvedColumn),
-    Batch("Apply Char Padding", Once,
-      ApplyCharTypePadding),
     Batch("Post-Hoc Resolution", Once,
       Seq(ResolveCommandsWithIfExists) ++
       postHocResolutionRules: _*),
@@ -129,7 +126,10 @@ class SeQuiLaAnalyzer(session: SparkSession) extends
       UpdateOuterReferences),
     Batch("Cleanup", fixedPoint,
       CleanupAliases),
-    Batch("HandleAnalysisOnlyCommand", Once,
-      HandleAnalysisOnlyCommand)
+    Batch("HandleSpecialCommand", Once,
+      HandleSpecialCommand),
+    Batch("Remove watermark for batch query", Once,
+      EliminateEventTimeWatermark)
   )
-}
+}
+
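Note: SeQuiLa maintains this full copy of the analyzer batch list, updated here rule-by-rule for Spark 3.4 (`BindParameters`, `ResolveUnpivot`, `ResolveLateralColumnAliasReference`, `HandleSpecialCommand`, and so on), presumably because it needs `ResolveTableValuedFunctionsSeq` inside the Resolution batch itself; the public `SparkSessionExtensions` API can only append rules after the built-in ones and cannot remove or reorder batches. For contrast, a sketch of that supported route; the class name and wiring are illustrative, not from this commit:

    import org.apache.spark.sql.SparkSessionExtensions

    // Hypothetical wiring via the public extensions API: the injected rule
    // joins extendedResolutionRules at the tail of the Resolution batch.
    class ExampleSequilaExtensions extends (SparkSessionExtensions => Unit) {
      override def apply(extensions: SparkSessionExtensions): Unit = {
        extensions.injectResolutionRule { session =>
          org.apache.spark.sql.ResolveTableValuedFunctionsSeq(
            session.sessionState.catalog) // sessionState is private[sql], so
            // this only compiles from within the org.apache.spark.sql package
        }
      }
    }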

src/main/scala/org/biodatageeks/sequila/utvf/SequilaSession.scala

Lines changed: 2 additions & 1 deletion
@@ -133,6 +133,7 @@ case class SequilaSessionState(sparkSession: SparkSession, customAnalyzer: Analy
     sparkSession.sessionState.executePlan,
     (sparkSession:SparkSession,sessionState: SessionState) => sessionState.clone(sparkSession),
     sparkSession.sessionState.columnarRules,
-    sparkSession.sessionState.queryStagePrepRules
+    sparkSession.sessionState.adaptiveRulesHolder,
+    sparkSession.sessionState.planNormalizationRules
   ){
 }
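Note: the `SessionState` constructor changed shape in Spark 3.4: `queryStagePrepRules` was folded into an `AdaptiveRulesHolder`, and `planNormalizationRules` was added, so code like `SequilaSessionState` that rebuilds a session state field-by-field must forward both. A sketch of reading the two new fields; the field names come from the diff above, and the package declaration is needed because `sessionState` is `private[sql]` (SeQuiLa's own sources sit in that package for the same reason):

    package org.apache.spark.sql

    object SessionStateFieldsSketch {
      def newFields(spark: SparkSession) = {
        // Spark 3.4 groups the AQE rule collections into one holder object...
        val adaptiveRules = spark.sessionState.adaptiveRulesHolder
        // ...and adds plan normalization rules as a separate constructor argument.
        val normalizationRules = spark.sessionState.planNormalizationRules
        (adaptiveRules, normalizationRules)
      }
    }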

src/test/scala/org/biodatageeks/sequila/tests/base/BAMBaseTestSuite.scala

Lines changed: 3 additions & 2 deletions
@@ -2,10 +2,11 @@ package org.biodatageeks.sequila.tests.base
 
 import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
 import org.apache.spark.sql.SequilaSession
-import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuite}
+import org.scalatest.funsuite.AnyFunSuite
+import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
 
 class BAMBaseTestSuite
-  extends FunSuite
+  extends AnyFunSuite
     with DataFrameSuiteBase
     with SharedSparkContext with BeforeAndAfter{
 
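Note: this import move is the standard ScalaTest 3.1+ migration: `FunSuite` was renamed `AnyFunSuite` and relocated to `org.scalatest.funsuite`, and the old name was subsequently removed. The same two-line change repeats in the remaining test suites below. A minimal sketch of the new style (suite and test body are hypothetical):

    import org.scalatest.funsuite.AnyFunSuite

    class ExampleSuite extends AnyFunSuite {
      test("contig normalization strips the chr prefix") {
        assert("chr1".stripPrefix("chr") === "1")
      }
    }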

src/test/scala/org/biodatageeks/sequila/tests/base/BEDBaseTestSuite.scala

Lines changed: 3 additions & 2 deletions
@@ -1,11 +1,12 @@
 package org.biodatageeks.sequila.tests.base
 
 import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
-import org.scalatest.{BeforeAndAfter, FunSuite}
+import org.scalatest.{BeforeAndAfter}
+import org.scalatest.funsuite.AnyFunSuite
 
 class BEDBaseTestSuite
   extends
-    FunSuite
+    AnyFunSuite
     with DataFrameSuiteBase
     with SharedSparkContext with BeforeAndAfter{
 

src/test/scala/org/biodatageeks/sequila/tests/base/FASTQBaseTestSuite.scala

Lines changed: 3 additions & 2 deletions
@@ -1,11 +1,12 @@
 package org.biodatageeks.sequila.tests.base
 
 import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
-import org.scalatest.{BeforeAndAfter, FunSuite}
+import org.scalatest.{BeforeAndAfter}
+import org.scalatest.funsuite.AnyFunSuite
 
 class FASTQBaseTestSuite
   extends
-    FunSuite
+    AnyFunSuite
     with DataFrameSuiteBase
     with SharedSparkContext with BeforeAndAfter{
 

src/test/scala/org/biodatageeks/sequila/tests/base/IntervalJoinBaseTestSuite.scala

Lines changed: 3 additions & 2 deletions
@@ -4,9 +4,10 @@ import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
 import org.biodatageeks.sequila.rangejoins.IntervalTree.IntervalTreeJoinStrategyOptim
-import org.scalatest.{BeforeAndAfter, FunSuite}
+import org.scalatest.{BeforeAndAfter}
+import org.scalatest.funsuite.AnyFunSuite
 
-class IntervalJoinBaseTestSuite extends FunSuite
+class IntervalJoinBaseTestSuite extends AnyFunSuite
   with DataFrameSuiteBase
   with SharedSparkContext
   with BeforeAndAfter {

src/test/scala/org/biodatageeks/sequila/tests/dataquality/ContigNormalizationTest.scala

Lines changed: 2 additions & 2 deletions
@@ -1,9 +1,9 @@
 package org.biodatageeks.sequila.tests.dataquality
 
 import org.biodatageeks.sequila.utils.DataQualityFuncs
-import org.scalatest.FunSuite
+import org.scalatest.funsuite.AnyFunSuite
 
-class ContigNormalizationTest extends FunSuite{
+class ContigNormalizationTest extends AnyFunSuite{
 
   test("Test contig") {
     val chrInTest1 = "chr1"

src/test/scala/org/biodatageeks/sequila/tests/datasources/ADAMBenchmarkTestSuite.scala

Lines changed: 3 additions & 3 deletions
@@ -1,15 +1,15 @@
 package org.biodatageeks.sequila.tests.datasources
 
 import java.io.{OutputStreamWriter, PrintWriter}
-
 import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
 import org.biodatageeks.sequila.rangejoins.IntervalTree.IntervalTreeJoinStrategyOptim
 import org.biodatageeks.sequila.rangejoins.genApp.IntervalTreeJoinStrategy
 import org.biodatageeks.sequila.utils.{Columns, InternalParams}
-import org.scalatest.{BeforeAndAfter, FunSuite}
+import org.scalatest.BeforeAndAfter
+import org.scalatest.funsuite.AnyFunSuite
 
 class ADAMBenchmarkTestSuite
-  extends FunSuite
+  extends AnyFunSuite
     with DataFrameSuiteBase
     with BeforeAndAfter
     with SharedSparkContext {
