@@ -1189,7 +1189,7 @@ index 9e9d717db3b..c1a7caf56e0 100644
  package org.apache.spark.sql.execution

 -import org.apache.spark.sql.{DataFrame, QueryTest, Row}
-+import org.apache.spark.sql.{DataFrame, IgnoreComet, QueryTest, Row}
++import org.apache.spark.sql.{DataFrame, QueryTest, Row}
 +import org.apache.spark.sql.comet.CometProjectExec
  import org.apache.spark.sql.connector.SimpleWritableDataSource
  import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite}
@@ -1206,13 +1206,12 @@ index 9e9d717db3b..c1a7caf56e0 100644
        assert(actual == expected)
      }
    }
-@@ -112,7 +116,8 @@ abstract class RemoveRedundantProjectsSuiteBase
+@@ -112,7 +116,7 @@ abstract class RemoveRedundantProjectsSuiteBase
      assertProjectExec(query, 1, 3)
    }

 -  test("join with ordering requirement") {
-+  test("join with ordering requirement",
-+    IgnoreComet("TODO: Support SubqueryBroadcastExec in Comet: #242")) {
++  test("join with ordering requirement") {
      val query = "select * from (select key, a, c, b from testView) as t1 join " +
        "(select key, a, b, c from testView) as t2 on t1.key = t2.key where t2.a > 50"
      assertProjectExec(query, 2, 2)
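In short, these two hunks update the Spark test patch so that the "join with ordering requirement" test in RemoveRedundantProjectsSuite runs with Comet again instead of being skipped via IgnoreComet (the SubqueryBroadcastExec gap tracked in #242), and they import CometProjectExec so the suite can account for Comet's project operator. As a rough sketch only, not the exact patched code, the project-counting helper presumably ends up matching both operators; the helper name and body below are assumptions:

// Sketch: count both Spark's ProjectExec and Comet's CometProjectExec,
// so the expected count holds whether or not Comet rewrites the plan.
// Assumes the suite mixes in AdaptiveSparkPlanHelper (for
// collectWithSubqueries) and ScalaTest (for withClue), as the imports
// in the diff suggest.
private def assertProjectExecCount(df: DataFrame, expected: Int): Unit = {
  withClue(df.queryExecution) {
    val plan = df.queryExecution.executedPlan
    val actual = collectWithSubqueries(plan) {
      case p: ProjectExec => p       // native Spark projection
      case c: CometProjectExec => c  // Comet's replacement operator
    }.size
    assert(actual == expected)
  }
}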