Commit 3f661d5

Spark 3.4: Migrate tests in spark, extensions and functions (#12853)
1 parent 0386463 commit 3f661d5

16 files changed: +601 -594 lines changed
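
Every file in this commit applies the same mechanical migration: JUnit 4's Assert, @Rule/TemporaryFolder, and @Before become AssertJ's fluent assertThat plus JUnit 5 (Jupiter) @Test, @TempDir, and @BeforeEach. A minimal sketch of the resulting test shape, built only from APIs the diffs below actually use (the class and method names are illustrative, not from the commit):

import static org.assertj.core.api.Assertions.assertThat;

import java.io.File;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

public class ExampleMigratedTest {

  // JUnit 4 equivalent: @Rule public TemporaryFolder temp = new TemporaryFolder();
  @TempDir private Path temp; // Jupiter injects a fresh directory per test

  @Test
  public void testScratchFileRoundTrip() throws Exception {
    // JUnit 4 equivalent: File data = temp.newFile(); Assert.assertTrue(data.delete());
    File data = File.createTempFile("junit", null, temp.toFile());
    assertThat(data.delete()).as("scratch file should be deletable").isTrue();
  }
}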

spark/v3.4/spark/src/test/java/org/apache/iceberg/TaskCheckHelper.java

Lines changed: 47 additions & 51 deletions
@@ -18,10 +18,11 @@
  */
 package org.apache.iceberg;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 import java.util.Comparator;
 import java.util.List;
 import java.util.stream.Collectors;
-import org.junit.Assert;
 
 public final class TaskCheckHelper {
   private TaskCheckHelper() {}
@@ -31,8 +32,9 @@ public static void assertEquals(
     List<FileScanTask> expectedTasks = getFileScanTasksInFilePathOrder(expected);
     List<FileScanTask> actualTasks = getFileScanTasksInFilePathOrder(actual);
 
-    Assert.assertEquals(
-        "The number of file scan tasks should match", expectedTasks.size(), actualTasks.size());
+    assertThat(actualTasks)
+        .as("The number of file scan tasks should match")
+        .hasSameSizeAs(expectedTasks);
 
     for (int i = 0; i < expectedTasks.size(); i++) {
       FileScanTask expectedTask = expectedTasks.get(i);
@@ -45,61 +47,55 @@ public static void assertEquals(FileScanTask expected, FileScanTask actual) {
     assertEquals(expected.file(), actual.file());
 
     // PartitionSpec implements its own equals method
-    Assert.assertEquals("PartitionSpec doesn't match", expected.spec(), actual.spec());
+    assertThat(actual.spec()).as("PartitionSpec doesn't match").isEqualTo(expected.spec());
 
-    Assert.assertEquals("starting position doesn't match", expected.start(), actual.start());
+    assertThat(actual.start()).as("starting position doesn't match").isEqualTo(expected.start());
 
-    Assert.assertEquals(
-        "the number of bytes to scan doesn't match", expected.start(), actual.start());
+    assertThat(actual.start())
+        .as("the number of bytes to scan doesn't match")
+        .isEqualTo(expected.start());
 
     // simplify comparison on residual expression via comparing toString
-    Assert.assertEquals(
-        "Residual expression doesn't match",
-        expected.residual().toString(),
-        actual.residual().toString());
+    assertThat(actual.residual())
+        .asString()
+        .as("Residual expression doesn't match")
+        .isEqualTo(expected.residual().toString());
   }
 
   public static void assertEquals(DataFile expected, DataFile actual) {
-    Assert.assertEquals(
-        "Should match the serialized record path", expected.location(), actual.location());
-    Assert.assertEquals(
-        "Should match the serialized record format", expected.format(), actual.format());
-    Assert.assertEquals(
-        "Should match the serialized record partition",
-        expected.partition().get(0, Object.class),
-        actual.partition().get(0, Object.class));
-    Assert.assertEquals(
-        "Should match the serialized record count", expected.recordCount(), actual.recordCount());
-    Assert.assertEquals(
-        "Should match the serialized record size",
-        expected.fileSizeInBytes(),
-        actual.fileSizeInBytes());
-    Assert.assertEquals(
-        "Should match the serialized record value counts",
-        expected.valueCounts(),
-        actual.valueCounts());
-    Assert.assertEquals(
-        "Should match the serialized record null value counts",
-        expected.nullValueCounts(),
-        actual.nullValueCounts());
-    Assert.assertEquals(
-        "Should match the serialized record lower bounds",
-        expected.lowerBounds(),
-        actual.lowerBounds());
-    Assert.assertEquals(
-        "Should match the serialized record upper bounds",
-        expected.upperBounds(),
-        actual.upperBounds());
-    Assert.assertEquals(
-        "Should match the serialized record key metadata",
-        expected.keyMetadata(),
-        actual.keyMetadata());
-    Assert.assertEquals(
-        "Should match the serialized record offsets",
-        expected.splitOffsets(),
-        actual.splitOffsets());
-    Assert.assertEquals(
-        "Should match the serialized record offsets", expected.keyMetadata(), actual.keyMetadata());
+    assertThat(actual.location())
+        .as("Should match the serialized record path")
+        .isEqualTo(expected.location());
+    assertThat(actual.format())
+        .as("Should match the serialized record format")
+        .isEqualTo(expected.format());
+    assertThat(actual.partition().get(0, Object.class))
+        .as("Should match the serialized record partition")
+        .isEqualTo(expected.partition().get(0, Object.class));
+    assertThat(actual.recordCount())
+        .as("Should match the serialized record count")
+        .isEqualTo(expected.recordCount());
+    assertThat(actual.fileSizeInBytes())
+        .as("Should match the serialized record size")
+        .isEqualTo(expected.fileSizeInBytes());
+    assertThat(actual.valueCounts())
+        .as("Should match the serialized record value counts")
+        .isEqualTo(expected.valueCounts());
+    assertThat(actual.nullValueCounts())
+        .as("Should match the serialized record null value counts")
+        .isEqualTo(expected.nullValueCounts());
+    assertThat(actual.lowerBounds())
+        .as("Should match the serialized record lower bounds")
+        .isEqualTo(expected.lowerBounds());
+    assertThat(actual.upperBounds())
+        .as("Should match the serialized record upper bounds")
+        .isEqualTo(expected.upperBounds());
+    assertThat(actual.keyMetadata())
+        .as("Should match the serialized record key metadata")
+        .isEqualTo(expected.keyMetadata());
+    assertThat(actual.splitOffsets())
+        .as("Should match the serialized record offsets")
+        .isEqualTo(expected.splitOffsets());
   }
 
   private static List<FileScanTask> getFileScanTasksInFilePathOrder(
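
A note on the residual check above: AssertJ's asString() switches the assertion subject to its toString() value before comparing, which preserves the old test's intent of comparing residual expressions by string form rather than by Expression equality. A self-contained illustration of the same idiom (the values here are placeholders, not from the commit):

import static org.assertj.core.api.Assertions.assertThat;

public class AsStringSketch {
  public static void main(String[] args) {
    Object expected = Integer.valueOf(42);
    Object actual = Integer.valueOf(42);
    // asString() compares via toString(), mirroring the residual-expression
    // assertion in TaskCheckHelper above.
    assertThat(actual).asString().isEqualTo(expected.toString());
  }
}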

spark/v3.4/spark/src/test/java/org/apache/iceberg/TestDataFileSerialization.java

Lines changed: 8 additions & 9 deletions
@@ -36,6 +36,7 @@
 import java.io.ObjectOutputStream;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
+import java.nio.file.Path;
 import java.util.Map;
 import java.util.UUID;
 import org.apache.iceberg.io.FileAppender;
@@ -49,10 +50,8 @@
 import org.apache.spark.SparkConf;
 import org.apache.spark.serializer.KryoSerializer;
 import org.apache.spark.sql.catalyst.InternalRow;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class TestDataFileSerialization {
 
@@ -102,12 +101,12 @@ public class TestDataFileSerialization {
           .withSortOrder(SortOrder.unsorted())
           .build();
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;
 
   @Test
   public void testDataFileKryoSerialization() throws Exception {
-    File data = temp.newFile();
-    Assert.assertTrue(data.delete());
+    File data = File.createTempFile("junit", null, temp.toFile());
+    assertThat(data.delete()).isTrue();
     Kryo kryo = new KryoSerializer(new SparkConf()).newKryo();
 
     try (Output out = new Output(new FileOutputStream(data))) {
@@ -146,7 +145,7 @@ public void testDataFileJavaSerialization() throws Exception {
   public void testParquetWriterSplitOffsets() throws IOException {
     Iterable<InternalRow> records = RandomData.generateSpark(DATE_SCHEMA, 1, 33L);
     File parquetFile =
-        new File(temp.getRoot(), FileFormat.PARQUET.addExtension(UUID.randomUUID().toString()));
+        new File(temp.toFile(), FileFormat.PARQUET.addExtension(UUID.randomUUID().toString()));
     FileAppender<InternalRow> writer =
         Parquet.write(Files.localOutput(parquetFile))
             .schema(DATE_SCHEMA)
@@ -161,7 +160,7 @@ public void testParquetWriterSplitOffsets() throws IOException {
     }
 
     Kryo kryo = new KryoSerializer(new SparkConf()).newKryo();
-    File dataFile = temp.newFile();
+    File dataFile = File.createTempFile("junit", null, temp.toFile());
     try (Output out = new Output(new FileOutputStream(dataFile))) {
       kryo.writeClassAndObject(out, writer.splitOffsets());
     }
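
Unlike JUnit 4's TemporaryFolder rule, the Path injected by @TempDir has no newFile() helper, so the migrated tests above create scratch files with plain JDK calls inside the injected directory. A sketch of the substitution (the "junit" prefix matches the diff; the helper name is illustrative):

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;

public class TempFileSketch {
  // JUnit 4: File data = temp.newFile();
  // JUnit 5: create the file manually under the @TempDir-injected path.
  static File newScratchFile(Path temp) throws IOException {
    return File.createTempFile("junit", null, temp.toFile());
  }
}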

spark/v3.4/spark/src/test/java/org/apache/iceberg/TestFileIOSerialization.java

Lines changed: 12 additions & 15 deletions
@@ -20,9 +20,12 @@
 
 import static org.apache.iceberg.types.Types.NestedField.optional;
 import static org.apache.iceberg.types.Types.NestedField.required;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.iceberg.hadoop.HadoopFileIO;
@@ -32,11 +35,9 @@
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.spark.source.SerializableTableWithSize;
 import org.apache.iceberg.types.Types;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class TestFileIOSerialization {
 
@@ -60,15 +61,15 @@ public class TestFileIOSerialization {
     CONF.set("k2", "v2");
   }
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;
   private Table table;
 
-  @Before
+  @BeforeEach
   public void initTable() throws IOException {
     Map<String, String> props = ImmutableMap.of("k1", "v1", "k2", "v2");
 
-    File tableLocation = temp.newFolder();
-    Assert.assertTrue(tableLocation.delete());
+    File tableLocation = Files.createTempDirectory(temp, "junit").toFile();
+    assertThat(tableLocation.delete()).isTrue();
 
     this.table = TABLES.create(SCHEMA, SPEC, SORT_ORDER, props, tableLocation.toString());
   }
@@ -82,9 +83,7 @@ public void testHadoopFileIOKryoSerialization() throws IOException {
     FileIO deserializedIO = KryoHelpers.roundTripSerialize(serializableTable.io());
     Configuration actualConf = ((HadoopFileIO) deserializedIO).conf();
 
-    Assert.assertEquals("Conf pairs must match", toMap(expectedConf), toMap(actualConf));
-    Assert.assertEquals("Conf values must be present", "v1", actualConf.get("k1"));
-    Assert.assertEquals("Conf values must be present", "v2", actualConf.get("k2"));
+    assertThat(actualConf).containsExactlyInAnyOrderElementsOf(expectedConf);
   }
 
   @Test
@@ -96,9 +95,7 @@ public void testHadoopFileIOJavaSerialization() throws IOException, ClassNotFoundException {
     FileIO deserializedIO = TestHelpers.roundTripSerialize(serializableTable.io());
     Configuration actualConf = ((HadoopFileIO) deserializedIO).conf();
 
-    Assert.assertEquals("Conf pairs must match", toMap(expectedConf), toMap(actualConf));
-    Assert.assertEquals("Conf values must be present", "v1", actualConf.get("k1"));
-    Assert.assertEquals("Conf values must be present", "v2", actualConf.get("k2"));
+    assertThat(actualConf).containsExactlyInAnyOrderElementsOf(expectedConf);
   }
 
   private Map<String, String> toMap(Configuration conf) {
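
The single containsExactlyInAnyOrderElementsOf call can replace the three Assert.assertEquals checks because Hadoop's Configuration implements Iterable<Map.Entry<String, String>>, letting AssertJ compare the two configurations entry by entry. A minimal sketch of the pattern, assuming both Configuration objects are fully populated (the helper name is illustrative):

import static org.assertj.core.api.Assertions.assertThat;

import org.apache.hadoop.conf.Configuration;

public class ConfAssertSketch {
  // One fluent call checks every key/value pair, regardless of iteration order.
  static void assertSameEntries(Configuration expected, Configuration actual) {
    assertThat(actual).containsExactlyInAnyOrderElementsOf(expected);
  }
}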

spark/v3.4/spark/src/test/java/org/apache/iceberg/TestHadoopMetricsContextSerialization.java

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
 import org.apache.iceberg.io.FileIOMetricsContext;
 import org.apache.iceberg.metrics.MetricsContext;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHadoopMetricsContextSerialization {
 
