diff --git a/src/it/java/io/deephaven/benchmark/tests/standard/file/CsvColTypeTest.java b/src/it/java/io/deephaven/benchmark/tests/standard/file/CsvColTypeTest.java
new file mode 100644
index 00000000..4efd5019
--- /dev/null
+++ b/src/it/java/io/deephaven/benchmark/tests/standard/file/CsvColTypeTest.java
@@ -0,0 +1,41 @@
+package io.deephaven.benchmark.tests.standard.file;
+
+import org.junit.jupiter.api.*;
+import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
+
+/**
+ * Standard tests for writing/reading single- and multi-column CSV for different column types.
+ */
+@TestMethodOrder(OrderAnnotation.class)
+class CsvColTypeTest {
+    final FileTestRunner runner = new FileTestRunner(this);
+
+    @Test
+    @Order(1)
+    void writeThreeIntegralCols() {
+        runner.setScaleFactors(5, 3);
+        runner.runCsvWriteTest("CsvWrite- 3 Integral Cols -Static", "short10K", "int10K", "long10K");
+    }
+
+    @Test
+    @Order(2)
+    void readThreeIntegralCols() {
+        runner.setScaleFactors(5, 3);
+        runner.runCsvReadTest("CsvRead- 3 Integral Cols -Static", "short10K", "int10K", "long10K");
+    }
+
+    @Test
+    @Order(3)
+    void writeOneStringCol() {
+        runner.setScaleFactors(5, 5);
+        runner.runCsvWriteTest("CsvWrite- 1 String Col -Static", "str10K");
+    }
+
+    @Test
+    @Order(4)
+    void readOneStringCol() {
+        runner.setScaleFactors(5, 5);
+        runner.runCsvReadTest("CsvRead- 1 String Col -Static", "str10K");
+    }
+
+}
diff --git a/src/it/java/io/deephaven/benchmark/tests/standard/file/CsvMultiColTest.java b/src/it/java/io/deephaven/benchmark/tests/standard/file/CsvMultiColTest.java
new file mode 100644
index 00000000..eaf3f94c
--- /dev/null
+++ b/src/it/java/io/deephaven/benchmark/tests/standard/file/CsvMultiColTest.java
@@ -0,0 +1,33 @@
+package io.deephaven.benchmark.tests.standard.file;
+
+import org.junit.jupiter.api.*;
+import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
+
+/**
+ * Standard tests for writing/reading multi-column data. To save time, the CSV generated by the "write" tests is used by
+ * the "read" tests.
+ */
+@TestMethodOrder(OrderAnnotation.class)
+class CsvMultiColTest {
+    final String[] usedColumns = {"str10K", "long10K", "int10K", "short10K"};
+    final FileTestRunner runner = new FileTestRunner(this);
+
+
+    @BeforeEach
+    void setup() {
+        runner.setScaleFactors(5, 2);
+    }
+
+    @Test
+    @Order(1)
+    void writeMultiCol() {
+        runner.runCsvWriteTest("CsvWrite- Multi Col -Static", usedColumns);
+    }
+
+    @Test
+    @Order(2)
+    void readMultiCol() {
+        runner.runCsvReadTest("CsvRead- Multi Col -Static");
+    }
+
+}
diff --git a/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetTestRunner.java b/src/it/java/io/deephaven/benchmark/tests/standard/file/FileTestRunner.java
similarity index 71%
rename from src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetTestRunner.java
rename to src/it/java/io/deephaven/benchmark/tests/standard/file/FileTestRunner.java
index 089629c2..a565337a 100644
--- a/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetTestRunner.java
+++ b/src/it/java/io/deephaven/benchmark/tests/standard/file/FileTestRunner.java
@@ -1,4 +1,4 @@
-package io.deephaven.benchmark.tests.standard.parquet;
+package io.deephaven.benchmark.tests.standard.file;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import java.time.Duration;
@@ -12,7 +12,7 @@
 /**
- * Test reading and writing parquet files with various data types and compression codecs.
+ * Test reading and writing parquet and CSV files with various data types and compression codecs.
  */
-class ParquetTestRunner {
+class FileTestRunner {
     final String parquetCfg = "max_dictionary_keys=1048576, max_dictionary_size=1048576, target_page_size=65536";
     final Object testInst;
     final Bench api;
@@ -21,7 +21,7 @@ class FileTestRunner {
     private long scaleRowCount;
     private boolean useParquetDefaultSettings = false;
 
-    ParquetTestRunner(Object testInst) {
+    FileTestRunner(Object testInst) {
         this.testInst = testInst;
         this.api = initialize(testInst);
         this.scaleRowCount = api.propertyAsIntegral("scale.row.count", "100000");
@@ -48,15 +48,63 @@ void useParquetDefaultSettings() {
     }
 
     /**
-     * Read a benchmark that measures parquet read performance. This tests always runs after a corresponding write test.
+     * Run a benchmark that measures CSV read performance. This test always runs after a corresponding write test.
      *
      * @param testName name that will appear in the results as the benchmark name
      */
-    void runReadTest(String testName) {
+    void runCsvReadTest(String testName, String... columnNames) {
+        var q = "read_csv('/data/source.ptr.csv', ${types})";
+        q = q.replace("${types}", getTypes(columnNames));
+        runReadTest(testName, q);
+    }
+
+    /**
+     * Run a benchmark that measures parquet read performance. This test always runs after a corresponding write test.
+     *
+     * @param testName name that will appear in the results as the benchmark name
+     */
+    void runParquetReadTest(String testName) {
+        runReadTest(testName, "read('/data/source.ptr.parquet').select()");
+    }
+
+    /**
+     * Run a benchmark that measures parquet write performance.
+     *
+     * @param testName the benchmark name to record with the measurement
+     * @param codec a compression codec
+     * @param columnNames the names of the pre-defined columns to generate
+     */
+    void runParquetWriteTest(String testName, String codec, String... columnNames) {
+        var q = """
+        write(
+            source, '/data/source.ptr.parquet', compression_codec_name='${codec}'${parquetSettings}
+        )
+        """;
+        q = q.replace("${codec}", codec.equalsIgnoreCase("none") ? "UNCOMPRESSED" : codec);
+        q = q.replace("${parquetSettings}", useParquetDefaultSettings ? "" : (",\n " + parquetCfg));
+        runWriteTest(testName, q, columnNames);
+    }
+
+    /**
+     * Run a benchmark that measures CSV write performance.
+     *
+     * @param testName the benchmark name to record with the measurement
+     * @param columnNames the names of the pre-defined columns to generate
+     */
+    void runCsvWriteTest(String testName, String... columnNames) {
+        runWriteTest(testName, "write_csv(source, '/data/source.ptr.csv')", columnNames);
+    }
+
+    /**
+     * Run a benchmark that measures read performance. This test always runs after a corresponding write test.
+     *
+     * @param testName name that will appear in the results as the benchmark name
+     */
+    private void runReadTest(String testName, String readQuery, String... columnNames) {
         var q = """
         bench_api_metrics_snapshot()
         begin_time = time.perf_counter_ns()
-        source = read('/data/source.ptr.parquet').select()
+        source = ${readQuery}
         end_time = time.perf_counter_ns()
         bench_api_metrics_snapshot()
         standard_metrics = bench_api_metrics_collect()
@@ -67,17 +115,11 @@ void runReadTest(String testName) {
             long_col("result_row_count", [source.size])
         ])
         """;
+        q = q.replace("${readQuery}", readQuery);
         runTest(testName, q);
     }
 
-    /**
-     * Run a benchmark the measures parquet write performance.
-     *
-     * @param testName the benchmark name to record with the measurement
-     * @param codec a compression codec
-     * @param columnNames the names of the pre-defined columns to generate
-     */
-    void runWriteTest(String testName, String codec, String... columnNames) {
+    private void runWriteTest(String testName, String writeQuery, String... columnNames) {
         var q = """
         source = merge([empty_table(${rowCount}).update([
         ${generators}
         ]) for i in range(${scaleFactor})])
 
         bench_api_metrics_snapshot()
         begin_time = time.perf_counter_ns()
-        write(
-            source, '/data/source.ptr.parquet', compression_codec_name='${codec}'${parquetSettings}
-        )
+        ${writeQuery}
         end_time = time.perf_counter_ns()
         bench_api_metrics_snapshot()
         standard_metrics = bench_api_metrics_collect()
@@ -98,11 +138,10 @@
             long_col("result_row_count", [source.size])
         ])
         """;
+        q = q.replace("${writeQuery}", writeQuery);
         q = q.replace("${rowCount}", "" + scaleRowCount);
         q = q.replace("${scaleFactor}", "" + scaleFactor);
-        q = q.replace("${codec}", codec.equalsIgnoreCase("none") ? "UNCOMPRESSED" : codec);
         q = q.replace("${generators}", getGenerators(columnNames));
-        q = q.replace("${parquetSettings}", useParquetDefaultSettings ? "" : (",\n " + parquetCfg));
         runTest(testName, q);
     }
 
@@ -112,7 +151,7 @@
      * @param testName the benchmark name to record with the results
      * @param query the test query to run
      */
-    void runTest(String testName, String query) {
+    private void runTest(String testName, String query) {
         try {
             api.setName(testName);
             api.query(query).fetchAfter("stats", table -> {
@@ -139,7 +178,7 @@ void runTest(String testName, String query) {
      * @param columnNames the column names to generate code for
-     * @return the lines of code needed to generate column ndata
+     * @return the lines of code needed to generate column data
      */
-    String getGenerators(String... columnNames) {
+    private String getGenerators(String... columnNames) {
         return Arrays.stream(columnNames).map(c -> "'" + c + "=" + getGenerator(c) + "'")
                 .collect(Collectors.joining(",\n")) + '\n';
     }
@@ -150,7 +189,7 @@ String getGenerators(String... columnNames) {
      * @param columnName the column name to generate data for
      * @return the data generation code
      */
-    String getGenerator(final String columnName) {
+    private String getGenerator(final String columnName) {
         var array5 = "java.util.stream.IntStream.range((int)(ii % 5),(int)((ii % 5) + 5)).toArray()";
         var array1K = "java.util.stream.IntStream.range((int)(ii % 1000),(int)((ii % 1000) + 1000)).toArray()";
         var objArr5 = "java.util.stream.Stream.of(`1`,null,`3`,null,`5`).toArray()";
@@ -171,6 +210,27 @@ String getGenerator(final String columnName) {
         return "(ii % 10 == 0) ? null : " + gen;
     }
 
+    private String getTypes(String... cols) {
+        return "{" + Arrays.stream(cols).map(c -> "'" + c + "':" + getType(c)).collect(Collectors.joining(",")) + "}";
+    }
+
+    private String getType(String columnName) {
+        return switch (columnName) {
+            case "str10K" -> "dht.string";
+            case "long10K" -> "dht.long";
+            case "int10K" -> "dht.int_";
+            case "short10K" -> "dht.short";
+            case "bigDec10K" -> "dht.BigDecimal";
+            case "intArr5" -> "dht.int_array";
+            case "intVec5" -> "dht.int_array";
+            case "intArr1K" -> "dht.int_array";
+            case "intVec1K" -> "dht.int_array";
+            case "objArr5" -> "dht.string_array";
+            case "objVec5" -> "dht.string_array";
+            default -> throw new RuntimeException("Undefined column: " + columnName);
+        };
+    }
+
     /**
      * Initialize the test client and its properties. Restart Docker if it is local to the test client and the
      * {@code docker.compose.file} set.
      *
      * @param testInst the test instance this runner is associated with.
      * @return a new Bench API instance.
      */
-    Bench initialize(Object testInst) {
+    private Bench initialize(Object testInst) {
         var query = """
         import time
         from deephaven import empty_table, garbage_collect, new_table, merge
         from deephaven.column import long_col, double_col
         from deephaven.parquet import read, write
+        from deephaven import read_csv, write_csv
+        from deephaven import dtypes as dht
         """;
         Bench api = Bench.create(testInst);
@@ -197,7 +259,7 @@ Bench initialize(Object testInst) {
      *
      * @param api the Bench API for this test runner.
      */
-    void restartDocker(Bench api) {
+    private void restartDocker(Bench api) {
         var timer = api.timer();
         if (!Exec.restartDocker(api.property("docker.compose.file", ""), api.property("deephaven.addr", "")))
             return;
diff --git a/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetColTypeTest.java b/src/it/java/io/deephaven/benchmark/tests/standard/file/ParquetColTypeTest.java
similarity index 52%
rename from src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetColTypeTest.java
rename to src/it/java/io/deephaven/benchmark/tests/standard/file/ParquetColTypeTest.java
index dafe4e2f..e21ea4e6 100644
--- a/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetColTypeTest.java
+++ b/src/it/java/io/deephaven/benchmark/tests/standard/file/ParquetColTypeTest.java
@@ -1,4 +1,4 @@
-package io.deephaven.benchmark.tests.standard.parquet;
+package io.deephaven.benchmark.tests.standard.file;
 
 import org.junit.jupiter.api.*;
 import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
@@ -8,132 +8,132 @@
  */
 @TestMethodOrder(OrderAnnotation.class)
 class ParquetColTypeTest {
-    final ParquetTestRunner runner = new ParquetTestRunner(this);
+    final FileTestRunner runner = new FileTestRunner(this);
 
     @Test
     @Order(1)
     void writeThreeIntegralCols() {
         runner.setScaleFactors(5, 5);
-        runner.runWriteTest("ParquetWrite- 3 Long Cols -Static", "NONE", "short10K", "int10K", "long10K");
+        runner.runParquetWriteTest("ParquetWrite- 3 Long Cols -Static", "NONE", "short10K", "int10K", "long10K");
     }
 
     @Test
     @Order(2)
     void readThreeIntegralCols() {
         runner.setScaleFactors(5, 5);
-        runner.runReadTest("ParquetRead- 3 Long Cols -Static");
+        runner.runParquetReadTest("ParquetRead- 3 Long Cols -Static");
     }
 
     @Test
     @Order(3)
     void writeOneStringCol() {
         runner.setScaleFactors(5, 10);
-        runner.runWriteTest("ParquetWrite- 1 String Col -Static", "NONE", "str10K");
+        runner.runParquetWriteTest("ParquetWrite- 1 String Col -Static", "NONE", "str10K");
     }
 
     @Test
     @Order(4)
     void readOneStringCol() {
         runner.setScaleFactors(5, 10);
-        runner.runReadTest("ParquetRead- 1 String Col -Static");
+        runner.runParquetReadTest("ParquetRead- 1 String Col -Static");
     }
 
     @Test
     @Order(5)
     void writeOneBigDecimalCol() {
-        runner.setScaleFactors(5, 3);
-        runner.runWriteTest("ParquetWrite- 1 Big Decimal Col -Static", "NONE", "bigDec10K");
+        runner.setScaleFactors(5, 4);
+        runner.runParquetWriteTest("ParquetWrite- 1 Big Decimal Col -Static", "NONE", "bigDec10K");
     }
 
     @Test
     @Order(6)
     void readOneBigDecimalCol() {
-        runner.setScaleFactors(5, 3);
-        runner.runReadTest("ParquetRead- 1 Big Decimal Col -Static");
+        runner.setScaleFactors(5, 4);
+        runner.runParquetReadTest("ParquetRead- 1 Big Decimal Col -Static");
     }
 
     @Test
     @Order(7)
     void writeOneInt1KArrayCol() {
         runner.setScaleFactors(0.10, 2);
-        runner.runWriteTest("ParquetWrite- 1 Array Col of 1K Ints -Static", "NONE", "intArr1K");
+        runner.runParquetWriteTest("ParquetWrite- 1 Array Col of 1K Ints -Static", "NONE", "intArr1K");
     }
 
     @Test
     @Order(8)
     void readOneInt1KArrayCol() {
         runner.setScaleFactors(0.10, 2);
-        runner.runReadTest("ParquetRead- 1 Array Col of 1K Ints -Static");
+        runner.runParquetReadTest("ParquetRead- 1 Array Col of 1K Ints -Static");
     }
 
     @Test
     @Order(9)
     void writeOneInt1KVectorCol() {
         runner.setScaleFactors(0.10, 2);
-        runner.runWriteTest("ParquetWrite- 1 Vector Col of 1K Ints -Static", "NONE", "intVec1K");
+        runner.runParquetWriteTest("ParquetWrite- 1 Vector Col of 1K Ints -Static", "NONE", "intVec1K");
     }
 
     @Test
     @Order(10)
     void readOneInt1KVectorCol() {
         runner.setScaleFactors(0.10, 2);
-        runner.runReadTest("ParquetRead- 1 Vector Col of 1K Ints -Static");
+        runner.runParquetReadTest("ParquetRead- 1 Vector Col of 1K Ints -Static");
     }
 
     @Test
     @Order(11)
     void writeOneInt5ArrayCol() {
         runner.setScaleFactors(2, 10);
-        runner.runWriteTest("ParquetWrite- 1 Array Col of 5 Ints -Static", "NONE", "intArr5");
+        runner.runParquetWriteTest("ParquetWrite- 1 Array Col of 5 Ints -Static", "NONE", "intArr5");
     }
 
     @Test
     @Order(12)
     void readOneInt5ArrayCol() {
         runner.setScaleFactors(2, 10);
-        runner.runReadTest("ParquetRead- 1 Array Col of 5 Ints -Static");
+        runner.runParquetReadTest("ParquetRead- 1 Array Col of 5 Ints -Static");
     }
 
     @Test
     @Order(13)
     void writeOneInt5VectorCol() {
         runner.setScaleFactors(2, 10);
-        runner.runWriteTest("ParquetWrite- 1 Vector Col of 5 Ints -Static", "NONE", "intVec5");
+        runner.runParquetWriteTest("ParquetWrite- 1 Vector Col of 5 Ints -Static", "NONE", "intVec5");
     }
 
     @Test
     @Order(14)
     void readOneInt5VectorCol() {
         runner.setScaleFactors(2, 10);
-        runner.runReadTest("ParquetRead- 1 Vector Col of 5 Ints -Static");
+        runner.runParquetReadTest("ParquetRead- 1 Vector Col of 5 Ints -Static");
     }
 
     @Test
     @Order(15)
     void writeOneObjectArrayCol() {
         runner.setScaleFactors(2, 1);
-        runner.runWriteTest("ParquetWrite- 1 Array Col of 3 Strings and 2 Nulls -Static", "NONE", "objArr5");
+        runner.runParquetWriteTest("ParquetWrite- 1 Array Col of 3 Strings and 2 Nulls -Static", "NONE", "objArr5");
     }
 
     @Test
     @Order(16)
     void readOneObjectArrayCol() {
         runner.setScaleFactors(2, 1);
-        runner.runReadTest("ParquetRead- 1 Array Col of 3 Strings and 2 Nulls -Static");
+        runner.runParquetReadTest("ParquetRead- 1 Array Col of 3 Strings and 2 Nulls -Static");
     }
 
     @Test
     @Order(17)
     void writeOneObjectVectorCol() {
         runner.setScaleFactors(1, 1);
-        runner.runWriteTest("ParquetWrite- 1 Vector Col of 3 String and 2 Nulls -Static", "NONE", "objVec5");
+        runner.runParquetWriteTest("ParquetWrite- 1 Vector Col of 3 String and 2 Nulls -Static", "NONE", "objVec5");
     }
 
     @Test
     @Order(18)
     void readOneObjectVectorCol() {
         runner.setScaleFactors(1, 1);
-        runner.runReadTest("ParquetRead- 1 Vector Col of 3 String and 2 Nulls -Static");
+        runner.runParquetReadTest("ParquetRead- 1 Vector Col of 3 String and 2 Nulls -Static");
     }
 
 }
diff --git a/src/it/java/io/deephaven/benchmark/tests/standard/file/ParquetMultiColTest.java b/src/it/java/io/deephaven/benchmark/tests/standard/file/ParquetMultiColTest.java
new file mode 100644
index 00000000..4a2a4834
--- /dev/null
+++ b/src/it/java/io/deephaven/benchmark/tests/standard/file/ParquetMultiColTest.java
@@ -0,0 +1,106 @@
+package io.deephaven.benchmark.tests.standard.file;
+
+import org.junit.jupiter.api.*;
+import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
+
+/**
+ * Standard tests for writing/reading multi-column data with different codec/compression. To save time, the parquet
+ * generated by the "write" tests is used by the "read" tests.
+ */
+@TestMethodOrder(OrderAnnotation.class)
+class ParquetMultiColTest {
+    final String[] usedColumns = {"str10K", "long10K", "int10K", "short10K", "bigDec10K", "intArr5", "intVec5"};
+    final FileTestRunner runner = new FileTestRunner(this);
+
+    @BeforeEach
+    void setup() {
+        runner.setScaleFactors(5, 1);
+    }
+
+    @Test
+    @Order(1)
+    void writeMultiColSnappy() {
+        runner.runParquetWriteTest("ParquetWrite- Snappy Multi Col -Static", "SNAPPY", usedColumns);
+    }
+
+    @Test
+    @Order(2)
+    void readMultiColSnappy() {
+        runner.runParquetReadTest("ParquetRead- Snappy Multi Col -Static");
+    }
+
+    @Test
+    @Order(3)
+    void writeMultiColZstd() {
+        runner.runParquetWriteTest("ParquetWrite- Zstd Multi Col -Static", "ZSTD", usedColumns);
+    }
+
+    @Test
+    @Order(4)
+    void readMultiColZstd() {
+        runner.runParquetReadTest("ParquetRead- Zstd Multi Col -Static");
+    }
+
+    @Test
+    @Order(5)
+    void writeMultiColLzo() {
+        runner.runParquetWriteTest("ParquetWrite- Lzo Multi Col -Static", "LZO", usedColumns);
+    }
+
+    @Test
+    @Order(6)
+    void readMultiColLzo() {
+        runner.runParquetReadTest("ParquetRead- Lzo Multi Col -Static");
+    }
+
+    @Test
+    @Order(7)
+    void writeMultiColLz4Raw() {
+        runner.runParquetWriteTest("ParquetWrite- Lz4Raw Multi Col -Static", "LZ4_RAW", usedColumns);
+    }
+
+    @Test
+    @Order(8)
+    void readMultiColLz4Raw() {
+        runner.runParquetReadTest("ParquetRead- Lz4Raw Multi Col -Static");
+    }
+
+    @Test
+    @Order(9)
+    void writeMultiColGzip() {
+        runner.runParquetWriteTest("ParquetWrite- Gzip Multi Col -Static", "GZIP", usedColumns);
+    }
+
+    @Test
+    @Order(10)
+    void readMultiColGzip() {
+        runner.runParquetReadTest("ParquetRead- Gzip Multi Col -Static");
+    }
+
+    @Test
+    @Order(11)
+    void writeMultiColNone() {
+        runner.runParquetWriteTest("ParquetWrite- No Codec Multi Col -Static", "NONE", usedColumns);
+    }
+
+    @Test
+    @Order(12)
+    void readMultiColNone() {
+        runner.runParquetReadTest("ParquetRead- No Codec Multi Col -Static");
+    }
+
+    @Test
+    @Order(13)
+    void writeMultiColDefaultSnappy() {
+        runner.useParquetDefaultSettings();
+        runner.runParquetWriteTest("ParquetWrite- Snappy Multi Col Defaults -Static", "SNAPPY", usedColumns);
+    }
+
+    @Test
+    @Order(14)
+    void readMultiColDefaultSnappy() {
+        runner.useParquetDefaultSettings();
+        runner.runParquetReadTest("ParquetRead- Snappy Multi Col Defaults -Static");
+    }
+
+}
diff --git a/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetCodecTest.java b/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetCodecTest.java
deleted file mode 100644
index 9f3024e2..00000000
--- a/src/it/java/io/deephaven/benchmark/tests/standard/parquet/ParquetCodecTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-package io.deephaven.benchmark.tests.standard.parquet;
-
-import org.junit.jupiter.api.*;
-import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
-
-/**
- * Standard tests for writing/reading multi-column data with different codec/compression. To save time, the parquet
- * generated by the "write" tests is used by the "read" tests
- */
-@TestMethodOrder(OrderAnnotation.class)
-class ParquetCodecTest {
-    final ParquetTestRunner runner = new ParquetTestRunner(this);
-    final String[] usedColumns = {"str10K", "long10K", "int10K", "short10K", "bigDec10K", "intArr5", "intVec5"};
-
-    @BeforeEach
-    void setup() {
-        runner.setScaleFactors(5, 1);
-    }
-
-    @Test
-    @Order(1)
-    void writeMultiColSnappy() {
-        runner.runWriteTest("ParquetWrite- Snappy Multi Col -Static", "SNAPPY", usedColumns);
-    }
-
-    @Test
-    @Order(2)
-    void readMultiColSnappy() {
-        runner.runReadTest("ParquetRead- Snappy Multi Col -Static");
-    }
-
-    @Test
-    @Order(3)
-    void writeMultiColZstd() {
-        runner.runWriteTest("ParquetWrite- Zstd Multi Col -Static", "ZSTD", usedColumns);
-    }
-
-    @Test
-    @Order(4)
-    void readMultiColZstd() {
-        runner.runReadTest("ParquetRead- Zstd Multi Col -Static");
-    }
-
-    @Test
-    @Order(5)
-    void writeMultiColLzo() {
-        runner.runWriteTest("ParquetWrite- Lzo Multi Col -Static", "LZO", usedColumns);
-    }
-
-    @Test
-    @Order(6)
-    void readMultiColLzo() {
-        runner.runReadTest("ParquetRead- Lzo Multi Col -Static");
-    }
-
-    @Test
-    @Order(7)
-    void writeMultiColLz4Raw() {
-        runner.runWriteTest("ParquetWrite- Lz4Raw Multi Col -Static", "LZ4_RAW", usedColumns);
-    }
-
-    @Test
-    @Order(8)
-    void readMultiColLz4Raw() {
-        runner.runReadTest("ParquetRead- Lz4Raw Multi Col -Static");
-    }
-
-    @Test
-    @Order(9)
-    void writeMultiColGzip() {
-        runner.runWriteTest("ParquetWrite- Gzip Multi Col -Static", "GZIP", usedColumns);
-    }
-
-    @Test
-    @Order(10)
-    void readMultiColGzip() {
-        runner.runReadTest("ParquetRead- Gzip Multi Col -Static");
-    }
-
-    @Test
-    @Order(11)
-    void writeMultiColNone() {
-        runner.runWriteTest("ParquetWrite- No Codec Multi Col -Static", "NONE", usedColumns);
-    }
-
-    @Test
-    @Order(12)
-    void readMultiColNone() {
-        runner.runReadTest("ParquetRead- No Codec Multi Col -Static");
-    }
-
-    @Test
-    @Order(13)
-    void writeMultiColDefaultSnappy() {
-        runner.useParquetDefaultSettings();
-        runner.runWriteTest("ParquetWrite- Snappy Multi Col Defaults -Static", "SNAPPY", usedColumns);
-    }
-
-    @Test
-    @Order(14)
-    void readMultiColDefaultSnappy() {
-        runner.useParquetDefaultSettings();
-        runner.runReadTest("ParquetRead- Snappy Multi Col Defaults -Static");
-    }
-
-}
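
For illustration only (this sketch is not part of the patch): runCsvReadTest drives a templated Deephaven query. For readThreeIntegralCols, getTypes()/getType() expand ${types} into a column-to-dtype header map, so the server-side query comes out roughly as below. This is a hand-expanded sketch under stated assumptions: the bench_api_metrics_* calls injected by the Bench harness are omitted, it must run inside a Deephaven server session, and /data/source.ptr.csv assumes the corresponding write test has already produced the file.

# Hand-expanded sketch of the query built by
# runCsvReadTest("CsvRead- 3 Integral Cols -Static", "short10K", "int10K", "long10K")
import time
from deephaven import read_csv
from deephaven import dtypes as dht

begin_time = time.perf_counter_ns()
# The second argument is the header mapping that getTypes() renders as a Python dict literal,
# forcing the column types instead of letting the CSV reader infer them.
source = read_csv('/data/source.ptr.csv',
                  {'short10K': dht.short, 'int10K': dht.int_, 'long10K': dht.long})
end_time = time.perf_counter_ns()
print(f"rows={source.size} elapsed_millis={(end_time - begin_time) / 1e6:.1f}")

Forcing the types keeps the read benchmark comparable to the parquet tests, which carry their schema in the file itself.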