identifierFieldIds,
+ boolean upsertMode) {
+ super(
+ spec,
+ format,
+ appenderFactory,
+ fileFactory,
+ io,
+ targetFileSize,
+ schema,
+ identifierFieldIds,
+ upsertMode);
+ this.partitionKey = new PartitionKey(spec, schema);
+ }
+
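+ // find the delta writer for the row's partition, creating one lazily per partition key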
+ @Override
+ RowDataDeltaWriter route(Record row) {
+ partitionKey.partition(wrapper().wrap(row));
+
+ RowDataDeltaWriter writer = writers.get(partitionKey);
+ if (writer == null) {
+ // NOTICE: we must copy the partition key before using it as a map key; the shared
+ // partitionKey instance is mutated on every route() call and would corrupt the writers map.
+ PartitionKey copiedKey = partitionKey.copy();
+ writer = new RowDataDeltaWriter(copiedKey);
+ writers.put(copiedKey, writer);
+ }
+
+ return writer;
+ }
+
+ @Override
+ public void close() {
+ try {
+ Tasks.foreach(writers.values())
+ .throwFailureWhenFinished()
+ .noRetry()
+ .run(RowDataDeltaWriter::close, IOException.class);
+
+ writers.clear();
+ } catch (IOException e) {
+ throw new UncheckedIOException("Failed to close equality delta writer", e);
+ }
+ }
+}
diff --git a/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordProjection.java b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordProjection.java
new file mode 100644
index 000000000000..79ce2c111a3a
--- /dev/null
+++ b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordProjection.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import java.util.List;
+import java.util.Map;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.types.Types.ListType;
+import org.apache.iceberg.types.Types.MapType;
+import org.apache.iceberg.types.Types.NestedField;
+import org.apache.iceberg.types.Types.StructType;
+
+/**
+ * This is modified from {@link org.apache.iceberg.util.StructProjection} to support record types.
+ */
+public class RecordProjection implements Record {
+
+ /**
+ * Creates a projecting wrapper for {@link Record} rows.
+ *
+ * <p>This projection does not work with repeated types like lists and maps.
+ *
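+ * <p>For example, a minimal sketch (assuming {@code rowSchema} and {@code keySchema} are
+ * defined elsewhere):
+ *
+ * <pre>{@code
+ * RecordProjection projection = RecordProjection.create(rowSchema, keySchema);
+ * Record keyView = projection.wrap(row); // a view of row exposing only the key fields
+ * }</pre>
+ *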
+ * @param dataSchema schema of rows wrapped by this projection
+ * @param projectedSchema result schema of the projected rows
+ * @return a wrapper to project rows
+ */
+ public static RecordProjection create(Schema dataSchema, Schema projectedSchema) {
+ return new RecordProjection(dataSchema.asStruct(), projectedSchema.asStruct());
+ }
+
+ private final StructType type;
+ private final int[] positionMap;
+ private final RecordProjection[] nestedProjections;
+ private Record record;
+
+ private RecordProjection(StructType structType, StructType projection) {
+ this(structType, projection, false);
+ }
+
+ @SuppressWarnings("checkstyle:CyclomaticComplexity")
+ private RecordProjection(StructType structType, StructType projection, boolean allowMissing) {
+ this.type = projection;
+ this.positionMap = new int[projection.fields().size()];
+ this.nestedProjections = new RecordProjection[projection.fields().size()];
+
+ // set up the projection positions and any nested projections that are needed
+ List<NestedField> dataFields = structType.fields();
+ for (int pos = 0; pos < positionMap.length; pos += 1) {
+ NestedField projectedField = projection.fields().get(pos);
+
+ boolean found = false;
+ for (int i = 0; !found && i < dataFields.size(); i += 1) {
+ NestedField dataField = dataFields.get(i);
+ if (projectedField.fieldId() == dataField.fieldId()) {
+ found = true;
+ positionMap[pos] = i;
+ switch (projectedField.type().typeId()) {
+ case STRUCT:
+ nestedProjections[pos] =
+ new RecordProjection(
+ dataField.type().asStructType(), projectedField.type().asStructType());
+ break;
+ case MAP:
+ MapType projectedMap = projectedField.type().asMapType();
+ MapType originalMap = dataField.type().asMapType();
+
+ boolean keyProjectable =
+ !projectedMap.keyType().isNestedType()
+ || projectedMap.keyType().equals(originalMap.keyType());
+ boolean valueProjectable =
+ !projectedMap.valueType().isNestedType()
+ || projectedMap.valueType().equals(originalMap.valueType());
+ Preconditions.checkArgument(
+ keyProjectable && valueProjectable,
+ "Cannot project a partial map key or value struct. Trying to project %s out of %s",
+ projectedField,
+ dataField);
+
+ nestedProjections[pos] = null;
+ break;
+ case LIST:
+ ListType projectedList = projectedField.type().asListType();
+ ListType originalList = dataField.type().asListType();
+
+ boolean elementProjectable =
+ !projectedList.elementType().isNestedType()
+ || projectedList.elementType().equals(originalList.elementType());
+ Preconditions.checkArgument(
+ elementProjectable,
+ "Cannot project a partial list element struct. Trying to project %s out of %s",
+ projectedField,
+ dataField);
+
+ nestedProjections[pos] = null;
+ break;
+ default:
+ nestedProjections[pos] = null;
+ }
+ }
+ }
+
+ if (!found && projectedField.isOptional() && allowMissing) {
+ positionMap[pos] = -1;
+ nestedProjections[pos] = null;
+ } else if (!found) {
+ throw new IllegalArgumentException(
+ String.format("Cannot find field %s in %s", projectedField, structType));
+ }
+ }
+ }
+
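+ /** Wraps a record for projection; returns this so a single instance can be reused across rows. */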
+ public RecordProjection wrap(Record newRecord) {
+ this.record = newRecord;
+ return this;
+ }
+
+ @Override
+ public int size() {
+ return type.fields().size();
+ }
+
+ @Override
+ public <T> T get(int pos, Class<T> javaClass) {
+ // record can be null if wrap() is not called before get,
+ // or if a null record is wrapped.
+ if (record == null) {
+ return null;
+ }
+
+ int recordPos = positionMap[pos];
+ if (nestedProjections[pos] != null) {
+ Record nestedStruct = record.get(recordPos, Record.class);
+ if (nestedStruct == null) {
+ return null;
+ }
+
+ return javaClass.cast(nestedProjections[pos].wrap(nestedStruct));
+ }
+
+ if (recordPos != -1) {
+ return record.get(recordPos, javaClass);
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public <T> void set(int pos, T value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public StructType struct() {
+ return type;
+ }
+
+ @Override
+ public Object getField(String name) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setField(String name, Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Object get(int pos) {
+ return get(pos, Object.class);
+ }
+
+ @Override
+ public Record copy() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Record copy(Map<String, Object> overwriteValues) {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordUtils.java b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordUtils.java
index 5ac930739738..1cd4d08fa999 100644
--- a/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordUtils.java
+++ b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordUtils.java
@@ -154,19 +154,47 @@ public static TaskWriter<Record> createTableWriter(
TaskWriter<Record> writer;
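+ // CDC and upsert writes emit equality deletes, so they require a delta writer;
+ // append-only writes can use the simpler append writers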
if (table.spec().isUnpartitioned()) {
- writer =
- new UnpartitionedWriter<>(
- table.spec(), format, appenderFactory, fileFactory, table.io(), targetFileSize);
+ if (config.tablesCdcField() == null && !config.isUpsertMode()) {
+ writer =
+ new UnpartitionedWriter<>(
+ table.spec(), format, appenderFactory, fileFactory, table.io(), targetFileSize);
+ } else {
+ writer =
+ new UnpartitionedDeltaWriter(
+ table.spec(),
+ format,
+ appenderFactory,
+ fileFactory,
+ table.io(),
+ targetFileSize,
+ table.schema(),
+ identifierFieldIds,
+ config.isUpsertMode());
+ }
} else {
- writer =
- new PartitionedAppendWriter(
- table.spec(),
- format,
- appenderFactory,
- fileFactory,
- table.io(),
- targetFileSize,
- table.schema());
+ if (config.tablesCdcField() == null && !config.isUpsertMode()) {
+ writer =
+ new PartitionedAppendWriter(
+ table.spec(),
+ format,
+ appenderFactory,
+ fileFactory,
+ table.io(),
+ targetFileSize,
+ table.schema());
+ } else {
+ writer =
+ new PartitionedDeltaWriter(
+ table.spec(),
+ format,
+ appenderFactory,
+ fileFactory,
+ table.io(),
+ targetFileSize,
+ table.schema(),
+ identifierFieldIds,
+ config.isUpsertMode());
+ }
}
return writer;
}
diff --git a/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordWrapper.java b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordWrapper.java
new file mode 100644
index 000000000000..915608562034
--- /dev/null
+++ b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordWrapper.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import java.util.Map;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.types.Types.StructType;
+
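+/**
+ * A {@link Record} that pairs a row with its CDC {@link Operation}, delegating all data access
+ * to the wrapped record.
+ */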
+public class RecordWrapper implements Record {
+
+ private final Record delegate;
+ private final Operation op;
+
+ public RecordWrapper(Record delegate, Operation op) {
+ this.delegate = delegate;
+ this.op = op;
+ }
+
+ public Operation op() {
+ return op;
+ }
+
+ @Override
+ public StructType struct() {
+ return delegate.struct();
+ }
+
+ @Override
+ public Object getField(String name) {
+ return delegate.getField(name);
+ }
+
+ @Override
+ public void setField(String name, Object value) {
+ delegate.setField(name, value);
+ }
+
+ @Override
+ public Object get(int pos) {
+ return delegate.get(pos);
+ }
+
+ @Override
+ public Record copy() {
+ return new RecordWrapper(delegate.copy(), op);
+ }
+
+ @Override
+ public Record copy(Map<String, Object> overwriteValues) {
+ return new RecordWrapper(delegate.copy(overwriteValues), op);
+ }
+
+ @Override
+ public int size() {
+ return delegate.size();
+ }
+
+ @Override
+ public <T> T get(int pos, Class<T> javaClass) {
+ return delegate.get(pos, javaClass);
+ }
+
+ @Override
+ public <T> void set(int pos, T value) {
+ delegate.set(pos, value);
+ }
+}
diff --git a/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/UnpartitionedDeltaWriter.java b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/UnpartitionedDeltaWriter.java
new file mode 100644
index 000000000000..46b0a45d532d
--- /dev/null
+++ b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/UnpartitionedDeltaWriter.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import java.io.IOException;
+import java.util.Set;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.FileAppenderFactory;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.OutputFileFactory;
+
+public class UnpartitionedDeltaWriter extends BaseDeltaTaskWriter {
+ private final RowDataDeltaWriter writer;
+
+ UnpartitionedDeltaWriter(
+ PartitionSpec spec,
+ FileFormat format,
+ FileAppenderFactory<Record> appenderFactory,
+ OutputFileFactory fileFactory,
+ FileIO io,
+ long targetFileSize,
+ Schema schema,
+ Set<Integer> identifierFieldIds,
+ boolean upsertMode) {
+ super(
+ spec,
+ format,
+ appenderFactory,
+ fileFactory,
+ io,
+ targetFileSize,
+ schema,
+ identifierFieldIds,
+ upsertMode);
+ this.writer = new RowDataDeltaWriter(null);
+ }
+
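+ // the table is unpartitioned, so every row routes to the same writer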
+ @Override
+ RowDataDeltaWriter route(Record row) {
+ return writer;
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.close();
+ }
+}
diff --git a/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/WriterResult.java b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/WriterResult.java
new file mode 100644
index 000000000000..3828b3ad6c86
--- /dev/null
+++ b/kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/WriterResult.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import java.util.List;
+import java.util.Map;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.types.Types.StructType;
+import org.apache.kafka.common.TopicPartition;
+
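+/**
+ * Carries the data and delete files written for a table, along with the table's partition struct
+ * and the consumed Kafka offsets.
+ */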
+public class WriterResult {
+
+ private final TableIdentifier tableIdentifier;
+ private final List<DataFile> dataFiles;
+ private final List<DeleteFile> deleteFiles;
+ private final StructType partitionStruct;
+ private final Map<TopicPartition, Long> offsets;
+
+ public WriterResult(
+ TableIdentifier tableIdentifier,
+ List<DataFile> dataFiles,
+ List<DeleteFile> deleteFiles,
+ StructType partitionStruct,
+ Map<TopicPartition, Long> offsets) {
+ this.tableIdentifier = tableIdentifier;
+ this.dataFiles = dataFiles;
+ this.deleteFiles = deleteFiles;
+ this.partitionStruct = partitionStruct;
+ this.offsets = offsets;
+ }
+
+ public TableIdentifier getTableIdentifier() {
+ return tableIdentifier;
+ }
+
+ public List<DataFile> getDataFiles() {
+ return dataFiles;
+ }
+
+ public List<DeleteFile> getDeleteFiles() {
+ return deleteFiles;
+ }
+
+ public StructType getPartitionStruct() {
+ return partitionStruct;
+ }
+
+ public Map<TopicPartition, Long> getOffsets() {
+ return offsets;
+ }
+}
diff --git a/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/BaseWriterTest.java b/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/BaseWriterTest.java
index ac44952a5c15..9696ad528e50 100644
--- a/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/BaseWriterTest.java
+++ b/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/BaseWriterTest.java
@@ -69,11 +69,12 @@ public void before() {
.thenReturn(LocationProviders.locationsFor("file", ImmutableMap.of()));
when(table.encryption()).thenReturn(PlaintextEncryptionManager.instance());
when(table.properties()).thenReturn(ImmutableMap.of());
+ when(table.name()).thenReturn("name");
}
protected WriteResult writeTest(
List<Record> rows, IcebergSinkConfig config, Class<?> expectedWriterClass) {
- try (TaskWriter<Record> writer = RecordUtils.createTableWriter(table, "name", config)) {
+ try (TaskWriter<Record> writer = RecordUtils.createTableWriter(table, table.name(), config)) {
assertThat(writer.getClass()).isEqualTo(expectedWriterClass);
rows.forEach(
diff --git a/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/PartitionedDeltaWriterTest.java b/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/PartitionedDeltaWriterTest.java
new file mode 100644
index 000000000000..4d44147267b6
--- /dev/null
+++ b/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/PartitionedDeltaWriterTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.List;
+import java.util.regex.Pattern;
+import org.apache.iceberg.connect.IcebergSinkConfig;
+import org.apache.iceberg.connect.TableSinkConfig;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.junit.jupiter.api.Test;
+
+public class PartitionedDeltaWriterTest extends BaseWriterTest {
+
+ @Test
+ public void testPartitionedDeltaWriter() {
+ IcebergSinkConfig config = mock(IcebergSinkConfig.class);
+ when(config.isUpsertMode()).thenReturn(true);
+ when(config.tableConfig(table.name()))
+ .thenReturn(new TableSinkConfig(Pattern.compile(""), List.of(), List.of(), ""));
+
+ when(table.spec()).thenReturn(SPEC);
+
+ Record row1 = GenericRecord.create(SCHEMA);
+ row1.setField("id", 123L);
+ row1.setField("data", "hello world!");
+ row1.setField("id2", 123L);
+
+ Record row2 = GenericRecord.create(SCHEMA);
+ row2.setField("id", 234L);
+ row2.setField("data", "foobar");
+ row2.setField("id2", 234L);
+
+ WriteResult result =
+ writeTest(ImmutableList.of(row1, row2), config, PartitionedDeltaWriter.class);
+
+ // in upsert mode, each write is a delete + append, so we'll have 1 data file
+ // and 1 delete file per partition (2 of each in total)
+ assertThat(result.dataFiles()).hasSize(2);
+ assertThat(result.deleteFiles()).hasSize(2);
+ }
+}
diff --git a/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/UnpartitionedDeltaWriterTest.java b/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/UnpartitionedDeltaWriterTest.java
new file mode 100644
index 000000000000..b7ec1e02f94d
--- /dev/null
+++ b/kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/data/UnpartitionedDeltaWriterTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.List;
+import java.util.regex.Pattern;
+import org.apache.iceberg.connect.IcebergSinkConfig;
+import org.apache.iceberg.connect.TableSinkConfig;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.junit.jupiter.api.Test;
+
+public class UnpartitionedDeltaWriterTest extends BaseWriterTest {
+
+ @Test
+ public void testUnpartitionedDeltaWriter() {
+ IcebergSinkConfig config = mock(IcebergSinkConfig.class);
+ when(config.isUpsertMode()).thenReturn(true);
+ when(config.tableConfig(table.name()))
+ .thenReturn(new TableSinkConfig(Pattern.compile(""), List.of(), List.of(), ""));
+
+ Record row = GenericRecord.create(SCHEMA);
+ row.setField("id", 123L);
+ row.setField("data", "hello world!");
+ row.setField("id2", 123L);
+
+ WriteResult result = writeTest(ImmutableList.of(row), config, UnpartitionedDeltaWriter.class);
+
+ // in upsert mode, each write is a delete + append, so we'll have 1 data file
+ // and 1 delete file
+ assertThat(result.dataFiles()).hasSize(1);
+ assertThat(result.deleteFiles()).hasSize(1);
+ }
+}