Use of io.confluent.connect.storage.partitioner.FieldPartitioner in the project kafka-connect-storage-cloud by confluentinc.
Class TopicPartitionWriterTest, method testPartitioningExceptionReported.
@Test
public void testPartitioningExceptionReported() throws Exception {
  String field = "field";
  setUp();

  // Define the partitioner
  Partitioner<?> partitioner = new FieldPartitioner<>();
  parsedConfig.put(PARTITION_FIELD_NAME_CONFIG, Arrays.asList(field));
  partitioner.configure(parsedConfig);

  SinkTaskContext mockContext = mock(SinkTaskContext.class);
  ErrantRecordReporter mockReporter = mock(ErrantRecordReporter.class);
  TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(
      TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, mockContext, mockReporter);

  Schema schema = SchemaBuilder.struct().field(field, Schema.STRING_SCHEMA).build();
  Struct struct = new Struct(schema).put(field, "a");
  topicPartitionWriter.buffer(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, "key", schema, struct, 0));
  // Non-Struct record should throw an exception and get reported
  topicPartitionWriter.buffer(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, "key", schema, "not a struct", 1));

  // Test actual write
  topicPartitionWriter.write();

  ArgumentCaptor<Throwable> exceptionCaptor = ArgumentCaptor.forClass(PartitionException.class);
  Mockito.verify(mockReporter, times(1)).report(any(), exceptionCaptor.capture());
  assertEquals("Error encoding partition.", exceptionCaptor.getValue().getMessage());

  topicPartitionWriter.close();
}
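For context, the following is a minimal, hypothetical sketch (not part of the test class) of what FieldPartitioner does with the two records buffered above: a Struct value is encoded as "<field>=<value>", while a non-Struct value makes encodePartition throw the PartitionException that TopicPartitionWriter forwards to the ErrantRecordReporter. The config map, literal values, and the standalone setup are assumptions for illustration; the classes come from org.apache.kafka.connect and io.confluent.connect.storage, plus java.util for the map and list.

Partitioner<?> fieldPartitioner = new FieldPartitioner<>();
Map<String, Object> partitionerConfig = new HashMap<>();
// Assumed configuration: one partition field named "field" and the "/" directory delimiter.
partitionerConfig.put(PartitionerConfig.PARTITION_FIELD_NAME_CONFIG, Arrays.asList("field"));
partitionerConfig.put(StorageCommonConfig.DIRECTORY_DELIM_CONFIG, "/");
fieldPartitioner.configure(partitionerConfig);

Schema valueSchema = SchemaBuilder.struct().field("field", Schema.STRING_SCHEMA).build();
Struct value = new Struct(valueSchema).put("field", "a");
SinkRecord structRecord = new SinkRecord("topic", 0, Schema.STRING_SCHEMA, "key", valueSchema, value, 0);
// A Struct value encodes to "<fieldName>=<fieldValue>", here "field=a".
String encoded = fieldPartitioner.encodePartition(structRecord);

SinkRecord badRecord = new SinkRecord("topic", 0, Schema.STRING_SCHEMA, "key", valueSchema, "not a struct", 1);
try {
  // A non-Struct value cannot be field-partitioned.
  fieldPartitioner.encodePartition(badRecord);
} catch (PartitionException e) {
  // Message asserted in the test above: "Error encoding partition."
}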
Use of io.confluent.connect.storage.partitioner.FieldPartitioner in the project kafka-connect-storage-cloud by confluentinc.
Class TopicPartitionWriterTest, method testWriteRecordFieldPartitioner.
@Test
public void testWriteRecordFieldPartitioner() throws Exception {
  localProps.put(FLUSH_SIZE_CONFIG, "9");
  setUp();

  // Define the partitioner
  Partitioner<?> partitioner = new FieldPartitioner<>();
  partitioner.configure(parsedConfig);
  TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(
      TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, null);

  String key = "key";
  Schema schema = createSchema();
  List<Struct> records = createRecordBatches(schema, 3, 6);
  Collection<SinkRecord> sinkRecords = createSinkRecords(records, key, schema);
  for (SinkRecord record : sinkRecords) {
    topicPartitionWriter.buffer(record);
  }

  // Test actual write
  topicPartitionWriter.write();
  topicPartitionWriter.close();

  @SuppressWarnings("unchecked")
  List<String> partitionFields = (List<String>) parsedConfig.get(PARTITION_FIELD_NAME_CONFIG);
  String partitionField = partitionFields.get(0);
  String dirPrefix1 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(16));
  String dirPrefix2 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(17));
  String dirPrefix3 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(18));

  List<Struct> expectedRecords = new ArrayList<>();
  int ibase = 16;
  float fbase = 12.2f;
  // The expected sequence of records is constructed taking into account that sorting of files occurs in verify
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 6; ++j) {
      expectedRecords.add(createRecord(schema, ibase + i, fbase + i));
    }
  }

  List<String> expectedFiles = new ArrayList<>();
  for (int i = 0; i < 18; i += 9) {
    expectedFiles.add(FileUtils.fileKeyToCommit(topicsDir, dirPrefix1, TOPIC_PARTITION, i, extension, ZERO_PAD_FMT));
    expectedFiles.add(FileUtils.fileKeyToCommit(topicsDir, dirPrefix2, TOPIC_PARTITION, i + 1, extension, ZERO_PAD_FMT));
    expectedFiles.add(FileUtils.fileKeyToCommit(topicsDir, dirPrefix3, TOPIC_PARTITION, i + 2, extension, ZERO_PAD_FMT));
  }
  verify(expectedFiles, 3, schema, expectedRecords);
}
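As a rough sketch of how the directory prefixes above come together (assuming a single partition field, here named "int", and the "/" directory delimiter, neither of which is spelled out in the snippet), generatePartitionedPath joins the topic name and the encoded "field=value" pair with the delimiter; FileUtils.fileKeyToCommit then builds the full object key under topicsDir with the zero-padded starting offset and file extension. The field name and config values below are illustrative assumptions.

Partitioner<?> p = new FieldPartitioner<>();
Map<String, Object> conf = new HashMap<>();
conf.put(PartitionerConfig.PARTITION_FIELD_NAME_CONFIG, Arrays.asList("int")); // assumed field name
conf.put(StorageCommonConfig.DIRECTORY_DELIM_CONFIG, "/");
p.configure(conf);

// For the three distinct field values 16, 17 and 18 written above, the
// partitioned paths take the form "<topic>/int=16", "<topic>/int=17" and "<topic>/int=18".
String dirPrefix = p.generatePartitionedPath("topic", "int=" + 16);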