Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache, from the class StringConverterTest, method testBytesToString.
@Test
public void testBytesToString() {
    SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes());
    assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema());
    assertEquals(SAMPLE_STRING, data.value());
}
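For context, the reverse direction goes through fromConnectData(), which serializes a Connect value back to bytes. A minimal round-trip sketch, assuming the same TOPIC and SAMPLE_STRING constants the test above references but does not show:

// Hedged sketch: round-tripping a string through StringConverter. TOPIC and
// SAMPLE_STRING are assumed fixture constants mirroring the test above.
StringConverter converter = new StringConverter();
byte[] serialized = converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING);
SchemaAndValue deserialized = converter.toConnectData(TOPIC, serialized);
assertEquals(SAMPLE_STRING, deserialized.value());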
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache, from the class StringConverterTest, method testBytesToStringNonUtf8Encoding.
@Test
public void testBytesToStringNonUtf8Encoding() throws UnsupportedEncodingException {
    converter.configure(Collections.singletonMap("converter.encoding", "UTF-16"), true);
    SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes("UTF-16"));
    assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema());
    assertEquals(SAMPLE_STRING, data.value());
}
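The "converter.encoding" setting applies to serialization as well, and the boolean passed to configure() marks the converter as a key converter. A hedged sketch (same assumed constants as above) of the serialization side under UTF-16:

// Hedged sketch: the configured encoding also drives fromConnectData().
// StandardCharsets.UTF_16 is used here to avoid the checked exception that
// getBytes("UTF-16") would throw.
StringConverter converter = new StringConverter();
converter.configure(Collections.singletonMap("converter.encoding", "UTF-16"), true);
byte[] serialized = converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING);
assertArrayEquals(SAMPLE_STRING.getBytes(StandardCharsets.UTF_16), serialized);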
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache, from the class WorkerSinkTaskThreadedTest, method expectOnePoll.
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
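The IExpectationSetters returned from the final put() expectation is what lets each test attach its own response behavior, as the leading comment describes. Hypothetical call sites, sketched (the exact usage in the test class is not shown here; RetriableException is org.apache.kafka.connect.errors.RetriableException):

// Hypothetical sketch: chain behavior onto the recorded put() expectation.
expectOnePoll();                                            // put() succeeds silently
expectOnePoll().andThrow(new RetriableException("retry"));  // put() fails and should be retried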
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache, from the class WorkerSinkTaskThreadedTest, method expectRebalanceDuringPoll.
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() throws Exception {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            sinkTaskContext.getValue().offset(offsets);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
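The calls to sinkTaskContext.getValue() and rebalanceListener.getValue() inside the answer imply EasyMock Capture fields populated during test setup. A hedged sketch of how such captures are typically wired (names mirror the fields the helper references; the actual setup in WorkerSinkTaskThreadedTest is not shown here):

// Hedged sketch: captures grab the rebalance listener and task context so the
// poll answer can trigger a rebalance mid-test. Wiring is illustrative only.
Capture<ConsumerRebalanceListener> rebalanceListener = EasyMock.newCapture();
Capture<SinkTaskContext> sinkTaskContext = EasyMock.newCapture();
consumer.subscribe(EasyMock.eq(Arrays.asList(TOPIC)), EasyMock.capture(rebalanceListener));
EasyMock.expectLastCall();
sinkTask.initialize(EasyMock.capture(sinkTaskContext));
EasyMock.expectLastCall();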
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache, from the class KafkaStatusBackingStoreTest, method readTaskState.
@Test
public void readTaskState() {
    byte[] value = new byte[0];
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    Map<String, Object> statusMap = new HashMap<>();
    statusMap.put("worker_id", WORKER_ID);
    statusMap.put("state", "RUNNING");
    statusMap.put("generation", 0L);
    expect(converter.toConnectData(STATUS_TOPIC, value)).andReturn(new SchemaAndValue(null, statusMap));
    replayAll();
    store.read(consumerRecord(0, "status-task-conn-0", value));
    TaskStatus status = new TaskStatus(TASK, TaskStatus.State.RUNNING, WORKER_ID, 0);
    assertEquals(status, store.get(TASK));
    verifyAll();
}
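The consumerRecord(...) helper the test calls is not shown. A plausible shape, consistent with the ConsumerRecord constructor used in the WorkerSinkTaskThreadedTest snippets above (partition 0, synthetic checksum and size fields):

// Hedged sketch of the consumerRecord() helper the test invokes; the actual
// helper in KafkaStatusBackingStoreTest may differ, e.g. in timestamp handling.
private static ConsumerRecord<String, byte[]> consumerRecord(long offset, String key, byte[] value) {
    return new ConsumerRecord<>(STATUS_TOPIC, 0, offset, System.currentTimeMillis(),
            TimestampType.CREATE_TIME, 0L, 0, 0, key, value);
}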