Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.
From the class KafkaStatusBackingStoreTest, method readConnectorState:
@Test
public void readConnectorState() {
    byte[] value = new byte[0];
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    Map<String, Object> statusMap = new HashMap<>();
    statusMap.put("worker_id", WORKER_ID);
    statusMap.put("state", "RUNNING");
    statusMap.put("generation", 0L);
    expect(converter.toConnectData(STATUS_TOPIC, value)).andReturn(new SchemaAndValue(null, statusMap));
    replayAll();
    store.read(consumerRecord(0, "status-connector-conn", value));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    assertEquals(status, store.get(CONNECTOR));
    verifyAll();
}
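The mocked Converter above stands in for a real implementation. As a rough illustration of the SchemaAndValue it is stubbed to return, the sketch below round-trips a similar schemaless status map through JsonConverter with embedded schemas disabled; the topic name, class name, and field values here are arbitrary illustrations, not taken from the test.

import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class SchemaAndValueRoundTrip {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Treat payloads as schemaless JSON (no embedded schema envelope)
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        Map<String, Object> statusMap = new HashMap<>();
        statusMap.put("worker_id", "worker-1");
        statusMap.put("state", "RUNNING");

        byte[] serialized = converter.fromConnectData("connect-status", null, statusMap);
        SchemaAndValue deserialized = converter.toConnectData("connect-status", serialized);

        // Schemaless data comes back with a null schema and a Map value
        System.out.println(deserialized.schema()); // null
        System.out.println(deserialized.value());  // a Map containing the original entries
    }
}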
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.
From the class KafkaConfigBackingStoreTest, method expectRead:
private void expectRead(LinkedHashMap<String, byte[]> serializedValues, Map<String, Struct> deserializedValues) {
    expectReadToEnd(serializedValues);
    for (Map.Entry<String, Struct> deserializedValueEntry : deserializedValues.entrySet()) {
        byte[] serializedValue = serializedValues.get(deserializedValueEntry.getKey());
        EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(serializedValue)))
                .andReturn(new SchemaAndValue(null, structToMap(deserializedValueEntry.getValue())));
    }
}
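The structToMap helper used above is not shown in this snippet. A plausible sketch of such a conversion for a flat Struct is below; it is a hypothetical stand-in, not the test's actual helper, and relies only on the public org.apache.kafka.connect.data APIs (Struct.schema().fields(), Field.name(), Struct.get(Field)). Nested Structs would need recursion.

// Hypothetical helper, not the test's actual structToMap implementation
private static Map<String, Object> structToMap(Struct struct) {
    Map<String, Object> result = new HashMap<>();
    for (Field field : struct.schema().fields()) {
        // Copy each declared field into a schemaless map, keyed by field name
        result.put(field.name(), struct.get(field));
    }
    return result;
}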
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.
From the class WorkerSinkTaskThreadedTest, method expectPolls:
// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP,
                            TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {
        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
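For context, the stubbed keyConverter/valueConverter calls above mimic the conversion step a sink worker performs before handing records to SinkTask.put. A simplified sketch of that step is below; it is not the actual WorkerSinkTask code, and it assumes keyConverter and valueConverter fields of type Converter are available.

// Illustrative sketch only: turn a raw Kafka record into a SinkRecord via the converters
private SinkRecord convertMessage(ConsumerRecord<byte[], byte[]> msg) {
    // Deserialize raw key/value bytes into Connect data plus optional schemas
    SchemaAndValue key = keyConverter.toConnectData(msg.topic(), msg.key());
    SchemaAndValue value = valueConverter.toConnectData(msg.topic(), msg.value());
    // Wrap them in a SinkRecord carrying the original topic/partition/offset coordinates
    return new SinkRecord(msg.topic(), msg.partition(),
            key.schema(), key.value(),
            value.schema(), value.value(),
            msg.offset());
}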
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.
From the class ByteArrayConverterTest, method testToConnectNull:
@Test
public void testToConnectNull() {
    SchemaAndValue data = converter.toConnectData(TOPIC, null);
    assertEquals(Schema.OPTIONAL_BYTES_SCHEMA, data.schema());
    assertNull(data.value());
}
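A companion sketch for the non-null case, assuming the same converter and TOPIC fixture as the test above plus JUnit's assertArrayEquals: ByteArrayConverter pairs OPTIONAL_BYTES_SCHEMA with the raw bytes unchanged.

@Test
public void testToConnectBytes() {
    byte[] payload = "sample".getBytes(StandardCharsets.UTF_8);
    SchemaAndValue data = converter.toConnectData(TOPIC, payload);
    // The bytes are passed through as-is under the optional bytes schema
    assertEquals(Schema.OPTIONAL_BYTES_SCHEMA, data.schema());
    assertArrayEquals(payload, (byte[]) data.value());
}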
Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.
From the class OffsetStorageReaderImpl, method offsets:
@Override
@SuppressWarnings("unchecked")
public <T> Map<Map<String, T>, Map<String, Object>> offsets(Collection<Map<String, T>> partitions) {
    // Serialize keys so backing store can work with them
    Map<ByteBuffer, Map<String, T>> serializedToOriginal = new HashMap<>(partitions.size());
    for (Map<String, T> key : partitions) {
        try {
            // Offsets are treated as schemaless, their format is only validated here (and the returned value below)
            OffsetUtils.validateFormat(key);
            byte[] keySerialized = keyConverter.fromConnectData(namespace, null, Arrays.asList(namespace, key));
            ByteBuffer keyBuffer = (keySerialized != null) ? ByteBuffer.wrap(keySerialized) : null;
            serializedToOriginal.put(keyBuffer, key);
        } catch (Throwable t) {
            log.error("CRITICAL: Failed to serialize partition key when getting offsets for task with "
                    + "namespace {}. No value for this data will be returned, which may break the "
                    + "task or cause it to skip some data.", namespace, t);
        }
    }
    // Get serialized key -> serialized value from backing store
    Map<ByteBuffer, ByteBuffer> raw;
    try {
        raw = backingStore.get(serializedToOriginal.keySet(), null).get();
    } catch (Exception e) {
        log.error("Failed to fetch offsets from namespace {}: ", namespace, e);
        throw new ConnectException("Failed to fetch offsets.", e);
    }
    // Deserialize all the values and map back to the original keys
    Map<Map<String, T>, Map<String, Object>> result = new HashMap<>(partitions.size());
    for (Map.Entry<ByteBuffer, ByteBuffer> rawEntry : raw.entrySet()) {
        try {
            // Since null could be a valid key, explicitly check whether map contains the key
            if (!serializedToOriginal.containsKey(rawEntry.getKey())) {
                log.error("Should be able to map {} back to a requested partition-offset key, backing "
                        + "store may have returned invalid data", rawEntry.getKey());
                continue;
            }
            Map<String, T> origKey = serializedToOriginal.get(rawEntry.getKey());
            SchemaAndValue deserializedSchemaAndValue = valueConverter.toConnectData(
                    namespace, rawEntry.getValue() != null ? rawEntry.getValue().array() : null);
            Object deserializedValue = deserializedSchemaAndValue.value();
            OffsetUtils.validateFormat(deserializedValue);
            result.put(origKey, (Map<String, Object>) deserializedValue);
        } catch (Throwable t) {
            log.error("CRITICAL: Failed to deserialize offset data when getting offsets for task with "
                    + "namespace {}. No value for this data will be returned, which may break the "
                    + "task or cause it to skip some data. This could either be due to an error in "
                    + "the connector implementation or incompatible schema.", namespace, t);
        }
    }
    return result;
}
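On the consuming side, a SourceTask typically reaches these deserialized offsets through the OffsetStorageReader exposed on its context. A minimal sketch, assuming a single partition keyed by a hypothetical "filename" field, a "file" config property, and an offset stored under a hypothetical "position" field as a Long:

// Illustrative sketch inside a SourceTask subclass; field names and config keys are assumptions
@Override
public void start(Map<String, String> props) {
    Map<String, String> partition = Collections.singletonMap("filename", props.get("file"));
    Map<String, Object> offset = context.offsetStorageReader().offset(partition);
    // null means no offset has been committed yet for this partition
    long startPosition = (offset == null) ? 0L : (Long) offset.get("position");
    // ... resume reading from startPosition
}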