Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class KafkaConfigBackingStoreTest, method testRecordToRestartRequest.
@Test
public void testRecordToRestartRequest() {
    ConsumerRecord<String, byte[]> record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME,
            0, 0, RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty());
    Struct struct = RESTART_REQUEST_STRUCTS.get(0);
    SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct));
    RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue);
    assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName());
    assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks());
    assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed());
}
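The empty RecordHeaders here simply satisfies the headers parameter of the eleven-argument ConsumerRecord constructor. A minimal, self-contained sketch of the same construction follows; the topic name, key, and value are hypothetical stand-ins for the test fixtures:

import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class EmptyHeadersSketch {
    public static void main(final String[] args) {
        // "connect-configs" and "connector-restart-key" are made-up stand-ins
        // for the TOPIC and RESTART_CONNECTOR_KEYS fixtures in the test.
        ConsumerRecord<String, byte[]> record = new ConsumerRecord<>(
                "connect-configs",        // topic
                0,                        // partition
                0L,                       // offset
                0L,                       // timestamp
                TimestampType.CREATE_TIME,
                0,                        // serialized key size
                0,                        // serialized value size
                "connector-restart-key",  // key
                new byte[0],              // value
                new RecordHeaders(),      // no headers on this record
                Optional.empty());        // leader epoch unknown
        System.out.println(record.headers().toArray().length); // prints 0
    }
}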
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class KafkaConfigBackingStoreTest, method testRestoreTargetStateUnexpectedDeletion.
@Test
public void testRestoreTargetStateUnexpectedDeletion() throws Exception {
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
                    CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
                    CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
                    CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0),
                    CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
                    CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), null);
    deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 5;
    expectStart(existingRecords, deserialized);
    expectPartitionCount(1);
    // Shouldn't see any callbacks since this is during startup
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // The target state deletion should reset the state to STARTED
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, even if uncommitted
    assertEquals(5, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    configStorage.stop();
    PowerMock.verifyAll();
}
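Note that each replayed record gets its own RecordHeaders instance rather than a shared one. RecordHeaders is mutable, so a single shared instance would let a header added to one record leak into all the others. A small sketch of that mutability, with a made-up header key and values:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class MutableHeadersSketch {
    public static void main(final String[] args) {
        final Headers headers = new RecordHeaders();
        // add() mutates the instance in place and returns it for chaining.
        headers.add("trace-id", "abc123".getBytes());
        headers.add("trace-id", "def456".getBytes());
        // Duplicate keys are allowed; lastHeader() returns the most recent one.
        System.out.println(new String(headers.lastHeader("trace-id").value())); // def456
        for (final Header h : headers) {
            System.out.println(h.key()); // trace-id, twice
        }
    }
}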
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class WorkerSourceTaskTest, method testHeaders.
@Test
public void testHeaders() throws Exception {
    Headers headers = new RecordHeaders();
    headers.add("header_key", "header_value".getBytes());
    org.apache.kafka.connect.header.Headers connectHeaders = new ConnectHeaders();
    connectHeaders.add("header_key", new SchemaAndValue(Schema.STRING_SCHEMA, "header_value"));
    createWorkerTask();
    List<SourceRecord> records = new ArrayList<>();
    records.add(new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, null, connectHeaders));
    expectTopicCreation(TOPIC);
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecord(TOPIC, true, true, true, true, headers);
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(SERIALIZED_KEY, sent.getValue().key());
    assertEquals(SERIALIZED_RECORD, sent.getValue().value());
    assertEquals(headers, sent.getValue().headers());
    PowerMock.verifyAll();
}
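The test keeps two parallel representations of the same header: the raw key/byte[] form that lands on the wire (RecordHeaders) and the schema-aware Connect form attached to the SourceRecord (ConnectHeaders). A standalone sketch of the two side by side, reusing the test's key and value:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.header.ConnectHeaders;

public class HeaderPairSketch {
    public static void main(final String[] args) {
        // Wire-level form: the value is an opaque byte[].
        final Headers kafkaHeaders = new RecordHeaders();
        kafkaHeaders.add("header_key", "header_value".getBytes(StandardCharsets.UTF_8));

        // Connect form: the value carries its schema; the worker's configured
        // HeaderConverter produces the byte[] form when the record is sent.
        final ConnectHeaders connectHeaders = new ConnectHeaders();
        connectHeaders.add("header_key", new SchemaAndValue(Schema.STRING_SCHEMA, "header_value"));

        System.out.println(new String(kafkaHeaders.lastHeader("header_key").value(), StandardCharsets.UTF_8));
        System.out.println(connectHeaders.lastWithName("header_key").value());
    }
}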
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class StreamTask, method punctuate.
/**
 * @throws IllegalStateException if the current node is not null
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
@SuppressWarnings("unchecked")
@Override
public void punctuate(final ProcessorNode<?, ?, ?, ?> node, final long timestamp, final PunctuationType type, final Punctuator punctuator) {
    if (processorContext.currentNode() != null) {
        throw new IllegalStateException(String.format("%sCurrent node is not null", logPrefix));
    }
    // when punctuating, we need to preserve the timestamp (this can be either system time or event time)
    // while the other record context fields are set to dummies: null topic, -1 partition, -1 offset, and empty headers
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(timestamp, -1L, -1, null, new RecordHeaders());
    updateProcessorContext(node, time.milliseconds(), recordContext);
    if (log.isTraceEnabled()) {
        log.trace("Punctuating processor {} with timestamp {} and punctuation type {}", node.name(), timestamp, type);
    }
    try {
        maybeMeasureLatency(() -> node.punctuate(timestamp, punctuator), time, punctuateLatencySensor);
    } catch (final StreamsException e) {
        throw e;
    } catch (final RuntimeException e) {
        throw new StreamsException(String.format("%sException caught while punctuating processor '%s'", logPrefix, node.name()), e);
    } finally {
        processorContext.setCurrentNode(null);
    }
}
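The empty RecordHeaders reflects that a punctuation is not triggered by any input record, so there is nothing meaningful to put in the record context beyond the timestamp. From the application side, the callback that punctuate() eventually runs is registered via ProcessorContext.schedule(); a hedged sketch using the Processor API, with an arbitrary class name and interval:

import java.time.Duration;
import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class TickingProcessor implements Processor<String, String, String, String> {
    private ProcessorContext<String, String> context;

    @Override
    public void init(final ProcessorContext<String, String> context) {
        this.context = context;
        // StreamTask.punctuate() above is what eventually invokes this lambda.
        // There is no triggering input record, so the forwarded Record carries
        // only the punctuation timestamp.
        context.schedule(Duration.ofSeconds(10), PunctuationType.WALL_CLOCK_TIME,
                timestamp -> context.forward(new Record<>("tick", "fired", timestamp)));
    }

    @Override
    public void process(final Record<String, String> record) {
        context.forward(record);
    }
}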
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class GlobalStateTaskTest, method maybeDeserialize.
private void maybeDeserialize(final GlobalStateUpdateTask globalStateTask, final byte[] key, final byte[] recordValue, final boolean failExpected) {
    final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(topic2, 1, 1, 0L, TimestampType.CREATE_TIME,
            0, 0, key, recordValue, new RecordHeaders(), Optional.empty());
    globalStateTask.initialize();
    try {
        globalStateTask.update(record);
        if (failExpected) {
            fail("Should have failed to deserialize.");
        }
    } catch (final StreamsException e) {
        if (!failExpected) {
            fail("Shouldn't have failed to deserialize.");
        }
    }
}
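A self-contained illustration of the failure path this helper exercises: a value whose bytes cannot be decoded by the configured deserializer. Here an IntegerDeserializer stands in for whatever deserializer the global state task is configured with; the topic name and payload are made up:

import java.nio.charset.StandardCharsets;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.IntegerDeserializer;

public class BadValueSketch {
    public static void main(final String[] args) {
        // Three bytes can never decode as a 4-byte int, so this plays the
        // role of the intentionally bad recordValue in the test above.
        final byte[] badValue = "bad".getBytes(StandardCharsets.UTF_8);
        final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
                "some-global-topic", 1, 1L, 0L, TimestampType.CREATE_TIME,
                0, badValue.length, new byte[0], badValue, new RecordHeaders(), Optional.empty());
        try (IntegerDeserializer deserializer = new IntegerDeserializer()) {
            deserializer.deserialize(record.topic(), record.value());
        } catch (final SerializationException e) {
            System.out.println("Failed to deserialize, as expected: " + e.getMessage());
        }
    }
}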