Example 41 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From class KafkaConfigBackingStoreTest, method testRecordToRestartRequest.

@Test
public void testRecordToRestartRequest() {
    ConsumerRecord<String, byte[]> record = new ConsumerRecord<>(
            TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0,
            RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0),
            new RecordHeaders(), Optional.empty());
    Struct struct = RESTART_REQUEST_STRUCTS.get(0);
    SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct));
    RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue);
    assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName());
    assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks());
    assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed());
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) RestartRequest(org.apache.kafka.connect.runtime.RestartRequest) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Struct(org.apache.kafka.connect.data.Struct) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
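
For orientation (not part of the scraped example), the Headers API these tests exercise is small: RecordHeaders is the mutable implementation, add() appends a header, lastHeader() fetches the most recent one for a key, and the collection is iterable. A minimal sketch; the "trace-id" key is an arbitrary illustration:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        // RecordHeaders is the concrete, mutable Headers implementation.
        Headers headers = new RecordHeaders();
        headers.add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));

        // lastHeader returns the most recently added header for the key, or null if absent.
        Header traceId = headers.lastHeader("trace-id");
        System.out.println(new String(traceId.value(), StandardCharsets.UTF_8));

        // Headers is Iterable<Header>, so it works directly in a for-each loop.
        for (Header h : headers) {
            System.out.println(h.key());
        }
    }
}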

Example 42 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From class KafkaConfigBackingStoreTest, method testRestoreTargetStateUnexpectedDeletion.

@Test
public void testRestoreTargetStateUnexpectedDeletion() throws Exception {
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0,
                    CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0,
                    TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0,
                    TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0,
                    TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0,
                    COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), null);
    deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 5;
    expectStart(existingRecords, deserialized);
    expectPartitionCount(1);
    // Shouldn't see any callbacks since this is during startup
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // The target state deletion should reset the state to STARTED
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, even if uncommitted
    assertEquals(5, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) LinkedHashMap(java.util.LinkedHashMap) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
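
The five ConsumerRecord constructions above differ only in offset, key, and value. A hypothetical helper (configRecord is an assumed name, not part of KafkaConfigBackingStoreTest) names the shared arguments of the 11-argument constructor the tests use:

// Hypothetical helper; argument order mirrors the constructor used above:
// topic, partition, offset, timestamp, timestampType,
// serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch.
private static ConsumerRecord<String, byte[]> configRecord(long offset, String key, byte[] value) {
    return new ConsumerRecord<>(TOPIC, 0, offset, 0L, TimestampType.CREATE_TIME,
            0, 0, key, value, new RecordHeaders(), Optional.empty());
}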

Example 43 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From class WorkerSourceTaskTest, method testHeaders.

@Test
public void testHeaders() throws Exception {
    Headers headers = new RecordHeaders();
    headers.add("header_key", "header_value".getBytes());
    org.apache.kafka.connect.header.Headers connectHeaders = new ConnectHeaders();
    connectHeaders.add("header_key", new SchemaAndValue(Schema.STRING_SCHEMA, "header_value"));
    createWorkerTask();
    List<SourceRecord> records = new ArrayList<>();
    records.add(new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, null, connectHeaders));
    expectTopicCreation(TOPIC);
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecord(TOPIC, true, true, true, true, headers);
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(SERIALIZED_KEY, sent.getValue().key());
    assertEquals(SERIALIZED_RECORD, sent.getValue().value());
    assertEquals(headers, sent.getValue().headers());
    PowerMock.verifyAll();
}
Also used : Headers(org.apache.kafka.common.header.Headers) ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ArrayList(java.util.ArrayList) SourceRecord(org.apache.kafka.connect.source.SourceRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ThreadedTest(org.apache.kafka.connect.util.ThreadedTest) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) ParameterizedTest(org.apache.kafka.connect.util.ParameterizedTest) Test(org.junit.Test)
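
Example 43 turns on the difference between Connect's schema-aware org.apache.kafka.connect.header.Headers and the client's byte-array org.apache.kafka.common.header.Headers. The real worker path serializes each header value through a HeaderConverter; a minimal string-only sketch of that bridge (toRecordHeaders is an assumed helper name, and the String cast is an assumption for illustration):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.header.Headers;

// Assumes every Connect header value is a String; production code delegates to a HeaderConverter.
static RecordHeaders toRecordHeaders(Headers connectHeaders) {
    RecordHeaders out = new RecordHeaders();
    for (Header header : connectHeaders) {
        out.add(header.key(), ((String) header.value()).getBytes(StandardCharsets.UTF_8));
    }
    return out;
}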

Example 44 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From class StreamTask, method punctuate.

/**
 * @throws IllegalStateException if the current node is not null
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
@SuppressWarnings("unchecked")
@Override
public void punctuate(final ProcessorNode<?, ?, ?, ?> node, final long timestamp, final PunctuationType type, final Punctuator punctuator) {
    if (processorContext.currentNode() != null) {
        throw new IllegalStateException(String.format("%sCurrent node is not null", logPrefix));
    }
    // When punctuating, we need to preserve the timestamp (this can be either system time or event time),
    // while the other record context fields are set to dummy values: null topic, -1 partition, -1 offset, and empty headers.
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(timestamp, -1L, -1, null, new RecordHeaders());
    updateProcessorContext(node, time.milliseconds(), recordContext);
    if (log.isTraceEnabled()) {
        log.trace("Punctuating processor {} with timestamp {} and punctuation type {}", node.name(), timestamp, type);
    }
    try {
        maybeMeasureLatency(() -> node.punctuate(timestamp, punctuator), time, punctuateLatencySensor);
    } catch (final StreamsException e) {
        throw e;
    } catch (final RuntimeException e) {
        throw new StreamsException(String.format("%sException caught while punctuating processor '%s'", logPrefix, node.name()), e);
    } finally {
        processorContext.setCurrentNode(null);
    }
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) StreamsException(org.apache.kafka.streams.errors.StreamsException)
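
For context, this punctuate() path only runs for processors that registered a punctuator. A minimal sketch of the scheduling side, using the current Processor API (PunctuatingProcessor and the 10-second wall-clock interval are illustrative, not from the source):

import java.time.Duration;
import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class PunctuatingProcessor implements Processor<String, String, String, String> {

    @Override
    public void init(final ProcessorContext<String, String> context) {
        // Each wall-clock firing is routed through StreamTask.punctuate() above,
        // which supplies the dummy record context with empty RecordHeaders.
        context.schedule(Duration.ofSeconds(10), PunctuationType.WALL_CLOCK_TIME,
                timestamp -> System.out.println("punctuated at " + timestamp));
    }

    @Override
    public void process(final Record<String, String> record) {
        // No per-record work is needed for this sketch.
    }
}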

Example 45 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From class GlobalStateTaskTest, method maybeDeserialize.

private void maybeDeserialize(final GlobalStateUpdateTask globalStateTask, final byte[] key, final byte[] recordValue, final boolean failExpected) {
    final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(topic2, 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, key, recordValue, new RecordHeaders(), Optional.empty());
    globalStateTask.initialize();
    try {
        globalStateTask.update(record);
        if (failExpected) {
            fail("Should have failed to deserialize.");
        }
    } catch (final StreamsException e) {
        if (!failExpected) {
            fail("Shouldn't have failed to deserialize.");
        }
    }
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) StreamsException(org.apache.kafka.streams.errors.StreamsException) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
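
A hypothetical pair of calls from a test (serializedKey, corruptedValue, and serializedValue are assumed fixture names, not from the source) shows how the failExpected flag flips the assertion:

// Hypothetical usage; the byte fixtures are assumptions.
maybeDeserialize(globalStateTask, serializedKey, corruptedValue, true);   // bad value: update() must throw
maybeDeserialize(globalStateTask, serializedKey, serializedValue, false); // good value: update() must not throw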

Aggregations

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 156
Test (org.junit.Test): 111
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 52
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 41
Headers (org.apache.kafka.common.header.Headers): 34
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 27
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 24
TopicPartition (org.apache.kafka.common.TopicPartition): 22
Position (org.apache.kafka.streams.query.Position): 17
ArrayList (java.util.ArrayList): 13
Header (org.apache.kafka.common.header.Header): 13
HashMap (java.util.HashMap): 12
ByteBuffer (java.nio.ByteBuffer): 11
Struct (org.apache.kafka.connect.data.Struct): 11
Test (org.junit.jupiter.api.Test): 11
LinkedHashMap (java.util.LinkedHashMap): 9
Bytes (org.apache.kafka.common.utils.Bytes): 9
StreamsException (org.apache.kafka.streams.errors.StreamsException): 9
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 8
Metrics (org.apache.kafka.common.metrics.Metrics): 8