Example 36 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class ProcessorRecordContextTest, method shouldEstimateHeadersLength.

@Test
public void shouldEstimateHeadersLength() {
    final Headers headers = new RecordHeaders();
    headers.add("header-key", "header-value".getBytes());
    final ProcessorRecordContext context = new ProcessorRecordContext(42L, 73L, 0, null, headers);
    assertEquals(MIN_SIZE + 10L + 12L, context.residentMemorySizeEstimate());
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Headers(org.apache.kafka.common.header.Headers) Test(org.junit.Test)
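
The 10L and 12L in the assertion correspond to the byte lengths of "header-key" (10) and "header-value" (12). A minimal sketch of that accounting, assuming a hypothetical helper estimateHeadersBytes that is not part of the Kafka API and only mirrors the arithmetic:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderSizeSketch {
    // Hypothetical helper: sums key and value byte lengths per header,
    // mirroring the 10L + 12L terms asserted in the test above.
    static long estimateHeadersBytes(Headers headers) {
        long total = 0L;
        for (Header header : headers) {
            total += header.key().getBytes(StandardCharsets.UTF_8).length; // "header-key" -> 10
            total += header.value() == null ? 0 : header.value().length;   // "header-value" -> 12
        }
        return total;
    }

    public static void main(String[] args) {
        Headers headers = new RecordHeaders();
        headers.add("header-key", "header-value".getBytes(StandardCharsets.UTF_8));
        System.out.println(estimateHeadersBytes(headers)); // prints 22
    }
}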

Example 37 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class ProcessorRecordContextTest, method shouldEstimateEmptyHeaderAsZeroLength.

@Test
public void shouldEstimateEmptyHeaderAsZeroLength() {
    final ProcessorRecordContext context = new ProcessorRecordContext(42L, 73L, 0, null, new RecordHeaders());
    assertEquals(MIN_SIZE, context.residentMemorySizeEstimate());
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Test(org.junit.Test)
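
An empty RecordHeaders adds nothing beyond MIN_SIZE, as the assertion shows. For orientation, a short self-contained sketch of the RecordHeaders operations that appear throughout these examples; the header keys and values are invented for illustration:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersApiSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("trace-id", "abc123".getBytes());             // append by key and value
        headers.add(new RecordHeader("retries", new byte[] {3})); // append a Header instance
        Header last = headers.lastHeader("trace-id");             // most recently added "trace-id"
        System.out.println(new String(last.value()));             // prints abc123
        headers.remove("retries");                                // drop all "retries" headers
        System.out.println(headers.toArray().length);             // prints 1
    }
}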

Example 38 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class KafkaConfigBackingStoreTest, method testRecordToRestartRequestOnlyFailedInconsistent.

@Test
public void testRecordToRestartRequestOnlyFailedInconsistent() {
    ConsumerRecord<String, byte[]> record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0,
            RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty());
    Struct struct = ONLY_FAILED_MISSING_STRUCT;
    SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct));
    RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue);
    assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName());
    assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks());
    assertFalse(restartRequest.onlyFailed());
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) RestartRequest(org.apache.kafka.connect.runtime.RestartRequest) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Struct(org.apache.kafka.connect.data.Struct) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
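
The ONLY_FAILED_MISSING_STRUCT fixture is a Struct whose schema omits the only-failed flag, which is why recordToRestartRequest falls back to false. A minimal sketch of building such a struct with the Connect data API; the field name "include-tasks" is an assumption for illustration, not copied from the source:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class RestartStructSketch {
    public static void main(String[] args) {
        // Schema deliberately omits an only-failed field, mirroring ONLY_FAILED_MISSING_STRUCT.
        // The "include-tasks" field name is a placeholder assumption.
        Schema schema = SchemaBuilder.struct()
                .field("include-tasks", Schema.BOOLEAN_SCHEMA)
                .build();
        Struct struct = new Struct(schema).put("include-tasks", true);
        System.out.println(struct.getBoolean("include-tasks")); // prints true
        // Reading a field absent from the schema throws, so callers that tolerate
        // older record formats must fall back to a default such as false.
    }
}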

Example 39 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class KafkaConfigBackingStoreTest, method testRestoreConnectorDeletion.

@Test
public void testRestoreConnectorDeletion() throws Exception {
    // Restoring data should notify only of the latest values after loading is complete. This also validates
    // that inconsistent state is ignored.
    expectConfigure();
    // Overwrite each type at least once to ensure we see the latest data after loading
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), null);
    deserialized.put(CONFIGS_SERIALIZED.get(4), null);
    deserialized.put(CONFIGS_SERIALIZED.get(5), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 6;
    expectStart(existingRecords, deserialized);
    expectPartitionCount(1);
    // Shouldn't see any callbacks since this is during startup
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Should see a single connector and its config should be the last one seen anywhere in the log
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, even if uncommitted
    assertEquals(6, configState.offset());
    assertTrue(configState.connectors().isEmpty());
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) LinkedHashMap(java.util.LinkedHashMap) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
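
The connector disappears because the record at offset 3 deserializes to null: on the compacted config topic, a null-valued record is a tombstone that deletes the connector's configuration. A minimal sketch of publishing such a tombstone; the topic and key literals are placeholders rather than the test's constants:

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class TombstoneSketch {
    public static void main(String[] args) {
        // A null value is a tombstone: after compaction the key disappears from the log.
        ProducerRecord<String, byte[]> tombstone = new ProducerRecord<>(
                "connect-configs",      // config topic (placeholder)
                null,                   // partition: let the partitioner decide
                "connector-connector1", // connector config key (placeholder)
                null,                   // null value marks the key for deletion
                new RecordHeaders());   // headers may still accompany a tombstone
        System.out.println(tombstone);
    }
}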

Example 40 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class KafkaConfigBackingStoreTest, method testPutTaskConfigsDoesNotResolveAllInconsistencies.

@Test
public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exception {
    // Test a case where a failure and compaction has left us in an inconsistent state when reading the log.
    // We start out by loading an initial configuration where we started to write a task update, and then
    // compaction cleaned up the earlier record.
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
            // The record at offset 1 was removed by compaction:
            // new ConsumerRecord<>(TOPIC, 0, 1, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1)),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()),
            new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    deserialized.put(CONFIGS_SERIALIZED.get(5), TASK_CONFIG_STRUCTS.get(1));
    logOffset = 6;
    expectStart(existingRecords, deserialized);
    expectPartitionCount(1);
    // Successful attempt to write new task config
    expectReadToEnd(new LinkedHashMap<>());
    expectConvertWriteRead(TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), "properties", SAMPLE_CONFIGS.get(0));
    expectReadToEnd(new LinkedHashMap<>());
    expectConvertWriteRead(COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2),
            "tasks", 1); // Updated to just 1 task
    // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
    configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0)));
    EasyMock.expectLastCall();
    // Records to be read by consumer as it reads to the end of the log
    LinkedHashMap<String, byte[]> serializedConfigs = new LinkedHashMap<>();
    serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
    serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2));
    expectReadToEnd(serializedConfigs);
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // After reading the log, it should have been in an inconsistent state
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, not last committed
    assertEquals(6, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    // Inconsistent data should leave us with no tasks listed for the connector and an entry in the inconsistent list
    assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0)));
    // Task configs are withheld while the connector is inconsistent, so both lookups return null
    assertNull(configState.taskConfig(TASK_IDS.get(0)));
    assertNull(configState.taskConfig(TASK_IDS.get(1)));
    assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors());
    // Next, issue a write that has everything that is needed and it should be accepted. Note that in this case
    // we are going to shrink the number of tasks to 1
    configStorage.putTaskConfigs("connector1", Collections.singletonList(SAMPLE_CONFIGS.get(0)));
    // Validate updated config
    configState = configStorage.snapshot();
    // Offset advances by exactly two: the single successful putTaskConfigs call wrote
    // one task config record plus one commit record to the topic.
    assertEquals(8, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    assertEquals(Arrays.asList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0)));
    assertEquals(Collections.emptySet(), configState.inconsistentConnectors());
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) LinkedHashMap(java.util.LinkedHashMap) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
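
Every test record in these examples is built through the same eleven-argument ConsumerRecord constructor. An annotated sketch with placeholder values, following the exact argument order used above:

import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class ConsumerRecordSketch {
    public static void main(String[] args) {
        ConsumerRecord<String, byte[]> record = new ConsumerRecord<>(
                "connect-configs",         // topic (placeholder)
                0,                         // partition
                0L,                        // offset
                0L,                        // timestamp
                TimestampType.CREATE_TIME, // timestamp type
                0,                         // serialized key size (unused in these tests)
                0,                         // serialized value size (unused in these tests)
                "some-key",                // key (placeholder)
                new byte[0],               // value (placeholder)
                new RecordHeaders(),       // empty headers
                Optional.empty());         // leader epoch unknown
        System.out.println(record.headers());
    }
}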

Aggregations

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 156
Test (org.junit.Test): 111
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 52
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 41
Headers (org.apache.kafka.common.header.Headers): 34
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 27
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 24
TopicPartition (org.apache.kafka.common.TopicPartition): 22
Position (org.apache.kafka.streams.query.Position): 17
ArrayList (java.util.ArrayList): 13
Header (org.apache.kafka.common.header.Header): 13
HashMap (java.util.HashMap): 12
ByteBuffer (java.nio.ByteBuffer): 11
Struct (org.apache.kafka.connect.data.Struct): 11
Test (org.junit.jupiter.api.Test): 11
LinkedHashMap (java.util.LinkedHashMap): 9
Bytes (org.apache.kafka.common.utils.Bytes): 9
StreamsException (org.apache.kafka.streams.errors.StreamsException): 9
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 8
Metrics (org.apache.kafka.common.metrics.Metrics): 8