
Example 16 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From the class NamedCacheTest, the method shouldNotThrowIllegalArgumentAfterEvictingDirtyRecordAndThenPuttingNewRecordWithSameKey:

@Test
public void shouldNotThrowIllegalArgumentAfterEvictingDirtyRecordAndThenPuttingNewRecordWithSameKey() {
    final LRUCacheEntry dirty = new LRUCacheEntry(new byte[] { 3 }, new RecordHeaders(), true, 0, 0, 0, "");
    final LRUCacheEntry clean = new LRUCacheEntry(new byte[] { 3 });
    final Bytes key = Bytes.wrap(new byte[] { 3 });
    // The eviction listener fires during evict() and re-inserts a clean entry under the same
    // key; this must not trip an IllegalArgumentException in the cache's size accounting
    cache.setListener(dirty1 -> cache.put(key, clean));
    cache.put(key, dirty);
    cache.evict();
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Test(org.junit.Test)
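
For readers who haven't used the class directly: RecordHeaders is the standard mutable implementation of the Headers interface. Below is a minimal, self-contained sketch of constructing and querying one; the header key and values are placeholders, not taken from the tests on this page.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        // Multiple headers may share a key; insertion order is preserved
        headers.add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8));
        headers.add("trace-id", "def-456".getBytes(StandardCharsets.UTF_8));

        // lastHeader returns the most recently added header for a key (or null if absent)
        Header last = headers.lastHeader("trace-id");
        System.out.println(new String(last.value(), StandardCharsets.UTF_8)); // prints def-456

        // headers(key) iterates every header with that key, in insertion order
        for (Header h : headers.headers("trace-id")) {
            System.out.println(h.key() + " -> " + new String(h.value(), StandardCharsets.UTF_8));
        }
    }
}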

Example 17 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From the class KafkaOffsetBackingStoreTest, the method testGetSet:

@Test
public void testGetSet() throws Exception {
    expectConfigure();
    expectStart(Collections.emptyList());
    expectStop();
    // First get() against an empty store
    final Capture<Callback<Void>> firstGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(firstGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(() -> {
        firstGetReadToEndCallback.getValue().onCompletion(null, null);
        return null;
    });
    // Set offsets
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    storeLog.send(EasyMock.aryEq(TP0_KEY.array()), EasyMock.aryEq(TP0_VALUE.array()), EasyMock.capture(callback0));
    PowerMock.expectLastCall();
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    storeLog.send(EasyMock.aryEq(TP1_KEY.array()), EasyMock.aryEq(TP1_VALUE.array()), EasyMock.capture(callback1));
    PowerMock.expectLastCall();
    // Second get() should get the produced data and return the new values
    final Capture<Callback<Void>> secondGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(secondGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(() -> {
        capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY.array(), TP0_VALUE.array(), new RecordHeaders(), Optional.empty()));
        capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY.array(), TP1_VALUE.array(), new RecordHeaders(), Optional.empty()));
        secondGetReadToEndCallback.getValue().onCompletion(null, null);
        return null;
    });
    // Third get() should pick up data produced by someone else and return those values
    final Capture<Callback<Void>> thirdGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(thirdGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(() -> {
        capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY.array(), TP0_VALUE_NEW.array(), new RecordHeaders(), Optional.empty()));
        capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY.array(), TP1_VALUE_NEW.array(), new RecordHeaders(), Optional.empty()));
        thirdGetReadToEndCallback.getValue().onCompletion(null, null);
        return null;
    });
    expectClusterId();
    PowerMock.replayAll();
    store.configure(DEFAULT_DISTRIBUTED_CONFIG);
    store.start();
    // Getting from empty store should return nulls
    Map<ByteBuffer, ByteBuffer> offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS);
    // Since we didn't read them yet, these will be null
    assertNull(offsets.get(TP0_KEY));
    assertNull(offsets.get(TP1_KEY));
    // Set some offsets
    Map<ByteBuffer, ByteBuffer> toSet = new HashMap<>();
    toSet.put(TP0_KEY, TP0_VALUE);
    toSet.put(TP1_KEY, TP1_VALUE);
    final AtomicBoolean invoked = new AtomicBoolean(false);
    Future<Void> setFuture = store.set(toSet, (error, result) -> invoked.set(true));
    assertFalse(setFuture.isDone());
    // Out-of-order callbacks shouldn't matter; all sends must still complete
    // before the store's set callback is invoked
    callback1.getValue().onCompletion(null, null);
    assertFalse(invoked.get());
    callback0.getValue().onCompletion(null, null);
    setFuture.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(invoked.get());
    // Getting data should read to end of our published data and return it
    offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS);
    assertEquals(TP0_VALUE, offsets.get(TP0_KEY));
    assertEquals(TP1_VALUE, offsets.get(TP1_KEY));
    // A third get() should read to the end again and return the values produced by someone else
    offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS);
    assertEquals(TP0_VALUE_NEW, offsets.get(TP0_KEY));
    assertEquals(TP1_VALUE_NEW, offsets.get(TP1_KEY));
    store.stop();
    PowerMock.verifyAll();
}
Also used : HashMap(java.util.HashMap) ByteBuffer(java.nio.ByteBuffer) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Callback(org.apache.kafka.connect.util.Callback) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
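
The eleven-argument ConsumerRecord constructor used in those expectations is hard to read positionally. As a reading aid, here is a hedged sketch with each argument labeled; the topic name and payloads are placeholders rather than values from the test.

import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class ConsumerRecordSketch {
    public static void main(String[] args) {
        byte[] key = new byte[] { 1 };
        byte[] value = new byte[] { 2 };
        ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
                "demo-topic",              // topic (placeholder)
                0,                         // partition
                0L,                        // offset within the partition
                0L,                        // record timestamp
                TimestampType.CREATE_TIME, // how the timestamp was assigned
                key.length,                // serialized key size in bytes
                value.length,              // serialized value size in bytes
                key,                       // key
                value,                     // value
                new RecordHeaders(),       // empty headers, as in the test above
                Optional.empty());         // leader epoch, unknown here
        System.out.println(record);
    }
}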

Example 18 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From the class KafkaConfigBackingStoreTest, the method testRestore:

@Test
public void testRestore() throws Exception {
    // Restoring data should notify only of the latest values after loading is complete. This also validates
    // that inconsistent state is ignored.
    expectConfigure();
    // Overwrite each type at least once to ensure we see the latest data after loading
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
        new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()),
        // Connector after root update should make it through, task update shouldn't
        new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(6), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), CONNECTOR_CONFIG_STRUCTS.get(1));
    deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    deserialized.put(CONFIGS_SERIALIZED.get(5), CONNECTOR_CONFIG_STRUCTS.get(2));
    deserialized.put(CONFIGS_SERIALIZED.get(6), TASK_CONFIG_STRUCTS.get(1));
    logOffset = 7;
    expectStart(existingRecords, deserialized);
    expectPartitionCount(1);
    // Shouldn't see any callbacks since this is during startup
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Should see a single connector and its config should be the last one seen anywhere in the log
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, even if uncommitted
    assertEquals(7, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2]
    assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0)));
    // Should see 2 tasks for that connector. Only config updates before the root key update should be reflected
    assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0)));
    // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0]
    assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(1)));
    assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) LinkedHashMap(java.util.LinkedHashMap) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
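
All of the records above carry an empty RecordHeaders on the consumer side. For completeness, a short sketch of the producer side, where the same class supplies headers to a ProducerRecord; the topic, key, and header contents are placeholder values.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class ProducerHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("source", "demo".getBytes(StandardCharsets.UTF_8));

        // ProducerRecord accepts any Iterable<Header>, which RecordHeaders implements
        ProducerRecord<String, byte[]> record = new ProducerRecord<>(
                "demo-topic",      // topic (placeholder)
                null,              // partition: null lets the partitioner decide
                "demo-key",        // key (placeholder)
                new byte[] { 42 }, // value
                headers);
        System.out.println(record.headers());
    }
}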

Example 19 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From the class KafkaConfigBackingStoreTest, the method testBackgroundUpdateTargetState:

@Test
public void testBackgroundUpdateTargetState() throws Exception {
    // verify that we handle target state changes correctly when they come up through the log
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
        new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 5;
    expectStart(existingRecords, deserialized);
    expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_PAUSED);
    configUpdateListener.onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
    EasyMock.expectLastCall();
    expectPartitionCount(1);
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Should see a single connector; its initial target state is STARTED, and the
    // PAUSED update is only applied by the background refresh below
    ClusterConfigState configState = configStorage.snapshot();
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    configStorage.refresh(0, TimeUnit.SECONDS);
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) LinkedHashMap(java.util.LinkedHashMap) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
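
On the read path, headers travel with each ConsumerRecord and can be inspected after polling. A minimal sketch, reusing the constructor shown earlier (all names are placeholders):

import java.nio.charset.StandardCharsets;
import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class ReadHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("state", "PAUSED".getBytes(StandardCharsets.UTF_8));
        ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
                "demo-topic", 0, 0L, 0L, TimestampType.CREATE_TIME,
                1, 1, new byte[] { 1 }, new byte[] { 2 }, headers, Optional.empty());

        // headers() exposes the record's Headers; lastHeader returns null when the key is absent
        Header state = record.headers().lastHeader("state");
        if (state != null) {
            System.out.println(new String(state.value(), StandardCharsets.UTF_8)); // prints PAUSED
        }
    }
}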

Example 20 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in the apache/kafka project.

From the class KafkaConfigBackingStoreTest, the method testBackgroundConnectorDeletion:

@Test
public void testBackgroundConnectorDeletion() throws Exception {
    // verify that we handle connector deletions correctly when they come up through the log
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
        new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(1));
    deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 5;
    expectStart(existingRecords, deserialized);
    // Null values for both the connector config key and the target state key
    // signal that the connector was deleted
    LinkedHashMap<String, byte[]> serializedData = new LinkedHashMap<>();
    serializedData.put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
    serializedData.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(1));
    Map<String, Struct> deserializedData = new HashMap<>();
    deserializedData.put(CONNECTOR_CONFIG_KEYS.get(0), null);
    deserializedData.put(TARGET_STATE_KEYS.get(0), null);
    expectRead(serializedData, deserializedData);
    configUpdateListener.onConnectorConfigRemove(CONNECTOR_IDS.get(0));
    EasyMock.expectLastCall();
    expectPartitionCount(1);
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Before the refresh, the connector should still be visible with target state STARTED
    ClusterConfigState configState = configStorage.snapshot();
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.subList(0, 2), configState.allTaskConfigs(CONNECTOR_IDS.get(0)));
    assertEquals(2, configState.taskCount(CONNECTOR_IDS.get(0)));
    configStorage.refresh(0, TimeUnit.SECONDS);
    configState = configStorage.snapshot();
    // Connector should now be removed from the snapshot
    assertFalse(configState.contains(CONNECTOR_IDS.get(0)));
    // Task configs for the deleted connector should also be removed from the snapshot
    assertEquals(Collections.emptyList(), configState.allTaskConfigs(CONNECTOR_IDS.get(0)));
    assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0)));
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
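
Finally, RecordHeaders is mutable while a record is being built, so headers can be added and removed freely before sending. A small sketch of that, with placeholder keys:

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.internals.RecordHeaders;

public class MutateHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("a", "1".getBytes(StandardCharsets.UTF_8));
        headers.add("b", "2".getBytes(StandardCharsets.UTF_8));

        // remove(key) deletes every header with that key
        headers.remove("a");
        System.out.println(headers.lastHeader("a")); // prints null

        // toArray() snapshots the remaining headers
        System.out.println(headers.toArray().length); // prints 1
    }
}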

Aggregations

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders) 156
Test (org.junit.Test) 111
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 52
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext) 41
Headers (org.apache.kafka.common.header.Headers) 34
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader) 27
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest) 24
TopicPartition (org.apache.kafka.common.TopicPartition) 22
Position (org.apache.kafka.streams.query.Position) 17
ArrayList (java.util.ArrayList) 13
Header (org.apache.kafka.common.header.Header) 13
HashMap (java.util.HashMap) 12
ByteBuffer (java.nio.ByteBuffer) 11
Struct (org.apache.kafka.connect.data.Struct) 11
Test (org.junit.jupiter.api.Test) 11
LinkedHashMap (java.util.LinkedHashMap) 9
Bytes (org.apache.kafka.common.utils.Bytes) 9
StreamsException (org.apache.kafka.streams.errors.StreamsException) 9
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord) 8
Metrics (org.apache.kafka.common.metrics.Metrics) 8