
Example 6 with ClusterConfigState

Use of org.apache.kafka.connect.runtime.distributed.ClusterConfigState in project kafka by apache.

From the class KafkaConfigBackingStoreTest, method testRestoreTargetStateUnexpectedDeletion.

@Test
public void testRestoreTargetStateUnexpectedDeletion() throws Exception {
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)),
            new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1)),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)),
            new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(3)),
            new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(4)));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), null);
    deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 5;
    expectStart(existingRecords, deserialized);
    // Shouldn't see any callbacks since this is during startup
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // The target state deletion should reset the state to STARTED
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, even if uncommitted
    assertEquals(5, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used: ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) LinkedHashMap(java.util.LinkedHashMap) Struct(org.apache.kafka.connect.data.Struct) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
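
The deletion scenario above leans on targetState() falling back to STARTED when no target-state record survives in the config topic. A minimal sketch of how calling code might consume that default from a snapshot, assuming only the accessor behaviour this test asserts; the class and method names here are ours, not Kafka's:

import org.apache.kafka.connect.runtime.TargetState;
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;

// Illustrative helper, not part of Kafka.
public final class TargetStateInspector {
    private TargetStateInspector() {
    }

    // Reports whether a connector should be running according to the snapshot.
    // targetState() is expected to return STARTED when the explicit target-state
    // record has been deleted, which is exactly what the test above verifies.
    public static boolean shouldRun(ClusterConfigState snapshot, String connectorName) {
        return snapshot.targetState(connectorName) == TargetState.STARTED;
    }
}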

Example 7 with ClusterConfigState

Use of org.apache.kafka.connect.runtime.distributed.ClusterConfigState in project kafka by apache.

From the class KafkaConfigBackingStoreTest, method testBackgroundConnectorDeletion.

@Test
public void testBackgroundConnectorDeletion() throws Exception {
    // verify that we handle connector deletions correctly when they come up through the log
    expectConfigure();
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)),
            new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1)),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)),
            new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(3)));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    logOffset = 5;
    expectStart(existingRecords, deserialized);
    LinkedHashMap<String, byte[]> serializedData = new LinkedHashMap<>();
    serializedData.put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
    serializedData.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(1));
    Map<String, Struct> deserializedData = new HashMap<>();
    deserializedData.put(CONNECTOR_CONFIG_KEYS.get(0), null);
    deserializedData.put(TARGET_STATE_KEYS.get(0), null);
    expectRead(serializedData, deserializedData);
    configUpdateListener.onConnectorConfigRemove(CONNECTOR_IDS.get(0));
    EasyMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Should see a single connector with initial target state STARTED
    ClusterConfigState configState = configStorage.snapshot();
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    configStorage.refresh(0, TimeUnit.SECONDS);
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Struct(org.apache.kafka.connect.data.Struct) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
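
Here the removal arrives through the log while the store is running, and the store surfaces it via the onConnectorConfigRemove callback. A callback-free way to observe the same change is to diff snapshots taken before and after refresh(); a minimal sketch using only the connectors() accessor demonstrated in these tests (the helper is illustrative, not Kafka API):

import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;

// Illustrative helper, not part of Kafka.
public final class SnapshotDiff {
    private SnapshotDiff() {
    }

    // Returns connector names present in the earlier snapshot but missing from
    // the later one, i.e. connectors deleted by records read in the background.
    public static Set<String> removedConnectors(ClusterConfigState before, ClusterConfigState after) {
        Set<String> removed = new HashSet<>(before.connectors());
        removed.removeAll(after.connectors());
        return removed;
    }
}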

Example 8 with ClusterConfigState

Use of org.apache.kafka.connect.runtime.distributed.ClusterConfigState in project kafka by apache.

From the class KafkaConfigBackingStoreTest, method testRestoreZeroTasks.

@Test
public void testRestoreZeroTasks() throws Exception {
    // Restoring data should notify only of the latest values after loading is complete. This also validates
    // that inconsistent state is ignored.
    expectConfigure();
    // Overwrite each type at least once to ensure we see the latest data after loading
    List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
            new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)),
            new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1)),
            new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)),
            new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(3)),
            new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(4)),
            // Connector after root update should make it through, task update shouldn't
            new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(5)),
            new ConsumerRecord<>(TOPIC, 0, 6, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(6)),
            new ConsumerRecord<>(TOPIC, 0, 7, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(7)));
    LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
    deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
    deserialized.put(CONFIGS_SERIALIZED.get(3), CONNECTOR_CONFIG_STRUCTS.get(1));
    deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
    deserialized.put(CONFIGS_SERIALIZED.get(5), CONNECTOR_CONFIG_STRUCTS.get(2));
    deserialized.put(CONFIGS_SERIALIZED.get(6), TASK_CONFIG_STRUCTS.get(1));
    deserialized.put(CONFIGS_SERIALIZED.get(7), TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR);
    logOffset = 8;
    expectStart(existingRecords, deserialized);
    // Shouldn't see any callbacks since this is during startup
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Should see a single connector and its config should be the last one seen anywhere in the log
    ClusterConfigState configState = configStorage.snapshot();
    // Should always be next to be read, even if uncommitted
    assertEquals(8, configState.offset());
    assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
    // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2]
    assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0)));
    // Should see 0 tasks for that connector.
    assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0)));
    // No connectors should be flagged as inconsistent
    assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used: ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) LinkedHashMap(java.util.LinkedHashMap) Struct(org.apache.kafka.connect.data.Struct) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
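
The restore path keeps only the last write per key, and only task configs covered by the latest commit record; here the final commit declares zero tasks, so the snapshot reports an empty task list without flagging an inconsistency. A minimal sketch of post-restore sanity checks mirroring the assertions above, using only accessors shown in these tests (the helper name is ours):

import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;

// Illustrative helper, not part of Kafka.
public final class RestoreChecks {
    private RestoreChecks() {
    }

    // Verifies a restored snapshot: the offset points at the next record to be
    // read, no connector is flagged inconsistent, and the given connector has
    // zero tasks because its last commit record declared none.
    public static void checkZeroTaskRestore(ClusterConfigState state, String connectorName, long expectedOffset) {
        if (state.offset() != expectedOffset)
            throw new IllegalStateException("unexpected offset: " + state.offset());
        if (!state.inconsistentConnectors().isEmpty())
            throw new IllegalStateException("inconsistent connectors: " + state.inconsistentConnectors());
        if (!state.tasks(connectorName).isEmpty())
            throw new IllegalStateException("expected zero tasks for " + connectorName);
    }
}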

Example 9 with ClusterConfigState

Use of org.apache.kafka.connect.runtime.distributed.ClusterConfigState in project kafka by apache.

From the class KafkaConfigBackingStoreTest, method testPutTaskConfigsZeroTasks.

@Test
public void testPutTaskConfigsZeroTasks() throws Exception {
    expectConfigure();
    expectStart(Collections.EMPTY_LIST, Collections.EMPTY_MAP);
    // Task configs should read to end, write to the log, read to end, write root.
    expectReadToEnd(new LinkedHashMap<String, byte[]>());
    // We have 0 tasks
    expectConvertWriteRead(COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0), "tasks", 0);
    // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
    configUpdateListener.onTaskConfigUpdate(Collections.<ConnectorTaskId>emptyList());
    EasyMock.expectLastCall();
    // Records to be read by consumer as it reads to the end of the log
    LinkedHashMap<String, byte[]> serializedConfigs = new LinkedHashMap<>();
    serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
    expectReadToEnd(serializedConfigs);
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Bootstrap as if we had already added the connector, but no tasks had been added yet
    whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.EMPTY_LIST);
    // Null before writing
    ClusterConfigState configState = configStorage.snapshot();
    assertEquals(-1, configState.offset());
    // Writing task configs should block until all the writes have been performed and the root record update
    // has completed
    List<Map<String, String>> taskConfigs = Collections.emptyList();
    configStorage.putTaskConfigs("connector1", taskConfigs);
    // Validate root config by listing all connectors and tasks
    configState = configStorage.snapshot();
    assertEquals(1, configState.offset());
    String connectorName = CONNECTOR_IDS.get(0);
    assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
    assertEquals(Collections.emptyList(), configState.tasks(connectorName));
    assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
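
Even with an empty task list, putTaskConfigs() still writes the root (commit) record, so the snapshot offset advances and the connector is recorded with zero tasks. A minimal sketch of the caller's side, assuming a started KafkaConfigBackingStore and the putTaskConfigs(String, List) signature used in this test (names are illustrative):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
import org.apache.kafka.connect.storage.KafkaConfigBackingStore;

// Illustrative helper, not part of Kafka.
public final class ZeroTaskCommit {
    private ZeroTaskCommit() {
    }

    // Commits an empty task list. The call blocks until the root record is in
    // the log, so a snapshot taken immediately afterwards already reflects it.
    public static void commitNoTasks(KafkaConfigBackingStore store, String connectorName) {
        List<Map<String, String>> noTasks = Collections.emptyList();
        store.putTaskConfigs(connectorName, noTasks);
        ClusterConfigState state = store.snapshot();
        if (!state.tasks(connectorName).isEmpty())
            throw new IllegalStateException("expected zero tasks after commit");
    }
}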

Example 10 with ClusterConfigState

Use of org.apache.kafka.connect.runtime.distributed.ClusterConfigState in project kafka by apache.

From the class KafkaConfigBackingStoreTest, method testPutTaskConfigs.

@Test
public void testPutTaskConfigs() throws Exception {
    expectConfigure();
    expectStart(Collections.EMPTY_LIST, Collections.EMPTY_MAP);
    // Task configs should read to end, write to the log, read to end, write root, then read to end again
    expectReadToEnd(new LinkedHashMap<String, byte[]>());
    expectConvertWriteRead(TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), "properties", SAMPLE_CONFIGS.get(0));
    expectConvertWriteRead(TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), "properties", SAMPLE_CONFIGS.get(1));
    expectReadToEnd(new LinkedHashMap<String, byte[]>());
    // Starts with 0 tasks, after update has 2
    expectConvertWriteRead(COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), "tasks", 2);
    // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
    configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)));
    EasyMock.expectLastCall();
    // Records to be read by consumer as it reads to the end of the log
    LinkedHashMap<String, byte[]> serializedConfigs = new LinkedHashMap<>();
    serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
    serializedConfigs.put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
    serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2));
    expectReadToEnd(serializedConfigs);
    expectStop();
    PowerMock.replayAll();
    configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG);
    configStorage.start();
    // Bootstrap as if we had already added the connector, but no tasks had been added yet
    whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.EMPTY_LIST);
    // Null before writing
    ClusterConfigState configState = configStorage.snapshot();
    assertEquals(-1, configState.offset());
    assertNull(configState.taskConfig(TASK_IDS.get(0)));
    assertNull(configState.taskConfig(TASK_IDS.get(1)));
    // Writing task configs should block until all the writes have been performed and the root record update
    // has completed
    List<Map<String, String>> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1));
    configStorage.putTaskConfigs("connector1", taskConfigs);
    // Validate root config by listing all connectors and tasks
    configState = configStorage.snapshot();
    assertEquals(3, configState.offset());
    String connectorName = CONNECTOR_IDS.get(0);
    assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
    assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName));
    assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1)));
    assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
    configStorage.stop();
    PowerMock.verifyAll();
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
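
The two-task flow follows the same write/read-to-end cycle: putTaskConfigs() blocks until both task records and the commit record are in the log, which is why the test sees offset 3 afterwards. A minimal sketch that publishes two task configs and reads them back, again assuming the putTaskConfigs(String, List) signature from this test (names are illustrative):

import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
import org.apache.kafka.connect.storage.KafkaConfigBackingStore;
import org.apache.kafka.connect.util.ConnectorTaskId;

// Illustrative helper, not part of Kafka.
public final class TwoTaskCommit {
    private TwoTaskCommit() {
    }

    // Publishes two task configs, then reads them back from a fresh snapshot.
    public static void publish(KafkaConfigBackingStore store, String connectorName,
                               Map<String, String> task0, Map<String, String> task1) {
        store.putTaskConfigs(connectorName, Arrays.asList(task0, task1));
        ClusterConfigState state = store.snapshot();
        for (ConnectorTaskId id : state.tasks(connectorName))
            System.out.println(id + " -> " + state.taskConfig(id));
    }
}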

Aggregations

ClusterConfigState (org.apache.kafka.connect.runtime.distributed.ClusterConfigState): 11 uses
Test (org.junit.Test): 11 uses
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 11 uses
LinkedHashMap (java.util.LinkedHashMap): 10 uses
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 8 uses
Struct (org.apache.kafka.connect.data.Struct): 8 uses
HashMap (java.util.HashMap): 3 uses
Map (java.util.Map): 2 uses