use of org.apache.kafka.connect.runtime.ConnectorStatus in project kafka by apache.
the class KafkaStatusBackingStore method readConnectorStatus.
private void readConnectorStatus(String key, byte[] value) {
    String connector = parseConnectorStatusKey(key);
    if (connector.isEmpty()) {
        log.warn("Discarding record with invalid connector status key {}", key);
        return;
    }
    if (value == null) {
        log.trace("Removing status for connector {}", connector);
        remove(connector);
        return;
    }
    ConnectorStatus status = parseConnectorStatus(connector, value);
    if (status == null)
        return;
    synchronized (this) {
        log.trace("Received connector {} status update {}", connector, status);
        CacheEntry<ConnectorStatus> entry = getOrAdd(connector);
        entry.put(status);
    }
}
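The key layout that parseConnectorStatusKey(...) unpacks is visible in the tests below, where connector status records use keys of the form "status-connector-<name>". A minimal sketch of that parsing step, assuming a prefix constant named CONNECTOR_STATUS_PREFIX (the store's actual constant name and null handling may differ):

private static final String CONNECTOR_STATUS_PREFIX = "status-connector-"; // assumed name, for illustration only

// Returns the connector name embedded in a status record key, or "" when the key
// does not follow the "status-connector-<name>" layout, which the caller treats as invalid.
private String parseConnectorStatusKey(String key) {
    if (key == null || !key.startsWith(CONNECTOR_STATUS_PREFIX))
        return "";
    return key.substring(CONNECTOR_STATUS_PREFIX.length());
}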
use of org.apache.kafka.connect.runtime.ConnectorStatus in project kafka by apache.
the class StandaloneHerderTest method testDestroyConnector.
@Test
public void testDestroyConnector() throws Exception {
    connector = PowerMock.createMock(BogusSourceConnector.class);
    expectAdd(SourceSink.SOURCE);
    Map<String, String> config = connectorConfig(SourceSink.SOURCE);
    Connector connectorMock = PowerMock.createMock(SourceConnector.class);
    expectConfigValidation(connectorMock, true, config);
    EasyMock.expect(statusBackingStore.getAll(CONNECTOR_NAME)).andReturn(Collections.emptyList());
    statusBackingStore.put(new ConnectorStatus(CONNECTOR_NAME, AbstractStatus.State.DESTROYED, WORKER_ID, 0));
    statusBackingStore.put(new TaskStatus(new ConnectorTaskId(CONNECTOR_NAME, 0), TaskStatus.State.DESTROYED, WORKER_ID, 0));
    expectDestroy();
    PowerMock.replayAll();

    herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback);
    Herder.Created<ConnectorInfo> connectorInfo = createCallback.get(1000L, TimeUnit.SECONDS);
    assertEquals(createdInfo(SourceSink.SOURCE), connectorInfo.result());

    FutureCallback<Herder.Created<ConnectorInfo>> deleteCallback = new FutureCallback<>();
    herder.deleteConnectorConfig(CONNECTOR_NAME, deleteCallback);
    deleteCallback.get(1000L, TimeUnit.MILLISECONDS);

    // Second deletion should fail since the connector is gone
    FutureCallback<Herder.Created<ConnectorInfo>> failedDeleteCallback = new FutureCallback<>();
    herder.deleteConnectorConfig(CONNECTOR_NAME, failedDeleteCallback);
    try {
        failedDeleteCallback.get(1000L, TimeUnit.MILLISECONDS);
        fail("Should have thrown NotFoundException");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof NotFoundException);
    }
    PowerMock.verifyAll();
}
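For reference, the DESTROYED statuses the mocked statusBackingStore expects above can be built and inspected with just the constructors and accessors used elsewhere in these examples; a small illustration (CONNECTOR_NAME and WORKER_ID are the test class's constants):

ConnectorStatus destroyed = new ConnectorStatus(CONNECTOR_NAME, AbstractStatus.State.DESTROYED, WORKER_ID, 0);
TaskStatus destroyedTask = new TaskStatus(new ConnectorTaskId(CONNECTOR_NAME, 0), TaskStatus.State.DESTROYED, WORKER_ID, 0);

// The accessors exercised by the other tests expose the recorded fields.
assertEquals(AbstractStatus.State.DESTROYED, destroyed.state());
assertEquals(WORKER_ID, destroyed.workerId());
assertEquals(0, destroyed.generation());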
use of org.apache.kafka.connect.runtime.ConnectorStatus in project kafka by apache.
the class KafkaStatusBackingStoreTest method putSafeConnectorIgnoresStaleStatus.
@Test
public void putSafeConnectorIgnoresStaleStatus() {
    byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";

    // the persisted status came from a different host and has a newer generation
    Map<String, Object> statusMap = new HashMap<>();
    statusMap.put("worker_id", otherWorkerId);
    statusMap.put("state", "RUNNING");
    statusMap.put("generation", 1L);
    when(converter.toConnectData(STATUS_TOPIC, value)).thenReturn(new SchemaAndValue(null, statusMap));
    store.read(consumerRecord(0, "status-connector-conn", value));

    // the local status is older (generation 0), so putSafe must not publish it
    store.putSafe(new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0));
    verify(kafkaBasedLog, never()).send(anyString(), any(), any(Callback.class));

    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, otherWorkerId, 1);
    assertEquals(status, store.get(CONNECTOR));
}
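The behaviour verified here implies a staleness check inside putSafe(...): the send is skipped when the cached status outranks the candidate. A rough sketch of that comparison under assumed names (this is not the actual KafkaStatusBackingStore code, whose ownership rules are more involved):

// Sketch only: treat the candidate as stale when the cached entry has a newer
// generation, or the same generation but a different owning worker.
private boolean isStale(ConnectorStatus candidate, ConnectorStatus cached) {
    if (cached == null)
        return false;   // nothing cached yet, safe to write
    if (cached.generation() > candidate.generation())
        return true;    // cached entry came from a newer rebalance generation
    return cached.generation() == candidate.generation()
            && !cached.workerId().equals(candidate.workerId());
}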
use of org.apache.kafka.connect.runtime.ConnectorStatus in project kafka by apache.
the class KafkaStatusBackingStoreTest method putConnectorStateShouldOverride.
@Test
public void putConnectorStateShouldOverride() {
    final byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";

    // the first persisted status came from a different host and has a newer generation
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", otherWorkerId);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);

    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);

    when(converter.toConnectData(STATUS_TOPIC, value))
            .thenReturn(new SchemaAndValue(null, firstStatusRead))
            .thenReturn(new SchemaAndValue(null, secondStatusRead));
    when(converter.fromConnectData(eq(STATUS_TOPIC), any(Schema.class), any(Struct.class))).thenReturn(value);

    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        store.read(consumerRecord(1, "status-connector-conn", value));
        return null;
    }).when(kafkaBasedLog).send(eq("status-connector-conn"), eq(value), any(Callback.class));

    store.read(consumerRecord(0, "status-connector-conn", value));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    store.put(status);
    assertEquals(status, store.get(CONNECTOR));
}
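The consumerRecord(...) helper these reads rely on is not part of the snippet; a plausible sketch, assuming the test's STATUS_TOPIC constant and a single partition (only the offset differs between the first and second read):

private static ConsumerRecord<String, byte[]> consumerRecord(long offset, String key, byte[] value) {
    // Partition 0 assumed; the tests only use the offset to order the two reads.
    return new ConsumerRecord<>(STATUS_TOPIC, 0, offset, key, value);
}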
use of org.apache.kafka.connect.runtime.ConnectorStatus in project kafka by apache.
the class KafkaStatusBackingStoreTest method putSafeWithNoPreviousValueIsPropagated.
@Test
public void putSafeWithNoPreviousValueIsPropagated() {
    final byte[] value = new byte[0];
    when(converter.fromConnectData(eq(STATUS_TOPIC), any(Schema.class), any(Struct.class))).thenReturn(value);

    final ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.FAILED, WORKER_ID, 0);
    store.putSafe(status);

    // with no previous value in the cache, the status write must reach the log
    verify(kafkaBasedLog).send(eq("status-connector-" + CONNECTOR), eq(value), any(Callback.class));

    ArgumentCaptor<Struct> captor = ArgumentCaptor.forClass(Struct.class);
    verify(converter).fromConnectData(eq(STATUS_TOPIC), any(Schema.class), captor.capture());
    assertEquals(status.state().toString(), captor.getValue().get(KafkaStatusBackingStore.STATE_KEY_NAME));
    assertEquals(status.workerId(), captor.getValue().get(KafkaStatusBackingStore.WORKER_ID_KEY_NAME));
    assertEquals(status.generation(), captor.getValue().get(KafkaStatusBackingStore.GENERATION_KEY_NAME));
}
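The three assertions read back fields whose names match the map keys used by the read-path tests above ("state", "worker_id", "generation"). A hedged sketch of assembling an equivalent Struct, using a schema built inline for illustration rather than the store's actual status schema (field types and optional fields may differ):

Schema statusSchema = SchemaBuilder.struct()
        .field(KafkaStatusBackingStore.STATE_KEY_NAME, Schema.STRING_SCHEMA)
        .field(KafkaStatusBackingStore.WORKER_ID_KEY_NAME, Schema.STRING_SCHEMA)
        .field(KafkaStatusBackingStore.GENERATION_KEY_NAME, Schema.INT32_SCHEMA)
        .build();

Struct statusStruct = new Struct(statusSchema)
        .put(KafkaStatusBackingStore.STATE_KEY_NAME, status.state().toString())
        .put(KafkaStatusBackingStore.WORKER_ID_KEY_NAME, status.workerId())
        .put(KafkaStatusBackingStore.GENERATION_KEY_NAME, status.generation());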