Example 41 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in the Apache Kafka project.

From class OffsetStorageReaderImpl, method offsets().

@Override
@SuppressWarnings("unchecked")
public <T> Map<Map<String, T>, Map<String, Object>> offsets(Collection<Map<String, T>> partitions) {
    // Serialize keys so backing store can work with them
    Map<ByteBuffer, Map<String, T>> serializedToOriginal = new HashMap<>(partitions.size());
    for (Map<String, T> key : partitions) {
        try {
            // Offsets are treated as schemaless; their format is only validated here (and for the returned value below)
            OffsetUtils.validateFormat(key);
            byte[] keySerialized = keyConverter.fromConnectData(namespace, null, Arrays.asList(namespace, key));
            ByteBuffer keyBuffer = (keySerialized != null) ? ByteBuffer.wrap(keySerialized) : null;
            serializedToOriginal.put(keyBuffer, key);
        } catch (Throwable t) {
            log.error("CRITICAL: Failed to serialize partition key when getting offsets for task with " + "namespace {}. No value for this data will be returned, which may break the " + "task or cause it to skip some data.", namespace, t);
        }
    }
    // Get serialized key -> serialized value from backing store
    Map<ByteBuffer, ByteBuffer> raw;
    try {
        raw = backingStore.get(serializedToOriginal.keySet(), null).get();
    } catch (Exception e) {
        log.error("Failed to fetch offsets from namespace {}: ", namespace, e);
        throw new ConnectException("Failed to fetch offsets.", e);
    }
    // Deserialize all the values and map back to the original keys
    Map<Map<String, T>, Map<String, Object>> result = new HashMap<>(partitions.size());
    for (Map.Entry<ByteBuffer, ByteBuffer> rawEntry : raw.entrySet()) {
        try {
            // Since null could be a valid key, explicitly check whether map contains the key
            if (!serializedToOriginal.containsKey(rawEntry.getKey())) {
                log.error("Should be able to map {} back to a requested partition-offset key, backing " + "store may have returned invalid data", rawEntry.getKey());
                continue;
            }
            Map<String, T> origKey = serializedToOriginal.get(rawEntry.getKey());
            SchemaAndValue deserializedSchemaAndValue = valueConverter.toConnectData(namespace, rawEntry.getValue() != null ? rawEntry.getValue().array() : null);
            Object deserializedValue = deserializedSchemaAndValue.value();
            OffsetUtils.validateFormat(deserializedValue);
            result.put(origKey, (Map<String, Object>) deserializedValue);
        } catch (Throwable t) {
            log.error("CRITICAL: Failed to deserialize offset data when getting offsets for task with" + " namespace {}. No value for this data will be returned, which may break the " + "task or cause it to skip some data. This could either be due to an error in " + "the connector implementation or incompatible schema.", namespace, t);
        }
    }
    return result;
}
Also used: HashMap (java.util.HashMap), ByteBuffer (java.nio.ByteBuffer), ConnectException (org.apache.kafka.connect.errors.ConnectException), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), Map (java.util.Map)
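
The method above leans on the Converter round trip: fromConnectData turns the schemaless offset map into raw bytes, and toConnectData wraps the deserialized result in a SchemaAndValue whose schema is null. A minimal standalone sketch of that round trip, using the real JsonConverter (with embedded schemas disabled, matching Connect's default internal serialization) in place of the converters injected above; the topic name and offset map are illustrative:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public class OffsetRoundTripSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Disable embedded schemas, matching Connect's default internal converters
        Map<String, Object> config = new HashMap<>();
        config.put("schemas.enable", "false");
        converter.configure(config, false); // false = configure as a value converter

        // An illustrative schemaless offset, like the maps validated above
        Map<String, Object> offset = new HashMap<>();
        offset.put("position", 42L);

        byte[] serialized = converter.fromConnectData("offsets-topic", null, offset);
        SchemaAndValue restored = converter.toConnectData("offsets-topic", serialized);

        System.out.println(restored.schema()); // null: schemaless data
        System.out.println(restored.value());  // {position=42}
    }
}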

Example 42 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in the Apache Kafka project.

From class KafkaConfigBackingStoreTest, method expectStart().

// If non-empty, deserializations should be a LinkedHashMap
private void expectStart(final List<ConsumerRecord<String, byte[]>> preexistingRecords, final Map<byte[], Struct> deserializations) throws Exception {
    storeLog.start();
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {

        @Override
        public Object answer() throws Throwable {
            for (ConsumerRecord<String, byte[]> rec : preexistingRecords) capturedConsumedCallback.getValue().onCompletion(null, rec);
            return null;
        }
    });
    for (Map.Entry<byte[], Struct> deserializationEntry : deserializations.entrySet()) {
        // Note null schema because default settings for internal serialization are schema-less
        EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(deserializationEntry.getKey()))).andReturn(new SchemaAndValue(null, structToMap(deserializationEntry.getValue())));
    }
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Map (java.util.Map), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Struct (org.apache.kafka.connect.data.Struct), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue)
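
The structToMap helper referenced in the expectation above is a private utility of the test class and is not shown here. A hedged sketch of what such a helper might look like, assuming it simply flattens a Struct's fields into an insertion-ordered map (matching the LinkedHashMap note above); the class name and exact behavior are inferred, not copied from the Kafka tests:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Struct;

public class StructToMapSketch {
    // Hypothetical stand-in for the test's structToMap helper: flattens a Struct
    // into a field-name -> value map, the schemaless shape a converter returns
    static Map<String, Object> structToMap(Struct struct) {
        Map<String, Object> result = new LinkedHashMap<>();
        for (Field field : struct.schema().fields())
            result.put(field.name(), struct.get(field));
        return result;
    }
}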

Example 43 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in the Apache Kafka project.

From class KafkaConfigBackingStoreTest, method expectConvertWriteRead().

// Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back
// from the log. Validate the data that is captured when the conversion is performed matches the specified data
// (by checking a single field's value)
private void expectConvertWriteRead(final String configKey, final Schema valueSchema, final byte[] serialized, final String dataFieldName, final Object dataFieldValue) {
    final Capture<Struct> capturedRecord = EasyMock.newCapture();
    if (serialized != null)
        EasyMock.expect(converter.fromConnectData(EasyMock.eq(TOPIC), EasyMock.eq(valueSchema), EasyMock.capture(capturedRecord))).andReturn(serialized);
    storeLog.send(EasyMock.eq(configKey), EasyMock.aryEq(serialized));
    PowerMock.expectLastCall();
    EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(serialized))).andAnswer(new IAnswer<SchemaAndValue>() {

        @Override
        public SchemaAndValue answer() throws Throwable {
            if (dataFieldName != null)
                assertEquals(dataFieldValue, capturedRecord.getValue().get(dataFieldName));
            // Note null schema because default settings for internal serialization are schema-less
            return new SchemaAndValue(null, serialized == null ? null : structToMap(capturedRecord.getValue()));
        }
    });
}
Also used: Struct (org.apache.kafka.connect.data.Struct), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue)
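
The capture-and-answer pairing above is the heart of this expectation: whatever Struct the converter serializes is captured, and the matching toConnectData expectation replays it as a schemaless SchemaAndValue. A stripped-down sketch of the same EasyMock pattern against a hypothetical Codec interface (not part of Kafka), to isolate the mechanics:

import org.easymock.Capture;
import org.easymock.EasyMock;
import org.easymock.IAnswer;

public class CaptureAnswerSketch {
    // Hypothetical interface, standing in for the Converter in the test above
    interface Codec {
        byte[] encode(String input);
        String decode(byte[] data);
    }

    public static void main(String[] args) {
        Codec codec = EasyMock.createMock(Codec.class);
        final Capture<String> captured = EasyMock.newCapture();

        // Capture whatever is encoded so the decode expectation can echo it
        // back, mirroring the convert-write-read round trip mocked above
        EasyMock.expect(codec.encode(EasyMock.capture(captured))).andReturn(new byte[0]);
        EasyMock.expect(codec.decode(EasyMock.anyObject(byte[].class))).andAnswer(new IAnswer<String>() {
            @Override
            public String answer() throws Throwable {
                return captured.getValue();
            }
        });
        EasyMock.replay(codec);

        codec.encode("hello");
        // Prints "hello": the captured input flows back through decode
        System.out.println(codec.decode(new byte[0]));
        EasyMock.verify(codec);
    }
}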

Example 44 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in the Apache Kafka project.

From class KafkaStatusBackingStoreTest, method putSafeConnectorIgnoresStaleStatus().

@Test
public void putSafeConnectorIgnoresStaleStatus() {
    byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    // the persisted status came from a different host and has a newer generation
    Map<String, Object> statusMap = new HashMap<>();
    statusMap.put("worker_id", otherWorkerId);
    statusMap.put("state", "RUNNING");
    statusMap.put("generation", 1L);
    expect(converter.toConnectData(STATUS_TOPIC, value)).andReturn(new SchemaAndValue(null, statusMap));
    // we're verifying that there is no call to KafkaBasedLog.send
    replayAll();
    store.read(consumerRecord(0, "status-connector-conn", value));
    store.putSafe(new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, otherWorkerId, 1);
    assertEquals(status, store.get(CONNECTOR));
    verifyAll();
}
Also used: HashMap (java.util.HashMap), ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus), EasyMock.anyObject (org.easymock.EasyMock.anyObject), MockTime (org.apache.kafka.common.utils.MockTime), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), Test (org.junit.Test)
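
The test implies the mapping the store performs: a schemaless map with worker_id, state, and generation keys becomes an equivalent ConnectorStatus. A hedged sketch of that mapping, with the field handling inferred from the test rather than copied from KafkaStatusBackingStore:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.runtime.ConnectorStatus;

public class StatusMapSketch {
    // Inferred from the test above: build a ConnectorStatus from the
    // schemaless map a Converter returns for a status record
    static ConnectorStatus fromMap(String connector, Map<String, Object> map) {
        String workerId = (String) map.get("worker_id");
        ConnectorStatus.State state = ConnectorStatus.State.valueOf((String) map.get("state"));
        int generation = ((Long) map.get("generation")).intValue();
        return new ConnectorStatus(connector, state, workerId, generation);
    }

    public static void main(String[] args) {
        Map<String, Object> statusMap = new HashMap<>();
        statusMap.put("worker_id", "anotherhost:8083");
        statusMap.put("state", "RUNNING");
        statusMap.put("generation", 1L);
        System.out.println(fromMap("conn", statusMap).state()); // RUNNING
    }
}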

Example 45 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in the Apache Kafka project.

From class KafkaStatusBackingStoreTest, method putSafeOverridesValueSetBySameWorker().

@Test
public void putSafeOverridesValueSetBySameWorker() {
    final byte[] value = new byte[0];
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    // the persisted status came from the same host, but has a newer generation
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", WORKER_ID);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);
    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);
    expect(converter.toConnectData(STATUS_TOPIC, value)).andReturn(new SchemaAndValue(null, firstStatusRead)).andReturn(new SchemaAndValue(null, secondStatusRead));
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            store.read(consumerRecord(1, "status-connector-conn", value));
            return null;
        }
    });
    replayAll();
    store.read(consumerRecord(0, "status-connector-conn", value));
    store.putSafe(new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    assertEquals(status, store.get(CONNECTOR));
    verifyAll();
}
Also used: HashMap (java.util.HashMap), Schema (org.apache.kafka.connect.data.Schema), ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), Struct (org.apache.kafka.connect.data.Struct), Callback (org.apache.kafka.clients.producer.Callback), EasyMock.anyObject (org.easymock.EasyMock.anyObject), MockTime (org.apache.kafka.common.utils.MockTime), Test (org.junit.Test)
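
Across all five examples, SchemaAndValue is built the same way: a Schema (or null for schemaless data) paired with the deserialized value, read back via schema() and value(). A minimal sketch of that small surface; the values are illustrative:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

public class SchemaAndValueBasics {
    public static void main(String[] args) {
        // A typed value, as a Converter returns for data with an attached schema
        SchemaAndValue typed = new SchemaAndValue(Schema.STRING_SCHEMA, "RUNNING");
        System.out.println(typed.schema().type()); // STRING
        System.out.println(typed.value());         // RUNNING

        // A schemaless value, the shape the status-store tests above mock:
        // a null schema paired with a plain Java object
        SchemaAndValue schemaless = new SchemaAndValue(null, 42L);
        System.out.println(schemaless.schema()); // null
        System.out.println(schemaless.value());  // 42
    }
}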

Aggregations

SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue): 46
Test (org.junit.Test): 36
Schema (org.apache.kafka.connect.data.Schema): 19
HashMap (java.util.HashMap): 14
Date (org.apache.kafka.connect.data.Date): 9
Struct (org.apache.kafka.connect.data.Struct): 6
BigInteger (java.math.BigInteger): 5
Map (java.util.Map): 5
MockTime (org.apache.kafka.common.utils.MockTime): 5
ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus): 5
EasyMock.anyObject (org.easymock.EasyMock.anyObject): 5
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 4
BigDecimal (java.math.BigDecimal): 3
Collection (java.util.Collection): 3
GregorianCalendar (java.util.GregorianCalendar): 3
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 3
TaskStatus (org.apache.kafka.connect.runtime.TaskStatus): 3
ByteBuffer (java.nio.ByteBuffer): 2
LinkedHashMap (java.util.LinkedHashMap): 2