Example 81 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.

The class WorkerSinkTaskThreadedTest, method expectPolls.

// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {

        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
Also used: SinkRecord (org.apache.kafka.connect.sink.SinkRecord), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), IAnswer (org.easymock.IAnswer), TopicPartition (org.apache.kafka.common.TopicPartition), Collection (java.util.Collection)
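
The collection capture returned by expectPolls is what the test asserts against after driving the task. A minimal sketch of that pattern, assuming the workerTask field and the KEY, VALUE and FIRST_OFFSET constants from the same test class (names assumed, not defined in this excerpt):

    Capture<Collection<SinkRecord>> capturedRecords = expectPolls(1L);
    PowerMock.replayAll();
    // Drive one poll/put cycle (assumes a single-iteration driver such as workerTask.iteration()).
    workerTask.iteration();
    SinkRecord delivered = capturedRecords.getValue().iterator().next();
    // Key and value come from the stubbed keyConverter/valueConverter toConnectData() answers above.
    assertEquals(KEY, delivered.key());
    assertEquals(VALUE, delivered.value());
    assertEquals(FIRST_OFFSET, delivered.kafkaOffset());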

Example 82 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka by apache.

The class KafkaConfigBackingStoreTest, method expectConvertWriteRead.

// Expect a conversion and write to the underlying log, followed by a subsequent read when the data is consumed back
// from the log. Validate that the data captured during the conversion matches the specified data
// (by checking a single field's value).
private void expectConvertWriteRead(final String configKey, final Schema valueSchema, final byte[] serialized, final String dataFieldName, final Object dataFieldValue) {
    final Capture<Struct> capturedRecord = EasyMock.newCapture();
    if (serialized != null)
        EasyMock.expect(converter.fromConnectData(EasyMock.eq(TOPIC), EasyMock.eq(valueSchema), EasyMock.capture(capturedRecord))).andReturn(serialized);
    storeLog.send(EasyMock.eq(configKey), EasyMock.aryEq(serialized));
    PowerMock.expectLastCall();
    EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(serialized))).andAnswer(new IAnswer<SchemaAndValue>() {

        @Override
        public SchemaAndValue answer() throws Throwable {
            if (dataFieldName != null)
                assertEquals(dataFieldValue, capturedRecord.getValue().get(dataFieldName));
            // Note null schema because default settings for internal serialization are schema-less
            return new SchemaAndValue(null, serialized == null ? null : structToMap(capturedRecord.getValue()));
        }
    });
}
Also used: Struct (org.apache.kafka.connect.data.Struct), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue)
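
The expectation above models a full round trip through a Converter: fromConnectData when the config is written, toConnectData when it is read back from the log. A minimal standalone sketch of that round trip with a schema-less JsonConverter (an illustration of the Converter API; the exact internal converter setup is an assumption, and configTopic, valueSchema and structValue are hypothetical names):

    JsonConverter converter = new JsonConverter();
    converter.configure(Collections.singletonMap("schemas.enable", "false"), false); // false = value converter
    byte[] serialized = converter.fromConnectData(configTopic, valueSchema, structValue);
    SchemaAndValue readBack = converter.toConnectData(configTopic, serialized);
    // With schema-less serialization the returned schema is null and the value is a Map,
    // which is why answer() above returns new SchemaAndValue(null, structToMap(...)).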

Example 83 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka-connect-storage-cloud by confluentinc.

The class DataWriterAvroTest, method createRecordsWithEnums.

protected List<SinkRecord> createRecordsWithEnums(int size, long startOffset, Set<TopicPartition> partitions) {
    String key = "key";
    Schema schema = createEnumSchema();
    SchemaAndValue valueAndSchema = new SchemaAndValue(schema, "bar");
    List<SinkRecord> sinkRecords = new ArrayList<>();
    for (TopicPartition tp : partitions) {
        for (long offset = startOffset; offset < startOffset + size; ++offset) {
            sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, valueAndSchema.value(), offset));
        }
    }
    return sinkRecords;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), Schema (org.apache.kafka.connect.data.Schema), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), SinkRecord (org.apache.kafka.connect.sink.SinkRecord), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue)
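
createEnumSchema() is a helper defined elsewhere in the test class. A sketch of how such an enum-like value is commonly modeled in Connect, namely a STRING schema whose allowed symbols ride along as schema parameters (the parameter names below follow Confluent's AvroData conventions and are an assumption, not copied from the helper):

    Schema enumSchema = SchemaBuilder.string()
        .name("com.example.TestEnum")                                      // hypothetical enum name
        .parameter("io.confluent.connect.avro.Enum", "com.example.TestEnum")
        .parameter("io.confluent.connect.avro.Enum.foo", "foo")
        .parameter("io.confluent.connect.avro.Enum.bar", "bar")
        .build();
    SchemaAndValue valueAndSchema = new SchemaAndValue(enumSchema, "bar"); // the symbol is carried as a plain String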

Example 84 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project kafka-connect-storage-cloud by confluentinc.

The class DataWriterAvroTest, method createRecordsWithUnion.

protected List<SinkRecord> createRecordsWithUnion(int size, long startOffset, Set<TopicPartition> partitions) {
    Schema recordSchema1 = SchemaBuilder.struct().name("Test1").field("test", Schema.INT32_SCHEMA).optional().build();
    Schema recordSchema2 = SchemaBuilder.struct().name("io.confluent.Test2").field("test", Schema.INT32_SCHEMA).optional().build();
    Schema schema = SchemaBuilder.struct().name("io.confluent.connect.avro.Union").field("int", Schema.OPTIONAL_INT32_SCHEMA).field("string", Schema.OPTIONAL_STRING_SCHEMA).field("Test1", recordSchema1).field("io.confluent.Test2", recordSchema2).build();
    SchemaAndValue valueAndSchemaInt = new SchemaAndValue(schema, new Struct(schema).put("int", 12));
    SchemaAndValue valueAndSchemaString = new SchemaAndValue(schema, new Struct(schema).put("string", "teststring"));
    Struct schema1Test = new Struct(schema).put("Test1", new Struct(recordSchema1).put("test", 12));
    SchemaAndValue valueAndSchema1 = new SchemaAndValue(schema, schema1Test);
    Struct schema2Test = new Struct(schema).put("io.confluent.Test2", new Struct(recordSchema2).put("test", 12));
    SchemaAndValue valueAndSchema2 = new SchemaAndValue(schema, schema2Test);
    String key = "key";
    List<SinkRecord> sinkRecords = new ArrayList<>();
    for (TopicPartition tp : partitions) {
        for (long offset = startOffset; offset < startOffset + 4 * size; ) {
            sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, valueAndSchemaInt.value(), offset++));
            sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, valueAndSchemaString.value(), offset++));
            sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, valueAndSchema1.value(), offset++));
            sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, valueAndSchema2.value(), offset++));
        }
    }
    return sinkRecords;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), Schema (org.apache.kafka.connect.data.Schema), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), SinkRecord (org.apache.kafka.connect.sink.SinkRecord), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), Struct (org.apache.kafka.connect.data.Struct)
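
The Union struct above mirrors how Confluent's Avro converter represents an Avro union: one optional field per branch, with exactly one branch populated in any given record. A minimal sketch of recovering the populated branch on the consuming side (assumes record is one of the SinkRecords built above and uses org.apache.kafka.connect.data.Field):

    Struct union = (Struct) record.value();
    String populatedBranch = null;
    for (Field field : union.schema().fields()) {
        if (union.get(field) != null) {   // only one branch is non-null by construction
            populatedBranch = field.name();
            break;
        }
    }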

Example 85 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project connect-utils by jcustenborder.

The class BaseKeyValueTransformation, method process.

protected SchemaAndValue process(R record, SchemaAndValue input) {
    final SchemaAndValue result;
    if (null == input.schema() && null == input.value()) {
        return new SchemaAndValue(null, null);
    }
    if (input.value() instanceof Map) {
        log.trace("process() - Processing as map");
        result = processMap(record, (Map<String, Object>) input.value());
        return result;
    }
    if (null == input.schema()) {
        log.trace("process() - Determining schema");
        final Schema schema = SchemaHelper.schema(input.value());
        return process(record, new SchemaAndValue(schema, input.value()));
    }
    log.trace("process() - input.value() has as schema. schema = {}", input.schema());
    if (Schema.Type.STRUCT == input.schema().type()) {
        result = processStruct(record, input.schema(), (Struct) input.value());
    } else if (Timestamp.LOGICAL_NAME.equals(input.schema().name())) {
        result = processTimestamp(record, input.schema(), (Date) input.value());
    } else if (org.apache.kafka.connect.data.Date.LOGICAL_NAME.equals(input.schema().name())) {
        result = processDate(record, input.schema(), (Date) input.value());
    } else if (Time.LOGICAL_NAME.equals(input.schema().name())) {
        result = processTime(record, input.schema(), (Date) input.value());
    } else if (Decimal.LOGICAL_NAME.equals(input.schema().name())) {
        result = processDecimal(record, input.schema(), (BigDecimal) input.value());
    } else if (Schema.Type.STRING == input.schema().type()) {
        result = processString(record, input.schema(), (String) input.value());
    } else if (Schema.Type.BYTES == input.schema().type()) {
        result = processBytes(record, input.schema(), (byte[]) input.value());
    } else if (Schema.Type.INT8 == input.schema().type()) {
        result = processInt8(record, input.schema(), (byte) input.value());
    } else if (Schema.Type.INT16 == input.schema().type()) {
        result = processInt16(record, input.schema(), (short) input.value());
    } else if (Schema.Type.INT32 == input.schema().type()) {
        result = processInt32(record, input.schema(), (int) input.value());
    } else if (Schema.Type.INT64 == input.schema().type()) {
        result = processInt64(record, input.schema(), (long) input.value());
    } else if (Schema.Type.FLOAT32 == input.schema().type()) {
        result = processFloat32(record, input.schema(), (float) input.value());
    } else if (Schema.Type.FLOAT64 == input.schema().type()) {
        result = processFloat64(record, input.schema(), (double) input.value());
    } else if (Schema.Type.ARRAY == input.schema().type()) {
        result = processArray(record, input.schema(), (List<Object>) input.value());
    } else if (Schema.Type.MAP == input.schema().type()) {
        result = processMap(record, input.schema(), (Map<Object, Object>) input.value());
    } else if (Schema.Type.BOOLEAN == input.schema().type()) {
        result = processBoolean(record, input.schema(), (boolean) input.value());
    } else {
        throw new UnsupportedOperationException(String.format("Schema is not supported. type='%s' name='%s'", input.schema().type(), input.schema().name()));
    }
    return result;
}
Also used: Schema (org.apache.kafka.connect.data.Schema), Map (java.util.Map), Date (java.util.Date), BigDecimal (java.math.BigDecimal), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), Struct (org.apache.kafka.connect.data.Struct)
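
When the value arrives without a schema, the method infers one and recurses so that the typed branches above still apply. A minimal sketch of that path, assuming SchemaHelper.schema(...) maps a plain String to a STRING schema (its full behavior is not shown in this excerpt):

    SchemaAndValue schemaless = new SchemaAndValue(null, "some-value");     // no schema attached
    Schema inferred = SchemaHelper.schema(schemaless.value());              // assumed: Schema.Type.STRING
    SchemaAndValue typed = new SchemaAndValue(inferred, schemaless.value());
    // process(record, typed) now takes the Schema.Type.STRING branch and delegates to processString(...).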

Aggregations

SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue) 140
Test (org.junit.Test) 57
Schema (org.apache.kafka.connect.data.Schema) 49
Test (org.junit.jupiter.api.Test) 46
HashMap (java.util.HashMap) 32
Struct (org.apache.kafka.connect.data.Struct) 21
Date (org.apache.kafka.connect.data.Date) 18
BigInteger (java.math.BigInteger) 12
Map (java.util.Map) 12
ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus) 11
BigDecimal (java.math.BigDecimal) 10
TopicPartition (org.apache.kafka.common.TopicPartition) 9
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 8
TaskStatus (org.apache.kafka.connect.runtime.TaskStatus) 8
Callback (org.apache.kafka.clients.producer.Callback) 7
SinkRecord (org.apache.kafka.connect.sink.SinkRecord) 7
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString) 7
Collection (java.util.Collection) 6
GregorianCalendar (java.util.GregorianCalendar) 6
LinkedHashMap (java.util.LinkedHashMap) 6