Search in sources: KafkaDeserializationSchemaWrapper

Example 1 with KafkaDeserializationSchemaWrapper

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaDeserializationSchemaWrapper in project flink by apache.

From the class KafkaConsumerTestBase, method writeAppendSequence.

protected void writeAppendSequence(String topicName, final int originalNumElements, final int numElementsToAppend, final int parallelism) throws Exception {
    LOG.info("\n===================================\n" + "== Appending sequence of " + numElementsToAppend + " into " + topicName + "===================================");
    final TypeInformation<Tuple2<Integer, Integer>> resultType = TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {
    });
    final SerializationSchema<Tuple2<Integer, Integer>> serSchema = new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig());
    final KafkaDeserializationSchema<Tuple2<Integer, Integer>> deserSchema = new KafkaDeserializationSchemaWrapper<>(new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));
    // -------- Write the append sequence --------
    StreamExecutionEnvironment writeEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    DataStream<Tuple2<Integer, Integer>> stream = writeEnv.addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

        private volatile boolean running = true; // volatile: cancel() is called from a different thread

        @Override
        public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
            int cnt = originalNumElements;
            int partition = getRuntimeContext().getIndexOfThisSubtask();
            while (running && cnt < numElementsToAppend + originalNumElements) {
                ctx.collect(new Tuple2<>(partition, cnt));
                cnt++;
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    }).setParallelism(parallelism);
    // the producer must not produce duplicates
    Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
    producerProperties.setProperty("retries", "0");
    producerProperties.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties, new Tuple2FlinkPartitioner(parallelism)).setParallelism(parallelism);
    try {
        writeEnv.execute("Write sequence");
    } catch (Exception e) {
        throw new Exception("Failed to append sequence to Kafka; append job failed.", e);
    }
    LOG.info("Finished writing append sequence");
    // we need to validate the sequence, because Kafka producers are not exactly-once
    LOG.info("Validating sequence");
    while (!getRunningJobs(client).isEmpty()) {
        Thread.sleep(50);
    }
    if (!validateSequence(topicName, parallelism, deserSchema, originalNumElements + numElementsToAppend)) {
        throw new Exception("Could not append a valid sequence to Kafka.");
    }
}
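What this example exercises is the adapter at the centre of this page: KafkaDeserializationSchemaWrapper takes a plain, value-only DeserializationSchema and makes it usable wherever a record-aware KafkaDeserializationSchema is expected. Below is a minimal standalone sketch of that adaptation; the topic name and record contents are invented for illustration, and only the wrapper and schema classes come from the example above.

import java.nio.charset.StandardCharsets;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaDeserializationSchemaWrapper;
import org.apache.kafka.clients.consumer.ConsumerRecord;

public class WrapperSketch {

    public static void main(String[] args) throws Exception {
        // Adapt a value-only schema to the record-aware interface; the wrapper
        // delegates to the inner schema and ignores key, partition, and offset.
        KafkaDeserializationSchema<String> wrapped =
                new KafkaDeserializationSchemaWrapper<>(new SimpleStringSchema());
        // "demo-topic" and the payload are placeholders, not values from the test.
        ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
                "demo-topic", 0, 0L, null, "hello".getBytes(StandardCharsets.UTF_8));
        // prints "hello": only record.value() reaches the inner schema
        System.out.println(wrapped.deserialize(record));
    }
}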

Example 2 with KafkaDeserializationSchemaWrapper

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaDeserializationSchemaWrapper in project flink by apache.

From the class KafkaConsumerTestBase, method runStartFromLatestOffsets.

/**
 * This test ensures that when explicitly set to start from latest record, the consumer ignores
 * the "auto.offset.reset" behaviour as well as any committed group offsets in Kafka.
 */
public void runStartFromLatestOffsets() throws Exception {
    // 50 records written to each of 3 partitions before launching a latest-starting consuming job
    final int parallelism = 3;
    final int recordsInEachPartition = 50;
    // each partition will be written an extra 200 records
    final int extraRecordsInEachPartition = 200;
    // all data already in the topic before the consuming topology starts should be ignored
    final String topicName = writeSequence("testStartFromLatestOffsetsTopic", recordsInEachPartition, parallelism, 1);
    // the committed offsets should be ignored
    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();
    kafkaOffsetHandler.setCommittedOffset(topicName, 0, 23);
    kafkaOffsetHandler.setCommittedOffset(topicName, 1, 31);
    kafkaOffsetHandler.setCommittedOffset(topicName, 2, 43);
    // job names for the topologies for writing and consuming the extra records
    final String consumeExtraRecordsJobName = "Consume Extra Records Job";
    final String writeExtraRecordsJobName = "Write Extra Records Job";
    // serialization / deserialization schemas for writing and consuming the extra records
    final TypeInformation<Tuple2<Integer, Integer>> resultType = TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {
    });
    final SerializationSchema<Tuple2<Integer, Integer>> serSchema = new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig());
    final KafkaDeserializationSchema<Tuple2<Integer, Integer>> deserSchema = new KafkaDeserializationSchemaWrapper<>(new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));
    // setup and run the latest-consuming job
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    final Properties readProps = new Properties();
    readProps.putAll(standardProps);
    // this should be ignored
    readProps.setProperty("auto.offset.reset", "earliest");
    DataStreamSource<Tuple2<Integer, Integer>> stream;
    if (useNewSource) {
        KafkaSource<Tuple2<Integer, Integer>> source = kafkaServer.getSourceBuilder(topicName, deserSchema, readProps).setStartingOffsets(OffsetsInitializer.latest()).build();
        stream = env.fromSource(source, WatermarkStrategy.noWatermarks(), "KafkaSource");
    } else {
        FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> latestReadingConsumer = kafkaServer.getConsumer(topicName, deserSchema, readProps);
        latestReadingConsumer.setStartFromLatest();
        stream = env.addSource(latestReadingConsumer);
    }
    stream.setParallelism(parallelism).flatMap(new FlatMapFunction<Tuple2<Integer, Integer>, Object>() {

        @Override
        public void flatMap(Tuple2<Integer, Integer> value, Collector<Object> out) throws Exception {
            if (value.f1 - recordsInEachPartition < 0) {
                throw new RuntimeException("test failed; consumed a record that was previously written: " + value);
            }
        }
    }).setParallelism(1).addSink(new DiscardingSink<>());
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    final JobID consumeJobId = jobGraph.getJobID();
    final AtomicReference<Throwable> error = new AtomicReference<>();
    Thread consumeThread = new Thread(() -> {
        try {
            submitJobAndWaitForResult(client, jobGraph, getClass().getClassLoader());
        } catch (Throwable t) {
            if (!ExceptionUtils.findThrowable(t, JobCancellationException.class).isPresent()) {
                error.set(t);
            }
        }
    });
    consumeThread.start();
    // wait until the consuming job has started, to be extra safe
    waitUntilJobIsRunning(client);
    // setup the extra records writing job
    final StreamExecutionEnvironment env2 = StreamExecutionEnvironment.getExecutionEnvironment();
    env2.setParallelism(parallelism);
    DataStream<Tuple2<Integer, Integer>> extraRecordsStream = env2.addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

        private volatile boolean running = true; // volatile: cancel() is called from a different thread

        @Override
        public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
            // the extra records should start from the last written value
            int count = recordsInEachPartition;
            int partition = getRuntimeContext().getIndexOfThisSubtask();
            while (running && count < recordsInEachPartition + extraRecordsInEachPartition) {
                ctx.collect(new Tuple2<>(partition, count));
                count++;
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });
    kafkaServer.produceIntoKafka(extraRecordsStream, topicName, serSchema, readProps, null);
    try {
        env2.execute(writeExtraRecordsJobName);
    } catch (Exception e) {
        throw new RuntimeException("Writing extra records failed", e);
    }
    // cancel the consume job after all extra records are written
    client.cancel(consumeJobId).get();
    consumeThread.join();
    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
    // check whether the consuming thread threw any test errors;
    // test will fail here if the consume job had incorrectly read any records other than the
    // extra records
    final Throwable consumerError = error.get();
    if (consumerError != null) {
        throw new Exception("Exception in the consuming thread", consumerError);
    }
}
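The behaviour under test hinges on a single setting: the starting offsets. A hedged sketch of that same configuration on the public KafkaSource builder, outside the test harness, follows; the broker address, topic, and group id are placeholders, not values taken from the test.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;

public class LatestOffsetsSketch {

    public static void main(String[] args) {
        // OffsetsInitializer.latest() overrides both the "auto.offset.reset"
        // consumer property and any offsets committed for the group, which is
        // exactly the behaviour the test above asserts.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("localhost:9092") // placeholder broker
                .setTopics("testStartFromLatestOffsetsTopic") // placeholder topic
                .setGroupId("latest-offsets-demo") // placeholder group id
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();
        // The test attaches such a source via env.fromSource(source,
        // WatermarkStrategy.noWatermarks(), "KafkaSource").
    }
}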

Example 3 with KafkaDeserializationSchemaWrapper

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaDeserializationSchemaWrapper in project flink by apache.

From the class KafkaConsumerTestBase, method writeSequence.

protected String writeSequence(String baseTopicName, final int numElements, final int parallelism, final int replicationFactor) throws Exception {
    LOG.info("\n===================================\n" + "== Writing sequence of " + numElements + " into " + baseTopicName + " with p=" + parallelism + "\n" + "===================================");
    final TypeInformation<Tuple2<Integer, Integer>> resultType = TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {
    });
    final SerializationSchema<Tuple2<Integer, Integer>> serSchema = new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig());
    final KafkaDeserializationSchema<Tuple2<Integer, Integer>> deserSchema = new KafkaDeserializationSchemaWrapper<>(new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));
    final int maxNumAttempts = 10;
    for (int attempt = 1; attempt <= maxNumAttempts; attempt++) {
        final String topicName = baseTopicName + '-' + attempt;
        LOG.info("Writing attempt #" + attempt);
        // -------- Write the Sequence --------
        createTestTopic(topicName, parallelism, replicationFactor);
        StreamExecutionEnvironment writeEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        DataStream<Tuple2<Integer, Integer>> stream = writeEnv.addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

            private volatile boolean running = true; // volatile: cancel() is called from a different thread

            @Override
            public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
                int cnt = 0;
                int partition = getRuntimeContext().getIndexOfThisSubtask();
                while (running && cnt < numElements) {
                    ctx.collect(new Tuple2<>(partition, cnt));
                    cnt++;
                }
            }

            @Override
            public void cancel() {
                running = false;
            }
        }).setParallelism(parallelism);
        // the producer must not produce duplicates
        Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
        producerProperties.setProperty("retries", "0");
        producerProperties.putAll(secureProps);
        kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties, new Tuple2FlinkPartitioner(parallelism)).setParallelism(parallelism);
        try {
            writeEnv.execute("Write sequence");
        } catch (Exception e) {
            LOG.error("Write attempt failed, trying again", e);
            deleteTestTopic(topicName);
            waitUntilNoJobIsRunning(client);
            continue;
        }
        LOG.info("Finished writing sequence");
        // -------- Validate the Sequence --------
        // we need to validate the sequence, because Kafka producers are not exactly-once
        LOG.info("Validating sequence");
        waitUntilNoJobIsRunning(client);
        if (validateSequence(topicName, parallelism, deserSchema, numElements)) {
            // everything is good!
            return topicName;
        } else {
            deleteTestTopic(topicName);
            // fall through and retry with a fresh topic
        }
    }
    throw new Exception("Could not write a valid sequence to Kafka after " + maxNumAttempts + " attempts");
}
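All three examples build the same schema pair from a Tuple2 TypeInformation: one instance serializes records on the producing side, and a second instance, wrapped in KafkaDeserializationSchemaWrapper, validates them afterwards. A small standalone sketch of the round trip that pair performs, with invented values:

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.serialization.TypeInformationSerializationSchema;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;

public class SchemaRoundTripSketch {

    public static void main(String[] args) throws Exception {
        TypeInformation<Tuple2<Integer, Integer>> type =
                TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});
        // One class serves both sides: SerializationSchema when producing and,
        // wrapped in KafkaDeserializationSchemaWrapper, DeserializationSchema
        // when validating.
        TypeInformationSerializationSchema<Tuple2<Integer, Integer>> schema =
                new TypeInformationSerializationSchema<>(type, new ExecutionConfig());
        byte[] bytes = schema.serialize(new Tuple2<>(0, 42)); // (subtask, counter)
        Tuple2<Integer, Integer> back = schema.deserialize(bytes);
        System.out.println(back); // prints "(0,42)"
    }
}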

Aggregations

IOException (java.io.IOException) 3
Properties (java.util.Properties) 3
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig) 3
TypeInformationSerializationSchema (org.apache.flink.api.common.serialization.TypeInformationSerializationSchema) 3
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint) 3
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 3
ProgramInvocationException (org.apache.flink.client.program.ProgramInvocationException) 3
JobCancellationException (org.apache.flink.runtime.client.JobCancellationException) 3
JobExecutionException (org.apache.flink.runtime.client.JobExecutionException) 3
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 3
KafkaDeserializationSchemaWrapper (org.apache.flink.streaming.connectors.kafka.internals.KafkaDeserializationSchemaWrapper) 3
SuccessException (org.apache.flink.test.util.SuccessException) 3
RetryOnException (org.apache.flink.testutils.junit.RetryOnException) 3
NotLeaderForPartitionException (org.apache.kafka.common.errors.NotLeaderForPartitionException) 3
TimeoutException (org.apache.kafka.common.errors.TimeoutException) 3
RichParallelSourceFunction (org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction) 2
Tuple2FlinkPartitioner (org.apache.flink.streaming.connectors.kafka.testutils.Tuple2FlinkPartitioner) 2
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString) 2
AtomicReference (java.util.concurrent.atomic.AtomicReference) 1
JobID (org.apache.flink.api.common.JobID) 1