
Example 1 with SuccessException

Use of org.apache.flink.test.util.SuccessException in project flink by apache.

From the class ClassLoaderITCase, method testJobsWithCustomClassLoader.

@Test
public void testJobsWithCustomClassLoader() {
    try {
        int port = testCluster.getLeaderRPCPort();
        PackagedProgram inputSplitTestProg = new PackagedProgram(new File(INPUT_SPLITS_PROG_JAR_FILE),
                new String[] { INPUT_SPLITS_PROG_JAR_FILE,
                        "", // classpath
                        "localhost",
                        String.valueOf(port),
                        "4" // parallelism
                });
        inputSplitTestProg.invokeInteractiveModeForExecution();
        PackagedProgram streamingInputSplitTestProg = new PackagedProgram(new File(STREAMING_INPUT_SPLITS_PROG_JAR_FILE),
                new String[] { STREAMING_INPUT_SPLITS_PROG_JAR_FILE,
                        "localhost",
                        String.valueOf(port),
                        "4" // parallelism
                });
        streamingInputSplitTestProg.invokeInteractiveModeForExecution();
        String classpath = new File(INPUT_SPLITS_PROG_JAR_FILE).toURI().toURL().toString();
        PackagedProgram inputSplitTestProg2 = new PackagedProgram(new File(INPUT_SPLITS_PROG_JAR_FILE),
                new String[] { "",
                        classpath, // classpath
                        "localhost",
                        String.valueOf(port),
                        "4" // parallelism
                });
        inputSplitTestProg2.invokeInteractiveModeForExecution();
        // regular streaming job
        PackagedProgram streamingProg = new PackagedProgram(new File(STREAMING_PROG_JAR_FILE), new String[] { STREAMING_PROG_JAR_FILE, "localhost", String.valueOf(port) });
        streamingProg.invokeInteractiveModeForExecution();
        // the test also ensures that user-specific exceptions are serializable between JobManager <--> JobClient.
        try {
            PackagedProgram streamingCheckpointedProg = new PackagedProgram(new File(STREAMING_CHECKPOINTED_PROG_JAR_FILE), new String[] { STREAMING_CHECKPOINTED_PROG_JAR_FILE, "localhost", String.valueOf(port) });
            streamingCheckpointedProg.invokeInteractiveModeForExecution();
        } catch (Exception e) {
            // we cannot access the SuccessException here when executing the tests with Maven, because it is not available in the jar.
            assertEquals("Program should terminate with a 'SuccessException'", "org.apache.flink.test.classloading.jar.CheckpointedStreamingProgram.SuccessException", e.getCause().getCause().getClass().getCanonicalName());
        }
        PackagedProgram kMeansProg = new PackagedProgram(new File(KMEANS_JAR_PATH),
                new String[] { KMEANS_JAR_PATH,
                        "localhost",
                        String.valueOf(port),
                        "4", // parallelism
                        KMeansData.DATAPOINTS,
                        KMeansData.INITIAL_CENTERS,
                        "25" });
        kMeansProg.invokeInteractiveModeForExecution();
        // test FLINK-3633
        final PackagedProgram userCodeTypeProg = new PackagedProgram(new File(USERCODETYPE_JAR_PATH), new String[] { USERCODETYPE_JAR_PATH, "localhost", String.valueOf(port) });
        userCodeTypeProg.invokeInteractiveModeForExecution();
        File checkpointDir = FOLDER.newFolder();
        File outputDir = FOLDER.newFolder();
        final PackagedProgram program = new PackagedProgram(new File(CHECKPOINTING_CUSTOM_KV_STATE_JAR_PATH), new String[] { CHECKPOINTING_CUSTOM_KV_STATE_JAR_PATH, "localhost", String.valueOf(port), checkpointDir.toURI().toString(), outputDir.toURI().toString() });
        program.invokeInteractiveModeForExecution();
    } catch (Exception e) {
        if (!(e.getCause().getCause() instanceof SuccessException)) {
            fail(e.getMessage());
        }
    }
}
Also used : PackagedProgram(org.apache.flink.client.program.PackagedProgram) SuccessException(org.apache.flink.test.util.SuccessException) File(java.io.File) TriggerSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.TriggerSavepoint) DisposeSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) SuccessException(org.apache.flink.test.util.SuccessException) Test(org.junit.Test)
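Each of the programs above follows the same pattern: wrap a test jar and its arguments in a PackagedProgram and execute its entry point in the current JVM. The following minimal sketch shows that pattern in isolation; the jar path, host, port, and parallelism values are placeholders, not taken from the test above.

import java.io.File;
import org.apache.flink.client.program.PackagedProgram;
import org.apache.flink.client.program.ProgramInvocationException;

public class RunPackagedProgram {
    public static void main(String[] args) throws ProgramInvocationException {
        // placeholder jar and arguments; the test above passes the jar path,
        // the cluster host, the leader RPC port, and the parallelism
        File jarFile = new File("/path/to/user-program.jar");
        String[] programArgs = { "localhost", "6123", "4" };

        // loads the jar with a user-code class loader and finds its main class
        PackagedProgram program = new PackagedProgram(jarFile, programArgs);

        // runs the program's main method in this JVM; failures surface as a
        // ProgramInvocationException wrapping the user exception
        program.invokeInteractiveModeForExecution();
    }
}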

Example 2 with SuccessException

Use of org.apache.flink.test.util.SuccessException in project flink by apache.

From the class KafkaConsumerTestBase, method runStartFromKafkaCommitOffsets.

/**
	 * This test first writes a total of 300 records to a test topic, reads the first 150 so that some offsets are
	 * committed to Kafka, and then starts the consumer again to read the remaining records from the committed offsets.
	 * The test ensures that whatever offsets were committed to Kafka, the consumer correctly picks them up
	 * and starts at the correct position.
	 */
public void runStartFromKafkaCommitOffsets() throws Exception {
    final int parallelism = 3;
    final int recordsInEachPartition = 300;
    final String topicName = writeSequence("testStartFromKafkaCommitOffsetsTopic", recordsInEachPartition, parallelism, 1);
    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();
    Long o1;
    Long o2;
    Long o3;
    int attempt = 0;
    // make sure that o1, o2, o3 are not all null before proceeding
    do {
        attempt++;
        LOG.info("Attempt " + attempt + " to read records and commit some offsets to Kafka");
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
        env.getConfig().disableSysoutLogging();
        env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        env.setParallelism(parallelism);
        // fast checkpoints to make sure we commit some offsets
        env.enableCheckpointing(20);
        env.addSource(kafkaServer.getConsumer(topicName, new SimpleStringSchema(), standardProps)).map(new ThrottledMapper<String>(50)).map(new MapFunction<String, Object>() {

            int count = 0;

            @Override
            public Object map(String value) throws Exception {
                count++;
                if (count == 150) {
                    throw new SuccessException();
                }
                return null;
            }
        }).addSink(new DiscardingSink<>());
        tryExecute(env, "Read some records to commit offsets to Kafka");
        o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
        o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
        o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);
    } while (o1 == null && o2 == null && o3 == null && attempt < 3);
    if (o1 == null && o2 == null && o3 == null) {
        throw new RuntimeException("No offsets have been committed after 3 attempts");
    }
    LOG.info("Got final committed offsets from Kafka o1={}, o2={}, o3={}", o1, o2, o3);
    final StreamExecutionEnvironment env2 = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env2.getConfig().disableSysoutLogging();
    env2.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env2.setParallelism(parallelism);
    // whatever offsets were committed for each partition, the consumer should pick
    // them up and start from the correct position so that the remaining records are all read
    HashMap<Integer, Tuple2<Integer, Integer>> partitionsToValuesCountAndStartOffset = new HashMap<>();
    partitionsToValuesCountAndStartOffset.put(0, new Tuple2<>(
            (o1 != null) ? (int) (recordsInEachPartition - o1) : recordsInEachPartition,
            (o1 != null) ? o1.intValue() : 0));
    partitionsToValuesCountAndStartOffset.put(1, new Tuple2<>(
            (o2 != null) ? (int) (recordsInEachPartition - o2) : recordsInEachPartition,
            (o2 != null) ? o2.intValue() : 0));
    partitionsToValuesCountAndStartOffset.put(2, new Tuple2<>(
            (o3 != null) ? (int) (recordsInEachPartition - o3) : recordsInEachPartition,
            (o3 != null) ? o3.intValue() : 0));
    readSequence(env2, StartupMode.GROUP_OFFSETS, null, standardProps, topicName, partitionsToValuesCountAndStartOffset);
    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
}
Also used : HashMap(java.util.HashMap) MapFunction(org.apache.flink.api.common.functions.MapFunction) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) Tuple2(org.apache.flink.api.java.tuple.Tuple2) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) SuccessException(org.apache.flink.test.util.SuccessException) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
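The tryExecute helper from Flink's test utilities, used here and in several of the examples below, turns a job failure caused by a SuccessException into a normal return. The sketch below captures the idea in simplified form and is an assumption about the helper's shape, not its verbatim implementation.

// simplified sketch of the tryExecute(env, jobName) helper
public static void tryExecute(StreamExecutionEnvironment env, String jobName) throws Exception {
    try {
        env.execute(jobName);
    } catch (Exception root) {
        // unwrap the cause chain with a depth bound; a nested SuccessException
        // means the job terminated itself on purpose after validating its data
        Throwable cause = root.getCause();
        int depth = 0;
        while (cause != null && depth++ < 20) {
            if (cause instanceof SuccessException) {
                return; // the test passed
            }
            cause = cause.getCause();
        }
        throw root; // a genuine failure
    }
}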

Example 3 with SuccessException

Use of org.apache.flink.test.util.SuccessException in project flink by apache.

From the class KafkaConsumerTestBase, method readSequence.

// ------------------------------------------------------------------------
//  Reading writing test data sets
// ------------------------------------------------------------------------
/**
	 * Runs a job using the provided environment to read a sequence of records from a single Kafka topic.
	 * The method allows the expected starting offset and the total read value count of each partition to be specified individually.
	 * The job will be considered successful only if all partition read results match the start offset and value count criteria.
	 */
protected void readSequence(
        final StreamExecutionEnvironment env,
        final StartupMode startupMode,
        final Map<KafkaTopicPartition, Long> specificStartupOffsets,
        final Properties cc,
        final String topicName,
        final Map<Integer, Tuple2<Integer, Integer>> partitionsToValuesCountAndStartOffset) throws Exception {
    final int sourceParallelism = partitionsToValuesCountAndStartOffset.keySet().size();
    int finalCountTmp = 0;
    for (Map.Entry<Integer, Tuple2<Integer, Integer>> valuesCountAndStartOffset : partitionsToValuesCountAndStartOffset.entrySet()) {
        finalCountTmp += valuesCountAndStartOffset.getValue().f0;
    }
    final int finalCount = finalCountTmp;
    final TypeInformation<Tuple2<Integer, Integer>> intIntTupleType = TypeInfoParser.parse("Tuple2<Integer, Integer>");
    final TypeInformationSerializationSchema<Tuple2<Integer, Integer>> deser = new TypeInformationSerializationSchema<>(intIntTupleType, env.getConfig());
    // create the consumer
    cc.putAll(secureProps);
    FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> consumer = kafkaServer.getConsumer(topicName, deser, cc);
    switch(startupMode) {
        case EARLIEST:
            consumer.setStartFromEarliest();
            break;
        case LATEST:
            consumer.setStartFromLatest();
            break;
        case SPECIFIC_OFFSETS:
            consumer.setStartFromSpecificOffsets(specificStartupOffsets);
            break;
        case GROUP_OFFSETS:
            consumer.setStartFromGroupOffsets();
            break;
    }
    DataStream<Tuple2<Integer, Integer>> source = env.addSource(consumer).setParallelism(sourceParallelism).map(new ThrottledMapper<Tuple2<Integer, Integer>>(20)).setParallelism(sourceParallelism);
    // verify data
    source.flatMap(new RichFlatMapFunction<Tuple2<Integer, Integer>, Integer>() {

        private HashMap<Integer, BitSet> partitionsToValueCheck;

        private int count = 0;

        @Override
        public void open(Configuration parameters) throws Exception {
            partitionsToValueCheck = new HashMap<>();
            for (Integer partition : partitionsToValuesCountAndStartOffset.keySet()) {
                partitionsToValueCheck.put(partition, new BitSet());
            }
        }

        @Override
        public void flatMap(Tuple2<Integer, Integer> value, Collector<Integer> out) throws Exception {
            int partition = value.f0;
            int val = value.f1;
            BitSet bitSet = partitionsToValueCheck.get(partition);
            if (bitSet == null) {
                throw new RuntimeException("Got a record from an unknown partition");
            } else {
                bitSet.set(val - partitionsToValuesCountAndStartOffset.get(partition).f1);
            }
            count++;
            LOG.info("Received message {}, total {} messages", value, count);
            // verify if we've seen everything
            if (count == finalCount) {
                for (Map.Entry<Integer, BitSet> partitionCheck : this.partitionsToValueCheck.entrySet()) {
                    BitSet check = partitionCheck.getValue();
                    int expectedValueCount = partitionsToValuesCountAndStartOffset.get(partitionCheck.getKey()).f0;
                    if (check.cardinality() != expectedValueCount) {
                        throw new RuntimeException("Expected cardinality to be " + expectedValueCount + ", but was " + check.cardinality());
                    } else if (check.nextClearBit(0) != expectedValueCount) {
                        throw new RuntimeException("Expected next clear bit to be " + expectedValueCount + ", but was " + check.cardinality());
                    }
                }
                // test has passed
                throw new SuccessException();
            }
        }
    }).setParallelism(1);
    tryExecute(env, "Read data from Kafka");
    LOG.info("Successfully read sequence for verification");
}
Also used : Configuration(org.apache.flink.configuration.Configuration) HashMap(java.util.HashMap) BitSet(java.util.BitSet) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) ThrottledMapper(org.apache.flink.streaming.connectors.kafka.testutils.ThrottledMapper) TypeInformationSerializationSchema(org.apache.flink.streaming.util.serialization.TypeInformationSerializationSchema) Tuple2(org.apache.flink.api.java.tuple.Tuple2) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) Collector(org.apache.flink.util.Collector) SuccessException(org.apache.flink.test.util.SuccessException) Map(java.util.Map) HashMap(java.util.HashMap)
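The BitSet bookkeeping above carries the whole verification: each received value is shifted by its partition's start offset, and a partition passes only when its bits form a gap-free run of the expected length. A tiny standalone illustration of the two checks, with made-up numbers:

import java.util.BitSet;

public class BitSetCheckDemo {
    public static void main(String[] args) {
        int startOffset = 5;        // first expected value (made up)
        int expectedValueCount = 3; // number of consecutive values expected

        BitSet seen = new BitSet();
        for (int value : new int[] { 5, 6, 7 }) {
            // normalize so that bit 0 corresponds to the first expected value
            seen.set(value - startOffset);
        }

        // cardinality() counts set bits: catches missing values
        System.out.println(seen.cardinality() == expectedValueCount);   // true
        // nextClearBit(0) finds the first gap: catches values outside the run
        System.out.println(seen.nextClearBit(0) == expectedValueCount); // true
    }
}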

Example 4 with SuccessException

Use of org.apache.flink.test.util.SuccessException in project flink by apache.

From the class KafkaConsumerTestBase, method runProduceConsumeMultipleTopics.

/**
	 * Tests producing into and consuming from multiple topics.
	 * @throws java.lang.Exception
	 */
public void runProduceConsumeMultipleTopics() throws java.lang.Exception {
    final int NUM_TOPICS = 5;
    final int NUM_ELEMENTS = 20;
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.getConfig().disableSysoutLogging();
    // create topics with content
    final List<String> topics = new ArrayList<>();
    for (int i = 0; i < NUM_TOPICS; i++) {
        final String topic = "topic-" + i;
        topics.add(topic);
        // create topic
        createTestTopic(topic, i + 1 /* partitions */, 1);
    }
    // run first job, producing into all topics
    DataStream<Tuple3<Integer, Integer, String>> stream = env.addSource(new RichParallelSourceFunction<Tuple3<Integer, Integer, String>>() {

        @Override
        public void run(SourceContext<Tuple3<Integer, Integer, String>> ctx) throws Exception {
            int partition = getRuntimeContext().getIndexOfThisSubtask();
            for (int topicId = 0; topicId < NUM_TOPICS; topicId++) {
                for (int i = 0; i < NUM_ELEMENTS; i++) {
                    ctx.collect(new Tuple3<>(partition, i, "topic-" + topicId));
                }
            }
        }

        @Override
        public void cancel() {
        }
    });
    Tuple2WithTopicSchema schema = new Tuple2WithTopicSchema(env.getConfig());
    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, "dummy", schema, props, null);
    env.execute("Write to topics");
    // run second job consuming from multiple topics
    env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.getConfig().disableSysoutLogging();
    stream = env.addSource(kafkaServer.getConsumer(topics, schema, props));
    stream.flatMap(new FlatMapFunction<Tuple3<Integer, Integer, String>, Integer>() {

        Map<String, Integer> countPerTopic = new HashMap<>(NUM_TOPICS);

        @Override
        public void flatMap(Tuple3<Integer, Integer, String> value, Collector<Integer> out) throws Exception {
            Integer count = countPerTopic.get(value.f2);
            if (count == null) {
                count = 1;
            } else {
                count++;
            }
            countPerTopic.put(value.f2, count);
            // check map: succeed only once every topic has delivered exactly NUM_ELEMENTS records
            if (countPerTopic.size() < NUM_TOPICS) {
                // not all topics seen yet
                return;
            }
            for (Map.Entry<String, Integer> el : countPerTopic.entrySet()) {
                if (el.getValue() < NUM_ELEMENTS) {
                    // not enough yet
                    return;
                }
                if (el.getValue() > NUM_ELEMENTS) {
                    throw new RuntimeException("There is a failure in the test. I've read " + el.getValue() + " from topic " + el.getKey());
                }
            }
            // we've seen all messages from all topics
            throw new SuccessException();
        }
    }).setParallelism(1);
    tryExecute(env, "Count elements from the topics");
    // delete all topics again
    for (int i = 0; i < NUM_TOPICS; i++) {
        final String topic = "topic-" + i;
        deleteTestTopic(topic);
    }
}
Also used : ArrayList(java.util.ArrayList) Properties(java.util.Properties) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) RetryOnException(org.apache.flink.testutils.junit.RetryOnException) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) SuccessException(org.apache.flink.test.util.SuccessException) NoResourceAvailableException(org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException) JobExecutionException(org.apache.flink.runtime.client.JobExecutionException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) JobCancellationException(org.apache.flink.runtime.client.JobCancellationException) IOException(java.io.IOException) Tuple3(org.apache.flink.api.java.tuple.Tuple3) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) Collector(org.apache.flink.util.Collector) SuccessException(org.apache.flink.test.util.SuccessException) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Map(java.util.Map) HashMap(java.util.HashMap)
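Tuple2WithTopicSchema, whose source is not shown on this page, is what allows produceIntoKafka to be called with the placeholder topic "dummy": the schema picks the real target topic per record. The sketch below illustrates that routing idea against the KeyedSerializationSchema interface of this Flink generation; the class name and the textual value encoding are assumptions for illustration.

import java.nio.charset.StandardCharsets;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;

// hypothetical stand-in for the routing behavior of Tuple2WithTopicSchema
public class TopicRoutingSchema implements KeyedSerializationSchema<Tuple3<Integer, Integer, String>> {

    @Override
    public byte[] serializeKey(Tuple3<Integer, Integer, String> element) {
        return null; // no key; the producer falls back to its default partitioning
    }

    @Override
    public byte[] serializeValue(Tuple3<Integer, Integer, String> element) {
        // simple textual encoding for illustration; the real schema uses
        // Flink's type serialization
        return (element.f0 + "," + element.f1).getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public String getTargetTopic(Tuple3<Integer, Integer, String> element) {
        // route each record to the topic named in its third field, overriding
        // the default topic handed to produceIntoKafka
        return element.f2;
    }
}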

Example 5 with SuccessException

Use of org.apache.flink.test.util.SuccessException in project flink by apache.

From the class KafkaConsumerTestBase, method runSimpleConcurrentProducerConsumerTopology.

/**
	 * Ensure Kafka is working on both producer and consumer side.
	 * This executes a job that contains two Flink pipelines.
	 *
	 * <pre>
	 * (generator source) --> (kafka sink)-[KAFKA-TOPIC]-(kafka source) --> (validating sink)
	 * </pre>
	 * 
	 * We need to externally retry this test. We cannot let Flink's retry mechanism do it, because the Kafka producer
	 * does not guarantee exactly-once output. Hence a recovery would introduce duplicates that
	 * cause the test to fail.
	 *
	 * This test also ensures that FLINK-3156 doesn't happen again:
	 *
	 * The following situation caused an NPE in the FlinkKafkaConsumer:
	 *
	 * topic-1 <-- elements are only produced into topic-1.
	 * topic-2
	 *
	 * Therefore, this test also consumes from an empty topic.
	 *
	 */
@RetryOnException(times = 2, exception = kafka.common.NotLeaderForPartitionException.class)
public void runSimpleConcurrentProducerConsumerTopology() throws Exception {
    final String topic = "concurrentProducerConsumerTopic_" + UUID.randomUUID().toString();
    final String additionalEmptyTopic = "additionalEmptyTopic_" + UUID.randomUUID().toString();
    final int parallelism = 3;
    final int elementsPerPartition = 100;
    final int totalElements = parallelism * elementsPerPartition;
    createTestTopic(topic, parallelism, 2);
    // create an empty topic which will remain empty all the time
    createTestTopic(additionalEmptyTopic, parallelism, 1);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.setParallelism(parallelism);
    env.enableCheckpointing(500);
    // fail immediately
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.getConfig().disableSysoutLogging();
    TypeInformation<Tuple2<Long, String>> longStringType = TypeInfoParser.parse("Tuple2<Long, String>");
    TypeInformationSerializationSchema<Tuple2<Long, String>> sourceSchema = new TypeInformationSerializationSchema<>(longStringType, env.getConfig());
    TypeInformationSerializationSchema<Tuple2<Long, String>> sinkSchema = new TypeInformationSerializationSchema<>(longStringType, env.getConfig());
    // ----------- add producer dataflow ----------
    DataStream<Tuple2<Long, String>> stream = env.addSource(new RichParallelSourceFunction<Tuple2<Long, String>>() {

        private boolean running = true;

        @Override
        public void run(SourceContext<Tuple2<Long, String>> ctx) throws InterruptedException {
            int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
            int limit = cnt + elementsPerPartition;
            while (running && cnt < limit) {
                ctx.collect(new Tuple2<>(1000L + cnt, "kafka-" + cnt));
                cnt++;
                // we delay data generation a bit so that we are sure that some checkpoints are
                // triggered (for FLINK-3156)
                Thread.sleep(50);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });
    Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
    producerProperties.setProperty("retries", "3");
    producerProperties.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, topic, new KeyedSerializationSchemaWrapper<>(sinkSchema), producerProperties, null);
    // ----------- add consumer dataflow ----------
    List<String> topics = new ArrayList<>();
    topics.add(topic);
    topics.add(additionalEmptyTopic);
    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Tuple2<Long, String>> source = kafkaServer.getConsumer(topics, sourceSchema, props);
    DataStreamSource<Tuple2<Long, String>> consuming = env.addSource(source).setParallelism(parallelism);
    consuming.addSink(new RichSinkFunction<Tuple2<Long, String>>() {

        private int elCnt = 0;

        private BitSet validator = new BitSet(totalElements);

        @Override
        public void invoke(Tuple2<Long, String> value) throws Exception {
            String[] sp = value.f1.split("-");
            int v = Integer.parseInt(sp[1]);
            assertEquals(value.f0 - 1000, (long) v);
            assertFalse("Received tuple twice", validator.get(v));
            validator.set(v);
            elCnt++;
            if (elCnt == totalElements) {
                // check if everything in the bitset is set to true
                int nc;
                if ((nc = validator.nextClearBit(0)) != totalElements) {
                    fail("The bitset was not set to 1 on all elements. Next clear:" + nc + " Set: " + validator);
                }
                throw new SuccessException();
            }
        }

        @Override
        public void close() throws Exception {
            super.close();
        }
    }).setParallelism(1);
    try {
        tryExecutePropagateExceptions(env, "runSimpleConcurrentProducerConsumerTopology");
    } catch (ProgramInvocationException | JobExecutionException e) {
        // look for NotLeaderForPartitionException
        Throwable cause = e.getCause();
        // walk the cause chain, bounded in depth to guard against cycles
        int depth = 0;
        while (cause != null && depth++ < 20) {
            if (cause instanceof kafka.common.NotLeaderForPartitionException) {
                throw (Exception) cause;
            }
            cause = cause.getCause();
        }
        throw e;
    }
    deleteTestTopic(topic);
}
Also used : ArrayList(java.util.ArrayList) Properties(java.util.Properties) JobExecutionException(org.apache.flink.runtime.client.JobExecutionException) RichSinkFunction(org.apache.flink.streaming.api.functions.sink.RichSinkFunction) BitSet(java.util.BitSet) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) TypeInformationSerializationSchema(org.apache.flink.streaming.util.serialization.TypeInformationSerializationSchema) Tuple2(org.apache.flink.api.java.tuple.Tuple2) SuccessException(org.apache.flink.test.util.SuccessException) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) RetryOnException(org.apache.flink.testutils.junit.RetryOnException)
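The @RetryOnException annotation provides the external retry that the Javadoc above calls for, but Flink's JUnit utilities only honor it when a RetryRule is registered. A minimal sketch of that wiring, assuming a hypothetical concrete test class and that the rule reads the annotation from the executed test method:

import org.apache.flink.testutils.junit.RetryOnException;
import org.apache.flink.testutils.junit.RetryRule;
import org.junit.Rule;
import org.junit.Test;

public class MyKafkaITCase extends KafkaConsumerTestBase {

    // without this rule, @RetryOnException annotations have no effect
    @Rule
    public final RetryRule retryRule = new RetryRule();

    @Test
    @RetryOnException(times = 2, exception = kafka.common.NotLeaderForPartitionException.class)
    public void testConcurrentProducerConsumerTopology() throws Exception {
        // retried up to two times if Kafka reports a leader change mid-test
        runSimpleConcurrentProducerConsumerTopology();
    }
}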

Aggregations

SuccessException (org.apache.flink.test.util.SuccessException) 11
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint) 8
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 8
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 8
Properties (java.util.Properties) 7
ProgramInvocationException (org.apache.flink.client.program.ProgramInvocationException) 7
JobExecutionException (org.apache.flink.runtime.client.JobExecutionException) 6
RetryOnException (org.apache.flink.testutils.junit.RetryOnException) 6
IOException (java.io.IOException) 5
JobCancellationException (org.apache.flink.runtime.client.JobCancellationException) 5
NoResourceAvailableException (org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException) 5
TimeoutException (org.apache.kafka.common.errors.TimeoutException) 5
TypeInformationSerializationSchema (org.apache.flink.streaming.util.serialization.TypeInformationSerializationSchema) 4
HashMap (java.util.HashMap) 3
Random (java.util.Random) 3
RichFlatMapFunction (org.apache.flink.api.common.functions.RichFlatMapFunction) 3
RichMapFunction (org.apache.flink.api.common.functions.RichMapFunction) 3
ArrayList (java.util.ArrayList) 2
BitSet (java.util.BitSet) 2
Map (java.util.Map) 2