Example 11 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From the class KinesisDataStreamsTableApiIT, method readAllOrdersFromKinesis.

private List<Order> readAllOrdersFromKinesis() throws Exception {
    // Re-read the stream for up to 10 seconds, or until all 5 expected orders have arrived.
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    List<Order> orders;
    do {
        // Deserialize each record from its JSON byte payload.
        orders = readMessagesFromStream(recordBytes -> fromJson(new String(recordBytes), Order.class));
    } while (deadline.hasTimeLeft() && orders.size() < 5);
    return orders;
}
Also used : KinesaliteContainer(org.apache.flink.connectors.kinesis.testutils.KinesaliteContainer) JsonProperty(com.fasterxml.jackson.annotation.JsonProperty) Deadline(org.apache.flink.api.common.time.Deadline) GetShardIteratorRequest(software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest) BeforeClass(org.junit.BeforeClass) ShardIteratorType(software.amazon.awssdk.services.kinesis.model.ShardIteratorType)
DockerImageName(org.testcontainers.utility.DockerImageName) DockerImageVersions(org.apache.flink.util.DockerImageVersions) LoggerFactory(org.slf4j.LoggerFactory) Function(java.util.function.Function) Network(org.testcontainers.containers.Network) TestUtils(org.apache.flink.tests.util.TestUtils)
ArrayList(java.util.ArrayList) SdkSystemSetting(software.amazon.awssdk.core.SdkSystemSetting) StreamStatus(software.amazon.awssdk.services.kinesis.model.StreamStatus) SQLJobSubmission(org.apache.flink.tests.util.flink.SQLJobSubmission) After(org.junit.After) Duration(java.time.Duration)
Timeout(org.junit.rules.Timeout) Assertions(org.assertj.core.api.Assertions) ClassRule(org.junit.ClassRule) SdkAsyncHttpClient(software.amazon.awssdk.http.async.SdkAsyncHttpClient) Path(java.nio.file.Path) Before(org.junit.Before)
KinesisAsyncClient(software.amazon.awssdk.services.kinesis.KinesisAsyncClient) AfterClass(org.junit.AfterClass) Logger(org.slf4j.Logger) DescribeStreamRequest(software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest) Files(java.nio.file.Files) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper)
JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Test(org.junit.Test) RateLimiterBuilder(org.rnorth.ducttape.ratelimits.RateLimiterBuilder) CreateStreamRequest(software.amazon.awssdk.services.kinesis.model.CreateStreamRequest) GetRecordsRequest(software.amazon.awssdk.services.kinesis.model.GetRecordsRequest) Objects(java.util.Objects)
TimeUnit(java.util.concurrent.TimeUnit) AWSGeneralUtil(org.apache.flink.connector.aws.util.AWSGeneralUtil) List(java.util.List) RateLimiter(org.rnorth.ducttape.ratelimits.RateLimiter) ImmutableList(org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList) Paths(java.nio.file.Paths)
Record(software.amazon.awssdk.services.kinesis.model.Record) FlinkContainers(org.apache.flink.tests.util.flink.container.FlinkContainers) SECONDS(java.util.concurrent.TimeUnit.SECONDS)
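
Every example in this listing leans on the same small Deadline surface. As a quick reference, here is a minimal sketch of the calls used below; the construction variants and predicates come from org.apache.flink.api.common.time.Deadline, and the timeout value is arbitrary:

Deadline deadline = Deadline.fromNow(Duration.ofSeconds(5));  // relative construction
Deadline same = Deadline.now().plus(Duration.ofSeconds(5));   // two-step form used in Example 15
boolean left = deadline.hasTimeLeft();  // true until the 5 seconds elapse
boolean over = deadline.isOverdue();    // the complement of hasTimeLeft()
Duration rest = deadline.timeLeft();    // remaining time; can be negative once overdue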

Example 12 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From the class KinesisDataStreamsTableApiIT, method prepareStream.

private void prepareStream(String streamName) throws Exception {
    // Throttle the existence check to at most one call per second.
    final RateLimiter rateLimiter = RateLimiterBuilder.newBuilder().withRate(1, SECONDS).withConstantThroughput().build();
    kinesisClient.createStream(CreateStreamRequest.builder().streamName(streamName).shardCount(1).build()).get();
    // Give the newly created stream up to one minute to become visible.
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
    while (!rateLimiter.getWhenReady(() -> streamExists(streamName))) {
        if (deadline.isOverdue()) {
            throw new RuntimeException("Failed to create stream within time");
        }
    }
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) RateLimiter(org.rnorth.ducttape.ratelimits.RateLimiter)
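
Examples 11 and 12 hand-roll the same time-boxed wait loop with slightly different bookkeeping. As a sketch only, the shared idiom can be factored into a helper; the class and method names here are hypothetical, not part of the Flink codebase:

import java.time.Duration;
import java.util.concurrent.Callable;
import org.apache.flink.api.common.time.Deadline;

final class DeadlinePolling {
    // Hypothetical helper: evaluates `condition` until it returns true or `timeout` elapses.
    static boolean pollUntil(Duration timeout, Duration interval, Callable<Boolean> condition)
            throws Exception {
        Deadline deadline = Deadline.fromNow(timeout);
        while (deadline.hasTimeLeft()) {
            if (condition.call()) {
                return true;
            }
            // Sleep between attempts so the loop does not spin hot.
            Thread.sleep(interval.toMillis());
        }
        return false;
    }
}

With such a helper, prepareStream would reduce to throwing when pollUntil(Duration.ofMinutes(1), Duration.ofSeconds(1), () -> streamExists(streamName)) returns false.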

Example 13 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From the class KafkaContainerClient, method readMessages.

public <T> List<T> readMessages(int expectedNumMessages, String groupId, String topic, Deserializer<T> valueDeserializer) throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, container.getBootstrapServers());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final List<T> messages = Collections.synchronizedList(new ArrayList<>(expectedNumMessages));
    try (Consumer<Bytes, T> consumer = new KafkaConsumer<>(props, new BytesDeserializer(), valueDeserializer)) {
        waitUntilTopicAvailableThenAssign(topic, consumer, Duration.ofSeconds(60));
        // Keep polling until the expected number of messages has been received.
        final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
        while (deadline.hasTimeLeft() && messages.size() < expectedNumMessages) {
            LOG.info("Waiting for messages. Received {}/{}.", messages.size(), expectedNumMessages);
            ConsumerRecords<Bytes, T> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<Bytes, T> record : records) {
                messages.add(record.value());
            }
        }
        if (messages.size() != expectedNumMessages) {
            throw new IOException("Could not read expected number of messages.");
        }
        return messages;
    }
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) Deadline(org.apache.flink.api.common.time.Deadline) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) IOException(java.io.IOException) Properties(java.util.Properties)
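
A minimal usage sketch for readMessages follows; the topic name, group id, expected count, and the kafkaClient instance are illustrative, and StringDeserializer is org.apache.kafka.common.serialization.StringDeserializer:

// Illustrative only: block until five String messages arrive on "test-topic",
// or fail with an IOException after the internal 120-second deadline.
List<String> messages =
        kafkaClient.readMessages(5, "test-group", "test-topic", new StringDeserializer());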

Example 14 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From the class SQLClientHBaseITCase, method checkHBaseSinkResult.

private void checkHBaseSinkResult() throws Exception {
    boolean success = false;
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
    while (deadline.hasTimeLeft()) {
        final List<String> lines = hbase.scanTable("sink");
        if (lines.size() == 6) {
            success = true;
            assertThat(
                    lines.toArray(new String[0]),
                    arrayContainingInAnyOrder(
                            CoreMatchers.allOf(containsString("row1"), containsString("family1"), containsString("f1c1"), containsString("value1")),
                            CoreMatchers.allOf(containsString("row1"), containsString("family2"), containsString("f2c1"), containsString("v2")),
                            CoreMatchers.allOf(containsString("row1"), containsString("family2"), containsString("f2c2"), containsString("v3")),
                            CoreMatchers.allOf(containsString("row2"), containsString("family1"), containsString("f1c1"), containsString("value4")),
                            CoreMatchers.allOf(containsString("row2"), containsString("family2"), containsString("f2c1"), containsString("v5")),
                            CoreMatchers.allOf(containsString("row2"), containsString("family2"), containsString("f2c2"), containsString("v6"))));
            break;
        } else {
            LOG.info("The HBase sink table does not contain enough records, current {} records, left time: {}s", lines.size(), deadline.timeLeft().getSeconds());
        }
        Thread.sleep(500);
    }
    Assert.assertTrue("Did not get expected results before timeout.", success);
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) Matchers.containsString(org.hamcrest.Matchers.containsString)
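
For comparison, the success-flag bookkeeping above collapses if the wait is expressed with the hypothetical pollUntil helper sketched after Example 12; the per-row content assertions would still run once the expected count is reached:

boolean success = DeadlinePolling.pollUntil(
        Duration.ofSeconds(120),
        Duration.ofMillis(500),
        () -> hbase.scanTable("sink").size() == 6);
Assert.assertTrue("Did not get expected results before timeout.", success);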

Example 15 with Deadline

Use of org.apache.flink.api.common.time.Deadline in project flink by apache.

From the class AbstractQueryableStateTestBase, method testValueState.

/**
 * Tests a simple queryable value state instance. Each source emits (subtaskIndex,
 * 0)..(subtaskIndex, numElements) tuples, which are then queried. The test succeeds once each
 * subtask index has been queried with value numElements (the latest element updates the state).
 */
@Test
public void testValueState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important: the cluster is shared between tests, and we do not
    // explicitly check that all slots are available before submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));
    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));
    // Value state
    ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());
    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

        private static final long serialVersionUID = 7662520075515707428L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState("hakuna", valueState);
    try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {
        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();
        clusterClient.submitJob(jobGraph).get();
        executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
    }
}
Also used : Deadline(org.apache.flink.api.common.time.Deadline) KeySelector(org.apache.flink.api.java.functions.KeySelector) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) Tuple2(org.apache.flink.api.java.tuple.Tuple2) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
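
executeValueQuery is a helper defined elsewhere in the same test base; its essential shape is another Deadline-bounded retry, this time around QueryableStateClient#getKvState. A rough sketch of that idiom (not the helper's actual body; BasicTypeInfo is org.apache.flink.api.common.typeinfo.BasicTypeInfo, and the assumption is that lookups against not-yet-registered state surface as ExecutionException):

// Sketch: retry the lookup for key 0 until the state is queryable or the deadline expires.
ValueState<Tuple2<Integer, Long>> state = null;
while (state == null && deadline.hasTimeLeft()) {
    try {
        state = client.getKvState(
                        jobId, "hakuna", 0, BasicTypeInfo.INT_TYPE_INFO, valueState)
                .get();
    } catch (ExecutionException e) {
        // The job may not have registered the state yet; back off briefly and retry.
        Thread.sleep(100);
    }
}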

Aggregations

Deadline (org.apache.flink.api.common.time.Deadline): 75
Test (org.junit.Test): 34
JobID (org.apache.flink.api.common.JobID): 29
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 26
Duration (java.time.Duration): 19
Configuration (org.apache.flink.configuration.Configuration): 15
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 14
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 13
IOException (java.io.IOException): 12
ExecutionException (java.util.concurrent.ExecutionException): 12
KeySelector (org.apache.flink.api.java.functions.KeySelector): 12
AtomicLong (java.util.concurrent.atomic.AtomicLong): 11
MiniCluster (org.apache.flink.runtime.minicluster.MiniCluster): 10
File (java.io.File): 9
TimeUnit (java.util.concurrent.TimeUnit): 9
JobStatus (org.apache.flink.api.common.JobStatus): 9
List (java.util.List): 8
Test (org.junit.jupiter.api.Test): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
CountDownLatch (java.util.concurrent.CountDownLatch): 7