Example 1 with RecordEmitter

Use of org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter in project flink by apache.

From class KinesisDataFetcher, method runFetcher.

/**
 * Starts the fetcher. After starting the fetcher, it can only be stopped by calling {@link
 * KinesisDataFetcher#shutdownFetcher()}.
 *
 * @throws Exception the first error or exception thrown by the fetcher or any of the threads
 *     created by the fetcher.
 */
public void runFetcher() throws Exception {
    // check that we are running before proceeding
    if (!running) {
        return;
    }
    // ------------------------------------------------------------------------
    // Procedures before starting the infinite while loop:
    // ------------------------------------------------------------------------
    // 1. check that there is at least one shard in the subscribed streams to consume from
    //    (can be done by checking if at least one value in
    //    subscribedStreamsToLastDiscoveredShardIds is not null)
    boolean hasShards = false;
    StringBuilder streamsWithNoShardsFound = new StringBuilder();
    for (Map.Entry<String, String> streamToLastDiscoveredShardEntry : subscribedStreamsToLastDiscoveredShardIds.entrySet()) {
        if (streamToLastDiscoveredShardEntry.getValue() != null) {
            hasShards = true;
        } else {
            streamsWithNoShardsFound.append(streamToLastDiscoveredShardEntry.getKey()).append(", ");
        }
    }
    if (streamsWithNoShardsFound.length() != 0 && LOG.isWarnEnabled()) {
        LOG.warn("Subtask {} has failed to find any shards for the following subscribed streams: {}", indexOfThisConsumerSubtask, streamsWithNoShardsFound.toString());
    }
    if (!hasShards) {
        throw new RuntimeException("No shards can be found for all subscribed streams: " + streams);
    }
    // 2. start consuming any shard state we already have in subscribedShardsState; this
    //    state may have been seeded, for example, by a consumer using a restored state checkpoint
    for (int seededStateIndex = 0; seededStateIndex < subscribedShardsState.size(); seededStateIndex++) {
        KinesisStreamShardState seededShardState = subscribedShardsState.get(seededStateIndex);
        // only start a consuming thread if the seeded shard has not been fully read already
        if (!seededShardState.getLastProcessedSequenceNum().equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Subtask {} will start consuming seeded shard {} from sequence number {} with ShardConsumer {}", indexOfThisConsumerSubtask, seededShardState.getStreamShardHandle().toString(), seededShardState.getLastProcessedSequenceNum(), seededStateIndex);
            }
            StreamShardHandle streamShardHandle = subscribedShardsState.get(seededStateIndex).getStreamShardHandle();
            KinesisDeserializationSchema<T> shardDeserializationSchema = getClonedDeserializationSchema();
            shardDeserializationSchema.open(
                    RuntimeContextInitializationContextAdapters.deserializationAdapter(
                            runtimeContext,
                            // ignore the provided metric group
                            metricGroup ->
                                    consumerMetricGroup
                                            .addGroup("subtaskId", String.valueOf(indexOfThisConsumerSubtask))
                                            .addGroup("shardId", streamShardHandle.getShard().getShardId())
                                            .addGroup("user")));
            shardConsumersExecutor.submit(createShardConsumer(seededStateIndex, streamShardHandle, subscribedShardsState.get(seededStateIndex).getLastProcessedSequenceNum(), registerShardMetricGroup(consumerMetricGroup, subscribedShardsState.get(seededStateIndex)), shardDeserializationSchema));
        }
    }
    // start periodic watermark emitter, if a watermark assigner was configured
    if (periodicWatermarkAssigner != null) {
        long periodicWatermarkIntervalMillis = runtimeContext.getExecutionConfig().getAutoWatermarkInterval();
        if (periodicWatermarkIntervalMillis > 0) {
            ProcessingTimeService timerService = ((StreamingRuntimeContext) runtimeContext).getProcessingTimeService();
            LOG.info("Starting periodic watermark emitter with interval {}", periodicWatermarkIntervalMillis);
            new PeriodicWatermarkEmitter(timerService, periodicWatermarkIntervalMillis).start();
            if (watermarkTracker != null) {
                // setup global watermark tracking
                long watermarkSyncMillis = Long.parseLong(getConsumerConfiguration().getProperty(ConsumerConfigConstants.WATERMARK_SYNC_MILLIS, Long.toString(ConsumerConfigConstants.DEFAULT_WATERMARK_SYNC_MILLIS)));
                // extend the update timeout to allow for synchronization latency
                watermarkTracker.setUpdateTimeoutMillis(watermarkSyncMillis * 3);
                watermarkTracker.open(runtimeContext);
                new WatermarkSyncCallback(timerService, watermarkSyncMillis).start();
                // emit records ahead of watermark to offset synchronization latency
                long lookaheadMillis = Long.parseLong(getConsumerConfiguration().getProperty(ConsumerConfigConstants.WATERMARK_LOOKAHEAD_MILLIS, Long.toString(0)));
                recordEmitter.setMaxLookaheadMillis(Math.max(lookaheadMillis, watermarkSyncMillis * 3));
                // record emitter depends on periodic watermark
                // it runs in a separate thread since main thread is used for discovery
                Runnable recordEmitterRunnable = new Runnable() {

                    @Override
                    public void run() {
                        try {
                            recordEmitter.run();
                        } catch (Throwable error) {
                            // report the error that terminated the emitter loop to the source thread
                            stopWithError(error);
                        }
                    }
                };
                Thread thread = new Thread(recordEmitterRunnable);
                thread.setName("recordEmitter-" + runtimeContext.getTaskNameWithSubtasks());
                thread.setDaemon(true);
                thread.start();
            }
        }
        this.shardIdleIntervalMillis = Long.parseLong(getConsumerConfiguration().getProperty(ConsumerConfigConstants.SHARD_IDLE_INTERVAL_MILLIS, Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_IDLE_INTERVAL_MILLIS)));
    }
    // ------------------------------------------------------------------------
    // finally, start the infinite shard discovery and consumer launching loop;
    // we will escape from this loop only when shutdownFetcher() or stopWithError() is called
    // TODO: have this thread emit the records for tracking backpressure
    final long discoveryIntervalMillis = Long.parseLong(configProps.getProperty(ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS, Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_DISCOVERY_INTERVAL_MILLIS)));
    if (this.numberOfActiveShards.get() == 0) {
        LOG.info("Subtask {} has no active shards to read on startup; marking the subtask as temporarily idle ...", indexOfThisConsumerSubtask);
        sourceContext.markAsTemporarilyIdle();
    }
    while (running) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Subtask {} is trying to discover new shards that were created due to resharding ...", indexOfThisConsumerSubtask);
        }
        List<StreamShardHandle> newShardsDueToResharding = discoverNewShardsToSubscribe();
        for (StreamShardHandle shard : newShardsDueToResharding) {
            // since there may be delay in discovering a new shard, all new shards due to
            // resharding should be read starting from the earliest record possible
            KinesisStreamShardState newShardState = new KinesisStreamShardState(convertToStreamShardMetadata(shard), shard, SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get());
            int newStateIndex = registerNewSubscribedShardState(newShardState);
            if (LOG.isInfoEnabled()) {
                LOG.info("Subtask {} has discovered a new shard {} due to resharding, and will start consuming " + "the shard from sequence number {} with ShardConsumer {}", indexOfThisConsumerSubtask, newShardState.getStreamShardHandle().toString(), newShardState.getLastProcessedSequenceNum(), newStateIndex);
            }
            StreamShardHandle streamShardHandle = newShardState.getStreamShardHandle();
            KinesisDeserializationSchema<T> shardDeserializationSchema = getClonedDeserializationSchema();
            shardDeserializationSchema.open(
                    RuntimeContextInitializationContextAdapters.deserializationAdapter(
                            runtimeContext,
                            // ignore the provided metric group
                            metricGroup ->
                                    consumerMetricGroup
                                            .addGroup("subtaskId", String.valueOf(indexOfThisConsumerSubtask))
                                            .addGroup("shardId", streamShardHandle.getShard().getShardId())
                                            .addGroup("user")));
            shardConsumersExecutor.submit(createShardConsumer(newStateIndex, newShardState.getStreamShardHandle(), newShardState.getLastProcessedSequenceNum(), registerShardMetricGroup(consumerMetricGroup, newShardState), shardDeserializationSchema));
        }
        // we also check that we are still running here, so that we do not start the discovery sleep
        // interval if the running flag was set to false during the middle of the while loop
        if (running && discoveryIntervalMillis != 0) {
            try {
                cancelFuture.get(discoveryIntervalMillis, TimeUnit.MILLISECONDS);
                LOG.debug("Cancelled discovery");
            } catch (TimeoutException iex) {
                // timeout is expected when fetcher is not cancelled
            }
        }
    }
    // make sure all resources have been terminated before leaving
    try {
        awaitTermination();
    } catch (InterruptedException ie) {
        // If there is an original exception, preserve it, since that's more important/useful.
        this.error.compareAndSet(null, ie);
    }
    // any error thrown in the shard consumer threads will be thrown to the main thread
    Throwable throwable = this.error.get();
    if (throwable != null) {
        if (throwable instanceof Exception) {
            throw (Exception) throwable;
        } else if (throwable instanceof Error) {
            throw (Error) throwable;
        } else {
            throw new Exception(throwable);
        }
    }
}
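
The javadoc above pins down the lifecycle contract: runFetcher() blocks the calling thread until shutdownFetcher() is invoked from another thread (typically the source's cancel()), and it rethrows the first error reported by any worker thread. Below is a minimal, self-contained sketch of that contract; FetcherLifecycleSketch is a hypothetical stand-in that mirrors the run/shutdown/error-propagation pattern, not the real KinesisDataFetcher API.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical stand-in mirroring the KinesisDataFetcher lifecycle contract.
public class FetcherLifecycleSketch {
    private volatile boolean running = true;
    // first error wins, exactly like the compareAndSet on this.error in runFetcher()
    private final AtomicReference<Throwable> error = new AtomicReference<>();
    private final CountDownLatch terminated = new CountDownLatch(1);

    /** Blocks the calling thread until shutdown() or stopWithError() is called. */
    public void run() throws Exception {
        try {
            while (running) {
                // ... shard discovery and consumption would happen here ...
                Thread.sleep(100);
            }
        } finally {
            terminated.countDown();
        }
        // any error thrown in worker threads is rethrown on the main thread
        Throwable t = error.get();
        if (t instanceof Exception) {
            throw (Exception) t;
        } else if (t != null) {
            throw new Exception(t);
        }
    }

    /** Called from a different thread, e.g. SourceFunction#cancel(). */
    public void shutdown() throws InterruptedException {
        running = false;
        terminated.await(); // make sure all resources have been terminated before leaving
    }

    /** Worker threads report the error that terminated them to the main thread. */
    public void stopWithError(Throwable t) {
        error.compareAndSet(null, t); // preserve the original error, it is more useful
        running = false;
    }

    public static void main(String[] args) throws Exception {
        FetcherLifecycleSketch fetcher = new FetcherLifecycleSketch();
        Thread canceller = new Thread(() -> {
            try {
                Thread.sleep(500);
                fetcher.shutdown();
            } catch (InterruptedException ignored) {
            }
        });
        canceller.start();
        fetcher.run(); // returns only once shutdown() has flipped the running flag
        canceller.join();
    }
}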

Example 2 with RecordEmitter

Use of org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter in project flink by apache.

From class FlinkKinesisConsumerTest, method testSourceSynchronization.

@Test
public void testSourceSynchronization() throws Exception {
    final String streamName = "fakeStreamName";
    final Time maxOutOfOrderness = Time.milliseconds(5);
    final long autoWatermarkInterval = 1_000;
    final long watermarkSyncInterval = autoWatermarkInterval + 1;
    TestWatermarkTracker.WATERMARK.set(0);
    HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds = new HashMap<>();
    subscribedStreamsToLastDiscoveredShardIds.put(streamName, null);
    final KinesisDeserializationSchema<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(new OpenCheckingStringSchema());
    Properties props = new Properties();
    props.setProperty(ConsumerConfigConstants.AWS_REGION, "us-east-1");
    props.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS, Long.toString(10L));
    props.setProperty(ConsumerConfigConstants.WATERMARK_SYNC_MILLIS, Long.toString(watermarkSyncInterval));
    props.setProperty(ConsumerConfigConstants.WATERMARK_LOOKAHEAD_MILLIS, Long.toString(5));
    BlockingQueue<String> shard1 = new LinkedBlockingQueue<>();
    Map<String, List<BlockingQueue<String>>> streamToQueueMap = new HashMap<>();
    streamToQueueMap.put(streamName, Collections.singletonList(shard1));
    // override createFetcher to mock Kinesis
    FlinkKinesisConsumer<String> sourceFunc = new FlinkKinesisConsumer<String>(streamName, deserializationSchema, props) {

        @Override
        protected KinesisDataFetcher<String> createFetcher(List<String> streams, SourceFunction.SourceContext<String> sourceContext, RuntimeContext runtimeContext, Properties configProps, KinesisDeserializationSchema<String> deserializationSchema) {
            KinesisDataFetcher<String> fetcher = new KinesisDataFetcher<String>(streams, sourceContext, sourceContext.getCheckpointLock(), runtimeContext, configProps, deserializationSchema, getShardAssigner(), getPeriodicWatermarkAssigner(), getWatermarkTracker(), new AtomicReference<>(), new ArrayList<>(), subscribedStreamsToLastDiscoveredShardIds, (props) -> FakeKinesisBehavioursFactory.blockingQueueGetRecords(streamToQueueMap), null) {

                @Override
                protected void emitWatermark() {
                    // take the checkpoint lock so that the watermark state is consistent
                    // before the watermark timer callback is triggered
                    synchronized (sourceContext.getCheckpointLock()) {
                        super.emitWatermark();
                    }
                }
            };
            return fetcher;
        }
    };
    sourceFunc.setShardAssigner((streamShardHandle, i) -> {
        // shard ids look like "shardId-000000000000"; use the numeric suffix as the assignment index
        return Integer.parseInt(streamShardHandle.getShard().getShardId().substring("shardId-".length()));
    });
    sourceFunc.setPeriodicWatermarkAssigner(new TestTimestampExtractor(maxOutOfOrderness));
    sourceFunc.setWatermarkTracker(new TestWatermarkTracker());
    // there is currently no test harness specifically for sources,
    // so we overlay the source thread here
    AbstractStreamOperatorTestHarness<Object> testHarness = new AbstractStreamOperatorTestHarness<Object>(new StreamSource(sourceFunc), 1, 1, 0);
    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
    testHarness.getExecutionConfig().setAutoWatermarkInterval(autoWatermarkInterval);
    testHarness.initializeEmptyState();
    testHarness.open();
    final ConcurrentLinkedQueue<Object> results = testHarness.getOutput();
    final AtomicBoolean throwOnCollect = new AtomicBoolean();
    @SuppressWarnings("unchecked") SourceFunction.SourceContext<String> sourceContext = new CollectingSourceContext(testHarness.getCheckpointLock(), results) {

        @Override
        public void markAsTemporarilyIdle() {
        }

        @Override
        public void collect(Serializable element) {
            if (throwOnCollect.get()) {
                throw new RuntimeException("expected");
            }
            super.collect(element);
        }

        @Override
        public void emitWatermark(Watermark mark) {
            results.add(mark);
        }
    };
    final AtomicReference<Exception> sourceThreadError = new AtomicReference<>();
    new Thread(() -> {
        try {
            sourceFunc.run(sourceContext);
        } catch (InterruptedException e) {
            // expected on cancel
        } catch (Exception e) {
            sourceThreadError.set(e);
        }
    }).start();
    ArrayList<Object> expectedResults = new ArrayList<>();
    final long record1 = 1;
    shard1.put(Long.toString(record1));
    expectedResults.add(Long.toString(record1));
    awaitRecordCount(results, expectedResults.size());
    // at this point we know the fetcher was initialized
    final KinesisDataFetcher fetcher = org.powermock.reflect.Whitebox.getInternalState(sourceFunc, "fetcher");
    // trigger watermark emit
    testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval);
    expectedResults.add(new Watermark(-4));
    // verify watermark
    awaitRecordCount(results, expectedResults.size());
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    assertEquals(0, TestWatermarkTracker.WATERMARK.get());
    // trigger sync
    testHarness.setProcessingTime(testHarness.getProcessingTime() + 1);
    TestWatermarkTracker.assertGlobalWatermark(-4);
    final long record2 = record1 + (watermarkSyncInterval * 3) + 1;
    shard1.put(Long.toString(record2));
    // wait for the record to be buffered in the emitter
    final RecordEmitter<?> emitter = org.powermock.reflect.Whitebox.getInternalState(fetcher, "recordEmitter");
    RecordEmitter.RecordQueue emitterQueue = emitter.getQueue(0);
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    while (deadline.hasTimeLeft() && emitterQueue.getSize() < 1) {
        Thread.sleep(10);
    }
    assertEquals("first record received", 1, emitterQueue.getSize());
    // Advance the watermark. Since the new record is past global watermark + threshold,
    // it won't be emitted and the watermark does not advance
    testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval);
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    assertEquals(3000L, (long) org.powermock.reflect.Whitebox.getInternalState(fetcher, "nextWatermark"));
    TestWatermarkTracker.assertGlobalWatermark(-4);
    // Trigger global watermark sync
    testHarness.setProcessingTime(testHarness.getProcessingTime() + 1);
    expectedResults.add(Long.toString(record2));
    awaitRecordCount(results, expectedResults.size());
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    TestWatermarkTracker.assertGlobalWatermark(3000);
    // Trigger watermark update and emit
    testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval);
    expectedResults.add(new Watermark(3000));
    assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray()));
    // verify exception propagation
    Assert.assertNull(sourceThreadError.get());
    throwOnCollect.set(true);
    shard1.put(Long.toString(record2 + 1));
    deadline = Deadline.fromNow(Duration.ofSeconds(10));
    while (deadline.hasTimeLeft() && sourceThreadError.get() == null) {
        Thread.sleep(10);
    }
    Assert.assertNotNull(sourceThreadError.get());
    Assert.assertNotNull("expected", sourceThreadError.get().getMessage());
    sourceFunc.cancel();
    testHarness.close();
}
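
The test drives the emitter indirectly: it fetches the per-producer queue with getQueue(0), polls getSize() until the record is buffered, then advances processing time so the emitter releases it. For orientation, here is a minimal sketch of a RecordEmitter subclass, assuming only the API surface visible in these examples (a queue-capacity constructor, getQueue, setMaxLookaheadMillis, setCurrentWatermark, Runnable.run, stop); the printing emit() body and the main() driver are illustrative stand-ins for what the fetcher actually does under the checkpoint lock.

import org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter;
import org.apache.flink.streaming.runtime.operators.windowing.TimestampedValue;

// Sketch only: prints emitted records instead of forwarding them to a SourceContext.
public class PrintingRecordEmitter extends RecordEmitter<TimestampedValue<String>> {

    public PrintingRecordEmitter(int queueCapacity) {
        super(queueCapacity);
    }

    @Override
    public void emit(TimestampedValue<String> record, RecordQueue<TimestampedValue<String>> source) {
        // the Kinesis fetcher would synchronize on the checkpoint lock and call
        // sourceContext.collectWithTimestamp(...) here
        System.out.println("emit: " + record.getValue() + " @ " + record.getTimestamp());
    }

    public static void main(String[] args) throws Exception {
        PrintingRecordEmitter emitter = new PrintingRecordEmitter(16);
        emitter.setMaxLookaheadMillis(1000); // allow emitting up to 1s ahead of the watermark

        // the emitter loop runs on its own daemon thread, as in runFetcher() above
        Thread emitterThread = new Thread(emitter, "recordEmitter-sketch");
        emitterThread.setDaemon(true);
        emitterThread.start();

        // each producer (shard consumer) owns one queue, keyed by its index
        RecordEmitter.RecordQueue<TimestampedValue<String>> queue = emitter.getQueue(0);
        queue.put(new TimestampedValue<>("record-1", 5L));

        // records past watermark + lookahead are held back until the watermark advances
        emitter.setCurrentWatermark(10L);

        Thread.sleep(500); // give the emitter thread time to drain the queue
        emitter.stop();
    }
}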
