use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
the class SpoutWrapperTest method runAndExecuteFiniteSpout2.
@SuppressWarnings("unchecked")
@Test
public void runAndExecuteFiniteSpout2() throws Exception {
    final FiniteSpout stormSpout = mock(FiniteSpout.class);
    when(stormSpout.reachedEnd()).thenReturn(true, false, true, false, true, false, true);
    final StreamingRuntimeContext taskContext = mock(StreamingRuntimeContext.class);
    when(taskContext.getExecutionConfig()).thenReturn(mock(ExecutionConfig.class));
    when(taskContext.getTaskName()).thenReturn("name");
    final SpoutWrapper<?> wrapper = new SpoutWrapper<Object>(stormSpout);
    wrapper.setRuntimeContext(taskContext);
    wrapper.run(mock(SourceContext.class));
    verify(stormSpout, never()).nextTuple();
}
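The wrapper only touches the ExecutionConfig and the task name of its context, so a Mockito mock of StreamingRuntimeContext is enough to drive it outside a real Flink task. A minimal sketch of the same testing pattern applied to an ordinary RichSourceFunction; the MockedRuntimeContextTest and TaskNameSource classes below are illustrative, not part of Flink or the project above.

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.junit.Test;

public class MockedRuntimeContextTest {

    // Illustrative source that reads its task name from whatever runtime context was injected.
    static class TaskNameSource extends RichSourceFunction<String> {
        String observedTaskName;

        @Override
        public void run(SourceContext<String> ctx) {
            observedTaskName = getRuntimeContext().getTaskName();
        }

        @Override
        public void cancel() {}
    }

    @SuppressWarnings("unchecked")
    @Test
    public void functionSeesMockedContext() throws Exception {
        final StreamingRuntimeContext taskContext = mock(StreamingRuntimeContext.class);
        when(taskContext.getExecutionConfig()).thenReturn(new ExecutionConfig());
        when(taskContext.getTaskName()).thenReturn("mocked-task");

        final TaskNameSource source = new TaskNameSource();
        // inject the mock where a StreamTask would normally set the real context
        source.setRuntimeContext(taskContext);
        source.run(mock(SourceFunction.SourceContext.class));

        assertEquals("mocked-task", source.observedTaskName);
    }
}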
use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project rocketmq-externals by apache.
the class RocketMQSink method open.
@Override
public void open(Configuration parameters) throws Exception {
    Validate.notEmpty(props, "Producer properties can not be empty");
    Validate.notNull(topicSelector, "TopicSelector can not be null");
    Validate.notNull(serializationSchema, "KeyValueSerializationSchema can not be null");
    producer = new DefaultMQProducer();
    producer.setInstanceName(String.valueOf(getRuntimeContext().getIndexOfThisSubtask()));
    RocketMQConfig.buildProducerConfigs(props, producer);
    batchList = new LinkedList<>();
    if (batchFlushOnCheckpoint && !((StreamingRuntimeContext) getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        batchFlushOnCheckpoint = false;
    }
    try {
        producer.start();
    } catch (MQClientException e) {
        throw new RuntimeException(e);
    }
}
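The only reason this sink casts to StreamingRuntimeContext is the isCheckpointingEnabled() check: flushing on checkpoint is pointless when checkpointing is off, so the sink downgrades itself. A minimal sketch of the same guard in a generic RichSinkFunction, assuming nothing beyond the calls shown above; BufferingSink and its flushOnCheckpoint flag are illustrative.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;

// Illustrative sink that only defers flushing to checkpoints when checkpointing is actually on.
public class BufferingSink extends RichSinkFunction<String> {

    private boolean flushOnCheckpoint = true;

    @Override
    public void open(Configuration parameters) {
        RuntimeContext ctx = getRuntimeContext();
        // isCheckpointingEnabled() lives on StreamingRuntimeContext, not on the generic
        // RuntimeContext, hence the instanceof check and the cast.
        if (flushOnCheckpoint
                && ctx instanceof StreamingRuntimeContext
                && !((StreamingRuntimeContext) ctx).isCheckpointingEnabled()) {
            flushOnCheckpoint = false; // fall back to flushing every record eagerly
        }
    }

    @Override
    public void invoke(String value, Context context) {
        // buffer the value or write it out immediately, depending on flushOnCheckpoint (omitted)
    }
}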
use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
the class JobManagerWatermarkTracker method open.
@Override
public void open(RuntimeContext context) {
    super.open(context);
    this.aggregateFunction.updateTimeoutMillis = super.getUpdateTimeoutMillis();
    this.aggregateFunction.logAccumulatorIntervalMillis = logAccumulatorIntervalMillis;
    Preconditions.checkArgument(context instanceof StreamingRuntimeContext);
    StreamingRuntimeContext runtimeContext = (StreamingRuntimeContext) context;
    this.aggregateManager = runtimeContext.getGlobalAggregateManager();
}
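Here the cast exists only to reach getGlobalAggregateManager(), the handle to the JobManager-side aggregation service that the tracker uses to combine per-subtask watermarks. A hedged sketch of sharing a global minimum through that manager; the GlobalMinExample and MinAggregate classes and the aggregate name are illustrative, and updateGlobalAggregate is assumed to take the aggregate name, the subtask's value, and an AggregateFunction.

import java.io.IOException;

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.runtime.taskexecutor.GlobalAggregateManager;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;

public class GlobalMinExample {

    // Illustrative aggregate: the JobManager keeps the minimum of all values reported so far.
    static class MinAggregate implements AggregateFunction<Long, Long, Long> {
        @Override public Long createAccumulator() { return Long.MAX_VALUE; }
        @Override public Long add(Long value, Long accumulator) { return Math.min(value, accumulator); }
        @Override public Long getResult(Long accumulator) { return accumulator; }
        @Override public Long merge(Long a, Long b) { return Math.min(a, b); }
    }

    // Called from inside a rich function whose RuntimeContext is a StreamingRuntimeContext.
    static Long reportAndReadGlobalMin(StreamingRuntimeContext context, long localValue) throws IOException {
        GlobalAggregateManager manager = context.getGlobalAggregateManager();
        // every subtask reports its own value; the returned result reflects all values seen so far
        return manager.updateGlobalAggregate("global-min", localValue, new MinAggregate());
    }
}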
use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
the class FlinkKafkaProducer method initializeState.
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
    if (semantic != FlinkKafkaProducer.Semantic.NONE && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Using {} semantic, but checkpointing is not enabled. Switching to {} semantic.", semantic, FlinkKafkaProducer.Semantic.NONE);
        semantic = FlinkKafkaProducer.Semantic.NONE;
    }
    nextTransactionalIdHintState = context.getOperatorStateStore().getUnionListState(NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR_V2);
    if (context.getOperatorStateStore().getRegisteredStateNames().contains(NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR)) {
        migrateNextTransactionalIdHindState(context);
    }
    String actualTransactionalIdPrefix;
    if (this.transactionalIdPrefix != null) {
        actualTransactionalIdPrefix = this.transactionalIdPrefix;
    } else {
        String taskName = getRuntimeContext().getTaskName();
        // Kafka transactional IDs are limited in byte size to be less than the max value of
        // a short, so we truncate here if necessary to a more reasonable length string.
        if (taskName.length() > maxTaskNameSize) {
            taskName = taskName.substring(0, maxTaskNameSize);
            LOG.warn("Truncated task name for Kafka TransactionalId from {} to {}.", getRuntimeContext().getTaskName(), taskName);
        }
        actualTransactionalIdPrefix = taskName + "-" + ((StreamingRuntimeContext) getRuntimeContext()).getOperatorUniqueID();
    }
    transactionalIdsGenerator = new TransactionalIdsGenerator(actualTransactionalIdPrefix, getRuntimeContext().getIndexOfThisSubtask(), getRuntimeContext().getNumberOfParallelSubtasks(), kafkaProducersPoolSize, SAFE_SCALE_DOWN_FACTOR);
    if (semantic != FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
        nextTransactionalIdHint = null;
    } else {
        ArrayList<FlinkKafkaProducer.NextTransactionalIdHint> transactionalIdHints = Lists.newArrayList(nextTransactionalIdHintState.get());
        if (transactionalIdHints.size() > 1) {
            throw new IllegalStateException("There should be at most one next transactional id hint written by the first subtask");
        } else if (transactionalIdHints.size() == 0) {
            nextTransactionalIdHint = new FlinkKafkaProducer.NextTransactionalIdHint(0, 0);
            // this means that this is either:
            // (1) the first execution of this application
            // (2) previous execution has failed before first checkpoint completed
            //
            // in case of (2) we have to abort all previous transactions
            abortTransactions(transactionalIdsGenerator.generateIdsToAbort());
        } else {
            nextTransactionalIdHint = transactionalIdHints.get(0);
        }
    }
    super.initializeState(context);
}
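Besides the checkpointing guard, the notable StreamingRuntimeContext call here is getOperatorUniqueID(): it yields an identifier that is stable across restarts of the same job, which is what makes the derived transactional-id prefix safe to reuse when aborting stale Kafka transactions. A small sketch of the same derivation in isolation; the TransactionalIdPrefixes helper and its size cap are illustrative and simply mirror the truncation above.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;

public final class TransactionalIdPrefixes {

    private static final int MAX_TASK_NAME_SIZE = 1000; // illustrative cap, mirrors maxTaskNameSize above

    // Derives a prefix that stays the same when the job restarts, so transactions left over
    // from a previous execution can be identified and aborted.
    public static String derivePrefix(RuntimeContext context) {
        String taskName = context.getTaskName();
        if (taskName.length() > MAX_TASK_NAME_SIZE) {
            taskName = taskName.substring(0, MAX_TASK_NAME_SIZE);
        }
        // getOperatorUniqueID() is only available on the streaming runtime context
        String operatorId = ((StreamingRuntimeContext) context).getOperatorUniqueID();
        return taskName + "-" + operatorId;
    }
}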
use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
the class KinesisDataFetcher method runFetcher.
/**
 * Starts the fetcher. After starting the fetcher, it can only be stopped by calling {@link
 * KinesisDataFetcher#shutdownFetcher()}.
 *
 * @throws Exception the first error or exception thrown by the fetcher or any of the threads
 *     created by the fetcher.
 */
public void runFetcher() throws Exception {
    // check that we are running before proceeding
    if (!running) {
        return;
    }
    // ------------------------------------------------------------------------
    // Procedures before starting the infinite while loop:
    // ------------------------------------------------------------------------
    // 1. check that there is at least one shard in the subscribed streams to consume from
    //    (can be done by checking if at least one value in subscribedStreamsToLastDiscoveredShardIds is not null)
    boolean hasShards = false;
    StringBuilder streamsWithNoShardsFound = new StringBuilder();
    for (Map.Entry<String, String> streamToLastDiscoveredShardEntry : subscribedStreamsToLastDiscoveredShardIds.entrySet()) {
        if (streamToLastDiscoveredShardEntry.getValue() != null) {
            hasShards = true;
        } else {
            streamsWithNoShardsFound.append(streamToLastDiscoveredShardEntry.getKey()).append(", ");
        }
    }
    if (streamsWithNoShardsFound.length() != 0 && LOG.isWarnEnabled()) {
        LOG.warn("Subtask {} has failed to find any shards for the following subscribed streams: {}", indexOfThisConsumerSubtask, streamsWithNoShardsFound.toString());
    }
    if (!hasShards) {
        throw new RuntimeException("No shards can be found for all subscribed streams: " + streams);
    }
    // 2. start consuming any shard state we already have in the subscribedShardsState up to this point;
    //    the subscribedShardsState may already be seeded with values due to step 1, or explicitly added
    //    by the consumer using a restored state checkpoint
    for (int seededStateIndex = 0; seededStateIndex < subscribedShardsState.size(); seededStateIndex++) {
        KinesisStreamShardState seededShardState = subscribedShardsState.get(seededStateIndex);
        // only start a consuming thread if the seeded subscribed shard has not been completely read already
        if (!seededShardState.getLastProcessedSequenceNum().equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Subtask {} will start consuming seeded shard {} from sequence number {} with ShardConsumer {}", indexOfThisConsumerSubtask, seededShardState.getStreamShardHandle().toString(), seededShardState.getLastProcessedSequenceNum(), seededStateIndex);
            }
            StreamShardHandle streamShardHandle = subscribedShardsState.get(seededStateIndex).getStreamShardHandle();
            KinesisDeserializationSchema<T> shardDeserializationSchema = getClonedDeserializationSchema();
            shardDeserializationSchema.open(
                    RuntimeContextInitializationContextAdapters.deserializationAdapter(
                            runtimeContext,
                            // ignore the provided metric group
                            metricGroup -> consumerMetricGroup.addGroup("subtaskId", String.valueOf(indexOfThisConsumerSubtask)).addGroup("shardId", streamShardHandle.getShard().getShardId()).addGroup("user")));
            shardConsumersExecutor.submit(createShardConsumer(seededStateIndex, streamShardHandle, subscribedShardsState.get(seededStateIndex).getLastProcessedSequenceNum(), registerShardMetricGroup(consumerMetricGroup, subscribedShardsState.get(seededStateIndex)), shardDeserializationSchema));
        }
    }
    // start periodic watermark emitter, if a watermark assigner was configured
    if (periodicWatermarkAssigner != null) {
        long periodicWatermarkIntervalMillis = runtimeContext.getExecutionConfig().getAutoWatermarkInterval();
        if (periodicWatermarkIntervalMillis > 0) {
            ProcessingTimeService timerService = ((StreamingRuntimeContext) runtimeContext).getProcessingTimeService();
            LOG.info("Starting periodic watermark emitter with interval {}", periodicWatermarkIntervalMillis);
            new PeriodicWatermarkEmitter(timerService, periodicWatermarkIntervalMillis).start();
            if (watermarkTracker != null) {
                // setup global watermark tracking
                long watermarkSyncMillis = Long.parseLong(getConsumerConfiguration().getProperty(ConsumerConfigConstants.WATERMARK_SYNC_MILLIS, Long.toString(ConsumerConfigConstants.DEFAULT_WATERMARK_SYNC_MILLIS)));
                watermarkTracker.setUpdateTimeoutMillis(watermarkSyncMillis * 3); // synchronization latency
                watermarkTracker.open(runtimeContext);
                new WatermarkSyncCallback(timerService, watermarkSyncMillis).start();
                // emit records ahead of watermark to offset synchronization latency
                long lookaheadMillis = Long.parseLong(getConsumerConfiguration().getProperty(ConsumerConfigConstants.WATERMARK_LOOKAHEAD_MILLIS, Long.toString(0)));
                recordEmitter.setMaxLookaheadMillis(Math.max(lookaheadMillis, watermarkSyncMillis * 3));
                // record emitter depends on periodic watermark
                // it runs in a separate thread since main thread is used for discovery
                Runnable recordEmitterRunnable = new Runnable() {
                    @Override
                    public void run() {
                        try {
                            recordEmitter.run();
                        } catch (Throwable error) {
                            // report the error that terminated the emitter loop to the source thread
                            stopWithError(error);
                        }
                    }
                };
                Thread thread = new Thread(recordEmitterRunnable);
                thread.setName("recordEmitter-" + runtimeContext.getTaskNameWithSubtasks());
                thread.setDaemon(true);
                thread.start();
            }
        }
        this.shardIdleIntervalMillis = Long.parseLong(getConsumerConfiguration().getProperty(ConsumerConfigConstants.SHARD_IDLE_INTERVAL_MILLIS, Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_IDLE_INTERVAL_MILLIS)));
    }
    // ------------------------------------------------------------------------
    // finally, start the infinite shard discovery and consumer launching loop;
    // we will escape from this loop only when shutdownFetcher() or stopWithError() is called
    // TODO: have this thread emit the records for tracking backpressure
    final long discoveryIntervalMillis = Long.parseLong(configProps.getProperty(ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS, Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_DISCOVERY_INTERVAL_MILLIS)));
    if (this.numberOfActiveShards.get() == 0) {
        LOG.info("Subtask {} has no active shards to read on startup; marking the subtask as temporarily idle ...", indexOfThisConsumerSubtask);
        sourceContext.markAsTemporarilyIdle();
    }
    while (running) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Subtask {} is trying to discover new shards that were created due to resharding ...", indexOfThisConsumerSubtask);
        }
        List<StreamShardHandle> newShardsDueToResharding = discoverNewShardsToSubscribe();
        for (StreamShardHandle shard : newShardsDueToResharding) {
            // since there may be delay in discovering a new shard, all new shards due to
            // resharding should be read starting from the earliest record possible
            KinesisStreamShardState newShardState = new KinesisStreamShardState(convertToStreamShardMetadata(shard), shard, SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get());
            int newStateIndex = registerNewSubscribedShardState(newShardState);
            if (LOG.isInfoEnabled()) {
                LOG.info("Subtask {} has discovered a new shard {} due to resharding, and will start consuming the shard from sequence number {} with ShardConsumer {}", indexOfThisConsumerSubtask, newShardState.getStreamShardHandle().toString(), newShardState.getLastProcessedSequenceNum(), newStateIndex);
            }
            StreamShardHandle streamShardHandle = newShardState.getStreamShardHandle();
            KinesisDeserializationSchema<T> shardDeserializationSchema = getClonedDeserializationSchema();
            shardDeserializationSchema.open(
                    RuntimeContextInitializationContextAdapters.deserializationAdapter(
                            runtimeContext,
                            // ignore the provided metric group
                            metricGroup -> consumerMetricGroup.addGroup("subtaskId", String.valueOf(indexOfThisConsumerSubtask)).addGroup("shardId", streamShardHandle.getShard().getShardId()).addGroup("user")));
            shardConsumersExecutor.submit(createShardConsumer(newStateIndex, newShardState.getStreamShardHandle(), newShardState.getLastProcessedSequenceNum(), registerShardMetricGroup(consumerMetricGroup, newShardState), shardDeserializationSchema));
        }
        // we also check if we are running here so that we won't start the discovery sleep
        // interval if the running flag was set to false during the middle of the while loop
        if (running && discoveryIntervalMillis != 0) {
            try {
                cancelFuture.get(discoveryIntervalMillis, TimeUnit.MILLISECONDS);
                LOG.debug("Cancelled discovery");
            } catch (TimeoutException iex) {
                // timeout is expected when fetcher is not cancelled
            }
        }
    }
    // make sure all resources have been terminated before leaving
    try {
        awaitTermination();
    } catch (InterruptedException ie) {
        // If there is an original exception, preserve it, since that's more important/useful.
        this.error.compareAndSet(null, ie);
    }
    // any error thrown in the shard consumer threads will be thrown to the main thread
    Throwable throwable = this.error.get();
    if (throwable != null) {
        if (throwable instanceof Exception) {
            throw (Exception) throwable;
        } else if (throwable instanceof Error) {
            throw (Error) throwable;
        } else {
            throw new Exception(throwable);
        }
    }
}
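Within runFetcher, the StreamingRuntimeContext cast serves to obtain the ProcessingTimeService that drives the PeriodicWatermarkEmitter and the WatermarkSyncCallback. A hedged sketch of the same re-arming timer pattern in isolation; RepeatingCallback is illustrative, and the exact packages of ProcessingTimeService and ProcessingTimeCallback differ across Flink versions (the org.apache.flink.streaming.runtime.tasks variants, with registerTimer() and getCurrentProcessingTime(), are assumed here).

import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.apache.flink.streaming.runtime.tasks.ProcessingTimeCallback;
import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;

// Illustrative repeating timer, modelled on the PeriodicWatermarkEmitter used above.
public class RepeatingCallback implements ProcessingTimeCallback {

    private final ProcessingTimeService timerService;
    private final long intervalMillis;

    public RepeatingCallback(StreamingRuntimeContext context, long intervalMillis) {
        // the processing-time service is only reachable through the streaming runtime context
        this.timerService = context.getProcessingTimeService();
        this.intervalMillis = intervalMillis;
    }

    public void start() {
        timerService.registerTimer(timerService.getCurrentProcessingTime() + intervalMillis, this);
    }

    @Override
    public void onProcessingTime(long timestamp) {
        // do the periodic work here (e.g. emit a watermark), then re-arm the timer
        timerService.registerTimer(timerService.getCurrentProcessingTime() + intervalMillis, this);
    }
}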