Use of org.apache.flink.streaming.runtime.streamrecord.StreamElement in project flink by apache.
The class StreamInputProcessor, method processInput:
public boolean processInput() throws Exception {
    if (isFinished) {
        return false;
    }
    if (numRecordsIn == null) {
        numRecordsIn = ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter();
    }
    while (true) {
        if (currentRecordDeserializer != null) {
            DeserializationResult result = currentRecordDeserializer.getNextRecord(deserializationDelegate);
            if (result.isBufferConsumed()) {
                currentRecordDeserializer.getCurrentBuffer().recycle();
                currentRecordDeserializer = null;
            }
            if (result.isFullRecord()) {
                StreamElement recordOrMark = deserializationDelegate.getInstance();
                if (recordOrMark.isWatermark()) {
                    // handle watermark
                    statusWatermarkValve.inputWatermark(recordOrMark.asWatermark(), currentChannel);
                    continue;
                } else if (recordOrMark.isStreamStatus()) {
                    // handle stream status
                    statusWatermarkValve.inputStreamStatus(recordOrMark.asStreamStatus(), currentChannel);
                    continue;
                } else if (recordOrMark.isLatencyMarker()) {
                    // handle latency marker
                    synchronized (lock) {
                        streamOperator.processLatencyMarker(recordOrMark.asLatencyMarker());
                    }
                    continue;
                } else {
                    // now we can do the actual processing
                    StreamRecord<IN> record = recordOrMark.asRecord();
                    synchronized (lock) {
                        numRecordsIn.inc();
                        streamOperator.setKeyContextElement1(record);
                        streamOperator.processElement(record);
                    }
                    return true;
                }
            }
        }
        final BufferOrEvent bufferOrEvent = barrierHandler.getNextNonBlocked();
        if (bufferOrEvent != null) {
            if (bufferOrEvent.isBuffer()) {
                currentChannel = bufferOrEvent.getChannelIndex();
                currentRecordDeserializer = recordDeserializers[currentChannel];
                currentRecordDeserializer.setNextBuffer(bufferOrEvent.getBuffer());
            } else {
                // Event received
                final AbstractEvent event = bufferOrEvent.getEvent();
                if (event.getClass() != EndOfPartitionEvent.class) {
                    throw new IOException("Unexpected event: " + event);
                }
            }
        } else {
            isFinished = true;
            if (!barrierHandler.isEmpty()) {
                throw new IllegalStateException("Trailing data in checkpoint barrier handler.");
            }
            return false;
        }
    }
}
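The processInput loop above dispatches on StreamElement's self-describing type checks (isWatermark, isStreamStatus, isLatencyMarker) and casts (asWatermark, asLatencyMarker, asRecord). A minimal standalone sketch of that dispatch pattern, assuming only the StreamElement subclasses used in the snippet (the describe helper is hypothetical, added here purely for illustration):

import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.runtime.streamrecord.StreamElement;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

public class StreamElementDispatchSketch {

    // Hypothetical helper: mirrors the is*/as* dispatch used by processInput.
    static String describe(StreamElement element) {
        if (element.isWatermark()) {
            // watermarks carry only a timestamp
            return "watermark @ " + element.asWatermark().getTimestamp();
        } else if (element.isLatencyMarker()) {
            // latency markers are forwarded to the operator, not processed as data
            return "latency marker @ " + element.asLatencyMarker().getMarkedTime();
        } else if (element.isRecord()) {
            // regular data records are the ones handed to processElement(...)
            StreamRecord<?> record = element.asRecord();
            return "record " + record.getValue()
                    + (record.hasTimestamp() ? " @ " + record.getTimestamp() : "");
        } else {
            return "other element: " + element;
        }
    }

    public static void main(String[] args) {
        System.out.println(describe(new StreamRecord<>(42, 7L)));
        System.out.println(describe(new Watermark(100L)));
    }
}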
Use of org.apache.flink.streaming.runtime.streamrecord.StreamElement in project flink by apache.
The class StreamTwoInputProcessor, method processInput:
public boolean processInput() throws Exception {
    if (isFinished) {
        return false;
    }
    while (true) {
        if (currentRecordDeserializer != null) {
            DeserializationResult result;
            if (currentChannel < numInputChannels1) {
                result = currentRecordDeserializer.getNextRecord(deserializationDelegate1);
            } else {
                result = currentRecordDeserializer.getNextRecord(deserializationDelegate2);
            }
            if (result.isBufferConsumed()) {
                currentRecordDeserializer.getCurrentBuffer().recycle();
                currentRecordDeserializer = null;
            }
            if (result.isFullRecord()) {
                if (currentChannel < numInputChannels1) {
                    StreamElement recordOrWatermark = deserializationDelegate1.getInstance();
                    if (recordOrWatermark.isWatermark()) {
                        statusWatermarkValve1.inputWatermark(recordOrWatermark.asWatermark(), currentChannel);
                        continue;
                    } else if (recordOrWatermark.isStreamStatus()) {
                        statusWatermarkValve1.inputStreamStatus(recordOrWatermark.asStreamStatus(), currentChannel);
                        continue;
                    } else if (recordOrWatermark.isLatencyMarker()) {
                        synchronized (lock) {
                            streamOperator.processLatencyMarker1(recordOrWatermark.asLatencyMarker());
                        }
                        continue;
                    } else {
                        StreamRecord<IN1> record = recordOrWatermark.asRecord();
                        synchronized (lock) {
                            streamOperator.setKeyContextElement1(record);
                            streamOperator.processElement1(record);
                        }
                        return true;
                    }
                } else {
                    StreamElement recordOrWatermark = deserializationDelegate2.getInstance();
                    if (recordOrWatermark.isWatermark()) {
                        statusWatermarkValve2.inputWatermark(recordOrWatermark.asWatermark(), currentChannel - numInputChannels1);
                        continue;
                    } else if (recordOrWatermark.isStreamStatus()) {
                        statusWatermarkValve2.inputStreamStatus(recordOrWatermark.asStreamStatus(), currentChannel - numInputChannels1);
                        continue;
                    } else if (recordOrWatermark.isLatencyMarker()) {
                        synchronized (lock) {
                            streamOperator.processLatencyMarker2(recordOrWatermark.asLatencyMarker());
                        }
                        continue;
                    } else {
                        StreamRecord<IN2> record = recordOrWatermark.asRecord();
                        synchronized (lock) {
                            streamOperator.setKeyContextElement2(record);
                            streamOperator.processElement2(record);
                        }
                        return true;
                    }
                }
            }
        }
        final BufferOrEvent bufferOrEvent = barrierHandler.getNextNonBlocked();
        if (bufferOrEvent != null) {
            if (bufferOrEvent.isBuffer()) {
                currentChannel = bufferOrEvent.getChannelIndex();
                currentRecordDeserializer = recordDeserializers[currentChannel];
                currentRecordDeserializer.setNextBuffer(bufferOrEvent.getBuffer());
            } else {
                // Event received
                final AbstractEvent event = bufferOrEvent.getEvent();
                if (event.getClass() != EndOfPartitionEvent.class) {
                    throw new IOException("Unexpected event: " + event);
                }
            }
        } else {
            isFinished = true;
            if (!barrierHandler.isEmpty()) {
                throw new IllegalStateException("Trailing data in checkpoint barrier handler.");
            }
            return false;
        }
    }
}
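The two-input variant routes each element by its physical channel index: channels 0 through numInputChannels1 - 1 belong to input 1, and the remaining channels belong to input 2, re-indexed by subtracting the offset. A minimal sketch of just that mapping arithmetic (plain Java, no Flink types; routeChannel is a hypothetical helper):

public class ChannelRoutingSketch {

    // Mirrors the currentChannel < numInputChannels1 test used above.
    static String routeChannel(int currentChannel, int numInputChannels1) {
        if (currentChannel < numInputChannels1) {
            // channels 0 .. numInputChannels1-1 feed input 1 directly
            return "input 1, channel " + currentChannel;
        } else {
            // the remaining channels feed input 2, re-indexed from 0
            return "input 2, channel " + (currentChannel - numInputChannels1);
        }
    }

    public static void main(String[] args) {
        int numInputChannels1 = 3;
        for (int channel = 0; channel < 5; channel++) {
            System.out.println(channel + " -> " + routeChannel(channel, numInputChannels1));
        }
    }
}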
Use of org.apache.flink.streaming.runtime.streamrecord.StreamElement in project flink by apache.
The class EmitterTest, method testEmitterWithExceptions:
/**
* Tests that the emitter handles exceptions occurring in the {@link AsyncCollector} correctly.
*/
@Test
public void testEmitterWithExceptions() throws Exception {
    Object lock = new Object();
    List<StreamElement> list = new ArrayList<>();
    Output<StreamRecord<Integer>> output = new CollectorOutput<>(list);
    List<StreamElement> expected = Arrays.asList(new StreamRecord<>(1, 0L), new Watermark(3L));
    OperatorActions operatorActions = mock(OperatorActions.class);
    final int capacity = 3;
    StreamElementQueue queue = new OrderedStreamElementQueue(capacity, executor, operatorActions);
    final Emitter<Integer> emitter = new Emitter<>(lock, output, queue, operatorActions);
    final Thread emitterThread = new Thread(emitter);
    emitterThread.start();
    final Exception testException = new Exception("Test exception");
    try {
        StreamRecordQueueEntry<Integer> record1 = new StreamRecordQueueEntry<>(new StreamRecord<>(1, 0L));
        StreamRecordQueueEntry<Integer> record2 = new StreamRecordQueueEntry<>(new StreamRecord<>(2, 1L));
        WatermarkQueueEntry watermark1 = new WatermarkQueueEntry(new Watermark(3L));
        queue.put(record1);
        queue.put(record2);
        queue.put(watermark1);
        record2.collect(testException);
        record1.collect(Arrays.asList(1));
        synchronized (lock) {
            while (!queue.isEmpty()) {
                lock.wait();
            }
        }
        Assert.assertEquals(expected, list);
        ArgumentCaptor<Throwable> argumentCaptor = ArgumentCaptor.forClass(Throwable.class);
        verify(operatorActions).failOperator(argumentCaptor.capture());
        Throwable failureCause = argumentCaptor.getValue();
        Assert.assertNotNull(failureCause.getCause());
        Assert.assertTrue(failureCause.getCause() instanceof ExecutionException);
        Assert.assertNotNull(failureCause.getCause().getCause());
        Assert.assertEquals(testException, failureCause.getCause().getCause());
    } finally {
        emitter.stop();
        emitterThread.interrupt();
    }
}
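The final assertions check a specific wrapping chain: the Throwable passed to failOperator has an ExecutionException as its cause, and that ExecutionException in turn wraps the original test exception handed to record2.collect(...). A small sketch of that nesting in plain Java (the outer RuntimeException is a placeholder; the snippet does not show the concrete outer exception type the emitter uses):

import java.util.concurrent.ExecutionException;

public class FailureChainSketch {
    public static void main(String[] args) {
        Exception testException = new Exception("Test exception");

        // Placeholder reconstruction of the chain asserted by the test:
        // failureCause -> ExecutionException -> testException
        Throwable failureCause =
                new RuntimeException("emitter failure", new ExecutionException(testException));

        System.out.println(failureCause.getCause() instanceof ExecutionException); // true
        System.out.println(failureCause.getCause().getCause() == testException);  // true
    }
}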
Use of org.apache.flink.streaming.runtime.streamrecord.StreamElement in project flink by apache.
The class StreamSourceOperatorTest, method testLatencyMarkEmission:
/**
 * Tests that latency marks are emitted.
 */
@Test
public void testLatencyMarkEmission() throws Exception {
    final List<StreamElement> output = new ArrayList<>();
    final long maxProcessingTime = 100L;
    final long latencyMarkInterval = 10L;
    final TestProcessingTimeService testProcessingTimeService = new TestProcessingTimeService();
    testProcessingTimeService.setCurrentTime(0L);
    final List<Long> processingTimes = Arrays.asList(1L, 10L, 11L, 21L, maxProcessingTime);
    // regular stream source operator
    final StreamSource<Long, ProcessingTimeServiceSource> operator = new StreamSource<>(new ProcessingTimeServiceSource(testProcessingTimeService, processingTimes));
    // emit latency marks every 10 milliseconds
    setupSourceOperator(operator, TimeCharacteristic.EventTime, 0, latencyMarkInterval, testProcessingTimeService);
    // run and wait to be stopped
    operator.run(new Object(), mock(StreamStatusMaintainer.class), new CollectorOutput<Long>(output));
    int numberLatencyMarkers = (int) (maxProcessingTime / latencyMarkInterval) + 1;
    assertEquals(
        numberLatencyMarkers + 1, // + 1 is the final watermark element
        output.size());
    long timestamp = 0L;
    int i = 0;
    // check that the output contains only latency markers plus a final watermark
    for (; i < output.size() - 1; i++) {
        StreamElement se = output.get(i);
        Assert.assertTrue(se.isLatencyMarker());
        Assert.assertEquals(-1, se.asLatencyMarker().getVertexID());
        Assert.assertEquals(0, se.asLatencyMarker().getSubtaskIndex());
        Assert.assertTrue(se.asLatencyMarker().getMarkedTime() == timestamp);
        timestamp += latencyMarkInterval;
    }
    Assert.assertTrue(output.get(i).isWatermark());
}
Use of org.apache.flink.streaming.runtime.streamrecord.StreamElement in project flink by apache.
The class BootstrapStreamTask, method processInput:
@Override
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
    StreamElement element = input.take();
    if (element.isRecord()) {
        StreamRecord<IN> streamRecord = element.asRecord();
        mainOperator.setKeyContextElement1(streamRecord);
        mainOperator.processElement(streamRecord);
    } else {
        mainOperator.endInput();
        mainOperator.finish();
        controller.suspendDefaultAction();
        mailboxProcessor.suspend();
    }
}
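Here any non-record StreamElement signals end of input. A minimal sketch of the same take-and-dispatch shape, using a java.util.concurrent BlockingQueue as a stand-in for the task's input (the snippet does not show its concrete type), a StreamRecord as the data element, and a Watermark as an arbitrary non-record terminator:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.runtime.streamrecord.StreamElement;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

public class BootstrapInputSketch {
    public static void main(String[] args) throws InterruptedException {
        // Stand-in for the task's input queue.
        BlockingQueue<StreamElement> input = new ArrayBlockingQueue<>(4);
        input.put(new StreamRecord<>("payload", 1L));
        input.put(new Watermark(Long.MAX_VALUE)); // any non-record element ends the input here

        while (true) {
            StreamElement element = input.take();
            if (element.isRecord()) {
                // the real task forwards this to setKeyContextElement1/processElement
                System.out.println("process " + element.asRecord().getValue());
            } else {
                // the real task calls endInput()/finish() and suspends the mailbox loop
                System.out.println("end of input");
                break;
            }
        }
    }
}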