Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
The class MultiInputSortingDataInputsTest, method twoInputOrderTest.
@SuppressWarnings("unchecked")
public void twoInputOrderTest(int preferredIndex, int sortedIndex) throws Exception {
    CollectingDataOutput<Object> collectingDataOutput = new CollectingDataOutput<>();
    List<StreamElement> sortedInputElements = Arrays.asList(
            new StreamRecord<>(1, 3), new StreamRecord<>(1, 1), new StreamRecord<>(2, 1),
            new StreamRecord<>(2, 3), new StreamRecord<>(1, 2), new StreamRecord<>(2, 2),
            Watermark.MAX_WATERMARK);
    CollectionDataInput<Integer> sortedInput =
            new CollectionDataInput<>(sortedInputElements, sortedIndex);
    List<StreamElement> preferredInputElements = Arrays.asList(
            new StreamRecord<>(99, 3), new StreamRecord<>(99, 1), new Watermark(99L));
    CollectionDataInput<Integer> preferredInput =
            new CollectionDataInput<>(preferredInputElements, preferredIndex);
    KeySelector<Integer, Integer> keySelector = value -> value;
    try (MockEnvironment environment = MockEnvironment.builder().build()) {
        SelectableSortingInputs selectableSortingInputs = MultiInputSortingDataInput.wrapInputs(
                new DummyInvokable(),
                new StreamTaskInput[] {sortedInput},
                new KeySelector[] {keySelector},
                new TypeSerializer[] {new IntSerializer()},
                new IntSerializer(),
                new StreamTaskInput[] {preferredInput},
                environment.getMemoryManager(),
                environment.getIOManager(),
                true,
                1.0,
                new Configuration(),
                new ExecutionConfig());
        StreamTaskInput<?>[] sortingDataInputs = selectableSortingInputs.getSortedInputs();
        StreamTaskInput<?>[] preferredDataInputs = selectableSortingInputs.getPassThroughInputs();
        try (StreamTaskInput<Object> preferredTaskInput =
                        (StreamTaskInput<Object>) preferredDataInputs[0];
                StreamTaskInput<Object> sortedTaskInput =
                        (StreamTaskInput<Object>) sortingDataInputs[0]) {
            MultipleInputSelectionHandler selectionHandler = new MultipleInputSelectionHandler(
                    selectableSortingInputs.getInputSelectable(), 2);
            @SuppressWarnings("rawtypes")
            StreamOneInputProcessor[] inputProcessors = new StreamOneInputProcessor[2];
            inputProcessors[preferredIndex] = new StreamOneInputProcessor<>(
                    preferredTaskInput, collectingDataOutput, new DummyOperatorChain());
            inputProcessors[sortedIndex] = new StreamOneInputProcessor<>(
                    sortedTaskInput, collectingDataOutput, new DummyOperatorChain());
            StreamMultipleInputProcessor processor =
                    new StreamMultipleInputProcessor(selectionHandler, inputProcessors);
            DataInputStatus inputStatus;
            do {
                inputStatus = processor.processInput();
            } while (inputStatus != DataInputStatus.END_OF_INPUT);
        }
    }
    assertThat(
            collectingDataOutput.events,
            equalTo(Arrays.asList(
                    new StreamRecord<>(99, 3),
                    new StreamRecord<>(99, 1),
                    // watermark from the preferred input
                    new Watermark(99L),
                    new StreamRecord<>(1, 1),
                    new StreamRecord<>(1, 2),
                    new StreamRecord<>(1, 3),
                    new StreamRecord<>(2, 1),
                    new StreamRecord<>(2, 2),
                    new StreamRecord<>(2, 3),
                    // max watermark from the sorted input
                    Watermark.MAX_WATERMARK)));
}
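twoInputOrderTest is a parameterized helper rather than a test itself. A minimal sketch of how it might be driven (the @Test method names here are hypothetical, not verbatim Flink source); the point is that the expected output is the same regardless of which index the pass-through ("preferred") input and the sorted input are assigned to:

    @Test
    public void preferredInputAtIndexZero() throws Exception {
        // pass-through input wired to index 0, sorted input to index 1
        twoInputOrderTest(0, 1);
    }

    @Test
    public void preferredInputAtIndexOne() throws Exception {
        // same scenario with the indices swapped
        twoInputOrderTest(1, 0);
    }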
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
The class SortingDataInputTest, method simpleVariableLengthKeySorting.
@Test
public void simpleVariableLengthKeySorting() throws Exception {
    CollectingDataOutput<Integer> collectingDataOutput = new CollectingDataOutput<>();
    CollectionDataInput<Integer> input = new CollectionDataInput<>(
            Arrays.asList(
                    new StreamRecord<>(1, 3), new StreamRecord<>(1, 1), new StreamRecord<>(2, 1),
                    new StreamRecord<>(2, 3), new StreamRecord<>(1, 2), new StreamRecord<>(2, 2)));
    MockEnvironment environment = MockEnvironment.builder().build();
    SortingDataInput<Integer, String> sortingDataInput = new SortingDataInput<>(
            input,
            new IntSerializer(),
            new StringSerializer(),
            (KeySelector<Integer, String>) value -> "" + value,
            environment.getMemoryManager(),
            environment.getIOManager(),
            true,
            1.0,
            new Configuration(),
            new DummyInvokable(),
            new ExecutionConfig());
    DataInputStatus inputStatus;
    do {
        inputStatus = sortingDataInput.emitNext(collectingDataOutput);
    } while (inputStatus != DataInputStatus.END_OF_INPUT);
    assertThat(
            collectingDataOutput.events,
            equalTo(Arrays.asList(
                    new StreamRecord<>(1, 1), new StreamRecord<>(1, 2), new StreamRecord<>(1, 3),
                    new StreamRecord<>(2, 1), new StreamRecord<>(2, 2), new StreamRecord<>(2, 3))));
}
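The test above exercises a variable-length String key. For contrast, a fixed-length key (here an int, serialized to a constant 4 bytes) exercises the sorter's fixed-width key path. The following is a sketch modeled on the test above, not verbatim Flink source:

    @Test
    public void fixedLengthKeySortingSketch() throws Exception {
        CollectingDataOutput<Integer> collectingDataOutput = new CollectingDataOutput<>();
        CollectionDataInput<Integer> input = new CollectionDataInput<>(
                Arrays.asList(
                        new StreamRecord<>(2, 2), new StreamRecord<>(1, 2),
                        new StreamRecord<>(2, 1), new StreamRecord<>(1, 1)));
        MockEnvironment environment = MockEnvironment.builder().build();
        SortingDataInput<Integer, Integer> sortingDataInput = new SortingDataInput<>(
                input,
                new IntSerializer(),
                new IntSerializer(), // fixed-length (4-byte) key serializer
                (KeySelector<Integer, Integer>) value -> value,
                environment.getMemoryManager(),
                environment.getIOManager(),
                true,
                1.0,
                new Configuration(),
                new DummyInvokable(),
                new ExecutionConfig());
        DataInputStatus inputStatus;
        do {
            inputStatus = sortingDataInput.emitNext(collectingDataOutput);
        } while (inputStatus != DataInputStatus.END_OF_INPUT);
        // records come out ordered by key first, then by timestamp within a key
        assertThat(collectingDataOutput.events, equalTo(Arrays.asList(
                new StreamRecord<>(1, 1), new StreamRecord<>(1, 2),
                new StreamRecord<>(2, 1), new StreamRecord<>(2, 2))));
    }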
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
The class SerializerComparatorTestData, method getOrderedIntTestData.
@SuppressWarnings("unchecked")
static Tuple2<byte[], StreamRecord<Integer>>[] getOrderedIntTestData() {
    IntSerializer intSerializer = new IntSerializer();
    DataOutputSerializer outputSerializer = new DataOutputSerializer(intSerializer.getLength());
    return IntStream.range(-10, 10)
            .mapToObj(idx -> {
                try {
                    intSerializer.serialize(idx, outputSerializer);
                    byte[] copyOfBuffer = outputSerializer.getCopyOfBuffer();
                    outputSerializer.clear();
                    return Tuple2.of(copyOfBuffer, new StreamRecord<>(idx, idx));
                } catch (IOException e) {
                    throw new AssertionError(e);
                }
            })
            .toArray(Tuple2[]::new);
}
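Each Tuple2 in this fixture pairs the serialized key bytes with the record they were derived from. A sketch of one way to consume it, deserializing each key via org.apache.flink.core.memory.DataInputDeserializer and checking it matches the record's value (the test name is illustrative, not verbatim Flink source):

    @Test
    public void orderedIntTestDataRoundTrips() throws Exception {
        IntSerializer intSerializer = new IntSerializer();
        for (Tuple2<byte[], StreamRecord<Integer>> entry :
                SerializerComparatorTestData.getOrderedIntTestData()) {
            // the serialized key bytes must deserialize back to the record's value
            DataInputDeserializer in = new DataInputDeserializer(entry.f0);
            Assert.assertEquals(entry.f1.getValue(), intSerializer.deserialize(in));
        }
    }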
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
The class AsyncWaitOperatorTest, method testRestartWithFullQueue.
/**
 * Tests that the AsyncWaitOperator can restart if the checkpointed queue was full.
 *
 * <p>See FLINK-7949
 */
@Test(timeout = 10000)
public void testRestartWithFullQueue() throws Exception {
    final int capacity = 10;

    // 1. create the snapshot which contains capacity + 1 elements
    final CompletableFuture<Void> trigger = new CompletableFuture<>();
    final OneInputStreamOperatorTestHarness<Integer, Integer> snapshotHarness =
            createTestHarness(
                    // the ControllableAsyncFunction blocks until the trigger future completes
                    new ControllableAsyncFunction<>(trigger),
                    1000L,
                    capacity,
                    AsyncDataStream.OutputMode.ORDERED);
    snapshotHarness.open();

    final OperatorSubtaskState snapshot;
    final ArrayList<Integer> expectedOutput = new ArrayList<>(capacity + 1);
    try {
        synchronized (snapshotHarness.getCheckpointLock()) {
            for (int i = 0; i < capacity; i++) {
                snapshotHarness.processElement(i, 0L);
                expectedOutput.add(i);
            }
        }
        expectedOutput.add(capacity);

        // write the exceeding element from a second thread; it blocks inside
        // processElement because the queue is already full
        final OneShotLatch lastElement = new OneShotLatch();
        final CheckedThread lastElementWriter = new CheckedThread() {
            @Override
            public void go() throws Exception {
                synchronized (snapshotHarness.getCheckpointLock()) {
                    lastElement.trigger();
                    snapshotHarness.processElement(capacity, 0L);
                }
            }
        };
        lastElementWriter.start();
        lastElement.await();

        synchronized (snapshotHarness.getCheckpointLock()) {
            // execute the snapshot within the checkpoint lock, because then it is guaranteed
            // that the lastElementWriter has written the exceeding element
            snapshot = snapshotHarness.snapshot(0L, 0L);
        }
        // trigger the computation to make the close call finish
        trigger.complete(null);
    } finally {
        synchronized (snapshotHarness.getCheckpointLock()) {
            snapshotHarness.close();
        }
    }

    // 2. restore the snapshot and check that we complete
    final OneInputStreamOperatorTestHarness<Integer, Integer> recoverHarness =
            createTestHarness(
                    new ControllableAsyncFunction<>(CompletableFuture.completedFuture(null)),
                    1000L,
                    capacity,
                    AsyncDataStream.OutputMode.ORDERED);
    recoverHarness.initializeState(snapshot);
    synchronized (recoverHarness.getCheckpointLock()) {
        recoverHarness.open();
    }
    synchronized (recoverHarness.getCheckpointLock()) {
        recoverHarness.endInput();
        recoverHarness.close();
    }

    final ConcurrentLinkedQueue<Object> output = recoverHarness.getOutput();
    final List<Integer> outputElements = output.stream()
            .map(r -> ((StreamRecord<Integer>) r).getValue())
            .collect(Collectors.toList());
    assertThat(outputElements, Matchers.equalTo(expectedOutput));
}
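The test hinges on ControllableAsyncFunction, whose behavior can be inferred from how it is driven: each asyncInvoke completes its result only once the shared trigger future completes, which is what keeps the operator's queue full until the test releases it. A sketch under that assumption, not verbatim Flink source:

    private static class ControllableAsyncFunction<IN> implements AsyncFunction<IN, IN> {

        private final CompletableFuture<Void> trigger;

        private ControllableAsyncFunction(CompletableFuture<Void> trigger) {
            this.trigger = Preconditions.checkNotNull(trigger);
        }

        @Override
        public void asyncInvoke(IN input, ResultFuture<IN> resultFuture) {
            // hold the result back until the test completes the trigger
            trigger.thenAccept(v -> resultFuture.complete(Collections.singleton(input)));
        }
    }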
Use of org.apache.flink.streaming.runtime.streamrecord.StreamRecord in project flink by apache.
The class UnorderedStreamElementQueueTest, method testCompletionOrder.
/**
 * Tests that only elements before the oldest watermark are returned if they are completed.
 */
@Test
public void testCompletionOrder() {
    final UnorderedStreamElementQueue<Integer> queue = new UnorderedStreamElementQueue<>(8);

    ResultFuture<Integer> record1 = putSuccessfully(queue, new StreamRecord<>(1, 0L));
    ResultFuture<Integer> record2 = putSuccessfully(queue, new StreamRecord<>(2, 1L));
    putSuccessfully(queue, new Watermark(2L));
    ResultFuture<Integer> record3 = putSuccessfully(queue, new StreamRecord<>(3, 3L));
    ResultFuture<Integer> record4 = putSuccessfully(queue, new StreamRecord<>(4, 4L));
    putSuccessfully(queue, new Watermark(5L));
    ResultFuture<Integer> record5 = putSuccessfully(queue, new StreamRecord<>(5, 6L));
    ResultFuture<Integer> record6 = putSuccessfully(queue, new StreamRecord<>(6, 7L));

    Assert.assertEquals(Collections.emptyList(), popCompleted(queue));
    Assert.assertEquals(8, queue.size());
    Assert.assertFalse(queue.isEmpty());

    // this should not make any item completed, because R3 is behind W1
    record3.complete(Arrays.asList(13));

    Assert.assertEquals(Collections.emptyList(), popCompleted(queue));
    Assert.assertEquals(8, queue.size());
    Assert.assertFalse(queue.isEmpty());

    record2.complete(Arrays.asList(12));

    Assert.assertEquals(Arrays.asList(new StreamRecord<>(12, 1L)), popCompleted(queue));
    Assert.assertEquals(7, queue.size());
    Assert.assertFalse(queue.isEmpty());

    // these should not be emitted yet, because R1 has not been completed
    record6.complete(Arrays.asList(16));
    record4.complete(Arrays.asList(14));

    Assert.assertEquals(Collections.emptyList(), popCompleted(queue));
    Assert.assertEquals(7, queue.size());
    Assert.assertFalse(queue.isEmpty());

    // now R1, W1, R3, R4, W2 and R6 are completed and should be pollable
    record1.complete(Arrays.asList(11));

    Assert.assertEquals(
            Arrays.asList(
                    new StreamRecord<>(11, 0L),
                    new Watermark(2L),
                    new StreamRecord<>(13, 3L),
                    new StreamRecord<>(14, 4L),
                    new Watermark(5L),
                    new StreamRecord<>(16, 7L)),
            popCompleted(queue));
    Assert.assertEquals(1, queue.size());
    Assert.assertFalse(queue.isEmpty());

    // only R5 is left in the queue
    record5.complete(Arrays.asList(15));

    Assert.assertEquals(Arrays.asList(new StreamRecord<>(15, 6L)), popCompleted(queue));
    Assert.assertEquals(0, queue.size());
    Assert.assertTrue(queue.isEmpty());
    Assert.assertEquals(Collections.emptyList(), popCompleted(queue));
}
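putSuccessfully and popCompleted are helpers of the test class; sketches consistent with their usage above (assumptions, not verbatim Flink source): putSuccessfully asserts that the queue accepted the element and returns the ResultFuture used to complete it later, while popCompleted drains everything the queue is currently willing to emit:

    private static ResultFuture<Integer> putSuccessfully(
            StreamElementQueue<Integer> queue, StreamElement streamElement) {
        // tryPut returns an empty Optional when the queue is full
        Optional<ResultFuture<Integer>> resultFuture = queue.tryPut(streamElement);
        Assert.assertTrue(resultFuture.isPresent());
        return resultFuture.get();
    }

    private static List<StreamElement> popCompleted(StreamElementQueue<Integer> queue) {
        // collect every element that is emittable right now
        final List<StreamElement> completed = new ArrayList<>();
        final TimestampedCollector<Integer> collector =
                new TimestampedCollector<>(new CollectorOutput<>(completed));
        while (queue.hasCompletedElements()) {
            queue.emitCompletedElement(collector);
        }
        collector.close();
        return completed;
    }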