Use of org.apache.flink.api.java.typeutils.TupleTypeInfo in project flink by apache.
The class SourceStreamTaskTest, method testCheckpointing.
/**
* This test ensures that the SourceStreamTask properly serializes checkpointing
* and element emission. This also verifies that there are no concurrent invocations
* of the checkpoint method on the source operator.
*
* The source emits elements and performs checkpoints. We have several checkpointer threads
* that fire checkpoint requests at the source task.
*
* If element emission and checkpointing were not serialized with respect to each other, the
* element count at the beginning of a checkpoint would differ from the count at its end,
* because the source would have kept emitting elements while the checkpoint was ongoing.
*/
@Test
@SuppressWarnings("unchecked")
public void testCheckpointing() throws Exception {
    final int NUM_ELEMENTS = 100;
    final int NUM_CHECKPOINTS = 100;
    final int NUM_CHECKPOINTERS = 1;
    // in ms
    final int CHECKPOINT_INTERVAL = 5;
    // how many random values we sum up in storeCheckpoint
    final int SOURCE_CHECKPOINT_DELAY = 1000;
    // in ms
    final int SOURCE_READ_DELAY = 1;

    ExecutorService executor = Executors.newFixedThreadPool(10);
    try {
        final TupleTypeInfo<Tuple2<Long, Integer>> typeInfo =
                new TupleTypeInfo<Tuple2<Long, Integer>>(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);

        final SourceStreamTask<Tuple2<Long, Integer>, SourceFunction<Tuple2<Long, Integer>>,
                StreamSource<Tuple2<Long, Integer>, SourceFunction<Tuple2<Long, Integer>>>> sourceTask =
                        new SourceStreamTask<>();

        final StreamTaskTestHarness<Tuple2<Long, Integer>> testHarness =
                new StreamTaskTestHarness<Tuple2<Long, Integer>>(sourceTask, typeInfo);
        testHarness.setupOutputForSingletonOperatorChain();

        StreamConfig streamConfig = testHarness.getStreamConfig();
        StreamSource<Tuple2<Long, Integer>, ?> sourceOperator =
                new StreamSource<>(new MockSource(NUM_ELEMENTS, SOURCE_CHECKPOINT_DELAY, SOURCE_READ_DELAY));
        streamConfig.setStreamOperator(sourceOperator);

        // prepare the checkpointer result futures
        Future<Boolean>[] checkpointerResults = new Future[NUM_CHECKPOINTERS];

        // invoke this first, so the tasks are actually running when the checkpoints are scheduled
        testHarness.invoke();

        for (int i = 0; i < NUM_CHECKPOINTERS; i++) {
            checkpointerResults[i] = executor.submit(new Checkpointer(NUM_CHECKPOINTS, CHECKPOINT_INTERVAL, sourceTask));
        }
        testHarness.waitForTaskCompletion();

        // collect the checkpointer results; any exception thrown by a checkpointer will be rethrown here
        for (int i = 0; i < NUM_CHECKPOINTERS; i++) {
            if (!checkpointerResults[i].isDone()) {
                checkpointerResults[i].cancel(true);
            }
            if (!checkpointerResults[i].isCancelled()) {
                checkpointerResults[i].get();
            }
        }

        List<Tuple2<Long, Integer>> resultElements = TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
        Assert.assertEquals(NUM_ELEMENTS, resultElements.size());
    } finally {
        executor.shutdown();
    }
}
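The test above builds its TupleTypeInfo explicitly from BasicTypeInfo constants, while the driver tests below infer it from a sample element instead. A minimal sketch of the two approaches side by side (the Tuple2<Long, Integer> field types are purely illustrative):

// explicit construction from the field type infos
TupleTypeInfo<Tuple2<Long, Integer>> explicit =
        new TupleTypeInfo<Tuple2<Long, Integer>>(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);

// inference from a sample object, as in the driver tests below (needs an unchecked cast)
@SuppressWarnings("unchecked")
TupleTypeInfo<Tuple2<Long, Integer>> inferred =
        (TupleTypeInfo<Tuple2<Long, Integer>>) TypeExtractor.getForObject(new Tuple2<Long, Integer>(1L, 1));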
Use of org.apache.flink.api.java.typeutils.TupleTypeInfo in project flink by apache.
The class GroupReduceDriverTest, method testAllReduceDriverMutable.
@Test
public void testAllReduceDriverMutable() {
    try {
        TestTaskContext<GroupReduceFunction<Tuple2<StringValue, IntValue>, Tuple2<StringValue, IntValue>>, Tuple2<StringValue, IntValue>> context =
                new TestTaskContext<GroupReduceFunction<Tuple2<StringValue, IntValue>, Tuple2<StringValue, IntValue>>, Tuple2<StringValue, IntValue>>();

        List<Tuple2<StringValue, IntValue>> data = DriverTestData.createReduceMutableData();
        TupleTypeInfo<Tuple2<StringValue, IntValue>> typeInfo =
                (TupleTypeInfo<Tuple2<StringValue, IntValue>>) TypeExtractor.getForObject(data.get(0));
        MutableObjectIterator<Tuple2<StringValue, IntValue>> input =
                new RegularToMutableObjectIterator<Tuple2<StringValue, IntValue>>(data.iterator(), typeInfo.createSerializer(new ExecutionConfig()));
        TypeComparator<Tuple2<StringValue, IntValue>> comparator =
                typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());
        GatheringCollector<Tuple2<StringValue, IntValue>> result =
                new GatheringCollector<Tuple2<StringValue, IntValue>>(typeInfo.createSerializer(new ExecutionConfig()));

        context.setDriverStrategy(DriverStrategy.SORTED_GROUP_REDUCE);
        context.setInput1(input, typeInfo.createSerializer(new ExecutionConfig()));
        context.setComparator1(comparator);
        context.setCollector(result);
        context.setUdf(new ConcatSumMutableReducer());

        GroupReduceDriver<Tuple2<StringValue, IntValue>, Tuple2<StringValue, IntValue>> driver =
                new GroupReduceDriver<Tuple2<StringValue, IntValue>, Tuple2<StringValue, IntValue>>();
        driver.setup(context);
        driver.prepare();
        driver.run();

        Object[] res = result.getList().toArray();
        Object[] expected = DriverTestData.createReduceMutableDataGroupedResult().toArray();
        DriverTestData.compareTupleArrays(expected, res);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
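The four arguments to createComparator in the test above select the grouping key and its sort behavior. Spelled out with comments (a sketch of the same call, using the signature inherited from Flink's CompositeType):

TypeComparator<Tuple2<StringValue, IntValue>> comparator = typeInfo.createComparator(
        new int[] { 0 },         // logical positions of the key fields (field 0, the StringValue)
        new boolean[] { true },  // sort direction per key field (true = ascending)
        0,                       // offset of the first key field in the flattened logical schema
        new ExecutionConfig());  // config consulted when instantiating the comparator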
Use of org.apache.flink.api.java.typeutils.TupleTypeInfo in project flink by apache.
The class ReduceCombineDriverTest, method testImmutableEmpty.
@Test
public void testImmutableEmpty() {
    try {
        TestTaskContext<ReduceFunction<Tuple2<String, Integer>>, Tuple2<String, Integer>> context =
                new TestTaskContext<ReduceFunction<Tuple2<String, Integer>>, Tuple2<String, Integer>>(1024 * 1024);
        context.getTaskConfig().setRelativeMemoryDriver(0.5);

        List<Tuple2<String, Integer>> data = DriverTestData.createReduceImmutableData();
        Collections.shuffle(data);
        TupleTypeInfo<Tuple2<String, Integer>> typeInfo =
                (TupleTypeInfo<Tuple2<String, Integer>>) TypeExtractor.getForObject(data.get(0));
        MutableObjectIterator<Tuple2<String, Integer>> input = EmptyMutableObjectIterator.get();

        context.setDriverStrategy(DriverStrategy.SORTED_PARTIAL_REDUCE);
        TypeComparator<Tuple2<String, Integer>> comparator =
                typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());
        GatheringCollector<Tuple2<String, Integer>> result =
                new GatheringCollector<Tuple2<String, Integer>>(typeInfo.createSerializer(new ExecutionConfig()));

        context.setInput1(input, typeInfo.createSerializer(new ExecutionConfig()));
        context.setComparator1(comparator);
        context.setCollector(result);

        ReduceCombineDriver<Tuple2<String, Integer>> driver = new ReduceCombineDriver<Tuple2<String, Integer>>();
        driver.setup(context);
        driver.prepare();
        driver.run();

        Assert.assertEquals(0, result.getList().size());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
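This test drives the combiner against an empty input via EmptyMutableObjectIterator; a run with actual data would adapt a regular iterator instead, as testAllReduceDriverMutable above does. A minimal sketch of both variants (`data` and `typeInfo` as defined in the test above):

// empty input, as used in this test
MutableObjectIterator<Tuple2<String, Integer>> empty = EmptyMutableObjectIterator.get();

// non-empty input: adapt a plain Iterator into a MutableObjectIterator
MutableObjectIterator<Tuple2<String, Integer>> input =
        new RegularToMutableObjectIterator<Tuple2<String, Integer>>(
                data.iterator(), typeInfo.createSerializer(new ExecutionConfig()));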
Use of org.apache.flink.api.java.typeutils.TupleTypeInfo in project flink by apache.
The class LargeRecordHandlerTest, method testRecordHandlerSingleKey.
@Test
public void testRecordHandlerSingleKey() {
    final IOManager ioMan = new IOManagerAsync();
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 24;
    final int NUM_RECORDS = 25000;
    try {
        final MemoryManager memMan = new MemoryManager(NUM_PAGES * PAGE_SIZE, 1, PAGE_SIZE, MemoryType.HEAP, true);
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> initialMemory = memMan.allocatePages(owner, 6);
        final List<MemorySegment> sortMemory = memMan.allocatePages(owner, NUM_PAGES - 6);

        final TupleTypeInfo<Tuple2<Long, String>> typeInfo =
                (TupleTypeInfo<Tuple2<Long, String>>) TypeInfoParser.<Tuple2<Long, String>>parse("Tuple2<Long, String>");
        final TypeSerializer<Tuple2<Long, String>> serializer = typeInfo.createSerializer(new ExecutionConfig());
        final TypeComparator<Tuple2<Long, String>> comparator =
                typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());

        LargeRecordHandler<Tuple2<Long, String>> handler =
                new LargeRecordHandler<Tuple2<Long, String>>(serializer, comparator, ioMan, memMan, initialMemory, owner, 128);
        assertFalse(handler.hasData());

        // add the test data
        Random rnd = new Random();
        for (int i = 0; i < NUM_RECORDS; i++) {
            long val = rnd.nextLong();
            handler.addRecord(new Tuple2<Long, String>(val, String.valueOf(val)));
            assertTrue(handler.hasData());
        }

        MutableObjectIterator<Tuple2<Long, String>> sorted = handler.finishWriteAndSortKeys(sortMemory);

        // adding records after the sort has started must fail
        try {
            handler.addRecord(new Tuple2<Long, String>(92L, "peter pepper"));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }

        Tuple2<Long, String> previous = null;
        Tuple2<Long, String> next;
        while ((next = sorted.next(null)) != null) {
            // key and value must be equal
            assertTrue(next.f0.equals(Long.parseLong(next.f1)));
            // order must be correct
            if (previous != null) {
                assertTrue(previous.f0 <= next.f0);
            }
            previous = next;
        }

        handler.close();
        assertFalse(handler.hasData());
        // closing a second time must be harmless
        handler.close();

        // adding records after close must fail as well
        try {
            handler.addRecord(new Tuple2<Long, String>(92L, "peter pepper"));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        assertTrue(memMan.verifyEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        ioMan.shutdown();
    }
}
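Unlike the earlier examples, this test obtains its TupleTypeInfo by parsing a type string with TypeInfoParser (an API that later Flink versions deprecated). An explicit construction yields equivalent type information without string parsing (a sketch):

// equivalent explicit construction of the same tuple type information
TupleTypeInfo<Tuple2<Long, String>> typeInfo =
        new TupleTypeInfo<Tuple2<Long, String>>(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);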
Use of org.apache.flink.api.java.typeutils.TupleTypeInfo in project flink by apache.
The class LargeRecordHandlerTest, method testEmptyRecordHandler.
@Test
public void testEmptyRecordHandler() {
    final IOManager ioMan = new IOManagerAsync();
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 50;
    try {
        final MemoryManager memMan = new MemoryManager(NUM_PAGES * PAGE_SIZE, 1, PAGE_SIZE, MemoryType.HEAP, true);
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> memory = memMan.allocatePages(owner, NUM_PAGES);

        final TupleTypeInfo<Tuple2<Long, String>> typeInfo =
                (TupleTypeInfo<Tuple2<Long, String>>) TypeInfoParser.<Tuple2<Long, String>>parse("Tuple2<Long, String>");
        final TypeSerializer<Tuple2<Long, String>> serializer = typeInfo.createSerializer(new ExecutionConfig());
        final TypeComparator<Tuple2<Long, String>> comparator =
                typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());

        LargeRecordHandler<Tuple2<Long, String>> handler =
                new LargeRecordHandler<Tuple2<Long, String>>(serializer, comparator, ioMan, memMan, memory, owner, 128);
        assertFalse(handler.hasData());

        handler.close();
        assertFalse(handler.hasData());
        // closing a second time must be harmless
        handler.close();

        // adding records after close must fail
        try {
            handler.addRecord(new Tuple2<Long, String>(92L, "peter pepper"));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        assertTrue(memMan.verifyEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        ioMan.shutdown();
    }
}