use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.
the class TaskManagerComponentsStartupShutdownTest method testComponentsStartupShutdown.
/**
* Makes sure that all components are shut down when the TaskManager
* actor is shut down.
*/
@Test
public void testComponentsStartupShutdown() {
final String[] TMP_DIR = new String[] { ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH };
final Time timeout = Time.seconds(100);
final int BUFFER_SIZE = 32 * 1024;
Configuration config = new Configuration();
config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "200 ms");
config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "1 s");
config.setInteger(ConfigConstants.AKKA_WATCH_THRESHOLD, 1);
ActorSystem actorSystem = null;
try {
actorSystem = AkkaUtils.createLocalActorSystem(config);
final ActorRef jobManager = JobManager.startJobManagerActors(
        config,
        actorSystem,
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        JobManager.class,
        MemoryArchivist.class)._1();
FlinkResourceManager.startResourceManagerActors(
        config,
        actorSystem,
        LeaderRetrievalUtils.createLeaderRetrievalService(config, jobManager),
        StandaloneResourceManager.class);
final int numberOfSlots = 1;
// create the components for the TaskManager manually
final TaskManagerConfiguration tmConfig = new TaskManagerConfiguration(
        numberOfSlots,
        TMP_DIR,
        timeout,
        null,
        Time.milliseconds(500),
        Time.seconds(30),
        Time.seconds(10), // cleanup interval
        1000000,
        config,
        false); // exit-jvm-on-fatal-error
final NetworkEnvironmentConfiguration netConf = new NetworkEnvironmentConfiguration(
        32, BUFFER_SIZE, MemoryType.HEAP, IOManager.IOMode.SYNC, 0, 0, 2, 8, null);
ResourceID taskManagerId = ResourceID.generate();
final TaskManagerLocation connectionInfo = new TaskManagerLocation(taskManagerId, InetAddress.getLocalHost(), 10000);
final MemoryManager memManager = new MemoryManager(32 * BUFFER_SIZE, 1, BUFFER_SIZE, MemoryType.HEAP, false);
final IOManager ioManager = new IOManagerAsync(TMP_DIR);
final NetworkEnvironment network = new NetworkEnvironment(
        new NetworkBufferPool(netConf.numNetworkBuffers(), netConf.networkBufferSize(), netConf.memoryType()),
        new LocalConnectionManager(),
        new ResultPartitionManager(),
        new TaskEventDispatcher(),
        new KvStateRegistry(),
        null,
        netConf.ioMode(),
        netConf.partitionRequestInitialBackoff(),
        netConf.partitionRequestMaxBackoff(),
        netConf.networkBuffersPerChannel(),
        netConf.extraNetworkBuffersPerGate());
network.start();
LeaderRetrievalService leaderRetrievalService = new StandaloneLeaderRetrievalService(jobManager.path().toString());
MetricRegistryConfiguration metricRegistryConfiguration = MetricRegistryConfiguration.fromConfiguration(config);
// create the task manager
final Props tmProps = Props.create(
        TaskManager.class,
        tmConfig,
        taskManagerId,
        connectionInfo,
        memManager,
        ioManager,
        network,
        numberOfSlots,
        leaderRetrievalService,
        new MetricRegistry(metricRegistryConfiguration));
final ActorRef taskManager = actorSystem.actorOf(tmProps);
new JavaTestKit(actorSystem) {
{
// wait for the TaskManager to be registered
new Within(new FiniteDuration(5000, TimeUnit.SECONDS)) {
@Override
protected void run() {
taskManager.tell(TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), getTestActor());
expectMsgEquals(TaskManagerMessages.getRegisteredAtJobManagerMessage());
}
};
}
};
// shut down all actors and the actor system: kill the TaskManager first, then the JobManager
taskManager.tell(Kill.getInstance(), ActorRef.noSender());
jobManager.tell(Kill.getInstance(), ActorRef.noSender());
// shut down the actor system and wait for it to terminate
actorSystem.shutdown();
actorSystem.awaitTermination();
actorSystem = null;
// now that the TaskManager is shut down, the components should be shut down as well
assertTrue(network.isShutdown());
assertTrue(ioManager.isProperlyShutDown());
assertTrue(memManager.isShutdown());
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
if (actorSystem != null) {
actorSystem.shutdown();
}
}
}
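The assertions above capture the shutdown contract of the three manually wired components. For reference, the IOManager part of that contract can be exercised in isolation; a minimal sketch, assuming standalone use outside the actor system (class name and failure handling are illustrative, not from the test):

import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;

public class IOManagerShutdownSketch {
    public static void main(String[] args) {
        // the no-arg constructor spills to java.io.tmpdir
        IOManager ioManager = new IOManagerAsync();
        try {
            // ... open channels, schedule reads and writes ...
        } finally {
            ioManager.shutdown();
            // isProperlyShutDown() reports whether all reader/writer threads
            // and channels terminated cleanly, which is what the test asserts
            if (!ioManager.isProperlyShutDown()) {
                throw new IllegalStateException("IOManager did not shut down cleanly");
            }
        }
    }
}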
use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.
the class HashTableRecordWidthCombinations method main.
public static void main(String[] args) throws Exception {
@SuppressWarnings("unchecked")
final TypeSerializer<Tuple2<Long, byte[]>> buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>(
        (Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class,
        new TypeSerializer<?>[] { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE });
final TypeSerializer<Long> probeSerializer = LongSerializer.INSTANCE;
final TypeComparator<Tuple2<Long, byte[]>> buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(
        new int[] { 0 },
        new TypeComparator<?>[] { new LongComparator(true) },
        new TypeSerializer<?>[] { LongSerializer.INSTANCE });
final TypeComparator<Long> probeComparator = new LongComparator(true);
final TypePairComparator<Long, Tuple2<Long, byte[]>> pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {
private long ref;
@Override
public void setReference(Long reference) {
ref = reference;
}
@Override
public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
//noinspection UnnecessaryUnboxing
return candidate.f0.longValue() == ref;
}
@Override
public int compareToReference(Tuple2<Long, byte[]> candidate) {
long x = ref;
long y = candidate.f0;
return (x < y) ? -1 : ((x == y) ? 0 : 1);
}
};
final IOManager ioMan = new IOManagerAsync();
try {
final int pageSize = 32 * 1024;
final int numSegments = 34;
for (int num = 3400; num < 3550; num++) {
final int numRecords = num;
for (int recordLen = 270; recordLen < 320; recordLen++) {
final byte[] payload = new byte[recordLen - 8 - 4];
System.out.println("testing " + numRecords + " / " + recordLen);
List<MemorySegment> memory = getMemory(numSegments, pageSize);
// We create a hash table that thinks the records are super large. That makes it
// initially choose a lot of memory for the partition buffers and start with a
// smaller hash table, so we trigger hash table growth early.
MutableHashTable<Tuple2<Long, byte[]>, Long> table = new MutableHashTable<>(
        buildSerializer, probeSerializer, buildComparator, probeComparator,
        pairComparator, memory, ioMan, 16, false);
final MutableObjectIterator<Tuple2<Long, byte[]>> buildInput = new MutableObjectIterator<Tuple2<Long, byte[]>>() {
private int count = 0;
@Override
public Tuple2<Long, byte[]> next(Tuple2<Long, byte[]> reuse) {
return next();
}
@Override
public Tuple2<Long, byte[]> next() {
if (count++ < numRecords) {
return new Tuple2<>(42L, payload);
} else {
return null;
}
}
};
// probe side
final MutableObjectIterator<Long> probeInput = new MutableObjectIterator<Long>() {
private final long numRecords = 10000;
private long value = 0;
@Override
public Long next(Long aLong) {
return next();
}
@Override
public Long next() {
if (value < numRecords) {
return value++;
} else {
return null;
}
}
};
table.open(buildInput, probeInput);
try {
while (table.nextRecord()) {
MutableObjectIterator<Tuple2<Long, byte[]>> matches = table.getBuildSideIterator();
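// drain all build-side matches for this probe record; the test only cares
// that iteration completes without an error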
while (matches.next() != null) ;
}
} catch (RuntimeException e) {
if (!e.getMessage().contains("exceeded maximum number of recursions")) {
throw e;
}
} finally {
table.close();
}
// make sure no temp files are left
checkNoTempFilesRemain(ioMan);
}
}
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
ioMan.shutdown();
}
}
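The loop relies on two helpers, getMemory and checkNoTempFilesRemain, that are defined elsewhere in the test class. Plausible sketches, assuming unpooled heap segments and the IOManager's spilling-directory accessor (imports assumed: org.apache.flink.core.memory.MemorySegment and MemorySegmentFactory, java.io.File, java.util.ArrayList, java.util.Arrays, java.util.List):

// Assumed implementation of the getMemory helper used above.
private static List<MemorySegment> getMemory(int numSegments, int segmentSize) {
    List<MemorySegment> memory = new ArrayList<>(numSegments);
    for (int i = 0; i < numSegments; i++) {
        // unpooled heap segments are sufficient for a standalone test
        memory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
    }
    return memory;
}

// Assumed implementation of the temp-file check: once the table is closed,
// every spilling directory managed by the IOManager should be empty.
private static void checkNoTempFilesRemain(IOManager ioMan) {
    for (File dir : ioMan.getSpillingDirectories()) {
        String[] files = dir.list();
        if (files != null && files.length > 0) {
            fail("hash table did not clean up temp files: " + Arrays.toString(files));
        }
    }
}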
use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.
the class MassiveStringValueSorting method testStringValueSorting.
public void testStringValueSorting() {
File input = null;
File sorted = null;
try {
// the source file
input = generateFileWithStrings(300000, "http://some-uri.com/that/is/a/common/prefix/to/all");
// the sorted file
sorted = File.createTempFile("sorted_strings", "txt");
String[] command = { "/bin/bash", "-c", "export LC_ALL=\"C\" && cat \"" + input.getAbsolutePath() + "\" | sort > \"" + sorted.getAbsolutePath() + "\"" };
Process p = null;
try {
p = Runtime.getRuntime().exec(command);
int retCode = p.waitFor();
if (retCode != 0) {
throw new Exception("Command failed with return code " + retCode);
}
p = null;
} finally {
if (p != null) {
p.destroy();
}
}
// sort the data
UnilateralSortMerger<StringValue> sorter = null;
BufferedReader reader = null;
BufferedReader verifyReader = null;
IOManager ioMan = null;
try {
MemoryManager mm = new MemoryManager(1024 * 1024, 1);
ioMan = new IOManagerAsync();
TypeSerializer<StringValue> serializer = new CopyableValueSerializer<StringValue>(StringValue.class);
TypeComparator<StringValue> comparator = new CopyableValueComparator<StringValue>(true, StringValue.class);
reader = new BufferedReader(new FileReader(input));
MutableObjectIterator<StringValue> inputIterator = new StringValueReaderMutableObjectIterator(reader);
sorter = new UnilateralSortMerger<StringValue>(
        mm, ioMan, inputIterator, new DummyInvokable(),
        new RuntimeSerializerFactory<StringValue>(serializer, StringValue.class),
        comparator, 1.0, 4, 0.8f,
        true, /* use large record handler */
        true);
MutableObjectIterator<StringValue> sortedData = sorter.getIterator();
reader.close();
// verify
verifyReader = new BufferedReader(new FileReader(sorted));
String nextVerify;
StringValue nextFromFlinkSort = new StringValue();
while ((nextVerify = verifyReader.readLine()) != null) {
nextFromFlinkSort = sortedData.next(nextFromFlinkSort);
Assert.assertNotNull(nextFromFlinkSort);
Assert.assertEquals(nextVerify, nextFromFlinkSort.getValue());
}
} finally {
if (reader != null) {
reader.close();
}
if (verifyReader != null) {
verifyReader.close();
}
if (sorter != null) {
sorter.close();
}
if (ioMan != null) {
ioMan.shutdown();
}
}
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
Assert.fail(e.getMessage());
} finally {
if (input != null) {
//noinspection ResultOfMethodCallIgnored
input.delete();
}
if (sorted != null) {
//noinspection ResultOfMethodCallIgnored
sorted.delete();
}
}
}
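The sorter is fed through a StringValueReaderMutableObjectIterator, which is not shown in the snippet. A hypothetical version that wraps the BufferedReader, producing one StringValue per input line:

// Hypothetical sketch of the reader iterator referenced above; the real class
// is defined elsewhere in the test.
static final class StringValueReaderMutableObjectIterator implements MutableObjectIterator<StringValue> {

    private final BufferedReader reader;

    StringValueReaderMutableObjectIterator(BufferedReader reader) {
        this.reader = reader;
    }

    @Override
    public StringValue next(StringValue reuse) throws IOException {
        String line = reader.readLine();
        if (line == null) {
            // end of input
            return null;
        }
        reuse.setValue(line);
        return reuse;
    }

    @Override
    public StringValue next() throws IOException {
        return next(new StringValue());
    }
}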
use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.
the class LargeRecordHandlerTest method testRecordHandlerSingleKey.
@Test
public void testRecordHandlerSingleKey() {
final IOManager ioMan = new IOManagerAsync();
final int PAGE_SIZE = 4 * 1024;
final int NUM_PAGES = 24;
final int NUM_RECORDS = 25000;
try {
final MemoryManager memMan = new MemoryManager(NUM_PAGES * PAGE_SIZE, 1, PAGE_SIZE, MemoryType.HEAP, true);
final AbstractInvokable owner = new DummyInvokable();
final List<MemorySegment> initialMemory = memMan.allocatePages(owner, 6);
final List<MemorySegment> sortMemory = memMan.allocatePages(owner, NUM_PAGES - 6);
final TupleTypeInfo<Tuple2<Long, String>> typeInfo =
        (TupleTypeInfo<Tuple2<Long, String>>) TypeInfoParser.<Tuple2<Long, String>>parse("Tuple2<Long, String>");
final TypeSerializer<Tuple2<Long, String>> serializer = typeInfo.createSerializer(new ExecutionConfig());
final TypeComparator<Tuple2<Long, String>> comparator =
        typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());
LargeRecordHandler<Tuple2<Long, String>> handler = new LargeRecordHandler<Tuple2<Long, String>>(
        serializer, comparator, ioMan, memMan, initialMemory, owner, 128);
assertFalse(handler.hasData());
// add the test data
Random rnd = new Random();
for (int i = 0; i < NUM_RECORDS; i++) {
long val = rnd.nextLong();
handler.addRecord(new Tuple2<Long, String>(val, String.valueOf(val)));
assertTrue(handler.hasData());
}
MutableObjectIterator<Tuple2<Long, String>> sorted = handler.finishWriteAndSortKeys(sortMemory);
try {
handler.addRecord(new Tuple2<Long, String>(92L, "peter pepper"));
fail("should throw an exception");
} catch (IllegalStateException e) {
// expected
}
Tuple2<Long, String> previous = null;
Tuple2<Long, String> next;
while ((next = sorted.next(null)) != null) {
// key and value must be equal
assertTrue(next.f0.equals(Long.parseLong(next.f1)));
// order must be correct
if (previous != null) {
assertTrue(previous.f0 <= next.f0);
}
previous = next;
}
handler.close();
assertFalse(handler.hasData());
handler.close();
try {
handler.addRecord(new Tuple2<Long, String>(92L, "peter pepper"));
fail("should throw an exception");
} catch (IllegalStateException e) {
// expected
}
assertTrue(memMan.verifyEmpty());
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
ioMan.shutdown();
}
}
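Both LargeRecordHandler tests build their MemoryManager with the same five-argument constructor. A minimal annotation of that call, assuming the signature (memorySize, numberOfSlots, pageSize, memoryType, preAllocateMemory); the comments are descriptive and not from the source:

final MemoryManager memMan = new MemoryManager(
        NUM_PAGES * PAGE_SIZE, // total managed memory, in bytes
        1,                     // number of slots sharing that memory
        PAGE_SIZE,             // size of a single memory segment (page)
        MemoryType.HEAP,       // allocate segments on the JVM heap
        true);                 // pre-allocate all pages eagerly at startup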
use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.
the class LargeRecordHandlerTest method testEmptyRecordHandler.
@Test
public void testEmptyRecordHandler() {
final IOManager ioMan = new IOManagerAsync();
final int PAGE_SIZE = 4 * 1024;
final int NUM_PAGES = 50;
try {
final MemoryManager memMan = new MemoryManager(NUM_PAGES * PAGE_SIZE, 1, PAGE_SIZE, MemoryType.HEAP, true);
final AbstractInvokable owner = new DummyInvokable();
final List<MemorySegment> memory = memMan.allocatePages(owner, NUM_PAGES);
final TupleTypeInfo<Tuple2<Long, String>> typeInfo =
        (TupleTypeInfo<Tuple2<Long, String>>) TypeInfoParser.<Tuple2<Long, String>>parse("Tuple2<Long, String>");
final TypeSerializer<Tuple2<Long, String>> serializer = typeInfo.createSerializer(new ExecutionConfig());
final TypeComparator<Tuple2<Long, String>> comparator =
        typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());
LargeRecordHandler<Tuple2<Long, String>> handler = new LargeRecordHandler<Tuple2<Long, String>>(
        serializer, comparator, ioMan, memMan, memory, owner, 128);
assertFalse(handler.hasData());
handler.close();
assertFalse(handler.hasData());
handler.close();
try {
handler.addRecord(new Tuple2<Long, String>(92L, "peter pepper"));
fail("should throw an exception");
} catch (IllegalStateException e) {
// expected
}
assertTrue(memMan.verifyEmpty());
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
ioMan.shutdown();
}
}