Use of io.prestosql.execution.TaskId in project hetu-core by openlookeng.
From class TestTaskExecutor, method testLevelContributionCap.
@Test
public void testLevelContributionCap() {
    MultilevelSplitQueue splitQueue = new MultilevelSplitQueue(2);
    TaskHandle handle0 = new TaskHandle(new TaskId("test0", 0, 0), splitQueue, () -> 1, 1, new Duration(1, SECONDS), OptionalInt.empty());
    TaskHandle handle1 = new TaskHandle(new TaskId("test1", 0, 0), splitQueue, () -> 1, 1, new Duration(1, SECONDS), OptionalInt.empty());

    for (int i = 0; i < (LEVEL_THRESHOLD_SECONDS.length - 1); i++) {
        long levelAdvanceTime = SECONDS.toNanos(LEVEL_THRESHOLD_SECONDS[i + 1] - LEVEL_THRESHOLD_SECONDS[i]);

        handle0.addScheduledNanos(levelAdvanceTime);
        assertEquals(handle0.getPriority().getLevel(), i + 1);

        handle1.addScheduledNanos(levelAdvanceTime);
        assertEquals(handle1.getPriority().getLevel(), i + 1);

        assertEquals(splitQueue.getLevelScheduledTime(i), 2 * Math.min(levelAdvanceTime, LEVEL_CONTRIBUTION_CAP));
        assertEquals(splitQueue.getLevelScheduledTime(i + 1), 0);
    }
}
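Note: the key assertion is 2 * Math.min(levelAdvanceTime, LEVEL_CONTRIBUTION_CAP): both handles contribute their scheduled time to the level's counter, but each individual contribution is clamped to the cap, so one long-running split cannot inflate a level's accounted time without bound. A minimal sketch of that accounting rule, with an assumed cap value and illustrative names (not the actual MultilevelSplitQueue internals):

import java.util.concurrent.TimeUnit;

public class CappedLevelAccounting {
    // assumed cap value, for illustration only
    static final long LEVEL_CONTRIBUTION_CAP = TimeUnit.SECONDS.toNanos(30);

    private final long[] levelScheduledNanos;

    public CappedLevelAccounting(int levels) {
        this.levelScheduledNanos = new long[levels];
    }

    // each update is clamped before it is added to the level's counter
    public void addScheduledNanos(int level, long deltaNanos) {
        levelScheduledNanos[level] += Math.min(deltaNanos, LEVEL_CONTRIBUTION_CAP);
    }

    public long getLevelScheduledTime(int level) {
        return levelScheduledNanos[level];
    }
}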
Use of io.prestosql.execution.TaskId in project hetu-core by openlookeng.
From class TestTaskExecutor, method testLevelMultipliers.
@Test(invocationCount = 100)
public void testLevelMultipliers() throws Exception {
    TestingTicker ticker = new TestingTicker();
    TaskExecutor taskExecutor = new TaskExecutor(1, 3, 3, 4, new MultilevelSplitQueue(2), ticker);
    taskExecutor.start();
    ticker.increment(20, MILLISECONDS);

    try {
        for (int i = 0; i < (LEVEL_THRESHOLD_SECONDS.length - 1); i++) {
            TaskHandle[] taskHandles = {
                    taskExecutor.addTask(new TaskId("test1", 0, 0), () -> 0, 10, new Duration(1, MILLISECONDS), OptionalInt.empty()),
                    taskExecutor.addTask(new TaskId("test2", 0, 0), () -> 0, 10, new Duration(1, MILLISECONDS), OptionalInt.empty()),
                    taskExecutor.addTask(new TaskId("test3", 0, 0), () -> 0, 10, new Duration(1, MILLISECONDS), OptionalInt.empty())};

            // move task 0 to the next level
            TestingJob task0Job = new TestingJob(ticker, new Phaser(1), new Phaser(), new Phaser(), 1, LEVEL_THRESHOLD_SECONDS[i + 1] * 1000);
            taskExecutor.enqueueSplits(taskHandles[0], true, ImmutableList.of(task0Job));

            // move tasks 1 and 2 to this level
            TestingJob task1Job = new TestingJob(ticker, new Phaser(1), new Phaser(), new Phaser(), 1, LEVEL_THRESHOLD_SECONDS[i] * 1000);
            taskExecutor.enqueueSplits(taskHandles[1], true, ImmutableList.of(task1Job));
            TestingJob task2Job = new TestingJob(ticker, new Phaser(1), new Phaser(), new Phaser(), 1, LEVEL_THRESHOLD_SECONDS[i] * 1000);
            taskExecutor.enqueueSplits(taskHandles[2], true, ImmutableList.of(task2Job));

            task0Job.getCompletedFuture().get();
            task1Job.getCompletedFuture().get();
            task2Job.getCompletedFuture().get();

            // then, start new drivers for all tasks
            Phaser globalPhaser = new Phaser(2);
            int phasesForNextLevel = LEVEL_THRESHOLD_SECONDS[i + 1] - LEVEL_THRESHOLD_SECONDS[i];
            TestingJob[] drivers = new TestingJob[6];
            for (int j = 0; j < 6; j++) {
                drivers[j] = new TestingJob(ticker, globalPhaser, new Phaser(), new Phaser(), phasesForNextLevel, 1000);
            }

            taskExecutor.enqueueSplits(taskHandles[0], true, ImmutableList.of(drivers[0], drivers[1]));
            taskExecutor.enqueueSplits(taskHandles[1], true, ImmutableList.of(drivers[2], drivers[3]));
            taskExecutor.enqueueSplits(taskHandles[2], true, ImmutableList.of(drivers[4], drivers[5]));

            // run all six drivers until one finishes, checking the level time ratio along the way
            int lowerLevelStart = drivers[2].getCompletedPhases() + drivers[3].getCompletedPhases() + drivers[4].getCompletedPhases() + drivers[5].getCompletedPhases();
            int higherLevelStart = drivers[0].getCompletedPhases() + drivers[1].getCompletedPhases();
            while (Arrays.stream(drivers).noneMatch(TestingJob::isFinished)) {
                globalPhaser.arriveAndAwaitAdvance();

                int lowerLevelEnd = drivers[2].getCompletedPhases() + drivers[3].getCompletedPhases() + drivers[4].getCompletedPhases() + drivers[5].getCompletedPhases();
                int lowerLevelTime = lowerLevelEnd - lowerLevelStart;
                int higherLevelEnd = drivers[0].getCompletedPhases() + drivers[1].getCompletedPhases();
                int higherLevelTime = higherLevelEnd - higherLevelStart;

                if (higherLevelTime > 20) {
                    assertGreaterThan(lowerLevelTime, (higherLevelTime * 2) - 10);
                    assertLessThan(higherLevelTime, (lowerLevelTime * 2) + 10);
                }
            }

            try {
                globalPhaser.arriveAndDeregister();
            } catch (IllegalStateException e) {
                // under high concurrency the deregister call can occur after the phaser
                // has already terminated; this is harmless and can be ignored
            }

            taskExecutor.removeTask(taskHandles[0]);
            taskExecutor.removeTask(taskHandles[1]);
            taskExecutor.removeTask(taskHandles[2]);
        }
    } finally {
        taskExecutor.stop();
    }
}
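Note: the assertions encode the intent of the level time multiplier. With a multiplier of 2, tasks at level i should collectively receive about twice the scheduled time of tasks at level i + 1, within a small tolerance for scheduling noise (the check only applies once higherLevelTime exceeds 20 phases). A sketch of the target shares such a multiplier implies, assuming each level is weighted by multiplier raised to its distance from the deepest level; this is illustrative arithmetic, not the scheduler's actual accounting:

public class LevelShares {
    // relative CPU share each level should receive, normalized to sum to 1
    public static double[] targetShares(int levels, double multiplier) {
        double[] shares = new double[levels];
        double total = 0;
        for (int level = 0; level < levels; level++) {
            shares[level] = Math.pow(multiplier, levels - 1 - level);
            total += shares[level];
        }
        for (int level = 0; level < levels; level++) {
            shares[level] /= total;
        }
        return shares;
    }

    public static void main(String[] args) {
        // with 5 levels and multiplier 2, the weights are 16:8:4:2:1
        for (double share : targetShares(5, 2.0)) {
            System.out.printf("%.3f%n", share);
        }
    }
}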
Use of io.prestosql.execution.TaskId in project hetu-core by openlookeng.
From class TestTaskExecutor, method testMinMaxDriversPerTask.
@Test(timeOut = 30_000)
public void testMinMaxDriversPerTask() {
    int maxDriversPerTask = 2;
    MultilevelSplitQueue splitQueue = new MultilevelSplitQueue(2);
    TestingTicker ticker = new TestingTicker();
    TaskExecutor taskExecutor = new TaskExecutor(4, 16, 1, maxDriversPerTask, splitQueue, ticker);
    taskExecutor.start();

    try {
        TaskHandle testTaskHandle = taskExecutor.addTask(new TaskId("test", 0, 0), () -> 0, 10, new Duration(1, MILLISECONDS), OptionalInt.empty());

        // enqueue all batches of splits
        int batchCount = 4;
        TestingJob[] splits = new TestingJob[8];
        Phaser[] phasers = new Phaser[batchCount];
        for (int batch = 0; batch < batchCount; batch++) {
            phasers[batch] = new Phaser();
            phasers[batch].register();
            TestingJob split1 = new TestingJob(ticker, new Phaser(), new Phaser(), phasers[batch], 1, 0);
            TestingJob split2 = new TestingJob(ticker, new Phaser(), new Phaser(), phasers[batch], 1, 0);
            splits[2 * batch] = split1;
            splits[2 * batch + 1] = split2;
            taskExecutor.enqueueSplits(testTaskHandle, false, ImmutableList.of(split1, split2));
        }

        // assert that the splits are processed in batches as expected
        for (int batch = 0; batch < batchCount; batch++) {
            // wait until the current batch starts
            waitUntilSplitsStart(ImmutableList.of(splits[2 * batch], splits[2 * batch + 1]));
            // assert that only the splits up to and including the current batch are running and the rest haven't started yet
            assertSplitStates(2 * batch + 1, splits);
            // complete the current batch
            phasers[batch].arriveAndDeregister();
        }
    } finally {
        taskExecutor.stop();
    }
}
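Note: with a guaranteed count of 1 and maxDriversPerTask of 2, only two of the task's eight splits may run concurrently even though the executor has 16 runner threads, which is why the splits complete in four batches of two. One way to picture the per-task limit is a semaphore gate; this is a sketch of the idea, not TaskExecutor's actual mechanism:

import java.util.concurrent.Semaphore;

public class PerTaskConcurrencyGate {
    private final Semaphore permits;

    public PerTaskConcurrencyGate(int maxDriversPerTask) {
        this.permits = new Semaphore(maxDriversPerTask);
    }

    // a split may only start if a task-level permit is available
    public boolean tryStartSplit() {
        return permits.tryAcquire();
    }

    // a finishing split returns its permit, letting the next queued split run
    public void splitFinished() {
        permits.release();
    }
}

With two permits, the third queued split cannot start until one of the first two finishes, which is exactly the batch-by-batch progression assertSplitStates verifies.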
Use of io.prestosql.execution.TaskId in project hetu-core by openlookeng.
From class TestNodeScheduler, method testTopologyAwareScheduling.
@Test(timeOut = 60 * 1000)
public void testTopologyAwareScheduling() throws Exception {
    NodeTaskMap nodeMap = new NodeTaskMap(finalizerService);
    InMemoryNodeManager memoryNodeManager = new InMemoryNodeManager();
    ImmutableList.Builder<InternalNode> nodeBuilder = ImmutableList.builder();
    nodeBuilder.add(new InternalNode("node1", URI.create("http://host1.rack1:11"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new InternalNode("node2", URI.create("http://host2.rack1:12"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new InternalNode("node3", URI.create("http://host3.rack2:13"), NodeVersion.UNKNOWN, false));
    ImmutableList<InternalNode> nodes = nodeBuilder.build();
    memoryNodeManager.addNode(CONNECTOR_ID, nodes);

    // contents of taskMap indicate the node-task map for the current stage
    Map<InternalNode, RemoteTask> nodeRemoteTaskHashMap = new HashMap<>();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setMaxSplitsPerNode(25)
            .setIncludeCoordinator(false)
            .setNetworkTopology("test")
            .setMaxPendingSplitsPerTask(20);
    TestNetworkTopology topology = new TestNetworkTopology();
    NetworkLocationCache locationCache = new NetworkLocationCache(topology) {
        @Override
        public NetworkLocation get(HostAddress host) {
            // Bypass the cache for workers, since we only look them up once and they would all be unresolved otherwise
            if (host.getHostText().startsWith("host")) {
                return topology.locate(host);
            } else {
                return super.get(host);
            }
        }
    };
    NodeScheduler nodeScheduler = new NodeScheduler(locationCache, topology, memoryNodeManager, nodeSchedulerConfig, nodeMap);
    NodeSelector selector = nodeScheduler.createNodeSelector(CONNECTOR_ID, false, null);

    // Fill up the nodes with non-local data
    ImmutableSet.Builder<Split> nonRackLocalBuilder = ImmutableSet.builder();
    for (int i = 0; i < (25 + 11) * 3; i++) {
        nonRackLocalBuilder.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("data.other_rack", 1)), Lifespan.taskWide()));
    }
    Set<Split> nonRackLocalSplits = nonRackLocalBuilder.build();
    Multimap<InternalNode, Split> assignments = selector.computeAssignments(nonRackLocalSplits, ImmutableList.copyOf(nodeRemoteTaskHashMap.values()), Optional.empty()).getAssignments();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor);
    int task = 0;
    for (InternalNode node : assignments.keySet()) {
        TaskId taskId = new TaskId("test", 1, task);
        task++;
        MockRemoteTaskFactory.MockRemoteTask remoteTask = remoteTaskFactory.createTableScanTask(taskId, node, ImmutableList.copyOf(assignments.get(node)), nodeMap.createPartitionedSplitCountTracker(node, taskId));
        remoteTask.startSplits(25);
        nodeMap.addTask(node, remoteTask);
        nodeRemoteTaskHashMap.put(node, remoteTask);
    }

    // Continue assigning to fill up part of the queue
    nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values()));
    assignments = selector.computeAssignments(nonRackLocalSplits, ImmutableList.copyOf(nodeRemoteTaskHashMap.values()), Optional.empty()).getAssignments();
    for (InternalNode node : assignments.keySet()) {
        RemoteTask remoteTask = nodeRemoteTaskHashMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder().putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
    }
    nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values()));

    // Check that 3 of the splits were rejected, since they're non-local
    assertEquals(nonRackLocalSplits.size(), 3);

    // Assign rack-local splits
    ImmutableSet.Builder<Split> rackLocalSplits = ImmutableSet.builder();
    HostAddress dataHost1 = HostAddress.fromParts("data.rack1", 1);
    HostAddress dataHost2 = HostAddress.fromParts("data.rack2", 1);
    for (int i = 0; i < 6 * 2; i++) {
        rackLocalSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(dataHost1), Lifespan.taskWide()));
    }
    for (int i = 0; i < 6; i++) {
        rackLocalSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(dataHost2), Lifespan.taskWide()));
    }
    assignments = selector.computeAssignments(rackLocalSplits.build(), ImmutableList.copyOf(nodeRemoteTaskHashMap.values()), Optional.empty()).getAssignments();
    for (InternalNode node : assignments.keySet()) {
        RemoteTask remoteTask = nodeRemoteTaskHashMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder().putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
    }
    Set<Split> unassigned = Sets.difference(rackLocalSplits.build(), new HashSet<>(assignments.values()));

    // Compute the assignments a second time to account for the fact that some splits may not have been
    // assigned due to asynchronous loading of the NetworkLocationCache
    boolean cacheRefreshed = false;
    while (!cacheRefreshed) {
        cacheRefreshed = true;
        if (locationCache.get(dataHost1).equals(ROOT_LOCATION)) {
            cacheRefreshed = false;
        }
        if (locationCache.get(dataHost2).equals(ROOT_LOCATION)) {
            cacheRefreshed = false;
        }
        MILLISECONDS.sleep(10);
    }
    assignments = selector.computeAssignments(unassigned, ImmutableList.copyOf(nodeRemoteTaskHashMap.values()), Optional.empty()).getAssignments();
    for (InternalNode node : assignments.keySet()) {
        RemoteTask remoteTask = nodeRemoteTaskHashMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder().putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
    }
    unassigned = Sets.difference(unassigned, new HashSet<>(assignments.values()));
    assertEquals(unassigned.size(), 3);

    int rack1 = 0;
    int rack2 = 0;
    for (Split split : unassigned) {
        String rack = topology.locate(split.getAddresses().get(0)).getSegments().get(0);
        switch (rack) {
            case "rack1":
                rack1++;
                break;
            case "rack2":
                rack2++;
                break;
            default:
                fail();
        }
    }
    assertEquals(rack1, 2);
    assertEquals(rack2, 1);

    // Assign local splits
    ImmutableSet.Builder<Split> localSplits = ImmutableSet.builder();
    localSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("host1.rack1", 1)), Lifespan.taskWide()));
    localSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("host2.rack1", 1)), Lifespan.taskWide()));
    localSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("host3.rack2", 1)), Lifespan.taskWide()));
    assignments = selector.computeAssignments(localSplits.build(), ImmutableList.copyOf(nodeRemoteTaskHashMap.values()), Optional.empty()).getAssignments();
    assertEquals(assignments.size(), 3);
    assertEquals(assignments.keySet().size(), 3);
}
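Note: the rack arithmetic works because the test topology derives a location directly from the hostname, so host1.rack1 and data.rack1 land in the same rack segment while data.other_rack matches no worker's rack. A plausible sketch of that mapping, with illustrative names (the real TestNetworkTopology may differ in detail):

import java.util.List;

public class HostnameTopology {
    // "host1.rack1" -> [rack1, host1]; two hosts share a rack exactly when
    // their trailing domain component matches
    public static List<String> locate(String hostname) {
        String[] parts = hostname.split("\\.");
        if (parts.length == 2) {
            return List.of(parts[1], parts[0]);
        }
        // unknown hosts resolve to an empty (root) location
        return List.of();
    }

    public static void main(String[] args) {
        System.out.println(locate("host1.rack1")); // [rack1, host1]
        System.out.println(locate("data.rack2")); // [rack2, data]
    }
}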
Use of io.prestosql.execution.TaskId in project hetu-core by openlookeng.
From class TestNodeScheduler, method testTaskCompletion.
@Test
public void testTaskCompletion() throws Exception {
    setUpNodes();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor);
    InternalNode chosenNode = Iterables.get(nodeManager.getActiveConnectorNodes(CONNECTOR_ID), 0);
    TaskId taskId = new TaskId("test", 1, 1);
    RemoteTask remoteTask = remoteTaskFactory.createTableScanTask(taskId, chosenNode, ImmutableList.of(new Split(CONNECTOR_ID, new TestSplitRemote(), Lifespan.taskWide())), nodeTaskMap.createPartitionedSplitCountTracker(chosenNode, taskId));
    nodeTaskMap.addTask(chosenNode, remoteTask);
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), 1);

    remoteTask.abort();
    // Sleep until cache expires
    MILLISECONDS.sleep(100);
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), 0);

    remoteTask.abort();
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), 0);
}
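Note: the test exercises the contract between a task and its split-count tracker: while the task runs, the node's partitioned split count reflects its splits; after abort() the count drops to zero once the internal cache expires (hence the 100 ms sleep); and a second abort() must be a no-op. A hypothetical sketch of that contract, with names that are illustrative rather than the real PartitionedSplitCountTracker API:

import java.util.concurrent.atomic.AtomicInteger;

public class SplitCountTracker {
    private final AtomicInteger partitionedSplits = new AtomicInteger();

    // the task reports its current split count as splits are added and finish
    public void setPartitionedSplitCount(int count) {
        partitionedSplits.set(count);
    }

    // an aborted task reports zero; calling this twice is harmless (idempotent)
    public void taskAborted() {
        partitionedSplits.set(0);
    }

    public int getPartitionedSplitsOnNode() {
        return partitionedSplits.get();
    }
}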