Use of org.apache.flink.runtime.jobgraph.JobVertexID in project flink by apache.
In class SharedSlotsTest, method testReleaseTwoLevelsFromRoot:
/**
 * We allocate the structure below and release it from the root.
 *
 * <pre>
 *     Shared(0)(root)
 *        |
 *        +-- Simple(2)(sink)
 *        |
 *        +-- Shared(1)(co-location-group)
 *        |        |
 *        |        +-- Simple(0)(tail)
 *        |        +-- Simple(1)(head)
 *        |
 *        +-- Simple(0)(source)
 * </pre>
 */
@Test
public void testReleaseTwoLevelsFromRoot() {
    try {
        JobVertexID sourceId = new JobVertexID();
        JobVertexID headId = new JobVertexID();
        JobVertexID tailId = new JobVertexID();
        JobVertexID sinkId = new JobVertexID();

        JobVertex headVertex = new JobVertex("head", headId);
        JobVertex tailVertex = new JobVertex("tail", tailId);

        SlotSharingGroup sharingGroup = new SlotSharingGroup(sourceId, headId, tailId, sinkId);
        SlotSharingGroupAssignment assignment = sharingGroup.getTaskAssignment();
        assertEquals(0, assignment.getNumberOfSlots());

        CoLocationGroup coLocationGroup = new CoLocationGroup(headVertex, tailVertex);
        CoLocationConstraint constraint = coLocationGroup.getLocationConstraint(0);
        assertFalse(constraint.isAssigned());
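        // a single task slot on the instance: all four tasks must share it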
        Instance instance = SchedulerTestUtils.getRandomInstance(1);

        // allocate a shared slot
        SharedSlot sharedSlot = instance.allocateSharedSlot(new JobID(), assignment);

        // get the first simple slot
        SimpleSlot sourceSlot = assignment.addSharedSlotAndAllocateSubSlot(sharedSlot, Locality.LOCAL, sourceId);

        SimpleSlot headSlot = assignment.getSlotForTask(constraint, NO_LOCATION);
        constraint.lockLocation();
        SimpleSlot tailSlot = assignment.getSlotForTask(constraint, NO_LOCATION);

        SimpleSlot sinkSlot = assignment.getSlotForTask(sinkId, NO_LOCATION);
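        // four leaves: source, head, tail, and sink (the nested shared slot is not a leaf)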
        assertEquals(4, sharedSlot.getNumberLeaves());

        // release all
        sourceSlot.releaseSlot();
        headSlot.releaseSlot();
        tailSlot.releaseSlot();
        sinkSlot.releaseSlot();
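        // once the last child is released, the root and the nested shared slot are disposed as well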
        assertTrue(sharedSlot.isReleased());
        assertTrue(sourceSlot.isReleased());
        assertTrue(headSlot.isReleased());
        assertTrue(tailSlot.isReleased());
        assertTrue(sinkSlot.isReleased());
        assertTrue(constraint.getSharedSlot().isReleased());

        assertTrue(constraint.isAssigned());
        assertFalse(constraint.isAssignedAndAlive());

        assertEquals(1, instance.getNumberOfAvailableSlots());
        assertEquals(0, instance.getNumberOfAllocatedSlots());
        assertEquals(0, assignment.getNumberOfSlots());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.runtime.jobgraph.JobVertexID in project flink by apache.
In class SharedSlotsTest, method allocateAndReleaseEmptySlot:
@Test
public void allocateAndReleaseEmptySlot() {
    try {
        JobID jobId = new JobID();
        JobVertexID vertexId = new JobVertexID();

        SlotSharingGroup sharingGroup = new SlotSharingGroup(vertexId);
        SlotSharingGroupAssignment assignment = sharingGroup.getTaskAssignment();
        assertEquals(0, assignment.getNumberOfSlots());
        assertEquals(0, assignment.getNumberOfAvailableSlotsForGroup(vertexId));

        Instance instance = SchedulerTestUtils.getRandomInstance(2);
        assertEquals(2, instance.getTotalNumberOfSlots());
        assertEquals(0, instance.getNumberOfAllocatedSlots());
        assertEquals(2, instance.getNumberOfAvailableSlots());

        // allocate a shared slot
        SharedSlot slot = instance.allocateSharedSlot(jobId, assignment);
        assertEquals(2, instance.getTotalNumberOfSlots());
        assertEquals(1, instance.getNumberOfAllocatedSlots());
        assertEquals(1, instance.getNumberOfAvailableSlots());

        // check that the new slot is fresh
        assertTrue(slot.isAlive());
        assertFalse(slot.isCanceled());
        assertFalse(slot.isReleased());
        assertEquals(0, slot.getNumberLeaves());
        assertFalse(slot.hasChildren());
        assertTrue(slot.isRootAndEmpty());
        assertNotNull(slot.toString());
        assertTrue(slot.getSubSlots().isEmpty());
        assertEquals(0, slot.getSlotNumber());
        assertEquals(0, slot.getRootSlotNumber());

        // release the slot immediately.
        slot.releaseSlot();
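        // a released slot reports both canceled and released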
        assertTrue(slot.isCanceled());
        assertTrue(slot.isReleased());

        // the slot sharing group and the instance should no longer count the released slot
        assertEquals(2, instance.getTotalNumberOfSlots());
        assertEquals(0, instance.getNumberOfAllocatedSlots());
        assertEquals(2, instance.getNumberOfAvailableSlots());
        assertEquals(0, assignment.getNumberOfSlots());
        assertEquals(0, assignment.getNumberOfAvailableSlotsForGroup(vertexId));

        // we should not be able to allocate any children from this released slot
        assertNull(slot.allocateSharedSlot(new AbstractID()));
        assertNull(slot.allocateSubSlot(new AbstractID()));

        // we cannot add this slot to the assignment group
        assertNull(assignment.addSharedSlotAndAllocateSubSlot(slot, Locality.NON_LOCAL, vertexId));
        assertEquals(0, assignment.getNumberOfSlots());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.runtime.jobgraph.JobVertexID in project flink by apache.
In class SharedSlotsTest, method testAllocateAndReleaseTwoLevels:
/**
 * We allocate and release the structure below, starting by allocating a simple slot in the
 * shared slot and finishing by releasing a simple slot.
 *
 * <pre>
 *     Shared(0)(root)
 *        |
 *        +-- Simple(2)(sink)
 *        |
 *        +-- Shared(1)(co-location-group)
 *        |        |
 *        |        +-- Simple(0)(tail)
 *        |        +-- Simple(1)(head)
 *        |
 *        +-- Simple(0)(source)
 * </pre>
 */
@Test
public void testAllocateAndReleaseTwoLevels() {
    try {
        JobVertexID sourceId = new JobVertexID();
        JobVertexID headId = new JobVertexID();
        JobVertexID tailId = new JobVertexID();
        JobVertexID sinkId = new JobVertexID();

        JobVertex headVertex = new JobVertex("head", headId);
        JobVertex tailVertex = new JobVertex("tail", tailId);

        SlotSharingGroup sharingGroup = new SlotSharingGroup(sourceId, headId, tailId, sinkId);
        SlotSharingGroupAssignment assignment = sharingGroup.getTaskAssignment();
        assertEquals(0, assignment.getNumberOfSlots());

        CoLocationGroup coLocationGroup = new CoLocationGroup(headVertex, tailVertex);
        CoLocationConstraint constraint = coLocationGroup.getLocationConstraint(0);
        assertFalse(constraint.isAssigned());

        Instance instance = SchedulerTestUtils.getRandomInstance(1);

        // allocate a shared slot
        SharedSlot sharedSlot = instance.allocateSharedSlot(new JobID(), assignment);

        // get the first simple slot
        SimpleSlot sourceSlot = assignment.addSharedSlotAndAllocateSubSlot(sharedSlot, Locality.LOCAL, sourceId);
        assertEquals(1, sharedSlot.getNumberLeaves());

        // get the first slot in the nested shared slot from the co-location constraint
        SimpleSlot headSlot = assignment.getSlotForTask(constraint, Collections.<TaskManagerLocation>emptySet());
        assertEquals(2, sharedSlot.getNumberLeaves());
        assertNotNull(constraint.getSharedSlot());
        assertTrue(constraint.getSharedSlot().isAlive());
        assertFalse(constraint.isAssigned());

        // the location is not locked yet, so releasing the head slot resets the constraint
        headSlot.releaseSlot();
        assertEquals(1, sharedSlot.getNumberLeaves());
        assertNotNull(constraint.getSharedSlot());
        assertTrue(constraint.getSharedSlot().isReleased());
        assertFalse(constraint.isAssigned());

        // re-allocate the head slot and this time lock the location
        headSlot = assignment.getSlotForTask(constraint, Collections.<TaskManagerLocation>emptySet());
        constraint.lockLocation();
        assertNotNull(constraint.getSharedSlot());
        assertTrue(constraint.isAssigned());
        assertTrue(constraint.isAssignedAndAlive());
        assertEquals(instance.getTaskManagerLocation(), constraint.getLocation());
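        // the tail task must land in the same nested shared slot as the head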
        SimpleSlot tailSlot = assignment.getSlotForTask(constraint, Collections.<TaskManagerLocation>emptySet());
        assertEquals(constraint.getSharedSlot(), headSlot.getParent());
        assertEquals(constraint.getSharedSlot(), tailSlot.getParent());

        SimpleSlot sinkSlot = assignment.getSlotForTask(sinkId, Collections.<TaskManagerLocation>emptySet());
        assertEquals(4, sharedSlot.getNumberLeaves());

        // we release our co-location constraint tasks
        headSlot.releaseSlot();
        tailSlot.releaseSlot();
        assertEquals(2, sharedSlot.getNumberLeaves());
        assertTrue(headSlot.isReleased());
        assertTrue(tailSlot.isReleased());
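        // the constraint keeps its location assignment even though its slot is gone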
        assertTrue(constraint.isAssigned());
        assertFalse(constraint.isAssignedAndAlive());
        assertEquals(instance.getTaskManagerLocation(), constraint.getLocation());

        // we should have resources again for the co-location constraint
        assertEquals(1, assignment.getNumberOfAvailableSlotsForGroup(constraint.getGroupId()));

        // re-allocate head and tail from the constraint
        headSlot = assignment.getSlotForTask(constraint, NO_LOCATION);
        tailSlot = assignment.getSlotForTask(constraint, NO_LOCATION);
        assertEquals(4, sharedSlot.getNumberLeaves());
        assertEquals(0, assignment.getNumberOfAvailableSlotsForGroup(constraint.getGroupId()));

        // verify some basic properties of the slots
        assertEquals(instance.getTaskManagerID(), sourceSlot.getTaskManagerID());
        assertEquals(instance.getTaskManagerID(), headSlot.getTaskManagerID());
        assertEquals(instance.getTaskManagerID(), tailSlot.getTaskManagerID());
        assertEquals(instance.getTaskManagerID(), sinkSlot.getTaskManagerID());
        assertEquals(sourceId, sourceSlot.getGroupID());
        assertEquals(sinkId, sinkSlot.getGroupID());
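        // head and tail were scheduled through the co-location constraint, so the
        // group ID sits on the constraint's shared slot, not on their simple slots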
        assertNull(headSlot.getGroupID());
        assertNull(tailSlot.getGroupID());
        assertEquals(constraint.getGroupId(), constraint.getSharedSlot().getGroupID());

        // release all
        sourceSlot.releaseSlot();
        headSlot.releaseSlot();
        tailSlot.releaseSlot();
        sinkSlot.releaseSlot();

        assertTrue(sharedSlot.isReleased());
        assertTrue(sourceSlot.isReleased());
        assertTrue(headSlot.isReleased());
        assertTrue(tailSlot.isReleased());
        assertTrue(sinkSlot.isReleased());
        assertTrue(constraint.getSharedSlot().isReleased());

        assertTrue(constraint.isAssigned());
        assertFalse(constraint.isAssignedAndAlive());

        assertEquals(1, instance.getNumberOfAvailableSlots());
        assertEquals(0, assignment.getNumberOfSlots());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.runtime.jobgraph.JobVertexID in project flink by apache.
In class TaskDeploymentDescriptorTest, method testSerialization:
@Test
public void testSerialization() {
    try {
        final JobID jobID = new JobID();
        final JobVertexID vertexID = new JobVertexID();
        final ExecutionAttemptID execId = new ExecutionAttemptID();
        final AllocationID allocationId = new AllocationID();
        final String jobName = "job name";
        final String taskName = "task name";
        final int numberOfKeyGroups = 1;
        final int indexInSubtaskGroup = 0;
        final int currentNumberOfSubtasks = 1;
        final int attemptNumber = 0;
        final Configuration jobConfiguration = new Configuration();
        final Configuration taskConfiguration = new Configuration();
        final Class<? extends AbstractInvokable> invokableClass = BatchTask.class;
        final List<ResultPartitionDeploymentDescriptor> producedResults = new ArrayList<ResultPartitionDeploymentDescriptor>(0);
        final List<InputGateDeploymentDescriptor> inputGates = new ArrayList<InputGateDeploymentDescriptor>(0);
        final List<BlobKey> requiredJars = new ArrayList<BlobKey>(0);
        final List<URL> requiredClasspaths = new ArrayList<URL>(0);
        final SerializedValue<ExecutionConfig> executionConfig = new SerializedValue<>(new ExecutionConfig());

        final SerializedValue<JobInformation> serializedJobInformation = new SerializedValue<>(
            new JobInformation(jobID, jobName, executionConfig, jobConfiguration, requiredJars, requiredClasspaths));
        final SerializedValue<TaskInformation> serializedJobVertexInformation = new SerializedValue<>(
            new TaskInformation(vertexID, taskName, currentNumberOfSubtasks, numberOfKeyGroups, invokableClass.getName(), taskConfiguration));

        final int targetSlotNumber = 47;
        final TaskStateHandles taskStateHandles = new TaskStateHandles();

        final TaskDeploymentDescriptor orig = new TaskDeploymentDescriptor(
            serializedJobInformation, serializedJobVertexInformation, execId, allocationId,
            indexInSubtaskGroup, attemptNumber, targetSlotNumber, taskStateHandles,
            producedResults, inputGates);

        final TaskDeploymentDescriptor copy = CommonTestUtils.createCopySerializable(orig);
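        // the serialization round trip must produce fresh instances, not references into the original...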
        assertFalse(orig.getSerializedJobInformation() == copy.getSerializedJobInformation());
        assertFalse(orig.getSerializedTaskInformation() == copy.getSerializedTaskInformation());
        assertFalse(orig.getExecutionAttemptId() == copy.getExecutionAttemptId());
        assertFalse(orig.getTaskStateHandles() == copy.getTaskStateHandles());
        assertFalse(orig.getProducedPartitions() == copy.getProducedPartitions());
        assertFalse(orig.getInputGates() == copy.getInputGates());
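        // ...while remaining equal in value to the original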
        assertEquals(orig.getSerializedJobInformation(), copy.getSerializedJobInformation());
        assertEquals(orig.getSerializedTaskInformation(), copy.getSerializedTaskInformation());
        assertEquals(orig.getExecutionAttemptId(), copy.getExecutionAttemptId());
        assertEquals(orig.getAllocationId(), copy.getAllocationId());
        assertEquals(orig.getSubtaskIndex(), copy.getSubtaskIndex());
        assertEquals(orig.getAttemptNumber(), copy.getAttemptNumber());
        assertEquals(orig.getTargetSlotNumber(), copy.getTargetSlotNumber());
        assertEquals(orig.getTaskStateHandles(), copy.getTaskStateHandles());
        assertEquals(orig.getProducedPartitions(), copy.getProducedPartitions());
        assertEquals(orig.getInputGates(), copy.getInputGates());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.runtime.jobgraph.JobVertexID in project flink by apache.
In class ExecutionGraphConstructionTest, method verifyTestGraph:
private void verifyTestGraph(ExecutionGraph eg, JobID jobId, JobVertex v1, JobVertex v2, JobVertex v3, JobVertex v4, JobVertex v5) {
    Map<JobVertexID, ExecutionJobVertex> vertices = eg.getAllVertices();

    // verify v1
    {
        ExecutionJobVertex e1 = vertices.get(v1.getID());
        assertNotNull(e1);

        // basic properties
        assertEquals(v1.getParallelism(), e1.getParallelism());
        assertEquals(v1.getID(), e1.getJobVertexId());
        assertEquals(jobId, e1.getJobId());
        assertEquals(v1, e1.getJobVertex());

        // produced data sets
        assertEquals(1, e1.getProducedDataSets().length);
        assertEquals(v1.getProducedDataSets().get(0).getId(), e1.getProducedDataSets()[0].getId());
        assertEquals(v1.getParallelism(), e1.getProducedDataSets()[0].getPartitions().length);

        // task vertices
        assertEquals(v1.getParallelism(), e1.getTaskVertices().length);

        int num = 0;
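        // each subtask must know its job, its vertex, the total parallelism, and its own index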
        for (ExecutionVertex ev : e1.getTaskVertices()) {
            assertEquals(jobId, ev.getJobId());
            assertEquals(v1.getID(), ev.getJobvertexId());
            assertEquals(v1.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
            assertEquals(num++, ev.getParallelSubtaskIndex());
            assertEquals(0, ev.getNumberOfInputs());
            assertTrue(ev.getStateTimestamp(ExecutionState.CREATED) > 0);
        }
    }

    // verify v2
    {
        ExecutionJobVertex e2 = vertices.get(v2.getID());
        assertNotNull(e2);

        // produced data sets
        assertEquals(1, e2.getProducedDataSets().length);
        assertEquals(v2.getProducedDataSets().get(0).getId(), e2.getProducedDataSets()[0].getId());
        assertEquals(v2.getParallelism(), e2.getProducedDataSets()[0].getPartitions().length);

        // task vertices
        assertEquals(v2.getParallelism(), e2.getTaskVertices().length);

        int num = 0;
        for (ExecutionVertex ev : e2.getTaskVertices()) {
            assertEquals(jobId, ev.getJobId());
            assertEquals(v2.getID(), ev.getJobvertexId());
            assertEquals(v2.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
            assertEquals(num++, ev.getParallelSubtaskIndex());
            assertEquals(1, ev.getNumberOfInputs());

            ExecutionEdge[] inputs = ev.getInputEdges(0);
            assertEquals(v1.getParallelism(), inputs.length);
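            // all-to-all pattern: each consumer sees every producer partition exactly once,
            // so the partition numbers 0..n-1 sum to n*(n-1)/2 (here n = 5, sum = 10)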
            int sumOfPartitions = 0;
            for (ExecutionEdge inEdge : inputs) {
                assertEquals(0, inEdge.getInputNum());
                sumOfPartitions += inEdge.getSource().getPartitionNumber();
            }
            assertEquals(10, sumOfPartitions);
        }
    }

    // verify v3
    {
        ExecutionJobVertex e3 = vertices.get(v3.getID());
        assertNotNull(e3);

        // produced data sets
        assertEquals(2, e3.getProducedDataSets().length);
        assertEquals(v3.getProducedDataSets().get(0).getId(), e3.getProducedDataSets()[0].getId());
        assertEquals(v3.getProducedDataSets().get(1).getId(), e3.getProducedDataSets()[1].getId());
        assertEquals(v3.getParallelism(), e3.getProducedDataSets()[0].getPartitions().length);
        assertEquals(v3.getParallelism(), e3.getProducedDataSets()[1].getPartitions().length);

        // task vertices
        assertEquals(v3.getParallelism(), e3.getTaskVertices().length);

        int num = 0;
        for (ExecutionVertex ev : e3.getTaskVertices()) {
            assertEquals(jobId, ev.getJobId());
            assertEquals(v3.getID(), ev.getJobvertexId());
            assertEquals(v3.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
            assertEquals(num++, ev.getParallelSubtaskIndex());
            assertEquals(0, ev.getNumberOfInputs());
        }
    }

    // verify v4
    {
        ExecutionJobVertex e4 = vertices.get(v4.getID());
        assertNotNull(e4);

        // produced data sets
        assertEquals(1, e4.getProducedDataSets().length);
        assertEquals(v4.getProducedDataSets().get(0).getId(), e4.getProducedDataSets()[0].getId());

        // task vertices
        assertEquals(v4.getParallelism(), e4.getTaskVertices().length);

        int num = 0;
        for (ExecutionVertex ev : e4.getTaskVertices()) {
            assertEquals(jobId, ev.getJobId());
            assertEquals(v4.getID(), ev.getJobvertexId());
            assertEquals(v4.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
            assertEquals(num++, ev.getParallelSubtaskIndex());
            assertEquals(2, ev.getNumberOfInputs());

            // first input
            {
                ExecutionEdge[] inputs = ev.getInputEdges(0);
                assertEquals(v2.getParallelism(), inputs.length);

                int sumOfPartitions = 0;
                for (ExecutionEdge inEdge : inputs) {
                    assertEquals(0, inEdge.getInputNum());
                    sumOfPartitions += inEdge.getSource().getPartitionNumber();
                }
                assertEquals(21, sumOfPartitions);
            }

            // second input
            {
                ExecutionEdge[] inputs = ev.getInputEdges(1);
                assertEquals(v3.getParallelism(), inputs.length);

                int sumOfPartitions = 0;
                for (ExecutionEdge inEdge : inputs) {
                    assertEquals(1, inEdge.getInputNum());
                    sumOfPartitions += inEdge.getSource().getPartitionNumber();
                }
                assertEquals(1, sumOfPartitions);
            }
        }
    }

    // verify v5
    {
        ExecutionJobVertex e5 = vertices.get(v5.getID());
        assertNotNull(e5);

        // produced data sets
        assertEquals(0, e5.getProducedDataSets().length);

        // task vertices
        assertEquals(v5.getParallelism(), e5.getTaskVertices().length);

        int num = 0;
        for (ExecutionVertex ev : e5.getTaskVertices()) {
            assertEquals(jobId, ev.getJobId());
            assertEquals(v5.getID(), ev.getJobvertexId());
            assertEquals(v5.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
            assertEquals(num++, ev.getParallelSubtaskIndex());
            assertEquals(2, ev.getNumberOfInputs());

            // first input
            {
                ExecutionEdge[] inputs = ev.getInputEdges(0);
                assertEquals(v4.getParallelism(), inputs.length);

                int sumOfPartitions = 0;
                for (ExecutionEdge inEdge : inputs) {
                    assertEquals(0, inEdge.getInputNum());
                    sumOfPartitions += inEdge.getSource().getPartitionNumber();
                }
                assertEquals(55, sumOfPartitions);
            }

            // second input
            {
                ExecutionEdge[] inputs = ev.getInputEdges(1);
                assertEquals(v3.getParallelism(), inputs.length);

                int sumOfPartitions = 0;
                for (ExecutionEdge inEdge : inputs) {
                    assertEquals(1, inEdge.getInputNum());
                    sumOfPartitions += inEdge.getSource().getPartitionNumber();
                }
                assertEquals(1, sumOfPartitions);
            }
        }
    }
}