Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.
The class Execution, method scheduleOrUpdateConsumers:
void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) {
    final int numConsumers = allConsumers.size();

    if (numConsumers > 1) {
        fail(new IllegalStateException("Currently, only a single consumer group per partition is supported."));
    } else if (numConsumers == 0) {
        return;
    }

    for (ExecutionEdge edge : allConsumers.get(0)) {
        final ExecutionVertex consumerVertex = edge.getTarget();
        final Execution consumer = consumerVertex.getCurrentExecutionAttempt();
        final ExecutionState consumerState = consumer.getState();
        final IntermediateResultPartition partition = edge.getSource();

        // ----------------------------------------------------------------
        if (consumerState == CREATED) {
            final Execution partitionExecution = partition.getProducer().getCurrentExecutionAttempt();
            consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor.fromEdge(partition, partitionExecution));

            // When deploying a consuming task, its task deployment descriptor will contain all
            // deployment information available at the respective time. It is possible that some
            // of the partitions to be consumed have not been created yet. These are updated at
            // runtime via the update messages.
            //
            // TODO The current approach may send many update messages even though the consuming
            // task has already been deployed with all necessary information. We have to check
            // whether this is a problem and fix it, if it is.
            FlinkFuture.supplyAsync(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    try {
                        consumerVertex.scheduleForExecution(
                            consumerVertex.getExecutionGraph().getSlotProvider(),
                            consumerVertex.getExecutionGraph().isQueuedSchedulingAllowed());
                    } catch (Throwable t) {
                        consumerVertex.fail(new IllegalStateException("Could not schedule consumer vertex " + consumerVertex, t));
                    }
                    return null;
                }
            }, executor);

            // double check to resolve race conditions
            if (consumerVertex.getExecutionState() == RUNNING) {
                consumerVertex.sendPartitionInfos();
            }
        }
        // ----------------------------------------------------------------
        // Consumer is running => send update message now
        // ----------------------------------------------------------------
        else if (consumerState == RUNNING) {
            final SimpleSlot consumerSlot = consumer.getAssignedResource();

            if (consumerSlot == null) {
                // The consumer has been reset concurrently
                continue;
            }

            final TaskManagerLocation partitionTaskManagerLocation =
                partition.getProducer().getCurrentAssignedResource().getTaskManagerLocation();
            final ResourceID partitionTaskManager = partitionTaskManagerLocation.getResourceID();
            final ResourceID consumerTaskManager = consumerSlot.getTaskManagerID();

            final ResultPartitionID partitionId = new ResultPartitionID(partition.getPartitionId(), attemptId);

            final ResultPartitionLocation partitionLocation;

            if (consumerTaskManager.equals(partitionTaskManager)) {
                // Consuming task is deployed to the same instance as the partition => local
                partitionLocation = ResultPartitionLocation.createLocal();
            } else {
                // Different instances => remote
                final ConnectionID connectionId = new ConnectionID(
                    partitionTaskManagerLocation,
                    partition.getIntermediateResult().getConnectionIndex());
                partitionLocation = ResultPartitionLocation.createRemote(connectionId);
            }

            final InputChannelDeploymentDescriptor descriptor =
                new InputChannelDeploymentDescriptor(partitionId, partitionLocation);

            consumer.sendUpdatePartitionInfoRpcCall(
                Collections.singleton(new PartitionInfo(partition.getIntermediateResult().getId(), descriptor)));
        }
        // ----------------------------------------------------------------
        else if (consumerState == SCHEDULED || consumerState == DEPLOYING) {
            final Execution partitionExecution = partition.getProducer().getCurrentExecutionAttempt();
            consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor.fromEdge(partition, partitionExecution));

            // double check to resolve race conditions
            if (consumerVertex.getExecutionState() == RUNNING) {
                consumerVertex.sendPartitionInfos();
            }
        }
    }
}
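The RUNNING branch above comes down to a single decision: the producer's TaskManagerLocation yields a ResourceID, and if it equals the consumer slot's ResourceID the partition is read locally, otherwise a remote ConnectionID is built from the producer's TaskManagerLocation and connection index. The following self-contained sketch illustrates only that decision; Location, decide and the plain String identifiers are hypothetical stand-ins for Flink's ResultPartitionLocation, ConnectionID and ResourceID, not the real API.

import java.util.Objects;

// Illustration only: stand-ins for Flink's ResultPartitionLocation / ConnectionID / ResourceID.
final class PartitionLocationSketch {

    enum Kind { LOCAL, REMOTE }

    static final class Location {
        final Kind kind;
        final String connectionAddress; // null for LOCAL

        private Location(Kind kind, String connectionAddress) {
            this.kind = kind;
            this.connectionAddress = connectionAddress;
        }

        static Location local() {
            return new Location(Kind.LOCAL, null);
        }

        static Location remote(String producerAddress, int connectionIndex) {
            return new Location(Kind.REMOTE, producerAddress + "#" + connectionIndex);
        }
    }

    // Same decision as the RUNNING branch: compare producer and consumer resource IDs.
    static Location decide(String producerResourceId, String producerAddress,
                           int connectionIndex, String consumerResourceId) {
        if (Objects.equals(producerResourceId, consumerResourceId)) {
            // consumer runs on the same TaskManager as the partition => local channel
            return Location.local();
        } else {
            // different TaskManagers => remote channel via the producer's location
            return Location.remote(producerAddress, connectionIndex);
        }
    }

    public static void main(String[] args) {
        System.out.println(decide("tm-1", "10.0.0.1:6121", 0, "tm-1").kind); // LOCAL
        System.out.println(decide("tm-1", "10.0.0.1:6121", 0, "tm-2").kind); // REMOTE
    }
}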
Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.
The class SlotSharingGroupAssignment, method getSlotForTaskInternal:
private Tuple2<SharedSlot, Locality> getSlotForTaskInternal(AbstractID groupId, Iterable<TaskManagerLocation> preferredLocations, boolean localOnly) {
    // check if there is anything at all in this group assignment
    if (allSlots.isEmpty()) {
        return null;
    }

    // get the available slots for the group
    Map<ResourceID, List<SharedSlot>> slotsForGroup = availableSlotsPerJid.get(groupId);

    if (slotsForGroup == null) {
        // we have a new group, so all slots are available
        slotsForGroup = new LinkedHashMap<>();
        availableSlotsPerJid.put(groupId, slotsForGroup);

        for (SharedSlot availableSlot : allSlots) {
            putIntoMultiMap(slotsForGroup, availableSlot.getTaskManagerID(), availableSlot);
        }
    } else if (slotsForGroup.isEmpty()) {
        // the group exists, but nothing is available for that group
        return null;
    }

    // check whether we can schedule the task to a preferred location
    boolean didNotGetPreferred = false;

    if (preferredLocations != null) {
        for (TaskManagerLocation location : preferredLocations) {
            // set the flag that we failed a preferred location. If one will be found,
            // we return early anyways and skip the flag evaluation
            didNotGetPreferred = true;

            SharedSlot slot = removeFromMultiMap(slotsForGroup, location.getResourceID());
            if (slot != null && slot.isAlive()) {
                return new Tuple2<>(slot, Locality.LOCAL);
            }
        }
    }

    // if we want only local assignments, exit now with a "not found" result
    if (didNotGetPreferred && localOnly) {
        return null;
    }

    Locality locality = didNotGetPreferred ? Locality.NON_LOCAL : Locality.UNCONSTRAINED;

    // schedule the task to any available location
    SharedSlot slot;
    while ((slot = pollFromMultiMap(slotsForGroup)) != null) {
        if (slot.isAlive()) {
            return new Tuple2<>(slot, locality);
        }
    }

    // nothing available after all, all slots were dead
    return null;
}
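getSlotForTaskInternal relies on three multi-map helpers keyed by the ResourceID taken from each TaskManagerLocation: putIntoMultiMap, removeFromMultiMap and pollFromMultiMap. Their bodies are not part of this snippet, so the sketch below only shows plausible semantics for them, with String keys and a placeholder Slot type standing in for Flink's ResourceID and SharedSlot; treat it as an assumption about their behavior, not the actual implementation.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustration only: Slot stands in for Flink's SharedSlot, String for ResourceID.
final class MultiMapSketch {

    static final class Slot {
        final String name;
        Slot(String name) { this.name = name; }
    }

    // Append a slot to the list kept for the given TaskManager.
    static void putIntoMultiMap(Map<String, List<Slot>> map, String resourceId, Slot slot) {
        map.computeIfAbsent(resourceId, k -> new ArrayList<>()).add(slot);
    }

    // Remove and return one slot for the given TaskManager, or null if none is available.
    static Slot removeFromMultiMap(Map<String, List<Slot>> map, String resourceId) {
        List<Slot> slots = map.get(resourceId);
        if (slots == null || slots.isEmpty()) {
            return null;
        }
        Slot slot = slots.remove(slots.size() - 1);
        if (slots.isEmpty()) {
            map.remove(resourceId);
        }
        return slot;
    }

    // Remove and return a slot from any TaskManager, or null if nothing is left.
    static Slot pollFromMultiMap(Map<String, List<Slot>> map) {
        Iterator<Map.Entry<String, List<Slot>>> it = map.entrySet().iterator();
        while (it.hasNext()) {
            List<Slot> slots = it.next().getValue();
            if (!slots.isEmpty()) {
                Slot slot = slots.remove(slots.size() - 1);
                if (slots.isEmpty()) {
                    it.remove();
                }
                return slot;
            }
            it.remove(); // drop empty lists
        }
        return null;
    }

    public static void main(String[] args) {
        Map<String, List<Slot>> slotsForGroup = new LinkedHashMap<>();
        putIntoMultiMap(slotsForGroup, "tm-1", new Slot("slot-a"));
        putIntoMultiMap(slotsForGroup, "tm-2", new Slot("slot-b"));
        System.out.println(removeFromMultiMap(slotsForGroup, "tm-1").name); // slot-a
        System.out.println(pollFromMultiMap(slotsForGroup).name);           // slot-b
    }
}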
Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.
The class ResourceManagerITCase, method testResourceManagerReconciliation:
/**
* Tests whether the resource manager connects and reconciles existing task managers.
*/
@Test
public void testResourceManagerReconciliation() {
    new JavaTestKit(system) {
        {
            new Within(duration("10 seconds")) {

                @Override
                protected void run() {
                    ActorGateway jobManager = TestingUtils.createJobManager(
                        system, TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), config, "ReconciliationTest");
                    ActorGateway me = TestingUtils.createForwardingActor(system, getTestActor(), Option.<String>empty());

                    // !! no resource manager started !!
                    ResourceID resourceID = ResourceID.generate();

                    TaskManagerLocation location = mock(TaskManagerLocation.class);
                    when(location.getResourceID()).thenReturn(resourceID);

                    HardwareDescription resourceProfile = HardwareDescription.extractFromSystem(1_000_000);

                    jobManager.tell(new RegistrationMessages.RegisterTaskManager(resourceID, location, resourceProfile, 1), me);

                    expectMsgClass(RegistrationMessages.AcknowledgeRegistration.class);

                    // now start the resource manager
                    ActorGateway resourceManager = TestingUtils.createResourceManager(system, jobManager.actor(), config);

                    // register at testing job manager to receive a message once a resource manager registers
                    resourceManager.tell(new TestingResourceManager.NotifyWhenResourceManagerConnected(), me);

                    // Wait for resource manager
                    expectMsgEquals(Acknowledge.get());

                    // check if we registered the task manager resource
                    resourceManager.tell(new TestingResourceManager.GetRegisteredResources(), me);

                    TestingResourceManager.GetRegisteredResourcesReply reply =
                        expectMsgClass(TestingResourceManager.GetRegisteredResourcesReply.class);

                    assertEquals(1, reply.resources.size());
                    assertTrue(reply.resources.contains(resourceID));
                }
            };
        }
    };
}
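The test mocks TaskManagerLocation because only getResourceID() matters for the registration message. When a real instance is preferable, it can be constructed directly, exactly as the InstanceManagerTest snippet further down does; a minimal sketch (the host and data port are arbitrary example values, and the ResourceID import path is assumed):

import java.net.InetAddress;

import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;

class RealTaskManagerLocationSketch {

    // Alternative to mock(TaskManagerLocation.class): build a real location,
    // mirroring the constructor usage in the InstanceManagerTest snippet below.
    static TaskManagerLocation exampleLocation() throws Exception {
        ResourceID resourceID = ResourceID.generate();
        InetAddress address = InetAddress.getByName("127.0.0.1");
        return new TaskManagerLocation(resourceID, address, 20000);
    }
}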
Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.
The class SlotPoolTest, method createAllocatedSlot:
static AllocatedSlot createAllocatedSlot(final ResourceID resourceId, final AllocationID allocationId, final JobID jobId, final ResourceProfile resourceProfile) {
    TaskManagerLocation mockTaskManagerLocation = mock(TaskManagerLocation.class);
    when(mockTaskManagerLocation.getResourceID()).thenReturn(resourceId);

    TaskManagerGateway mockTaskManagerGateway = mock(TaskManagerGateway.class);

    return new AllocatedSlot(allocationId, jobId, mockTaskManagerLocation, 0, resourceProfile, mockTaskManagerGateway);
}
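A hypothetical call site for this helper, inside the same test class, could look as follows; the ResourceProfile constructor arguments are illustrative, and the getTaskManagerLocation() accessor on AllocatedSlot is an assumption about this Flink version rather than a confirmed API.

// Hypothetical usage of the helper above (all values are illustrative).
ResourceID resourceId = ResourceID.generate();
AllocatedSlot slot = createAllocatedSlot(resourceId, new AllocationID(), new JobID(), new ResourceProfile(1.0, 100));

// The stubbed location reports the resource ID that was passed in
// (getTaskManagerLocation() on AllocatedSlot is assumed here).
assertEquals(resourceId, slot.getTaskManagerLocation().getResourceID());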
Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.
The class InstanceManagerTest, method testRegisteringAlreadyRegistered:
@Test
public void testRegisteringAlreadyRegistered() {
    try {
        InstanceManager cm = new InstanceManager();

        final int dataPort = 20000;

        ResourceID resID1 = ResourceID.generate();
        ResourceID resID2 = ResourceID.generate();
        HardwareDescription resources = HardwareDescription.extractFromSystem(4096);
        InetAddress address = InetAddress.getByName("127.0.0.1");
        TaskManagerLocation ici = new TaskManagerLocation(resID1, address, dataPort);

        JavaTestKit probe = new JavaTestKit(system);
        cm.registerTaskManager(new ActorTaskManagerGateway(new AkkaActorGateway(probe.getRef(), leaderSessionID)), ici, resources, 1);

        assertEquals(1, cm.getNumberOfRegisteredTaskManagers());
        assertEquals(1, cm.getTotalNumberOfSlots());

        try {
            cm.registerTaskManager(new ActorTaskManagerGateway(new AkkaActorGateway(probe.getRef(), leaderSessionID)), ici, resources, 1);
        } catch (Exception e) {
            // good
        }

        // check for correct number of registered instances
        assertEquals(1, cm.getNumberOfRegisteredTaskManagers());
        assertEquals(1, cm.getTotalNumberOfSlots());

        cm.shutdown();
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        Assert.fail("Test erroneous: " + e.getMessage());
    }
}
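For reference, the ici location built above carries more than the ResourceID the InstanceManager keys on. A short sketch of reading it back; apart from getResourceID(), which appears throughout the snippets above, the accessor names are assumptions about this Flink version:

// Continuing from the test body above; accessors other than getResourceID() are assumed.
assertEquals(resID1, ici.getResourceID());
assertEquals(dataPort, ici.dataPort());   // assumed accessor for the data port (20000 here)
assertEquals(address, ici.address());     // assumed accessor for the InetAddress
System.out.println(ici.getHostname());    // assumed accessor; value depends on reverse DNS lookup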