use of org.apache.heron.spi.packing.PackingPlan in project heron by twitter.
The class CommonPackingTests, method doScaleDownTest.
protected void doScaleDownTest(Pair<Integer, InstanceId>[] initialComponentInstances,
                               Map<String, Integer> componentChanges,
                               Pair<Integer, InstanceId>[] expectedComponentInstances)
    throws ConstraintViolationException {
  String topologyId = this.topology.getId();

  // The padding percentage used in repack() must be <= the one used in pack(), otherwise we
  // can't reconstruct the PackingPlan, see https://github.com/apache/incubator-heron/issues/1577
  PackingPlan initialPackingPlan = PackingTestHelper.addToTestPackingPlan(
      topologyId, null,
      PackingTestHelper.toContainerIdComponentNames(initialComponentInstances),
      DEFAULT_CONTAINER_PADDING_PERCENT);
  AssertPacking.assertPackingPlan(topologyId, initialComponentInstances, initialPackingPlan);

  PackingPlan newPackingPlan = repack(this.topology, initialPackingPlan, componentChanges);
  AssertPacking.assertPackingPlan(topologyId, expectedComponentInstances, newPackingPlan);
}
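For orientation, here is a minimal sketch of how a concrete test in a CommonPackingTests subclass might drive this helper. The component names, counts, and the expected layout are illustrative assumptions (which instance a scale-down actually removes depends on the packing algorithm under test), and it assumes Heron's InstanceId(componentName, taskId, componentIndex) and Pair constructors plus the usual org.junit.Test and java.util.Collections imports.

// Hypothetical scale-down test; component names and the expected layout are assumptions.
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void removesOneBoltInstanceOnScaleDown() throws Exception {
  Pair<Integer, InstanceId>[] initial = new Pair[] {
      new Pair<>(1, new InstanceId("bolt", 1, 0)),   // container 1
      new Pair<>(1, new InstanceId("bolt", 2, 1)),   // container 1
      new Pair<>(2, new InstanceId("spout", 3, 0))   // container 2
  };
  Map<String, Integer> componentChanges = Collections.singletonMap("bolt", -1);  // drop one bolt
  Pair<Integer, InstanceId>[] expected = new Pair[] {
      new Pair<>(1, new InstanceId("bolt", 1, 0)),
      new Pair<>(2, new InstanceId("spout", 3, 0))
  };
  doScaleDownTest(initial, componentChanges, expected);
}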
use of org.apache.heron.spi.packing.PackingPlan in project heron by twitter.
The class PackingPlanBuilder, method getContainers.
/**
 * Generates the containers that correspond to the current packing plan
 * along with their associated instances.
 *
 * @return Map of containers for the current packing plan, keyed by containerId
 */
@VisibleForTesting
static Map<Integer, Container> getContainers(PackingPlan currentPackingPlan,
                                             Resource maxContainerResource,
                                             Resource padding,
                                             Map<String, TreeSet<Integer>> componentIndexes,
                                             TreeSet<Integer> taskIds) {
  Map<Integer, Container> containers = new HashMap<>();
  Resource capacity = maxContainerResource;
  for (PackingPlan.ContainerPlan currentContainerPlan : currentPackingPlan.getContainers()) {
    Container container = new Container(currentContainerPlan.getId(), capacity, padding);
    for (PackingPlan.InstancePlan instancePlan : currentContainerPlan.getInstances()) {
      addToContainer(container, instancePlan, componentIndexes, taskIds);
    }
    containers.put(currentContainerPlan.getId(), container);
  }
  return containers;
}
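Since the method is package-private and marked @VisibleForTesting, a test in the same package could exercise it roughly as below. This is a sketch under assumptions: the Resource(cpu, ram, disk) constructor and ByteAmount helpers are assumed from the Heron SPI, currentPackingPlan is an already-built plan in scope, and the empty componentIndexes/taskIds structures are placeholders that getContainers populates as a side effect.

// Sketch only: resource sizes and the empty index structures are placeholder assumptions.
Resource maxContainerResource =
    new Resource(4, ByteAmount.fromGigabytes(8), ByteAmount.fromGigabytes(16));
Resource padding =
    new Resource(1, ByteAmount.fromGigabytes(1), ByteAmount.fromGigabytes(1));
Map<String, TreeSet<Integer>> componentIndexes = new HashMap<>();
TreeSet<Integer> taskIds = new TreeSet<>();

Map<Integer, Container> containers = PackingPlanBuilder.getContainers(
    currentPackingPlan, maxContainerResource, padding, componentIndexes, taskIds);

// The map is keyed by container id, with one entry per ContainerPlan in the packing plan.
assertEquals(currentPackingPlan.getContainers().size(), containers.size());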
use of org.apache.heron.spi.packing.PackingPlan in project heron by twitter.
The class SchedulerMain, method runScheduler.
/**
 * Run the scheduler.
 * This is a blocking call that returns in two cases:
 * 1. The topology is requested to be killed
 * 2. An unexpected exception occurs
 *
 * @return true if scheduled successfully
 */
public boolean runScheduler() {
  IScheduler scheduler = null;

  String statemgrClass = Context.stateManagerClass(config);
  IStateManager statemgr;
  try {
    // create an instance of state manager
    statemgr = ReflectionUtils.newInstance(statemgrClass);
  } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
    LOG.log(Level.SEVERE, "Failed to instantiate instances using config: " + config, e);
    return false;
  }

  SchedulerServer server = null;
  boolean isSuccessful = false;

  // Put it in a try block so that we can always clean up resources
  try {
    // initialize the state manager
    statemgr.initialize(config);

    // TODO(mfu): timeout should be read from config
    SchedulerStateManagerAdaptor adaptor = new SchedulerStateManagerAdaptor(statemgr, 5000);

    // get a packed plan and schedule it
    PackingPlans.PackingPlan serializedPackingPlan = adaptor.getPackingPlan(topology.getName());
    if (serializedPackingPlan == null) {
      LOG.log(Level.SEVERE, "Failed to fetch PackingPlan for topology: {0} from the state manager",
          topology.getName());
      return false;
    }
    LOG.log(Level.INFO, "Packing plan fetched from state: {0}", serializedPackingPlan);
    PackingPlan packedPlan = new PackingPlanProtoDeserializer().fromProto(serializedPackingPlan);

    // build the runtime config
    LauncherUtils launcherUtils = LauncherUtils.getInstance();
    Config runtime = Config.newBuilder()
        .putAll(launcherUtils.createPrimaryRuntime(topology))
        .putAll(launcherUtils.createAdaptorRuntime(adaptor))
        .put(Key.SCHEDULER_SHUTDOWN, getShutdown())
        .put(Key.SCHEDULER_PROPERTIES, properties)
        .build();
    Config ytruntime = launcherUtils.createConfigWithPackingDetails(runtime, packedPlan);

    // invoke the scheduler
    scheduler = launcherUtils.getSchedulerInstance(config, ytruntime);
    if (scheduler == null) {
      return false;
    }

    isSuccessful = scheduler.onSchedule(packedPlan);
    if (!isSuccessful) {
      LOG.severe("Failed to schedule topology");
      return false;
    }

    // Failures in server initialization throw exceptions
    // get the scheduler server endpoint for receiving requests
    server = getServer(ytruntime, scheduler, schedulerServerPort);
    // start the server to manage runtime requests
    server.start();

    // write the scheduler location to the state manager
    // Make sure it happens after IScheduler.onSchedule
    isSuccessful = SchedulerUtils.setSchedulerLocation(
        runtime, String.format("%s:%d", server.getHost(), server.getPort()), scheduler);

    if (isSuccessful) {
      // wait until a kill request or some interrupt occurs if the scheduler started successfully
      LOG.info("Waiting for termination... ");
      Runtime.schedulerShutdown(ytruntime).await();
    }
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to start server", e);
    return false;
  } finally {
    // Clean up the resources
    if (server != null) {
      server.stop();
    }
    SysUtils.closeIgnoringExceptions(scheduler);
    SysUtils.closeIgnoringExceptions(statemgr);
  }

  return isSuccessful;
}
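Because runScheduler() blocks until the topology is killed or an error surfaces, a caller typically just maps the boolean result to an exit code. The sketch below assumes an already-constructed SchedulerMain instance; how that instance is built (CLI parsing, config loading) is outside this snippet, and the wrapper class and package import are illustrative assumptions.

import java.util.logging.Logger;
// assumed location of SchedulerMain in the Heron scheduler module
import org.apache.heron.scheduler.SchedulerMain;

final class SchedulerEntryPoint {
  private static final Logger LOG = Logger.getLogger(SchedulerEntryPoint.class.getName());

  // schedulerMain is assumed to be a fully configured SchedulerMain; this wrapper only
  // illustrates how the blocking runScheduler() result maps to a process exit code.
  static int runBlocking(SchedulerMain schedulerMain) {
    boolean scheduled = schedulerMain.runScheduler();  // returns on kill request or failure
    if (!scheduled) {
      LOG.severe("Scheduler terminated unsuccessfully");
      return 1;
    }
    return 0;
  }
}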
use of org.apache.heron.spi.packing.PackingPlan in project heron by twitter.
The class UpdateTopologyManager, method updateTopology.
private void updateTopology(final PackingPlans.PackingPlan existingProtoPackingPlan,
                            final PackingPlans.PackingPlan proposedProtoPackingPlan,
                            SchedulerStateManagerAdaptor stateManager)
    throws ExecutionException, InterruptedException {
  String topologyName = Runtime.topologyName(runtime);
  PackingPlan existingPackingPlan = deserializer.fromProto(existingProtoPackingPlan);
  PackingPlan proposedPackingPlan = deserializer.fromProto(proposedProtoPackingPlan);

  Preconditions.checkArgument(proposedPackingPlan.getContainers().size() > 0,
      String.format("proposed packing plan must have at least 1 container %s", proposedPackingPlan));

  ContainerDelta containerDelta = new ContainerDelta(
      existingPackingPlan.getContainers(), proposedPackingPlan.getContainers());
  int newContainerCount = containerDelta.getContainersToAdd().size();
  int removableContainerCount = containerDelta.getContainersToRemove().size();

  String message = String.format("Topology change requires %s new containers and removing %s "
      + "existing containers, but the scheduler does not support scaling, aborting. "
      + "Existing packing plan: %s, proposed packing plan: %s",
      newContainerCount, removableContainerCount, existingPackingPlan, proposedPackingPlan);
  Preconditions.checkState(
      newContainerCount + removableContainerCount == 0 || scalableScheduler.isPresent(), message);

  TopologyAPI.Topology topology = getTopology(stateManager, topologyName);
  boolean initiallyRunning = topology.getState() == TopologyAPI.TopologyState.RUNNING;

  // deactivate and sleep
  if (initiallyRunning) {
    // Update the topology since the state should have changed from RUNNING to PAUSED
    // Will throw exceptions internally if the tmanager fails to deactivate
    deactivateTopology(stateManager, topology, proposedPackingPlan);
  }

  Set<PackingPlan.ContainerPlan> updatedContainers =
      new HashSet<>(proposedPackingPlan.getContainers());

  // Request new containers first (if needed); the updated plan should then be written back
  // to the state manager quickly, otherwise the scheduler might penalize for thrashing on start-up
  if (newContainerCount > 0 && scalableScheduler.isPresent()) {
    Set<PackingPlan.ContainerPlan> containersToAdd = containerDelta.getContainersToAdd();
    Set<PackingPlan.ContainerPlan> containersAdded =
        scalableScheduler.get().addContainers(containersToAdd);
    // Update the PackingPlan with the new container ids
    if (containersAdded != null) {
      if (containersAdded.size() != containersToAdd.size()) {
        throw new RuntimeException("Scheduler failed to add requested containers. Requested "
            + containersToAdd.size() + ", added " + containersAdded.size() + ". "
            + "The topology can be in a strange state. "
            + "Please check carefully or redeploy the topology!");
      }
      updatedContainers.removeAll(containersToAdd);
      updatedContainers.addAll(containersAdded);
    }
  }

  PackingPlan updatedPackingPlan = new PackingPlan(proposedPackingPlan.getId(), updatedContainers);
  PackingPlanProtoSerializer serializer = new PackingPlanProtoSerializer();
  PackingPlans.PackingPlan updatedProtoPackingPlan = serializer.toProto(updatedPackingPlan);
  LOG.fine("The updated PackingPlan: " + updatedProtoPackingPlan);

  // update the packing plan to trigger the scaling event
  logInfo("Update new PackingPlan: %s",
      stateManager.updatePackingPlan(updatedProtoPackingPlan, topologyName));

  // reactivate topology
  if (initiallyRunning) {
    // wait before reactivating to give the tmanager a chance to receive the packing update and
    // delete the physical plan. Instead we could message the tmanager to invalidate the physical
    // plan and/or possibly even update the packing plan directly
    SysUtils.sleep(Duration.ofSeconds(10));
    // Will throw exceptions internally if the tmanager fails to activate
    reactivateTopology(stateManager, topology, removableContainerCount);
  }

  if (removableContainerCount > 0 && scalableScheduler.isPresent()) {
    scalableScheduler.get().removeContainers(containerDelta.getContainersToRemove());
  }
}
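The serializer/deserializer pair used above is symmetric, so a packing plan built from ContainerPlans can be round-tripped through its protobuf form. A minimal sketch, assuming the PackingTestUtils.testContainerPlan helper from the SubmitDryRunRenderTest setup shown next is available; the plan id, container ids, and component names here are illustrative:

// Sketch: the container/plan ids and components are made-up values.
Set<PackingPlan.ContainerPlan> containerPlans = new HashSet<>();
containerPlans.add(PackingTestUtils.testContainerPlan(1, new Pair<>("word", 1)));
containerPlans.add(PackingTestUtils.testContainerPlan(2, new Pair<>("exclaim1", 2)));
PackingPlan plan = new PackingPlan("my-topology", containerPlans);

// Serialize to the proto form stored in the state manager ...
PackingPlans.PackingPlan proto = new PackingPlanProtoSerializer().toProto(plan);
// ... and deserialize it back, the way runScheduler() and updateTopology() do.
PackingPlan roundTripped = new PackingPlanProtoDeserializer().fromProto(proto);

// Round-tripping preserves the container set.
assert roundTripped.getContainers().size() == plan.getContainers().size();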
use of org.apache.heron.spi.packing.PackingPlan in project heron by twitter.
The class SubmitDryRunRenderTest, method setUp.
@Before
public void setUp() throws Exception {
  final String COMPONENT_A = "exclaim1";
  final String COMPONENT_B = "word";
  ContainerPlan containerPlanA = PackingTestUtils.testContainerPlan(
      1, new Pair<>(COMPONENT_A, 1), new Pair<>(COMPONENT_A, 3), new Pair<>(COMPONENT_B, 5));
  ContainerPlan containerPlanB = PackingTestUtils.testContainerPlan(
      2, new Pair<>(COMPONENT_A, 2), new Pair<>(COMPONENT_A, 4), new Pair<>(COMPONENT_B, 6));
  Set<ContainerPlan> containerPlans = new HashSet<>();
  containerPlans.add(containerPlanA);
  containerPlans.add(containerPlanB);
  plan = new PackingPlan("A", containerPlans);
}