Use of org.apache.heron.spi.packing.Resource in project heron by Twitter.
In the class AuroraSchedulerTest, the method testProperties:
@Test
public void testProperties() throws URISyntaxException {
TopologyAPI.Topology topology = TopologyTests.createTopology(TOPOLOGY_NAME, new org.apache.heron.api.Config(), "spoutName", "boltName", 1, 1);
Config runtime = mock(Config.class);
when(runtime.get(Key.TOPOLOGY_DEFINITION)).thenReturn(topology);
when(runtime.get(Key.TOPOLOGY_PACKAGE_URI)).thenReturn(new URI("http://foo/bar"));
// This must mimic how SubmitterMain loads configs
CommandLine commandLine = mock(CommandLine.class);
when(commandLine.getOptionValue("cluster")).thenReturn("some_cluster");
when(commandLine.getOptionValue("role")).thenReturn("some_role");
when(commandLine.getOptionValue("environment")).thenReturn("some_env");
when(commandLine.getOptionValue("heron_home")).thenReturn("/some/heron/home");
when(commandLine.getOptionValue("config_path")).thenReturn("/some/config/path");
when(commandLine.getOptionValue("topology_package")).thenReturn("jar");
when(commandLine.getOptionValue("topology_defn")).thenReturn("/mock/defnFile.defn");
when(commandLine.getOptionValue("topology_bin")).thenReturn("binaryFile.jar");
Config config = Mockito.spy(SubmitterMain.loadConfig(commandLine, topology));
AuroraScheduler testScheduler = new AuroraScheduler();
testScheduler.initialize(config, runtime);
Resource containerResource = new Resource(2.3, ByteAmount.fromGigabytes(2), ByteAmount.fromGigabytes(3));
Map<AuroraField, String> properties = testScheduler.createAuroraProperties(containerResource);
// This part is key: the conf path in the config is absolute to the install dir, but what the
// aurora properties get below is the relative ./heron-conf path to be used when running remotely
assertEquals("Invalid value for key " + Key.HERON_CONF, "/some/config/path", config.getStringValue(Key.HERON_CONF));
String expectedConf = "./heron-conf";
String expectedBin = "./heron-core/bin";
String expectedLib = "./heron-core/lib";
String expectedDist = "./heron-core/dist";
for (AuroraField field : AuroraField.values()) {
boolean asserted = false;
Object expected = null;
Object found = properties.get(field);
switch(field) {
case CORE_PACKAGE_URI:
expected = expectedDist + "/heron-core.tar.gz";
break;
case CPUS_PER_CONTAINER:
expected = Double.valueOf(containerResource.getCpu()).toString();
break;
case DISK_PER_CONTAINER:
expected = Long.valueOf(containerResource.getDisk().asBytes()).toString();
break;
case RAM_PER_CONTAINER:
expected = Long.valueOf(containerResource.getRam().asBytes()).toString();
break;
case TIER:
expected = "preemptible";
break;
case NUM_CONTAINERS:
expected = "2";
break;
case EXECUTOR_BINARY:
expected = expectedBin + "/heron-executor";
break;
case TOPOLOGY_PACKAGE_URI:
expected = "http://foo/bar";
break;
case TOPOLOGY_ARGUMENTS:
expected = "--topology-name=topologyName" + " --topology-id=" + topology.getId() + " --topology-defn-file=defnFile.defn" + " --state-manager-connection=null" + " --state-manager-root=null" + " --state-manager-config-file=" + expectedConf + "/statemgr.yaml" + " --tmanager-binary=" + expectedBin + "/heron-tmanager" + " --stmgr-binary=" + expectedBin + "/heron-stmgr" + " --metrics-manager-classpath=" + expectedLib + "/metricsmgr/*" + " --instance-jvm-opts=\"\"" + " --classpath=binaryFile.jar" + " --heron-internals-config-file=" + expectedConf + "/heron_internals.yaml" + " --override-config-file=" + expectedConf + "/override.yaml" + " --component-ram-map=null" + " --component-jvm-opts=\"\"" + " --pkg-type=jar" + " --topology-binary-file=binaryFile.jar" + " --heron-java-home=/usr/lib/jvm/default-java" + " --heron-shell-binary=" + expectedBin + "/heron-shell" + " --cluster=some_cluster" + " --role=some_role" + " --environment=some_env" + " --instance-classpath=" + expectedLib + "/instance/*" + " --metrics-sinks-config-file=" + expectedConf + "/metrics_sinks.yaml" + " --scheduler-classpath=" + expectedLib + "/scheduler/*:./heron-core" + "/lib/packing/*:" + expectedLib + "/statemgr/*" + " --python-instance-binary=" + expectedBin + "/heron-python-instance" + " --cpp-instance-binary=" + expectedBin + "/heron-cpp-instance" + " --metricscache-manager-classpath=" + expectedLib + "/metricscachemgr/*" + " --metricscache-manager-mode=disabled" + " --is-stateful=false" + " --checkpoint-manager-classpath=" + expectedLib + "/ckptmgr/*:" + expectedLib + "/statefulstorage/*:" + " --stateful-config-file=" + expectedConf + "/stateful.yaml" + " --checkpoint-manager-ram=1073741824" + " --health-manager-mode=disabled" + " --health-manager-classpath=" + expectedLib + "/healthmgr/*";
break;
case CLUSTER:
expected = "some_cluster";
break;
case ENVIRON:
expected = "some_env";
break;
case ROLE:
expected = "some_role";
break;
case TOPOLOGY_NAME:
expected = "topologyName";
break;
default:
fail(String.format("Expected value for Aurora field %s not found in test (found=%s)", field, found));
}
if (!asserted) {
assertEquals("Incorrect value found for field " + field, expected, found);
}
properties.remove(field);
}
assertTrue("The following aurora fields were not set by the scheduler: " + properties, properties.isEmpty());
}
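For reference, the expected per-container values in the switch above are derived directly from the Resource passed to createAuroraProperties at the top of the test. A minimal sketch of that derivation, using only the accessors already shown in this snippet:

Resource r = new Resource(2.3, ByteAmount.fromGigabytes(2), ByteAmount.fromGigabytes(3));
String cpus = Double.valueOf(r.getCpu()).toString();          // CPU cores as a decimal string, e.g. "2.3"
String ram = Long.valueOf(r.getRam().asBytes()).toString();    // RAM expressed in bytes, as a string
String disk = Long.valueOf(r.getDisk().asBytes()).toString();  // disk expressed in bytes, as a string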
Use of org.apache.heron.spi.packing.Resource in project heron by Twitter.
In the class NomadScheduler, the method getJobs:
List<Job> getJobs(PackingPlan packing) {
List<Job> ret = new LinkedList<>();
PackingPlan homogeneousPackingPlan = getHomogeneousPackingPlan(packing);
Resource resource = getHomogeneousContainerResource(homogeneousPackingPlan);
for (int i = 0; i < Runtime.numContainers(this.runtimeConfig); i++) {
Optional<PackingPlan.ContainerPlan> containerPlan = homogeneousPackingPlan.getContainer(i);
ret.add(getJob(i, containerPlan, resource));
}
return ret;
}
Use of org.apache.heron.spi.packing.Resource in project heron by Twitter.
In the class AuroraScheduler, the method onSchedule:
@Override
public boolean onSchedule(PackingPlan packing) {
if (packing == null || packing.getContainers().isEmpty()) {
LOG.severe("No container requested. Can't schedule");
return false;
}
LOG.info("Launching topology in aurora");
// Align the CPU, RAM, and disk to the maximal values across containers, and set them as the ScheduledResource
PackingPlan updatedPackingPlan = packing.cloneWithHomogeneousScheduledResource();
SchedulerUtils.persistUpdatedPackingPlan(Runtime.topologyName(runtime), updatedPackingPlan, Runtime.schedulerStateManagerAdaptor(runtime));
// Use the ScheduledResource to create aurora properties
// the ScheduledResource is guaranteed to be set after calling
// cloneWithHomogeneousScheduledResource in the above code
Resource containerResource = updatedPackingPlan.getContainers().iterator().next().getScheduledResource().get();
Map<AuroraField, String> auroraProperties = createAuroraProperties(containerResource);
Map<String, String> extraProperties = createExtraProperties(containerResource);
return controller.createJob(auroraProperties, extraProperties);
}
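The homogeneous alignment mentioned in the comment above takes the maximum CPU, RAM, and disk over all containers, so every container ends up with the same scheduled Resource. A conceptual sketch of that alignment, using only the Resource and ByteAmount operations shown elsewhere on this page (not the actual cloneWithHomogeneousScheduledResource implementation; the helper name maxAligned is made up for illustration):

static Resource maxAligned(Iterable<Resource> containerResources) {
  double cpu = 0.0;
  ByteAmount ram = ByteAmount.ZERO;
  ByteAmount disk = ByteAmount.ZERO;
  for (Resource r : containerResources) {
    // keep the largest value seen in each dimension
    cpu = Math.max(cpu, r.getCpu());
    if (ram.lessThan(r.getRam())) {
      ram = r.getRam();
    }
    if (disk.lessThan(r.getDisk())) {
      disk = r.getDisk();
    }
  }
  return new Resource(cpu, ram, disk);
}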
Use of org.apache.heron.spi.packing.Resource in project heron by Twitter.
In the class ResourceConstraint, the method validate:
@Override
public void validate(Container container, PackingPlan.InstancePlan instancePlan) throws ResourceExceededException {
Resource usedResource = container.getTotalUsedResources();
Resource newUsedResource = usedResource.plus(instancePlan.getResource());
Resource capacity = container.getCapacity();
if (capacity.getCpu() < newUsedResource.getCpu()) {
throw new ResourceExceededException(String.format("Adding instance %s with %.3f cores " + "to container %d with existing %.3f cores " + "would exceed its capacity of %.3f cores", instancePlan.getComponentName(), instancePlan.getResource().getCpu(), container.getContainerId(), usedResource.getCpu(), capacity.getCpu()));
}
if (capacity.getRam().lessThan(newUsedResource.getRam())) {
throw new ResourceExceededException(String.format("Adding instance %s with %s RAM " + "to container %d with existing %s RAM " + "would exceed its capacity of %s RAM", instancePlan.getComponentName(), instancePlan.getResource().getRam().toString(), container.getContainerId(), usedResource.getRam().toString(), capacity.getRam().toString()));
}
if (capacity.getDisk().lessThan(newUsedResource.getDisk())) {
throw new ResourceExceededException(String.format("Adding instance %s with %s disk " + "to container %d with existing %s disk " + "would exceed its capacity of %s disk", instancePlan.getComponentName(), instancePlan.getResource().getDisk().toString(), container.getContainerId(), usedResource.getDisk().toString(), capacity.getDisk().toString()));
}
}
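The Resource arithmetic driving these checks is simple addition followed by a per-dimension comparison. A minimal sketch of the same check outside the packing machinery, with made-up capacity, used, and requested values:

Resource capacity = new Resource(4.0, ByteAmount.fromGigabytes(8), ByteAmount.fromGigabytes(16));
Resource used = new Resource(3.0, ByteAmount.fromGigabytes(6), ByteAmount.fromGigabytes(10));
Resource requested = new Resource(1.5, ByteAmount.fromGigabytes(1), ByteAmount.fromGigabytes(2));
Resource newUsed = used.plus(requested);
boolean cpuFits = capacity.getCpu() >= newUsed.getCpu();            // 4.0 >= 4.5 -> false, would throw
boolean ramFits = !capacity.getRam().lessThan(newUsed.getRam());     // 8 GB >= 7 GB -> true
boolean diskFits = !capacity.getDisk().lessThan(newUsed.getDisk());  // 16 GB >= 12 GB -> true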
Use of org.apache.heron.spi.packing.Resource in project heron by Twitter.
In the class RoundRobinPacking, the method packInternal:
private PackingPlan packInternal(int numContainer, Map<String, Integer> parallelismMap) {
// Get the instances' round-robin allocation
Map<Integer, List<InstanceId>> roundRobinAllocation = getRoundRobinAllocation(numContainer, parallelismMap);
Resource containerResourceHint = getContainerResourceHint(roundRobinAllocation);
int largestContainerSize = getLargestContainerSize(roundRobinAllocation);
// Get the RAM map for every instance
ByteAmount containerRamDefault = instanceRamDefault.multiply(largestContainerSize).plus(containerRamPadding);
Map<Integer, Map<InstanceId, ByteAmount>> instancesRamMap = calculateInstancesResourceMapInContainer(roundRobinAllocation, TopologyUtils.getComponentRamMapConfig(topology), containerResourceHint.getRam(), containerRamDefault, instanceRamDefault, containerRamPadding, ByteAmount.ZERO, NOT_SPECIFIED_BYTE_AMOUNT, RAM);
// Get the CPU map for every instance
float containerCPUDefault = Math.round(instanceCpuDefault * largestContainerSize + containerCpuPadding);
Map<Integer, Map<InstanceId, CPUShare>> instancesCpuMap = calculateInstancesResourceMapInContainer(roundRobinAllocation, CPUShare.convertDoubleMapToCpuShareMap(TopologyUtils.getComponentCpuMapConfig(topology)), CPUShare.fromDouble(containerResourceHint.getCpu()), CPUShare.fromDouble(containerCPUDefault), CPUShare.fromDouble(instanceCpuDefault), CPUShare.fromDouble(containerCpuPadding), CPUShare.fromDouble(0.0), CPUShare.fromDouble(NOT_SPECIFIED_CPU_SHARE), CPU);
LOG.info(String.format("Pack internal: container CPU hint: %.3f, RAM hint: %s, disk hint: %s.", containerResourceHint.getCpu(), containerResourceHint.getRam().toString(), containerResourceHint.getDisk().toString()));
// Construct the PackingPlan
Set<PackingPlan.ContainerPlan> containerPlans = new HashSet<>();
for (int containerId : roundRobinAllocation.keySet()) {
List<InstanceId> instanceList = roundRobinAllocation.get(containerId);
// Calculate the resource required for a single instance
Map<InstanceId, PackingPlan.InstancePlan> instancePlanMap = new HashMap<>();
ByteAmount containerRam = ByteAmount.ZERO;
double containerCpu = 0.0;
for (InstanceId instanceId : instanceList) {
ByteAmount instanceRam = instancesRamMap.get(containerId).get(instanceId);
Double instanceCpu = instancesCpuMap.get(containerId).get(instanceId).getValue();
// Disk config per component is not yet supported, so just use the default.
ByteAmount instanceDisk = instanceDiskDefault;
Resource resource = new Resource(instanceCpu, instanceRam, instanceDisk);
// Insert it into the map
instancePlanMap.put(instanceId, new PackingPlan.InstancePlan(instanceId, resource));
containerRam = containerRam.plus(instanceRam);
containerCpu += instanceCpu;
}
// finalize container resource
containerCpu += containerCpuPadding;
if (containerResourceHint.getCpu() != NOT_SPECIFIED_CPU_SHARE) {
containerCpu = Math.min(containerCpu, containerResourceHint.getCpu());
}
containerRam = containerRam.plus(containerRamPadding);
if (!containerResourceHint.getRam().equals(NOT_SPECIFIED_BYTE_AMOUNT)) {
containerRam = ByteAmount.fromBytes(Math.min(containerRam.asBytes(), containerResourceHint.getRam().asBytes()));
}
ByteAmount containerDisk = containerResourceHint.getDisk();
if (containerDisk.equals(NOT_SPECIFIED_BYTE_AMOUNT)) {
containerDisk = instanceDiskDefault.multiply(largestContainerSize).plus(DEFAULT_DISK_PADDING_PER_CONTAINER);
}
Resource resource = new Resource(Math.max(containerCpu, containerResourceHint.getCpu()), containerRam, containerDisk);
PackingPlan.ContainerPlan containerPlan = new PackingPlan.ContainerPlan(containerId, new HashSet<>(instancePlanMap.values()), resource);
containerPlans.add(containerPlan);
LOG.info(String.format("Pack internal finalized: container#%d CPU: %f, RAM: %s, disk: %s.", containerId, resource.getCpu(), resource.getRam().toString(), resource.getDisk().toString()));
}
PackingPlan plan = new PackingPlan(topology.getId(), containerPlans);
validatePackingPlan(plan);
return plan;
}
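When the container resource hints are not specified, the defaults above scale with the size of the largest container. A small worked sketch of that arithmetic with assumed defaults (e.g. 1 GB RAM and 1 core per instance, 2 GB RAM and 1 core of container padding, and 4 instances in the largest container):

ByteAmount instanceRamDefault = ByteAmount.fromGigabytes(1);
ByteAmount containerRamPadding = ByteAmount.fromGigabytes(2);
double instanceCpuDefault = 1.0;
double containerCpuPadding = 1.0;
int largestContainerSize = 4;
// container RAM default: 4 * 1 GB + 2 GB = 6 GB
ByteAmount containerRamDefault = instanceRamDefault.multiply(largestContainerSize).plus(containerRamPadding);
// container CPU default: round(4 * 1.0 + 1.0) = 5 cores
float containerCPUDefault = Math.round(instanceCpuDefault * largestContainerSize + containerCpuPadding);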