Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class TestResourceAwareScheduler, method testResourceLimitation.
@Test
public void testResourceLimitation() {
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(2, 2, 400, 2000);
// a topology with a two-executor spout and a single-executor bolt
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout", new TestWordSpout(), 2).setCPULoad(250.0).setMemoryLoad(1000.0, 200.0);
builder1.setBolt("wordCountBolt", new TestWordCounter(), 1).shuffleGrouping("wordSpout").setCPULoad(100.0).setMemoryLoad(500.0, 100.0);
StormTopology stormTopology1 = builder1.createTopology();
Config config = new Config();
config.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap1 = genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 2, executorMap1, 0, "user");
ResourceAwareScheduler rs = new ResourceAwareScheduler();
scheduler = rs;
Topologies topologies = new Topologies(topology1);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
rs.prepare(config, new StormMetricsRegistry());
rs.schedule(topologies, cluster);
SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
Set<String> nodesIDs1 = new HashSet<>();
for (WorkerSlot slot : assignedSlots1) {
nodesIDs1.add(slot.getNodeId());
}
Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
List<Double> assignedExecutorMemory = new ArrayList<>();
List<Double> assignedExecutorCpu = new ArrayList<>();
for (ExecutorDetails executor : executors1) {
assignedExecutorMemory.add(topology1.getTotalMemReqTask(executor));
assignedExecutorCpu.add(topology1.getTotalCpuReqTask(executor));
}
Collections.sort(assignedExecutorCpu);
Collections.sort(assignedExecutorMemory);
Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
Map<Double, Double> cpuAvailableToUsed = new HashMap<>();
Map<Double, Double> memoryAvailableToUsed = new HashMap<>();
for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment1.getExecutorToSlot().entrySet()) {
executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
}
for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
supervisorToExecutors.computeIfAbsent(entry.getValue(), k -> new ArrayList<>()).add(entry.getKey());
}
for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
Double supervisorTotalCpu = entry.getKey().getTotalCpu();
Double supervisorTotalMemory = entry.getKey().getTotalMemory();
Double supervisorUsedCpu = 0.0;
Double supervisorUsedMemory = 0.0;
for (ExecutorDetails executor : entry.getValue()) {
supervisorUsedCpu += topology1.getTotalCpuReqTask(executor);
supervisorUsedMemory += topology1.getTotalMemReqTask(executor);
}
cpuAvailableToUsed.put(supervisorTotalCpu, supervisorUsedCpu);
memoryAvailableToUsed.put(supervisorTotalMemory, supervisorUsedMemory);
}
// executor0 resides on one worker (on one node); executor1 and executor2 are on another worker (on the other node)
assertEquals(2, assignedSlots1.size());
assertEquals(2, nodesIDs1.size());
assertEquals(3, executors1.size());
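// Sorted per-executor requests: CPU = [100, 250, 250]; memory = [600, 1200, 1200] MB,
// where 600 = 500 + 100 (bolt, on-heap + off-heap) and 1200 = 1000 + 200 (each spout executor).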
assertEquals(100.0, assignedExecutorCpu.get(0), 0.001);
assertEquals(250.0, assignedExecutorCpu.get(1), 0.001);
assertEquals(250.0, assignedExecutorCpu.get(2), 0.001);
assertEquals(600.0, assignedExecutorMemory.get(0), 0.001);
assertEquals(1200.0, assignedExecutorMemory.get(1), 0.001);
assertEquals(1200.0, assignedExecutorMemory.get(2), 0.001);
for (Map.Entry<Double, Double> entry : memoryAvailableToUsed.entrySet()) {
assertTrue(entry.getKey() - entry.getValue() >= 0);
}
for (Map.Entry<Double, Double> entry : cpuAvailableToUsed.entrySet()) {
assertTrue(entry.getKey() - entry.getValue() >= 0);
}
assertFalse(cluster.needsSchedulingRas(topology1));
assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
}
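Note: cpuAvailableToUsed and memoryAvailableToUsed key resources by their Double totals, so two supervisors reporting identical totals would collide on a single map entry. A minimal collision-free sketch, assuming the same supervisorToExecutors map built above, keys by supervisor id instead:

    Map<String, Double> cpuUsedBySupervisor = new HashMap<>();
    Map<String, Double> memUsedBySupervisor = new HashMap<>();
    for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
        SupervisorDetails sup = entry.getKey();
        double usedCpu = 0.0;
        double usedMem = 0.0;
        for (ExecutorDetails executor : entry.getValue()) {
            usedCpu += topology1.getTotalCpuReqTask(executor);
            usedMem += topology1.getTotalMemReqTask(executor);
        }
        // Headroom must be non-negative on every supervisor that hosts executors.
        assertTrue(sup.getTotalCpu() >= usedCpu);
        assertTrue(sup.getTotalMemory() >= usedMem);
        cpuUsedBySupervisor.put(sup.getId(), usedCpu);
        memUsedBySupervisor.put(sup.getId(), usedMem);
    }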
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class TestResourceAwareScheduler, method testTopologyWithMultipleSpouts.
@Test
public void testTopologyWithMultipleSpouts() {
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(2, 4, 400, 2000);
// a topology with multiple spouts
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 1);
builder1.setSpout("wordSpout2", new TestWordSpout(), 1);
builder1.setBolt("wordCountBolt1", new TestWordCounter(), 1).shuffleGrouping("wordSpout1").shuffleGrouping("wordSpout2");
builder1.setBolt("wordCountBolt2", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
builder1.setBolt("wordCountBolt3", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
builder1.setBolt("wordCountBolt4", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt2");
builder1.setBolt("wordCountBolt5", new TestWordCounter(), 1).shuffleGrouping("wordSpout2");
StormTopology stormTopology1 = builder1.createTopology();
Config config = new Config();
config.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap1 = genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 0, executorMap1, 0, "user");
// a topology with two unconnected partitions
TopologyBuilder builder2 = new TopologyBuilder();
builder2.setSpout("wordSpoutX", new TestWordSpout(), 1);
builder2.setSpout("wordSpoutY", new TestWordSpout(), 1);
StormTopology stormTopology2 = builder2.createTopology();
Map<ExecutorDetails, String> executorMap2 = genExecsAndComps(stormTopology2);
TopologyDetails topology2 = new TopologyDetails("topology2", config, stormTopology2, 0, executorMap2, 0, "user");
scheduler = new ResourceAwareScheduler();
Topologies topologies = new Topologies(topology1, topology2);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
scheduler.prepare(config, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
Set<String> nodesIDs1 = new HashSet<>();
for (WorkerSlot slot : assignedSlots1) {
nodesIDs1.add(slot.getNodeId());
}
Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
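// topology1 has seven single-executor components (2 spouts + 5 bolts); with the default
// per-component resources they all fit in a single worker slot on one node.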
assertEquals(1, assignedSlots1.size());
assertEquals(1, nodesIDs1.size());
assertEquals(7, executors1.size());
assertFalse(cluster.needsSchedulingRas(topology1));
assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
SchedulerAssignment assignment2 = cluster.getAssignmentById(topology2.getId());
Set<WorkerSlot> assignedSlots2 = assignment2.getSlots();
Set<String> nodesIDs2 = new HashSet<>();
for (WorkerSlot slot : assignedSlots2) {
nodesIDs2.add(slot.getNodeId());
}
Collection<ExecutorDetails> executors2 = assignment2.getExecutors();
assertEquals(1, assignedSlots2.size());
assertEquals(1, nodesIDs2.size());
assertEquals(2, executors2.size());
assertFalse(cluster.needsSchedulingRas(topology2));
assertTrue(cluster.getStatusMap().get(topology2.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
}
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class TestGenericResourceAwareStrategy, method testGenericResourceAwareStrategySharedMemory.
/**
* Test that the scheduling logic for the GenericResourceAwareStrategy handles shared memory correctly.
*/
@Test
public void testGenericResourceAwareStrategySharedMemory() {
int spoutParallelism = 2;
int boltParallelism = 2;
int numBolts = 3;
double cpuPercent = 10;
double memoryOnHeap = 10;
double memoryOffHeap = 10;
double sharedOnHeap = 500;
double sharedOffHeapNode = 700;
double sharedOffHeapWorker = 500;
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new TestSpout(), spoutParallelism).addResource("gpu.count", 1.0);
builder.setBolt("bolt-1", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWorker, "bolt-1 shared off heap worker")).shuffleGrouping("spout");
builder.setBolt("bolt-2", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapNode, "bolt-2 shared node")).shuffleGrouping("bolt-1");
builder.setBolt("bolt-3", new TestBolt(), boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeap, "bolt-3 shared worker")).shuffleGrouping("bolt-2");
StormTopology stormTopology = builder.createTopology();
INimbus iNimbus = new INimbusTest();
Config conf = createGrasClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null, Collections.emptyMap());
Map<String, Double> genericResourcesMap = new HashMap<>();
genericResourcesMap.put("gpu.count", 1.0);
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000, genericResourcesMap);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, "testTopology");
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(stormTopology), currentTime, "user");
Topologies topologies = new Topologies(topo);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
for (Entry<String, SupervisorResources> entry : cluster.getSupervisorsResourcesMap().entrySet()) {
String supervisorId = entry.getKey();
SupervisorResources resources = entry.getValue();
assertTrue(supervisorId, resources.getTotalCpu() >= resources.getUsedCpu());
assertTrue(supervisorId, resources.getTotalMem() >= resources.getUsedMem());
}
// If we didn't take GPUs into account, everything would fit in a single slot.
// But because there is only 1 GPU per node and each of the 2 spout executors needs a GPU,
// the topology has to be scheduled on at least 2 nodes, and hence 2 slots.
// Because of this, all of the bolts will be scheduled on a single slot with one of
// the spouts, and the other spout is on its own slot. So everything that can be shared is
// shared.
int totalNumberOfTasks = (spoutParallelism + (boltParallelism * numBolts));
double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
double totalExpectedOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeap;
double totalExpectedWorkerOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWorker;
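// With 8 tasks (2 spout + 3 * 2 bolt executors): expected CPU = 8 * 10 = 80%,
// on-heap = 8 * 10 + 500 = 580 MB, and worker off-heap = 8 * 10 + 500 = 580 MB.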
SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
Set<WorkerSlot> slots = assignment.getSlots();
Map<String, Double> nodeToTotalShared = assignment.getNodeIdToTotalSharedOffHeapNodeMemory();
LOG.info("NODE TO SHARED OFF HEAP {}", nodeToTotalShared);
Map<WorkerSlot, WorkerResources> scheduledResources = assignment.getScheduledResources();
assertEquals(2, slots.size());
assertEquals(2, nodeToTotalShared.size());
assertEquals(2, scheduledResources.size());
double totalFoundCPU = 0.0;
double totalFoundOnHeap = 0.0;
double totalFoundWorkerOffHeap = 0.0;
for (WorkerSlot ws : slots) {
WorkerResources resources = scheduledResources.get(ws);
totalFoundCPU += resources.get_cpu();
totalFoundOnHeap += resources.get_mem_on_heap();
totalFoundWorkerOffHeap += resources.get_mem_off_heap();
}
assertEquals(totalExpectedCPU, totalFoundCPU, 0.01);
assertEquals(totalExpectedOnHeap, totalFoundOnHeap, 0.01);
assertEquals(totalExpectedWorkerOffHeap, totalFoundWorkerOffHeap, 0.01);
assertEquals(sharedOffHeapNode, nodeToTotalShared.values().stream().mapToDouble((d) -> d).sum(), 0.01);
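// The 700 MB node-shared region is counted once per node that hosts a bolt-2 executor;
// a per-node sum of exactly 700 MB confirms both bolt-2 executors landed on one node.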
assertEquals(sharedOnHeap, scheduledResources.values().stream().mapToDouble(WorkerResources::get_shared_mem_on_heap).sum(), 0.01);
assertEquals(sharedOffHeapWorker, scheduledResources.values().stream().mapToDouble(WorkerResources::get_shared_mem_off_heap).sum(), 0.01);
}
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class TestGenericResourceAwareStrategy, method testGenericResourceAwareStrategyWithSettingAckerExecutors.
/**
* Test if the scheduling logic for the GenericResourceAwareStrategy is correct
* with setting {@link Config#TOPOLOGY_ACKER_EXECUTORS}.
*
* For test details, see {@link TestDefaultResourceAwareStrategy#testDefaultResourceAwareStrategyWithSettingAckerExecutors(int)}.
*/
@ParameterizedTest
@ValueSource(ints = { -1, 0, 2, 200 })
public void testGenericResourceAwareStrategyWithSettingAckerExecutors(int numOfAckersPerWorker) throws InvalidTopologyException {
int spoutParallelism = 1;
int boltParallelism = 2;
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new TestSpout(), spoutParallelism);
builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1").addResource("gpu.count", 1.0);
builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2").addResource("gpu.count", 2.0);
String topoName = "testTopology";
StormTopology stormTopology = builder.createTopology();
INimbus iNimbus = new INimbusTest();
Config conf = createGrasClusterConfig(50, 500, 0, null, Collections.emptyMap());
Map<String, Double> genericResourcesMap = new HashMap<>();
genericResourcesMap.put("gpu.count", 2.0);
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000, genericResourcesMap);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, topoName);
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, 4);
// numOfAckersPerWorker == -1 leaves topology.acker.executors.per.worker unset
if (numOfAckersPerWorker != -1) {
conf.put(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER, numOfAckersPerWorker);
}
int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(conf, stormTopology);
Nimbus.setUpAckerExecutorConfigs(topoName, conf, conf, estimatedNumWorker);
conf.put(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB, 250);
conf.put(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT, 50);
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), currentTime, "user");
Topologies topologies = new Topologies(topo);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
// We need 4 slots in total. The topology needs 6 GPUs, 3500 MB of memory, and 350% CPU
// for its spout and bolt executors, plus 1000 MB and 200% CPU for the four ackers.
// The bolt-3 instances must be on separate nodes because they each need 2 GPUs.
// The bolt-2 instances each need 1 GPU and go wherever GPU headroom remains
// (the strategy packs components to limit fragmentation).
// The bolt-1 and spout instances fill in the rest.
// Ordered execs: [[6, 6], [2, 2], [4, 4], [5, 5], [1, 1], [3, 3], [0, 0]]
// Ackers: [[8, 8], [7, 7]] (+ [[9, 9], [10, 10]] when numOfAckersPerWorker=2)
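// Executor ids map to components as: spout = [0], bolt-1 = [1, 2], bolt-3 = [3, 4],
// bolt-2 = [5, 6], ackers = [7..10].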
HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
expectedScheduling.add(new HashSet<>(Arrays.asList(// bolt-3 - 500 MB, 50% CPU, 2 GPU
new ExecutorDetails(3, 3))));
// Total 500 MB, 50% CPU, 2 - GPU -> this node has 1500 MB, 150% cpu, 0 GPU left
expectedScheduling.add(new HashSet<>(Arrays.asList(// acker - 250 MB, 50% CPU, 0 GPU
new ExecutorDetails(7, 7), // acker - 250 MB, 50% CPU, 0 GPU
new ExecutorDetails(8, 8), // bolt-2 - 500 MB, 50% CPU, 1 GPU
new ExecutorDetails(6, 6), // bolt-1 - 500 MB, 50% CPU, 0 GPU
new ExecutorDetails(2, 2))));
// Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
expectedScheduling.add(new HashSet<>(Arrays.asList(// acker- 250 MB, 50% CPU, 0 GPU
new ExecutorDetails(9, 9), // acker- 250 MB, 50% CPU, 0 GPU
new ExecutorDetails(10, 10), // bolt-1 - 500 MB, 50% CPU, 0 GPU
new ExecutorDetails(1, 1), // bolt-3 500 MB, 50% cpu, 2 GPU
new ExecutorDetails(4, 4))));
// Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
expectedScheduling.add(new HashSet<>(Arrays.asList(// Spout - 500 MB, 50% CPU, 0 GPU
new ExecutorDetails(0, 0), // bolt-2 - 500 MB, 50% CPU, 1 GPU
new ExecutorDetails(5, 5))));
// Total 1000 MB, 100% CPU, 2 GPU -> this node has 1000 MB, 100% CPU, 0 GPU left
HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
foundScheduling.add(new HashSet<>(execs));
}
assertEquals(expectedScheduling, foundScheduling);
}
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class TestLargeCluster, method createTopoDetailsArray.
/**
* Create an array of TopologyDetails by reading serialized files for topology and configuration in the
* resource path. Skip topologies with no executors/components.
*
* @param resourcePath classpath directory containing the serialized topology and configuration files.
* @param failOnParseError if true, throw an exception on unmatched files or read errors; otherwise log and skip them.
* @return An array of TopologyDetails representing resource files.
* @throws Exception upon error in reading topology serialized files.
*/
public static TopologyDetails[] createTopoDetailsArray(String resourcePath, boolean failOnParseError) throws Exception {
List<TopologyDetails> topoDetailsList = new ArrayList<>();
List<String> errors = new ArrayList<>();
List<String> resources = getResourceFiles(resourcePath);
Map<String, String> codeResourceMap = new TreeMap<>();
Map<String, String> confResourceMap = new HashMap<>();
for (String resource : resources) {
int idxOfSlash = resource.lastIndexOf("/");
int idxOfDash = resource.lastIndexOf("-");
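// Derive the topology id: take the file name after the last '/', trimmed at the last '-'
// when one follows it, otherwise with the serialized-topology suffix stripped.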
String nm = idxOfDash > idxOfSlash ? resource.substring(idxOfSlash + 1, idxOfDash) : resource.substring(idxOfSlash + 1, resource.length() - COMPRESSED_SERIALIZED_TOPOLOGY_FILENAME_ENDING.length());
if (resource.endsWith(COMPRESSED_SERIALIZED_TOPOLOGY_FILENAME_ENDING)) {
codeResourceMap.put(nm, resource);
} else if (resource.endsWith(COMPRESSED_SERIALIZED_CONFIG_FILENAME_ENDING)) {
confResourceMap.put(nm, resource);
} else {
LOG.info("Ignoring unsupported resource file " + resource);
}
}
String[] examinedConfParams = { Config.TOPOLOGY_NAME, Config.TOPOLOGY_SCHEDULER_STRATEGY, Config.TOPOLOGY_PRIORITY,
Config.TOPOLOGY_WORKERS, Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Config.TOPOLOGY_SUBMITTER_USER,
Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT, Config.TOPOLOGY_ACKER_RESOURCES_OFFHEAP_MEMORY_MB,
Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB };
for (String topoId : codeResourceMap.keySet()) {
String codeResource = codeResourceMap.get(topoId);
if (!confResourceMap.containsKey(topoId)) {
String err = String.format("Ignoring topology file %s because of missing config file for %s", codeResource, topoId);
errors.add(err);
LOG.error(err);
continue;
}
String confResource = confResourceMap.get(topoId);
LOG.info("Found matching topology and config files: {}, {}", codeResource, confResource);
StormTopology stormTopology;
try {
stormTopology = Utils.deserialize(getResourceAsBytes(codeResource), StormTopology.class);
} catch (Exception ex) {
String err = String.format("Cannot read topology from resource %s", codeResource);
errors.add(err);
LOG.error(err, ex);
continue;
}
Map<String, Object> conf;
try {
conf = Utils.fromCompressedJsonConf(getResourceAsBytes(confResource));
} catch (RuntimeException | IOException ex) {
String err = String.format("Cannot read configuration from resource %s", confResource);
errors.add(err);
LOG.error(err, ex);
continue;
}
// fix 0.10 conf class names
String[] configParamsToFix = { Config.TOPOLOGY_SCHEDULER_STRATEGY, Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN, DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY };
for (String configParam : configParamsToFix) {
if (!conf.containsKey(configParam)) {
continue;
}
String className = (String) conf.get(configParam);
if (className.startsWith("backtype")) {
className = className.replace("backtype", "org.apache");
conf.put(configParam, className);
}
}
// fix conf params used by ConstraintSolverStrategy
if (!conf.containsKey(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_STATE_SEARCH)) {
conf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_STATE_SEARCH, 10_000);
}
if (!conf.containsKey(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH)) {
conf.put(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH, 10_000);
}
if (!conf.containsKey(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER)) {
conf.put(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER, 1);
}
String topoName = (String) conf.getOrDefault(Config.TOPOLOGY_NAME, topoId);
// log the examined config values for this topology
StringBuilder sb = new StringBuilder("Config for " + topoId + ": ");
for (String param : examinedConfParams) {
Object val = conf.getOrDefault(param, "<null>");
sb.append(param).append("=").append(val).append(", ");
}
LOG.info(sb.toString());
// build the executor-to-component map for the topology
Map<ExecutorDetails, String> execToComp = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology);
LOG.info("Topology \"{}\" spouts={}, bolts={}, execToComp size is {}", topoName, stormTopology.get_spouts_size(), stormTopology.get_bolts_size(), execToComp.size());
if (execToComp.isEmpty()) {
LOG.error("Topology \"{}\" Ignoring BAD topology with zero executors", topoName);
continue;
}
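// TOPOLOGY_WORKERS may deserialize as a Number or a String; concatenating with "" normalizes
// both forms before parsing.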
int numWorkers = Integer.parseInt("" + conf.getOrDefault(Config.TOPOLOGY_WORKERS, "0"));
TopologyDetails topo = new TopologyDetails(topoId, conf, stormTopology, numWorkers, execToComp, Time.currentTimeSecs(), "user");
// sanity check - normally this should not fail
topo.getUserTopolgyComponents();
topoDetailsList.add(topo);
}
if (!errors.isEmpty() && failOnParseError) {
throw new Exception("Unable to parse all serialized objects\n\t" + String.join("\n\t", errors));
}
return topoDetailsList.toArray(new TopologyDetails[0]);
}
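A hypothetical call site for this helper; the resource path below is illustrative, not a real directory in the Storm test resources:

    // "clusterconf/sample-topologies" is a hypothetical resource directory.
    TopologyDetails[] topoDetails = createTopoDetailsArray("clusterconf/sample-topologies", false);
    Topologies topologies = new Topologies(topoDetails);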