use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.
the class WorkerLogsTest method testIdentifyWorkerLogDirs.
/**
* Build up the worker-id to worker-log-dir map for the old workers' dirs.
*/
@Test
public void testIdentifyWorkerLogDirs() throws Exception {
try (TmpPath testDir = new TmpPath()) {
Path port1Dir = Files.createDirectories(testDir.getFile().toPath().resolve("workers-artifacts/topo1/port1"));
Path metaFile = Files.createFile(testDir.getFile().toPath().resolve("worker.yaml"));
String expId = "id12345";
SortedSet<Path> expected = new TreeSet<>();
expected.add(port1Dir);
SupervisorUtils mockedSupervisorUtils = mock(SupervisorUtils.class);
SupervisorUtils.setInstance(mockedSupervisorUtils);
Map<String, Object> stormConf = Utils.readStormConfig();
WorkerLogs workerLogs = new WorkerLogs(stormConf, port1Dir, new StormMetricsRegistry()) {
@Override
public Optional<Path> getMetadataFileForWorkerLogDir(Path logDir) throws IOException {
return Optional.of(metaFile);
}
@Override
public String getWorkerIdFromMetadataFile(Path metaFile) {
return expId;
}
};
when(mockedSupervisorUtils.readWorkerHeartbeatsImpl(anyMap())).thenReturn(null);
assertEquals(expected, workerLogs.getLogDirs(Collections.singleton(port1Dir), (wid) -> true));
} finally {
SupervisorUtils.resetInstance();
}
}
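The two overrides stub out metadata-file discovery and parsing, so getLogDirs resolves port1Dir straight to the fixed worker id and the (wid) -> true predicate accepts it; the StormMetricsRegistry appears here only as a constructor argument, and the mocked readWorkerHeartbeatsImpl returning null presumably just keeps SupervisorUtils from reporting any live workers during the call.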
use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.
the class Supervisor method main.
/**
* Entry point for the supervisor daemon.
*/
public static void main(String[] args) throws Exception {
Utils.setupDefaultUncaughtExceptionHandler();
StormMetricsRegistry metricsRegistry = new StormMetricsRegistry();
@SuppressWarnings("resource") Supervisor instance = new Supervisor(new StandaloneSupervisor(), metricsRegistry);
instance.launchDaemon();
}
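Unlike the Pacemaker example below, the metrics reporters are not started here explicitly; presumably launchDaemon starts them internally once the supervisor configuration has been loaded.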
use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.
the class Pacemaker method main.
public static void main(String[] args) {
SysOutOverSLF4J.sendSystemOutAndErrToSLF4J();
Map<String, Object> conf = ConfigUtils.overrideLoginConfigWithSystemProperty(ConfigUtils.readStormConfig());
StormMetricsRegistry metricsRegistry = new StormMetricsRegistry();
final Pacemaker serverHandler = new Pacemaker(conf, metricsRegistry);
serverHandler.launchServer();
metricsRegistry.startMetricsReporters(conf);
Utils.addShutdownHookWithForceKillIn1Sec(metricsRegistry::stopMetricsReporters);
}
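Going by the helper's name, the shutdown hook registered here stops the metrics reporters on JVM shutdown and force-kills the process if that takes longer than about a second, so a wedged reporter cannot keep the daemon from exiting.
For completeness, a minimal sketch of how a daemon-level metric could be registered on the same registry and wired into this lifecycle. Only the calls visible in the snippets above are taken as given; registerMeter and its Meter return type are assumptions about the registry API (it wraps a Codahale MetricRegistry), and the metric name is invented for illustration.
public static void main(String[] args) {
    Map<String, Object> conf = ConfigUtils.readStormConfig();
    StormMetricsRegistry registry = new StormMetricsRegistry();
    // assumed API: registerMeter creates and returns a Codahale Meter under the given name
    Meter heartbeats = registry.registerMeter("pacemaker:num-heartbeats-received");
    heartbeats.mark();
    // start reporters from the daemon config and stop them cleanly on shutdown,
    // mirroring the Pacemaker entry point above
    registry.startMetricsReporters(conf);
    Utils.addShutdownHookWithForceKillIn1Sec(registry::stopMetricsReporters);
}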
use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.
the class TestResourceAwareScheduler method testHeterogeneousCluster.
public void testHeterogeneousCluster(Config topologyConf, String strategyName) {
LOG.info("\n\n\t\ttestHeterogeneousCluster");
INimbus iNimbus = new INimbusTest();
// strong supervisor node
Map<String, Double> resourceMap1 = new HashMap<>();
resourceMap1.put(Config.SUPERVISOR_CPU_CAPACITY, 800.0);
resourceMap1.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 4096.0);
// weak supervisor node
Map<String, Double> resourceMap2 = new HashMap<>();
resourceMap2.put(Config.SUPERVISOR_CPU_CAPACITY, 200.0);
resourceMap2.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1024.0);
resourceMap1 = NormalizedResources.RESOURCE_NAME_NORMALIZER.normalizedResourceMap(resourceMap1);
resourceMap2 = NormalizedResources.RESOURCE_NAME_NORMALIZER.normalizedResourceMap(resourceMap2);
Map<String, SupervisorDetails> supMap = new HashMap<>();
for (int i = 0; i < 2; i++) {
List<Number> ports = new LinkedList<>();
for (int j = 0; j < 4; j++) {
ports.add(j);
}
SupervisorDetails sup = new SupervisorDetails("r00s00" + i, "host-" + i, null, ports, i == 0 ? resourceMap1 : resourceMap2);
supMap.put(sup.getId(), sup);
}
LOG.info("SUPERVISORS = {}", supMap);
// topo1 has a single huge task that cannot be handled by the weak supervisor node
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 1).setCPULoad(300.0).setMemoryLoad(2000.0, 48.0);
StormTopology stormTopology1 = builder1.createTopology();
Config config1 = new Config();
config1.putAll(topologyConf);
Map<ExecutorDetails, String> executorMap1 = genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 1, executorMap1, 0, "user");
// topo2 has 4 large tasks
TopologyBuilder builder2 = new TopologyBuilder();
builder2.setSpout("wordSpout2", new TestWordSpout(), 4).setCPULoad(100.0).setMemoryLoad(500.0, 12.0);
StormTopology stormTopology2 = builder2.createTopology();
Config config2 = new Config();
config2.putAll(topologyConf);
Map<ExecutorDetails, String> executorMap2 = genExecsAndComps(stormTopology2);
TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 1, executorMap2, 0, "user");
// topo3 has 4 medium-sized tasks
TopologyBuilder builder3 = new TopologyBuilder();
builder3.setSpout("wordSpout3", new TestWordSpout(), 4).setCPULoad(20.0).setMemoryLoad(200.0, 56.0);
StormTopology stormTopology3 = builder3.createTopology();
Config config3 = new Config();
config3.putAll(topologyConf);
Map<ExecutorDetails, String> executorMap3 = genExecsAndComps(stormTopology3);
TopologyDetails topology3 = new TopologyDetails("topology3", config3, stormTopology3, 1, executorMap3, 0, "user");
// topo4 has 12 small tasks, whose mem usage does not exactly divide a node's mem capacity
TopologyBuilder builder4 = new TopologyBuilder();
builder4.setSpout("wordSpout4", new TestWordSpout(), 12).setCPULoad(30.0).setMemoryLoad(100.0, 0.0);
StormTopology stormTopology4 = builder4.createTopology();
Config config4 = new Config();
config4.putAll(topologyConf);
Map<ExecutorDetails, String> executorMap4 = genExecsAndComps(stormTopology4);
TopologyDetails topology4 = new TopologyDetails("topology4", config4, stormTopology4, 1, executorMap4, 0, "user");
// topo5 has 40 small tasks, it should be able to exactly use up both the cpu and mem in the cluster
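// 40 tasks * 25.0 CPU = 1000.0 (= 800 + 200) and 40 tasks * (100.0 + 28.0) MB = 5120.0 MB (= 4096 + 1024), matching the cluster totals exactly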
TopologyBuilder builder5 = new TopologyBuilder();
builder5.setSpout("wordSpout5", new TestWordSpout(), 40).setCPULoad(25.0).setMemoryLoad(100.0, 28.0);
StormTopology stormTopology5 = builder5.createTopology();
Config config5 = new Config();
config5.putAll(topologyConf);
Map<ExecutorDetails, String> executorMap5 = genExecsAndComps(stormTopology5);
TopologyDetails topology5 = new TopologyDetails("topology5", config5, stormTopology5, 1, executorMap5, 0, "user");
// Test1: Launch topos 1-3 together; on each supervisor either memory or CPU should be exactly used up
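// Declared loads sum to 780% CPU and 5120 MB of memory; the memory exactly matches the cluster capacity (4096 + 1024 MB),
// which is what the per-supervisor check below (memory or CPU fully used) relies on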
ResourceAwareScheduler rs = new ResourceAwareScheduler();
LOG.info("\n\n\t\tScheduling topologies 1, 2 and 3");
Topologies topologies = new Topologies(topology1, topology2, topology3);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config1);
rs.prepare(config1, new StormMetricsRegistry());
Map<SupervisorDetails, Double> superToCpu = null;
Map<SupervisorDetails, Double> superToMem = null;
try {
rs.schedule(topologies, cluster);
assertFalse(cluster.needsSchedulingRas(topology1));
assertFalse(cluster.needsSchedulingRas(topology2));
assertFalse(cluster.needsSchedulingRas(topology3));
String expectedMsgPrefix = "Running - Fully Scheduled by " + strategyName;
assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith(expectedMsgPrefix));
assertTrue(cluster.getStatusMap().get(topology2.getId()).startsWith(expectedMsgPrefix));
assertTrue(cluster.getStatusMap().get(topology3.getId()).startsWith(expectedMsgPrefix));
superToCpu = getSupervisorToCpuUsage(cluster, topologies);
superToMem = getSupervisorToMemoryUsage(cluster, topologies);
final Double EPSILON = 0.0001;
for (SupervisorDetails supervisor : supMap.values()) {
Double cpuAvailable = supervisor.getTotalCpu();
Double memAvailable = supervisor.getTotalMemory();
Double cpuUsed = superToCpu.get(supervisor);
Double memUsed = superToMem.get(supervisor);
assertTrue(supervisor.getId() + " MEM: " + memAvailable + " == " + memUsed + " OR CPU: " + cpuAvailable + " == " + cpuUsed, (Math.abs(memAvailable - memUsed) < EPSILON) || (Math.abs(cpuAvailable - cpuUsed) < EPSILON));
}
} finally {
rs.cleanup();
}
// end of Test1
LOG.warn("\n\n\t\tSwitching to topologies 1, 2 and 4");
// Test2: Launch topos 1, 2 and 4; together they request a little more memory than is available,
// so one of the three topologies will not be scheduled
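// Declared loads: topo1 2048 MB / 300% CPU, topo2 4 * 512 = 2048 MB / 400% CPU, topo4 12 * 100 = 1200 MB / 360% CPU;
// the 5296 MB total exceeds the 5120 MB of cluster memory (and 1060% CPU exceeds the 1000% total),
// so at most two of the three can be fully scheduled, which is what the assertion below expects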
topologies = new Topologies(topology1, topology2, topology4);
cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config1);
rs.prepare(config1, new StormMetricsRegistry());
try {
rs.schedule(topologies, cluster);
int numTopologiesAssigned = 0;
if (cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by " + strategyName)) {
LOG.info("TOPO 1 scheduled");
numTopologiesAssigned++;
}
if (cluster.getStatusMap().get(topology2.getId()).startsWith("Running - Fully Scheduled by " + strategyName)) {
LOG.info("TOPO 2 scheduled");
numTopologiesAssigned++;
}
if (cluster.getStatusMap().get(topology4.getId()).startsWith("Running - Fully Scheduled by " + strategyName)) {
LOG.info("TOPO 3 scheduled");
numTopologiesAssigned++;
}
assertEquals(2, numTopologiesAssigned);
} finally {
rs.cleanup();
}
// end of Test2
LOG.info("\n\n\t\tScheduling just topo 5");
// Test3: "Launch topo5 only, both mem and cpu should be exactly used up"
topologies = new Topologies(topology5);
cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config1);
rs.prepare(config1, new StormMetricsRegistry());
try {
rs.schedule(topologies, cluster);
superToCpu = getSupervisorToCpuUsage(cluster, topologies);
superToMem = getSupervisorToMemoryUsage(cluster, topologies);
for (SupervisorDetails supervisor : supMap.values()) {
Double cpuAvailable = supervisor.getTotalCpu();
Double memAvailable = supervisor.getTotalMemory();
Double cpuUsed = superToCpu.get(supervisor);
Double memUsed = superToMem.get(supervisor);
assertEquals(cpuAvailable, cpuUsed, 0.0001);
assertEquals(memAvailable, memUsed, 0.0001);
}
} finally {
rs.cleanup();
}
// end of Test3
}
use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.
the class TestResourceAwareScheduler method minCpuWorkerSplitFails.
/**
* Min CPU per worker is set to 50%, and there is a single supervisor with 100% CPU.
* A topology needing 3 workers should fail to schedule: three workers at the 50% minimum would require 150% CPU,
* even though the executors' combined CPU request is below the available capacity.
*/
@Test
public void minCpuWorkerSplitFails() {
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(1, 4, 100, 60000);
Config config = createClusterConfig(10, 500, 500, null);
config.put(DaemonConfig.STORM_WORKER_MIN_CPU_PCORE_PERCENT, 50.0);
TopologyDetails topo1 = genTopology("topo-1", config, 10, 0, 1, 1, currentTime - 2, 20, "jerry", 2000.0);
Topologies topologies = new Topologies(topo1);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<String, SchedulerAssignmentImpl>(), topologies, config);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(config, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
assertTrue(cluster.needsSchedulingRas(topo1));
assertFalse("Topo-1 unscheduled?", cluster.getAssignmentById(topo1.getId()) != null);
}