Use of org.apache.storm.generated.ExecutorSummary in project storm by apache.
The class HdfsSpoutTopology, method printMetrics.
static void printMetrics(Nimbus.Iface client, String name) throws Exception {
    TopologyInfo info = client.getTopologyInfoByName(name);
    int uptime = info.get_uptime_secs();
    long acked = 0;
    long failed = 0;
    double weightedAvgTotal = 0.0;
    for (ExecutorSummary exec : info.get_executors()) {
        if ("spout".equals(exec.get_component_id())) {
            SpoutStats stats = exec.get_stats().get_specific().get_spout();
            Map<String, Long> failedMap = stats.get_failed().get(":all-time");
            Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
            Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
            for (String key : ackedMap.keySet()) {
                if (failedMap != null) {
                    Long tmp = failedMap.get(key);
                    if (tmp != null) {
                        failed += tmp;
                    }
                }
                // Weight each stream's average complete latency by its ack count.
                long ackVal = ackedMap.get(key);
                double latVal = avgLatMap.get(key) * ackVal;
                acked += ackVal;
                weightedAvgTotal += latVal;
            }
        }
    }
    // NaN until at least one tuple has been acked.
    double avgLatency = weightedAvgTotal / acked;
    System.out.println("uptime: " + uptime + " acked: " + acked + " avgLatency: " + avgLatency + " acked/sec: " + (((double) acked) / uptime) + " failed: " + failed);
}
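A minimal calling sketch, not part of the listing above; it assumes Storm 2.x, where org.apache.storm.utils.NimbusClient is AutoCloseable and its getClient() can be used as a Nimbus.Iface, and "my-topology" is a placeholder name:

Map<String, Object> conf = Utils.readStormConfig();
try (NimbusClient nimbus = NimbusClient.getConfiguredClient(conf)) {
    printMetrics(nimbus.getClient(), "my-topology");
}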
Use of org.apache.storm.generated.ExecutorSummary in project storm by apache.
The class HdfsSpoutTopology, method printMetrics (older Nimbus.Client variant).
static void printMetrics(Nimbus.Client client, String name) throws Exception {
    ClusterSummary summary = client.getClusterInfo();
    // Resolve the topology id from its name; getTopologyInfo requires the id.
    String id = null;
    for (TopologySummary ts : summary.get_topologies()) {
        if (name.equals(ts.get_name())) {
            id = ts.get_id();
        }
    }
    if (id == null) {
        throw new Exception("Could not find a topology named " + name);
    }
    TopologyInfo info = client.getTopologyInfo(id);
    int uptime = info.get_uptime_secs();
    long acked = 0;
    long failed = 0;
    double weightedAvgTotal = 0.0;
    for (ExecutorSummary exec : info.get_executors()) {
        if ("spout".equals(exec.get_component_id())) {
            SpoutStats stats = exec.get_stats().get_specific().get_spout();
            Map<String, Long> failedMap = stats.get_failed().get(":all-time");
            Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
            Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
            for (String key : ackedMap.keySet()) {
                if (failedMap != null) {
                    Long tmp = failedMap.get(key);
                    if (tmp != null) {
                        failed += tmp;
                    }
                }
                // Weight each stream's average complete latency by its ack count.
                long ackVal = ackedMap.get(key);
                double latVal = avgLatMap.get(key) * ackVal;
                acked += ackVal;
                weightedAvgTotal += latVal;
            }
        }
    }
    // NaN until at least one tuple has been acked.
    double avgLatency = weightedAvgTotal / acked;
    System.out.println("uptime: " + uptime + " acked: " + acked + " avgLatency: " + avgLatency + " acked/sec: " + (((double) acked) / uptime) + " failed: " + failed);
}
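HdfsSpoutTopology invokes this kind of method repeatedly while the topology runs; a hedged sketch of such a polling helper (the 30-second interval and the poll count are illustrative assumptions, not values from the listing):

// Hypothetical helper: sample spout metrics every 30 s, numPolls times.
static void pollMetrics(Nimbus.Client client, String name, int numPolls) throws Exception {
    for (int i = 0; i < numPolls; i++) {
        Thread.sleep(30_000);
        printMetrics(client, name);
    }
}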
Use of org.apache.storm.generated.ExecutorSummary in project storm by apache.
The class Monitor, method getComponents.
private HashSet<String> getComponents(Nimbus.Iface client, String topology) throws Exception {
    HashSet<String> components = new HashSet<>();
    GetInfoOptions getInfoOpts = new GetInfoOptions();
    // Skip fetching error lists; only the executor summaries are needed here.
    getInfoOpts.set_num_err_choice(NumErrorsChoice.NONE);
    TopologyInfo info = client.getTopologyInfoByNameWithOpts(topology, getInfoOpts);
    for (ExecutorSummary es : info.get_executors()) {
        components.add(es.get_component_id());
    }
    return components;
}
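Because the method is private to Monitor, the following usage is hypothetical and only illustrates the shape of the result; note that the executor summaries include system components, so names such as __acker can appear:

HashSet<String> components = getComponents(client, "word-count"); // "word-count" is a placeholder
System.out.println(components); // e.g. [spout, split, count, __acker]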
Use of org.apache.storm.generated.ExecutorSummary in project storm by apache.
The class Monitor, method metrics.
public void metrics(Nimbus.Iface client, long now, MetricsState state) throws Exception {
    long totalStatted = 0;
    int componentParallelism = 0;
    boolean streamFound = false;
    GetInfoOptions getInfoOpts = new GetInfoOptions();
    getInfoOpts.set_num_err_choice(NumErrorsChoice.NONE);
    TopologyInfo info = client.getTopologyInfoByNameWithOpts(topology, getInfoOpts);
    for (ExecutorSummary es : info.get_executors()) {
        if (component.equals(es.get_component_id())) {
            componentParallelism++;
            ExecutorStats stats = es.get_stats();
            if (stats != null) {
                // Pick emitted or transferred counts depending on what is being watched.
                Map<String, Map<String, Long>> statted = WATCH_EMITTED.equals(watch) ? stats.get_emitted() : stats.get_transferred();
                if (statted != null) {
                    Map<String, Long> e2 = statted.get(":all-time");
                    if (e2 != null) {
                        Long stream = e2.get(this.stream);
                        if (stream != null) {
                            streamFound = true;
                            totalStatted += stream;
                        }
                    }
                }
            }
        }
    }
    if (componentParallelism <= 0) {
        HashSet<String> components = getComponents(client, topology);
        System.out.println("Available components for " + topology + " :");
        System.out.println("------------------");
        for (String comp : components) {
            System.out.println(comp);
        }
        System.out.println("------------------");
        throw new IllegalArgumentException("component: " + component + " not found");
    }
    if (!streamFound) {
        throw new IllegalArgumentException("stream: " + stream + " not found");
    }
    long timeDelta = now - state.getLastTime();
    long stattedDelta = totalStatted - state.getLastStatted();
    state.setLastTime(now);
    state.setLastStatted(totalStatted);
    double throughput = (stattedDelta == 0 || timeDelta == 0) ? 0.0 : ((double) stattedDelta / (double) timeDelta);
    System.out.println(topology + "\t" + component + "\t" + componentParallelism + "\t" + stream + "\t" + timeDelta + "\t" + stattedDelta + "\t" + throughput);
}
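The throughput figure is simply a delta ratio between two samples; a standalone illustration with made-up numbers (if the caller passes now in milliseconds, the ratio is tuples per millisecond, i.e. thousands of tuples per second):

long lastTime = 0L, lastStatted = 0L;             // previous sample, as held by MetricsState
long now = 10_000L, totalStatted = 250_000L;      // current sample, 10 seconds later
long timeDelta = now - lastTime;                  // 10_000 ms
long stattedDelta = totalStatted - lastStatted;   // 250_000 tuples
double throughput = (stattedDelta == 0 || timeDelta == 0) ? 0.0 : (double) stattedDelta / timeDelta;
System.out.println(throughput);                   // 25.0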
Use of org.apache.storm.generated.ExecutorSummary in project storm by apache.
The class StatsUtil, method aggregateBoltStats.
/**
 * Aggregate bolt stats.
 *
 * @param statsSeq a list of ExecutorSummary, one per bolt executor
 * @param includeSys whether to include system streams
 * @return aggregated bolt stats: {metric -> win -> global stream id -> value}
 */
public static <T> Map<String, Map> aggregateBoltStats(List<ExecutorSummary> statsSeq, boolean includeSys) {
    Map<String, Map> ret = new HashMap<>();
    Map<String, Map<String, Map<T, Long>>> commonStats = aggregateCommonStats(statsSeq);
    // filter sys streams if necessary
    commonStats = preProcessStreamSummary(commonStats, includeSys);
    // Collect each executor's per-window, per-stream bolt counters and latencies.
    List<Map<String, Map<GlobalStreamId, Long>>> acked = new ArrayList<>();
    List<Map<String, Map<GlobalStreamId, Long>>> failed = new ArrayList<>();
    List<Map<String, Map<GlobalStreamId, Long>>> executed = new ArrayList<>();
    List<Map<String, Map<GlobalStreamId, Double>>> processLatencies = new ArrayList<>();
    List<Map<String, Map<GlobalStreamId, Double>>> executeLatencies = new ArrayList<>();
    for (ExecutorSummary summary : statsSeq) {
        ExecutorStats stat = summary.get_stats();
        acked.add(stat.get_specific().get_bolt().get_acked());
        failed.add(stat.get_specific().get_bolt().get_failed());
        executed.add(stat.get_specific().get_bolt().get_executed());
        processLatencies.add(stat.get_specific().get_bolt().get_process_ms_avg());
        executeLatencies.add(stat.get_specific().get_bolt().get_execute_ms_avg());
    }
    mergeMaps(ret, commonStats);
    // Counts are summed; average latencies are combined weighted by the matching counts.
    ((Map) ret).put(ACKED, aggregateCounts(acked));
    ((Map) ret).put(FAILED, aggregateCounts(failed));
    ((Map) ret).put(EXECUTED, aggregateCounts(executed));
    ((Map) ret).put(PROC_LATENCIES, aggregateAverages(processLatencies, acked));
    ((Map) ret).put(EXEC_LATENCIES, aggregateAverages(executeLatencies, executed));
    return ret;
}
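A hedged calling sketch. Assumptions not in the listing: a TopologyInfo named info is already in hand, and the Thrift-generated union ExecutorSpecificStats exposes is_set_bolt(); the filter guards against executors without stats, which aggregateBoltStats itself does not do:

// Hypothetical: collect only bolt executors that have reported stats.
List<ExecutorSummary> boltExecs = new ArrayList<>();
for (ExecutorSummary es : info.get_executors()) {
    ExecutorStats stats = es.get_stats();
    if (stats != null && stats.get_specific().is_set_bolt()) {
        boltExecs.add(es);
    }
}
Map<String, Map> aggregated = aggregateBoltStats(boltExecs, false); // false: drop system streams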