Use of org.apache.storm.generated.SpoutStats in the Apache Storm project:
class InOrderDeliveryTest, method printMetrics.
/**
 * Prints throughput and latency metrics for the spout of the named topology.
 *
 * <p>Aggregates the ":all-time" acked/failed counts across all executors whose
 * component id is "spout" and derives an ack-weighted average complete latency.
 *
 * @param client Nimbus client used to look up the topology
 * @param name   topology name as submitted
 * @throws Exception if the topology lookup fails
 */
public static void printMetrics(Nimbus.Iface client, String name) throws Exception {
    TopologyInfo info = client.getTopologyInfoByName(name);
    int uptime = info.get_uptime_secs();
    long acked = 0;
    long failed = 0;
    double weightedAvgTotal = 0.0;
    for (ExecutorSummary exec : info.get_executors()) {
        if (!"spout".equals(exec.get_component_id())) {
            continue;
        }
        // Executors that have not reported yet can have null stats (see the
        // null checks in MetricsSample.getMetricsSample); skip them instead of NPE-ing.
        if (exec.get_stats() == null || exec.get_stats().get_specific() == null) {
            continue;
        }
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
        if (ackedMap == null) {
            continue;
        }
        for (Map.Entry<String, Long> entry : ackedMap.entrySet()) {
            String key = entry.getKey();
            if (failedMap != null) {
                Long tmp = failedMap.get(key);
                if (tmp != null) {
                    failed += tmp;
                }
            }
            long ackVal = entry.getValue();
            // Weight each stream's average latency by its ack count.
            Double lat = (avgLatMap == null) ? null : avgLatMap.get(key);
            if (lat != null) {
                weightedAvgTotal += lat * ackVal;
            }
            acked += ackVal;
        }
    }
    // Guard the divisions: acked may be 0 early in the topology's life and a
    // just-submitted topology can report uptime 0 (avoid printing NaN/Infinity).
    double avgLatency = (acked == 0) ? 0.0 : weightedAvgTotal / acked;
    double ackedPerSec = (uptime == 0) ? 0.0 : ((double) acked) / uptime;
    System.out.println("uptime: " + uptime + " acked: " + acked + " avgLatency: " + avgLatency
        + " acked/sec: " + ackedPerSec + " failed: " + failed);
}
Use of org.apache.storm.generated.SpoutStats in the Apache Storm project:
class FastWordCountTopology, method printMetrics.
/**
 * Prints uptime, ack/fail totals, ack-weighted average complete latency, and
 * acks-per-second for the spout component of the named topology.
 *
 * @param client Nimbus client used to fetch topology info
 * @param name   topology name as submitted
 * @throws Exception if the topology lookup fails
 */
public static void printMetrics(Nimbus.Iface client, String name) throws Exception {
    TopologyInfo info = client.getTopologyInfoByName(name);
    int uptime = info.get_uptime_secs();
    long acked = 0;
    long failed = 0;
    double weightedAvgTotal = 0.0;
    for (ExecutorSummary exec : info.get_executors()) {
        if (!"spout".equals(exec.get_component_id())) {
            continue;
        }
        // Stats can be null for executors that have not heartbeated yet
        // (MetricsSample.getMetricsSample performs the same null checks).
        if (exec.get_stats() == null || exec.get_stats().get_specific() == null) {
            continue;
        }
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
        if (ackedMap == null) {
            continue;
        }
        for (Map.Entry<String, Long> entry : ackedMap.entrySet()) {
            String key = entry.getKey();
            if (failedMap != null) {
                Long tmp = failedMap.get(key);
                if (tmp != null) {
                    failed += tmp;
                }
            }
            long ackVal = entry.getValue();
            // Weight per-stream latency by the number of acks on that stream.
            Double lat = (avgLatMap == null) ? null : avgLatMap.get(key);
            if (lat != null) {
                weightedAvgTotal += lat * ackVal;
            }
            acked += ackVal;
        }
    }
    // Avoid NaN/Infinity when nothing has been acked or uptime is still 0.
    double avgLatency = (acked == 0) ? 0.0 : weightedAvgTotal / acked;
    double ackedPerSec = (uptime == 0) ? 0.0 : ((double) acked) / uptime;
    System.out.println("uptime: " + uptime + " acked: " + acked + " avgLatency: " + avgLatency
        + " acked/sec: " + ackedPerSec + " failed: " + failed);
}
Use of org.apache.storm.generated.SpoutStats in the Apache Storm project:
class MetricsSample, method getMetricsSample.
/**
 * Builds a {@link MetricsSample} snapshot from a topology's current stats.
 *
 * <p>Sums the ":all-time" transferred counts over all executors (skipping
 * system streams), and for spout executors additionally sums acked/failed
 * counts and averages the per-stream complete latency.
 *
 * @param topInfo topology info fetched from Nimbus
 * @return a populated sample; latency is 0.0 when no spout executors reported
 */
private static MetricsSample getMetricsSample(TopologyInfo topInfo) {
    List<ExecutorSummary> executorSummaries = topInfo.get_executors();
    // totals across all executors
    long totalTransferred = 0L;
    long totalEmitted = 0L; // NOTE(review): never accumulated below; stays 0 — confirm intent
    long totalAcked = 0L;
    long totalFailed = 0L;
    // spout-only aggregates
    int spoutExecCount = 0;
    double spoutLatencySum = 0.0;
    long spoutTransferred = 0L;
    for (ExecutorSummary executorSummary : executorSummaries) {
        ExecutorStats executorStats = executorSummary.get_stats();
        if (executorStats == null) {
            // Executor has not reported stats yet.
            continue;
        }
        ExecutorSpecificStats executorSpecificStats = executorStats.get_specific();
        if (executorSpecificStats == null) {
            continue;
        }
        Map<String, Long> txMap = executorStats.get_transferred().get(":all-time");
        if (txMap == null) {
            continue;
        }
        boolean isSpout = executorSpecificStats.is_set_spout();
        for (Map.Entry<String, Long> tx : txMap.entrySet()) {
            // Ignore internal system streams (metrics, acker traffic, ...).
            // todo, ignore the master batch coordinator ?
            if (!Utils.isSystemId(tx.getKey())) {
                long count = tx.getValue();
                totalTransferred += count;
                if (isSpout) {
                    spoutTransferred += count;
                }
            }
        }
        if (isSpout) { // was executorSpecificStats.isSet(2): magic Thrift field id for the same check
            SpoutStats spoutStats = executorSpecificStats.get_spout();
            Map<String, Long> acked = spoutStats.get_acked().get(":all-time");
            if (acked != null) {
                for (Long count : acked.values()) {
                    totalAcked += count;
                }
            }
            Map<String, Long> failed = spoutStats.get_failed().get(":all-time");
            if (failed != null) {
                for (Long count : failed.values()) {
                    totalFailed += count;
                }
            }
            Map<String, Double> vals = spoutStats.get_complete_ms_avg().get(":all-time");
            if (vals != null && !vals.isEmpty()) {
                double total = 0d;
                for (Double v : vals.values()) {
                    total += v;
                }
                // Unweighted mean of the per-stream average latencies.
                spoutLatencySum += total / vals.size();
            }
            spoutExecCount++;
        }
    }
    MetricsSample ret = new MetricsSample();
    ret.totalEmitted = totalEmitted;
    ret.totalTransferred = totalTransferred;
    ret.totalAcked = totalAcked;
    ret.totalFailed = totalFailed;
    // Guard against divide-by-zero (NaN) when no spout executors reported yet.
    ret.totalLatency = (spoutExecCount == 0) ? 0.0 : spoutLatencySum / spoutExecCount;
    long spoutEmitted = 0L;
    ret.spoutEmitted = spoutEmitted;
    ret.spoutTransferred = spoutTransferred;
    ret.sampleTime = System.currentTimeMillis();
    // ret.numSupervisors = clusterSummary.get_supervisors_size();
    ret.numWorkers = 0;
    ret.numExecutors = 0;
    ret.numTasks = 0;
    ret.spoutExecutors = spoutExecCount;
    return ret;
}
Use of org.apache.storm.generated.SpoutStats in the Apache Storm project:
class HdfsSpoutTopology, method printMetrics (topology-name variant).
/**
 * Prints spout ack/fail totals, ack-weighted average complete latency, and
 * acks-per-second for the named topology.
 *
 * @param client Nimbus client used to fetch topology info
 * @param name   topology name as submitted
 * @throws Exception if the topology lookup fails
 */
static void printMetrics(Nimbus.Iface client, String name) throws Exception {
    TopologyInfo info = client.getTopologyInfoByName(name);
    int uptime = info.get_uptime_secs();
    long acked = 0;
    long failed = 0;
    double weightedAvgTotal = 0.0;
    for (ExecutorSummary exec : info.get_executors()) {
        if (!"spout".equals(exec.get_component_id())) {
            continue;
        }
        // Executors may not have reported stats yet; skip instead of NPE-ing
        // (MetricsSample.getMetricsSample applies the same null checks).
        if (exec.get_stats() == null || exec.get_stats().get_specific() == null) {
            continue;
        }
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
        if (ackedMap == null) {
            continue;
        }
        for (Map.Entry<String, Long> entry : ackedMap.entrySet()) {
            String key = entry.getKey();
            if (failedMap != null) {
                Long tmp = failedMap.get(key);
                if (tmp != null) {
                    failed += tmp;
                }
            }
            long ackVal = entry.getValue();
            // Weight each stream's average latency by its ack count.
            Double lat = (avgLatMap == null) ? null : avgLatMap.get(key);
            if (lat != null) {
                weightedAvgTotal += lat * ackVal;
            }
            acked += ackVal;
        }
    }
    // Avoid printing NaN/Infinity when acked or uptime is still 0.
    double avgLatency = (acked == 0) ? 0.0 : weightedAvgTotal / acked;
    double ackedPerSec = (uptime == 0) ? 0.0 : ((double) acked) / uptime;
    System.out.println("uptime: " + uptime + " acked: " + acked + " avgLatency: " + avgLatency
        + " acked/sec: " + ackedPerSec + " failed: " + failed);
}
Use of org.apache.storm.generated.SpoutStats in the Apache Storm project:
class HdfsSpoutTopology, method printMetrics (cluster-lookup variant).
/**
 * Resolves the topology id for {@code name} from the cluster summary, then
 * prints spout ack/fail totals, ack-weighted average complete latency, and
 * acks-per-second for that topology.
 *
 * @param client Nimbus client used for cluster and topology lookups
 * @param name   topology name as submitted
 * @throws Exception if no topology with the given name exists
 */
static void printMetrics(Nimbus.Client client, String name) throws Exception {
    ClusterSummary summary = client.getClusterInfo();
    String id = null;
    for (TopologySummary ts : summary.get_topologies()) {
        if (name.equals(ts.get_name())) {
            id = ts.get_id();
            // Topology names are unique per cluster; stop at the first match.
            break;
        }
    }
    if (id == null) {
        throw new Exception("Could not find a topology named " + name);
    }
    TopologyInfo info = client.getTopologyInfo(id);
    int uptime = info.get_uptime_secs();
    long acked = 0;
    long failed = 0;
    double weightedAvgTotal = 0.0;
    for (ExecutorSummary exec : info.get_executors()) {
        if (!"spout".equals(exec.get_component_id())) {
            continue;
        }
        // Stats may be null for executors that have not heartbeated yet.
        if (exec.get_stats() == null || exec.get_stats().get_specific() == null) {
            continue;
        }
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
        if (ackedMap == null) {
            continue;
        }
        for (Map.Entry<String, Long> entry : ackedMap.entrySet()) {
            String key = entry.getKey();
            if (failedMap != null) {
                Long tmp = failedMap.get(key);
                if (tmp != null) {
                    failed += tmp;
                }
            }
            long ackVal = entry.getValue();
            // Weight each stream's average latency by its ack count.
            Double lat = (avgLatMap == null) ? null : avgLatMap.get(key);
            if (lat != null) {
                weightedAvgTotal += lat * ackVal;
            }
            acked += ackVal;
        }
    }
    // Avoid printing NaN/Infinity when acked or uptime is still 0.
    double avgLatency = (acked == 0) ? 0.0 : weightedAvgTotal / acked;
    double ackedPerSec = (uptime == 0) ? 0.0 : ((double) acked) / uptime;
    System.out.println("uptime: " + uptime + " acked: " + acked + " avgLatency: " + avgLatency
        + " acked/sec: " + ackedPerSec + " failed: " + failed);
}
Aggregations