Use of org.apache.cassandra.utils.EstimatedHistogram in project eiger by wlloyd.
The class NodeCmd, method printCfHistograms:
private void printCfHistograms(String keySpace, String columnFamily, PrintStream output) {
    ColumnFamilyStoreMBean store = this.probe.getCfsProxy(keySpace, columnFamily);
    // default is 90 offsets
    long[] offsets = new EstimatedHistogram().getBucketOffsets();
    long[] rrlh = store.getRecentReadLatencyHistogramMicros();
    long[] rwlh = store.getRecentWriteLatencyHistogramMicros();
    long[] sprh = store.getRecentSSTablesPerReadHistogram();
    long[] ersh = store.getEstimatedRowSizeHistogram();
    long[] ecch = store.getEstimatedColumnCountHistogram();
    output.println(String.format("%s/%s histograms", keySpace, columnFamily));
    output.println(String.format("%-10s%10s%18s%18s%18s%18s", "Offset", "SSTables", "Write Latency", "Read Latency", "Row Size", "Column Count"));
    for (int i = 0; i < offsets.length; i++) {
        output.println(String.format("%-10d%10s%18s%18s%18s%18s", offsets[i], (i < sprh.length ? sprh[i] : ""), (i < rwlh.length ? rwlh[i] : ""), (i < rrlh.length ? rrlh[i] : ""), (i < ersh.length ? ersh[i] : ""), (i < ecch.length ? ecch[i] : "")));
    }
}
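The loop above prints raw per-bucket counts lined up against the default bucket boundaries, so the output is only readable once you know what those boundaries look like. A minimal standalone sketch of inspecting that default layout; the class name, main method, and printed labels are illustrative, while the no-argument constructor and getBucketOffsets() are exactly the calls used in the snippet:

import org.apache.cassandra.utils.EstimatedHistogram;

// Illustrative sketch: dump the default bucket boundaries that printCfHistograms
// lines its per-bucket counts up against.
public class DefaultOffsetsDemo {
    public static void main(String[] args) {
        // Same call as in the snippet above; the comment there notes the default is 90 offsets.
        long[] offsets = new EstimatedHistogram().getBucketOffsets();
        System.out.println("bucket count: " + offsets.length);
        System.out.println("smallest boundary: " + offsets[0]);
        System.out.println("largest boundary:  " + offsets[offsets.length - 1]);
    }
}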
Use of org.apache.cassandra.utils.EstimatedHistogram in project cassandra by apache.
The class TableHistograms, method execute:
@Override
public void execute(NodeProbe probe) {
    PrintStream out = probe.output().out;
    Multimap<String, String> tablesList = HashMultimap.create();
    // a <keyspace, set<table>> mapping for verification or as reference if none provided
    Multimap<String, String> allTables = HashMultimap.create();
    Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> tableMBeans = probe.getColumnFamilyStoreMBeanProxies();
    while (tableMBeans.hasNext()) {
        Map.Entry<String, ColumnFamilyStoreMBean> entry = tableMBeans.next();
        allTables.put(entry.getKey(), entry.getValue().getTableName());
    }
    if (args.size() == 2) {
        tablesList.put(args.get(0), args.get(1));
    } else if (args.size() == 1) {
        String[] input = args.get(0).split("\\.");
        checkArgument(input.length == 2, "tablehistograms requires keyspace and table name arguments");
        tablesList.put(input[0], input[1]);
    } else {
        // use all tables
        tablesList = allTables;
    }
    // verify that all tables to list exist
    for (String keyspace : tablesList.keys()) {
        for (String table : tablesList.get(keyspace)) {
            if (!allTables.containsEntry(keyspace, table))
                throw new IllegalArgumentException("Unknown table " + keyspace + '.' + table);
        }
    }
    for (String keyspace : tablesList.keys()) {
        for (String table : tablesList.get(keyspace)) {
            // calculate percentile of row size and column count
            long[] estimatedPartitionSize = (long[]) probe.getColumnFamilyMetric(keyspace, table, "EstimatedPartitionSizeHistogram");
            long[] estimatedColumnCount = (long[]) probe.getColumnFamilyMetric(keyspace, table, "EstimatedColumnCountHistogram");
            // build arrays to store percentile values
            double[] estimatedRowSizePercentiles = new double[7];
            double[] estimatedColumnCountPercentiles = new double[7];
            double[] offsetPercentiles = new double[] { 0.5, 0.75, 0.95, 0.98, 0.99 };
            if (ArrayUtils.isEmpty(estimatedPartitionSize) || ArrayUtils.isEmpty(estimatedColumnCount)) {
                out.println("No SSTables exists, unable to calculate 'Partition Size' and 'Cell Count' percentiles");
                for (int i = 0; i < 7; i++) {
                    estimatedRowSizePercentiles[i] = Double.NaN;
                    estimatedColumnCountPercentiles[i] = Double.NaN;
                }
            } else {
                EstimatedHistogram partitionSizeHist = new EstimatedHistogram(estimatedPartitionSize);
                EstimatedHistogram columnCountHist = new EstimatedHistogram(estimatedColumnCount);
                if (partitionSizeHist.isOverflowed()) {
                    out.println(String.format("Row sizes are larger than %s, unable to calculate percentiles", partitionSizeHist.getLargestBucketOffset()));
                    for (int i = 0; i < offsetPercentiles.length; i++) estimatedRowSizePercentiles[i] = Double.NaN;
                } else {
                    for (int i = 0; i < offsetPercentiles.length; i++) estimatedRowSizePercentiles[i] = partitionSizeHist.percentile(offsetPercentiles[i]);
                }
                if (columnCountHist.isOverflowed()) {
                    out.println(String.format("Column counts are larger than %s, unable to calculate percentiles", columnCountHist.getLargestBucketOffset()));
                    for (int i = 0; i < estimatedColumnCountPercentiles.length; i++) estimatedColumnCountPercentiles[i] = Double.NaN;
                } else {
                    for (int i = 0; i < offsetPercentiles.length; i++) estimatedColumnCountPercentiles[i] = columnCountHist.percentile(offsetPercentiles[i]);
                }
                // min value
                estimatedRowSizePercentiles[5] = partitionSizeHist.min();
                estimatedColumnCountPercentiles[5] = columnCountHist.min();
                // max value
                estimatedRowSizePercentiles[6] = partitionSizeHist.max();
                estimatedColumnCountPercentiles[6] = columnCountHist.max();
            }
            String[] percentiles = new String[] { "50%", "75%", "95%", "98%", "99%", "Min", "Max" };
            Double[] readLatency = probe.metricPercentilesAsArray((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspace, table, "ReadLatency"));
            Double[] writeLatency = probe.metricPercentilesAsArray((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspace, table, "WriteLatency"));
            Double[] sstablesPerRead = probe.metricPercentilesAsArray((CassandraMetricsRegistry.JmxHistogramMBean) probe.getColumnFamilyMetric(keyspace, table, "SSTablesPerReadHistogram"));
            out.println(format("%s/%s histograms", keyspace, table));
            out.println(format("%-10s%18s%18s%18s%18s%18s", "Percentile", "Read Latency", "Write Latency", "SSTables", "Partition Size", "Cell Count"));
            out.println(format("%-10s%18s%18s%18s%18s%18s", "", "(micros)", "(micros)", "", "(bytes)", ""));
            for (int i = 0; i < percentiles.length; i++) {
                out.println(format("%-10s%18.2f%18.2f%18.2f%18.0f%18.0f", percentiles[i], readLatency[i], writeLatency[i], sstablesPerRead[i], estimatedRowSizePercentiles[i], estimatedColumnCountPercentiles[i]));
            }
            out.println();
        }
    }
}
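The "Partition Size" and "Cell Count" columns come directly from EstimatedHistogram: the raw bucket counts returned over JMX are wrapped in a histogram, checked for overflow, and then queried for the 50th/75th/95th/98th/99th percentiles plus min and max. A minimal standalone sketch of that same calculation using made-up bucket counts; the class name, the chosen bucket indices, and the "offsets.length + 1" sizing of the array are illustrative assumptions, while the constructor, isOverflowed(), getLargestBucketOffset(), percentile(), min(), and max() are the calls used above:

import org.apache.cassandra.utils.EstimatedHistogram;

// Illustrative sketch of the percentile math in TableHistograms.execute, with fake data.
public class PercentileDemo {
    public static void main(String[] args) {
        // Size the bucket array from the default offsets; the extra overflow slot is an
        // assumption about the long[] constructor, mirroring how the JMX metric arrays
        // are handed to it above.
        long[] offsets = new EstimatedHistogram().getBucketOffsets();
        long[] bucketData = new long[offsets.length + 1];
        bucketData[10] = 5; // pretend five partitions landed in bucket 10
        bucketData[40] = 1; // and one much larger partition in bucket 40

        EstimatedHistogram hist = new EstimatedHistogram(bucketData);
        if (hist.isOverflowed()) {
            System.out.println("values exceed " + hist.getLargestBucketOffset());
            return;
        }
        for (double p : new double[] { 0.5, 0.75, 0.95, 0.98, 0.99 })
            System.out.println("p" + (int) (p * 100) + " = " + hist.percentile(p));
        System.out.println("min = " + hist.min() + ", max = " + hist.max());
    }
}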
Use of org.apache.cassandra.utils.EstimatedHistogram in project eiger by wlloyd.
The class SSTableMetadataSerializerTest, method testSerialization:
@Test
public void testSerialization() throws IOException {
    EstimatedHistogram rowSizes = new EstimatedHistogram(new long[] { 1L, 2L }, new long[] { 3L, 4L, 5L });
    EstimatedHistogram columnCounts = new EstimatedHistogram(new long[] { 6L, 7L }, new long[] { 8L, 9L, 10L });
    ReplayPosition rp = new ReplayPosition(11L, 12);
    long maxTimestamp = 4162517136L;
    SSTableMetadata.Collector collector = SSTableMetadata.createCollector().estimatedRowSize(rowSizes).estimatedColumnCount(columnCounts).replayPosition(rp);
    collector.updateMaxTimestamp(maxTimestamp);
    SSTableMetadata originalMetadata = collector.finalizeMetadata(RandomPartitioner.class.getCanonicalName());
    ByteArrayOutputStream byteOutput = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(byteOutput);
    SSTableMetadata.serializer.serialize(originalMetadata, dos);
    ByteArrayInputStream byteInput = new ByteArrayInputStream(byteOutput.toByteArray());
    DataInputStream dis = new DataInputStream(byteInput);
    Descriptor desc = new Descriptor(Descriptor.CURRENT_VERSION, new File("."), "", "", 0, false);
    SSTableMetadata stats = SSTableMetadata.serializer.deserialize(dis, desc);
    assert stats.estimatedRowSize.equals(originalMetadata.estimatedRowSize);
    assert stats.estimatedRowSize.equals(rowSizes);
    assert stats.estimatedColumnCount.equals(originalMetadata.estimatedColumnCount);
    assert stats.estimatedColumnCount.equals(columnCounts);
    assert stats.replayPosition.equals(originalMetadata.replayPosition);
    assert stats.replayPosition.equals(rp);
    assert stats.maxTimestamp == maxTimestamp;
    assert stats.maxTimestamp == originalMetadata.maxTimestamp;
    assert RandomPartitioner.class.getCanonicalName().equals(stats.partitioner);
}
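The assertions above only pass because EstimatedHistogram equality is value-based: the histograms read back from the serialized metadata are new instances, yet they compare equal to the originals when offsets and bucket counts match. A minimal sketch of that contract, reusing the two-array constructor from the test with the same illustrative values; the class name and main method are not part of the test:

import org.apache.cassandra.utils.EstimatedHistogram;

// Illustrative sketch: value-based equality is what lets the deserialized
// histograms in testSerialization match the originals.
public class HistogramEqualityDemo {
    public static void main(String[] args) {
        long[] offsets = new long[] { 1L, 2L };     // bucket boundaries, as in the test
        long[] buckets = new long[] { 3L, 4L, 5L }; // per-bucket counts, one more than the offsets

        EstimatedHistogram a = new EstimatedHistogram(offsets, buckets);
        EstimatedHistogram b = new EstimatedHistogram(offsets.clone(), buckets.clone());

        // Distinct instances with the same contents, just like the round-tripped
        // metadata in the test; equals() should report true.
        System.out.println("equal: " + a.equals(b));
    }
}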