Use of org.rocksdb.HistogramData in project kafka by apache.
Class RocksDBMetricsRecorderTest, method shouldRecordStatisticsBasedMetrics.
@Test
public void shouldRecordStatisticsBasedMetrics() {
    recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
    recorder.addValueProviders(SEGMENT_STORE_NAME_2, dbToAdd2, cacheToAdd2, statisticsToAdd2);
    reset(statisticsToAdd1);
    reset(statisticsToAdd2);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BYTES_WRITTEN)).andReturn(1L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BYTES_WRITTEN)).andReturn(2L);
    bytesWrittenToDatabaseSensor.record(1 + 2, 0L);
    replay(bytesWrittenToDatabaseSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BYTES_READ)).andReturn(2L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BYTES_READ)).andReturn(3L);
    bytesReadFromDatabaseSensor.record(2 + 3, 0L);
    replay(bytesReadFromDatabaseSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.FLUSH_WRITE_BYTES)).andReturn(3L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.FLUSH_WRITE_BYTES)).andReturn(4L);
    memtableBytesFlushedSensor.record(3 + 4, 0L);
    replay(memtableBytesFlushedSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.MEMTABLE_HIT)).andReturn(1L);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.MEMTABLE_MISS)).andReturn(2L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.MEMTABLE_HIT)).andReturn(3L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.MEMTABLE_MISS)).andReturn(4L);
    memtableHitRatioSensor.record((double) 4 / (4 + 6), 0L);
    replay(memtableHitRatioSensor);
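    // The positional HistogramData arguments below follow the RocksDB Java API constructor
    // order: median, p95, p99, average, standard deviation, max, count, sum, min.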
    final HistogramData memtableFlushTimeData1 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 2L, 10L, 3.0);
    final HistogramData memtableFlushTimeData2 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 4L, 8L, 10.0);
    expect(statisticsToAdd1.getHistogramData(HistogramType.FLUSH_TIME)).andReturn(memtableFlushTimeData1);
    expect(statisticsToAdd2.getHistogramData(HistogramType.FLUSH_TIME)).andReturn(memtableFlushTimeData2);
    memtableAvgFlushTimeSensor.record((double) (10 + 8) / (2 + 4), 0L);
    replay(memtableAvgFlushTimeSensor);
    memtableMinFlushTimeSensor.record(3.0, 0L);
    replay(memtableMinFlushTimeSensor);
    memtableMaxFlushTimeSensor.record(20.0, 0L);
    replay(memtableMaxFlushTimeSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.STALL_MICROS)).andReturn(4L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.STALL_MICROS)).andReturn(5L);
    writeStallDurationSensor.record(4 + 5, 0L);
    replay(writeStallDurationSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BLOCK_CACHE_DATA_HIT)).andReturn(5L);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BLOCK_CACHE_DATA_MISS)).andReturn(4L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BLOCK_CACHE_DATA_HIT)).andReturn(3L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BLOCK_CACHE_DATA_MISS)).andReturn(2L);
    blockCacheDataHitRatioSensor.record((double) 8 / (8 + 6), 0L);
    replay(blockCacheDataHitRatioSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BLOCK_CACHE_INDEX_HIT)).andReturn(4L);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BLOCK_CACHE_INDEX_MISS)).andReturn(2L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BLOCK_CACHE_INDEX_HIT)).andReturn(2L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BLOCK_CACHE_INDEX_MISS)).andReturn(4L);
    blockCacheIndexHitRatioSensor.record((double) 6 / (6 + 6), 0L);
    replay(blockCacheIndexHitRatioSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BLOCK_CACHE_FILTER_HIT)).andReturn(2L);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.BLOCK_CACHE_FILTER_MISS)).andReturn(4L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BLOCK_CACHE_FILTER_HIT)).andReturn(3L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.BLOCK_CACHE_FILTER_MISS)).andReturn(5L);
    blockCacheFilterHitRatioSensor.record((double) 5 / (5 + 9), 0L);
    replay(blockCacheFilterHitRatioSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.COMPACT_WRITE_BYTES)).andReturn(2L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.COMPACT_WRITE_BYTES)).andReturn(4L);
    bytesWrittenDuringCompactionSensor.record(2 + 4, 0L);
    replay(bytesWrittenDuringCompactionSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.COMPACT_READ_BYTES)).andReturn(5L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.COMPACT_READ_BYTES)).andReturn(6L);
    bytesReadDuringCompactionSensor.record(5 + 6, 0L);
    replay(bytesReadDuringCompactionSensor);
    final HistogramData compactionTimeData1 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 2L, 8L, 6.0);
    final HistogramData compactionTimeData2 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 24.0, 2L, 8L, 4.0);
    expect(statisticsToAdd1.getHistogramData(HistogramType.COMPACTION_TIME)).andReturn(compactionTimeData1);
    expect(statisticsToAdd2.getHistogramData(HistogramType.COMPACTION_TIME)).andReturn(compactionTimeData2);
    compactionTimeAvgSensor.record((double) (8 + 8) / (2 + 2), 0L);
    replay(compactionTimeAvgSensor);
    compactionTimeMinSensor.record(4.0, 0L);
    replay(compactionTimeMinSensor);
    compactionTimeMaxSensor.record(24.0, 0L);
    replay(compactionTimeMaxSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_OPENS)).andReturn(5L);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_CLOSES)).andReturn(3L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_OPENS)).andReturn(7L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_CLOSES)).andReturn(4L);
    numberOfOpenFilesSensor.record((5 + 7) - (3 + 4), 0L);
    replay(numberOfOpenFilesSensor);
    expect(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_ERRORS)).andReturn(34L);
    expect(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_ERRORS)).andReturn(11L);
    numberOfFileErrorsSensor.record(11 + 34, 0L);
    replay(numberOfFileErrorsSensor);
    replay(statisticsToAdd1);
    replay(statisticsToAdd2);
    recorder.record(0L);
    verify(statisticsToAdd1);
    verify(statisticsToAdd2);
    verify(
        bytesWrittenToDatabaseSensor,
        bytesReadFromDatabaseSensor,
        memtableBytesFlushedSensor,
        memtableHitRatioSensor,
        memtableAvgFlushTimeSensor,
        memtableMinFlushTimeSensor,
        memtableMaxFlushTimeSensor,
        writeStallDurationSensor,
        blockCacheDataHitRatioSensor,
        blockCacheIndexHitRatioSensor,
        blockCacheFilterHitRatioSensor,
        bytesWrittenDuringCompactionSensor,
        bytesReadDuringCompactionSensor,
        compactionTimeAvgSensor,
        compactionTimeMinSensor,
        compactionTimeMaxSensor,
        numberOfOpenFilesSensor,
        numberOfFileErrorsSensor);
}
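Each expected sensor value is the corresponding ticker or histogram value summed across both segment stores (the min/max flush and compaction times take the minimum/maximum across stores instead). As a worked illustration of the arithmetic, not part of the test itself, the memtable hit-ratio expectation is derived like this:

long hits = 1L + 3L;     // MEMTABLE_HIT returned by statisticsToAdd1 and statisticsToAdd2
long misses = 2L + 4L;   // MEMTABLE_MISS returned by both statistics objects
double expectedRatio = (double) hits / (hits + misses);  // 4 / 10 = 0.4, as recorded above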
Use of org.rocksdb.HistogramData in project kafka by apache.
Class RocksDBMetricsRecorder, method record.
public void record(final long now) {
    logger.debug("Recording metrics for store {}", storeName);
    long bytesWrittenToDatabase = 0;
    long bytesReadFromDatabase = 0;
    long memtableBytesFlushed = 0;
    long memtableHits = 0;
    long memtableMisses = 0;
    long blockCacheDataHits = 0;
    long blockCacheDataMisses = 0;
    long blockCacheIndexHits = 0;
    long blockCacheIndexMisses = 0;
    long blockCacheFilterHits = 0;
    long blockCacheFilterMisses = 0;
    long writeStallDuration = 0;
    long bytesWrittenDuringCompaction = 0;
    long bytesReadDuringCompaction = 0;
    long numberOfOpenFiles = 0;
    long numberOfFileErrors = 0;
    long memtableFlushTimeSum = 0;
    long memtableFlushTimeCount = 0;
    double memtableFlushTimeMin = Double.MAX_VALUE;
    double memtableFlushTimeMax = 0.0;
    long compactionTimeSum = 0;
    long compactionTimeCount = 0;
    double compactionTimeMin = Double.MAX_VALUE;
    double compactionTimeMax = 0.0;
    boolean shouldRecord = true;
    for (final DbAndCacheAndStatistics valueProviders : storeToValueProviders.values()) {
        if (valueProviders.statistics == null) {
            shouldRecord = false;
            break;
        }
        bytesWrittenToDatabase += valueProviders.statistics.getAndResetTickerCount(TickerType.BYTES_WRITTEN);
        bytesReadFromDatabase += valueProviders.statistics.getAndResetTickerCount(TickerType.BYTES_READ);
        memtableBytesFlushed += valueProviders.statistics.getAndResetTickerCount(TickerType.FLUSH_WRITE_BYTES);
        memtableHits += valueProviders.statistics.getAndResetTickerCount(TickerType.MEMTABLE_HIT);
        memtableMisses += valueProviders.statistics.getAndResetTickerCount(TickerType.MEMTABLE_MISS);
        blockCacheDataHits += valueProviders.statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_DATA_HIT);
        blockCacheDataMisses += valueProviders.statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_DATA_MISS);
        blockCacheIndexHits += valueProviders.statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_INDEX_HIT);
        blockCacheIndexMisses += valueProviders.statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_INDEX_MISS);
        blockCacheFilterHits += valueProviders.statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_FILTER_HIT);
        blockCacheFilterMisses += valueProviders.statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_FILTER_MISS);
        writeStallDuration += valueProviders.statistics.getAndResetTickerCount(TickerType.STALL_MICROS);
        bytesWrittenDuringCompaction += valueProviders.statistics.getAndResetTickerCount(TickerType.COMPACT_WRITE_BYTES);
        bytesReadDuringCompaction += valueProviders.statistics.getAndResetTickerCount(TickerType.COMPACT_READ_BYTES);
        numberOfOpenFiles += valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_OPENS)
            - valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_CLOSES);
        numberOfFileErrors += valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_ERRORS);
        final HistogramData memtableFlushTimeData = valueProviders.statistics.getHistogramData(HistogramType.FLUSH_TIME);
        memtableFlushTimeSum += memtableFlushTimeData.getSum();
        memtableFlushTimeCount += memtableFlushTimeData.getCount();
        memtableFlushTimeMin = Double.min(memtableFlushTimeMin, memtableFlushTimeData.getMin());
        memtableFlushTimeMax = Double.max(memtableFlushTimeMax, memtableFlushTimeData.getMax());
        final HistogramData compactionTimeData = valueProviders.statistics.getHistogramData(HistogramType.COMPACTION_TIME);
        compactionTimeSum += compactionTimeData.getSum();
        compactionTimeCount += compactionTimeData.getCount();
        compactionTimeMin = Double.min(compactionTimeMin, compactionTimeData.getMin());
        compactionTimeMax = Double.max(compactionTimeMax, compactionTimeData.getMax());
    }
    if (shouldRecord) {
        bytesWrittenToDatabaseSensor.record(bytesWrittenToDatabase, now);
        bytesReadFromDatabaseSensor.record(bytesReadFromDatabase, now);
        memtableBytesFlushedSensor.record(memtableBytesFlushed, now);
        memtableHitRatioSensor.record(computeHitRatio(memtableHits, memtableMisses), now);
        memtableAvgFlushTimeSensor.record(computeAvg(memtableFlushTimeSum, memtableFlushTimeCount), now);
        memtableMinFlushTimeSensor.record(memtableFlushTimeMin, now);
        memtableMaxFlushTimeSensor.record(memtableFlushTimeMax, now);
        blockCacheDataHitRatioSensor.record(computeHitRatio(blockCacheDataHits, blockCacheDataMisses), now);
        blockCacheIndexHitRatioSensor.record(computeHitRatio(blockCacheIndexHits, blockCacheIndexMisses), now);
        blockCacheFilterHitRatioSensor.record(computeHitRatio(blockCacheFilterHits, blockCacheFilterMisses), now);
        writeStallDurationSensor.record(writeStallDuration, now);
        bytesWrittenDuringCompactionSensor.record(bytesWrittenDuringCompaction, now);
        bytesReadDuringCompactionSensor.record(bytesReadDuringCompaction, now);
        compactionTimeAvgSensor.record(computeAvg(compactionTimeSum, compactionTimeCount), now);
        compactionTimeMinSensor.record(compactionTimeMin, now);
        compactionTimeMaxSensor.record(compactionTimeMax, now);
        numberOfOpenFilesSensor.record(numberOfOpenFiles, now);
        numberOfFileErrorsSensor.record(numberOfFileErrors, now);
    }
}
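The computeHitRatio and computeAvg helpers are not shown in this excerpt. A plausible sketch of what they do, consistent with the zero-sum/zero-count test further below, is the following (a sketch only, not the verbatim Kafka implementation):

private double computeHitRatio(final long hits, final long misses) {
    if (hits == 0) {
        return 0;
    }
    return (double) hits / (hits + misses);
}

private double computeAvg(final long sum, final long count) {
    if (count == 0) {
        return 0;  // avoids NaN when no flushes or compactions happened in the interval
    }
    return (double) sum / count;
}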
Use of org.rocksdb.HistogramData in project ozone by apache.
Class RocksDBStoreMBean, method getHistogramData.
/**
 * Collect all histogram metrics from RocksDB statistics.
 * @param rb Metrics Record Builder.
 */
private void getHistogramData(MetricsRecordBuilder rb) {
    for (HistogramType histogramType : HistogramType.values()) {
        HistogramData histogram = statistics.getHistogramData(HistogramType.valueOf(histogramType.name()));
        for (String histogramAttribute : histogramAttributes) {
            try {
                Method method = HistogramData.class.getMethod("get" + histogramAttribute);
                double metricValue = (double) method.invoke(histogram);
                rb.addGauge(Interns.info(histogramType.name() + "_" + histogramAttribute.toUpperCase(),
                    "RocksDBStat"), metricValue);
            } catch (Exception e) {
                LOG.error("Error reading histogram data", e);
            }
        }
    }
}
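The reflection call resolves to the ordinary HistogramData getters. Assuming histogramAttributes holds getter suffixes such as "Average", "Median", and "Max" (an assumption about the field's contents, not shown in this excerpt), the loop body is equivalent to direct calls like this sketch, where HistogramType.DB_GET is only an illustrative type:

// Equivalent direct access for one histogram type, without reflection (sketch).
HistogramData data = statistics.getHistogramData(HistogramType.DB_GET);
double average = data.getAverage();  // "get" + "Average"
double median = data.getMedian();    // "get" + "Median"
double max = data.getMax();          // "get" + "Max"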
Use of org.rocksdb.HistogramData in project mercury by yellow013.
Class RocksDBSample, method main.
@SuppressWarnings("deprecation")
public static void main(final String[] args) {
    // if (args.length < 1) {
    //     System.out.println("usage: RocksDBSample db_path");
    //     System.exit(-1);
    // }
    // args[0];
    final String db_path = dbPath;
    final String db_path_not_found = db_path + "_not_found";
    System.out.println("RocksDBSample");
    try (final Options options = new Options();
         final Filter bloomFilter = new BloomFilter(10);
         final ReadOptions readOptions = new ReadOptions().setFillCache(false);
         final Statistics stats = new Statistics();
         final RateLimiter rateLimiter = new RateLimiter(10000000, 10000, 10)) {
        try (final RocksDB db = RocksDB.open(options, db_path_not_found)) {
            assert (false);
        } catch (final RocksDBException e) {
            System.out.format("Caught the expected exception -- %s\n", e);
        }
        try {
            options.setCreateIfMissing(true)
                .setStatistics(stats)
                .setWriteBufferSize(8 * SizeUnit.KB)
                .setMaxWriteBufferNumber(3)
                .setMaxBackgroundCompactions(10)
                .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
                .setCompactionStyle(CompactionStyle.UNIVERSAL);
        } catch (final IllegalArgumentException e) {
            assert (false);
        }
        assert (options.createIfMissing() == true);
        assert (options.writeBufferSize() == 8 * SizeUnit.KB);
        assert (options.maxWriteBufferNumber() == 3);
        assert (options.maxBackgroundCompactions() == 10);
        assert (options.compressionType() == CompressionType.SNAPPY_COMPRESSION);
        assert (options.compactionStyle() == CompactionStyle.UNIVERSAL);
        assert (options.memTableFactoryName().equals("SkipListFactory"));
        options.setMemTableConfig(new HashSkipListMemTableConfig().setHeight(4).setBranchingFactor(4).setBucketCount(2000000));
        assert (options.memTableFactoryName().equals("HashSkipListRepFactory"));
        options.setMemTableConfig(new HashLinkedListMemTableConfig().setBucketCount(100000));
        assert (options.memTableFactoryName().equals("HashLinkedListRepFactory"));
        options.setMemTableConfig(new VectorMemTableConfig().setReservedSize(10000));
        assert (options.memTableFactoryName().equals("VectorRepFactory"));
        options.setMemTableConfig(new SkipListMemTableConfig());
        assert (options.memTableFactoryName().equals("SkipListFactory"));
        options.setTableFormatConfig(new PlainTableConfig());
        // Plain-Table requires mmap read
        options.setAllowMmapReads(true);
        assert (options.tableFactoryName().equals("PlainTable"));
        options.setRateLimiter(rateLimiter);
        final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
        table_options.setBlockCache(new LRUCache(512))
            .setFilterPolicy(bloomFilter)
            .setBlockSizeDeviation(5)
            .setBlockRestartInterval(10)
            .setCacheIndexAndFilterBlocks(true)
            .setBlockCacheCompressed(new LRUCache(512));
        // assert (table_options.cacheNumShardBits() == 6);
        assert (table_options.blockSizeDeviation() == 5);
        assert (table_options.blockRestartInterval() == 10);
        assert (table_options.cacheIndexAndFilterBlocks() == true);
        // assert (table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
        // assert (table_options.blockCacheCompressedNumShardBits() == 10);
        options.setTableFormatConfig(table_options);
        assert (options.tableFactoryName().equals("BlockBasedTable"));
        try (final RocksDB db = RocksDB.open(options, db_path)) {
            db.put("hello".getBytes(), "world".getBytes());
            final byte[] value = db.get("hello".getBytes());
            assert ("world".equals(new String(value)));
            final String str = db.getProperty("rocksdb.stats");
            assert (str != null && !str.equals(""));
        } catch (final RocksDBException e) {
            System.out.format("[ERROR] caught the unexpected exception -- %s\n", e);
            assert (false);
        }
        try (final RocksDB db = RocksDB.open(options, db_path)) {
            db.put("hello".getBytes(), "world".getBytes());
            byte[] value = db.get("hello".getBytes());
            System.out.format("Get('hello') = %s\n", new String(value));
            for (int i = 1; i <= 9; ++i) {
                for (int j = 1; j <= 9; ++j) {
                    db.put(String.format("%dx%d", i, j).getBytes(), String.format("%d", i * j).getBytes());
                }
            }
            for (int i = 1; i <= 9; ++i) {
                for (int j = 1; j <= 9; ++j) {
                    System.out.format("%s ", new String(db.get(String.format("%dx%d", i, j).getBytes())));
                }
                System.out.println("");
            }
            // write batch test
            try (final WriteOptions writeOpt = new WriteOptions()) {
                for (int i = 10; i <= 19; ++i) {
                    try (final WriteBatch batch = new WriteBatch()) {
                        for (int j = 10; j <= 19; ++j) {
                            batch.put(String.format("%dx%d", i, j).getBytes(), String.format("%d", i * j).getBytes());
                        }
                        db.write(writeOpt, batch);
                    }
                }
            }
            for (int i = 10; i <= 19; ++i) {
                for (int j = 10; j <= 19; ++j) {
                    assert (new String(db.get(String.format("%dx%d", i, j).getBytes())).equals(String.format("%d", i * j)));
                    System.out.format("%s ", new String(db.get(String.format("%dx%d", i, j).getBytes())));
                }
                System.out.println("");
            }
            value = db.get("1x1".getBytes());
            assert (value != null);
            value = db.get("world".getBytes());
            assert (value == null);
            value = db.get(readOptions, "world".getBytes());
            assert (value == null);
            final byte[] testKey = "asdf".getBytes();
            final byte[] testValue = "asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
            db.put(testKey, testValue);
            byte[] testResult = db.get(testKey);
            assert (testResult != null);
            assert (Arrays.equals(testValue, testResult));
            assert (new String(testValue).equals(new String(testResult)));
            testResult = db.get(readOptions, testKey);
            assert (testResult != null);
            assert (Arrays.equals(testValue, testResult));
            assert (new String(testValue).equals(new String(testResult)));
            final byte[] insufficientArray = new byte[10];
            final byte[] enoughArray = new byte[50];
            int len;
            len = db.get(testKey, insufficientArray);
            assert (len > insufficientArray.length);
            len = db.get("asdfjkl;".getBytes(), enoughArray);
            assert (len == RocksDB.NOT_FOUND);
            len = db.get(testKey, enoughArray);
            assert (len == testValue.length);
            len = db.get(readOptions, testKey, insufficientArray);
            assert (len > insufficientArray.length);
            len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray);
            assert (len == RocksDB.NOT_FOUND);
            len = db.get(readOptions, testKey, enoughArray);
            assert (len == testValue.length);
            db.delete(testKey);
            len = db.get(testKey, enoughArray);
            assert (len == RocksDB.NOT_FOUND);
            // repeat the test with WriteOptions
            try (final WriteOptions writeOpts = new WriteOptions()) {
                writeOpts.setSync(true);
                writeOpts.setDisableWAL(true);
                db.put(writeOpts, testKey, testValue);
                len = db.get(testKey, enoughArray);
                assert (len == testValue.length);
                assert (new String(testValue).equals(new String(enoughArray, 0, len)));
            }
            try {
                for (final TickerType statsType : TickerType.values()) {
                    if (statsType != TickerType.TICKER_ENUM_MAX) {
                        stats.getTickerCount(statsType);
                    }
                }
                System.out.println("getTickerCount() passed.");
            } catch (final Exception e) {
                System.out.println("Failed in call to getTickerCount()");
                // Should never reach here.
                assert (false);
            }
            try {
                for (final HistogramType histogramType : HistogramType.values()) {
                    if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
                        @SuppressWarnings("unused")
                        HistogramData data = stats.getHistogramData(histogramType);
                    }
                }
                System.out.println("getHistogramData() passed.");
            } catch (final Exception e) {
                System.out.println("Failed in call to getHistogramData()");
                // Should never reach here.
                assert (false);
            }
            try (final RocksIterator iterator = db.newIterator()) {
                boolean seekToFirstPassed = false;
                for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
                    iterator.status();
                    assert (iterator.key() != null);
                    assert (iterator.value() != null);
                    seekToFirstPassed = true;
                }
                if (seekToFirstPassed) {
                    System.out.println("iterator seekToFirst tests passed.");
                }
                boolean seekToLastPassed = false;
                for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
                    iterator.status();
                    assert (iterator.key() != null);
                    assert (iterator.value() != null);
                    seekToLastPassed = true;
                }
                if (seekToLastPassed) {
                    System.out.println("iterator seekToLastPassed tests passed.");
                }
                iterator.seekToFirst();
                iterator.seek(iterator.key());
                assert (iterator.key() != null);
                assert (iterator.value() != null);
                System.out.println("iterator seek test passed.");
            }
            System.out.println("iterator tests passed.");
            final List<byte[]> keys = new ArrayList<>();
            try (final RocksIterator iterator = db.newIterator()) {
                for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
                    keys.add(iterator.key());
                }
            }
            List<byte[]> values = db.multiGetAsList(keys);
            assert (values.size() == keys.size());
            for (final byte[] value1 : values) {
                assert (value1 != null);
            }
            values = db.multiGetAsList(new ReadOptions(), keys);
            assert (values.size() == keys.size());
            for (final byte[] value1 : values) {
                assert (value1 != null);
            }
        } catch (final RocksDBException e) {
            System.err.println(e);
        }
    }
}
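As a small follow-on sketch (not part of the sample above), the individual fields of a HistogramData returned by stats.getHistogramData can be read with its getters; HistogramType.DB_GET is used here only as an illustrative histogram type:

HistogramData dbGetHistogram = stats.getHistogramData(HistogramType.DB_GET);
System.out.format("DB_GET: avg=%.2f max=%.2f count=%d sum=%d\n",
    dbGetHistogram.getAverage(), dbGetHistogram.getMax(),
    dbGetHistogram.getCount(), dbGetHistogram.getSum());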
Use of org.rocksdb.HistogramData in project kafka by apache.
Class RocksDBMetricsRecorderTest, method shouldCorrectlyHandleAvgRecordingsWithZeroSumAndCount.
@Test
public void shouldCorrectlyHandleAvgRecordingsWithZeroSumAndCount() {
    reset(statisticsToAdd1);
    recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
    expect(statisticsToAdd1.getHistogramData(anyObject()))
        .andStubReturn(new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0L, 0L, 0.0));
    expect(statisticsToAdd1.getAndResetTickerCount(anyObject())).andStubReturn(0L);
    replay(statisticsToAdd1);
    memtableAvgFlushTimeSensor.record(0, 0L);
    compactionTimeAvgSensor.record(0, 0L);
    replay(memtableAvgFlushTimeSensor);
    replay(compactionTimeAvgSensor);
    recorder.record(0L);
    verify(memtableAvgFlushTimeSensor);
    verify(compactionTimeAvgSensor);
}