Use of io.cdap.cdap.api.dataset.metrics.MeteredDataset in project cdap by caskdata, in the class SingleThreadDatasetCache, method createDatasetInstance:
/**
 * Creates a new instance of a dataset based on the given information.
 */
private Dataset createDatasetInstance(DatasetCacheKey key, boolean recordLineage) {
  DatasetId datasetId = new DatasetId(key.getNamespace(), key.getName());
  Dataset dataset = instantiator.getDataset(datasetId, key.getArguments(), key.getAccessType());
  if (dataset instanceof MeteredDataset && metricsContext != null) {
    // Hand the dataset a metrics context tagged with its name, so that every metric it emits
    // is reported against this particular dataset.
    ((MeteredDataset) dataset).setMetricsCollector(
      metricsContext.childContext(Constants.Metrics.Tag.DATASET, key.getName()));
  }
  if (recordLineage) {
    instantiator.writeLineage(datasetId, key.getAccessType());
  }
  return dataset;
}
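For context, a dataset opts into this wiring by implementing MeteredDataset and reporting its counters through the injected collector. The following is a minimal sketch, not code from the project: the class ExampleCountingDataset, its read method and the metric name "reads" are hypothetical, while setMetricsCollector, increment and gauge follow the signatures visible in the snippets on this page.

public class ExampleCountingDataset implements Dataset, MeteredDataset {

  private MetricsCollector metricsCollector;

  @Override
  public void setMetricsCollector(MetricsCollector metricsCollector) {
    // Injected by the framework, e.g. from createDatasetInstance above.
    this.metricsCollector = metricsCollector;
  }

  // Hypothetical read operation that reports a counter for each call.
  public byte[] read(byte[] key) {
    if (metricsCollector != null) {
      metricsCollector.increment("reads", 1L); // "reads" is an assumed metric name
    }
    return null; // placeholder: a real dataset would return the stored value
  }

  @Override
  public void close() {
    // nothing to release in this sketch
  }
}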
Use of io.cdap.cdap.api.dataset.metrics.MeteredDataset in project cdap by caskdata, in the class TableTest, method testMetrics:
private void testMetrics(boolean readless) throws Exception {
  final String tableName = "survive";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  try (Table table = getTable(CONTEXT1, tableName, props)) {
    final Map<String, Long> metrics = Maps.newHashMap();
    // Collect emitted metrics into a plain map so the test can assert on them directly.
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
      @Override
      public void increment(String metricName, long value) {
        Long old = metrics.get(metricName);
        metrics.put(metricName, old == null ? value : old + value);
      }

      @Override
      public void gauge(String metricName, long value) {
        metrics.put(metricName, value);
      }
    });

    // Note that we don't need to finish the tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);

    int writes = 0;
    int reads = 0;

    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);

    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);

    // note: will not write anything as the expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);

    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
      // a readless increment is write-only
      verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
      verifyDatasetMetrics(metrics, ++writes, ++reads);
    }

    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);

    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);

    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
      verifyDatasetMetrics(metrics, writes, ++reads);
    }

    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
  } finally {
    // drop the table
    admin.drop();
  }
}
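The verifyDatasetMetrics helper is defined elsewhere in TableTest. A plausible shape is sketched below as an assumption, not the project's actual code: the metric keys "store.writes" and "store.reads" are hypothetical stand-ins for the project's own metric-name constants, and only the signature is taken from the call sites above.

// Sketch only: asserts the write and read counters accumulated by the collector above.
// The keys "store.writes" and "store.reads" are assumed; the real test uses its own constants.
private void verifyDatasetMetrics(Map<String, Long> metrics, long expectedWrites, long expectedReads) {
  Assert.assertEquals(expectedWrites, metrics.getOrDefault("store.writes", 0L).longValue());
  Assert.assertEquals(expectedReads, metrics.getOrDefault("store.reads", 0L).longValue());
}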