Use of co.cask.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
The class SparkMetricsIntegrationTestRun, method getTotalCounter.
private long getTotalCounter(Map<String, String> context, String metricName) throws Exception {
  MetricDataQuery query = new MetricDataQuery(0, 0, Integer.MAX_VALUE, metricName,
                                              AggregationFunction.SUM, context, new ArrayList<String>());
  try {
    Collection<MetricTimeSeries> result = getMetricsManager().query(query);
    if (result.isEmpty()) {
      return 0;
    }
    // since it is a totals query with no groupBy specified, we know there is exactly one time series
    List<TimeValue> timeValues = result.iterator().next().getTimeValues();
    if (timeValues.isEmpty()) {
      return 0;
    }
    // since it is a totals query, we know there is only one value
    return timeValues.get(0).getValue();
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
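Because the query uses a (0, 0) time range, SUM aggregation, and no groupBy tags, the store returns at most one series containing at most one total value. That contract makes the pattern easy to lift into a reusable helper. The sketch below is illustrative only: the MetricTotals class name is made up, and it assumes direct access to a MetricStore rather than the test's getMetricsManager().

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import co.cask.cdap.api.dataset.lib.cube.AggregationFunction;
import co.cask.cdap.api.dataset.lib.cube.TimeValue;
import co.cask.cdap.api.metrics.MetricDataQuery;
import co.cask.cdap.api.metrics.MetricStore;
import co.cask.cdap.api.metrics.MetricTimeSeries;

public final class MetricTotals {

  private MetricTotals() { }

  // Sums a metric over all time for the given tag context. Relies on the
  // (0, 0, Integer.MAX_VALUE) totals contract shown above: at most one series,
  // holding at most one aggregated value.
  public static long total(MetricStore store, Map<String, String> tags, String metricName) {
    MetricDataQuery query = new MetricDataQuery(
      0, 0, Integer.MAX_VALUE, metricName,
      AggregationFunction.SUM, tags, Collections.<String>emptyList());
    Collection<MetricTimeSeries> result = store.query(query);
    if (result.isEmpty()) {
      return 0L;                                   // no data recorded for this context yet
    }
    List<TimeValue> timeValues = result.iterator().next().getTimeValues();
    return timeValues.isEmpty() ? 0L : timeValues.get(0).getValue();
  }
}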
Use of co.cask.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
The class MapReduceProgramRunnerTest, method testMapreduceWithDynamicDatasets.
@Test
public void testMapreduceWithDynamicDatasets() throws Exception {
  DatasetId rtInput1 = DefaultId.NAMESPACE.dataset("rtInput1");
  DatasetId rtInput2 = DefaultId.NAMESPACE.dataset("rtInput2");
  DatasetId rtOutput1 = DefaultId.NAMESPACE.dataset("rtOutput1");
  // create the datasets here because they are not created by the app
  dsFramework.addInstance("fileSet", rtInput1, FileSetProperties.builder()
    .setBasePath("rtInput1")
    .setInputFormat(TextInputFormat.class)
    .setOutputFormat(TextOutputFormat.class)
    .setOutputProperty(TextOutputFormat.SEPERATOR, ":")
    .build());
  dsFramework.addInstance("fileSet", rtOutput1, FileSetProperties.builder()
    .setBasePath("rtOutput1")
    .setInputFormat(TextInputFormat.class)
    .setOutputFormat(TextOutputFormat.class)
    .setOutputProperty(TextOutputFormat.SEPERATOR, ":")
    .build());
  // build runtime args for the app
  Map<String, String> runtimeArguments = Maps.newHashMap();
  // Make sure there is only one mapper running at a time, since this test has the Mapper writing
  // to a dataset using increments and the in-memory table does not support concurrent increments.
  runtimeArguments.put("mr.job.conf.mapreduce.local.map.tasks.maximum", "1");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME, "rtInput1");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS, "abc, xyz");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME, "rtOutput1");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH, "a001");
  // test reading and writing distinct datasets, reading more than one path
  testMapreduceWithFile("rtInput1", "abc, xyz", "rtOutput1", "a001",
                        AppWithMapReduceUsingRuntimeDatasets.class,
                        AppWithMapReduceUsingRuntimeDatasets.ComputeSum.class,
                        runtimeArguments, AppWithMapReduceUsingRuntimeDatasets.COUNTERS, null);
  // validate that the table emitted metrics
  Collection<MetricTimeSeries> metrics = metricStore.query(new MetricDataQuery(
    0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
    "system." + Constants.Metrics.Name.Dataset.OP_COUNT, AggregationFunction.SUM,
    ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getNamespace(),
                    Constants.Metrics.Tag.APP, AppWithMapReduceUsingRuntimeDatasets.APP_NAME,
                    Constants.Metrics.Tag.MAPREDUCE, AppWithMapReduceUsingRuntimeDatasets.MR_NAME,
                    Constants.Metrics.Tag.DATASET, "rtt"),
    Collections.<String>emptyList()));
  Assert.assertEquals(1, metrics.size());
  MetricTimeSeries ts = metrics.iterator().next();
  Assert.assertEquals(1, ts.getTimeValues().size());
  Assert.assertEquals(1, ts.getTimeValues().get(0).getValue());
  // test reading and writing the same dataset
  dsFramework.addInstance("fileSet", rtInput2, FileSetProperties.builder()
    .setBasePath("rtInput2")
    .setInputFormat(TextInputFormat.class)
    .setOutputFormat(TextOutputFormat.class)
    .setOutputProperty(TextOutputFormat.SEPERATOR, ":")
    .build());
  runtimeArguments = Maps.newHashMap();
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME, "rtInput2");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS, "zzz");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME, "rtInput2");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH, "f123");
  testMapreduceWithFile("rtInput2", "zzz", "rtInput2", "f123",
                        AppWithMapReduceUsingRuntimeDatasets.class,
                        AppWithMapReduceUsingRuntimeDatasets.ComputeSum.class,
                        runtimeArguments, AppWithMapReduceUsingRuntimeDatasets.COUNTERS, null);
}
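The validation block above slices the dataset op-count metric down to a single dataset ("rtt") and therefore expects exactly one series. Passing groupBy tags instead of slicing makes the store return one series per distinct tag value. A hedged sketch of that variant follows; the literal metric and tag names stand in for the Constants.Metrics constants used above and may differ, and getTagValues() identifying each group is also an assumption.

import java.util.Collection;

import co.cask.cdap.api.dataset.lib.cube.AggregationFunction;
import co.cask.cdap.api.dataset.lib.cube.TimeValue;
import co.cask.cdap.api.metrics.MetricDataQuery;
import co.cask.cdap.api.metrics.MetricStore;
import co.cask.cdap.api.metrics.MetricTimeSeries;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

final class DatasetOpCounts {

  private DatasetOpCounts() { }

  // Queries the dataset op-count metric for one app, grouped by dataset, so the
  // store returns one series per dataset instead of a single sliced series.
  static void printOpCountsByDataset(MetricStore store, String namespace, String app) {
    MetricDataQuery byDataset = new MetricDataQuery(
      0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
      "system.dataset.store.ops", AggregationFunction.SUM,  // assumed expansion of the OP_COUNT constant
      ImmutableMap.of("ns", namespace, "app", app),         // assumed values of Constants.Metrics.Tag.NAMESPACE / APP
      ImmutableList.of("ds"));                              // assumed value of Constants.Metrics.Tag.DATASET
    Collection<MetricTimeSeries> series = store.query(byDataset);
    for (MetricTimeSeries ts : series) {
      long total = 0L;
      for (TimeValue tv : ts.getTimeValues()) {
        total += tv.getValue();                             // sum the aggregated points for this dataset
      }
      System.out.println(ts.getTagValues() + " -> " + total);
    }
  }
}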
Use of co.cask.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
The class AbstractStreamService, method getStreamEventsSize.
/**
 * Gets the size of events ingested by a stream since its creation, in bytes.
 *
 * @param streamId id of the stream
 * @return size of the events ingested by the stream since its creation
 * @throws IOException if there is an error retrieving the metric
 */
protected long getStreamEventsSize(StreamId streamId) throws IOException {
  MetricDataQuery metricDataQuery = new MetricDataQuery(
    0L, TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), Integer.MAX_VALUE,
    "system.collect.bytes", AggregationFunction.SUM,
    ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, streamId.getNamespace(),
                    Constants.Metrics.Tag.STREAM, streamId.getEntityName()),
    ImmutableList.<String>of());
  try {
    Collection<MetricTimeSeries> metrics = metricStore.query(metricDataQuery);
    if (metrics == null || metrics.isEmpty()) {
      // no data is available yet, which means the stream has not ingested any events
      return 0L;
    }
    MetricTimeSeries metric = metrics.iterator().next();
    List<TimeValue> timeValues = metric.getTimeValues();
    if (timeValues == null || timeValues.size() != 1) {
      throw new IOException("Should collect exactly one time value");
    }
    return timeValues.get(0).getValue();
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw new IOException(e);
  }
}
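The same query shape works for a stream's event count: only the metric name changes. A minimal sketch under that assumption; "system.collect.events" as the counterpart of "system.collect.bytes" is assumed rather than confirmed by the snippet above, and the literal tag names stand in for Constants.Metrics.Tag.NAMESPACE and Constants.Metrics.Tag.STREAM.

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;

import co.cask.cdap.api.dataset.lib.cube.AggregationFunction;
import co.cask.cdap.api.dataset.lib.cube.TimeValue;
import co.cask.cdap.api.metrics.MetricDataQuery;
import co.cask.cdap.api.metrics.MetricStore;
import co.cask.cdap.api.metrics.MetricTimeSeries;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

final class StreamMetrics {

  private StreamMetrics() { }

  // Total events ingested by a stream since its creation; same query contract
  // as getStreamEventsSize above (one series, exactly one aggregated value).
  static long getStreamEventCount(MetricStore store, String namespace, String stream) throws IOException {
    MetricDataQuery query = new MetricDataQuery(
      0L, TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), Integer.MAX_VALUE,
      "system.collect.events",                         // assumed counterpart of system.collect.bytes
      AggregationFunction.SUM,
      ImmutableMap.of("ns", namespace, "str", stream), // assumed values of Constants.Metrics.Tag.NAMESPACE / STREAM
      ImmutableList.<String>of());
    Collection<MetricTimeSeries> metrics = store.query(query);
    if (metrics == null || metrics.isEmpty()) {
      return 0L;                                       // nothing ingested yet
    }
    List<TimeValue> timeValues = metrics.iterator().next().getTimeValues();
    if (timeValues == null || timeValues.size() != 1) {
      throw new IOException("Expected exactly one time value");
    }
    return timeValues.get(0).getValue();
  }
}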
Use of co.cask.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
The class WorkflowStatsSLAHttpHandler, method getSparkDetails.
private Map<String, Long> getSparkDetails(ProgramId sparkProgram, String runId) throws Exception {
  Map<String, String> context = new HashMap<>();
  context.put(Constants.Metrics.Tag.NAMESPACE, sparkProgram.getNamespace());
  context.put(Constants.Metrics.Tag.APP, sparkProgram.getApplication());
  context.put(Constants.Metrics.Tag.SPARK, sparkProgram.getProgram());
  context.put(Constants.Metrics.Tag.RUN_ID, runId);
  List<TagValue> tags = new ArrayList<>();
  for (Map.Entry<String, String> entry : context.entrySet()) {
    tags.add(new TagValue(entry.getKey(), entry.getValue()));
  }
  MetricSearchQuery metricSearchQuery = new MetricSearchQuery(0, 0, Integer.MAX_VALUE, tags);
  Collection<String> metricNames = metricStore.findMetricNames(metricSearchQuery);
  Map<String, Long> overallResult = new HashMap<>();
  for (String metricName : metricNames) {
    Collection<MetricTimeSeries> resultPerQuery = metricStore.query(
      new MetricDataQuery(0, 0, Integer.MAX_VALUE, metricName,
                          AggregationFunction.SUM, context, new ArrayList<String>()));
    for (MetricTimeSeries metricTimeSeries : resultPerQuery) {
      overallResult.put(metricTimeSeries.getMetricName(),
                        metricTimeSeries.getTimeValues().get(0).getValue());
    }
  }
  return overallResult;
}
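Note that metricTimeSeries.getTimeValues().get(0) assumes every discovered metric name yields a series with at least one aggregated value. A hedged sketch of the same discovery-then-total flow with that case guarded; the class and method names are made up, and the import locations for TagValue and MetricSearchQuery are assumed to match the handler's.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import co.cask.cdap.api.dataset.lib.cube.AggregationFunction;
import co.cask.cdap.api.dataset.lib.cube.TimeValue;
import co.cask.cdap.api.metrics.MetricDataQuery;
import co.cask.cdap.api.metrics.MetricSearchQuery;
import co.cask.cdap.api.metrics.MetricStore;
import co.cask.cdap.api.metrics.MetricTimeSeries;
import co.cask.cdap.api.metrics.TagValue;

final class MetricDiscovery {

  private MetricDiscovery() { }

  // Discovers all metric names under a tag context and totals each one,
  // skipping any series that comes back without data.
  static Map<String, Long> totalsForContext(MetricStore store, Map<String, String> context) throws Exception {
    List<TagValue> tags = new ArrayList<>();
    for (Map.Entry<String, String> entry : context.entrySet()) {
      tags.add(new TagValue(entry.getKey(), entry.getValue()));
    }
    Collection<String> names = store.findMetricNames(new MetricSearchQuery(0, 0, Integer.MAX_VALUE, tags));
    Map<String, Long> totals = new HashMap<>();
    for (String name : names) {
      for (MetricTimeSeries series : store.query(new MetricDataQuery(
             0, 0, Integer.MAX_VALUE, name, AggregationFunction.SUM, context, new ArrayList<String>()))) {
        List<TimeValue> values = series.getTimeValues();
        if (!values.isEmpty()) {                     // defensive: skip series with no data
          totals.put(series.getMetricName(), values.get(0).getValue());
        }
      }
    }
    return totals;
  }
}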
Use of co.cask.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
The class WorkerProgramRunnerTest, method testWorkerDatasetWithMetrics.
@Test
public void testWorkerDatasetWithMetrics() throws Throwable {
  final ApplicationWithPrograms app =
    AppFabricTestHelper.deployApplicationWithManager(AppWithWorker.class, TEMP_FOLDER_SUPPLIER);
  ProgramController controller = startProgram(app, AppWithWorker.TableWriter.class);
  // validate worker wrote the "initialize" and "run" rows
  final TransactionExecutor executor = txExecutorFactory.createExecutor(datasetCache);
  // wait at most 5 seconds until the "RUN" row is set (indicates the worker has started running)
  Tasks.waitFor(AppWithWorker.RUN, new Callable<String>() {
    @Override
    public String call() throws Exception {
      return executor.execute(new Callable<String>() {
        @Override
        public String call() throws Exception {
          KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
          return Bytes.toString(kvTable.read(AppWithWorker.RUN));
        }
      });
    }
  }, 5, TimeUnit.SECONDS);
  stopProgram(controller);
  txExecutorFactory.createExecutor(datasetCache.getTransactionAwares()).execute(
    new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
        Assert.assertEquals(AppWithWorker.RUN, Bytes.toString(kvTable.read(AppWithWorker.RUN)));
        Assert.assertEquals(AppWithWorker.INITIALIZE, Bytes.toString(kvTable.read(AppWithWorker.INITIALIZE)));
        Assert.assertEquals(AppWithWorker.STOP, Bytes.toString(kvTable.read(AppWithWorker.STOP)));
      }
    });
  // validate that the table emitted metrics
  Tasks.waitFor(3L, new Callable<Long>() {
    @Override
    public Long call() throws Exception {
      Collection<MetricTimeSeries> metrics = metricStore.query(new MetricDataQuery(
        0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
        "system." + Constants.Metrics.Name.Dataset.OP_COUNT, AggregationFunction.SUM,
        ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getEntityName(),
                        Constants.Metrics.Tag.APP, AppWithWorker.NAME,
                        Constants.Metrics.Tag.WORKER, AppWithWorker.WORKER,
                        Constants.Metrics.Tag.DATASET, AppWithWorker.DATASET),
        Collections.<String>emptyList()));
      if (metrics.isEmpty()) {
        return 0L;
      }
      Assert.assertEquals(1, metrics.size());
      MetricTimeSeries ts = metrics.iterator().next();
      Assert.assertEquals(1, ts.getTimeValues().size());
      return ts.getTimeValues().get(0).getValue();
    }
  }, 5L, TimeUnit.SECONDS, 50L, TimeUnit.MILLISECONDS);
}
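Since dataset metrics are emitted asynchronously, the test polls with Tasks.waitFor instead of asserting immediately. That polling pattern generalizes; here is a sketch of a reusable wait-for-metric helper. The MetricWait class is made up, and the cdap-common import path for Tasks is an assumption.

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

import co.cask.cdap.api.dataset.lib.cube.AggregationFunction;
import co.cask.cdap.api.dataset.lib.cube.TimeValue;
import co.cask.cdap.api.metrics.MetricDataQuery;
import co.cask.cdap.api.metrics.MetricStore;
import co.cask.cdap.api.metrics.MetricTimeSeries;
import co.cask.cdap.common.utils.Tasks;              // assumed location of the Tasks utility

final class MetricWait {

  private MetricWait() { }

  // Polls until the summed metric for the given tag context reaches the
  // expected value, or fails when the timeout elapses.
  static void waitForMetric(final MetricStore store, final String metricName,
                            final Map<String, String> tags, long expected) throws Exception {
    Tasks.waitFor(expected, new Callable<Long>() {
      @Override
      public Long call() throws Exception {
        Collection<MetricTimeSeries> metrics = store.query(new MetricDataQuery(
          0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE, metricName,
          AggregationFunction.SUM, tags, Collections.<String>emptyList()));
        if (metrics.isEmpty()) {
          return 0L;                                 // metrics are emitted asynchronously; keep polling
        }
        List<TimeValue> values = metrics.iterator().next().getTimeValues();
        return values.isEmpty() ? 0L : values.get(0).getValue();
      }
    }, 10L, TimeUnit.SECONDS, 50L, TimeUnit.MILLISECONDS);
  }
}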