use of io.cdap.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
the class MapReduceProgramRunnerTest method testMapReduceMetricsControl.
@Test
public void testMapReduceMetricsControl() throws Exception {
  final ApplicationWithPrograms app = deployApp(Id.Namespace.fromEntityId(new NamespaceId("metrics_ns")),
                                                AppWithMapReduce.class);
  Map<String, String> runtimeArguments = Maps.newHashMap();
  // runtime arguments consumed by the MapReduce job itself
  runtimeArguments.put("metric", "metric");
  runtimeArguments.put("startTs", "1");
  runtimeArguments.put("stopTs", "3");
  runtimeArguments.put("tag", "tag1");
  // Do not emit metrics for mapreduce
  runtimeArguments.put(SystemArguments.METRICS_ENABLED, "false");
  runProgram(app, AppWithMapReduce.AggregateTimeseriesByTag.class, new BasicArguments(runtimeArguments));
  Collection<MetricTimeSeries> metrics = getMetricTimeSeries();
  Assert.assertEquals(0, metrics.size());
  // emit metrics for mapreduce
  runtimeArguments.put(SystemArguments.METRICS_ENABLED, "true");
  runProgram(app, AppWithMapReduce.AggregateTimeseriesByTag.class, new BasicArguments(runtimeArguments));
  metrics = getMetricTimeSeries();
  Assert.assertTrue(metrics.size() > 0);
}
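The getMetricTimeSeries() helper is not part of this snippet. A minimal sketch of what such a helper could look like is shown below, assuming the test class holds a MetricStore reference; the metric name and tag values are hypothetical placeholders, not the ones actually emitted by AppWithMapReduce.

private Collection<MetricTimeSeries> getMetricTimeSeries() {
  // Sum a (hypothetical) user metric over all time, sliced by the test namespace only.
  return metricStore.query(
    new MetricDataQuery(0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
                        "user.some.metric", AggregationFunction.SUM,
                        ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, "metrics_ns"),
                        Collections.<String>emptyList()));
}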
use of io.cdap.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
the class MapReduceProgramRunnerTest method testMapreduceWithDynamicDatasets.
@Test
public void testMapreduceWithDynamicDatasets() throws Exception {
  DatasetId rtInput1 = DefaultId.NAMESPACE.dataset("rtInput1");
  DatasetId rtInput2 = DefaultId.NAMESPACE.dataset("rtInput2");
  DatasetId rtOutput1 = DefaultId.NAMESPACE.dataset("rtOutput1");
  // create the datasets here because they are not created by the app
  dsFramework.addInstance("fileSet", rtInput1, FileSetProperties.builder()
    .setBasePath("rtInput1")
    .setInputFormat(TextInputFormat.class)
    .setOutputFormat(TextOutputFormat.class)
    .setOutputProperty(TextOutputFormat.SEPERATOR, ":")
    .build());
  dsFramework.addInstance("fileSet", rtOutput1, FileSetProperties.builder()
    .setBasePath("rtOutput1")
    .setInputFormat(TextInputFormat.class)
    .setOutputFormat(TextOutputFormat.class)
    .setOutputProperty(TextOutputFormat.SEPERATOR, ":")
    .build());
  // build runtime args for app
  Map<String, String> runtimeArguments = Maps.newHashMap();
  // Make sure there is only one mapper running at a time, since this test has the Mapper writing
  // to a dataset using increment and the in-memory table doesn't really support concurrent increments
  runtimeArguments.put("mr.job.conf.mapreduce.local.map.tasks.maximum", "1");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME, "rtInput1");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS, "abc, xyz");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME, "rtOutput1");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH, "a001");
  // test reading and writing distinct datasets, reading more than one path
  testMapreduceWithFile("rtInput1", "abc, xyz", "rtOutput1", "a001",
                        AppWithMapReduceUsingRuntimeDatasets.class,
                        AppWithMapReduceUsingRuntimeDatasets.ComputeSum.class,
                        runtimeArguments,
                        AppWithMapReduceUsingRuntimeDatasets.COUNTERS,
                        null);
  // validate that the table emitted metrics
  Collection<MetricTimeSeries> metrics = metricStore.query(
    new MetricDataQuery(0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
                        "system." + Constants.Metrics.Name.Dataset.OP_COUNT,
                        AggregationFunction.SUM,
                        ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getNamespace(),
                                        Constants.Metrics.Tag.APP, AppWithMapReduceUsingRuntimeDatasets.APP_NAME,
                                        Constants.Metrics.Tag.MAPREDUCE, AppWithMapReduceUsingRuntimeDatasets.MR_NAME,
                                        Constants.Metrics.Tag.DATASET, "rtt"),
                        Collections.<String>emptyList()));
  Assert.assertEquals(1, metrics.size());
  MetricTimeSeries ts = metrics.iterator().next();
  Assert.assertEquals(1, ts.getTimeValues().size());
  Assert.assertEquals(1, ts.getTimeValues().get(0).getValue());
  // test reading and writing the same dataset
  dsFramework.addInstance("fileSet", rtInput2, FileSetProperties.builder()
    .setBasePath("rtInput2")
    .setInputFormat(TextInputFormat.class)
    .setOutputFormat(TextOutputFormat.class)
    .setOutputProperty(TextOutputFormat.SEPERATOR, ":")
    .build());
  runtimeArguments = Maps.newHashMap();
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME, "rtInput2");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS, "zzz");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME, "rtInput2");
  runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH, "f123");
  testMapreduceWithFile("rtInput2", "zzz", "rtInput2", "f123",
                        AppWithMapReduceUsingRuntimeDatasets.class,
                        AppWithMapReduceUsingRuntimeDatasets.ComputeSum.class,
                        runtimeArguments,
                        AppWithMapReduceUsingRuntimeDatasets.COUNTERS,
                        null);
}
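The INPUT_NAME / INPUT_PATHS / OUTPUT_NAME / OUTPUT_PATH arguments are resolved inside the MapReduce program at runtime rather than declared in the application. Below is a hypothetical sketch of how an initialize() method could do that with CDAP's dynamic dataset APIs (AbstractMapReduce, FileSetArguments, Input/Output); it is an illustration under assumptions, not the actual AppWithMapReduceUsingRuntimeDatasets source.

// Illustrative only; the real ComputeSum MapReduce may resolve its datasets differently.
public class ComputeSumSketch extends AbstractMapReduce {

  @Override
  protected void initialize() throws Exception {
    MapReduceContext context = getContext();
    Map<String, String> args = context.getRuntimeArguments();

    // Input FileSet name and (possibly comma-separated) paths come from runtime arguments.
    Map<String, String> inputArgs = new HashMap<>();
    for (String path : args.get(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS).split(",")) {
      FileSetArguments.addInputPath(inputArgs, path.trim());
    }
    context.addInput(Input.ofDataset(args.get(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME), inputArgs));

    // Output FileSet name and path likewise come from runtime arguments.
    Map<String, String> outputArgs = new HashMap<>();
    FileSetArguments.setOutputPath(outputArgs, args.get(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH));
    context.addOutput(Output.ofDataset(args.get(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME), outputArgs));

    // Mapper/reducer setup on context.getHadoopJob() omitted for brevity.
  }
}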
use of io.cdap.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
the class Spark2Test method testSparkWithObjectStore.
@Test
public void testSparkWithObjectStore() throws Exception {
  ApplicationManager applicationManager = deploy(NamespaceId.DEFAULT, SparkAppUsingObjectStore.class);
  DataSetManager<ObjectStore<String>> keysManager = getDataset("keys");
  prepareInputData(keysManager);
  SparkManager sparkManager = applicationManager.getSparkManager(CharCountProgram.class.getSimpleName()).start();
  sparkManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
  sparkManager.waitForStopped(60, TimeUnit.SECONDS);
  DataSetManager<KeyValueTable> countManager = getDataset("count");
  checkOutputData(countManager);
  // validate that the table emitted metrics
  // one read + one write in beforeSubmit(), increment (= read + write) in main -> 4
  Tasks.waitFor(4L, new Callable<Long>() {
    @Override
    public Long call() throws Exception {
      Collection<MetricTimeSeries> metrics = getMetricsManager().query(
        new MetricDataQuery(0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
                            "system." + Constants.Metrics.Name.Dataset.OP_COUNT,
                            AggregationFunction.SUM,
                            ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getNamespace(),
                                            Constants.Metrics.Tag.APP, SparkAppUsingObjectStore.class.getSimpleName(),
                                            Constants.Metrics.Tag.SPARK, CharCountProgram.class.getSimpleName(),
                                            Constants.Metrics.Tag.DATASET, "totals"),
                            Collections.<String>emptyList()));
      if (metrics.isEmpty()) {
        return 0L;
      }
      Assert.assertEquals(1, metrics.size());
      MetricTimeSeries ts = metrics.iterator().next();
      Assert.assertEquals(1, ts.getTimeValues().size());
      return ts.getTimeValues().get(0).getValue();
    }
  }, 10L, TimeUnit.SECONDS, 50L, TimeUnit.MILLISECONDS);
}
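The prepareInputData() and checkOutputData() helpers are not shown in this snippet. A possible sketch is below, assuming the Spark program writes the character count of each input key as a long into the "count" KeyValueTable; the keys and expected counts are placeholders, not the actual test fixtures.

// Hypothetical helpers: seed the "keys" ObjectStore and verify the "count" KeyValueTable.
private void prepareInputData(DataSetManager<ObjectStore<String>> manager) {
  ObjectStore<String> keys = manager.get();
  keys.write(Bytes.toBytes("cdap"), "cdap");     // each entry's characters get counted by the Spark program
  keys.write(Bytes.toBytes("spark"), "spark");
  manager.flush();                               // make the writes visible to the Spark run
}

private void checkOutputData(DataSetManager<KeyValueTable> manager) {
  KeyValueTable count = manager.get();
  Assert.assertEquals(4L, Bytes.toLong(count.read(Bytes.toBytes("cdap"))));
  Assert.assertEquals(5L, Bytes.toLong(count.read(Bytes.toBytes("spark"))));
}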
use of io.cdap.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
the class PreviewDataStreamsTest method getTotalMetric.
private long getTotalMetric(Map<String, String> tags, String metricName, PreviewManager previewManager) {
  MetricDataQuery query = new MetricDataQuery(0, 0, Integer.MAX_VALUE, metricName,
                                              AggregationFunction.SUM, tags, new ArrayList<String>());
  Collection<MetricTimeSeries> result = previewManager.getMetricsQueryHelper().getMetricStore().query(query);
  if (result.isEmpty()) {
    return 0;
  }
  List<TimeValue> timeValues = result.iterator().next().getTimeValues();
  if (timeValues.isEmpty()) {
    return 0;
  }
  return timeValues.get(0).getValue();
}
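A typical call site passes the tag context identifying the preview run plus the metric to sum. The sketch below is illustrative only; the tag values and metric name are placeholders for whatever the preview pipeline actually emits.

// Illustrative usage of getTotalMetric(); "previewApp" and the metric name are hypothetical.
Map<String, String> tags = ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, "default",
                                           Constants.Metrics.Tag.APP, "previewApp");
long recordsOut = getTotalMetric(tags, "user.source.records.out", previewManager);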
use of io.cdap.cdap.api.metrics.MetricTimeSeries in project cdap by caskdata.
the class ServiceLifeCycleTestRun method testContentConsumerLifecycle.
@Test
public void testContentConsumerLifecycle() throws Exception {
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    // Set to have one thread only for testing context capture and release
    serviceManager = appManager.getServiceManager("test")
      .start(ImmutableMap.of(SystemArguments.SERVICE_THREADS, "1"));
    CountDownLatch uploadLatch = new CountDownLatch(1);
    // Create five concurrent uploads
    List<ListenableFuture<Integer>> completions = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }
    // Get the states; there should be six handler instances initialized:
    // five for the in-progress uploads, one for the getStates call
    Tasks.waitFor(6, () -> getStates(serviceManager).size(), 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Finish the upload
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
    // Verify the result
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }
    // Get the states; there should still be six handler instances initialized
    final Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(6, states.size());
    // Do another round of six concurrent uploads. It should reuse all of the existing six contexts
    completions.clear();
    uploadLatch = new CountDownLatch(1);
    for (int i = 0; i < 6; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }
    // Get the states; there should be seven handler instances initialized:
    // six for the in-progress uploads, one for the getStates call.
    // Out of the seven states, six should be the same as the old ones
    Tasks.waitFor(true, () -> {
      Multimap<Integer, String> newStates = getStates(serviceManager);
      if (newStates.size() != 7) {
        return false;
      }
      for (Map.Entry<Integer, String> entry : states.entries()) {
        if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
          return false;
        }
      }
      return true;
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Complete the upload
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
    // Verify the result
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }
    // Query the queue size metrics. Expect the maximum to be 6, because only the six contexts from the
    // concurrent uploads get captured and added back to the queue, while the one created for the
    // getStates() call is retained in the thread cache and never enters the queue.
    Tasks.waitFor(6L, () -> {
      Map<String, String> context = ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, NamespaceId.DEFAULT.getNamespace(),
                                                    Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(),
                                                    Constants.Metrics.Tag.SERVICE, "test");
      MetricDataQuery metricQuery = new MetricDataQuery(0, Integer.MAX_VALUE, Integer.MAX_VALUE,
                                                        "system.context.pool.size", AggregationFunction.MAX,
                                                        context, Collections.emptyList());
      Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
      return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    serviceManager.stop();
    serviceManager.waitForStopped(10, TimeUnit.SECONDS);
  }
}
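The slowUpload() and getStates() helpers are not shown here. A possible sketch of slowUpload(), assuming it keeps a chunked HTTP request open until the latch is released (which is what lets each in-flight upload pin a handler context), is below; the real helper may differ.

// Hypothetical sketch: open a chunked request, write a little data, then hold the request
// open until the test counts down the latch, and finally return the response code.
private ListenableFuture<Integer> slowUpload(ServiceManager serviceManager, String method,
                                             String path, CountDownLatch latch) {
  ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
  try {
    return executor.submit(() -> {
      URL url = new URL(serviceManager.getServiceURL(10, TimeUnit.SECONDS), path);
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      try {
        conn.setDoOutput(true);
        conn.setRequestMethod(method);
        conn.setChunkedStreamingMode(16);
        try (OutputStream os = conn.getOutputStream()) {
          os.write("chunk".getBytes(StandardCharsets.UTF_8));
          os.flush();
          // Hold the connection open until the test releases the latch.
          latch.await();
        }
        return conn.getResponseCode();
      } finally {
        conn.disconnect();
      }
    });
  } finally {
    // Let the executor terminate once the single upload task completes.
    executor.shutdown();
  }
}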