Example 36 with MetricDataQuery

Use of co.cask.cdap.api.metrics.MetricDataQuery in project cdap by caskdata.

From class ServiceLifeCycleTestRun, method testContentConsumerLifecycle.

@Test
public void testContentConsumerLifecycle() throws Exception {
    try {
        ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
        // Run the service with a single thread so that context capture and release can be tested
        serviceManager = appManager.getServiceManager("test").start(ImmutableMap.of(SystemArguments.SERVICE_THREADS, "1"));
        CountDownLatch uploadLatch = new CountDownLatch(1);
        // Create five concurrent uploads
        List<ListenableFuture<Integer>> completions = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
        }
        // Get the states; there should be six handler instances initialized:
        // five for the in-progress uploads, one for the getStates call
        Tasks.waitFor(6, () -> getStates(serviceManager).size(), 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Finish the upload
        uploadLatch.countDown();
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the result
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Get the states, there should still be six handler instances initialized.
        final Multimap<Integer, String> states = getStates(serviceManager);
        Assert.assertEquals(6, states.size());
        // Do another round of six concurrent uploads. They should reuse all six existing contexts
        completions.clear();
        uploadLatch = new CountDownLatch(1);
        for (int i = 0; i < 6; i++) {
            completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
        }
        // Get the states; there should be seven handler instances initialized:
        // six for the in-progress uploads, one for the getStates call.
        // Of the seven states, six should be the same as the old ones
        Tasks.waitFor(true, () -> {
            Multimap<Integer, String> newStates = getStates(serviceManager);
            if (newStates.size() != 7) {
                return false;
            }
            for (Map.Entry<Integer, String> entry : states.entries()) {
                if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
                    return false;
                }
            }
            return true;
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Complete the upload
        uploadLatch.countDown();
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the result
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Query the queue size metric. Expect the maximum to be 6.
        // This is because only the six contexts from the concurrent uploads will get captured and added back to the queue,
        // while the one created for the getStates() call will stay in the thread cache, but not in the queue.
        Tasks.waitFor(6L, () -> {
            Map<String, String> context = ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, NamespaceId.DEFAULT.getNamespace(), Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(), Constants.Metrics.Tag.SERVICE, "test");
            MetricDataQuery metricQuery = new MetricDataQuery(0, Integer.MAX_VALUE, Integer.MAX_VALUE, "system.context.pool.size", AggregationFunction.MAX, context, Collections.emptyList());
            Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
            return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    } finally {
        serviceManager.stop();
        serviceManager.waitForStatus(false);
    }
}
Also used : ApplicationManager(co.cask.cdap.test.ApplicationManager) ArrayList(java.util.ArrayList) MetricTimeSeries(co.cask.cdap.api.metrics.MetricTimeSeries) ServiceLifecycleApp(co.cask.cdap.test.app.ServiceLifecycleApp) CountDownLatch(java.util.concurrent.CountDownLatch) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) MetricDataQuery(co.cask.cdap.api.metrics.MetricDataQuery) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
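
For reference, the seven-argument MetricDataQuery constructor used above takes a start timestamp, an end timestamp, a resolution, a metric name, an aggregation function, a tag context, and a group-by tag list. A minimal sketch of the same aggregate query follows; the namespace and app tag values are hypothetical placeholders.

// A minimal sketch of the aggregate query built in the test above.
// The namespace and app values are hypothetical placeholders.
Map<String, String> tags = ImmutableMap.of(
    Constants.Metrics.Tag.NAMESPACE, "default",  // hypothetical namespace
    Constants.Metrics.Tag.APP, "MyApp",          // hypothetical app name
    Constants.Metrics.Tag.SERVICE, "test");
MetricDataQuery query = new MetricDataQuery(
    0,                         // start timestamp, in seconds
    Integer.MAX_VALUE,         // end timestamp, in seconds
    Integer.MAX_VALUE,         // resolution: a single aggregated bucket
    "system.context.pool.size",
    AggregationFunction.MAX,   // take the maximum observed value
    tags,
    Collections.emptyList());  // no group-by tags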

Example 37 with MetricDataQuery

Use of co.cask.cdap.api.metrics.MetricDataQuery in project cdap by caskdata.

From class CDAPTransactions, method collect.

@Override
public void collect() throws Exception {
    Collection<MetricTimeSeries> collection = metricStore.query(new MetricDataQuery(0, 0, Integer.MAX_VALUE, Integer.MAX_VALUE, METRICS, Constants.Metrics.TRANSACTION_MANAGER_CONTEXT, Collections.<String>emptyList(), null));
    for (MetricTimeSeries metricTimeSeries : collection) {
        if (metricTimeSeries.getMetricName().equals("system.committing.size")) {
            numCommittingChangeSets = (int) aggregateMetricValue(metricTimeSeries);
        }
        if (metricTimeSeries.getMetricName().equals("system.committed.size")) {
            numCommittedChangeSets = (int) aggregateMetricValue(metricTimeSeries);
        }
    }
    Transaction transaction = txClient.startShort();
    readPointer = transaction.getReadPointer();
    writePointer = transaction.getWritePointer();
    numInProgressTx = transaction.getInProgress().length;
    numInvalidTx = transaction.getInvalids().length;
    txClient.abort(transaction);
}
Also used : Transaction(org.apache.tephra.Transaction) MetricTimeSeries(co.cask.cdap.api.metrics.MetricTimeSeries) MetricDataQuery(co.cask.cdap.api.metrics.MetricDataQuery)
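
The aggregateMetricValue helper called above is not shown on this page. A plausible sketch, assuming it simply sums every time value in the series; the body is an assumption, not the project's actual implementation.

// Hypothetical sketch of the aggregateMetricValue helper called above;
// assumes it sums all TimeValue entries in the series.
private long aggregateMetricValue(MetricTimeSeries metricTimeSeries) {
    long total = 0L;
    for (TimeValue timeValue : metricTimeSeries.getTimeValues()) {
        total += timeValue.getValue();
    }
    return total;
}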

Example 38 with MetricDataQuery

Use of co.cask.cdap.api.metrics.MetricDataQuery in project cdap by caskdata.

From class WorkflowStatsSLAHttpHandler, method getSparkDetails.

private Map<String, Long> getSparkDetails(ProgramId sparkProgram, String runId) throws Exception {
    Map<String, String> context = new HashMap<>();
    context.put(Constants.Metrics.Tag.NAMESPACE, sparkProgram.getNamespace());
    context.put(Constants.Metrics.Tag.APP, sparkProgram.getApplication());
    context.put(Constants.Metrics.Tag.SPARK, sparkProgram.getProgram());
    context.put(Constants.Metrics.Tag.RUN_ID, runId);
    List<TagValue> tags = new ArrayList<>();
    for (Map.Entry<String, String> entry : context.entrySet()) {
        tags.add(new TagValue(entry.getKey(), entry.getValue()));
    }
    MetricSearchQuery metricSearchQuery = new MetricSearchQuery(0, 0, Integer.MAX_VALUE, tags);
    Collection<String> metricNames = metricStore.findMetricNames(metricSearchQuery);
    Map<String, Long> overallResult = new HashMap<>();
    for (String metricName : metricNames) {
        Collection<MetricTimeSeries> resultPerQuery = metricStore.query(new MetricDataQuery(0, 0, Integer.MAX_VALUE, metricName, AggregationFunction.SUM, context, new ArrayList<String>()));
        for (MetricTimeSeries metricTimeSeries : resultPerQuery) {
            overallResult.put(metricTimeSeries.getMetricName(), metricTimeSeries.getTimeValues().get(0).getValue());
        }
    }
    return overallResult;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MetricTimeSeries(co.cask.cdap.api.metrics.MetricTimeSeries) MetricSearchQuery(co.cask.cdap.api.metrics.MetricSearchQuery) TagValue(co.cask.cdap.api.metrics.TagValue) MetricDataQuery(co.cask.cdap.api.metrics.MetricDataQuery) HashMap(java.util.HashMap) Map(java.util.Map)
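
Because the query above uses Integer.MAX_VALUE as the resolution, each returned MetricTimeSeries carries a single aggregated TimeValue, which is why the loop reads getTimeValues().get(0) directly. A slightly more defensive version of that read, as a sketch; the helper name is illustrative, not from the project.

// Illustrative sketch: read the single aggregated value of a series
// returned by an Integer.MAX_VALUE-resolution query, defaulting to 0
// if the series is unexpectedly empty.
private long singleAggregatedValue(MetricTimeSeries series) {
    List<TimeValue> timeValues = series.getTimeValues();
    return timeValues.isEmpty() ? 0L : timeValues.get(0).getValue();
}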

Example 39 with MetricDataQuery

Use of co.cask.cdap.api.metrics.MetricDataQuery in project cdap by caskdata.

From class MapReduceProgramRunnerTest, method testMapreduceWithDynamicDatasets.

@Test
public void testMapreduceWithDynamicDatasets() throws Exception {
    DatasetId rtInput1 = DefaultId.NAMESPACE.dataset("rtInput1");
    DatasetId rtInput2 = DefaultId.NAMESPACE.dataset("rtInput2");
    DatasetId rtOutput1 = DefaultId.NAMESPACE.dataset("rtOutput1");
    // Create the datasets here because they are not created by the app
    dsFramework.addInstance("fileSet", rtInput1, FileSetProperties.builder().setBasePath("rtInput1").setInputFormat(TextInputFormat.class).setOutputFormat(TextOutputFormat.class).setOutputProperty(TextOutputFormat.SEPERATOR, ":").build());
    dsFramework.addInstance("fileSet", rtOutput1, FileSetProperties.builder().setBasePath("rtOutput1").setInputFormat(TextInputFormat.class).setOutputFormat(TextOutputFormat.class).setOutputProperty(TextOutputFormat.SEPERATOR, ":").build());
    // build runtime args for app
    Map<String, String> runtimeArguments = Maps.newHashMap();
    // Make sure only one mapper runs at a time, since this test has the Mapper writing
    // to a dataset using increments, and the in-memory table doesn't support concurrent increments
    runtimeArguments.put("mr.job.conf.mapreduce.local.map.tasks.maximum", "1");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME, "rtInput1");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS, "abc, xyz");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME, "rtOutput1");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH, "a001");
    // test reading and writing distinct datasets, reading more than one path
    testMapreduceWithFile("rtInput1", "abc, xyz", "rtOutput1", "a001", AppWithMapReduceUsingRuntimeDatasets.class, AppWithMapReduceUsingRuntimeDatasets.ComputeSum.class, runtimeArguments, AppWithMapReduceUsingRuntimeDatasets.COUNTERS, null);
    // validate that the table emitted metrics
    Collection<MetricTimeSeries> metrics = metricStore.query(new MetricDataQuery(0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE, "system." + Constants.Metrics.Name.Dataset.OP_COUNT, AggregationFunction.SUM, ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getNamespace(), Constants.Metrics.Tag.APP, AppWithMapReduceUsingRuntimeDatasets.APP_NAME, Constants.Metrics.Tag.MAPREDUCE, AppWithMapReduceUsingRuntimeDatasets.MR_NAME, Constants.Metrics.Tag.DATASET, "rtt"), Collections.<String>emptyList()));
    Assert.assertEquals(1, metrics.size());
    MetricTimeSeries ts = metrics.iterator().next();
    Assert.assertEquals(1, ts.getTimeValues().size());
    Assert.assertEquals(1, ts.getTimeValues().get(0).getValue());
    // test reading and writing same dataset
    dsFramework.addInstance("fileSet", rtInput2, FileSetProperties.builder().setBasePath("rtInput2").setInputFormat(TextInputFormat.class).setOutputFormat(TextOutputFormat.class).setOutputProperty(TextOutputFormat.SEPERATOR, ":").build());
    runtimeArguments = Maps.newHashMap();
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_NAME, "rtInput2");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.INPUT_PATHS, "zzz");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_NAME, "rtInput2");
    runtimeArguments.put(AppWithMapReduceUsingRuntimeDatasets.OUTPUT_PATH, "f123");
    testMapreduceWithFile("rtInput2", "zzz", "rtInput2", "f123", AppWithMapReduceUsingRuntimeDatasets.class, AppWithMapReduceUsingRuntimeDatasets.ComputeSum.class, runtimeArguments, AppWithMapReduceUsingRuntimeDatasets.COUNTERS, null);
}
Also used : TextOutputFormat(org.apache.hadoop.mapreduce.lib.output.TextOutputFormat) MetricTimeSeries(co.cask.cdap.api.metrics.MetricTimeSeries) MetricDataQuery(co.cask.cdap.api.metrics.MetricDataQuery) DatasetId(co.cask.cdap.proto.id.DatasetId) Test(org.junit.Test)
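
The dataset op-count query above also recurs in the next example. As a sketch, the pattern could be factored into a helper like this; the method name and signature are assumptions for illustration, not code from the project.

// Illustrative helper wrapping the recurring dataset op-count query;
// the name and signature are assumptions for this sketch.
private Collection<MetricTimeSeries> queryDatasetOpCount(MetricStore metricStore,
                                                         Map<String, String> tags) {
    return metricStore.query(new MetricDataQuery(
        0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
        "system." + Constants.Metrics.Name.Dataset.OP_COUNT,
        AggregationFunction.SUM, tags, Collections.<String>emptyList()));
}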

Example 40 with MetricDataQuery

Use of co.cask.cdap.api.metrics.MetricDataQuery in project cdap by caskdata.

From class WorkerProgramRunnerTest, method testWorkerDatasetWithMetrics.

@Test
public void testWorkerDatasetWithMetrics() throws Throwable {
    final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(AppWithWorker.class, TEMP_FOLDER_SUPPLIER);
    ProgramController controller = startProgram(app, AppWithWorker.TableWriter.class);
    // Validate that the worker wrote the "initialize" and "run" rows
    final TransactionExecutor executor = txExecutorFactory.createExecutor(datasetCache);
    // wait at most 5 seconds until the "RUN" row is set (indicates the worker has started running)
    Tasks.waitFor(AppWithWorker.RUN, new Callable<String>() {

        @Override
        public String call() throws Exception {
            return executor.execute(new Callable<String>() {

                @Override
                public String call() throws Exception {
                    KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
                    return Bytes.toString(kvTable.read(AppWithWorker.RUN));
                }
            });
        }
    }, 5, TimeUnit.SECONDS);
    stopProgram(controller);
    txExecutorFactory.createExecutor(datasetCache.getTransactionAwares()).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
            Assert.assertEquals(AppWithWorker.RUN, Bytes.toString(kvTable.read(AppWithWorker.RUN)));
            Assert.assertEquals(AppWithWorker.INITIALIZE, Bytes.toString(kvTable.read(AppWithWorker.INITIALIZE)));
            Assert.assertEquals(AppWithWorker.STOP, Bytes.toString(kvTable.read(AppWithWorker.STOP)));
        }
    });
    // validate that the table emitted metrics
    Tasks.waitFor(3L, new Callable<Long>() {

        @Override
        public Long call() throws Exception {
            Collection<MetricTimeSeries> metrics = metricStore.query(new MetricDataQuery(0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE, "system." + Constants.Metrics.Name.Dataset.OP_COUNT, AggregationFunction.SUM, ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getEntityName(), Constants.Metrics.Tag.APP, AppWithWorker.NAME, Constants.Metrics.Tag.WORKER, AppWithWorker.WORKER, Constants.Metrics.Tag.DATASET, AppWithWorker.DATASET), Collections.<String>emptyList()));
            if (metrics.isEmpty()) {
                return 0L;
            }
            Assert.assertEquals(1, metrics.size());
            MetricTimeSeries ts = metrics.iterator().next();
            Assert.assertEquals(1, ts.getTimeValues().size());
            return ts.getTimeValues().get(0).getValue();
        }
    }, 5L, TimeUnit.SECONDS, 50L, TimeUnit.MILLISECONDS);
}
Also used : ProgramController(co.cask.cdap.app.runtime.ProgramController) MetricTimeSeries(co.cask.cdap.api.metrics.MetricTimeSeries) TransactionExecutor(org.apache.tephra.TransactionExecutor) AppWithWorker(co.cask.cdap.AppWithWorker) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) ApplicationWithPrograms(co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) Collection(java.util.Collection) MetricDataQuery(co.cask.cdap.api.metrics.MetricDataQuery) Test(org.junit.Test)
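
Example 36 above shows that Tasks.waitFor also accepts a lambda in place of the anonymous Callable. The final metrics wait in this test could be written more compactly in that style; a sketch of the same polling logic, with the intermediate assertions omitted for brevity and "query" standing in for the MetricDataQuery built in the test.

// Sketch: the metrics wait above rewritten with a lambda, mirroring the
// style of Example 36. The MetricDataQuery construction is unchanged
// and abbreviated here as "query".
Tasks.waitFor(3L, () -> {
    Collection<MetricTimeSeries> metrics = metricStore.query(query);
    if (metrics.isEmpty()) {
        return 0L;
    }
    return metrics.iterator().next().getTimeValues().get(0).getValue();
}, 5L, TimeUnit.SECONDS, 50L, TimeUnit.MILLISECONDS);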

Aggregations

MetricDataQuery (co.cask.cdap.api.metrics.MetricDataQuery): 42
Test (org.junit.Test): 25
MetricTimeSeries (co.cask.cdap.api.metrics.MetricTimeSeries): 21
TimeValue (co.cask.cdap.api.dataset.lib.cube.TimeValue): 11
IOException (java.io.IOException): 5
ArrayList (java.util.ArrayList): 5
Map (java.util.Map): 5
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 4
ApplicationManager (co.cask.cdap.test.ApplicationManager): 4
AggregationFunction (co.cask.cdap.api.dataset.lib.cube.AggregationFunction): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 3
Collection (java.util.Collection): 3
ObjectStore (co.cask.cdap.api.dataset.lib.ObjectStore): 2
MetricStore (co.cask.cdap.api.metrics.MetricStore): 2
QueueName (co.cask.cdap.common.queue.QueueName): 2
DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework): 2
SparkAppUsingObjectStore (co.cask.cdap.spark.app.SparkAppUsingObjectStore): 2
SparkManager (co.cask.cdap.test.SparkManager): 2
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2
LoggerContext (ch.qos.logback.classic.LoggerContext): 1