
Example 1 with KeyValueTable

use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.

the class ServiceLifeCycleTestRun method testContentConsumerProducerLifecycle.

@Test
public void testContentConsumerProducerLifecycle() throws Exception {
    // Set to have one thread only for testing context capture and release
    System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
    try {
        ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
        final ServiceManager serviceManager = appManager.getServiceManager("test").start();
        final DataSetManager<KeyValueTable> datasetManager = getDataset(ServiceLifecycleApp.HANDLER_TABLE_NAME);
        // Clean up the dataset first to avoid being affected by other tests
        datasetManager.get().delete(Bytes.toBytes("called"));
        datasetManager.get().delete(Bytes.toBytes("completed"));
        datasetManager.flush();
        CountDownLatch uploadLatch = new CountDownLatch(1);
        // Create five concurrent uploads
        List<ListenableFuture<Integer>> completions = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            completions.add(slowUpload(serviceManager, "POST", "uploadDownload", uploadLatch));
        }
        // Get the states; there should be six handler instances initialized:
        // five for the in-progress uploads, one for the getStates call
        Tasks.waitFor(6, new Callable<Integer>() {

            @Override
            public Integer call() throws Exception {
                return getStates(serviceManager).size();
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Complete the upload
        uploadLatch.countDown();
        // Make sure the download through content producer has started
        Tasks.waitFor(true, new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                byte[] value = datasetManager.get().read("called");
                datasetManager.flush();
                if (value == null || value.length != Bytes.SIZEOF_LONG) {
                    return false;
                }
                return Bytes.toLong(value) > 5;
            }
        }, 10L, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Get the states; there should still be six handler instances, since the ContentConsumer should
        // be passing its captured context to the ContentProducer without creating a new one.
        Multimap<Integer, String> states = getStates(serviceManager);
        Assert.assertEquals(6, states.size());
        // Set the complete flag in the dataset
        datasetManager.get().write("completed", Bytes.toBytes(true));
        datasetManager.flush();
        // Wait for completion
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the upload result
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Get the states again; they should still be the same six instances
        Assert.assertEquals(states, getStates(serviceManager));
    } finally {
        System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
    }
}
Also used : ApplicationManager(co.cask.cdap.test.ApplicationManager) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) ServiceManager(co.cask.cdap.test.ServiceManager) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Test(org.junit.Test)
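
The test above drives the service's dataset from the outside through a DataSetManager: writes staged through the manager only become visible to the running service, and fresh reads only become visible to the test, after flush(). A minimal sketch of that pattern, assuming a test class extending CDAP's TestBase; the dataset name "state" is illustrative:

// Minimal sketch (assumed setup: a TestBase subclass, dataset "state" of type keyValueTable)
DataSetManager<KeyValueTable> manager = getDataset("state");
KeyValueTable table = manager.get();
// stage a write; it is committed, and visible to the running program, on flush()
table.write("completed", Bytes.toBytes(true));
manager.flush();
// flush() also refreshes the manager, so subsequent reads see the committed data
byte[] value = table.read("completed");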

Example 2 with KeyValueTable

use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.

the class SparkTestRun method testScalaSparkCrossNSStream.

@Test
public void testScalaSparkCrossNSStream() throws Exception {
    // create a namespace for the stream and create the stream in it
    NamespaceMeta crossNSStreamMeta = new NamespaceMeta.Builder().setName("streamSpaceForSpark").build();
    getNamespaceAdmin().create(crossNSStreamMeta);
    StreamManager streamManager = getStreamManager(crossNSStreamMeta.getNamespaceId().stream("testStream"));
    // create a namespace for the dataset and add the dataset instance to it
    NamespaceMeta crossNSDatasetMeta = new NamespaceMeta.Builder().setName("crossNSDataset").build();
    getNamespaceAdmin().create(crossNSDatasetMeta);
    addDatasetInstance(crossNSDatasetMeta.getNamespaceId().dataset("count"), "keyValueTable");
    // write something to the stream
    streamManager.createStream();
    for (int i = 0; i < 50; i++) {
        streamManager.send(String.valueOf(i));
    }
    // deploy the Spark app in another namespace (the default one)
    ApplicationManager applicationManager = deploy(SparkAppUsingObjectStore.class);
    Map<String, String> args = ImmutableMap.of(ScalaCrossNSProgram.STREAM_NAMESPACE(), crossNSStreamMeta.getNamespaceId().getNamespace(), ScalaCrossNSProgram.DATASET_NAMESPACE(), crossNSDatasetMeta.getNamespaceId().getNamespace(), ScalaCrossNSProgram.DATASET_NAME(), "count");
    SparkManager sparkManager = applicationManager.getSparkManager(ScalaCrossNSProgram.class.getSimpleName()).start(args);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES);
    // get the dataset from the other namespace where we expect it to exist and compare the data
    DataSetManager<KeyValueTable> countManager = getDataset(crossNSDatasetMeta.getNamespaceId().dataset("count"));
    KeyValueTable results = countManager.get();
    for (int i = 0; i < 50; i++) {
        byte[] key = String.valueOf(i).getBytes(Charsets.UTF_8);
        Assert.assertArrayEquals(key, results.read(key));
    }
}
Also used : ApplicationManager(co.cask.cdap.test.ApplicationManager) SparkManager(co.cask.cdap.test.SparkManager) NamespaceMeta(co.cask.cdap.proto.NamespaceMeta) StreamManager(co.cask.cdap.test.StreamManager) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) Test(org.junit.Test)
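
The cross-namespace plumbing in this test follows one pattern: build a NamespaceMeta, create the namespace, then address datasets in it through its NamespaceId. A condensed sketch using the same TestBase helpers the example calls; the namespace and dataset names here are illustrative:

// Sketch of cross-namespace dataset addressing (names "otherNS"/"counts" are illustrative)
NamespaceMeta meta = new NamespaceMeta.Builder().setName("otherNS").build();
getNamespaceAdmin().create(meta);
// "keyValueTable" is the dataset type; "counts" is the instance name
addDatasetInstance(meta.getNamespaceId().dataset("counts"), "keyValueTable");
// the same DatasetId addresses the instance when reading it back later
DataSetManager<KeyValueTable> manager = getDataset(meta.getNamespaceId().dataset("counts"));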

Example 3 with KeyValueTable

use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.

the class SparkTestRun method testClassicSpark.

@Test
public void testClassicSpark() throws Exception {
    ApplicationManager appManager = deploy(TestSparkApp.class);
    for (Class<?> sparkClass : Arrays.asList(TestSparkApp.ClassicSpark.class, TestSparkApp.ScalaClassicSpark.class)) {
        final SparkManager sparkManager = appManager.getSparkManager(sparkClass.getSimpleName()).start();
        sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
    }
    KeyValueTable resultTable = this.<KeyValueTable>getDataset("ResultTable").get();
    Assert.assertEquals(1L, Bytes.toLong(resultTable.read(ClassicSparkProgram.class.getName())));
    Assert.assertEquals(1L, Bytes.toLong(resultTable.read(ScalaClassicSparkProgram.class.getName())));
}
Also used : ApplicationManager(co.cask.cdap.test.ApplicationManager) SparkManager(co.cask.cdap.test.SparkManager) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) TestSparkApp(co.cask.cdap.spark.app.TestSparkApp) ScalaClassicSparkProgram(co.cask.cdap.spark.app.ScalaClassicSparkProgram) ClassicSparkProgram(co.cask.cdap.spark.app.ClassicSparkProgram) Test(org.junit.Test)
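
The this.<KeyValueTable>getDataset("ResultTable").get() call uses an explicit type witness because the result is chained rather than assigned, so Java cannot infer the generic parameter on its own. An equivalent two-step form, shown only for comparison:

// Equivalent without the type witness: the assignment target drives inference
DataSetManager<KeyValueTable> resultManager = getDataset("ResultTable");
KeyValueTable resultTable = resultManager.get();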

Example 4 with KeyValueTable

use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.

the class SparkTestRun method testScalaSparkCrossNSDataset.

@Test
public void testScalaSparkCrossNSDataset() throws Exception {
    // Create the namespace datasetSpaceForSpark and deploy the app, along with its datasets, into it
    NamespaceMeta inputDSNSMeta = new NamespaceMeta.Builder().setName("datasetSpaceForSpark").build();
    getNamespaceAdmin().create(inputDSNSMeta);
    deploy(inputDSNSMeta.getNamespaceId(), SparkAppUsingObjectStore.class);
    DataSetManager<ObjectStore<String>> keysManager = getDataset(inputDSNSMeta.getNamespaceId().dataset("keys"));
    prepareInputData(keysManager);
    Map<String, String> args = ImmutableMap.of(ScalaCharCountProgram.INPUT_DATASET_NAMESPACE(), inputDSNSMeta.getNamespaceId().getNamespace(), ScalaCharCountProgram.INPUT_DATASET_NAME(), "keys");
    ApplicationManager applicationManager = deploy(SparkAppUsingObjectStore.class);
    SparkManager sparkManager = applicationManager.getSparkManager(ScalaCharCountProgram.class.getSimpleName()).start(args);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES);
    DataSetManager<KeyValueTable> countManager = getDataset("count");
    checkOutputData(countManager);
}
Also used : ObjectStore(co.cask.cdap.api.dataset.lib.ObjectStore) SparkAppUsingObjectStore(co.cask.cdap.spark.app.SparkAppUsingObjectStore) ApplicationManager(co.cask.cdap.test.ApplicationManager) SparkManager(co.cask.cdap.test.SparkManager) NamespaceMeta(co.cask.cdap.proto.NamespaceMeta) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) Test(org.junit.Test)
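
Unlike the other examples, the input dataset here is an ObjectStore<String> rather than a KeyValueTable (prepareInputData is a helper whose body this listing elides). A minimal sketch of seeding such a store through its DataSetManager, assuming ObjectStore's write(byte[], T) method; the keys and values are illustrative, not the helper's real contents:

// Hypothetical seeding code; prepareInputData's actual body is not shown in this listing
ObjectStore<String> keys = keysManager.get();
keys.write(Bytes.toBytes("cdap"), "cdap");
keys.write(Bytes.toBytes("spark"), "spark");
keysManager.flush();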

Example 5 with KeyValueTable

use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.

the class SparkTestRun method testSparkWithGetDataset.

private void testSparkWithGetDataset(Class<? extends Application> appClass, String sparkProgram) throws Exception {
    ApplicationManager applicationManager = deploy(appClass);
    DataSetManager<FileSet> filesetManager = getDataset("logs");
    FileSet fileset = filesetManager.get();
    Location location = fileset.getLocation("nn");
    prepareInputFileSetWithLogData(location);
    Map<String, String> inputArgs = new HashMap<>();
    FileSetArguments.setInputPath(inputArgs, "nn");
    Map<String, String> args = new HashMap<>();
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, "logs", inputArgs));
    args.put("input", "logs");
    args.put("output", "logStats");
    SparkManager sparkManager = applicationManager.getSparkManager(sparkProgram).start(args);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);
    DataSetManager<KeyValueTable> logStatsManager = getDataset("logStats");
    KeyValueTable logStatsTable = logStatsManager.get();
    validateGetDatasetOutput(logStatsTable);
    // Clean up after the run
    location.delete(true);
    logStatsManager.flush();
    try (CloseableIterator<KeyValue<byte[], byte[]>> scan = logStatsTable.scan(null, null)) {
        while (scan.hasNext()) {
            logStatsTable.delete(scan.next().getKey());
        }
    }
    logStatsManager.flush();
}
Also used : ApplicationManager(co.cask.cdap.test.ApplicationManager) SparkManager(co.cask.cdap.test.SparkManager) KeyValue(co.cask.cdap.api.dataset.lib.KeyValue) FileSet(co.cask.cdap.api.dataset.lib.FileSet) IdentityHashMap(java.util.IdentityHashMap) HashMap(java.util.HashMap) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) Location(org.apache.twill.filesystem.Location)
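
The tail of this method shows the idiomatic way to empty a KeyValueTable between runs: scan the full key range, delete each key, then flush. Extracted into a reusable form built from the same calls used above; the helper name clearKeyValueTable is hypothetical:

// Hypothetical helper: remove every row from a KeyValueTable and commit the deletes
private void clearKeyValueTable(DataSetManager<KeyValueTable> manager) {
    KeyValueTable table = manager.get();
    // scan(null, null) iterates the entire key range
    try (CloseableIterator<KeyValue<byte[], byte[]>> scan = table.scan(null, null)) {
        while (scan.hasNext()) {
            table.delete(scan.next().getKey());
        }
    }
    // commit the staged deletes
    manager.flush();
}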

Aggregations

KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable) 84
Test (org.junit.Test) 49
ApplicationManager (co.cask.cdap.test.ApplicationManager) 45
SparkManager (co.cask.cdap.test.SparkManager) 25
StreamManager (co.cask.cdap.test.StreamManager) 16
IOException (java.io.IOException) 16
TransactionExecutor (org.apache.tephra.TransactionExecutor) 12
ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) 11
HashMap (java.util.HashMap) 11
ArrayList (java.util.ArrayList) 10
FileSet (co.cask.cdap.api.dataset.lib.FileSet) 8
KeyValue (co.cask.cdap.api.dataset.lib.KeyValue) 8
Table (co.cask.cdap.api.dataset.table.Table) 8
NamespaceMeta (co.cask.cdap.proto.NamespaceMeta) 8
ObjectStore (co.cask.cdap.api.dataset.lib.ObjectStore) 7
MapReduceManager (co.cask.cdap.test.MapReduceManager) 7
ServiceManager (co.cask.cdap.test.ServiceManager) 7
WorkflowManager (co.cask.cdap.test.WorkflowManager) 7
Set (java.util.Set) 7
Category (org.junit.experimental.categories.Category) 7