
Example 41 with SparkManager

Use of io.cdap.cdap.test.SparkManager in project cdap by cdapio.

From the class DataStreamsTest, method testWindower.

@Test
public void testWindower() throws Exception {
    /*
     * source --> window(width=30,interval=1) --> aggregator --> filter --> sink
     */
    Schema schema = Schema.recordOf("data", Schema.Field.of("x", Schema.of(Schema.Type.STRING)));
    List<StructuredRecord> input = ImmutableList.of(
        StructuredRecord.builder(schema).set("x", "abc").build(),
        StructuredRecord.builder(schema).set("x", "abc").build(),
        StructuredRecord.builder(schema).set("x", "abc").build());
    String sinkName = "windowOut";
    // source sleeps 1 second between outputs
    DataStreamsConfig etlConfig = DataStreamsConfig.builder()
        .addStage(new ETLStage("source", MockSource.getPlugin(schema, input, 1000L)))
        .addStage(new ETLStage("window", Window.getPlugin(30, 1)))
        .addStage(new ETLStage("agg", FieldCountAggregator.getPlugin("x", "string")))
        .addStage(new ETLStage("filter", StringValueFilterTransform.getPlugin("x", "all")))
        .addStage(new ETLStage("sink", MockSink.getPlugin(sinkName)))
        .addConnection("source", "window")
        .addConnection("window", "agg")
        .addConnection("agg", "filter")
        .addConnection("filter", "sink")
        .setBatchInterval("1s")
        .setCheckpointDir(checkpointDir)
        .build();
    AppRequest<DataStreamsConfig> appRequest = new AppRequest<>(APP_ARTIFACT, etlConfig);
    ApplicationId appId = NamespaceId.DEFAULT.app("WindowerApp");
    ApplicationManager appManager = deployApplication(appId, appRequest);
    SparkManager sparkManager = appManager.getSparkManager(DataStreamsSparkLauncher.NAME);
    sparkManager.start();
    sparkManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
    // The sink should contain at least one record with a count of 3 and no record with a count above 3.
    // Counts can be below 3 while a window does not yet contain all 3 records, but eventually there
    // should be a window that contains all 3.
    final DataSetManager<Table> outputManager = getDataset(sinkName);
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            outputManager.flush();
            boolean sawThree = false;
            for (StructuredRecord record : MockSink.readOutput(outputManager)) {
                long count = record.get("ct");
                if (count == 3L) {
                    sawThree = true;
                }
                Assert.assertTrue(count <= 3L);
            }
            return sawThree;
        }
    }, 2, TimeUnit.MINUTES);
    sparkManager.stop();
}
Also used : ApplicationManager(io.cdap.cdap.test.ApplicationManager) SparkManager(io.cdap.cdap.test.SparkManager) Table(io.cdap.cdap.api.dataset.table.Table) Schema(io.cdap.cdap.api.data.schema.Schema) StructuredRecord(io.cdap.cdap.api.data.format.StructuredRecord) TimeoutException(java.util.concurrent.TimeoutException) TopicNotFoundException(io.cdap.cdap.api.messaging.TopicNotFoundException) DataStreamsConfig(io.cdap.cdap.etl.proto.v2.DataStreamsConfig) AppRequest(io.cdap.cdap.proto.artifact.AppRequest) ETLStage(io.cdap.cdap.etl.proto.v2.ETLStage) ApplicationId(io.cdap.cdap.proto.id.ApplicationId) Test(org.junit.Test)
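
For reference, the later examples in this listing express the same kind of sink check with a lambda instead of an anonymous Callable. A minimal sketch of the equivalent verification for this test (same dataset, field names, and timeout as above; not taken verbatim from the project):

    Tasks.waitFor(true, () -> {
        outputManager.flush();
        boolean sawThree = false;
        for (StructuredRecord record : MockSink.readOutput(outputManager)) {
            long count = record.get("ct");
            // no window should ever report a count above the three input records
            Assert.assertTrue(count <= 3L);
            sawThree = sawThree || count == 3L;
        }
        return sawThree;
    }, 2, TimeUnit.MINUTES);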

Example 42 with SparkManager

Use of io.cdap.cdap.test.SparkManager in project cdap by cdapio.

From the class DataStreamsTest, method testAutoJoin.

@Test
public void testAutoJoin() throws Exception {
    /*
     * customers ----------|
     *                     |
     *                     |---> join ---> sink
     *                     |
     * transactions -------|
     */
    Schema inputSchema1 = Schema.recordOf("customer",
        Schema.Field.of("customer_id", Schema.of(Schema.Type.STRING)),
        Schema.Field.of("customer_name", Schema.of(Schema.Type.STRING)));
    Schema inputSchema2 = Schema.recordOf("transaction",
        Schema.Field.of("t_id", Schema.of(Schema.Type.STRING)),
        Schema.Field.of("customer_id", Schema.of(Schema.Type.STRING)),
        Schema.Field.of("item_id", Schema.of(Schema.Type.STRING)));
    Schema outSchema = Schema.recordOf("customers.transactions",
        Schema.Field.of("customers_customer_id", Schema.nullableOf(Schema.of(Schema.Type.STRING))),
        Schema.Field.of("customers_customer_name", Schema.nullableOf(Schema.of(Schema.Type.STRING))),
        Schema.Field.of("transactions_t_id", Schema.of(Schema.Type.STRING)),
        Schema.Field.of("transactions_customer_id", Schema.of(Schema.Type.STRING)),
        Schema.Field.of("transactions_item_id", Schema.of(Schema.Type.STRING)));
    StructuredRecord recordSamuel = StructuredRecord.builder(inputSchema1).set("customer_id", "1").set("customer_name", "samuel").build();
    StructuredRecord recordBob = StructuredRecord.builder(inputSchema1).set("customer_id", "2").set("customer_name", "bob").build();
    StructuredRecord recordJane = StructuredRecord.builder(inputSchema1).set("customer_id", "3").set("customer_name", "jane").build();
    StructuredRecord tx1 = StructuredRecord.builder(inputSchema2).set("t_id", "1").set("customer_id", "1").set("item_id", "11").build();
    StructuredRecord tx2 = StructuredRecord.builder(inputSchema2).set("t_id", "2").set("customer_id", "3").set("item_id", "22").build();
    StructuredRecord tx3 = StructuredRecord.builder(inputSchema2).set("t_id", "3").set("customer_id", "4").set("item_id", "33").build();
    List<StructuredRecord> input1 = ImmutableList.of(recordSamuel, recordBob, recordJane);
    List<StructuredRecord> input2 = ImmutableList.of(tx1, tx2, tx3);
    String outputName = UUID.randomUUID().toString();
    DataStreamsConfig etlConfig = DataStreamsConfig.builder()
        .addStage(new ETLStage("customers", MockSource.getPlugin(inputSchema1, input1)))
        .addStage(new ETLStage("transactions", MockSource.getPlugin(inputSchema2, input2)))
        .addStage(new ETLStage("join", MockAutoJoiner.getPlugin(Arrays.asList("customers", "transactions"),
                                                                Collections.singletonList("customer_id"),
                                                                Collections.singletonList("transactions"),
                                                                Collections.emptyList(), Collections.emptyList(), true)))
        .addStage(new ETLStage("sink", MockSink.getPlugin(outputName)))
        .addConnection("customers", "join")
        .addConnection("transactions", "join")
        .addConnection("join", "sink")
        .setBatchInterval("5s")
        .setCheckpointDir(checkpointDir)
        .build();
    AppRequest<DataStreamsConfig> appRequest = new AppRequest<>(APP_ARTIFACT, etlConfig);
    ApplicationId appId = NamespaceId.DEFAULT.app("AutoJoinerApp");
    ApplicationManager appManager = deployApplication(appId, appRequest);
    SparkManager sparkManager = appManager.getSparkManager(DataStreamsSparkLauncher.NAME);
    sparkManager.start();
    sparkManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
    StructuredRecord join1 = StructuredRecord.builder(outSchema)
        .set("customers_customer_id", "1").set("customers_customer_name", "samuel")
        .set("transactions_t_id", "1").set("transactions_customer_id", "1").set("transactions_item_id", "11").build();
    StructuredRecord join2 = StructuredRecord.builder(outSchema)
        .set("customers_customer_id", "3").set("customers_customer_name", "jane")
        .set("transactions_t_id", "2").set("transactions_customer_id", "3").set("transactions_item_id", "22").build();
    StructuredRecord join3 = StructuredRecord.builder(outSchema)
        .set("transactions_t_id", "3").set("transactions_customer_id", "4").set("transactions_item_id", "33").build();
    Set<StructuredRecord> expected = ImmutableSet.of(join1, join2, join3);
    DataSetManager<Table> outputManager = getDataset(outputName);
    Tasks.waitFor(true, () -> {
        outputManager.flush();
        Set<StructuredRecord> outputRecords = new HashSet<>(MockSink.readOutput(outputManager));
        return expected.equals(outputRecords);
    }, 4, TimeUnit.MINUTES);
    sparkManager.stop();
    sparkManager.waitForStopped(10, TimeUnit.SECONDS);
}
Also used : ApplicationManager(io.cdap.cdap.test.ApplicationManager) SparkManager(io.cdap.cdap.test.SparkManager) Table(io.cdap.cdap.api.dataset.table.Table) Schema(io.cdap.cdap.api.data.schema.Schema) StructuredRecord(io.cdap.cdap.api.data.format.StructuredRecord) DataStreamsConfig(io.cdap.cdap.etl.proto.v2.DataStreamsConfig) AppRequest(io.cdap.cdap.proto.artifact.AppRequest) ETLStage(io.cdap.cdap.etl.proto.v2.ETLStage) ApplicationId(io.cdap.cdap.proto.id.ApplicationId) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 43 with SparkManager

Use of io.cdap.cdap.test.SparkManager in project cdap by cdapio.

From the class DataStreamsTest, method testStageConsolidation.

@Test
public void testStageConsolidation() throws Exception {
    /*
                                   |non-null port --> sink1
        items1 --> null splitter --|
                                   |null port--> sink2
                                                   ^
                 |--> filter out id == 0  ---------|
        items2 --|    |
                 |    |--> error collector --> sink3
                 |
                 |                 |--> filter out id == 1 --> sink4
                 |--> aggregator --|
                                   |--> filter out id == 2 --> sink5
     */
    Schema schema = Schema.recordOf("item", Schema.Field.of("id", Schema.nullableOf(Schema.of(Schema.Type.INT))));
    StructuredRecord itemNull = StructuredRecord.builder(schema).build();
    StructuredRecord item0 = StructuredRecord.builder(schema).set("id", 0).build();
    StructuredRecord item1 = StructuredRecord.builder(schema).set("id", 1).build();
    StructuredRecord item2 = StructuredRecord.builder(schema).set("id", 2).build();
    StructuredRecord item3 = StructuredRecord.builder(schema).set("id", 3).build();
    List<StructuredRecord> input1Records = Arrays.asList(itemNull, item3);
    List<StructuredRecord> input2Records = Arrays.asList(item0, item1, item2);
    File outputDir = TMP_FOLDER.newFolder();
    String output1 = new File(outputDir, "output1").getAbsolutePath();
    String output2 = new File(outputDir, "output2").getAbsolutePath();
    String output3 = new File(outputDir, "output3").getAbsolutePath();
    String output4 = new File(outputDir, "output4").getAbsolutePath();
    String output5 = new File(outputDir, "output5").getAbsolutePath();
    DataStreamsConfig config = DataStreamsConfig.builder()
        .addStage(new ETLStage("items1", MockSource.getPlugin(schema, input1Records)))
        .addStage(new ETLStage("items2", MockSource.getPlugin(schema, input2Records)))
        .addStage(new ETLStage("nullSplitter", NullFieldSplitterTransform.getPlugin("id")))
        .addStage(new ETLStage("filter0", IntValueFilterTransform.getPlugin("id", 0)))
        .addStage(new ETLStage("collector", FlattenErrorTransform.getPlugin()))
        .addStage(new ETLStage("filter1", IntValueFilterTransform.getPlugin("id", 1)))
        .addStage(new ETLStage("filter2", IntValueFilterTransform.getPlugin("id", 2)))
        .addStage(new ETLStage("identityAggregator", IdentityAggregator.getPlugin()))
        .addStage(new ETLStage("sink1", MockExternalSink.getPlugin(UUID.randomUUID().toString(), "s1", output1)))
        .addStage(new ETLStage("sink2", MockExternalSink.getPlugin(UUID.randomUUID().toString(), "s2", output2)))
        .addStage(new ETLStage("sink3", MockExternalSink.getPlugin(UUID.randomUUID().toString(), "s3", output3)))
        .addStage(new ETLStage("sink4", MockExternalSink.getPlugin(UUID.randomUUID().toString(), "s4", output4)))
        .addStage(new ETLStage("sink5", MockExternalSink.getPlugin(UUID.randomUUID().toString(), "s5", output5)))
        .addConnection("items1", "nullSplitter")
        .addConnection("nullSplitter", "sink1", "non-null")
        .addConnection("nullSplitter", "sink2", "null")
        .addConnection("items2", "filter0")
        .addConnection("items2", "identityAggregator")
        .addConnection("filter0", "sink2")
        .addConnection("filter0", "collector")
        .addConnection("identityAggregator", "filter1")
        .addConnection("identityAggregator", "filter2")
        .addConnection("collector", "sink3")
        .addConnection("filter1", "sink4")
        .addConnection("filter2", "sink5")
        .setProperties(Collections.singletonMap(io.cdap.cdap.etl.common.Constants.SPARK_PIPELINE_AUTOCACHE_ENABLE_FLAG, "false"))
        .build();
    AppRequest<DataStreamsConfig> appRequest = new AppRequest<>(APP_ARTIFACT, config);
    ApplicationId appId = NamespaceId.DEFAULT.app("StageConsolidationTest");
    ApplicationManager appManager = deployApplication(appId, appRequest);
    // run pipeline
    Map<String, String> args = Collections.singletonMap(io.cdap.cdap.etl.common.Constants.CONSOLIDATE_STAGES, "true");
    SparkManager sparkManager = appManager.getSparkManager(DataStreamsSparkLauncher.NAME);
    sparkManager.startAndWaitForGoodRun(args, ProgramRunStatus.RUNNING, 5, TimeUnit.MINUTES);
    Schema errorSchema = Schema.recordOf("erroritem",
        Schema.Field.of("id", Schema.nullableOf(Schema.of(Schema.Type.INT))),
        Schema.Field.of("errMsg", Schema.nullableOf(Schema.of(Schema.Type.STRING))),
        Schema.Field.of("errCode", Schema.nullableOf(Schema.of(Schema.Type.INT))),
        Schema.Field.of("errStage", Schema.nullableOf(Schema.of(Schema.Type.STRING))));
    StructuredRecord expectedError = StructuredRecord.builder(errorSchema)
        .set("id", 0)
        .set("errMsg", IntValueFilterTransform.ERROR_MESSAGE)
        .set("errCode", IntValueFilterTransform.ERROR_CODE)
        .set("errStage", "filter0")
        .build();
    Set<StructuredRecord> sink1Expected = Collections.singleton(item3);
    Set<StructuredRecord> sink2Expected = new HashSet<>(Arrays.asList(itemNull, item1, item2));
    Set<StructuredRecord> sink3Expected = Collections.singleton(expectedError);
    Set<StructuredRecord> sink4Expected = new HashSet<>(Arrays.asList(item0, item2));
    Set<StructuredRecord> sink5Expected = new HashSet<>(Arrays.asList(item0, item1));
    Tasks.waitFor(true, () ->
        sink1Expected.equals(new HashSet<>(MockExternalSink.readOutput(output1, schema)))
            && sink2Expected.equals(new HashSet<>(MockExternalSink.readOutput(output2, schema)))
            && sink3Expected.equals(new HashSet<>(MockExternalSink.readOutput(output3, errorSchema)))
            && sink4Expected.equals(new HashSet<>(MockExternalSink.readOutput(output4, schema)))
            && sink5Expected.equals(new HashSet<>(MockExternalSink.readOutput(output5, schema))),
        3, TimeUnit.MINUTES);
    sparkManager.stop();
    sparkManager.waitForStopped(1, TimeUnit.MINUTES);
    // check pipeline metrics
    validateMetric(appId, "nullSplitter.records.in", 2);
    validateMetric(appId, "nullSplitter.records.out.null", 1);
    validateMetric(appId, "nullSplitter.records.out.non-null", 1);
    validateMetric(appId, "filter0.records.out", 2);
    validateMetric(appId, "filter0.records.error", 1);
    validateMetric(appId, "identityAggregator.records.in", 3);
    validateMetric(appId, "identityAggregator.records.out", 3);
}
Also used : ApplicationManager(io.cdap.cdap.test.ApplicationManager) SparkManager(io.cdap.cdap.test.SparkManager) Schema(io.cdap.cdap.api.data.schema.Schema) StructuredRecord(io.cdap.cdap.api.data.format.StructuredRecord) DataStreamsConfig(io.cdap.cdap.etl.proto.v2.DataStreamsConfig) AppRequest(io.cdap.cdap.proto.artifact.AppRequest) ETLStage(io.cdap.cdap.etl.proto.v2.ETLStage) ApplicationId(io.cdap.cdap.proto.id.ApplicationId) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 44 with SparkManager

Use of io.cdap.cdap.test.SparkManager in project cdap by cdapio.

From the class ReportGenerationAppTest, method testReportExpiration.

@Test
public void testReportExpiration() throws Exception {
    NamespaceId testNamespace = new NamespaceId("reporting");
    NamespaceMeta reportingNamespace = new NamespaceMeta.Builder().setName(testNamespace).setDescription("Reporting namespace used to test report expiry").build();
    getNamespaceAdmin().create(reportingNamespace);
    long currentTimeMillis = System.currentTimeMillis();
    DatasetId datasetId = createAndInitializeDataset(testNamespace, currentTimeMillis);
    Map<String, String> runTimeArguments = new HashMap<>();
    // set the report expiration time to 1 second
    runTimeArguments.put(Constants.Report.REPORT_EXPIRY_TIME_SECONDS, "1");
    runTimeArguments.put(Constants.DISABLE_TMS_SUBSCRIBER_THREAD, "true");
    SparkManager sparkManager = deployAndStartReportingApplication(testNamespace, runTimeArguments);
    URL url = sparkManager.getServiceURL(1, TimeUnit.MINUTES);
    Assert.assertNotNull(url);
    URL reportURL = url.toURI().resolve("reports/").toURL();
    List<Filter> filters = ImmutableList.of(
        new ValueFilter<>(Constants.NAMESPACE, ImmutableSet.of("ns1", "ns2"), null),
        new RangeFilter<>(Constants.DURATION, new RangeFilter.Range<>(null, 500L)));
    long startSecs = TimeUnit.MILLISECONDS.toSeconds(currentTimeMillis);
    ReportGenerationRequest request = new ReportGenerationRequest("ns1_ns2_report", startSecs, startSecs + 30,
        new ArrayList<>(ReportField.FIELD_NAME_MAP.keySet()),
        ImmutableList.of(new Sort(Constants.DURATION, Sort.Order.DESCENDING)), filters);
    HttpURLConnection urlConn = (HttpURLConnection) reportURL.openConnection();
    urlConn.setDoOutput(true);
    urlConn.setRequestMethod("POST");
    urlConn.getOutputStream().write(GSON.toJson(request).getBytes(StandardCharsets.UTF_8));
    if (urlConn.getErrorStream() != null) {
        Assert.fail(Bytes.toString(ByteStreams.toByteArray(urlConn.getErrorStream())));
    }
    Assert.assertEquals(200, urlConn.getResponseCode());
    Map<String, String> reportIdMap = getResponseObject(urlConn, STRING_STRING_MAP);
    String reportId = reportIdMap.get("id");
    Assert.assertNotNull(reportId);
    // poll the reports list for up to 5 minutes and make sure it eventually returns 0 reports due to expiry
    Tasks.waitFor(0, () -> getReportsList(url), 5, TimeUnit.MINUTES, 1, TimeUnit.SECONDS);
    sparkManager.stop();
    sparkManager.waitForStopped(2, TimeUnit.MINUTES);
    deleteDatasetInstance(datasetId);
    getNamespaceAdmin().delete(testNamespace);
}
Also used : SparkManager(io.cdap.cdap.test.SparkManager) HashMap(java.util.HashMap) ReportGenerationRequest(io.cdap.cdap.report.proto.ReportGenerationRequest) URL(java.net.URL) DatasetId(io.cdap.cdap.proto.id.DatasetId) HttpURLConnection(java.net.HttpURLConnection) ValueFilter(io.cdap.cdap.report.proto.ValueFilter) RangeFilter(io.cdap.cdap.report.proto.RangeFilter) Filter(io.cdap.cdap.report.proto.Filter) NamespaceMeta(io.cdap.cdap.proto.NamespaceMeta) Sort(io.cdap.cdap.report.proto.Sort) NamespaceId(io.cdap.cdap.proto.id.NamespaceId) Test(org.junit.Test)

Example 45 with SparkManager

Use of io.cdap.cdap.test.SparkManager in project cdap by caskdata.

From the class Spark2Test, method testScalaSparkWithObjectStore.

@Test
public void testScalaSparkWithObjectStore() throws Exception {
    ApplicationManager applicationManager = deploy(NamespaceId.DEFAULT, SparkAppUsingObjectStore.class);
    DataSetManager<ObjectStore<String>> keysManager = getDataset("keys");
    prepareInputData(keysManager);
    SparkManager sparkManager = applicationManager.getSparkManager(ScalaCharCountProgram.class.getSimpleName()).start();
    sparkManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
    sparkManager.waitForStopped(60, TimeUnit.SECONDS);
    DataSetManager<KeyValueTable> countManager = getDataset("count");
    checkOutputData(countManager);
}
Also used : ApplicationManager(io.cdap.cdap.test.ApplicationManager) ObjectStore(io.cdap.cdap.api.dataset.lib.ObjectStore) SparkAppUsingObjectStore(io.cdap.cdap.spark.app.SparkAppUsingObjectStore) SparkManager(io.cdap.cdap.test.SparkManager) KeyValueTable(io.cdap.cdap.api.dataset.lib.KeyValueTable) Test(org.junit.Test)
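
Across all of these examples the SparkManager usage follows the same lifecycle: deploy the application, obtain the manager for the Spark program, start it, wait for the RUNNING state, verify results, then stop and wait for shutdown. A minimal sketch of that skeleton, with a placeholder app name and a hypothetical buildDataStreamsConfig() helper standing in for the config builders shown above:

@Test
public void sparkManagerLifecycleSketch() throws Exception {
    // buildDataStreamsConfig() is a hypothetical helper; a real test builds the config inline as in Examples 41-43.
    AppRequest<DataStreamsConfig> appRequest = new AppRequest<>(APP_ARTIFACT, buildDataStreamsConfig());
    ApplicationId appId = NamespaceId.DEFAULT.app("LifecycleSketchApp");
    ApplicationManager appManager = deployApplication(appId, appRequest);

    // Obtain the SparkManager for the streaming program and start it.
    SparkManager sparkManager = appManager.getSparkManager(DataStreamsSparkLauncher.NAME);
    sparkManager.start();
    sparkManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);

    // ... verify sink contents with Tasks.waitFor, as in the examples above ...

    // Stop the streaming program and wait until the run has fully shut down.
    sparkManager.stop();
    sparkManager.waitForStopped(1, TimeUnit.MINUTES);
}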

Aggregations

SparkManager (io.cdap.cdap.test.SparkManager): 84
ApplicationManager (io.cdap.cdap.test.ApplicationManager): 66
Test (org.junit.Test): 60
KeyValueTable (io.cdap.cdap.api.dataset.lib.KeyValueTable): 34
HashMap (java.util.HashMap): 31
StructuredRecord (io.cdap.cdap.api.data.format.StructuredRecord): 26
ApplicationId (io.cdap.cdap.proto.id.ApplicationId): 26
HashSet (java.util.HashSet): 25
Table (io.cdap.cdap.api.dataset.table.Table): 24
AppRequest (io.cdap.cdap.proto.artifact.AppRequest): 24
DataStreamsConfig (io.cdap.cdap.etl.proto.v2.DataStreamsConfig): 22
ETLStage (io.cdap.cdap.etl.proto.v2.ETLStage): 22
Schema (io.cdap.cdap.api.data.schema.Schema): 21
FileSet (io.cdap.cdap.api.dataset.lib.FileSet): 18
Location (org.apache.twill.filesystem.Location): 18
File (java.io.File): 17
WorkflowManager (io.cdap.cdap.test.WorkflowManager): 14
URL (java.net.URL): 14
PrintStream (java.io.PrintStream): 12
HttpURLConnection (java.net.HttpURLConnection): 12