Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
From class SparkTestRun, method testDynamicSpark.
@Test
public void testDynamicSpark() throws Exception {
  ApplicationManager appManager = deploy(TestSparkApp.class);
  // Populate data into the stream
  StreamManager streamManager = getStreamManager("SparkStream");
  for (int i = 0; i < 10; i++) {
    streamManager.send("Line " + (i + 1));
  }
  SparkManager sparkManager = appManager.getSparkManager(ScalaDynamicSpark.class.getSimpleName());
  sparkManager.start(ImmutableMap.of("input", "SparkStream",
                                     "output", "ResultTable",
                                     "tmpdir", TMP_FOLDER.newFolder().getAbsolutePath()));
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
  // Validate the result written to the dataset
  KeyValueTable resultTable = this.<KeyValueTable>getDataset("ResultTable").get();
  // The word "Line" should appear ten times
  Assert.assertEquals(10, Bytes.toInt(resultTable.read("Line")));
  // Each number from 1 to 10 should appear exactly once
  for (int i = 0; i < 10; i++) {
    Assert.assertEquals(1, Bytes.toInt(resultTable.read(Integer.toString(i + 1))));
  }
}
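The assertions above make sense if ScalaDynamicSpark performs a word count over the stream events, which the source does not show here. Under that assumption, the expected table contents can be derived from the input at the end of the test body; a minimal sketch, not part of the original test (requires java.util.Map and java.util.HashMap):

  // Assumes the Spark program is a word count; each event is "Line <n>",
  // so "Line" occurs ten times and each number 1..10 occurs once.
  Map<String, Integer> expectedCounts = new HashMap<>();
  for (int i = 0; i < 10; i++) {
    for (String word : ("Line " + (i + 1)).split(" ")) {
      expectedCounts.merge(word, 1, Integer::sum);
    }
  }
  for (Map.Entry<String, Integer> entry : expectedCounts.entrySet()) {
    Assert.assertEquals((int) entry.getValue(),
                        Bytes.toInt(resultTable.read(entry.getKey())));
  }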
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
From class SparkTestRun, method testSparkWithLocalFiles.
private void testSparkWithLocalFiles(Class<? extends Application> appClass,
                                     String sparkProgram, String prefix) throws Exception {
  ApplicationManager applicationManager = deploy(appClass);
  URI localFile = createLocalPropertiesFile(prefix);
  SparkManager sparkManager = applicationManager.getSparkManager(sparkProgram)
    .start(Collections.singletonMap(SparkAppUsingLocalFiles.LOCAL_FILE_RUNTIME_ARG,
                                    localFile.toString()));
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);
  DataSetManager<KeyValueTable> kvTableManager = getDataset(SparkAppUsingLocalFiles.OUTPUT_DATASET_NAME);
  KeyValueTable kvTable = kvTableManager.get();
  Map<String, String> expected = ImmutableMap.of("a", "1", "b", "2", "c", "3");
  List<byte[]> deleteKeys = new ArrayList<>();
  try (CloseableIterator<KeyValue<byte[], byte[]>> scan = kvTable.scan(null, null)) {
    for (int i = 0; i < 3; i++) {
      KeyValue<byte[], byte[]> next = scan.next();
      Assert.assertEquals(expected.get(Bytes.toString(next.getKey())), Bytes.toString(next.getValue()));
      deleteKeys.add(next.getKey());
    }
    Assert.assertFalse(scan.hasNext());
  }
  // Clean up after the run
  kvTableManager.flush();
  for (byte[] key : deleteKeys) {
    kvTable.delete(key);
  }
  kvTableManager.flush();
}
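Since this helper is private and parameterized, the actual @Test methods call it with a concrete application class, Spark program name, and file prefix. A hypothetical caller is sketched below; the program name "JavaSparkUsingLocalFiles" and the prefix "java" are illustrative assumptions, not taken from the source.

  // Hypothetical caller; program name and prefix are assumed for illustration.
  @Test
  public void testJavaSparkWithLocalFiles() throws Exception {
    testSparkWithLocalFiles(SparkAppUsingLocalFiles.class, "JavaSparkUsingLocalFiles", "java");
  }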
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
From class SparkTestRun, method testDatasetSQL.
@Test
public void testDatasetSQL() throws Exception {
  ApplicationManager appManager = deploy(TestSparkApp.class);
  DataSetManager<ObjectMappedTable<Person>> tableManager = getDataset("PersonTable");
  ObjectMappedTable<Person> table = tableManager.get();
  table.write("1", new Person("1", "Bob", 10));
  table.write("2", new Person("2", "Bill", 20));
  table.write("3", new Person("3", "Berry", 30));
  tableManager.flush();
  SparkManager sparkManager = appManager.getSparkManager(DatasetSQLSpark.class.getSimpleName()).start();
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);
  // The program executes "SELECT * FROM Person WHERE age > 10",
  // hence we expect two new entries, for Bill and Berry.
  tableManager.flush();
  Person person = table.read("new:2");
  Assert.assertEquals("Bill", person.name());
  Assert.assertEquals(20, person.age());
  person = table.read("new:3");
  Assert.assertEquals("Berry", person.name());
  Assert.assertEquals(30, person.age());
  // There should be no new entry for Bob
  Assert.assertNull(table.read("new:1"));
}
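The test reads Person values back with name() and age() accessors, so Person is presumably a simple value class whose fields ObjectMappedTable maps to table columns. A minimal sketch of what such a class could look like; the real definition ships with TestSparkApp and is not shown here:

  // Illustrative sketch only; the actual Person class belongs to TestSparkApp.
  public static final class Person {
    private final String id;
    private final String name;
    private final int age;

    public Person(String id, String name, int age) {
      this.id = id;
      this.name = name;
      this.age = age;
    }

    public String name() {
      return name;
    }

    public int age() {
      return age;
    }
  }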
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
From class ServiceLifeCycleTestRun, method testLifecycleWithGC.
@Test
public void testLifecycleWithGC() throws Exception {
  // Set the http server properties to speed up the test
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  System.setProperty(ServiceHttpServer.THREAD_KEEP_ALIVE_SECONDS, "1");
  System.setProperty(ServiceHttpServer.HANDLER_CLEANUP_PERIOD_MILLIS, "100");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    // Make 5 consecutive calls; only one handler instance should be created,
    // since there is only one handler thread.
    Multimap<Integer, String> states = null;
    for (int i = 0; i < 5; i++) {
      states = getStates(serviceManager);
      // There should only be one instance created
      Assert.assertEquals(1, states.size());
      // For that instance, there should only be an INIT state.
      Assert.assertEquals(ImmutableList.of("INIT"),
                          ImmutableList.copyOf(states.get(states.keySet().iterator().next())));
    }
    // Capture the current state
    final Multimap<Integer, String> lastStates = states;
    // TTL for the thread is 1 second, hence sleep for 2 seconds to make sure the thread is gone
    TimeUnit.SECONDS.sleep(2);
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        // Force a gc so that weak references get cleaned up
        System.gc();
        Multimap<Integer, String> newStates = getStates(serviceManager);
        // Expect 3 states in total: an INIT and a DESTROY for the old handler,
        // plus an INIT for the new handler that just handled the getStates call
        if (newStates.size() != 3) {
          return false;
        }
        // An INIT and a DESTROY are expected for the old handler
        return ImmutableList.of("INIT", "DESTROY").equals(
            ImmutableList.copyOf(newStates.get(lastStates.keySet().iterator().next())));
      }
    }, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    // Reset the http server properties
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
    System.clearProperty(ServiceHttpServer.THREAD_KEEP_ALIVE_SECONDS);
    System.clearProperty(ServiceHttpServer.HANDLER_CLEANUP_PERIOD_MILLIS);
  }
}
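On Java 8 or later, the anonymous Callable passed to Tasks.waitFor can be written as a lambda with identical behavior; a sketch, assuming the project compiles at a Java 8 source level, that would replace the Tasks.waitFor call inside the method above:

  Tasks.waitFor(true, () -> {
    // Force a gc so that weak references get cleaned up
    System.gc();
    Multimap<Integer, String> newStates = getStates(serviceManager);
    // 3 states expected: INIT and DESTROY for the old handler,
    // plus INIT for the new handler serving this getStates call
    return newStates.size() == 3
        && ImmutableList.of("INIT", "DESTROY").equals(
            ImmutableList.copyOf(newStates.get(lastStates.keySet().iterator().next())));
  }, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);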
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
From class ETLWorkerTest, method testDAG.
@Test
public void testDAG() throws Exception {
  Schema schema = Schema.recordOf("testRecord", Schema.Field.of("x", Schema.of(Schema.Type.INT)));
  StructuredRecord record1 = StructuredRecord.builder(schema).set("x", 1).build();
  StructuredRecord record2 = StructuredRecord.builder(schema).set("x", 2).build();
  StructuredRecord record3 = StructuredRecord.builder(schema).set("x", 3).build();
  List<StructuredRecord> input = ImmutableList.of(record1, record2, record3);
  /*
   *            |----- value filter ------- sink1
   *            |
   * source ----|----- double -----|
   *            |                  |---- sink2
   *            |----- identity ---|
   */
  File sink1Out = TMP_FOLDER.newFolder();
  File sink2Out = TMP_FOLDER.newFolder();
  ETLRealtimeConfig etlConfig = ETLRealtimeConfig.builder()
    .addStage(new ETLStage("source", MockSource.getPlugin(input)))
    .addStage(new ETLStage("sink1", MockSink.getPlugin(sink1Out)))
    .addStage(new ETLStage("sink2", MockSink.getPlugin(sink2Out)))
    .addStage(new ETLStage("valueFilter", IntValueFilterTransform.getPlugin("x", 2)))
    .addStage(new ETLStage("double", DoubleTransform.getPlugin()))
    .addStage(new ETLStage("identity", IdentityTransform.getPlugin()))
    .addConnection("source", "valueFilter")
    .addConnection("source", "double")
    .addConnection("source", "identity")
    .addConnection("valueFilter", "sink1")
    .addConnection("double", "sink2")
    .addConnection("identity", "sink2")
    .build();
  ApplicationId appId = NamespaceId.DEFAULT.app("dagTest");
  AppRequest<ETLRealtimeConfig> appRequest = new AppRequest<>(APP_ARTIFACT, etlConfig);
  ApplicationManager appManager = deployApplication(appId, appRequest);
  Assert.assertNotNull(appManager);
  WorkerManager workerManager = appManager.getWorkerManager(ETLWorker.NAME);
  workerManager.start();
  workerManager.waitForStatus(true, 10, 1);
  try {
    List<StructuredRecord> sink1output = MockSink.getRecords(sink1Out, 0, 10, TimeUnit.SECONDS);
    List<StructuredRecord> sink1expected = ImmutableList.of(record1, record3);
    Assert.assertEquals(sink1expected, sink1output);
    List<StructuredRecord> sink2output = MockSink.getRecords(sink2Out, 0, 10, TimeUnit.SECONDS);
    Assert.assertEquals(9, sink2output.size());
  } finally {
    stopWorker(workerManager);
  }
  validateMetric(3, appId, "source.records.out");
  validateMetric(3, appId, "valueFilter.records.in");
  validateMetric(2, appId, "valueFilter.records.out");
  validateMetric(3, appId, "double.records.in");
  validateMetric(6, appId, "double.records.out");
  validateMetric(3, appId, "identity.records.in");
  validateMetric(3, appId, "identity.records.out");
  validateMetric(2, appId, "sink1.records.in");
  validateMetric(9, appId, "sink2.records.in");
}
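The expected counts follow from the topology, assuming IntValueFilterTransform drops records where x equals 2, DoubleTransform emits each input record twice, and IdentityTransform passes records through unchanged; all three assumptions are consistent with the metric expectations above. A small sketch of the arithmetic, not part of the original test:

  int fromDouble = 3 * 2;  // "double" emits each of the 3 source records twice
  int fromIdentity = 3;    // "identity" passes all 3 source records through
  Assert.assertEquals(9, fromDouble + fromIdentity);  // matches sink2.records.in
  // sink1 receives only record1 and record3, since x == 2 is filtered out.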