Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
The class MapReduceStreamInputTestRun, method test.
@Test
public void test() throws Exception {
  ApplicationManager applicationManager = deployApplication(AppWithMapReduceUsingStream.class);
  Schema schema = new Schema.Parser().parse(AppWithMapReduceUsingStream.SCHEMA.toString());
  StreamManager streamManager = getStreamManager("mrStream");
  streamManager.send(createEvent(schema, "YHOO", 100, 10.0f));
  streamManager.send(createEvent(schema, "YHOO", 10, 10.1f));
  streamManager.send(createEvent(schema, "YHOO", 13, 9.9f));
  float yhooTotal = 100 * 10.0f + 10 * 10.1f + 13 * 9.9f;
  streamManager.send(createEvent(schema, "AAPL", 5, 300.0f));
  streamManager.send(createEvent(schema, "AAPL", 3, 298.34f));
  streamManager.send(createEvent(schema, "AAPL", 50, 305.23f));
  streamManager.send(createEvent(schema, "AAPL", 1000, 284.13f));
  float aaplTotal = 5 * 300.0f + 3 * 298.34f + 50 * 305.23f + 1000 * 284.13f;
  MapReduceManager mrManager = applicationManager.getMapReduceManager("BodyTracker").start();
  mrManager.waitForRun(ProgramRunStatus.COMPLETED, 180, TimeUnit.SECONDS);
  // Verify the per-ticker totals computed by the MapReduce job
  KeyValueTable pricesDS = (KeyValueTable) getDataset("prices").get();
  float yhooVal = Bytes.toFloat(pricesDS.read(Bytes.toBytes("YHOO")));
  float aaplVal = Bytes.toFloat(pricesDS.read(Bytes.toBytes("AAPL")));
  Assert.assertTrue(Math.abs(yhooTotal - yhooVal) < 0.0000001);
  Assert.assertTrue(Math.abs(aaplTotal - aaplVal) < 0.0000001);
}
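The createEvent helper is not shown in this excerpt. A minimal sketch of what it might look like, assuming the Avro schema declares ticker, num_traded, and price fields (those field names are an assumption) and that the serialized bytes are sent via StreamManager's ByteBuffer overload:

private ByteBuffer createEvent(Schema schema, String ticker, int numTraded, float price) throws IOException {
  // Build an Avro record matching the assumed schema fields
  GenericRecord record = new GenericRecordBuilder(schema)
    .set("ticker", ticker)
    .set("num_traded", numTraded)
    .set("price", price)
    .build();
  // Serialize the record so it can be sent as the stream event body
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
  new GenericDatumWriter<GenericRecord>(schema).write(record, encoder);
  encoder.flush();
  return ByteBuffer.wrap(out.toByteArray());
}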
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
The class FlowStreamIntegrationTestRun, method testStreamFromOtherNamespaceBatch.
@Test
public void testStreamFromOtherNamespaceBatch() throws Exception {
  NamespaceId streamSpace = new NamespaceId("streamSpace");
  getNamespaceAdmin().create(new NamespaceMeta.Builder().setName(streamSpace).build());
  // Deploy an app to add a stream in streamSpace
  deployApplication(streamSpace, TestFlowStreamIntegrationAcrossNSApp.class);
  ApplicationManager applicationManager = deployApplication(TestFlowStreamIntegrationAcrossNSApp.class);
  StreamManager s1 = getStreamManager(streamSpace.stream("s1"));
  StreamManager s1Default = getStreamManager("s1");
  // Send events to both streams
  for (int i = 0; i < 50; i++) {
    s1.send(String.valueOf(i));
    s1Default.send(String.valueOf(i));
  }
  FlowManager flowManager = applicationManager.getFlowManager("StreamAcrossNSTestFlow");
  submitAndVerifyFlowProgram(flowManager);
}
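submitAndVerifyFlowProgram is defined elsewhere in the test class. A hedged sketch of the shape such a helper typically takes with the CDAP test framework, assuming a flowlet named "reader" and an expected count of 2 x 50 events (both the flowlet name and the count are assumptions):

private void submitAndVerifyFlowProgram(FlowManager flowManager) throws Exception {
  flowManager.start();
  // Wait until the flowlet has processed the events sent to both streams
  RuntimeMetrics flowletMetrics = flowManager.getFlowletMetrics("reader");
  flowletMetrics.waitForProcessed(100, 60, TimeUnit.SECONDS);
  flowManager.stop();
}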
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
The class SparkTestRun, method testTransaction.
@Test
public void testTransaction() throws Exception {
  ApplicationManager applicationManager = deploy(TestSparkApp.class);
  StreamManager streamManager = getStreamManager("SparkStream");
  // Write some sentences to the stream
  streamManager.send("red fox");
  streamManager.send("brown fox");
  streamManager.send("grey fox");
  streamManager.send("brown bear");
  streamManager.send("black bear");
  // Run the Spark program
  SparkManager sparkManager = applicationManager.getSparkManager(TransactionSpark.class.getSimpleName());
  sparkManager.start(ImmutableMap.of(
    "source.stream", "SparkStream",
    "keyvalue.table", "KeyValueTable",
    "result.all.dataset", "SparkResult",
    "result.threshold", "2",
    "result.threshold.dataset", "SparkThresholdResult"));
  // Verify the result dataset while the Spark program is still running
  final DataSetManager<KeyValueTable> resultManager = getDataset("SparkThresholdResult");
  final KeyValueTable resultTable = resultManager.get();
  // Expect the threshold result dataset (threshold >= 2) to contain [brown, fox, bear]
  Tasks.waitFor(ImmutableSet.of("brown", "fox", "bear"), new Callable<Set<String>>() {
    @Override
    public Set<String> call() throws Exception {
      // Flush to start a new transaction so the scan sees the latest writes
      resultManager.flush();
      LOG.info("Reading from threshold result");
      try (CloseableIterator<KeyValue<byte[], byte[]>> itor = resultTable.scan(null, null)) {
        return ImmutableSet.copyOf(Iterators.transform(itor, new Function<KeyValue<byte[], byte[]>, String>() {
          @Override
          public String apply(KeyValue<byte[], byte[]> input) {
            String word = Bytes.toString(input.getKey());
            LOG.info("{}, {}", word, Bytes.toInt(input.getValue()));
            return word;
          }
        }));
      }
    }
  }, 3, TimeUnit.MINUTES, 1, TimeUnit.SECONDS);
  sparkManager.stop();
  sparkManager.waitForRun(ProgramRunStatus.KILLED, 60, TimeUnit.SECONDS);
}
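With Java 8, the anonymous Callable and Function above collapse into lambdas with the same result (a sketch, assuming the project targets Java 8; the LOG.info calls are omitted for brevity):

Tasks.waitFor(ImmutableSet.of("brown", "fox", "bear"), () -> {
  // Flush to start a new transaction so the scan sees the latest writes
  resultManager.flush();
  try (CloseableIterator<KeyValue<byte[], byte[]>> itor = resultTable.scan(null, null)) {
    return ImmutableSet.copyOf(Iterators.transform(itor, input -> Bytes.toString(input.getKey())));
  }
}, 3, TimeUnit.MINUTES, 1, TimeUnit.SECONDS);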
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
The class SparkTestRun, method testDynamicSpark.
@Test
public void testDynamicSpark() throws Exception {
  ApplicationManager appManager = deploy(TestSparkApp.class);
  // Populate the stream with input data
  StreamManager streamManager = getStreamManager("SparkStream");
  for (int i = 0; i < 10; i++) {
    streamManager.send("Line " + (i + 1));
  }
  SparkManager sparkManager = appManager.getSparkManager(ScalaDynamicSpark.class.getSimpleName());
  sparkManager.start(ImmutableMap.of(
    "input", "SparkStream",
    "output", "ResultTable",
    "tmpdir", TMP_FOLDER.newFolder().getAbsolutePath()));
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
  // Validate the result written to the dataset
  KeyValueTable resultTable = this.<KeyValueTable>getDataset("ResultTable").get();
  // The word "Line" should appear ten times
  Assert.assertEquals(10, Bytes.toInt(resultTable.read("Line")));
  // Each number should appear exactly once
  for (int i = 0; i < 10; i++) {
    Assert.assertEquals(1, Bytes.toInt(resultTable.read(Integer.toString(i + 1))));
  }
}
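The assertions follow from a word count over the stream body: each event "Line i" splits into the token "Line" and a distinct number, so "Line" occurs ten times and each number once. A plain-Java illustration of the expected counts (not CDAP code, just the arithmetic; assumes java.util.Map and java.util.HashMap are imported):

Map<String, Integer> counts = new HashMap<>();
for (int i = 0; i < 10; i++) {
  for (String word : ("Line " + (i + 1)).split(" ")) {
    counts.merge(word, 1, Integer::sum);
  }
}
// counts.get("Line") == 10; counts.get("7") == 1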
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
The class SparkStreamIntegrationTestRun, method testSparkCrossNS.
@Test
public void testSparkCrossNS() throws Exception {
  // Tests reading a stream and reading/writing datasets across namespaces:
  // TestSparkStreamIntegrationApp is deployed in the default namespace; it reads a stream
  // from streamNS and writes to a dataset in its own namespace (default).
  // TestSparkCrossNSDatasetApp is deployed in crossNSDatasetAppNS; it reads the dataset in
  // default (created by TestSparkStreamIntegrationApp) and writes to a dataset in outputDatasetNS.
  NamespaceMeta streamNSMeta = new NamespaceMeta.Builder().setName("streamNS").build();
  NamespaceMeta crossNSDatasetAppNS = new NamespaceMeta.Builder().setName("crossNSDatasetAppNS").build();
  NamespaceMeta outputDatasetNS = new NamespaceMeta.Builder().setName("outputDatasetNS").build();
  getNamespaceAdmin().create(streamNSMeta);
  getNamespaceAdmin().create(crossNSDatasetAppNS);
  getNamespaceAdmin().create(outputDatasetNS);
  addDatasetInstance(outputDatasetNS.getNamespaceId().dataset("finalDataset"), "keyValueTable");
  StreamManager streamManager = getStreamManager(streamNSMeta.getNamespaceId().stream("testStream"));
  streamManager.createStream();
  for (int i = 0; i < 50; i++) {
    streamManager.send(String.valueOf(i));
  }
  // Deploy TestSparkStreamIntegrationApp in the default namespace
  ApplicationManager spark1 = deployApplication(TestSparkStreamIntegrationApp.class);
  Map<String, String> args = ImmutableMap.of(
    TestSparkStreamIntegrationApp.SparkStreamProgram.INPUT_STREAM_NAMESPACE, streamNSMeta.getNamespaceId().getNamespace(),
    TestSparkStreamIntegrationApp.SparkStreamProgram.INPUT_STREAM_NAME, "testStream");
  SparkManager sparkManager = spark1.getSparkManager("SparkStreamProgram").start(args);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
  // Verify the results written to the default namespace by spark1
  DataSetManager<KeyValueTable> datasetManager = getDataset("result");
  verifyDatasetResult(datasetManager);
  // Deploy the cross-namespace dataset app in the crossNSDatasetAppNS namespace
  ApplicationManager spark2 = deployApplication(crossNSDatasetAppNS.getNamespaceId(), TestSparkCrossNSDatasetApp.class);
  args = ImmutableMap.of(
    TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.INPUT_DATASET_NAMESPACE, NamespaceId.DEFAULT.getNamespace(),
    TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.INPUT_DATASET_NAME, "result",
    TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.OUTPUT_DATASET_NAMESPACE, outputDatasetNS.getNamespaceId().getNamespace(),
    TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.OUTPUT_DATASET_NAME, "finalDataset");
  sparkManager = spark2.getSparkManager("SparkCrossNSDatasetProgram").start(args);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
  // Verify the results written to outputDatasetNS by spark2
  datasetManager = getDataset(outputDatasetNS.getNamespaceId().dataset("finalDataset"));
  verifyDatasetResult(datasetManager);
}
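verifyDatasetResult is also defined elsewhere in the test class. A hypothetical sketch, assuming the Spark programs write each of the 50 stream events back as both key and value (the key/value format is an assumption):

private void verifyDatasetResult(DataSetManager<KeyValueTable> datasetManager) {
  KeyValueTable results = datasetManager.get();
  for (int i = 0; i < 50; i++) {
    byte[] key = Bytes.toBytes(String.valueOf(i));
    // Each event sent to the stream should have been copied through to the dataset
    Assert.assertArrayEquals(key, results.read(key));
  }
}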