Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
From class TestFrameworkTestRun, method testFlowRuntimeArguments.
@Test
public void testFlowRuntimeArguments() throws Exception {
  ApplicationManager applicationManager = deployApplication(FilterAppWithNewFlowAPI.class);
  Map<String, String> args = Maps.newHashMap();
  args.put("threshold", "10");
  applicationManager.getFlowManager("FilterFlow").start(args);
  StreamManager input = getStreamManager("input");
  // Only "21" exceeds the threshold of 10, so the flow should count exactly one event.
  input.send("2");
  input.send("21");
  ServiceManager serviceManager = applicationManager.getServiceManager("CountService").start();
  serviceManager.waitForStatus(true, 2, 1);
  Assert.assertEquals("1", new Gson().fromJson(callServiceGet(serviceManager.getServiceURL(), "result"), String.class));
}
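The callServiceGet helper used above is test-suite plumbing rather than part of the CDAP test framework. A minimal sketch, assuming it issues a plain HTTP GET against the service URL and returns the response body:

// Hypothetical helper (not a CDAP API): HTTP GET of <serviceURL>/<path>, returning the body.
// Assumes java.net.URL/HttpURLConnection and Guava's CharStreams/Charsets are imported.
private String callServiceGet(URL serviceURL, String path) throws IOException {
  HttpURLConnection connection = (HttpURLConnection) new URL(serviceURL, path).openConnection();
  try {
    return CharStreams.toString(new InputStreamReader(connection.getInputStream(), Charsets.UTF_8));
  } finally {
    connection.disconnect();
  }
}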
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
From class AuthorizationTest, method testCrossNSFlowlet.
@Test
public void testCrossNSFlowlet() throws Exception {
  createAuthNamespace();
  ApplicationManager appManager = deployApplication(AUTH_NAMESPACE, CrossNsDatasetAccessApp.class);
  // give BOB ALL permissions on the auth namespace so he can execute programs and also read the stream
  grantAndAssertSuccess(AUTH_NAMESPACE, BOB, EnumSet.allOf(Action.class));
  // switch to BOB
  SecurityRequestContext.setUserId(BOB.getName());
  // send data to the stream as BOB; this ensures that BOB can write to a stream in the auth namespace
  StreamManager streamManager = getStreamManager(AUTH_NAMESPACE.stream(CrossNsDatasetAccessApp.STREAM_NAME));
  for (int i = 0; i < 10; i++) {
    streamManager.send(String.valueOf(i).getBytes());
  }
  // switch back to ALICE
  SecurityRequestContext.setUserId(ALICE.getName());
  final FlowManager flowManager = appManager.getFlowManager(CrossNsDatasetAccessApp.FLOW_NAME);
  testSystemDatasetAccessFromFlowlet(flowManager);
  testCrossNSDatasetAccessFromFlowlet(flowManager);
}
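grantAndAssertSuccess is likewise a private helper of the test class, not shown here. A plausible sketch, assuming it delegates to the configured Authorizer and then checks that the privileges are visible; the getAuthorizer() accessor and the Privilege constructor are assumptions against CDAP's security SPI:

// Hedged sketch: grant the actions, then assert each one shows up for the principal.
private void grantAndAssertSuccess(EntityId entityId, Principal principal, Set<Action> actions) throws Exception {
  Authorizer authorizer = getAuthorizer(); // assumed accessor for the test's Authorizer instance
  authorizer.grant(entityId, principal, actions);
  Set<Privilege> privileges = authorizer.listPrivileges(principal);
  for (Action action : actions) {
    Assert.assertTrue(privileges.contains(new Privilege(entityId, action)));
  }
}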
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
From class SparkTestRun, method testScalaSparkCrossNSStream.
@Test
public void testScalaSparkCrossNSStream() throws Exception {
  // create a namespace for stream and create the stream in it
  NamespaceMeta crossNSStreamMeta = new NamespaceMeta.Builder().setName("streamSpaceForSpark").build();
  getNamespaceAdmin().create(crossNSStreamMeta);
  StreamManager streamManager = getStreamManager(crossNSStreamMeta.getNamespaceId().stream("testStream"));
  // create a namespace for dataset and add the dataset instance in it
  NamespaceMeta crossNSDatasetMeta = new NamespaceMeta.Builder().setName("crossNSDataset").build();
  getNamespaceAdmin().create(crossNSDatasetMeta);
  addDatasetInstance(crossNSDatasetMeta.getNamespaceId().dataset("count"), "keyValueTable");
  // write something to the stream
  streamManager.createStream();
  for (int i = 0; i < 50; i++) {
    streamManager.send(String.valueOf(i));
  }
  // deploy the spark app in another namespace (default)
  ApplicationManager applicationManager = deploy(SparkAppUsingObjectStore.class);
  Map<String, String> args = ImmutableMap.of(
    ScalaCrossNSProgram.STREAM_NAMESPACE(), crossNSStreamMeta.getNamespaceId().getNamespace(),
    ScalaCrossNSProgram.DATASET_NAMESPACE(), crossNSDatasetMeta.getNamespaceId().getNamespace(),
    ScalaCrossNSProgram.DATASET_NAME(), "count");
  SparkManager sparkManager = applicationManager.getSparkManager(ScalaCrossNSProgram.class.getSimpleName()).start(args);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES);
  // get the dataset from the other namespace where we expect it to exist and compare the data
  DataSetManager<KeyValueTable> countManager = getDataset(crossNSDatasetMeta.getNamespaceId().dataset("count"));
  KeyValueTable results = countManager.get();
  for (int i = 0; i < 50; i++) {
    byte[] key = String.valueOf(i).getBytes(Charsets.UTF_8);
    Assert.assertArrayEquals(key, results.read(key));
  }
}
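A test like this typically removes the extra namespaces afterwards so repeated runs stay isolated. A minimal cleanup sketch, assuming the suite does not tear them down automatically and that NamespaceAdmin exposes delete(NamespaceId):

// Hedged cleanup sketch: drop the two namespaces created for the cross-namespace test.
getNamespaceAdmin().delete(crossNSStreamMeta.getNamespaceId());
getNamespaceAdmin().delete(crossNSDatasetMeta.getNamespaceId());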
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
From class SparkTestRun, method testStreamFormatSpec.
@Test
public void testStreamFormatSpec() throws Exception {
  ApplicationManager appManager = deploy(TestSparkApp.class);
  StreamManager stream = getStreamManager("PeopleStream");
  stream.send("Old Man,50");
  stream.send("Baby,1");
  stream.send("Young Guy,18");
  stream.send("Small Kid,5");
  stream.send("Legal Drinker,21");
  Map<String, String> outputArgs = new HashMap<>();
  FileSetArguments.setOutputPath(outputArgs, "output");
  Map<String, String> runtimeArgs = new HashMap<>();
  runtimeArgs.putAll(RuntimeArguments.addScope(Scope.DATASET, "PeopleFileSet", outputArgs));
  runtimeArgs.put("stream.name", "PeopleStream");
  runtimeArgs.put("output.dataset", "PeopleFileSet");
  runtimeArgs.put("sql.statement", "SELECT name, age FROM people WHERE age >= 21");
  List<String> programs = Arrays.asList(ScalaStreamFormatSpecSpark.class.getSimpleName(),
                                        StreamFormatSpecSpark.class.getSimpleName());
  for (String sparkProgramName : programs) {
    // Clean the output before starting
    DataSetManager<FileSet> fileSetManager = getDataset("PeopleFileSet");
    Location outputDir = fileSetManager.get().getLocation("output");
    outputDir.delete(true);
    SparkManager sparkManager = appManager.getSparkManager(sparkProgramName);
    sparkManager.start(runtimeArgs);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 180, TimeUnit.SECONDS);
    // Find the output part file. There is only one because the program repartitions to 1.
    Location outputFile = Iterables.find(outputDir.list(), new Predicate<Location>() {
      @Override
      public boolean apply(Location input) {
        return input.getName().startsWith("part-r-");
      }
    });
    // Verify the result
    List<String> lines = CharStreams.readLines(CharStreams.newReaderSupplier(Locations.newInputSupplier(outputFile), Charsets.UTF_8));
    Map<String, Integer> result = new HashMap<>();
    for (String line : lines) {
      String[] parts = line.split(":");
      result.put(parts[0], Integer.parseInt(parts[1]));
    }
    Assert.assertEquals(ImmutableMap.of("Old Man", 50, "Legal Drinker", 21), result);
  }
}
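On Java 8, the anonymous Predicate used to find the part file can be written as a lambda, since Guava's Predicate is a functional interface. An equivalent form:

// Equivalent Java 8 form of the part-file lookup above.
Location outputFile = Iterables.find(outputDir.list(), input -> input.getName().startsWith("part-r-"));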
Use of co.cask.cdap.test.StreamManager in project cdap by caskdata.
From class SparkStreamIntegrationTestRun, method testSparkWithStream.
@Test
public void testSparkWithStream() throws Exception {
  ApplicationManager applicationManager = deployApplication(TestSparkStreamIntegrationApp.class);
  StreamManager streamManager = getStreamManager("testStream");
  for (int i = 0; i < 50; i++) {
    streamManager.send(String.valueOf(i));
  }
  SparkManager sparkManager = applicationManager.getSparkManager("SparkStreamProgram").start();
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
  // The Spark job simply turns every stream event body into key/value pairs, with key == value.
  DataSetManager<KeyValueTable> datasetManager = getDataset("result");
  verifyDatasetResult(datasetManager);
}
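verifyDatasetResult is defined elsewhere in the test class. A minimal sketch, assuming it mirrors the key == value check used in testScalaSparkCrossNSStream above:

// Hypothetical sketch: every key 0..49 should map to itself in the result table.
private void verifyDatasetResult(DataSetManager<KeyValueTable> datasetManager) {
  KeyValueTable results = datasetManager.get();
  for (int i = 0; i < 50; i++) {
    byte[] key = String.valueOf(i).getBytes(Charsets.UTF_8);
    Assert.assertArrayEquals(key, results.read(key));
  }
}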