Use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.
Class TestFrameworkTestRun, method testTransactionHandlerService.
@Test
public void testTransactionHandlerService() throws Exception {
  ApplicationManager applicationManager = deployApplication(testSpace, AppWithServices.class);
  LOG.info("Deployed.");
  ServiceManager serviceManager =
    applicationManager.getServiceManager(AppWithServices.TRANSACTIONS_SERVICE_NAME).start();
  serviceManager.waitForStatus(true);
  LOG.info("Service started.");
  final URL baseUrl = serviceManager.getServiceURL(15, TimeUnit.SECONDS);
  Assert.assertNotNull(baseUrl);

  // Issue the write request from a separate thread, since the handler holds its
  // transaction open (and thus blocks) for the duration given in the URL.
  ExecutorService executorService = Executors.newSingleThreadExecutor();
  Future<Integer> requestFuture = executorService.submit(new Callable<Integer>() {
    @Override
    public Integer call() throws Exception {
      try {
        URL url = new URL(String.format("%s/write/%s/%s/%d",
                                        baseUrl, AppWithServices.DATASET_TEST_KEY,
                                        AppWithServices.DATASET_TEST_VALUE, 10000));
        HttpRequest request = HttpRequest.get(url).build();
        HttpResponse response = HttpRequests.execute(request);
        return response.getResponseCode();
      } catch (Exception e) {
        LOG.error("Request thread got exception.", e);
        throw Throwables.propagate(e);
      }
    }
  });

  // The write should not be visible to this read, since the transaction performing
  // the write has not been committed yet.
  URL url = new URL(String.format("%s/read/%s", baseUrl, AppWithServices.DATASET_TEST_KEY));
  HttpRequest request = HttpRequest.get(url).build();
  HttpResponse response = HttpRequests.execute(request);
  Assert.assertEquals(204, response.getResponseCode());

  // Wait for the write transaction to commit.
  Integer writeStatusCode = requestFuture.get();
  Assert.assertEquals(200, writeStatusCode.intValue());

  // Make the same read again. By now the transaction should have committed.
  request = HttpRequest.get(url).build();
  response = HttpRequests.execute(request);
  Assert.assertEquals(200, response.getResponseCode());
  Assert.assertEquals(AppWithServices.DATASET_TEST_VALUE,
                      new Gson().fromJson(response.getResponseBodyAsString(), String.class));

  executorService.shutdown();
  serviceManager.stop();
  serviceManager.waitForStatus(false);

  // After the service stops, the dataset should contain the value written on destroy.
  DataSetManager<KeyValueTable> dsManager =
    getDataset(testSpace.dataset(AppWithServices.TRANSACTIONS_DATASET_NAME));
  String value = Bytes.toString(dsManager.get().read(AppWithServices.DESTROY_KEY));
  Assert.assertEquals(AppWithServices.VALUE, value);
}
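The handler the test calls is not shown on this page. The reason the first read returns 204 is that a CDAP HTTP service handler method runs inside a single transaction, so a write that sleeps before returning keeps its change invisible to concurrent reads until the method completes and the transaction commits. The class below is a hypothetical sketch of such a handler, not the actual AppWithServices source; the class name, dataset name, and paths are assumptions.

import co.cask.cdap.api.annotation.UseDataSet;
import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.dataset.lib.KeyValueTable;
import co.cask.cdap.api.service.http.AbstractHttpServiceHandler;
import co.cask.cdap.api.service.http.HttpServiceRequest;
import co.cask.cdap.api.service.http.HttpServiceResponder;
import java.util.concurrent.TimeUnit;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;

// Hypothetical sketch of a transactional write/read handler; not the actual
// AppWithServices code. Each handler method runs in its own transaction, which
// commits only after the method returns.
public class TransactionalHandler extends AbstractHttpServiceHandler {

  @UseDataSet("transactions")  // assumed dataset name
  private KeyValueTable table;

  @GET
  @Path("write/{key}/{value}/{sleepMs}")
  public void write(HttpServiceRequest request, HttpServiceResponder responder,
                    @PathParam("key") String key, @PathParam("value") String value,
                    @PathParam("sleepMs") long sleepMs) throws InterruptedException {
    table.write(key, value);
    // Holding the method open delays the commit, so concurrent reads see nothing yet.
    TimeUnit.MILLISECONDS.sleep(sleepMs);
    responder.sendStatus(200);
  }

  @GET
  @Path("read/{key}")
  public void read(HttpServiceRequest request, HttpServiceResponder responder,
                   @PathParam("key") String key) {
    byte[] value = table.read(key);
    if (value == null) {
      responder.sendStatus(204);  // matches the test's expectation for an uncommitted write
    } else {
      responder.sendJson(Bytes.toString(value));
    }
  }
}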
Use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.
Class TestFrameworkTestRun, method verifyMapperJobOutput.
private void verifyMapperJobOutput(Class<?> appClass,
                                   DataSetManager<KeyValueTable> outTableManager) throws Exception {
  KeyValueTable outputTable = outTableManager.get();
  Assert.assertEquals("world", Bytes.toString(outputTable.read("hello")));

  // Verify dataset metrics.
  String readCountName = "system." + Constants.Metrics.Name.Dataset.READ_COUNT;
  String writeCountName = "system." + Constants.Metrics.Name.Dataset.WRITE_COUNT;
  Collection<MetricTimeSeries> metrics = getMetricsManager().query(
    new MetricDataQuery(0, System.currentTimeMillis() / 1000, Integer.MAX_VALUE,
                        ImmutableMap.of(readCountName, AggregationFunction.SUM,
                                        writeCountName, AggregationFunction.SUM),
                        ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getNamespace(),
                                        Constants.Metrics.Tag.APP, appClass.getSimpleName(),
                                        Constants.Metrics.Tag.MAPREDUCE, DatasetWithMRApp.MAPREDUCE_PROGRAM),
                        ImmutableList.<String>of()));

  // Transform the collection of metrics into a map from metric name to aggregated sum.
  Map<String, Long> aggs = Maps.transformEntries(
    Maps.uniqueIndex(metrics, new Function<MetricTimeSeries, String>() {
      @Override
      public String apply(MetricTimeSeries input) {
        return input.getMetricName();
      }
    }),
    new Maps.EntryTransformer<String, MetricTimeSeries, Long>() {
      @Override
      public Long transformEntry(String key, MetricTimeSeries value) {
        Preconditions.checkArgument(value.getTimeValues().size() == 1,
                                    "Expected one value for aggregated sum for metrics %s", key);
        return value.getTimeValues().get(0).getValue();
      }
    });

  Assert.assertEquals(Long.valueOf(1), aggs.get(readCountName));
  Assert.assertEquals(Long.valueOf(1), aggs.get(writeCountName));
}
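The Guava uniqueIndex/transformEntries pair above builds a map from metric name to its single aggregated value. On a Java 8+ codebase the same transformation can be written more directly with streams; the fragment below is an equivalent sketch (requires java.util.stream.Collectors), not code from the test:

// Equivalent Java 8 stream version of the Guava transformation above (sketch).
Map<String, Long> aggs = metrics.stream()
  .collect(Collectors.toMap(
    MetricTimeSeries::getMetricName,
    series -> {
      Preconditions.checkArgument(series.getTimeValues().size() == 1,
                                  "Expected one value for aggregated sum for metric %s",
                                  series.getMetricName());
      return series.getTimeValues().get(0).getValue();
    }));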
Use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.
Class TestFrameworkTestRun, method testAppWithPlugin.
@Test
public void testAppWithPlugin() throws Exception {
  ArtifactId artifactId = NamespaceId.DEFAULT.artifact("app-with-plugin", "1.0.0-SNAPSHOT");
  addAppArtifact(artifactId, AppWithPlugin.class);
  ArtifactId pluginArtifactId = NamespaceId.DEFAULT.artifact("test-plugin", "1.0.0-SNAPSHOT");
  addPluginArtifact(pluginArtifactId, artifactId, ToStringPlugin.class);

  ApplicationId appId = NamespaceId.DEFAULT.app("AppWithPlugin");
  AppRequest createRequest = new AppRequest(new ArtifactSummary(artifactId.getArtifact(), artifactId.getVersion()));
  ApplicationManager appManager = deployApplication(appId, createRequest);

  // Worker
  final WorkerManager workerManager = appManager.getWorkerManager(AppWithPlugin.WORKER);
  workerManager.start();
  workerManager.waitForStatus(false, 5, 1);
  Tasks.waitFor(false, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return workerManager.getHistory(ProgramRunStatus.COMPLETED).isEmpty();
    }
  }, 5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);

  // Service
  final ServiceManager serviceManager = appManager.getServiceManager(AppWithPlugin.SERVICE);
  serviceManager.start();
  serviceManager.waitForStatus(true, 1, 10);
  URL serviceURL = serviceManager.getServiceURL(5, TimeUnit.SECONDS);
  callServiceGet(serviceURL, "dummy");
  serviceManager.stop();
  serviceManager.waitForStatus(false, 1, 10);
  Tasks.waitFor(false, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return serviceManager.getHistory(ProgramRunStatus.KILLED).isEmpty();
    }
  }, 5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);

  // Workflow
  WorkflowManager workflowManager = appManager.getWorkflowManager(AppWithPlugin.WORKFLOW);
  workflowManager.start();
  workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
  List<RunRecord> runRecords = workflowManager.getHistory();
  Assert.assertNotEquals(ProgramRunStatus.FAILED, runRecords.get(0).getStatus());
  DataSetManager<KeyValueTable> workflowTableManager = getDataset(AppWithPlugin.WORKFLOW_TABLE);
  String value = Bytes.toString(workflowTableManager.get().read("val"));
  Assert.assertEquals(AppWithPlugin.TEST, value);
  Map<String, String> workflowTags = ImmutableMap.of(
    Constants.Metrics.Tag.NAMESPACE, NamespaceId.DEFAULT.getNamespace(),
    Constants.Metrics.Tag.APP, "AppWithPlugin",
    Constants.Metrics.Tag.WORKFLOW, AppWithPlugin.WORKFLOW,
    Constants.Metrics.Tag.RUN_ID, runRecords.get(0).getPid());
  getMetricsManager().waitForTotalMetricCount(workflowTags,
                                              String.format("user.destroy.%s", AppWithPlugin.WORKFLOW),
                                              1, 60, TimeUnit.SECONDS);

  // Test Spark plugins. First send some data to the stream for the Spark program to process.
  StreamManager streamManager = getStreamManager(AppWithPlugin.SPARK_STREAM);
  for (int i = 0; i < 5; i++) {
    streamManager.send("Message " + i);
  }
  SparkManager sparkManager = appManager.getSparkManager(AppWithPlugin.SPARK).start();
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);

  // Verify the Spark result.
  DataSetManager<Table> dataSetManager = getDataset(AppWithPlugin.SPARK_TABLE);
  Table table = dataSetManager.get();
  try (Scanner scanner = table.scan(null, null)) {
    for (int i = 0; i < 5; i++) {
      Row row = scanner.next();
      Assert.assertNotNull(row);
      String expected = "Message " + i + " " + AppWithPlugin.TEST;
      Assert.assertEquals(expected, Bytes.toString(row.getRow()));
      Assert.assertEquals(expected, Bytes.toString(row.get(expected)));
    }
    // There shouldn't be any more rows in the table.
    Assert.assertNull(scanner.next());
  }
}
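The ToStringPlugin class registered via addPluginArtifact() is not shown on this page. For orientation, a CDAP plugin is an exported class annotated with @Plugin and @Name, packaged in its own artifact and registered against the app artifact so every program type above (worker, service, workflow, Spark) can instantiate it. The class below is a hypothetical reconstruction, not the actual test source; the plugin type, name, and method signature are assumptions.

import co.cask.cdap.api.annotation.Name;
import co.cask.cdap.api.annotation.Plugin;

// Hypothetical shape of a plugin like ToStringPlugin; not the actual test source.
@Plugin(type = "function")  // assumed plugin type
@Name("toString")           // assumed plugin name
public class ToStringPlugin {
  public String apply(Object input) {
    // Appends nothing itself; the programs above combine its output with
    // AppWithPlugin.TEST when writing results.
    return String.valueOf(input);
  }
}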
Use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.
Class TestFrameworkTestRun, method testClusterName.
@Test
public void testClusterName() throws Exception {
  String clusterName = getConfiguration().get(Constants.CLUSTER_NAME);

  ApplicationManager appManager = deployApplication(ClusterNameTestApp.class);
  final DataSetManager<KeyValueTable> datasetManager = getDataset(ClusterNameTestApp.CLUSTER_NAME_TABLE);
  final KeyValueTable clusterNameTable = datasetManager.get();

  // A callable for reading the cluster name from the cluster name table.
  // It is used in the Tasks.waitFor calls below; the key to read is set before each call.
  final AtomicReference<String> key = new AtomicReference<>();
  Callable<String> readClusterName = new Callable<String>() {
    @Nullable
    @Override
    public String call() throws Exception {
      datasetManager.flush();
      byte[] bytes = clusterNameTable.read(key.get());
      return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
    }
  };

  // Service
  ServiceManager serviceManager = appManager.getServiceManager(
    ClusterNameTestApp.ClusterNameServiceHandler.class.getSimpleName()).start();
  Assert.assertEquals(clusterName,
                      callServiceGet(serviceManager.getServiceURL(10, TimeUnit.SECONDS), "clusterName"));
  serviceManager.stop();

  // Worker
  WorkerManager workerManager = appManager.getWorkerManager(
    ClusterNameTestApp.ClusterNameWorker.class.getSimpleName()).start();
  key.set("worker.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  // The worker stops by itself; there is no need to call stop.
  workerManager.waitForRun(ProgramRunStatus.COMPLETED, 10, TimeUnit.SECONDS);

  // Flow
  FlowManager flowManager = appManager.getFlowManager(
    ClusterNameTestApp.ClusterNameFlow.class.getSimpleName()).start();
  key.set("flow.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  flowManager.stop();

  // MapReduce
  // Set up the input file used by the MR job.
  Location location = this.<FileSet>getDataset(ClusterNameTestApp.INPUT_FILE_SET).get().getLocation("input");
  try (PrintStream printer = new PrintStream(location.getOutputStream(), true, "UTF-8")) {
    for (int i = 0; i < 10; i++) {
      printer.println("Hello World " + i);
    }
  }
  // Set up input and output dataset arguments.
  Map<String, String> inputArgs = new HashMap<>();
  FileSetArguments.setInputPath(inputArgs, "input");
  Map<String, String> outputArgs = new HashMap<>();
  FileSetArguments.setOutputPath(outputArgs, "output");
  Map<String, String> args = new HashMap<>();
  args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.INPUT_FILE_SET, inputArgs));
  args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.OUTPUT_FILE_SET, outputArgs));
  MapReduceManager mrManager = appManager.getMapReduceManager(
    ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName()).start(args);
  key.set("mr.client.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set("mapper.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set("reducer.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  mrManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);

  // Spark
  SparkManager sparkManager = appManager.getSparkManager(
    ClusterNameTestApp.ClusterNameSpark.class.getSimpleName()).start();
  key.set("spark.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);

  // Workflow
  // Clean up the output path for the MR job in the workflow first.
  this.<FileSet>getDataset(ClusterNameTestApp.OUTPUT_FILE_SET).get().getLocation("output").delete(true);
  args = RuntimeArguments.addScope(Scope.MAPREDUCE,
                                   ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName(), args);
  WorkflowManager workflowManager = appManager.getWorkflowManager(
    ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName()).start(args);
  String prefix = ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName() + ".";
  key.set(prefix + "mr.client.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "mapper.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "reducer.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "spark.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "action.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
}
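The pattern this test exercises is that each program type in ClusterNameTestApp reads the cluster name from its program context and writes it into the shared KeyValueTable under a program-specific key, which the readClusterName callable then polls. The worker below is a hypothetical reconstruction of that write side, not the actual ClusterNameTestApp source; the dataset name and the availability of a getClusterName() accessor on the program context are assumptions.

import co.cask.cdap.api.TxRunnable;
import co.cask.cdap.api.data.DatasetContext;
import co.cask.cdap.api.dataset.lib.KeyValueTable;
import co.cask.cdap.api.worker.AbstractWorker;

// Hypothetical sketch of the worker's write side; not the actual test app source.
public class ClusterNameWorker extends AbstractWorker {
  @Override
  public void run() {
    // Assumes the program context exposes the cluster name it runs on.
    final String clusterName = getContext().getClusterName();
    getContext().execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        KeyValueTable table = context.getDataset("clusterNameTable");  // assumed dataset name
        // "worker.cluster.name" matches the key the test polls for this program type.
        table.write("worker.cluster.name", clusterName);
      }
    });
  }
}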
Use of co.cask.cdap.api.dataset.lib.KeyValueTable in project cdap by caskdata.
Class TestFrameworkTestRun, method testByteCodeClassLoader.
@Category(XSlowTests.class)
@Test
public void testByteCodeClassLoader() throws Exception {
  // This test verifies class loading of bytecode-generated classes.
  ApplicationManager appManager = deployApplication(testSpace, ClassLoaderTestApp.class);

  FlowManager flowManager = appManager.getFlowManager("BasicFlow").start();
  // Wait for at least 10 records to be processed.
  RuntimeMetrics flowMetrics = flowManager.getFlowletMetrics("Sink");
  flowMetrics.waitForProcessed(10, 5000, TimeUnit.MILLISECONDS);
  flowManager.stop();

  ServiceManager serviceManager = appManager.getServiceManager("RecordQuery").start();
  URL serviceURL = serviceManager.getServiceURL(15, TimeUnit.SECONDS);
  Assert.assertNotNull(serviceURL);

  // Query the record count.
  URL url = new URL(serviceURL, "query?type=public");
  HttpRequest request = HttpRequest.get(url).build();
  HttpResponse response = HttpRequests.execute(request);
  Assert.assertEquals(200, response.getResponseCode());
  long count = Long.parseLong(response.getResponseBodyAsString());
  serviceManager.stop();

  // Verify the record count against the dataset.
  DataSetManager<KeyValueTable> recordsManager = getDataset(testSpace.dataset("records"));
  KeyValueTable records = recordsManager.get();
  Assert.assertEquals(count, Bytes.toLong(records.read("PUBLIC")));
}
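The final assertion reads the "PUBLIC" counter back with Bytes.toLong(), which only works because the counter is stored as an 8-byte long. A sink flowlet could maintain such a counter with KeyValueTable.increment(), as in this hypothetical sketch; it is not the actual ClassLoaderTestApp source, where the input is the bytecode-generated Record class rather than a plain string.

import co.cask.cdap.api.annotation.ProcessInput;
import co.cask.cdap.api.annotation.UseDataSet;
import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.dataset.lib.KeyValueTable;
import co.cask.cdap.api.flow.flowlet.AbstractFlowlet;

// Hypothetical sink flowlet maintaining per-type record counters; not the actual
// test app source. increment() stores the counter as an 8-byte long, which is why
// the test reads it back with Bytes.toLong().
public class Sink extends AbstractFlowlet {

  @UseDataSet("records")
  private KeyValueTable records;

  @ProcessInput
  public void process(String recordType) {  // e.g. "PUBLIC"; the real app passes a generated Record
    records.increment(Bytes.toBytes(recordType), 1L);
  }
}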