Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
The class ServiceLifeCycleTestRun, method testContentConsumerLifecycle.
@Test
public void testContentConsumerLifecycle() throws Exception {
  // Use only one thread, for testing context capture and release
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    CountDownLatch uploadLatch = new CountDownLatch(1);
    // Create five concurrent uploads
    List<ListenableFuture<Integer>> completions = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }
    // Get the states; there should be six handler instances initialized:
    // five for the in-progress uploads, one for the getStates call
    Tasks.waitFor(6, new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return getStates(serviceManager).size();
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Finish the uploads
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
    // Verify the results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }
    // Get the states; there should still be six handler instances initialized
    final Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(6, states.size());
    // Do another round of six concurrent uploads. It should reuse all six existing contexts
    completions.clear();
    uploadLatch = new CountDownLatch(1);
    for (int i = 0; i < 6; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }
    // Get the states; there should be seven handler instances initialized:
    // six for the in-progress uploads, one for the getStates call.
    // Out of the seven states, six should be the same as the old ones
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        Multimap<Integer, String> newStates = getStates(serviceManager);
        if (newStates.size() != 7) {
          return false;
        }
        for (Map.Entry<Integer, String> entry : states.entries()) {
          if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
            return false;
          }
        }
        return true;
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Complete the uploads
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
    // Verify the results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }
    // Query the queue size metric. Expect the maximum to be 6, because only the six contexts
    // from the concurrent uploads get captured and added back to the queue, while the one
    // created for the getStates call stays in the thread cache, not in the queue.
    Tasks.waitFor(6L, new Callable<Long>() {
      @Override
      public Long call() throws Exception {
        Map<String, String> context = ImmutableMap.of(
          Constants.Metrics.Tag.NAMESPACE, Id.Namespace.DEFAULT.getId(),
          Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(),
          Constants.Metrics.Tag.SERVICE, "test");
        MetricDataQuery metricQuery = new MetricDataQuery(0, Integer.MAX_VALUE, Integer.MAX_VALUE,
                                                          "system.context.pool.size",
                                                          AggregationFunction.MAX,
                                                          context, ImmutableList.<String>of());
        Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
        return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
  }
}
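The test relies on two helpers defined elsewhere in ServiceLifeCycleTestRun: slowUpload, which starts a chunked upload that stays open until the given latch is released, and getStates, which queries the handler lifecycle states. Below is a minimal sketch of how a slowUpload-style helper could be written; the chunk payloads, the single-thread executor, and the URL construction are assumptions for illustration, not the actual CDAP test code.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical helper: starts a chunked upload that blocks on the latch before
// sending its final chunk, so the handler context stays captured server-side.
private ListenableFuture<Integer> slowUpload(final ServiceManager serviceManager,
                                             final String method, final String endpoint,
                                             final CountDownLatch latch) {
  ListeningExecutorService executor =
    MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
  try {
    return executor.submit(new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        URL url = new URL(serviceManager.getServiceURL(10, TimeUnit.SECONDS), endpoint);
        HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
        try {
          urlConn.setRequestMethod(method);
          urlConn.setChunkedStreamingMode(5);  // force chunked transfer encoding
          urlConn.setDoOutput(true);
          OutputStream os = urlConn.getOutputStream();
          os.write("begin".getBytes("UTF-8"));
          os.flush();
          latch.await();                       // hold the upload open until released
          os.write("end".getBytes("UTF-8"));
          os.close();
          return urlConn.getResponseCode();
        } finally {
          urlConn.disconnect();
        }
      }
    });
  } finally {
    executor.shutdown();  // lets the worker thread exit once the upload completes
  }
}

Keeping the connection open between the two chunks is what forces the server to hold one captured handler context per in-flight request, which is exactly what the state counts above assert.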
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
The class ServiceLifeCycleTestRun, method testInvalidContentProducer.
@Test
public void testInvalidContentProducer() throws Exception {
  ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
  final ServiceManager serviceManager = appManager.getServiceManager("test").start();
  URL serviceURL = serviceManager.getServiceURL(10, TimeUnit.SECONDS);
  // Exception from getContentLength
  URL url = serviceURL.toURI().resolve("invalid?methods=getContentLength").toURL();
  HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
  try {
    Assert.assertEquals(500, urlConn.getResponseCode());
  } finally {
    urlConn.disconnect();
  }
  // Exception from both nextChunk and onError
  url = serviceURL.toURI().resolve("invalid?methods=nextChunk&methods=onError").toURL();
  urlConn = (HttpURLConnection) url.openConnection();
  try {
    // The status code will be 200, since it was already sent before the failure
    Assert.assertEquals(200, urlConn.getResponseCode());
    // Expect an IOException when trying to read the body, since the server closed the connection
    try {
      ByteStreams.toByteArray(urlConn.getInputStream());
      Assert.fail("Expected IOException");
    } catch (IOException e) {
      // expected
    }
  } finally {
    urlConn.disconnect();
  }
  // Exception from onFinish only
  url = serviceURL.toURI().resolve("invalid?methods=onFinish").toURL();
  urlConn = (HttpURLConnection) url.openConnection();
  try {
    // The status code will be 200. Since the response is already complete,
    // there is no error from the client's perspective.
    Assert.assertEquals(200, urlConn.getResponseCode());
    Assert.assertEquals("0123456789", new String(ByteStreams.toByteArray(urlConn.getInputStream()), "UTF-8"));
  } finally {
    urlConn.disconnect();
  }
}
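The three assertions map onto where in the producer lifecycle the injected failure happens: getContentLength runs before the response status line is sent (hence 500), nextChunk fails mid-stream after the 200 has already gone out (hence the dropped connection), and onFinish fails only after the complete body was delivered (hence no client-visible error). Here is a hedged sketch of what a failure-injecting producer behind the invalid endpoint might look like, based on CDAP's HttpContentProducer API; the class name and the way the selected method names are passed in are assumptions.

import co.cask.cdap.api.Transactional;
import co.cask.cdap.api.service.http.HttpContentProducer;
import com.google.common.base.Charsets;

import java.nio.ByteBuffer;
import java.util.Set;

// Hypothetical producer: fails in whichever lifecycle methods the request selected.
public class FailingContentProducer extends HttpContentProducer {
  private final Set<String> failingMethods;
  private int chunksSent;

  FailingContentProducer(Set<String> failingMethods) {
    this.failingMethods = failingMethods;
  }

  @Override
  public long getContentLength() {
    if (failingMethods.contains("getContentLength")) {
      throw new RuntimeException("Failure in getContentLength");
    }
    return -1L;  // unknown length => chunked response
  }

  @Override
  public ByteBuffer nextChunk(Transactional transactional) throws Exception {
    if (failingMethods.contains("nextChunk")) {
      throw new Exception("Failure in nextChunk");
    }
    // Send "0123456789" one digit at a time, then an empty buffer to finish
    if (chunksSent < 10) {
      return ByteBuffer.wrap(String.valueOf(chunksSent++).getBytes(Charsets.UTF_8));
    }
    return ByteBuffer.allocate(0);
  }

  @Override
  public void onFinish() throws Exception {
    if (failingMethods.contains("onFinish")) {
      throw new Exception("Failure in onFinish");
    }
  }

  @Override
  public void onError(Throwable failureCause) {
    if (failingMethods.contains("onError")) {
      throw new RuntimeException("Failure in onError");
    }
  }
}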
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
The class ServiceLifeCycleTestRun, method testLifecycleWithThreadTerminates.
@Test
public void testLifecycleWithThreadTerminates() throws Exception {
  // Set the http server properties to speed up the test
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  System.setProperty(ServiceHttpServer.THREAD_KEEP_ALIVE_SECONDS, "1");
  System.setProperty(ServiceHttpServer.HANDLER_CLEANUP_PERIOD_MILLIS, "100");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    // Make a call to the service; expect an init state
    Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(1, states.size());
    int handlerHashCode = states.keySet().iterator().next();
    Assert.assertEquals(ImmutableList.of("INIT"), ImmutableList.copyOf(states.get(handlerHashCode)));
    // Sleep for 3 seconds so that the thread goes IDLE, gets terminated and cleaned up
    TimeUnit.SECONDS.sleep(3);
    states = getStates(serviceManager);
    // There should be two state keys, since the old handler instance gets destroyed
    // and a new one is created to handle the getStates request
    Assert.assertEquals(2, states.keySet().size());
    // The state changes for the old handler should be INIT, DESTROY
    Assert.assertEquals(ImmutableList.of("INIT", "DESTROY"), ImmutableList.copyOf(states.get(handlerHashCode)));
    // The state changes for the new handler should be INIT
    for (int key : states.keys()) {
      if (key != handlerHashCode) {
        Assert.assertEquals(ImmutableList.of("INIT"), ImmutableList.copyOf(states.get(key)));
      }
    }
  } finally {
    // Reset the http server properties
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
    System.clearProperty(ServiceHttpServer.THREAD_KEEP_ALIVE_SECONDS);
    System.clearProperty(ServiceHttpServer.HANDLER_CLEANUP_PERIOD_MILLIS);
  }
}
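The getStates helper is not shown in these snippets. A plausible sketch follows, assuming the handler exposes a states endpoint returning a JSON map from handler identity hash code to the ordered list of lifecycle transitions; the endpoint name and response shape are assumptions.

import co.cask.cdap.test.ServiceManager;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

import java.io.InputStreamReader;
import java.io.Reader;
import java.lang.reflect.Type;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

// Hypothetical helper: calls a "states" endpoint that reports, per handler instance
// (keyed by identity hash code), the lifecycle transitions it has gone through.
private Multimap<Integer, String> getStates(ServiceManager serviceManager) throws Exception {
  URL url = new URL(serviceManager.getServiceURL(10, TimeUnit.SECONDS), "states");
  HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
  try {
    Type type = new TypeToken<Map<Integer, List<String>>>() { }.getType();
    Reader reader = new InputStreamReader(urlConn.getInputStream(), "UTF-8");
    Map<Integer, List<String>> raw = new Gson().fromJson(reader, type);
    // ArrayListMultimap preserves the per-key order of lifecycle transitions,
    // which the INIT/DESTROY assertions depend on
    Multimap<Integer, String> states = ArrayListMultimap.create();
    for (Map.Entry<Integer, List<String>> entry : raw.entrySet()) {
      states.putAll(entry.getKey(), entry.getValue());
    }
    return states;
  } finally {
    urlConn.disconnect();
  }
}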
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
The class ServiceLifeCycleTestRun, method testContentConsumerProducerLifecycle.
@Test
public void testContentConsumerProducerLifecycle() throws Exception {
  // Use only one thread, for testing context capture and release
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    final DataSetManager<KeyValueTable> datasetManager = getDataset(ServiceLifecycleApp.HANDLER_TABLE_NAME);
    // Clean up the dataset first to avoid interference from other tests
    datasetManager.get().delete(Bytes.toBytes("called"));
    datasetManager.get().delete(Bytes.toBytes("completed"));
    datasetManager.flush();
    CountDownLatch uploadLatch = new CountDownLatch(1);
    // Create five concurrent uploads
    List<ListenableFuture<Integer>> completions = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      completions.add(slowUpload(serviceManager, "POST", "uploadDownload", uploadLatch));
    }
    // Get the states; there should be six handler instances initialized:
    // five for the in-progress uploads, one for the getStates call
    Tasks.waitFor(6, new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return getStates(serviceManager).size();
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Complete the uploads
    uploadLatch.countDown();
    // Make sure the download through the content producer has started
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        byte[] value = datasetManager.get().read("called");
        datasetManager.flush();
        if (value == null || value.length != Bytes.SIZEOF_LONG) {
          return false;
        }
        return Bytes.toLong(value) > 5;
      }
    }, 10L, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Get the states; there should still be six handler instances, since the ContentConsumer
    // should pass its captured context to the ContentProducer without creating a new one
    Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(6, states.size());
    // Set the completed flag in the dataset
    datasetManager.get().write("completed", Bytes.toBytes(true));
    datasetManager.flush();
    // Wait for completion
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
    // Verify the upload results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }
    // Get the states again; they should be the same six instances
    Assert.assertEquals(states, getStates(serviceManager));
  } finally {
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
  }
}
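On the producer side, the uploadDownload handler apparently increments the "called" counter as it streams and keeps producing until the test writes the "completed" flag. A rough sketch of such a gated producer, using CDAP's HttpContentProducer and Transactional APIs; the class name, chunk payloads, and exact gating logic are assumptions inferred from the assertions above.

import co.cask.cdap.api.Transactional;
import co.cask.cdap.api.TxRunnable;
import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.data.DatasetContext;
import co.cask.cdap.api.dataset.lib.KeyValueTable;
import co.cask.cdap.api.service.http.HttpContentProducer;

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical producer behind "uploadDownload": bumps the "called" counter on each
// chunk and keeps streaming until the test flips the "completed" flag in the table.
public class GatedContentProducer extends HttpContentProducer {
  private final String tableName;

  GatedContentProducer(String tableName) {
    this.tableName = tableName;
  }

  @Override
  public long getContentLength() {
    return -1L;  // unknown length => chunked response
  }

  @Override
  public ByteBuffer nextChunk(Transactional transactional) throws Exception {
    final AtomicBoolean completed = new AtomicBoolean();
    transactional.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        KeyValueTable table = context.getDataset(tableName);
        table.increment(Bytes.toBytes("called"), 1L);  // visible to the test's waitFor
        completed.set(table.read("completed") != null);
      }
    });
    // An empty buffer signals end-of-content; otherwise keep the download alive
    return completed.get() ? ByteBuffer.allocate(0) : ByteBuffer.wrap(new byte[]{0});
  }

  @Override
  public void onFinish() throws Exception {
    // nothing to clean up in this sketch
  }

  @Override
  public void onError(Throwable failureCause) {
    // nothing to clean up in this sketch
  }
}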
Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.
The class SparkTestRun, method testScalaSparkCrossNSStream.
@Test
public void testScalaSparkCrossNSStream() throws Exception {
  // Create a namespace for the stream and create the stream in it
  NamespaceMeta crossNSStreamMeta = new NamespaceMeta.Builder().setName("streamSpaceForSpark").build();
  getNamespaceAdmin().create(crossNSStreamMeta);
  StreamManager streamManager = getStreamManager(crossNSStreamMeta.getNamespaceId().stream("testStream"));
  // Create a namespace for the dataset and add the dataset instance in it
  NamespaceMeta crossNSDatasetMeta = new NamespaceMeta.Builder().setName("crossNSDataset").build();
  getNamespaceAdmin().create(crossNSDatasetMeta);
  addDatasetInstance(crossNSDatasetMeta.getNamespaceId().dataset("count"), "keyValueTable");
  // Write something to the stream
  streamManager.createStream();
  for (int i = 0; i < 50; i++) {
    streamManager.send(String.valueOf(i));
  }
  // Deploy the Spark app in another namespace (default)
  ApplicationManager applicationManager = deploy(SparkAppUsingObjectStore.class);
  Map<String, String> args = ImmutableMap.of(
    ScalaCrossNSProgram.STREAM_NAMESPACE(), crossNSStreamMeta.getNamespaceId().getNamespace(),
    ScalaCrossNSProgram.DATASET_NAMESPACE(), crossNSDatasetMeta.getNamespaceId().getNamespace(),
    ScalaCrossNSProgram.DATASET_NAME(), "count");
  SparkManager sparkManager =
    applicationManager.getSparkManager(ScalaCrossNSProgram.class.getSimpleName()).start(args);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES);
  // Get the dataset from the other namespace, where we expect it to exist, and compare the data
  DataSetManager<KeyValueTable> countManager = getDataset(crossNSDatasetMeta.getNamespaceId().dataset("count"));
  KeyValueTable results = countManager.get();
  for (int i = 0; i < 50; i++) {
    byte[] key = String.valueOf(i).getBytes(Charsets.UTF_8);
    Assert.assertArrayEquals(key, results.read(key));
  }
}
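The two namespaces created here are never removed, which can bleed into other tests run against the same instance. A hypothetical cleanup hook, assuming a JUnit @After method and that the NamespaceAdmin in scope accepts a NamespaceId for deletion:

import co.cask.cdap.proto.id.NamespaceId;
import org.junit.After;

@After
public void deleteCrossNSNamespaces() throws Exception {
  // Hypothetical cleanup; the names match the namespaces created in the test above
  getNamespaceAdmin().delete(new NamespaceId("streamSpaceForSpark"));
  getNamespaceAdmin().delete(new NamespaceId("crossNSDataset"));
}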