use of co.cask.cdap.data2.queue.QueueProducer in project cdap by caskdata.
the class FlowTest method testFlow.
@Test
public void testFlow() throws Exception {
  final ApplicationWithPrograms app =
    AppFabricTestHelper.deployApplicationWithManager(WordCountApp.class, TEMP_FOLDER_SUPPLIER);
  List<ProgramController> controllers = Lists.newArrayList();
  for (ProgramDescriptor programDescriptor : app.getPrograms()) {
    // running mapreduce is out of scope of this test (there's a separate unit test for that)
    if (programDescriptor.getProgramId().getType() == ProgramType.MAPREDUCE) {
      continue;
    }
    controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(),
                                               new BasicArguments(), TEMP_FOLDER_SUPPLIER));
  }
  TimeUnit.SECONDS.sleep(1);

  TransactionSystemClient txSystemClient =
    AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
  QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "text");
  QueueClientFactory queueClientFactory =
    AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
  QueueProducer producer = queueClientFactory.createProducer(queueName);

  // start a transaction and write to the queue within it
  Transaction tx = txSystemClient.startShort();
  ((TransactionAware) producer).startTx(tx);
  StreamEventCodec codec = new StreamEventCodec();
  for (int i = 0; i < 10; i++) {
    String msg = "Testing message " + i;
    StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(),
                                        ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
    producer.enqueue(new QueueEntry(codec.encodePayload(event)));
  }
  // commit the transaction
  ((TransactionAware) producer).commitTx();
  txSystemClient.commit(tx);

  // Query the service for at most 10 seconds for the expected result
  Gson gson = new Gson();
  DiscoveryServiceClient discoveryServiceClient =
    AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
  ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover(
    String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getNamespace(), "WordCountApp", "WordFrequencyService"));
  EndpointStrategy endpointStrategy = new RandomEndpointStrategy(serviceDiscovered);
  int trials = 0;
  while (trials++ < 10) {
    Discoverable discoverable = endpointStrategy.pick(2, TimeUnit.SECONDS);
    URL url = new URL(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s/%s",
                                    discoverable.getSocketAddress().getHostName(),
                                    discoverable.getSocketAddress().getPort(),
                                    "WordCountApp", "WordFrequencyService", "wordfreq", "text:Testing"));
    try {
      HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
      Map<String, Long> responseContent = gson.fromJson(
        new InputStreamReader(urlConn.getInputStream(), Charsets.UTF_8),
        new TypeToken<Map<String, Long>>() { }.getType());
      LOG.info("Service response: " + responseContent);
      if (ImmutableMap.of("text:Testing", 10L).equals(responseContent)) {
        break;
      }
    } catch (Throwable t) {
      LOG.info("Exception when trying to query service.", t);
    }
    TimeUnit.SECONDS.sleep(1);
  }
  Assert.assertTrue(trials < 10);

  for (ProgramController controller : controllers) {
    controller.stop().get();
  }
}
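The enqueue half of this test reduces to a small pattern: cast the QueueProducer to TransactionAware, start a short transaction, buffer the enqueues, then commit on both the producer and the transaction system client. The sketch below distills that pattern using the same txSystemClient, queueClientFactory, and queueName as in the test; the rollback branch is an assumption based on the usual Tephra TransactionAware/TransactionSystemClient contract and is not part of the test above.

  // Minimal sketch: transactional enqueue with a QueueProducer
  // (assumes txSystemClient, queueClientFactory and queueName are set up as in the test).
  QueueProducer producer = queueClientFactory.createProducer(queueName);
  Transaction tx = txSystemClient.startShort();
  ((TransactionAware) producer).startTx(tx);
  try {
    producer.enqueue(new QueueEntry(Bytes.toBytes("some payload")));
    ((TransactionAware) producer).commitTx();   // persist the producer's buffered writes
    txSystemClient.commit(tx);                  // make the writes visible to consumers
  } catch (Exception e) {
    // rollback path: an assumption based on the usual Tephra contract, not shown in the test
    ((TransactionAware) producer).rollbackTx();
    txSystemClient.abort(tx);
    throw e;
  }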
use of co.cask.cdap.data2.queue.QueueProducer in project cdap by caskdata.
the class InMemoryStreamFileWriterFactory method create.
@Override
public FileWriter<StreamEvent> create(StreamConfig config, int generation) throws IOException {
  final QueueProducer producer = queueClientFactory.createProducer(QueueName.fromStream(config.getStreamId()));
  final List<TransactionAware> txAwares = Lists.newArrayList();
  if (producer instanceof TransactionAware) {
    txAwares.add((TransactionAware) producer);
  }
  final TransactionExecutor txExecutor = executorFactory.createExecutor(txAwares);

  // Adapt the FileWriter interface onto the QueueProducer
  return new FileWriter<StreamEvent>() {

    private final List<StreamEvent> events = Lists.newArrayList();

    @Override
    public void append(StreamEvent event) throws IOException {
      events.add(event);
    }

    @Override
    public void appendAll(Iterator<? extends StreamEvent> events) throws IOException {
      Iterators.addAll(this.events, events);
    }

    @Override
    public void close() throws IOException {
      producer.close();
    }

    @Override
    public void flush() throws IOException {
      try {
        txExecutor.execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            for (StreamEvent event : events) {
              producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
            }
            events.clear();
          }
        });
      } catch (TransactionFailureException e) {
        throw new IOException(e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
      }
    }
  };
}
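Note that the returned writer never touches the queue on append(): events are only buffered in memory and pushed through the TransactionExecutor as a single batch on flush(), so a flush either enqueues all buffered events or none of them. A sketch of how a caller might use it follows; writerFactory and streamConfig are hypothetical placeholders for an InMemoryStreamFileWriterFactory instance and its StreamConfig.

  // Hypothetical usage of the writer returned above.
  FileWriter<StreamEvent> writer = writerFactory.create(streamConfig, 0);
  try {
    writer.append(new StreamEvent(ImmutableMap.<String, String>of(),
                                  ByteBuffer.wrap(Bytes.toBytes("hello"))));
    writer.append(new StreamEvent(ImmutableMap.<String, String>of(),
                                  ByteBuffer.wrap(Bytes.toBytes("world"))));
    writer.flush();   // both buffered events are enqueued inside one transaction
  } finally {
    writer.close();   // closes the underlying QueueProducer
  }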
use of co.cask.cdap.data2.queue.QueueProducer in project cdap by caskdata.
the class QueueTest method testMultiStageConsumer.
@Test
public void testMultiStageConsumer() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "multistage");
  ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, 2, DequeueStrategy.HASH, "key");
  configureGroups(queueName, ImmutableList.of(groupConfig));
  List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(groupConfig, 0),
                                                          new ConsumerConfig(groupConfig, 1));

  // Enqueue 10 items
  try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
    for (int i = 0; i < 10; i++) {
      TransactionContext txContext = createTxContext(producer);
      txContext.start();
      producer.enqueue(new QueueEntry("key", i, Bytes.toBytes(i)));
      txContext.finish();
    }
  }

  // Consume all even entries
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 1)) {
    for (int i = 0; i < 5; i++) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      Assert.assertTrue(!result.isEmpty());
      Assert.assertEquals(i * 2, Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }

  // Consume two odd entries
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 1)) {
    TransactionContext txContext = createTxContext(consumer);
    txContext.start();
    DequeueResult<byte[]> result = consumer.dequeue(2);
    Assert.assertEquals(2, result.size());
    Iterator<byte[]> iter = result.iterator();
    for (int i = 0; i < 2; i++) {
      Assert.assertEquals(i * 2 + 1, Bytes.toInt(iter.next()));
    }
    txContext.finish();
  }

  // Consume the remaining odd entries
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 1)) {
    for (int i = 2; i < 5; i++) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      Assert.assertTrue(!result.isEmpty());
      Assert.assertEquals(i * 2 + 1, Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }
}
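Because the group uses DequeueStrategy.HASH on the "key" hash, the two consumer instances split the entries by hash value: instance 0 sees the even values and instance 1 the odd ones, and each instance only ever observes its own partition. The loop below is a sketch (not part of the test) of draining one instance's partition completely, reusing the queueClientFactory, consumerConfigs, and the test's createTxContext(...) helper shown above.

  // Sketch: drain everything routed to consumer instance 0 of the HASH group.
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 1)) {
    while (true) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      if (result.isEmpty()) {
        txContext.finish();
        break;                      // this instance's partition is drained
      }
      LOG.info("Dequeued {}", Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }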
use of co.cask.cdap.data2.queue.QueueProducer in project cdap by caskdata.
the class QueueTest method createEnqueueRunnable.
protected Runnable createEnqueueRunnable(final QueueName queueName, final int count,
                                         final int batchSize, final CyclicBarrier barrier) {
  return new Runnable() {
    @Override
    public void run() {
      try {
        if (barrier != null) {
          barrier.await();
        }
        try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
          TransactionContext txContext = createTxContext(producer);
          LOG.info("Start enqueue {} entries.", count);
          Stopwatch stopwatch = new Stopwatch();
          stopwatch.start();

          // Pre-Enqueue
          int batches = count / batchSize;
          List<QueueEntry> queueEntries = Lists.newArrayListWithCapacity(batchSize);
          // include some negative hash values and some positive ones
          int hashValueMultiplier = -1;
          for (int i = 0; i < batches; i++) {
            txContext.start();
            try {
              queueEntries.clear();
              for (int j = 0; j < batchSize; j++) {
                int val = i * batchSize + j;
                byte[] queueData = Bytes.toBytes(val);
                queueEntries.add(new QueueEntry("key", hashValueMultiplier * val, queueData));
                hashValueMultiplier *= -1;
              }
              producer.enqueue(queueEntries);
              txContext.finish();
            } catch (TransactionFailureException e) {
              LOG.error("Operation error", e);
              txContext.abort();
              throw Throwables.propagate(e);
            }
          }

          long elapsed = stopwatch.elapsedTime(TimeUnit.MILLISECONDS);
          LOG.info("Enqueue {} entries in {} ms for {}", count, elapsed, queueName.getSimpleName());
          LOG.info("Enqueue avg {} entries per second for {}",
                   (double) count * 1000 / elapsed, queueName.getSimpleName());
          stopwatch.stop();
        }
      } catch (Exception e) {
        LOG.error(e.getMessage(), e);
      }
    }
  };
}
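The CyclicBarrier parameter exists so that several of these runnables, each with its own QueueProducer, can be released at the same instant and enqueue concurrently. A hypothetical driver is sketched below; the thread count, entry count, batch size, and queueName are illustrative values, not taken from the tests.

  // Hypothetical driver: two producer threads released by the same barrier.
  int threads = 2;
  CyclicBarrier barrier = new CyclicBarrier(threads);
  ExecutorService executor = Executors.newFixedThreadPool(threads);
  for (int i = 0; i < threads; i++) {
    executor.submit(createEnqueueRunnable(queueName, 1000, 100, barrier));
  }
  executor.shutdown();
  executor.awaitTermination(1, TimeUnit.MINUTES);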
use of co.cask.cdap.data2.queue.QueueProducer in project cdap by caskdata.
the class QueueTest method testClearOrDropAllForFlow.
private void testClearOrDropAllForFlow(boolean doDrop) throws Exception {
  // this test is the same for clear and drop, except for two small places...
  // using a different app name for each case as this test leaves some entries behind
  String app = doDrop ? "tDAFF" : "tCAFF";
  QueueName queueName1 = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), app, "flow1", "flowlet1", "out1");
  QueueName queueName2 = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), app, "flow1", "flowlet2", "out2");
  QueueName queueName3 = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), app, "flow2", "flowlet1", "out");
  List<ConsumerGroupConfig> groupConfigs = ImmutableList.of(
    new ConsumerGroupConfig(0L, 1, DequeueStrategy.FIFO, null),
    new ConsumerGroupConfig(1L, 1, DequeueStrategy.FIFO, null));
  configureGroups(queueName1, groupConfigs);
  configureGroups(queueName2, groupConfigs);
  configureGroups(queueName3, groupConfigs);

  // enqueue 10 entries into each queue
  try (QueueProducer producer1 = queueClientFactory.createProducer(queueName1);
       QueueProducer producer2 = queueClientFactory.createProducer(queueName2);
       QueueProducer producer3 = queueClientFactory.createProducer(queueName3)) {
    TransactionContext txContext = createTxContext(producer1, producer2, producer3);
    txContext.start();
    for (int i = 0; i < 10; i++) {
      for (QueueProducer producer : Arrays.asList(producer1, producer2, producer3)) {
        producer.enqueue(new QueueEntry(Bytes.toBytes(i)));
      }
    }
    txContext.finish();
  }

  // consume 1 element from each queue
  ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
  try (QueueConsumer consumer1 = queueClientFactory.createConsumer(queueName1, consumerConfig, 1);
       QueueConsumer consumer2 = queueClientFactory.createConsumer(queueName2, consumerConfig, 1);
       QueueConsumer consumer3 = queueClientFactory.createConsumer(queueName3, consumerConfig, 1)) {
    TransactionContext txContext = createTxContext(consumer1, consumer2, consumer3);
    txContext.start();
    for (QueueConsumer consumer : Arrays.asList(consumer1, consumer2, consumer3)) {
      DequeueResult<byte[]> result = consumer.dequeue(1);
      Assert.assertFalse(result.isEmpty());
      Assert.assertArrayEquals(Bytes.toBytes(0), result.iterator().next());
    }
    txContext.finish();
  }

  // verify the consumer config exists before the clear/drop
  verifyConsumerConfigExists(queueName1, queueName2);

  // clear/drop all queues for flow1
  FlowId flow1Id = NamespaceId.DEFAULT.app(app).flow("flow1");
  if (doDrop) {
    queueAdmin.dropAllForFlow(flow1Id);
  } else {
    queueAdmin.clearAllForFlow(flow1Id);
  }
  if (doDrop) {
    // verify that only flow2's queues still exist
    Assert.assertFalse(queueAdmin.exists(queueName1));
    Assert.assertFalse(queueAdmin.exists(queueName2));
    Assert.assertTrue(queueAdmin.exists(queueName3));
  } else {
    // verify all queues still exist
    Assert.assertTrue(queueAdmin.exists(queueName1));
    Assert.assertTrue(queueAdmin.exists(queueName2));
    Assert.assertTrue(queueAdmin.exists(queueName3));
  }
  // verify the consumer config was deleted
  verifyConsumerConfigIsDeleted(queueName1, queueName2);

  // create new consumers because existing ones may have pre-fetched and cached some entries
  configureGroups(queueName1, groupConfigs);
  configureGroups(queueName2, groupConfigs);
  try (QueueConsumer consumer1 = queueClientFactory.createConsumer(queueName1, consumerConfig, 1);
       QueueConsumer consumer2 = queueClientFactory.createConsumer(queueName2, consumerConfig, 1);
       QueueConsumer consumer3 = queueClientFactory.createConsumer(queueName3, consumerConfig, 1)) {
    TransactionContext txContext = createTxContext(consumer1, consumer2, consumer3);
    txContext.start();
    // attempt to consume from flow1's queues; they should be empty
    for (QueueConsumer consumer : Arrays.asList(consumer1, consumer2)) {
      DequeueResult<byte[]> result = consumer.dequeue(1);
      Assert.assertTrue(result.isEmpty());
    }
    // but flow2 was not deleted -> consumer3 should get another entry
    DequeueResult<byte[]> result = consumer3.dequeue(1);
    Assert.assertFalse(result.isEmpty());
    Assert.assertArrayEquals(Bytes.toBytes(1), result.iterator().next());
    txContext.finish();
  }
}
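The boolean parameter selects between the two admin operations: clearAllForFlow empties flow1's queues (the queues themselves survive), while dropAllForFlow removes the queues entirely; in both cases flow2's queue and its entries are untouched, and the consumer configuration for flow1 is deleted. The helper is presumably driven by one test per mode; the entry points below are a sketch with assumed method names, not the test class's actual tests.

  // Hypothetical entry points for the parameterized helper above (names assumed).
  @Test
  public void testClearAllForFlow() throws Exception {
    testClearOrDropAllForFlow(false);   // clear: queues survive, entries and consumer state are gone
  }

  @Test
  public void testDropAllForFlow() throws Exception {
    testClearOrDropAllForFlow(true);    // drop: flow1's queues are removed entirely
  }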