use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.
the class QueueTest method enqueueDequeue.
private void enqueueDequeue(final QueueName queueName, int preEnqueueCount, int concurrentCount,
                            int enqueueBatchSize, int consumerSize, DequeueStrategy dequeueStrategy,
                            final int dequeueBatchSize) throws Exception {
  ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, consumerSize, dequeueStrategy, "key");
  configureGroups(queueName, ImmutableList.of(groupConfig));
  Preconditions.checkArgument(preEnqueueCount % enqueueBatchSize == 0, "Count must be divisible by enqueueBatchSize");
  Preconditions.checkArgument(concurrentCount % enqueueBatchSize == 0, "Count must be divisible by enqueueBatchSize");
  final List<ConsumerConfig> consumerConfigs = Lists.newArrayList();
  for (int i = 0; i < consumerSize; i++) {
    consumerConfigs.add(new ConsumerConfig(groupConfig, i));
  }
  createEnqueueRunnable(queueName, preEnqueueCount, enqueueBatchSize, null).run();
  final CyclicBarrier startBarrier = new CyclicBarrier(consumerSize + 2);
  ExecutorService executor = Executors.newFixedThreadPool(consumerSize + 1);
  // Enqueue thread
  executor.submit(createEnqueueRunnable(queueName, concurrentCount, enqueueBatchSize, startBarrier));
  // Dequeue
  final long expectedSum = ((long) preEnqueueCount / 2 * ((long) preEnqueueCount - 1))
    + ((long) concurrentCount / 2 * ((long) concurrentCount - 1));
  final AtomicLong valueSum = new AtomicLong();
  final CountDownLatch completeLatch = new CountDownLatch(consumerSize);
  for (int i = 0; i < consumerSize; i++) {
    final int instanceId = i;
    executor.submit(new Runnable() {
      @Override
      public void run() {
        try {
          startBarrier.await();
          LOG.info("Consumer {} starts consuming {}", instanceId, queueName.getSimpleName());
          try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(instanceId), 1)) {
            TransactionContext txContext = createTxContext(consumer);
            Stopwatch stopwatch = new Stopwatch();
            stopwatch.start();
            int dequeueCount = 0;
            while (valueSum.get() < expectedSum) {
              txContext.start();
              try {
                DequeueResult<byte[]> result = consumer.dequeue(dequeueBatchSize);
                txContext.finish();
                if (result.isEmpty()) {
                  continue;
                }
                for (byte[] data : result) {
                  valueSum.addAndGet(Bytes.toInt(data));
                  dequeueCount++;
                }
              } catch (TransactionFailureException e) {
                LOG.error("Operation error", e);
                txContext.abort();
                throw Throwables.propagate(e);
              }
            }
            long elapsed = stopwatch.elapsedTime(TimeUnit.MILLISECONDS);
            LOG.info("Dequeue {} entries in {} ms for {}", dequeueCount, elapsed, queueName.getSimpleName());
            LOG.info("Dequeue avg {} entries per second for {}",
                     (double) dequeueCount * 1000 / elapsed, queueName.getSimpleName());
            consumer.close();
            completeLatch.countDown();
          }
        } catch (Exception e) {
          LOG.error(e.getMessage(), e);
        }
      }
    });
  }
  startBarrier.await();
  completeLatch.await();
  Assert.assertEquals(expectedSum, valueSum.get());
  // Only check eviction for queue.
  if (!queueName.isStream()) {
    verifyQueueIsEmpty(queueName, consumerConfigs);
  }
  executor.shutdownNow();
}
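The consumer loop above follows a claim-then-commit pattern: entries returned by dequeue() are only marked as consumed when the surrounding transaction commits, so a failed transaction is aborted and the entries become dequeueable again. Below is a minimal sketch of that pattern factored into a hypothetical helper; the QueueConsumer and TransactionContext are assumed to be created the same way as in the test, and the package names are assumptions based on the CDAP/Tephra versions this project uses.

import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.data2.queue.DequeueResult;
import co.cask.cdap.data2.queue.QueueConsumer;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionFailureException;

final class DequeueSketch {
  // Hypothetical helper: dequeue one batch inside a transaction and return the sum of the int payloads.
  static long dequeueBatchSum(QueueConsumer consumer, TransactionContext txContext, int batchSize) throws Exception {
    txContext.start();
    try {
      DequeueResult<byte[]> result = consumer.dequeue(batchSize);
      // Committing marks the claimed entries as consumed; until then they stay in the queue.
      txContext.finish();
      long sum = 0;
      for (byte[] data : result) {
        sum += Bytes.toInt(data);
      }
      return sum;
    } catch (TransactionFailureException e) {
      // Roll back so the claimed entries can be dequeued again, by this or another instance.
      txContext.abort();
      throw e;
    }
  }
}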
use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.
the class QueueTest method testMultiStageConsumer.
@Test
public void testMultiStageConsumer() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "multistage");
  ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, 2, DequeueStrategy.HASH, "key");
  configureGroups(queueName, ImmutableList.of(groupConfig));
  List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(groupConfig, 0),
                                                          new ConsumerConfig(groupConfig, 1));
  // Enqueue 10 items
  try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
    for (int i = 0; i < 10; i++) {
      TransactionContext txContext = createTxContext(producer);
      txContext.start();
      producer.enqueue(new QueueEntry("key", i, Bytes.toBytes(i)));
      txContext.finish();
    }
  }
  // Consume all even entries
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 1)) {
    for (int i = 0; i < 5; i++) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      Assert.assertTrue(!result.isEmpty());
      Assert.assertEquals(i * 2, Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }
  // Consume 2 odd entries
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 1)) {
    TransactionContext txContext = createTxContext(consumer);
    txContext.start();
    DequeueResult<byte[]> result = consumer.dequeue(2);
    Assert.assertEquals(2, result.size());
    Iterator<byte[]> iter = result.iterator();
    for (int i = 0; i < 2; i++) {
      Assert.assertEquals(i * 2 + 1, Bytes.toInt(iter.next()));
    }
    txContext.finish();
  }
  // Consume the remaining odd entries with a newly created consumer
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 1)) {
    for (int i = 2; i < 5; i++) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      Assert.assertTrue(!result.isEmpty());
      Assert.assertEquals(i * 2 + 1, Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }
}
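The split between the two consumers is driven by the HASH dequeue strategy: each entry is enqueued with an int hash value under the key "key", and each instance of the two-instance group dequeues only the values that hash to it, which is why instance 0 sees the even values and instance 1 the odd ones in this test. A producer-side sketch of enqueueing with such a hash key follows; the producer and transaction context are assumed to come from the same factories as in the test, and the helper name is hypothetical.

import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.data2.queue.QueueEntry;
import co.cask.cdap.data2.queue.QueueProducer;
import org.apache.tephra.TransactionContext;

final class HashedEnqueueSketch {
  // Hypothetical helper: enqueue count entries, each carrying a hash value so a
  // HASH-partitioned consumer group can route them to a specific instance.
  static void enqueueHashed(QueueProducer producer, TransactionContext txContext, int count) throws Exception {
    for (int i = 0; i < count; i++) {
      txContext.start();
      // The hash key name ("key") must match the one declared in the ConsumerGroupConfig.
      producer.enqueue(new QueueEntry("key", i, Bytes.toBytes(i)));
      txContext.finish();
    }
  }
}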
use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.
the class HBaseQueueTest method testQueueUpgrade.
// This test upgrades from the old queue format (salt-based) to the new one (shard-based)
@Test(timeout = 30000L)
public void testQueueUpgrade() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "upgrade");
  HBaseQueueAdmin hbaseQueueAdmin = (HBaseQueueAdmin) queueAdmin;
  HBaseQueueClientFactory hBaseQueueClientFactory = (HBaseQueueClientFactory) queueClientFactory;
  // Create the old queue table explicitly
  HBaseQueueAdmin oldQueueAdmin = new HBaseQueueAdmin(hConf, cConf,
                                                      injector.getInstance(LocationFactory.class),
                                                      injector.getInstance(HBaseTableUtil.class),
                                                      injector.getInstance(DatasetFramework.class),
                                                      injector.getInstance(TransactionExecutorFactory.class),
                                                      QueueConstants.QueueType.QUEUE,
                                                      injector.getInstance(NamespaceQueryAdmin.class),
                                                      injector.getInstance(Impersonator.class));
  oldQueueAdmin.create(queueName);
  int buckets = cConf.getInt(QueueConstants.ConfigKeys.QUEUE_TABLE_PRESPLITS);
  try (final HBaseQueueProducer oldProducer = hBaseQueueClientFactory.createProducer(
         oldQueueAdmin, queueName, QueueConstants.QueueType.QUEUE, QueueMetrics.NOOP_QUEUE_METRICS,
         new SaltedHBaseQueueStrategy(tableUtil, buckets), new ArrayList<ConsumerGroupConfig>())) {
    // Enqueue 10 items to the old queue table
    Transactions.createTransactionExecutor(executorFactory, oldProducer).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        for (int i = 0; i < 10; i++) {
          oldProducer.enqueue(new QueueEntry("key", i, Bytes.toBytes("Message " + i)));
        }
      }
    });
  }
  // Configure the consumer
  final ConsumerConfig consumerConfig = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");
  try (QueueConfigurer configurer = queueAdmin.getQueueConfigurer(queueName)) {
    Transactions.createTransactionExecutor(executorFactory, configurer).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        configurer.configureGroups(ImmutableList.of(consumerConfig));
      }
    });
  }
  // Explicitly set the consumer state to the lowest start row
  try (HBaseConsumerStateStore stateStore = hbaseQueueAdmin.getConsumerStateStore(queueName)) {
    Transactions.createTransactionExecutor(executorFactory, stateStore).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        stateStore.updateState(consumerConfig.getGroupId(), consumerConfig.getInstanceId(),
                               QueueEntryRow.getQueueEntryRowKey(queueName, 0L, 0));
      }
    });
  }
  // Enqueue 10 more items to the new queue table
  createEnqueueRunnable(queueName, 10, 1, null).run();
  // Verify both the old and the new table have 10 rows each
  Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName, QueueConstants.QueueType.QUEUE)));
  Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName, QueueConstants.QueueType.SHARDED_QUEUE)));
  // Create a consumer. It should see all 20 items
  final List<String> messages = Lists.newArrayList();
  try (final QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
    while (messages.size() != 20) {
      Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer)
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            DequeueResult<byte[]> result = consumer.dequeue(20);
            for (byte[] data : result) {
              messages.add(Bytes.toString(data));
            }
          }
        });
    }
  }
  verifyQueueIsEmpty(queueName, ImmutableList.of(consumerConfig));
}
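Every queue operation in this test runs through a TransactionExecutor, which wraps start, commit, and rollback around the Subroutine so the test body never touches the TransactionContext directly. Below is a condensed sketch of the dequeue side of that pattern; the Transactions helper and the executor factory are assumed to be the same ones the test uses, the method name is hypothetical, and package names are assumptions based on this CDAP version.

import java.util.ArrayList;
import java.util.List;

import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.data2.queue.DequeueResult;
import co.cask.cdap.data2.queue.QueueConsumer;
import co.cask.cdap.data2.transaction.Transactions;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionExecutorFactory;

final class TransactionalDequeueSketch {
  // Hypothetical helper: perform one transactional dequeue and collect the payloads as strings.
  static List<String> dequeueOnce(TransactionExecutorFactory executorFactory,
                                  final QueueConsumer consumer, final int batchSize) throws Exception {
    final List<String> messages = new ArrayList<>();
    Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          // Entries are only marked consumed if this subroutine (and the transaction) succeeds.
          DequeueResult<byte[]> result = consumer.dequeue(batchSize);
          for (byte[] data : result) {
            messages.add(Bytes.toString(data));
          }
        }
      });
    return messages;
  }
}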
use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.
the class QueueTest method testClearOrDropAllForFlow.
private void testClearOrDropAllForFlow(boolean doDrop) throws Exception {
  // this test is the same for clear and drop, except for two small places...
  // using a different app name for each case as this test leaves some entries
  String app = doDrop ? "tDAFF" : "tCAFF";
  QueueName queueName1 = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), app, "flow1", "flowlet1", "out1");
  QueueName queueName2 = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), app, "flow1", "flowlet2", "out2");
  QueueName queueName3 = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), app, "flow2", "flowlet1", "out");
  List<ConsumerGroupConfig> groupConfigs = ImmutableList.of(
    new ConsumerGroupConfig(0L, 1, DequeueStrategy.FIFO, null),
    new ConsumerGroupConfig(1L, 1, DequeueStrategy.FIFO, null));
  configureGroups(queueName1, groupConfigs);
  configureGroups(queueName2, groupConfigs);
  configureGroups(queueName3, groupConfigs);
  try (QueueProducer producer1 = queueClientFactory.createProducer(queueName1);
       QueueProducer producer2 = queueClientFactory.createProducer(queueName2);
       QueueProducer producer3 = queueClientFactory.createProducer(queueName3)) {
    TransactionContext txContext = createTxContext(producer1, producer2, producer3);
    txContext.start();
    for (int i = 0; i < 10; i++) {
      for (QueueProducer producer : Arrays.asList(producer1, producer2, producer3)) {
        producer.enqueue(new QueueEntry(Bytes.toBytes(i)));
      }
    }
    txContext.finish();
  }
  // consume 1 element from each queue
  ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
  try (QueueConsumer consumer1 = queueClientFactory.createConsumer(queueName1, consumerConfig, 1);
       QueueConsumer consumer2 = queueClientFactory.createConsumer(queueName2, consumerConfig, 1);
       QueueConsumer consumer3 = queueClientFactory.createConsumer(queueName3, consumerConfig, 1)) {
    TransactionContext txContext = createTxContext(consumer1, consumer2, consumer3);
    txContext.start();
    for (QueueConsumer consumer : Arrays.asList(consumer1, consumer2, consumer3)) {
      DequeueResult<byte[]> result = consumer.dequeue(1);
      Assert.assertFalse(result.isEmpty());
      Assert.assertArrayEquals(Bytes.toBytes(0), result.iterator().next());
    }
    txContext.finish();
  }
  // verify the consumer config exists
  verifyConsumerConfigExists(queueName1, queueName2);
  // clear/drop all queues for flow1
  FlowId flow1Id = NamespaceId.DEFAULT.app(app).flow("flow1");
  if (doDrop) {
    queueAdmin.dropAllForFlow(flow1Id);
  } else {
    queueAdmin.clearAllForFlow(flow1Id);
  }
  if (doDrop) {
    // verify that only flow2's queues still exist
    Assert.assertFalse(queueAdmin.exists(queueName1));
    Assert.assertFalse(queueAdmin.exists(queueName2));
    Assert.assertTrue(queueAdmin.exists(queueName3));
  } else {
    // verify all queues still exist
    Assert.assertTrue(queueAdmin.exists(queueName1));
    Assert.assertTrue(queueAdmin.exists(queueName2));
    Assert.assertTrue(queueAdmin.exists(queueName3));
  }
  // verify the consumer config was deleted
  verifyConsumerConfigIsDeleted(queueName1, queueName2);
  // create new consumers because existing ones may have pre-fetched and cached some entries
  configureGroups(queueName1, groupConfigs);
  configureGroups(queueName2, groupConfigs);
  try (QueueConsumer consumer1 = queueClientFactory.createConsumer(queueName1, consumerConfig, 1);
       QueueConsumer consumer2 = queueClientFactory.createConsumer(queueName2, consumerConfig, 1);
       QueueConsumer consumer3 = queueClientFactory.createConsumer(queueName3, consumerConfig, 1)) {
    TransactionContext txContext = createTxContext(consumer1, consumer2, consumer3);
    txContext.start();
    // attempt to consume from flow1's queues, should be empty
    for (QueueConsumer consumer : Arrays.asList(consumer1, consumer2)) {
      DequeueResult<byte[]> result = consumer.dequeue(1);
      Assert.assertTrue(result.isEmpty());
    }
    // but flow2 was not deleted -> consumer 3 should get another entry
    DequeueResult<byte[]> result = consumer3.dequeue(1);
    Assert.assertFalse(result.isEmpty());
    Assert.assertArrayEquals(Bytes.toBytes(1), result.iterator().next());
    txContext.finish();
  }
}
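The two branches differ only in what survives: dropAllForFlow removes the flow's queues entirely (exists() turns false), while clearAllForFlow truncates them but leaves them in place; in both cases the flow's consumer configuration is wiped, which is why the test re-configures the groups and creates fresh consumers before checking for emptiness. A compact sketch of that admin-side decision follows; the queueAdmin fixture is assumed to be the same one the test uses, the method name is hypothetical, and package names are assumptions based on this CDAP version.

import co.cask.cdap.common.queue.QueueName;
import co.cask.cdap.data2.transaction.queue.QueueAdmin;
import co.cask.cdap.proto.id.FlowId;

final class FlowQueueWipeSketch {
  // Hypothetical helper: wipe a flow's queues and report whether a given queue still exists.
  static boolean wipeAndCheck(QueueAdmin queueAdmin, FlowId flowId, QueueName queueName,
                              boolean drop) throws Exception {
    if (drop) {
      queueAdmin.dropAllForFlow(flowId);   // queue tables for the flow are removed
    } else {
      queueAdmin.clearAllForFlow(flowId);  // tables stay, but entries and consumer state are gone
    }
    return queueAdmin.exists(queueName);   // false after drop, still true after clear
  }
}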
use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.
the class QueueTest method testDropAllQueues.
@Test
public void testDropAllQueues() throws Exception {
  // create a queue and enqueue one entry
  QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                              "myApp", "myFlow", "myFlowlet", "tDAQ");
  ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
  configureGroups(queueName, ImmutableList.of(consumerConfig));
  try (final QueueProducer qProducer = queueClientFactory.createProducer(queueName)) {
    executorFactory.createExecutor(Lists.newArrayList((TransactionAware) qProducer))
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          qProducer.enqueue(new QueueEntry(Bytes.toBytes("q42")));
        }
      });
    // drop all queues in the namespace
    queueAdmin.dropAllInNamespace(NamespaceId.DEFAULT);
    // verify that the queue is gone: a freshly configured consumer sees nothing
    configureGroups(queueName, ImmutableList.of(consumerConfig));
    try (final QueueConsumer qConsumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
      executorFactory.createExecutor(Lists.newArrayList((TransactionAware) qConsumer))
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            DequeueResult<byte[]> dequeue = qConsumer.dequeue();
            Assert.assertTrue(dequeue.isEmpty());
          }
        });
    }
  }
}
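dropAllInNamespace is the namespace-scoped counterpart of dropAllForFlow above; afterwards a consumer created against a re-configured group dequeues an empty result. This test also shows the second way these tests build a transactional executor, executorFactory.createExecutor with an explicit list of TransactionAware participants, instead of the Transactions.createTransactionExecutor helper. A condensed sketch of the verification step using that variant; the caller is assumed to have dropped the namespace's queues and re-created the consumer first, the method name is hypothetical, and package names are assumptions based on this CDAP version.

import com.google.common.collect.Lists;
import co.cask.cdap.data2.queue.DequeueResult;
import co.cask.cdap.data2.queue.QueueConsumer;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionExecutorFactory;

final class NamespaceDropSketch {
  // Hypothetical helper: confirm that a consumer created after dropAllInNamespace sees no entries.
  static void verifyNoEntries(TransactionExecutorFactory executorFactory,
                              final QueueConsumer freshConsumer) throws Exception {
    executorFactory.createExecutor(Lists.newArrayList((TransactionAware) freshConsumer))
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          DequeueResult<byte[]> result = freshConsumer.dequeue();
          if (!result.isEmpty()) {
            throw new IllegalStateException("Expected no entries after dropAllInNamespace");
          }
        }
      });
  }
}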