Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class QueueTest, method testMultiStageConsumer.
@Test
public void testMultiStageConsumer() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "multistage");
  ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, 2, DequeueStrategy.HASH, "key");
  configureGroups(queueName, ImmutableList.of(groupConfig));
  List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(groupConfig, 0),
                                                          new ConsumerConfig(groupConfig, 1));

  // Enqueue 10 items
  try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
    for (int i = 0; i < 10; i++) {
      TransactionContext txContext = createTxContext(producer);
      txContext.start();
      producer.enqueue(new QueueEntry("key", i, Bytes.toBytes(i)));
      txContext.finish();
    }
  }

  // Consume all even entries with consumer instance 0
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 1)) {
    for (int i = 0; i < 5; i++) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      Assert.assertFalse(result.isEmpty());
      Assert.assertEquals(i * 2, Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }

  // Consume two of the odd entries with consumer instance 1
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 1)) {
    TransactionContext txContext = createTxContext(consumer);
    txContext.start();
    DequeueResult<byte[]> result = consumer.dequeue(2);
    Assert.assertEquals(2, result.size());
    Iterator<byte[]> iter = result.iterator();
    for (int i = 0; i < 2; i++) {
      Assert.assertEquals(i * 2 + 1, Bytes.toInt(iter.next()));
    }
    txContext.finish();
  }

  // Consume the remaining odd entries
  try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 1)) {
    for (int i = 2; i < 5; i++) {
      TransactionContext txContext = createTxContext(consumer);
      txContext.start();
      DequeueResult<byte[]> result = consumer.dequeue();
      Assert.assertFalse(result.isEmpty());
      Assert.assertEquals(i * 2 + 1, Bytes.toInt(result.iterator().next()));
      txContext.finish();
    }
  }
}
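The even/odd split that the test asserts follows from the HASH dequeue strategy: with a group size of 2 and the hash key "key" set to the entry value, even hash values go to instance 0 and odd values to instance 1. The following is a minimal plain-Java sketch of that assignment, assuming the usual hashValue % groupSize mapping (which is what the assertions above imply), not CDAP's actual routing code:

// Sketch only: illustrates the instance assignment the test above relies on,
// assuming HASH dequeue routes an entry to instance (hashValue % groupSize).
public final class HashPartitionSketch {
  public static void main(String[] args) {
    int groupSize = 2; // two consumer instances, as configured in the test
    for (int hashValue = 0; hashValue < 10; hashValue++) {
      int instance = hashValue % groupSize; // even values -> instance 0, odd values -> instance 1
      System.out.println("entry with hash value " + hashValue + " -> consumer instance " + instance);
    }
  }
}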
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class HBaseQueueTest, method testQueueUpgrade.
// This test upgrades from the old queue format (salted-based) to the new queue format (sharded-based)
@Test(timeout = 30000L)
public void testQueueUpgrade() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "upgrade");
  HBaseQueueAdmin hbaseQueueAdmin = (HBaseQueueAdmin) queueAdmin;
  HBaseQueueClientFactory hBaseQueueClientFactory = (HBaseQueueClientFactory) queueClientFactory;

  // Create the old queue table explicitly
  HBaseQueueAdmin oldQueueAdmin = new HBaseQueueAdmin(hConf, cConf,
                                                      injector.getInstance(LocationFactory.class),
                                                      injector.getInstance(HBaseTableUtil.class),
                                                      injector.getInstance(DatasetFramework.class),
                                                      injector.getInstance(TransactionExecutorFactory.class),
                                                      QueueConstants.QueueType.QUEUE,
                                                      injector.getInstance(NamespaceQueryAdmin.class),
                                                      injector.getInstance(Impersonator.class));
  oldQueueAdmin.create(queueName);

  int buckets = cConf.getInt(QueueConstants.ConfigKeys.QUEUE_TABLE_PRESPLITS);
  try (final HBaseQueueProducer oldProducer = hBaseQueueClientFactory.createProducer(
         oldQueueAdmin, queueName, QueueConstants.QueueType.QUEUE, QueueMetrics.NOOP_QUEUE_METRICS,
         new SaltedHBaseQueueStrategy(tableUtil, buckets), new ArrayList<ConsumerGroupConfig>())) {
    // Enqueue 10 items to the old queue table
    Transactions.createTransactionExecutor(executorFactory, oldProducer).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        for (int i = 0; i < 10; i++) {
          oldProducer.enqueue(new QueueEntry("key", i, Bytes.toBytes("Message " + i)));
        }
      }
    });
  }

  // Configure the consumer
  final ConsumerConfig consumerConfig = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");
  try (QueueConfigurer configurer = queueAdmin.getQueueConfigurer(queueName)) {
    Transactions.createTransactionExecutor(executorFactory, configurer).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        configurer.configureGroups(ImmutableList.of(consumerConfig));
      }
    });
  }

  // Explicitly set the consumer state to the lowest start row
  try (HBaseConsumerStateStore stateStore = hbaseQueueAdmin.getConsumerStateStore(queueName)) {
    Transactions.createTransactionExecutor(executorFactory, stateStore).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        stateStore.updateState(consumerConfig.getGroupId(), consumerConfig.getInstanceId(),
                               QueueEntryRow.getQueueEntryRowKey(queueName, 0L, 0));
      }
    });
  }

  // Enqueue 10 more items to the new queue table
  createEnqueueRunnable(queueName, 10, 1, null).run();

  // Verify that both the old and the new table have 10 rows each
  Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName, QueueConstants.QueueType.QUEUE)));
  Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName, QueueConstants.QueueType.SHARDED_QUEUE)));

  // Create a consumer. It should see all 20 items
  final List<String> messages = Lists.newArrayList();
  try (final QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
    while (messages.size() != 20) {
      Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer)
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            DequeueResult<byte[]> result = consumer.dequeue(20);
            for (byte[] data : result) {
              messages.add(Bytes.toString(data));
            }
          }
        });
    }
  }
  verifyQueueIsEmpty(queueName, ImmutableList.of(consumerConfig));
}
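Note that this snippet constructs the ConsumerConfig directly with five arguments, while the previous snippet builds it from a ConsumerGroupConfig plus an instance id. The sketch below relates the two styles, based only on the usages shown on this page; the argument order (groupId, instanceId, groupSize, dequeue strategy, hash key) and the package of the related classes are assumptions, not confirmed API documentation.

// Assumed imports: ConsumerConfig is documented above as co.cask.cdap.data2.queue.ConsumerConfig,
// and the related classes are assumed to live in the same package.
import co.cask.cdap.data2.queue.ConsumerConfig;
import co.cask.cdap.data2.queue.ConsumerGroupConfig;
import co.cask.cdap.data2.queue.DequeueStrategy;

public final class ConsumerConfigSketch {
  public static void main(String[] args) {
    // Style used in HBaseQueueTest: groupId, instanceId, groupSize, strategy, hash key (assumed order)
    ConsumerConfig direct = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");

    // Style used in QueueTest: a group config plus the instance id within that group
    ConsumerGroupConfig group = new ConsumerGroupConfig(0L, 1, DequeueStrategy.HASH, "key");
    ConsumerConfig viaGroup = new ConsumerConfig(group, 0);

    // Both are expected to describe the same single-instance consumer of group 0
    System.out.println(direct.getGroupId() + " / " + direct.getInstanceId());
    System.out.println(viaGroup.getGroupId() + " / " + viaGroup.getInstanceId());
  }
}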
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class LevelDBStreamFileConsumerFactory, method create.
@Override
protected StreamConsumer create(TableId tableId, StreamConfig streamConfig, ConsumerConfig consumerConfig,
                                StreamConsumerStateStore stateStore, StreamConsumerState beginConsumerState,
                                FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader,
                                @Nullable ReadFilter extraFilter) throws IOException {
  String tableName = fromTableId(tableId);
  tableService.ensureTableExists(tableName);
  LevelDBTableCore tableCore = new LevelDBTableCore(tableName, tableService);
  Object dbLock = getDBLock(tableName);
  return new LevelDBStreamFileConsumer(cConf, streamConfig, consumerConfig, reader, stateStore,
                                       beginConsumerState, extraFilter, tableCore, dbLock);
}
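The create method hands each consumer a per-table lock object obtained from getDBLock(tableName), so consumers of the same LevelDB table can synchronize on a single monitor. The actual getDBLock implementation is not shown here; the following is a hypothetical sketch of one common way such a lock registry can be built.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical sketch of a per-table lock registry; not the actual getDBLock implementation.
public final class TableLockRegistry {
  private final ConcurrentMap<String, Object> locks = new ConcurrentHashMap<>();

  // Returns the same lock object for every call with the same table name,
  // so all consumers of one table synchronize on one monitor.
  public Object getDBLock(String tableName) {
    return locks.computeIfAbsent(tableName, name -> new Object());
  }
}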
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class DequeueFilter, method filterRowCells.
@Override
public void filterRowCells(List<Cell> cells) {
  byte[] dataBytes = null;
  byte[] metaBytes = null;
  byte[] stateBytes = null;
  // The list is very short, so it is OK to loop through it to find the columns
  for (Cell cell : cells) {
    if (CellUtil.matchingQualifier(cell, QueueEntryRow.DATA_COLUMN)) {
      dataBytes = CellUtil.cloneValue(cell);
    } else if (CellUtil.matchingQualifier(cell, QueueEntryRow.META_COLUMN)) {
      metaBytes = CellUtil.cloneValue(cell);
    } else if (CellUtil.matchingQualifier(cell, stateColumnName)) {
      stateBytes = CellUtil.cloneValue(cell);
    }
  }
  if (dataBytes == null || metaBytes == null) {
    skipRow = true;
    return;
  }
  QueueEntryRow.CanConsume canConsume =
    QueueEntryRow.canConsume(consumerConfig, transaction, writePointer, counter, metaBytes, stateBytes);
  // Only skip the row when canConsume == NO, so that in the case of NO_INCLUDING_ALL_OLDER the client
  // can still see the row and move the scan start row forward.
  skipRow = canConsume == QueueEntryRow.CanConsume.NO;
}
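The last two lines encode the filter's skip decision: only a plain NO skips the row, while NO_INCLUDING_ALL_OLDER deliberately leaves the row visible so the client can advance its scan start row. The following plain-Java sketch mirrors that decision with a stand-in enum; the YES constant and the complete set of CanConsume values are assumptions, since only NO and NO_INCLUDING_ALL_OLDER appear in the snippet above.

// Sketch only: mirrors the skip decision in filterRowCells above with a stand-in enum.
public final class SkipDecisionSketch {
  enum CanConsume { YES, NO, NO_INCLUDING_ALL_OLDER }

  static boolean skipRow(CanConsume canConsume) {
    // Skip only on NO; NO_INCLUDING_ALL_OLDER stays visible so the scan start row can move forward.
    return canConsume == CanConsume.NO;
  }

  public static void main(String[] args) {
    System.out.println(skipRow(CanConsume.NO));                      // true
    System.out.println(skipRow(CanConsume.NO_INCLUDING_ALL_OLDER));  // false
    System.out.println(skipRow(CanConsume.YES));                     // false
  }
}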
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class DequeueScanObserver, method preScannerOpen.
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s)
  throws IOException {
  ConsumerConfig consumerConfig = DequeueScanAttributes.getConsumerConfig(scan);
  Transaction tx = DequeueScanAttributes.getTx(scan);
  if (consumerConfig == null || tx == null) {
    return super.preScannerOpen(e, scan, s);
  }
  Filter dequeueFilter = new DequeueFilter(consumerConfig, tx);
  Filter existing = scan.getFilter();
  if (existing != null) {
    Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, dequeueFilter);
    scan.setFilter(combined);
  } else {
    scan.setFilter(dequeueFilter);
  }
  return super.preScannerOpen(e, scan, s);
}