Use of co.cask.cdap.data2.queue.DequeueStrategy in project cdap by caskdata.
The class QueueEntryRow, method canConsume.
/**
 * Looks at a specific queue entry and determines whether a consumer with the given consumer config and
 * current transaction can consume this entry. The answer can be
 * "yes" ({@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume#YES}),
 * "no" ({@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume#NO}),
 * or "no" with a hint that the given consumer cannot consume any of the entries prior to this one
 * ({@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume#NO_INCLUDING_ALL_OLDER}).
 * The latter allows for some optimizations when scanning for entries to be consumed.
 *
 * @param consumerConfig config of the consumer
 * @param transaction current transaction
 * @param enqueueWritePointer write pointer used by the enqueue of this entry
 * @param counter counter of this entry
 * @param metaValue value of the meta column of this entry
 * @param stateValue value of the state column of this entry
 * @return one of {@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume} as per the description above.
 */
public static CanConsume canConsume(ConsumerConfig consumerConfig, Transaction transaction,
                                    long enqueueWritePointer, int counter,
                                    byte[] metaValue, byte[] stateValue) {
  DequeueStrategy dequeueStrategy = consumerConfig.getDequeueStrategy();
  if (stateValue != null) {
    // If the state was written by the current transaction, ignore the entry: this consumer is already processing it.
    long stateWritePointer = QueueEntryRow.getStateWritePointer(stateValue);
    if (stateWritePointer == transaction.getWritePointer()) {
      return CanConsume.NO;
    }
    // If the state was updated by a different consumer instance that is still active, ignore this entry.
    // The assumption is that the corresponding instance is either processing it (claimed)
    // or going to process it (due to rollback/restart).
    // This only applies to FIFO; for hash and round-robin, repartitioning happens if the group size changes.
    int stateInstanceId = QueueEntryRow.getStateInstanceId(stateValue);
    if (dequeueStrategy == DequeueStrategy.FIFO
        && stateInstanceId < consumerConfig.getGroupSize()
        && stateInstanceId != consumerConfig.getInstanceId()) {
      return CanConsume.NO;
    }
    // If the state is PROCESSED and committed, ignore the entry:
    ConsumerEntryState state = QueueEntryRow.getState(stateValue);
    if (state == ConsumerEntryState.PROCESSED && transaction.isVisible(stateWritePointer)) {
      // Note: long-running transactions are ignored here, because they are known not to interact with queues.
      if (enqueueWritePointer < transaction.getFirstShortInProgress()) {
        return CanConsume.NO_INCLUDING_ALL_OLDER;
      }
      return CanConsume.NO;
    }
  }
  // Always try to process (claim) if using FIFO. Contention is resolved by atomically setting the state to CLAIMED.
  int instanceId = consumerConfig.getInstanceId();
  if (dequeueStrategy == DequeueStrategy.ROUND_ROBIN) {
    instanceId = getRoundRobinConsumerInstance(enqueueWritePointer, counter, consumerConfig.getGroupSize());
  } else if (dequeueStrategy == DequeueStrategy.HASH) {
    try {
      Map<String, Integer> hashKeys = QueueEntry.deserializeHashKeys(metaValue);
      instanceId = getHashConsumerInstance(hashKeys, consumerConfig.getHashKey(), consumerConfig.getGroupSize());
    } catch (IOException e) {
      // Should never happen: the meta column was written by enqueue and is always deserializable.
      throw new RuntimeException(e);
    }
  }
  return consumerConfig.getInstanceId() == instanceId ? CanConsume.YES : CanConsume.NO;
}
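
The NO_INCLUDING_ALL_OLDER hint is what makes the scan optimization mentioned in the javadoc possible. The following is a minimal sketch, not CDAP's actual scan code, of how a caller could use it: ScannedRow and the scanStartRow bookkeeping are hypothetical, and only the canConsume call itself is taken from the snippet above (Transaction here is Tephra's; the package is co.cask.tephra in this CDAP era, org.apache.tephra in later ones).

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import co.cask.cdap.data2.queue.ConsumerConfig;
import co.cask.cdap.data2.transaction.queue.QueueEntryRow;
import co.cask.tephra.Transaction;

// Hypothetical holder for the columns read from one scanned queue-entry row.
final class ScannedRow {
  byte[] rowKey;
  long enqueueWritePointer;
  int counter;
  byte[] metaValue;
  byte[] stateValue;
  byte[] data;
}

final class DequeueScanSketch {
  private byte[] scanStartRow; // hypothetical: where the next scan resumes

  // Collects up to batchSize consumable payloads, advancing the scan start row
  // whenever canConsume reports NO_INCLUDING_ALL_OLDER.
  List<byte[]> collect(Iterator<ScannedRow> scanner, ConsumerConfig config,
                       Transaction tx, int batchSize) {
    List<byte[]> eligible = new ArrayList<>();
    while (scanner.hasNext() && eligible.size() < batchSize) {
      ScannedRow row = scanner.next();
      QueueEntryRow.CanConsume result = QueueEntryRow.canConsume(
          config, tx, row.enqueueWritePointer, row.counter, row.metaValue, row.stateValue);
      if (result == QueueEntryRow.CanConsume.YES) {
        eligible.add(row.data);
      } else if (result == QueueEntryRow.CanConsume.NO_INCLUDING_ALL_OLDER) {
        // Neither this entry nor any older one can ever become consumable for this
        // consumer, so future scans can safely start after this row instead of
        // rescanning all the older entries.
        scanStartRow = row.rowKey;
      }
    }
    return eligible;
  }
}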
Use of co.cask.cdap.data2.queue.DequeueStrategy in project cdap by caskdata.
The class QueueTest, method enqueueDequeue.
private void enqueueDequeue(final QueueName queueName, int preEnqueueCount, int concurrentCount,
                            int enqueueBatchSize, int consumerSize, DequeueStrategy dequeueStrategy,
                            final int dequeueBatchSize) throws Exception {
  ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, consumerSize, dequeueStrategy, "key");
  configureGroups(queueName, ImmutableList.of(groupConfig));
  Preconditions.checkArgument(preEnqueueCount % enqueueBatchSize == 0,
                              "preEnqueueCount must be divisible by enqueueBatchSize");
  Preconditions.checkArgument(concurrentCount % enqueueBatchSize == 0,
                              "concurrentCount must be divisible by enqueueBatchSize");
  final List<ConsumerConfig> consumerConfigs = Lists.newArrayList();
  for (int i = 0; i < consumerSize; i++) {
    consumerConfigs.add(new ConsumerConfig(groupConfig, i));
  }
  createEnqueueRunnable(queueName, preEnqueueCount, enqueueBatchSize, null).run();
  final CyclicBarrier startBarrier = new CyclicBarrier(consumerSize + 2);
  ExecutorService executor = Executors.newFixedThreadPool(consumerSize + 1);
  // Enqueue thread
  executor.submit(createEnqueueRunnable(queueName, concurrentCount, enqueueBatchSize, startBarrier));
  // Dequeue. Each phase enqueues the values 0..n-1, whose sum is n * (n - 1) / 2;
  // multiplying before dividing keeps the arithmetic exact for odd counts as well.
  final long expectedSum = (long) preEnqueueCount * (preEnqueueCount - 1) / 2
    + (long) concurrentCount * (concurrentCount - 1) / 2;
  final AtomicLong valueSum = new AtomicLong();
  final CountDownLatch completeLatch = new CountDownLatch(consumerSize);
  for (int i = 0; i < consumerSize; i++) {
    final int instanceId = i;
    executor.submit(new Runnable() {
      @Override
      public void run() {
        try {
          startBarrier.await();
          LOG.info("Consumer {} starts consuming {}", instanceId, queueName.getSimpleName());
          try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(instanceId), 1)) {
            TransactionContext txContext = createTxContext(consumer);
            Stopwatch stopwatch = new Stopwatch();
            stopwatch.start();
            int dequeueCount = 0;
            while (valueSum.get() < expectedSum) {
              txContext.start();
              try {
                DequeueResult<byte[]> result = consumer.dequeue(dequeueBatchSize);
                txContext.finish();
                if (result.isEmpty()) {
                  continue;
                }
                for (byte[] data : result) {
                  valueSum.addAndGet(Bytes.toInt(data));
                  dequeueCount++;
                }
              } catch (TransactionFailureException e) {
                LOG.error("Operation error", e);
                txContext.abort();
                throw Throwables.propagate(e);
              }
            }
            long elapsed = stopwatch.elapsedTime(TimeUnit.MILLISECONDS);
            LOG.info("Dequeue {} entries in {} ms for {}", dequeueCount, elapsed, queueName.getSimpleName());
            LOG.info("Dequeue avg {} entries per second for {}",
                     (double) dequeueCount * 1000 / elapsed, queueName.getSimpleName());
          }
          // The consumer is closed by try-with-resources before signaling completion.
          completeLatch.countDown();
        } catch (Exception e) {
          LOG.error(e.getMessage(), e);
        }
      }
    });
  }
  startBarrier.await();
  completeLatch.await();
  Assert.assertEquals(expectedSum, valueSum.get());
  // Only check eviction for queues.
  if (!queueName.isStream()) {
    verifyQueueIsEmpty(queueName, consumerConfigs);
  }
  executor.shutdownNow();
}
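
For context, a driver for this helper might look like the following. This is a hypothetical sketch: the queue names, counts, and batch sizes are illustrative, not the values used in the actual QueueTest, and it assumes a QueueName.fromFlowlet factory as in CDAP (its argument list varies across versions).

// Hypothetical JUnit drivers exercising each DequeueStrategy through enqueueDequeue.
@Test
public void testFifoConcurrent() throws Exception {
  enqueueDequeue(QueueName.fromFlowlet("default", "app", "flow", "flowlet", "fifo"),
                 1000, 1000, 10, 3, DequeueStrategy.FIFO, 10);
}

@Test
public void testRoundRobinConcurrent() throws Exception {
  enqueueDequeue(QueueName.fromFlowlet("default", "app", "flow", "flowlet", "rr"),
                 1000, 1000, 10, 3, DequeueStrategy.ROUND_ROBIN, 10);
}

@Test
public void testHashConcurrent() throws Exception {
  // The hash key "key" matches the ConsumerGroupConfig created inside enqueueDequeue.
  enqueueDequeue(QueueName.fromFlowlet("default", "app", "flow", "flowlet", "hash"),
                 1000, 1000, 10, 3, DequeueStrategy.HASH, 10);
}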