Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class DequeueScanAttributes, method readConsumerConfig:
public static ConsumerConfig readConsumerConfig(DataInput dataInput) throws IOException {
  long groupId = dataInput.readLong();
  int groupSize = dataInput.readInt();
  int instanceId = dataInput.readInt();
  DequeueStrategy strategy = WritableUtils.readEnum(dataInput, DequeueStrategy.class);
  String hashKey = WritableUtils.readString(dataInput);
  return new ConsumerConfig(groupId, instanceId, groupSize, strategy, hashKey);
}
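For reference, the serialization side must write the fields in exactly the same order the reader consumes them. A minimal sketch of that counterpart, assuming a writer method exists with this shape (the name and signature are assumptions, not taken from the CDAP source; the ConsumerConfig getters are the ones used elsewhere in these snippets):

public static void writeConsumerConfig(ConsumerConfig config, DataOutput out) throws IOException {
  // Field order must mirror readConsumerConfig: groupId, groupSize, instanceId, strategy, hashKey.
  out.writeLong(config.getGroupId());
  out.writeInt(config.getGroupSize());
  out.writeInt(config.getInstanceId());
  WritableUtils.writeEnum(out, config.getDequeueStrategy());
  WritableUtils.writeString(out, config.getHashKey());
}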
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class InMemoryStreamConsumerFactory, method create:
@Override
public StreamConsumer create(StreamId streamId, String namespace, ConsumerConfig consumerConfig) throws IOException {
  QueueName queueName = QueueName.fromStream(streamId);
  // Third argument is the number of consumer groups; -1 indicates it is not known here.
  QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, -1);
  return new QueueToStreamConsumer(streamId, consumerConfig, consumer);
}
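A ConsumerConfig bundles the group and instance coordinates that the factory passes through. A minimal usage sketch, assuming a factory instance and stream id are already in hand (consumerFactory, streamId, and the "default" namespace are illustrative, not from the CDAP source):

// One consumer group (id 0) with two instances; this config is for instance 0.
// FIFO needs no hash key, so the last constructor argument is null.
ConsumerConfig config = new ConsumerConfig(0L, 0, 2, DequeueStrategy.FIFO, null);
StreamConsumer consumer = consumerFactory.create(streamId, "default", config);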
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class ShardedHBaseQueueStrategy, method createHBaseScanner:
private ResultScanner createHBaseScanner(ConsumerConfig consumerConfig, HTable hTable, Scan scan, int numRows) throws IOException {
  // Modify the scan with the sharded key prefix.
  ScanBuilder shardedScan = tableUtil.buildScan(scan);
  // Roughly divide numRows by the number of buckets, but over-fetch by 10% so that
  // an imprecise estimate does not cost an extra RPC.
  int caching = (int) (1.1 * numRows / distributorBuckets);
  shardedScan.setCaching(caching);
  if (scan.getStartRow().length > 0) {
    byte[] rowKey = getShardedKey(consumerConfig, consumerConfig.getInstanceId(), scan.getStartRow());
    shardedScan.setStartRow(rowKey);
  }
  if (scan.getStopRow().length > 0) {
    byte[] rowKey = getShardedKey(consumerConfig, consumerConfig.getInstanceId(), scan.getStopRow());
    shardedScan.setStopRow(rowKey);
  }
  return DistributedScanner.create(hTable, shardedScan.build(), rowKeyDistributor, scansExecutor);
}
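To see the caching arithmetic concretely (the values below are illustrative, not from the CDAP source):

// With numRows = 100 and distributorBuckets = 8:
// 1.1 * 100 / 8 = 13.75, truncated to 13 rows cached per bucket scanner,
// so the 8 parallel scanners together fetch roughly 104 rows in their first round trip,
// covering the requested 100 rows without a second RPC.
int caching = (int) (1.1 * 100 / 8); // = 13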
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class QueueEntryRow, method canConsume:
/**
 * Looks at a specific queue entry and determines whether the consumer with the given consumer config and
 * current transaction can consume this entry. The answer can be
 * "yes" ({@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume#YES}),
 * "no" ({@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume#NO}), or
 * "no" with a hint that the given consumer cannot consume any of the entries prior to this one
 * ({@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume#NO_INCLUDING_ALL_OLDER}).
 * The latter allows for some optimizations when scanning entries to be consumed.
 *
 * @param consumerConfig config of the consumer
 * @param transaction current transaction
 * @param enqueueWritePointer write pointer used by the enqueue of this entry
 * @param counter counter of this entry
 * @param metaValue value of the meta column of this entry
 * @param stateValue value of the state column of this entry
 * @return one of {@link co.cask.cdap.data2.transaction.queue.QueueEntryRow.CanConsume} as per the description above
 */
public static CanConsume canConsume(ConsumerConfig consumerConfig, Transaction transaction, long enqueueWritePointer, int counter, byte[] metaValue, byte[] stateValue) {
  DequeueStrategy dequeueStrategy = consumerConfig.getDequeueStrategy();
  if (stateValue != null) {
    // If the state was written by the current transaction, ignore it: this entry is being processed right now.
    long stateWritePointer = QueueEntryRow.getStateWritePointer(stateValue);
    if (stateWritePointer == transaction.getWritePointer()) {
      return CanConsume.NO;
    }
    // If the state was updated by a different consumer instance that is still active, ignore this entry.
    // The assumption is that the corresponding instance is either processing it (claimed)
    // or going to process it (due to rollback/restart).
    // This only applies to FIFO; for HASH and ROUND_ROBIN, repartitioning must happen if the group size changes.
    int stateInstanceId = QueueEntryRow.getStateInstanceId(stateValue);
    if (dequeueStrategy == DequeueStrategy.FIFO && stateInstanceId < consumerConfig.getGroupSize() && stateInstanceId != consumerConfig.getInstanceId()) {
      return CanConsume.NO;
    }
    // If the state is PROCESSED and committed, ignore it:
    ConsumerEntryState state = QueueEntryRow.getState(stateValue);
    if (state == ConsumerEntryState.PROCESSED && transaction.isVisible(stateWritePointer)) {
      // Note: long-running transactions are ignored here, because we know they don't interact with queues.
      if (enqueueWritePointer < transaction.getFirstShortInProgress()) {
        return CanConsume.NO_INCLUDING_ALL_OLDER;
      }
      return CanConsume.NO;
    }
  }
  // Always try to process (claim) when using FIFO. The resolution is done by atomically setting the state to CLAIMED.
  int instanceId = consumerConfig.getInstanceId();
  if (dequeueStrategy == DequeueStrategy.ROUND_ROBIN) {
    instanceId = getRoundRobinConsumerInstance(enqueueWritePointer, counter, consumerConfig.getGroupSize());
  } else if (dequeueStrategy == DequeueStrategy.HASH) {
    try {
      Map<String, Integer> hashKeys = QueueEntry.deserializeHashKeys(metaValue);
      instanceId = getHashConsumerInstance(hashKeys, consumerConfig.getHashKey(), consumerConfig.getGroupSize());
    } catch (IOException e) {
      // Should never happen: metaValue was serialized by the enqueue path.
      throw new RuntimeException(e);
    }
  }
  return consumerConfig.getInstanceId() == instanceId ? CanConsume.YES : CanConsume.NO;
}
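For intuition, both non-FIFO strategies reduce to a deterministic mapping from entry to instance id, so every instance in the group reaches the same verdict independently. A plausible sketch of what those mappings could look like (these are hypothetical re-implementations for illustration only; the real getRoundRobinConsumerInstance and getHashConsumerInstance in QueueEntryRow may differ):

// Hypothetical sketch, not the CDAP source.
static int roundRobinInstance(long enqueueWritePointer, int counter, int groupSize) {
  // Mix the write pointer and counter so consecutive entries rotate across instances.
  int hash = (int) (enqueueWritePointer ^ (enqueueWritePointer >>> 32)) * 31 + counter;
  return (hash & Integer.MAX_VALUE) % groupSize;
}

static int hashInstance(Map<String, Integer> hashKeys, String hashKey, int groupSize) {
  Integer value = hashKeys.get(hashKey);
  // Entries enqueued without the consumer's hash key fall back to a fixed instance.
  return value == null ? 0 : (value & Integer.MAX_VALUE) % groupSize;
}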
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class DequeueScanObserver, method preScannerOpen:
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException {
  ConsumerConfig consumerConfig = DequeueScanAttributes.getConsumerConfig(scan);
  Transaction tx = DequeueScanAttributes.getTx(scan);
  if (consumerConfig == null || tx == null) {
    return super.preScannerOpen(e, scan, s);
  }
  Filter dequeueFilter = new DequeueFilter(consumerConfig, tx);
  Filter existing = scan.getFilter();
  if (existing != null) {
    Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, dequeueFilter);
    scan.setFilter(combined);
  } else {
    scan.setFilter(dequeueFilter);
  }
  return super.preScannerOpen(e, scan, s);
}
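This coprocessor only activates when the client has stamped the Scan with a serialized consumer config and transaction. A hedged sketch of what the client-side setup could look like (the DequeueScanAttributes setter names below are assumptions mirroring the getters used above, not verified against the CDAP source):

// Hypothetical client-side counterpart.
Scan scan = new Scan(startRow, stopRow);
DequeueScanAttributes.set(scan, consumerConfig); // assumed: serializes ConsumerConfig into a scan attribute
DequeueScanAttributes.set(scan, tx);             // assumed: serializes the Transaction likewise
ResultScanner scanner = hTable.getScanner(scan); // the coprocessor then attaches DequeueFilter server-side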