Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
From the class HBaseQueueClientFactory, method createConsumer:
@Override
public QueueConsumer createConsumer(final QueueName queueName,
                                    final ConsumerConfig consumerConfig,
                                    int numGroups) throws IOException {
  final HBaseQueueAdmin admin = ensureTableExists(queueName);
  try {
    final long groupId = consumerConfig.getGroupId();
    // A callback that creates a list of HBaseQueueConsumers
    // based on the current queue consumer state of the given group.
    Callable<List<HBaseQueueConsumer>> consumerCreator = new Callable<List<HBaseQueueConsumer>>() {
      @Override
      public List<HBaseQueueConsumer> call() throws Exception {
        List<HBaseConsumerState> states;
        try (HBaseConsumerStateStore stateStore = admin.getConsumerStateStore(queueName)) {
          TransactionExecutor txExecutor =
            Transactions.createTransactionExecutor(txExecutorFactory, stateStore);
          // Find all consumer states for consumers that need to be created, based on the current state.
          states = txExecutor.execute(new Callable<List<HBaseConsumerState>>() {
            @Override
            public List<HBaseConsumerState> call() throws Exception {
              List<HBaseConsumerState> consumerStates = Lists.newArrayList();
              HBaseConsumerState state = stateStore.getState(groupId, consumerConfig.getInstanceId());
              if (state.getPreviousBarrier() == null) {
                // Old HBase consumer (salt-based, not sharded)
                consumerStates.add(state);
                return consumerStates;
              }
              // Find the smallest start barrier that has something to consume for this instance.
              // It should always exist, since we assume the queue is configured before this method is called.
              List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
              if (queueBarriers.isEmpty()) {
                throw new IllegalStateException(
                  String.format("No consumer information available. Queue: %s, GroupId: %d, InstanceId: %d",
                                queueName, groupId, consumerConfig.getInstanceId()));
              }
              QueueBarrier startBarrier =
                Iterables.find(Lists.reverse(queueBarriers), new Predicate<QueueBarrier>() {
                  @Override
                  public boolean apply(QueueBarrier barrier) {
                    return barrier.getGroupConfig().getGroupSize() > consumerConfig.getInstanceId()
                      && stateStore.isAllConsumed(consumerConfig, barrier.getStartRow());
                  }
                }, queueBarriers.get(0));
              int groupSize = startBarrier.getGroupConfig().getGroupSize();
              for (int i = consumerConfig.getInstanceId(); i < groupSize; i += consumerConfig.getGroupSize()) {
                consumerStates.add(stateStore.getState(groupId, i));
              }
              return consumerStates;
            }
          });
        }
        List<HBaseQueueConsumer> consumers = Lists.newArrayList();
        for (HBaseConsumerState state : states) {
          QueueType queueType = (state.getPreviousBarrier() == null)
            ? QueueType.QUEUE : QueueType.SHARDED_QUEUE;
          HTable hTable = createHTable(admin.getDataTableId(queueName, queueType));
          int distributorBuckets = getDistributorBuckets(hTable.getTableDescriptor());
          HBaseQueueStrategy strategy = (state.getPreviousBarrier() == null)
            ? new SaltedHBaseQueueStrategy(hBaseTableUtil, distributorBuckets)
            : new ShardedHBaseQueueStrategy(hBaseTableUtil, distributorBuckets);
          consumers.add(queueUtil.getQueueConsumer(cConf, hTable, queueName, state,
                                                   admin.getConsumerStateStore(queueName), strategy));
        }
        return consumers;
      }
    };
    return new SmartQueueConsumer(queueName, consumerConfig, consumerCreator);
  } catch (Exception e) {
    // If there is an exception, nothing much can be done here besides propagating it.
    Throwables.propagateIfPossible(e);
    throw new IOException(e);
  }
}
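Stripped of the queue-specific details, the snippet combines two idioms: a value-returning Callable executed inside a short transaction, and consumer construction deferred behind a factory callback. A minimal sketch of the transactional part, assuming only a TransactionExecutorFactory and some TransactionAware dataset (the names `myStore` and `readSomething()` are hypothetical stand-ins, not CDAP API):

  // Minimal sketch of the transactional read pattern used above.
  TransactionExecutor txExecutor =
    Transactions.createTransactionExecutor(txExecutorFactory, myStore);

  // execute(Callable) starts a transaction, runs the body, commits on success,
  // and rolls back on failure, surfacing a TransactionFailureException.
  List<String> result = txExecutor.execute(new Callable<List<String>>() {
    @Override
    public List<String> call() throws Exception {
      return myStore.readSomething();  // hypothetical read method
    }
  });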
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
From the class ObjectMappedTableDatasetTest, method testGetSplits:
@Test
public void testGetSplits() throws Exception {
  dsFrameworkUtil.createInstance(ObjectMappedTable.class.getName(), RECORDS_ID,
                                 ObjectMappedTableProperties.builder().setType(Record.class).build());
  try {
    final ObjectMappedTableDataset<Record> records = dsFrameworkUtil.getInstance(RECORDS_ID);
    TransactionExecutor txnl = dsFrameworkUtil.newInMemoryTransactionExecutor((TransactionAware) records);
    final Record record = new Record(Integer.MAX_VALUE, Long.MAX_VALUE, Float.MAX_VALUE, Double.MAX_VALUE,
                                     "foobar", Bytes.toBytes("foobar"),
                                     ByteBuffer.wrap(Bytes.toBytes("foobar")), UUID.randomUUID());
    final byte[] rowkey = Bytes.toBytes("row1");
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        records.write(rowkey, record);
      }
    });
    // Should not include the record, since the upper bound is exclusive.
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        List<Split> splits = records.getSplits(1, null, rowkey);
        List<Record> recordsRead = new ArrayList<>();
        for (Split split : splits) {
          SplitReader<byte[], Record> splitReader = records.createSplitReader(split);
          try {
            splitReader.initialize(split);
            while (splitReader.nextKeyValue()) {
              recordsRead.add(splitReader.getCurrentValue());
            }
          } finally {
            splitReader.close();
          }
        }
        Assert.assertEquals(0, recordsRead.size());
      }
    });
    // Should include the record, since the lower bound is inclusive.
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        List<Split> splits = records.getSplits(1, rowkey, null);
        List<Record> recordsRead = new ArrayList<>();
        for (Split split : splits) {
          SplitReader<byte[], Record> splitReader = records.createSplitReader(split);
          try {
            splitReader.initialize(split);
            while (splitReader.nextKeyValue()) {
              recordsRead.add(splitReader.getCurrentValue());
            }
          } finally {
            splitReader.close();
          }
        }
        Assert.assertEquals(1, recordsRead.size());
        Assert.assertEquals(record, recordsRead.get(0));
      }
    });
  } finally {
    dsFrameworkUtil.deleteInstance(RECORDS_ID);
  }
}
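TransactionExecutor.Subroutine has a single abstract method, so on Java 8 and later the anonymous classes above can usually be collapsed into lambdas (the anonymous-class style here likely reflects the Java version the project targeted at the time). A sketch of the first write transaction, rewritten:

  // Same write transaction as above, expressed as a lambda (Java 8+).
  txnl.execute(() -> records.write(rowkey, record));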
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
From the class ObjectMappedTableDatasetTest, method testGetPutDelete:
@Test
public void testGetPutDelete() throws Exception {
  dsFrameworkUtil.createInstance(ObjectMappedTable.class.getName(), RECORDS_ID,
                                 ObjectMappedTableProperties.builder().setType(Record.class).build());
  try {
    final ObjectMappedTableDataset<Record> records = dsFrameworkUtil.getInstance(RECORDS_ID);
    TransactionExecutor txnl = dsFrameworkUtil.newInMemoryTransactionExecutor((TransactionAware) records);
    final Record record = new Record(Integer.MAX_VALUE, Long.MAX_VALUE, Float.MAX_VALUE, null,
                                     "foobar", Bytes.toBytes("foobar"),
                                     ByteBuffer.wrap(Bytes.toBytes("foobar")), UUID.randomUUID());
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        records.write("123", record);
      }
    });
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Record actual = records.read("123");
        Assert.assertEquals(record, actual);
      }
    });
    final Record record2 = new Record(Integer.MAX_VALUE, Long.MAX_VALUE, null, Double.MAX_VALUE,
                                      "foobar", Bytes.toBytes("foobar"),
                                      ByteBuffer.wrap(Bytes.toBytes("foobar")), UUID.randomUUID());
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        records.write("123", record2);
      }
    });
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Record actual = records.read("123");
        Assert.assertEquals(record2, actual);
      }
    });
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        records.delete("123");
      }
    });
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Assert.assertNull(records.read("123"));
      }
    });
  } finally {
    dsFrameworkUtil.deleteInstance(RECORDS_ID);
  }
}
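Note that each execute() call above runs in its own transaction, so every read observes the previously committed write or delete. Operations can also be grouped so they commit (or roll back) atomically; a sketch using the same dataset:

  // Sketch: grouping a write and a delete into a single transaction, so both
  // become visible together or not at all.
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      records.write("456", record);
      records.delete("456");
    }
  });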
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
From the class TimePartitionedFileSetTest, method testInputPartitionPaths:
/**
 * Tests that the TPFS sets the file input paths correctly for the input time range.
 */
@Test
public void testInputPartitionPaths() throws Exception {
  // Make sure the dataset has no partitions.
  final TimePartitionedFileSet tpfs = dsFrameworkUtil.getInstance(TPFS_INSTANCE);
  TransactionAware txAwareDataset = (TransactionAware) tpfs;
  TransactionExecutor txnl = dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset);
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      validateTimePartitions(tpfs, 0L, MAX, Collections.<Long, String>emptyMap());
    }
  });
  Date date = DATE_FORMAT.parse("6/4/12 10:00 am");
  final long time = date.getTime();
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      tpfs.addPartition(time, "file");
      tpfs.addPartition(time + 5 * MINUTE, "file5");
      tpfs.addPartition(time + 10 * MINUTE, "file10");
      tpfs.addPartition(time + 12 * MINUTE, "file12");
    }
  });
  validateInputPaths(time, -10, -5);
  validateInputPaths(time, -10, 2, "file");
  validateInputPaths(time, 1, 11, "file5", "file10");
  validateInputPaths(time, 1, 15, "file5", "file10", "file12");
  validateInputPaths(time, 5, 10, "file5");
}
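The validateInputPaths offsets are minutes relative to `time`, and the selected range is start-inclusive and end-exclusive: (5, 10) matches only "file5" (the partition at +5 minutes), while the partition at +10 minutes falls outside. The helper's body is not shown in this snippet; a hedged sketch of how such a range could be set up with CDAP's TimePartitionedFileSetArguments (the helper's exact mechanics are an assumption):

  // Hypothetical sketch of the range selection presumed inside validateInputPaths.
  // Offsets are minutes relative to `time`; start is inclusive, end is exclusive.
  Map<String, String> arguments = new HashMap<>();
  TimePartitionedFileSetArguments.setInputStartTime(arguments, time + 5 * MINUTE);
  TimePartitionedFileSetArguments.setInputEndTime(arguments, time + 10 * MINUTE);
  // A TPFS instance obtained with these arguments exposes only partitions whose
  // timestamp falls in [start, end) -- here just the one behind "file5".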
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
From the class MapReduceProgramRunnerTest, method fillTestInputData:
private void fillTestInputData(TransactionExecutorFactory txExecutorFactory,
                               final TimeseriesTable table,
                               final boolean withBadData)
    throws TransactionFailureException, InterruptedException {
  TransactionExecutor executor = Transactions.createTransactionExecutor(txExecutorFactory, table);
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      fillTestInputData(table, withBadData);
    }
  });
}
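Tephra's TransactionExecutor also offers overloads that pass the input into the transactional body as a parameter (Procedure and Function), which avoids capturing the table as a final local. A sketch of the same helper using the Procedure overload, assuming the standard Tephra interface:

  // Alternative sketch: the Procedure overload hands the table to the
  // transactional body instead of capturing it from the enclosing scope.
  private void fillTestInputData(TransactionExecutorFactory txExecutorFactory,
                                 TimeseriesTable table,
                                 final boolean withBadData)
      throws TransactionFailureException, InterruptedException {
    Transactions.createTransactionExecutor(txExecutorFactory, table)
      .execute(new TransactionExecutor.Procedure<TimeseriesTable>() {
        @Override
        public void apply(TimeseriesTable t) {
          fillTestInputData(t, withBadData);
        }
      }, table);
  }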