Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class ReflectionTableTest, method testStructuredRecordRepresentation:
@Test
public void testStructuredRecordRepresentation() throws Exception {
  dsFrameworkUtil.createInstance("table", users, DatasetProperties.builder().build());
  try {
    final Table usersTable = dsFrameworkUtil.getInstance(users);
    final byte[] rowKey = Bytes.toBytes(123);
    final Schema schema = new ReflectionSchemaGenerator().generate(User.class);
    // TableDataset is not accessible here, but we know that's the underlying implementation...
    TransactionExecutor tx = dsFrameworkUtil.newTransactionExecutor((TransactionAware) usersTable);
    tx.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Put put = new Put(rowKey);
        ReflectionPutWriter<User> putWriter = new ReflectionPutWriter<>(schema);
        putWriter.write(SAMUEL, put);
        usersTable.put(put);
        Row row = usersTable.get(rowKey);
        ReflectionRowRecordReader rowReader = new ReflectionRowRecordReader(schema, null);
        StructuredRecord actual = rowReader.read(row, schema);
        assertRecordEqualsUser(SAMUEL, actual);
      }
    });
  } finally {
    dsFrameworkUtil.deleteInstance(users);
  }
}
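The assertRecordEqualsUser helper and the SAMUEL fixture are defined elsewhere in ReflectionTableTest and are not shown above. A minimal sketch of what such a helper might look like, assuming a User POJO with name and age fields (the field names and getters here are illustrative guesses, not the actual test code):

// Hypothetical sketch; the User field names and getters are assumptions.
private void assertRecordEqualsUser(User expected, StructuredRecord actual) {
  Assert.assertEquals(expected.getName(), actual.get("name"));
  Assert.assertEquals(expected.getAge(), actual.<Integer>get("age").intValue());
}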
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class TimeseriesTableTest, method testDataSet:
@Test
public void testDataSet() throws Exception {
  TransactionExecutor txnl = dsFrameworkUtil.newTransactionExecutor(table);
  // this test runs all operations synchronously
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      byte[] metric1 = Bytes.toBytes("metric1");
      byte[] metric2 = Bytes.toBytes("metric2");
      byte[] tag1 = Bytes.toBytes("111");
      byte[] tag2 = Bytes.toBytes("22");
      byte[] tag3 = Bytes.toBytes("3");
      byte[] tag4 = Bytes.toBytes("123");
      long hour = TimeUnit.HOURS.toMillis(1);
      long second = TimeUnit.SECONDS.toMillis(1);
      long ts = System.currentTimeMillis();
      // m1e1 = metric: 1, entity: 1
      TimeseriesTable.Entry m1e1 = new TimeseriesTable.Entry(metric1, Bytes.toBytes(3L), ts, tag3, tag2, tag1);
      table.write(m1e1);
      TimeseriesTable.Entry m1e2 = new TimeseriesTable.Entry(metric1, Bytes.toBytes(10L), ts + 2 * second, tag3);
      table.write(m1e2);
      TimeseriesTable.Entry m1e3 = new TimeseriesTable.Entry(metric1, Bytes.toBytes(15L), ts + 2 * hour, tag1);
      table.write(m1e3);
      TimeseriesTable.Entry m1e4 = new TimeseriesTable.Entry(metric1, Bytes.toBytes(23L), ts + 3 * hour, tag2, tag3);
      table.write(m1e4);
      TimeseriesTable.Entry m1e5 = new TimeseriesTable.Entry(metric1, Bytes.toBytes(55L), ts + 3 * hour + 2 * second);
      table.write(m1e5);
      TimeseriesTable.Entry m2e1 = new TimeseriesTable.Entry(metric2, Bytes.toBytes(4L), ts);
      table.write(m2e1);
      TimeseriesTable.Entry m2e2 = new TimeseriesTable.Entry(metric2, Bytes.toBytes(11L), ts + 2 * second, tag2);
      table.write(m2e2);
      TimeseriesTable.Entry m2e3 = new TimeseriesTable.Entry(metric2, Bytes.toBytes(16L), ts + 2 * hour, tag2);
      table.write(m2e3);
      TimeseriesTable.Entry m2e4 = new TimeseriesTable.Entry(metric2, Bytes.toBytes(24L), ts + 3 * hour, tag1, tag3);
      table.write(m2e4);
      TimeseriesTable.Entry m2e5 = new TimeseriesTable.Entry(metric2, Bytes.toBytes(56L), ts + 3 * hour + 2 * second, tag3, tag1);
      table.write(m2e5);
      // the whole interval is searched
      assertReadResult(table.read(metric1, ts, ts + 5 * hour), m1e1, m1e2, m1e3, m1e4, m1e5);
      assertReadResult(table.read(metric1, ts, ts + 5 * hour, tag2), m1e1, m1e4);
      assertReadResult(table.read(metric1, ts, ts + 5 * hour, tag4));
      assertReadResult(table.read(metric1, ts, ts + 5 * hour, tag2, tag4));
      // This is an extreme case that should not really be used by anyone. Still, we want to test that it won't fail.
      // It returns nothing because there is a hard limit on the number of rows traversed during the read.
      assertReadResult(table.read(metric1, 0, Long.MAX_VALUE));
      // test a paginated read
      assertReadResult(table.read(metric1, ts, ts + 5 * hour, 1, 2), m1e2, m1e3);
      // part of the interval
      assertReadResult(table.read(metric1, ts + second, ts + 2 * second), m1e2);
      assertReadResult(table.read(metric1, ts + hour, ts + 3 * hour), m1e3, m1e4);
      assertReadResult(table.read(metric1, ts + second, ts + 3 * hour), m1e2, m1e3, m1e4);
      assertReadResult(table.read(metric1, ts + second, ts + 3 * hour, tag3), m1e2, m1e4);
      assertReadResult(table.read(metric1, ts + second, ts + 3 * hour, tag3, tag2), m1e4);
      // different metric
      assertReadResult(table.read(metric2, ts + hour, ts + 3 * hour, tag2), m2e3);
    }
  });
}
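The assertReadResult helper is another private method of the test class that is not shown here. A plausible sketch, assuming TimeseriesTable.read returns an Iterator<TimeseriesTable.Entry> and that comparing values and timestamps is sufficient (the real helper may also compare tags):

// Sketch only; the actual helper in TimeseriesTableTest may differ.
private void assertReadResult(Iterator<TimeseriesTable.Entry> result, TimeseriesTable.Entry... expected) {
  List<TimeseriesTable.Entry> actual = Lists.newArrayList(result);
  Assert.assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    Assert.assertArrayEquals(expected[i].getValue(), actual.get(i).getValue());
    Assert.assertEquals(expected[i].getTimestamp(), actual.get(i).getTimestamp());
  }
}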
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class TimeseriesTableTest, method testInvalidTimeRangeCondition:
@Test(expected = TransactionFailureException.class)
public void testInvalidTimeRangeCondition() throws Exception {
  TransactionExecutor txnl = dsFrameworkUtil.newTransactionExecutor(table);
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      long ts = System.currentTimeMillis();
      table.read(Bytes.toBytes("any"), ts, ts - 100);
    }
  });
}
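Note which exception the test expects: TransactionExecutor wraps any exception thrown inside the subroutine (here, the error raised by read for an inverted time range) in a TransactionFailureException, so the test expects the wrapper rather than the underlying error. A caller that needs the original exception can unwrap it; a minimal sketch:

// Sketch: recovering the underlying exception from a failed transaction.
try {
  txnl.execute(subroutine);
} catch (TransactionFailureException e) {
  // getCause() holds the exception thrown inside apply(), if any
  Throwable cause = e.getCause();
}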
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class HBaseQueueDebugger, method scanQueue:
/**
 * Only works for {@link co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy}.
 */
public QueueStatistics scanQueue(final QueueName queueName, @Nullable Long consumerGroupId) throws Exception {
  HBaseConsumerStateStore stateStore;
  try {
    stateStore = queueAdmin.getConsumerStateStore(queueName);
  } catch (IllegalStateException e) {
    throw new NotFoundException(queueName);
  }
  TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, stateStore);
  Multimap<Long, QueueBarrier> barriers = txExecutor.execute(
    new TransactionExecutor.Function<HBaseConsumerStateStore, Multimap<Long, QueueBarrier>>() {
      @Override
      public Multimap<Long, QueueBarrier> apply(HBaseConsumerStateStore input) throws Exception {
        return input.getAllBarriers();
      }
    }, stateStore);
  printProgress("Got %d barriers\n", barriers.size());
  QueueStatistics stats = new QueueStatistics();
  if (consumerGroupId != null) {
    barriers = Multimaps.filterKeys(barriers, Predicates.equalTo(consumerGroupId));
  }
  for (Map.Entry<Long, Collection<QueueBarrier>> entry : barriers.asMap().entrySet()) {
    long groupId = entry.getKey();
    Collection<QueueBarrier> groupBarriers = entry.getValue();
    printProgress("Scanning barriers for group %d\n", groupId);
    int currentSection = 1;
    PeekingIterator<QueueBarrier> barrierIterator = Iterators.peekingIterator(groupBarriers.iterator());
    while (barrierIterator.hasNext()) {
      QueueBarrier start = barrierIterator.next();
      QueueBarrier end = barrierIterator.hasNext() ? barrierIterator.peek() : null;
      printProgress("Scanning section %d/%d...\n", currentSection, groupBarriers.size());
      scanQueue(txExecutor, stateStore, queueName, start, end, stats);
      printProgress("Current results: %s\n", stats.getReport(showTxTimestampOnly()));
      currentSection++;
    }
    printProgress("Scanning complete");
  }
  System.out.printf("Results for queue %s: %s\n", queueName.toString(), stats.getReport(showTxTimestampOnly()));
  return stats;
}
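Unlike the Subroutine examples above, this snippet uses the execute(Function, input) overload, which passes an input into the transaction and returns a value computed inside it. On a Java 8+ source level, the anonymous class could be collapsed to a method reference; a sketch with unchanged semantics (the cast disambiguates between the Function and Procedure overloads of execute):

// Sketch: same call expressed as a method reference (assumes Java 8+).
Multimap<Long, QueueBarrier> barriers = txExecutor.execute(
  (TransactionExecutor.Function<HBaseConsumerStateStore, Multimap<Long, QueueBarrier>>)
    HBaseConsumerStateStore::getAllBarriers,
  stateStore);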