Use of org.apache.tephra.TransactionAware in project cdap by caskdata:
the class HBaseQueueTest, method testQueueUpgrade.
// This tests the upgrade from the old queue layout (salt-based) to the new queue layout (shard-based)
@Test(timeout = 30000L)
public void testQueueUpgrade() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "upgrade");
  HBaseQueueAdmin hbaseQueueAdmin = (HBaseQueueAdmin) queueAdmin;
  HBaseQueueClientFactory hBaseQueueClientFactory = (HBaseQueueClientFactory) queueClientFactory;

  // Create the old queue table explicitly
  HBaseQueueAdmin oldQueueAdmin = new HBaseQueueAdmin(hConf, cConf,
    injector.getInstance(LocationFactory.class),
    injector.getInstance(HBaseTableUtil.class),
    injector.getInstance(DatasetFramework.class),
    injector.getInstance(TransactionExecutorFactory.class),
    QueueConstants.QueueType.QUEUE,
    injector.getInstance(NamespaceQueryAdmin.class),
    injector.getInstance(Impersonator.class));
  oldQueueAdmin.create(queueName);

  int buckets = cConf.getInt(QueueConstants.ConfigKeys.QUEUE_TABLE_PRESPLITS);
  try (final HBaseQueueProducer oldProducer = hBaseQueueClientFactory.createProducer(
      oldQueueAdmin, queueName, QueueConstants.QueueType.QUEUE, QueueMetrics.NOOP_QUEUE_METRICS,
      new SaltedHBaseQueueStrategy(tableUtil, buckets), new ArrayList<ConsumerGroupConfig>())) {
    // Enqueue 10 items to the old queue table
    Transactions.createTransactionExecutor(executorFactory, oldProducer)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          for (int i = 0; i < 10; i++) {
            oldProducer.enqueue(new QueueEntry("key", i, Bytes.toBytes("Message " + i)));
          }
        }
      });
  }

  // Configure the consumer
  final ConsumerConfig consumerConfig = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");
  try (QueueConfigurer configurer = queueAdmin.getQueueConfigurer(queueName)) {
    Transactions.createTransactionExecutor(executorFactory, configurer)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          configurer.configureGroups(ImmutableList.of(consumerConfig));
        }
      });
  }

  // Explicitly set the consumer state to the lowest start row
  try (HBaseConsumerStateStore stateStore = hbaseQueueAdmin.getConsumerStateStore(queueName)) {
    Transactions.createTransactionExecutor(executorFactory, stateStore)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          stateStore.updateState(consumerConfig.getGroupId(), consumerConfig.getInstanceId(),
                                 QueueEntryRow.getQueueEntryRowKey(queueName, 0L, 0));
        }
      });
  }

  // Enqueue 10 more items to the new queue table
  createEnqueueRunnable(queueName, 10, 1, null).run();

  // Verify that the old and the new table have 10 rows each
  Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName,
                                                                   QueueConstants.QueueType.QUEUE)));
  Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName,
                                                                   QueueConstants.QueueType.SHARDED_QUEUE)));

  // Create a consumer; it should see all 20 items
  final List<String> messages = Lists.newArrayList();
  try (final QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
    while (messages.size() != 20) {
      Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer)
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            DequeueResult<byte[]> result = consumer.dequeue(20);
            for (byte[] data : result) {
              messages.add(Bytes.toString(data));
            }
          }
        });
    }
  }
  verifyQueueIsEmpty(queueName, ImmutableList.of(consumerConfig));
}
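Every mutation in the test above goes through Transactions.createTransactionExecutor, which wraps a single TransactionAware (producer, configurer, state store, or consumer) in an executor that drives the Tephra transaction lifecycle around the Subroutine. As a rough sketch of what that wrapper does, here is the same pattern expressed directly with Tephra's TransactionContext (a hedged illustration, not CDAP's actual implementation; executeInTx is a hypothetical helper and client stands for any TransactionSystemClient):

import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

void executeInTx(TransactionSystemClient client, TransactionAware txAware, Runnable work)
    throws TransactionFailureException {
  TransactionContext txContext = new TransactionContext(client, txAware);
  txContext.start();       // begins a transaction and calls startTx on the participant
  try {
    work.run();            // the body of the Subroutine
    txContext.finish();    // commitTx -> canCommit -> commit -> postTxCommit
  } catch (RuntimeException e) {
    txContext.abort();     // rolls back the participant and invalidates the transaction
    throw e;
  }
}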
Use of org.apache.tephra.TransactionAware in project cdap by caskdata:
the class HiveExploreTableTestRun, method setupTable.
private void setupTable(@Nullable String dbName, @Nullable String tableName) throws Exception {
  if (dbName != null) {
    runCommand(NAMESPACE_ID, "create database if not exists " + dbName, false, null, null);
  }
  datasetFramework.addInstance(Table.class.getName(), MY_TABLE,
                               setupTableProperties(dbName, tableName, SCHEMA));

  // Access the dataset instance to perform data operations
  Table table = datasetFramework.getDataset(MY_TABLE, DatasetDefinition.NO_ARGUMENTS, null);
  Assert.assertNotNull(table);
  TransactionAware txTable = (TransactionAware) table;

  // Write one row within a short transaction
  Transaction tx1 = transactionManager.startShort(100);
  txTable.startTx(tx1);
  Put put = new Put(Bytes.toBytes("row1"));
  put.add("bool_field", false);
  put.add("int_field", Integer.MAX_VALUE);
  put.add("long_field", Long.MAX_VALUE);
  put.add("float_field", 3.14f);
  put.add("double_field", 3.14);
  put.add("bytes_field", new byte[] { 'A', 'B', 'C' });
  table.put(put);

  // Drive the commit protocol by hand: flush, conflict-check, commit, clean up
  Assert.assertTrue(txTable.commitTx());
  transactionManager.canCommit(tx1, txTable.getTxChanges());
  transactionManager.commit(tx1);
  txTable.postTxCommit();

  // Start a second transaction and leave it open for the operations that follow
  Transaction tx2 = transactionManager.startShort(100);
  txTable.startTx(tx2);
}
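setupTable walks Tephra's client-side commit protocol explicitly: startTx attaches the transaction to the table, commitTx persists the buffered writes, canCommit runs conflict detection over the change set, commit makes the writes visible, and postTxCommit lets the dataset clean up. The test ignores canCommit's result; a hedged sketch of the failure branch a production caller would add (a fragment using the same objects as above, inside a context that may throw Exception, and assuming the boolean-returning TransactionManager methods the test uses):

if (transactionManager.canCommit(tx1, txTable.getTxChanges())) {
  transactionManager.commit(tx1);
  txTable.postTxCommit();          // e.g. evict the recorded change set
} else {
  txTable.rollbackTx();            // undo the writes this participant already persisted
  transactionManager.abort(tx1);   // invalidate the transaction on the server side
}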
Use of org.apache.tephra.TransactionAware in project cdap by caskdata:
the class HiveExploreTableTestRun, method testInsertFromJoin.
@Test
public void testInsertFromJoin() throws Exception {
  DatasetId userTableID = NAMESPACE_ID.dataset("users");
  DatasetId purchaseTableID = NAMESPACE_ID.dataset("purchases");
  DatasetId expandedTableID = NAMESPACE_ID.dataset("expanded");

  Schema userSchema = Schema.recordOf(
    "user",
    Schema.Field.of("id", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("name", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("email", Schema.of(Schema.Type.STRING)));
  Schema purchaseSchema = Schema.recordOf(
    "purchase",
    Schema.Field.of("purchaseid", Schema.of(Schema.Type.LONG)),
    Schema.Field.of("itemid", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("userid", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("ct", Schema.of(Schema.Type.INT)),
    Schema.Field.of("price", Schema.of(Schema.Type.DOUBLE)));
  Schema expandedSchema = Schema.recordOf(
    "expandedPurchase",
    Schema.Field.of("purchaseid", Schema.of(Schema.Type.LONG)),
    Schema.Field.of("itemid", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("userid", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("ct", Schema.of(Schema.Type.INT)),
    Schema.Field.of("price", Schema.of(Schema.Type.DOUBLE)),
    Schema.Field.of("username", Schema.of(Schema.Type.STRING)),
    Schema.Field.of("email", Schema.of(Schema.Type.STRING)));

  datasetFramework.addInstance(Table.class.getName(), userTableID,
    TableProperties.builder().setSchema(userSchema).setRowFieldName("id").build());
  datasetFramework.addInstance(Table.class.getName(), purchaseTableID,
    TableProperties.builder().setSchema(purchaseSchema).setRowFieldName("purchaseid").build());
  datasetFramework.addInstance(Table.class.getName(), expandedTableID,
    TableProperties.builder().setSchema(expandedSchema).setRowFieldName("purchaseid").build());

  Table userTable = datasetFramework.getDataset(userTableID, DatasetDefinition.NO_ARGUMENTS, null);
  Table purchaseTable = datasetFramework.getDataset(purchaseTableID, DatasetDefinition.NO_ARGUMENTS, null);
  TransactionAware txUserTable = (TransactionAware) userTable;
  TransactionAware txPurchaseTable = (TransactionAware) purchaseTable;

  Transaction tx1 = transactionManager.startShort(100);
  txUserTable.startTx(tx1);
  txPurchaseTable.startTx(tx1);

  Put put = new Put(Bytes.toBytes("samuel"));
  put.add("name", "Samuel Jackson");
  put.add("email", "sjackson@gmail.com");
  userTable.put(put);

  put = new Put(Bytes.toBytes(1L));
  put.add("userid", "samuel");
  put.add("itemid", "scotch");
  put.add("ct", 1);
  put.add("price", 56.99d);
  purchaseTable.put(put);

  txUserTable.commitTx();
  txPurchaseTable.commitTx();
  List<byte[]> changes = new ArrayList<>();
  changes.addAll(txUserTable.getTxChanges());
  changes.addAll(txPurchaseTable.getTxChanges());
  transactionManager.canCommit(tx1, changes);
  transactionManager.commit(tx1);
  txUserTable.postTxCommit();
  txPurchaseTable.postTxCommit();

  try {
    String command = String.format(
      "insert into table %s select P.purchaseid, P.itemid, P.userid, P.ct, P.price, U.name, U.email " +
      "from %s P join %s U on (P.userid = U.id)",
      getDatasetHiveName(expandedTableID), getDatasetHiveName(purchaseTableID),
      getDatasetHiveName(userTableID));
    ExploreExecutionResult result = exploreClient.submit(NAMESPACE_ID, command).get();
    Assert.assertEquals(QueryStatus.OpStatus.FINISHED, result.getStatus().getStatus());

    command = String.format("select purchaseid, itemid, userid, ct, price, username, email from %s",
                            getDatasetHiveName(expandedTableID));
    runCommand(NAMESPACE_ID, command, true,
      Lists.newArrayList(new ColumnDesc("purchaseid", "BIGINT", 1, null),
                         new ColumnDesc("itemid", "STRING", 2, null),
                         new ColumnDesc("userid", "STRING", 3, null),
                         new ColumnDesc("ct", "INT", 4, null),
                         new ColumnDesc("price", "DOUBLE", 5, null),
                         new ColumnDesc("username", "STRING", 6, null),
                         new ColumnDesc("email", "STRING", 7, null)),
      Lists.newArrayList(new QueryResult(Lists.<Object>newArrayList(
        1L, "scotch", "samuel", 1, 56.99d, "Samuel Jackson", "sjackson@gmail.com"))));
  } finally {
    datasetFramework.deleteInstance(userTableID);
    datasetFramework.deleteInstance(purchaseTableID);
    datasetFramework.deleteInstance(expandedTableID);
  }
}
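This test shares one transaction across two TransactionAware tables: both receive startTx(tx1), and their change sets are concatenated before conflict detection, so a conflicting write to either table would fail the whole transaction. The same pattern generalizes to any number of participants; a hedged sketch (commitAll is a hypothetical helper, assuming the boolean-returning TransactionManager API used above):

private void commitAll(TransactionManager txManager, Transaction tx,
                       TransactionAware... participants) throws Exception {
  List<byte[]> changes = new ArrayList<>();
  for (TransactionAware participant : participants) {
    participant.commitTx();                      // flush each participant's buffered writes
    changes.addAll(participant.getTxChanges());  // gather change sets for conflict detection
  }
  if (!txManager.canCommit(tx, changes)) {
    throw new TransactionFailureException("Conflict detected for transaction " + tx.getTransactionId());
  }
  txManager.commit(tx);
  for (TransactionAware participant : participants) {
    participant.postTxCommit();
  }
}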
Use of org.apache.tephra.TransactionAware in project cdap by caskdata:
the class HBaseQueueAdmin, method deleteFlowConfigs.
private void deleteFlowConfigs(FlowId flowId) throws Exception {
  // This is a bit hacky since we know how the HBaseConsumerStateStore works.
  // Maybe we need another Dataset type that works across all queues.
  final QueueName prefixName = QueueName.from(URI.create(QueueName.prefixForFlow(flowId)));
  DatasetId stateStoreId = getStateStoreId(flowId.getNamespace());
  Map<String, String> args = ImmutableMap.of(HBaseQueueDatasetModule.PROPERTY_QUEUE_NAME,
                                             prefixName.toString());
  HBaseConsumerStateStore stateStore = datasetFramework.getDataset(stateStoreId, args, null);
  if (stateStore == null) {
    // If the state store doesn't exist, there is no queue, hence nothing to do
    return;
  }
  try {
    final Table table = stateStore.getInternalTable();
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          // The prefix name is "/"-terminated ("queue:///namespace/app/flow/"),
          // hence the scan is unique to the flow
          byte[] startRow = Bytes.toBytes(prefixName.toString());
          try (Scanner scanner = table.scan(startRow, Bytes.stopKeyForPrefix(startRow))) {
            Row row = scanner.next();
            while (row != null) {
              table.delete(row.getRow());
              row = scanner.next();
            }
          }
        }
      });
  } finally {
    stateStore.close();
  }
}
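The scan bounds make this deletion safe: because the prefix is "/"-terminated, every row key belonging to the flow's queues lies in the half-open range [startRow, stopKeyForPrefix(startRow)). As an illustration of how such an exclusive stop key can be derived (a sketch of the idea only, using java.util.Arrays; CDAP ships its own Bytes.stopKeyForPrefix):

// Increment the last non-0xFF byte of the prefix and truncate after it;
// e.g. "queue:///ns/app/flow/" -> "queue:///ns/app/flow0" ('/' + 1 == '0')
static byte[] stopKeyForPrefix(byte[] prefix) {
  for (int i = prefix.length - 1; i >= 0; i--) {
    if (prefix[i] != (byte) 0xFF) {
      byte[] stop = Arrays.copyOf(prefix, i + 1);
      stop[i]++;
      return stop;
    }
  }
  return null; // prefix is all 0xFF bytes: the scan has no finite upper bound
}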
Use of org.apache.tephra.TransactionAware in project phoenix by apache:
the class MutationState, method join.
/**
 * Combine a newer mutation state with this one; in the event of overlaps, the newer one takes
 * precedence. Also combine any metrics collected for the newer mutation.
 *
 * @param newMutationState the newer mutation state
 */
public void join(MutationState newMutationState) throws SQLException {
  if (this == newMutationState) {
    // Joining a mutation state with itself doesn't make sense
    return;
  }
  if (txContext != null) {
    // A transaction context is driving the transaction: register the newer state's
    // participants with it so their lifecycle callbacks are invoked
    for (TransactionAware txAware : newMutationState.txAwares) {
      txContext.addTransactionAware(txAware);
    }
  } else {
    txAwares.addAll(newMutationState.txAwares);
  }
  this.sizeOffset += newMutationState.sizeOffset;
  joinMutationState(newMutationState.mutations, this.mutations);
  if (!newMutationState.txMutations.isEmpty()) {
    if (txMutations.isEmpty()) {
      txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
    }
    joinMutationState(newMutationState.txMutations, this.txMutations);
  }
  mutationMetricQueue.combineMetricQueues(newMutationState.mutationMetricQueue);
  if (readMetricQueue == null) {
    readMetricQueue = newMutationState.readMetricQueue;
  } else if (newMutationState.readMetricQueue != null) {
    readMetricQueue.combineReadMetrics(newMutationState.readMetricQueue);
  }
  throwIfTooBig();
}
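When Phoenix runs transactionally (txContext != null), join does not merge the txAwares list itself; it hands the newer state's participants to the shared Tephra TransactionContext so that Tephra invokes their startTx/commitTx/rollbackTx callbacks at the right points. A minimal sketch of that registration API (hedged; client and indexTable are placeholder names for a TransactionSystemClient and a TransactionAware):

TransactionContext txContext = new TransactionContext(client); // org.apache.tephra.TransactionContext
txContext.addTransactionAware(indexTable);  // the same call join() makes for each participant
txContext.start();                          // the registered participant now receives startTx(...)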