Example use of org.apache.tephra.Transaction in the project cdap by caskdata: the method addExtraTransactionAware of class SingleThreadDatasetCache.
@Override
public void addExtraTransactionAware(TransactionAware txAware) {
  // Insert at the head of the deque (LIFO) so that the TMS tx aware always
  // stays last; a duplicate registration is silently ignored.
  if (extraTxAwares.contains(txAware)) {
    return;
  }
  extraTxAwares.addFirst(txAware);
  // If a transaction is already in progress, the newcomer must join it
  // immediately so it participates in the current commit/rollback.
  if (txContext != null) {
    Transaction currentTx = txContext.getCurrentTransaction();
    if (currentTx != null) {
      txAware.startTx(currentTx);
    }
  }
}
Example use of org.apache.tephra.Transaction in the project cdap by caskdata: the method preScannerOpen of class DequeueScanObserver.
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException {
  ConsumerConfig consumerConfig = DequeueScanAttributes.getConsumerConfig(scan);
  Transaction tx = DequeueScanAttributes.getTx(scan);
  // Only decorate the scan when both dequeue attributes were attached to it;
  // otherwise delegate untouched.
  if (consumerConfig != null && tx != null) {
    Filter dequeueFilter = new DequeueFilter(consumerConfig, tx);
    Filter existing = scan.getFilter();
    // Chain with any pre-existing filter: every filter must pass for a cell
    // to be returned.
    scan.setFilter(existing == null
                     ? dequeueFilter
                     : new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, dequeueFilter));
  }
  return super.preScannerOpen(e, scan, s);
}
Another example use of org.apache.tephra.Transaction in the project cdap by caskdata: the method preScannerOpen of class DequeueScanObserver.
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException {
  ConsumerConfig config = DequeueScanAttributes.getConsumerConfig(scan);
  Transaction transaction = DequeueScanAttributes.getTx(scan);
  if (config == null || transaction == null) {
    // Not a dequeue scan: no attributes to act on, pass straight through.
    return super.preScannerOpen(e, scan, s);
  }
  Filter dequeueFilter = new DequeueFilter(config, transaction);
  Filter previous = scan.getFilter();
  if (previous == null) {
    scan.setFilter(dequeueFilter);
  } else {
    // Preserve the caller's filter: a cell must satisfy both filters.
    scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, previous, dequeueFilter));
  }
  return super.preScannerOpen(e, scan, s);
}
Example use of org.apache.tephra.Transaction in the project cdap by caskdata: the method recordTransaction of class AppWithCustomTx.
/**
 * If in a transaction, records the timeout that the current transaction was given, or "default" if no explicit
 * timeout was given. Otherwise does nothing.
 *
 * Note: we know whether and what explicit timeout was given, because we inject a {@link RevealingTxSystemClient},
 * which returns a {@link RevealingTransaction} for {@link TransactionSystemClient#startShort(int)} only.
 */
static void recordTransaction(DatasetContext context, String row, String column) {
  TransactionCapturingTable capture = context.getDataset(CAPTURE);
  Transaction tx = capture.getTx();
  if (tx == null) {
    // Outside a transaction the put must be rejected by the dataset framework.
    try {
      capture.getTable().put(new Put(row, column, DEFAULT));
      throw new RuntimeException("put to table without transaction should have failed.");
    } catch (DataSetException e) {
      // expected
    }
    return;
  }
  String value = DEFAULT;
  // RevealingTransaction is not visible in the program class loader, so a cast
  // is impossible; reflection is the only way to read its public timeout field.
  if ("RevealingTransaction".equals(tx.getClass().getSimpleName())) {
    try {
      value = String.valueOf((int) tx.getClass().getField("timeout").get(tx));
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }
  capture.getTable().put(new Put(row, column, value));
}
Example use of org.apache.tephra.Transaction in the project cdap by caskdata: the method doMain of class HBaseTableExporter.
/**
 * Entry point of the export tool: starts a long transaction, runs the MapReduce
 * job that writes HFiles for the given table, and releases the transaction.
 *
 * @param args expects the HBase table name as the first argument; prints help
 *             and returns when no argument is given
 * @throws Exception if startup, job submission/execution, or the transaction
 *                   commit fails
 */
public void doMain(String[] args) throws Exception {
  if (args.length < 1) {
    printHelp();
    return;
  }
  String tableName = args[0];
  try {
    startUp();
    Transaction tx = txClient.startLong();
    try {
      Job job = createSubmittableJob(tx, tableName);
      if (!job.waitForCompletion(true)) {
        LOG.info("MapReduce job failed!");
        throw new RuntimeException("Failed to run the MapReduce job.");
      }
      // Always commit the transaction, since we are not doing any data update
      // operation in this tool.
      txClient.commitOrThrow(tx);
    } catch (Exception e) {
      // BUG FIX: previously a failed job (or a failed commit) left the long
      // transaction open forever in the transaction manager. Abort it before
      // propagating so it does not linger in the active-transaction list.
      txClient.abort(tx);
      throw e;
    }
    System.out.println("Export operation complete. HFiles are stored at location " + bulkloadDir.toString());
  } finally {
    stop();
  }
}
Aggregations