Use of org.apache.tephra.Transaction in project cdap by caskdata.
From class HBaseTableTest, method testEnforceTxLifetime:
@Test
public void testEnforceTxLifetime() throws Exception {
  String tableName = "enforce-tx-lifetime";
  DatasetProperties datasetProperties = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .setConflictDetection(ConflictDetection.COLUMN)
    .build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
  admin.create();
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName())
    .properties(datasetProperties.getProperties())
    .build();
  try {
    final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                            cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
    table.commitTx();
    table.postTxCommit();
    table.close();
    CConfiguration testCConf = CConfiguration.copy(cConf);
    // No mutations on tables using testCConf will succeed.
    testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // A put should fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.put(b("row2"), b("col1"), b("val1"));
        }
      });
      // A delete should also fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"));
        }
      });
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"), b("col1"));
        }
      });
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
    // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
  } finally {
    admin.drop();
    admin.close();
  }
}
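The test drives the Tephra protocol by hand (startTx, commitTx, postTxCommit) so it can assert on the failure path. For comparison, here is a minimal sketch of the same lifecycle driven through Tephra's TransactionContext instead; it assumes the txSystemClient and HBaseTable (a TransactionAware) and the b(...) byte[] helper from the test above, and the variable names are illustrative, not part of the test class.

  // Sketch only: wrapping a TransactionAware table in a Tephra TransactionContext.
  TransactionContext txContext = new TransactionContext(txSystemClient, table);
  txContext.start();                              // begins a short transaction and calls table.startTx(tx)
  try {
    table.put(b("row1"), b("col1"), b("val1"));   // buffered mutation, same as in the test
    txContext.finish();                           // checks conflicts, commits, and completes the table's commit
  } catch (TransactionFailureException e) {
    txContext.abort(e);                           // rolls back the table's buffered changes, then rethrows
  }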
Use of org.apache.tephra.Transaction in project cdap by caskdata.
From class HBaseTableTest, method assertTxFail:
private void assertTxFail(TransactionSystemClient txSystemClient, HBaseTable table, Runnable op) throws Exception {
  Transaction tx = txSystemClient.startShort();
  table.startTx(tx);
  op.run();
  try {
    table.commitTx();
    Assert.fail("Expected the mutation to fail due to tx max lifetime check");
  } catch (IOException e) {
    // Expected to fail due to tx max lifetime check
    table.rollbackTx();
  }
}
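For contrast, a hypothetical success-path counterpart (not part of the test class) would look like the sketch below: the same manual protocol, but the commit is expected to go through and is followed by postTxCommit. As in assertTxFail, the DetachedTxSystemClient hands out transactions locally, so no conflict check against a transaction service is involved.

  // Hypothetical helper, for illustration only.
  private void assertTxSucceeds(TransactionSystemClient txSystemClient, HBaseTable table, Runnable op)
    throws Exception {
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);     // associate the buffered table with the transaction
    op.run();              // perform the mutation(s)
    table.commitTx();      // flush the buffered writes to HBase; throws IOException on failure
    table.postTxCommit();  // clear client-side state now that the writes are persisted
  }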
Use of org.apache.tephra.Transaction in project cdap by caskdata.
From class HBaseTableTest, method testEnableIncrements:
@Test
public void testEnableIncrements() throws Exception {
  // setup a table with increments disabled and with it enabled
  String disableTableName = "incr-disable";
  String enabledTableName = "incr-enable";
  TableId disabledTableId = hBaseTableUtil.createHTableId(NAMESPACE1, disableTableName);
  TableId enabledTableId = hBaseTableUtil.createHTableId(NAMESPACE1, enabledTableName);
  DatasetProperties propsDisabled = TableProperties.builder()
    .setReadlessIncrementSupport(false)
    .setConflictDetection(ConflictDetection.COLUMN)
    .build();
  HBaseTableAdmin disabledAdmin = getTableAdmin(CONTEXT1, disableTableName, propsDisabled);
  disabledAdmin.create();
  HBaseAdmin admin = TEST_HBASE.getHBaseAdmin();
  DatasetProperties propsEnabled = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .setConflictDetection(ConflictDetection.COLUMN)
    .build();
  HBaseTableAdmin enabledAdmin = getTableAdmin(CONTEXT1, enabledTableName, propsEnabled);
  enabledAdmin.create();
  try {
    try {
      HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(admin, disabledTableId);
      List<String> cps = htd.getCoprocessors();
      assertFalse(cps.contains(IncrementHandler.class.getName()));
      htd = hBaseTableUtil.getHTableDescriptor(admin, enabledTableId);
      cps = htd.getCoprocessors();
      assertTrue(cps.contains(IncrementHandler.class.getName()));
    } finally {
      admin.close();
    }
    BufferingTable table = getTable(CONTEXT1, enabledTableName, propsEnabled);
    byte[] row = Bytes.toBytes("row1");
    byte[] col = Bytes.toBytes("col1");
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);
    table.increment(row, col, 10);
    table.commitTx();
    // verify that value was written as a delta value
    final byte[] expectedValue = Bytes.add(IncrementHandlerState.DELTA_MAGIC_PREFIX, Bytes.toBytes(10L));
    final AtomicBoolean foundValue = new AtomicBoolean();
    byte[] enabledTableNameBytes = hBaseTableUtil.getHTableDescriptor(admin, enabledTableId).getName();
    TEST_HBASE.forEachRegion(enabledTableNameBytes, new Function<HRegion, Object>() {
      @Override
      public Object apply(HRegion hRegion) {
        org.apache.hadoop.hbase.client.Scan scan = hBaseTableUtil.buildScan().build();
        try {
          RegionScanner scanner = hRegion.getScanner(scan);
          List<Cell> results = Lists.newArrayList();
          boolean hasMore;
          do {
            hasMore = scanner.next(results);
            for (Cell cell : results) {
              if (CellUtil.matchingValue(cell, expectedValue)) {
                foundValue.set(true);
              }
            }
          } while (hasMore);
        } catch (IOException ioe) {
          fail("IOException scanning region: " + ioe.getMessage());
        }
        return null;
      }
    });
    assertTrue("Should have seen the expected encoded delta value in the " + enabledTableName + " table region",
               foundValue.get());
  } finally {
    disabledAdmin.drop();
    enabledAdmin.drop();
  }
}
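The test checks the server side: with setReadlessIncrementSupport(true) the IncrementHandler coprocessor is attached and the increment is stored as a delta cell (DELTA_MAGIC_PREFIX followed by the encoded amount). From the client side the effect is sketched below; this is illustrative only, reusing the txSystemClient and b(...) helper from the tests above and assuming a transactional table created with readless increments enabled.

  // Sketch: readless increments only write a delta; the IncrementHandler coprocessor
  // merges deltas with the stored value when the cell is read and on flush/compaction.
  Transaction tx = txSystemClient.startShort();
  table.startTx(tx);
  table.increment(b("counter"), b("total"), 5L);  // no read on the write path, just a delta cell
  table.commitTx();

  tx = txSystemClient.startShort();
  table.startTx(tx);
  table.increment(b("counter"), b("total"), 7L);  // a second, independent delta cell
  table.commitTx();

  tx = txSystemClient.startShort();
  table.startTx(tx);
  long total = Bytes.toLong(table.get(b("counter"), b("total")));  // deltas merged at read time => 12
  table.commitTx();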
Use of org.apache.tephra.Transaction in project cdap by caskdata.
From class HBaseTableTest, method testTTL:
@Test
public void testTTL() throws Exception {
  // for the purpose of this test it is fine not to configure ttl when creating table: we want to see if it
  // applies on reading
  int ttl = 1;
  String ttlTable = "ttl";
  String noTtlTable = "nottl";
  DatasetProperties props = TableProperties.builder().setTTL(ttl).build();
  getTableAdmin(CONTEXT1, ttlTable, props).create();
  DatasetSpecification ttlTableSpec = DatasetSpecification.builder(ttlTable, HBaseTable.class.getName())
    .properties(props.getProperties())
    .build();
  HBaseTable table = new HBaseTable(CONTEXT1, ttlTableSpec, Collections.<String, String>emptyMap(),
                                    cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  Transaction tx = txSystemClient.startShort();
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  table.commitTx();
  TimeUnit.MILLISECONDS.sleep(1010);
  tx = txSystemClient.startShort();
  table.startTx(tx);
  table.put(b("row2"), b("col2"), b("val2"));
  table.commitTx();
  // now, we should not see first as it should have expired, but see the last one
  tx = txSystemClient.startShort();
  table.startTx(tx);
  byte[] val = table.get(b("row1"), b("col1"));
  if (val != null) {
    LOG.info("Unexpected value " + Bytes.toStringBinary(val));
  }
  Assert.assertNull(val);
  Assert.assertArrayEquals(b("val2"), table.get(b("row2"), b("col2")));
  // test a table with no TTL
  DatasetProperties props2 = TableProperties.builder().setTTL(Tables.NO_TTL).build();
  getTableAdmin(CONTEXT1, noTtlTable, props2).create();
  DatasetSpecification noTtlTableSpec = DatasetSpecification.builder(noTtlTable, HBaseTable.class.getName())
    .properties(props2.getProperties())
    .build();
  HBaseTable table2 = new HBaseTable(CONTEXT1, noTtlTableSpec, Collections.<String, String>emptyMap(),
                                     cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  table2.put(b("row1"), b("col1"), b("val1"));
  table2.commitTx();
  TimeUnit.SECONDS.sleep(2);
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  table2.put(b("row2"), b("col2"), b("val2"));
  table2.commitTx();
  // if ttl is -1 (unlimited), it should see both
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  Assert.assertArrayEquals(b("val1"), table2.get(b("row1"), b("col1")));
  Assert.assertArrayEquals(b("val2"), table2.get(b("row2"), b("col2")));
}
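The TTL above is deliberately tiny (1 second) so the test can observe expiry within a short sleep. For reference, a sketch of how the two configurations are declared; the value is in seconds, Tables.NO_TTL (-1, per the comment in the test) disables expiry, and the names oneDay/keepForever are illustrative.

  // Sketch: declaring TTL through TableProperties, as in the test above.
  DatasetProperties oneDay = TableProperties.builder()
    .setTTL(86400)          // cells older than one day are no longer returned by reads
    .build();
  DatasetProperties keepForever = TableProperties.builder()
    .setTTL(Tables.NO_TTL)  // no expiry: both old and new cells remain visible
    .build();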
Use of org.apache.tephra.Transaction in project cdap by caskdata.
From class BaseHiveExploreService, method closeTransaction:
private void closeTransaction(QueryHandle handle, OperationInfo opInfo) {
  try {
    String txCommitted = opInfo.getSessionConf().get(Constants.Explore.TX_QUERY_CLOSED);
    if (txCommitted != null && Boolean.parseBoolean(txCommitted)) {
      LOG.trace("Transaction for handle {} has already been closed", handle);
      return;
    }
    Transaction tx = ConfigurationUtil.get(opInfo.getSessionConf(), Constants.Explore.TX_QUERY_KEY, TxnCodec.INSTANCE);
    LOG.trace("Closing transaction {} for handle {}", tx, handle);
    if (opInfo.isReadOnly() ||
        (opInfo.getStatus() != null && opInfo.getStatus().getStatus() == QueryStatus.OpStatus.FINISHED)) {
      try {
        txClient.commitOrThrow(tx);
      } catch (TransactionFailureException e) {
        txClient.invalidate(tx.getWritePointer());
        LOG.info("Invalidating transaction: {}", tx);
      }
    } else {
      txClient.invalidate(tx.getWritePointer());
    }
  } catch (Throwable e) {
    LOG.error("Got exception while closing transaction.", e);
  } finally {
    opInfo.getSessionConf().put(Constants.Explore.TX_QUERY_CLOSED, "true");
  }
}
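Note that closeTransaction never rolls the transaction back in the usual Tephra sense: the writes performed by a Hive query cannot be undone by this client, so the unsuccessful path invalidates the write pointer, which keeps those writes permanently invisible to other transactions. A hedged sketch of the distinction, with illustrative names:

  // Sketch: the two ways to discard a Tephra transaction.
  // abort() is appropriate when the TransactionAwares rolled their changes back;
  // invalidate() is used when writes cannot be undone and must simply stay invisible.
  void discard(TransactionSystemClient txClient, Transaction tx, boolean changesRolledBack) {
    if (changesRolledBack) {
      txClient.abort(tx);                         // normal rollback path
    } else {
      txClient.invalidate(tx.getWritePointer());  // add the write pointer to the invalid list
    }
  }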