Use of io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.
The class HBaseTableTest, method testCachedEncodedTransaction.
@Test
public void testCachedEncodedTransaction() throws Exception {
  String tableName = "testEncodedTxTable";
  DatasetProperties props = DatasetProperties.EMPTY;
  getTableAdmin(CONTEXT1, tableName, props).create();
  DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
  // use a transaction codec that counts the number of times encode() is called
  final AtomicInteger encodeCount = new AtomicInteger();
  final TransactionCodec codec = new TransactionCodec() {
    @Override
    public byte[] encode(Transaction tx) throws IOException {
      encodeCount.incrementAndGet();
      return super.encode(tx);
    }
  };
  // use a table util that creates an HTable that validates the encoded tx on each get
  final AtomicReference<Transaction> txRef = new AtomicReference<>();
  HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {
    @Override
    public HTable createHTable(Configuration conf, TableId tableId) throws IOException {
      HTable htable = super.createHTable(conf, tableId);
      return new MinimalDelegatingHTable(htable) {
        @Override
        public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.get(get);
        }

        @Override
        public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
          for (org.apache.hadoop.hbase.client.Get get : gets) {
            Assert.assertEquals(txRef.get().getTransactionId(),
                                codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          }
          return super.get(gets);
        }

        @Override
        public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.getScanner(scan);
        }
      };
    }
  };
  HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(), cConf,
                                    TEST_HBASE.getConfiguration(), util, codec);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  // test all operations: only the first one encodes
  Transaction tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  Assert.assertEquals(0, encodeCount.get());
  table.get(b("row"));
  Assert.assertEquals(1, encodeCount.get());
  table.get(ImmutableList.of(new Get("a"), new Get("b")));
  Assert.assertEquals(1, encodeCount.get());
  Scanner scanner = table.scan(new Scan(null, null));
  Assert.assertEquals(1, encodeCount.get());
  scanner.close();
  table.increment(b("z"), b("z"), 0L);
  Assert.assertEquals(1, encodeCount.get());
  table.commitTx();
  table.postTxCommit();
  // test that for the next tx, we encode again
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(2, encodeCount.get());
  table.commitTx();
  // test that we encode again, even if postTxCommit was not called
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(3, encodeCount.get());
  table.commitTx();
  table.rollbackTx();
  // test that rollback does not encode the tx
  Assert.assertEquals(3, encodeCount.get());
  // test that we encode again if the previous tx rolled back
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(4, encodeCount.get());
  table.commitTx();
  table.close();
  Assert.assertEquals(4, encodeCount.get());
}
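The instrumentation trick above, subclassing the codec so that every encode() call is counted before delegating to the real implementation, is what lets the assertions prove that HBaseTable caches the encoded transaction across operations within a single transaction. Here is a minimal, self-contained sketch of the same pattern; SimpleCodec is a hypothetical stand-in for TransactionCodec, not the CDAP class.

import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for TransactionCodec; only the counting-by-subclassing pattern matters.
class SimpleCodec {
  byte[] encode(long txId) {
    return Long.toString(txId).getBytes(StandardCharsets.UTF_8);
  }
}

public class CountingCodecSketch {
  public static void main(String[] args) {
    final AtomicInteger encodeCount = new AtomicInteger();
    // Anonymous subclass: count the call, then delegate to the real encoding.
    SimpleCodec countingCodec = new SimpleCodec() {
      @Override
      byte[] encode(long txId) {
        encodeCount.incrementAndGet();
        return super.encode(txId);
      }
    };
    countingCodec.encode(42L);
    countingCodec.encode(42L);
    System.out.println(encodeCount.get()); // prints 2: every call went through the override
  }
}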
Use of io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.
The class HBaseTableTest, method testCachedEncodedTransaction (a second variant that differs only in the table-util override: it wraps org.apache.hadoop.hbase.client.Table in DelegatingTable instead of HTable in MinimalDelegatingHTable).
@Test
public void testCachedEncodedTransaction() throws Exception {
  String tableName = "testEncodedTxTable";
  DatasetProperties props = DatasetProperties.EMPTY;
  getTableAdmin(CONTEXT1, tableName, props).create();
  DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
  // use a transaction codec that counts the number of times encode() is called
  final AtomicInteger encodeCount = new AtomicInteger();
  final TransactionCodec codec = new TransactionCodec() {
    @Override
    public byte[] encode(Transaction tx) throws IOException {
      encodeCount.incrementAndGet();
      return super.encode(tx);
    }
  };
  // use a table util that creates a Table that validates the encoded tx on each get
  final AtomicReference<Transaction> txRef = new AtomicReference<>();
  HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {
    @Override
    public org.apache.hadoop.hbase.client.Table createTable(Configuration conf, TableId tableId) throws IOException {
      org.apache.hadoop.hbase.client.Table table = super.createTable(conf, tableId);
      return new DelegatingTable(table) {
        @Override
        public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.get(get);
        }

        @Override
        public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
          for (org.apache.hadoop.hbase.client.Get get : gets) {
            Assert.assertEquals(txRef.get().getTransactionId(),
                                codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          }
          return super.get(gets);
        }

        @Override
        public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.getScanner(scan);
        }
      };
    }
  };
  HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(), cConf,
                                    TEST_HBASE.getConfiguration(), util, codec);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  // test all operations: only the first one encodes
  Transaction tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  Assert.assertEquals(0, encodeCount.get());
  table.get(b("row"));
  Assert.assertEquals(1, encodeCount.get());
  table.get(ImmutableList.of(new Get("a"), new Get("b")));
  Assert.assertEquals(1, encodeCount.get());
  Scanner scanner = table.scan(new Scan(null, null));
  Assert.assertEquals(1, encodeCount.get());
  scanner.close();
  table.increment(b("z"), b("z"), 0L);
  Assert.assertEquals(1, encodeCount.get());
  table.commitTx();
  table.postTxCommit();
  // test that for the next tx, we encode again
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(2, encodeCount.get());
  table.commitTx();
  // test that we encode again, even if postTxCommit was not called
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(3, encodeCount.get());
  table.commitTx();
  table.rollbackTx();
  // test that rollback does not encode the tx
  Assert.assertEquals(3, encodeCount.get());
  // test that we encode again if the previous tx rolled back
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(4, encodeCount.get());
  table.commitTx();
  table.close();
  Assert.assertEquals(4, encodeCount.get());
}
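Both variants rely on the same delegating-wrapper pattern: wrap the HBase client object (HTable in the older variant, Table in the newer one), forward every call to the delegate, and override only the read methods to assert that each request carries the expected transaction attribute. Below is a self-contained sketch of that pattern; the Store interface is a hypothetical stand-in, not the HBase client API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for org.apache.hadoop.hbase.client.Table.
interface Store {
  byte[] get(String key);
  void put(String key, byte[] value);
}

// Forwards every call to a delegate; tests subclass this and override only what they inspect.
class DelegatingStore implements Store {
  private final Store delegate;

  DelegatingStore(Store delegate) {
    this.delegate = delegate;
  }

  @Override
  public byte[] get(String key) {
    return delegate.get(key);
  }

  @Override
  public void put(String key, byte[] value) {
    delegate.put(key, value);
  }
}

public class DelegatingStoreSketch {
  public static void main(String[] args) {
    Store real = new Store() {
      private final Map<String, byte[]> data = new HashMap<>();
      @Override public byte[] get(String key) { return data.get(key); }
      @Override public void put(String key, byte[] value) { data.put(key, value); }
    };
    // Intercept get(...) to validate the argument before forwarding, as the tests above do.
    Store inspecting = new DelegatingStore(real) {
      @Override
      public byte[] get(String key) {
        if (!key.startsWith("row")) {
          throw new AssertionError("unexpected key: " + key);
        }
        return super.get(key);
      }
    };
    inspecting.put("row1", new byte[] { 1 });
    System.out.println(inspecting.get("row1").length); // prints 1; the override ran first
  }
}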
Use of io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.
The class HBaseTableTest, method testEnforceTxLifetime.
@Test
public void testEnforceTxLifetime() throws Exception {
  String tableName = "enforce-tx-lifetime";
  DatasetProperties datasetProperties = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .setConflictDetection(ConflictDetection.COLUMN)
    .build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
  admin.create();
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName())
    .properties(datasetProperties.getProperties())
    .build();
  try {
    final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), cConf,
                                            TEST_HBASE.getConfiguration(), hBaseTableUtil);
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
    table.commitTx();
    table.postTxCommit();
    table.close();

    CConfiguration testCConf = CConfiguration.copy(cConf);
    // No mutations on tables using testCConf will succeed.
    testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // A put should fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.put(b("row2"), b("col1"), b("val1"));
        }
      });
      // A delete should also fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"));
        }
      });
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"), b("col1"));
        }
      });
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }

    // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
  } finally {
    admin.drop();
    admin.close();
  }
}
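testEnforceTxLifetime uses two helpers from HBaseTableTest that this excerpt does not include: b(String) and assertTxFail(...). The sketch below is a plausible reconstruction of their shape, not the actual CDAP code; TxClient and TxTable are hypothetical stand-ins for DetachedTxSystemClient and HBaseTable, and the exact signatures are assumptions.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Hypothetical stand-in for DetachedTxSystemClient.
interface TxClient {
  long startShort();
}

// Hypothetical stand-in for the slice of HBaseTable that assertTxFail needs.
interface TxTable {
  void startTx(long txId);
  void commitTx() throws IOException;   // persists buffered mutations; the server rejects
                                        // them when the tx has exceeded its max lifetime
  void rollbackTx() throws IOException;
}

class TxTestHelpers {
  // b(...) is in all likelihood shorthand for Bytes.toBytes(...)
  static byte[] b(String s) {
    return s.getBytes(StandardCharsets.UTF_8);
  }

  // Plausible shape of assertTxFail: buffer a mutation, then expect the commit to throw.
  static void assertTxFail(TxClient client, TxTable table, Runnable operation) throws IOException {
    table.startTx(client.startShort());
    operation.run();                    // the put/delete/increment is only buffered here
    try {
      table.commitTx();                 // flush to HBase; the lifetime check should reject it
      throw new AssertionError("expected commitTx to fail due to exceeded tx lifetime");
    } catch (IOException expected) {
      table.rollbackTx();               // clean up so the next case starts fresh
    }
  }
}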
Use of io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.
The class HBaseTableTest, method testTTL.
@Test
public void testTTL() throws Exception {
  // for the purpose of this test it is fine not to configure ttl when creating the table:
  // we want to see whether it is applied on reading
  int ttl = 1;
  String ttlTable = "ttl";
  String noTtlTable = "nottl";
  DatasetProperties props = TableProperties.builder().setTTL(ttl).build();
  getTableAdmin(CONTEXT1, ttlTable, props).create();
  DatasetSpecification ttlTableSpec = DatasetSpecification.builder(ttlTable, HBaseTable.class.getName())
    .properties(props.getProperties())
    .build();
  HBaseTable table = new HBaseTable(CONTEXT1, ttlTableSpec, Collections.<String, String>emptyMap(), cConf,
                                    TEST_HBASE.getConfiguration(), hBaseTableUtil);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  Transaction tx = txSystemClient.startShort();
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  table.commitTx();

  // wait slightly longer than the 1-second TTL
  TimeUnit.MILLISECONDS.sleep(1010);

  tx = txSystemClient.startShort();
  table.startTx(tx);
  table.put(b("row2"), b("col2"), b("val2"));
  table.commitTx();

  // now we should not see the first value, as it should have expired, but we should see the second
  tx = txSystemClient.startShort();
  table.startTx(tx);
  byte[] val = table.get(b("row1"), b("col1"));
  if (val != null) {
    LOG.info("Unexpected value " + Bytes.toStringBinary(val));
  }
  Assert.assertNull(val);
  Assert.assertArrayEquals(b("val2"), table.get(b("row2"), b("col2")));

  // test a table with no TTL
  DatasetProperties props2 = TableProperties.builder().setTTL(Tables.NO_TTL).build();
  getTableAdmin(CONTEXT1, noTtlTable, props2).create();
  DatasetSpecification noTtlTableSpec = DatasetSpecification.builder(noTtlTable, HBaseTable.class.getName())
    .properties(props2.getProperties())
    .build();
  HBaseTable table2 = new HBaseTable(CONTEXT1, noTtlTableSpec, Collections.<String, String>emptyMap(), cConf,
                                     TEST_HBASE.getConfiguration(), hBaseTableUtil);
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  table2.put(b("row1"), b("col1"), b("val1"));
  table2.commitTx();

  TimeUnit.SECONDS.sleep(2);

  tx = txSystemClient.startShort();
  table2.startTx(tx);
  table2.put(b("row2"), b("col2"), b("val2"));
  table2.commitTx();

  // since ttl is -1 (unlimited), we should see both values
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  Assert.assertArrayEquals(b("val1"), table2.get(b("row1"), b("col1")));
  Assert.assertArrayEquals(b("val2"), table2.get(b("row2"), b("col2")));
}
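The comment at the top of testTTL notes that TTL is expected to apply on reading, without HBase-level TTL configuration being required. As a rough illustration of that read-time semantic (an assumption about the behavior under test, not a copy of CDAP's internals), a visibility check would look like the sketch below; the test's 1010 ms sleep is just the 1-second TTL plus a small safety margin.

final class TtlCheck {
  private TtlCheck() {
  }

  // ttlSeconds <= 0 models Tables.NO_TTL (-1 in the test): the cell never expires.
  static boolean isCellVisible(long cellTimestampMillis, long nowMillis, long ttlSeconds) {
    if (ttlSeconds <= 0) {
      return true;
    }
    return nowMillis - cellTimestampMillis < ttlSeconds * 1000L;
  }
}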