use of org.apache.tephra.Transaction in project cdap by caskdata.
the class HBaseTableTest method testScannerCache.
private void testScannerCache(int rowsExpected, String tableName, @Nullable String property,
                              @Nullable String argument, @Nullable String scanArgument) throws Exception {
  // Now scan and sleep for a while after each result
  Transaction tx = txClient.startShort();
  DatasetProperties props = property == null ? DatasetProperties.EMPTY
    : DatasetProperties.of(ImmutableMap.of(HConstants.HBASE_CLIENT_SCANNER_CACHING, property));
  Map<String, String> arguments = argument == null ? Collections.<String, String>emptyMap()
    : ImmutableMap.of(HConstants.HBASE_CLIENT_SCANNER_CACHING, argument);
  Scan scan = new Scan(null, null);
  if (scanArgument != null) {
    scan.setProperty(HConstants.HBASE_CLIENT_SCANNER_CACHING, scanArgument);
  }
  Table table = getTable(CONTEXT1, tableName, props, arguments);
  ((TransactionAware) table).startTx(tx);
  Scanner scanner = table.scan(scan);
  int scanCount = 0;
  try {
    while (scanner.next() != null) {
      scanCount++;
      TimeUnit.MILLISECONDS.sleep(10);
    }
    scanner.close();
  } finally {
    LOG.info("Scanned {} rows.", scanCount);
    txClient.abort(tx);
  }
  Assert.assertEquals(rowsExpected, scanCount);
}
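The helper accepts the scanner caching setting at three levels: as a dataset property, as a runtime argument, and directly on the Scan. A minimal sketch of the precedence this implies, with scan-level winning over the runtime argument, which wins over the dataset property; resolveScannerCaching is a hypothetical helper written for illustration (it assumes Scan exposes a getProperty matching the setProperty used above), not CDAP API:

// Hypothetical sketch of the setting precedence the tests below exercise; not CDAP code.
static int resolveScannerCaching(Scan scan, Map<String, String> runtimeArgs,
                                 Map<String, String> datasetProps, int defaultCaching) {
  // scan property wins over runtime argument, which wins over dataset property
  String value = scan.getProperty(HConstants.HBASE_CLIENT_SCANNER_CACHING);
  if (value == null) {
    value = runtimeArgs.get(HConstants.HBASE_CLIENT_SCANNER_CACHING);
  }
  if (value == null) {
    value = datasetProps.get(HConstants.HBASE_CLIENT_SCANNER_CACHING);
  }
  return value == null ? defaultCaching : Integer.parseInt(value);
}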
use of org.apache.tephra.Transaction in project cdap by caskdata.
the class HBaseTableTest method testCachedEncodedTransaction.
@Test
public void testCachedEncodedTransaction() throws Exception {
  String tableName = "testEncodedTxTable";
  DatasetProperties props = DatasetProperties.EMPTY;
  getTableAdmin(CONTEXT1, tableName, props).create();
  DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
  // use a transaction codec that counts the number of times encode() is called
  final AtomicInteger encodeCount = new AtomicInteger();
  final TransactionCodec codec = new TransactionCodec() {
    @Override
    public byte[] encode(Transaction tx) throws IOException {
      encodeCount.incrementAndGet();
      return super.encode(tx);
    }
  };
  // use a table util that creates an HTable that validates the encoded tx on each get
  final AtomicReference<Transaction> txRef = new AtomicReference<>();
  HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {
    @Override
    public HTable createHTable(Configuration conf, TableId tableId) throws IOException {
      HTable htable = super.createHTable(conf, tableId);
      return new MinimalDelegatingHTable(htable) {
        @Override
        public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.get(get);
        }

        @Override
        public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
          for (org.apache.hadoop.hbase.client.Get get : gets) {
            Assert.assertEquals(txRef.get().getTransactionId(),
                                codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          }
          return super.get(gets);
        }

        @Override
        public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.getScanner(scan);
        }
      };
    }
  };
  HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(),
                                    cConf, TEST_HBASE.getConfiguration(), util, codec);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  // test all operations: only the first one encodes
  Transaction tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  Assert.assertEquals(0, encodeCount.get());
  table.get(b("row"));
  Assert.assertEquals(1, encodeCount.get());
  table.get(ImmutableList.of(new Get("a"), new Get("b")));
  Assert.assertEquals(1, encodeCount.get());
  Scanner scanner = table.scan(new Scan(null, null));
  Assert.assertEquals(1, encodeCount.get());
  scanner.close();
  table.increment(b("z"), b("z"), 0L);
  Assert.assertEquals(1, encodeCount.get());
  table.commitTx();
  table.postTxCommit();
  // test that for the next tx, we encode again
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(2, encodeCount.get());
  table.commitTx();
  // test that we encode again, even if postTxCommit was not called
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(3, encodeCount.get());
  table.commitTx();
  table.rollbackTx();
  // test that rollback does not encode the tx
  Assert.assertEquals(3, encodeCount.get());
  // test that we encode again if the previous tx rolled back
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(4, encodeCount.get());
  table.commitTx();
  table.close();
  Assert.assertEquals(4, encodeCount.get());
}
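The assertion pattern above (a buffered put does not encode, the first read in each transaction does, and later operations reuse the result) implies the table caches the encoded transaction per startTx(). A minimal sketch of such caching; the field and method names are assumptions for illustration, not HBaseTable's actual internals:

// Illustrative sketch of per-transaction encode caching; names are assumed.
private Transaction tx;
private byte[] encodedTx;  // cached encoding, valid only for the current tx

public void startTx(Transaction tx) {
  this.tx = tx;
  this.encodedTx = null;  // a new transaction must be encoded again
}

private byte[] getEncodedTx(TransactionCodec codec) throws IOException {
  if (encodedTx == null) {
    // the first operation that attaches the tx to a server request encodes it;
    // buffered writes like put() never do, matching the encodeCount assertions
    encodedTx = codec.encode(tx);
  }
  return encodedTx;
}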
use of org.apache.tephra.Transaction in project cdap by caskdata.
the class HBaseTableTest method testScannerCache.
@Test
public void testScannerCache() throws Exception {
  String tableName = "scanCache";
  // note: it appears that HBase only enforces the scanner timeout after 10 seconds.
  // setting it to 3 seconds does not mean it will actually fail after 3 seconds.
  // therefore we have to cross the 10 seconds. here: 1200 times 10ms sleep.
  int numRows = 1200;
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName);
  admin.create();
  try {
    // write some rows and commit
    Transaction tx1 = txClient.startShort();
    Table myTable1 = getTable(CONTEXT1, tableName);
    ((TransactionAware) myTable1).startTx(tx1);
    for (int i = 0; i < numRows; i++) {
      myTable1.put(new Put("" + i, "x", "y"));
    }
    txClient.canCommitOrThrow(tx1, ((TransactionAware) myTable1).getTxChanges());
    Assert.assertTrue(((TransactionAware) myTable1).commitTx());
    txClient.commitOrThrow(tx1);
    try {
      testScannerCache(numRows, tableName, null, null, null);
      Assert.fail("this should have failed with ScannerTimeoutException");
    } catch (Exception e) {
      // we expect a RuntimeException wrapping an HBase ScannerTimeoutException
      if (!(e.getCause() instanceof ScannerTimeoutException)) {
        throw e;
      }
    }
    // cache=100 as a dataset property
    testScannerCache(numRows, tableName, "100", null, null);
    // cache=100 as a dataset runtime argument, overriding the dataset property
    testScannerCache(numRows, tableName, "1000", "100", null);
    // cache=100 as a scan property, overriding both of the above
    testScannerCache(numRows, tableName, "5000", "1000", "100");
  } finally {
    admin.drop();
  }
}
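The arithmetic behind the magic numbers: each batch of `cache` rows takes cache × 10 ms to consume on the client, and the scanner lease is only renewed when the next batch is fetched. With cache=100 the lease is renewed every second; with no caching configured, the batch is evidently large enough that the roughly 10-second lease expires mid-batch, which is why the unconfigured scan is expected to fail. If a test environment needs a shorter lease, HBase exposes configuration keys for it, though (as the comment above notes) values below about 10 seconds were not reliably enforced here. Both keys below are real HBase settings; which one applies depends on the HBase version, which is an assumption in this sketch:

// Sketch: shortening the scanner lease in a test HBase configuration.
Configuration conf = HBaseConfiguration.create();
conf.setLong("hbase.client.scanner.timeout.period", 3000L);  // newer HBase versions
conf.setLong("hbase.regionserver.lease.period", 3000L);      // older HBase versions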
use of org.apache.tephra.Transaction in project cdap by caskdata.
the class TransactionServiceClientTest method testGetSnapshot.
@Test
public void testGetSnapshot() throws Exception {
  TransactionSystemClient client = getClient();
  SnapshotCodecProvider codecProvider = new SnapshotCodecProvider(injector.getInstance(Configuration.class));
  Transaction tx1 = client.startShort();
  long currentTime = System.currentTimeMillis();
  TransactionSnapshot snapshot;
  try (InputStream in = client.getSnapshotInputStream()) {
    snapshot = codecProvider.decode(in);
  }
  Assert.assertTrue(snapshot.getTimestamp() >= currentTime);
  Assert.assertTrue(snapshot.getInProgress().containsKey(tx1.getWritePointer()));
  // Ensures that getSnapshot didn't persist a snapshot
  TransactionSnapshot snapshotAfter = getStateStorage().getLatestSnapshot();
  if (snapshotAfter != null) {
    Assert.assertTrue(snapshot.getTimestamp() > snapshotAfter.getTimestamp());
  }
}
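Beyond the two assertions, a decoded snapshot can be inspected directly. A brief sketch, assuming only what the test already relies on, namely that TransactionSnapshot.getInProgress() returns a map keyed by write pointer:

// Sketch: list the transactions that were in progress when the snapshot
// was taken; the keys are write pointers, matching the assertion above.
for (Long writePointer : snapshot.getInProgress().keySet()) {
  System.out.println("in-progress transaction, write pointer = " + writePointer);
}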
use of org.apache.tephra.Transaction in project cdap by caskdata.
the class CubeDatasetTest method testTxRetryOnFailure.
@Test
public void testTxRetryOnFailure() throws Exception {
  // This test ensures that there's no non-transactional cache used in the cube dataset. For that, it
  // 1) simulates a transaction conflict for the first write to the cube
  // 2) attempts to write again, and writes successfully
  // 3) uses a second cube instance to read the result
  //
  // In case there's a non-transactional cache used in the cube, it would fill entity mappings in the first tx, and
  // only use them to write data. Hence, when reading there would be no mapping in the entity table to decode, as
  // the first tx that wrote it is not visible (it was aborted on conflict).
  Aggregation agg1 = new DefaultAggregation(ImmutableList.of("dim1", "dim2", "dim3"));
  int resolution = 1;
  Cube cube1 = getCubeInternal("concurrCube", new int[] { resolution }, ImmutableMap.of("agg1", agg1));
  Cube cube2 = getCubeInternal("concurrCube", new int[] { resolution }, ImmutableMap.of("agg1", agg1));
  Configuration txConf = HBaseConfiguration.create();
  TransactionManager txManager = new TransactionManager(txConf);
  txManager.startAndWait();
  try {
    TransactionSystemClient txClient = new InMemoryTxSystemClient(txManager);
    // 1) write, then abort after commitTx() to simulate a conflict
    Transaction tx = txClient.startShort();
    ((TransactionAware) cube1).startTx(tx);
    writeInc(cube1, "metric1", 1, 1, "1", "1", "1");
    ((TransactionAware) cube1).commitTx();
    txClient.abort(tx);
    ((TransactionAware) cube1).rollbackTx();
    // 2) write successfully this time
    tx = txClient.startShort();
    ((TransactionAware) cube1).startTx(tx);
    writeInc(cube1, "metric1", 1, 1, "1", "1", "1");
    ((TransactionAware) cube1).commitTx();
    txClient.commitOrThrow(tx);
    ((TransactionAware) cube1).postTxCommit();
    // 3) read using a different cube instance
    tx = txClient.startShort();
    ((TransactionAware) cube2).startTx(tx);
    verifyCountQuery(cube2, 0, 2, resolution, "metric1", AggregationFunction.SUM,
                     new HashMap<String, String>(), new ArrayList<String>(),
                     ImmutableList.of(new TimeSeries("metric1", new HashMap<String, String>(), timeValues(1, 1))));
    ((TransactionAware) cube2).commitTx();
    txClient.commitOrThrow(tx);
    ((TransactionAware) cube2).postTxCommit();
  } finally {
    txManager.stopAndWait();
  }
}
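Steps 1 through 3 above walk the client-side Tephra protocol by hand. A minimal sketch of the same choreography as a reusable helper; executeInTx and runOnce are illustrative names, not CDAP or Tephra API, but every call inside is one the tests in this section use:

// Illustrative helper for the protocol exercised above: on success,
// canCommitOrThrow() -> commitTx() -> commitOrThrow() -> postTxCommit();
// on failure, abort the tx on the server and rollbackTx() on the client.
static void executeInTx(TransactionSystemClient txClient, TransactionAware aware,
                        Runnable runOnce) throws Exception {
  Transaction tx = txClient.startShort();
  aware.startTx(tx);
  try {
    runOnce.run();
    txClient.canCommitOrThrow(tx, aware.getTxChanges());  // detect write conflicts
    if (!aware.commitTx()) {
      throw new IllegalStateException("commitTx() failed");
    }
    txClient.commitOrThrow(tx);
    aware.postTxCommit();
  } catch (Exception e) {
    txClient.abort(tx);   // undo on the transaction service
    aware.rollbackTx();   // undo client-side state, as step 1 above does
    throw e;
  }
}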