Use of org.apache.tephra.Transaction in project cdap by caskdata.
The class TableTest, method testEmptyDelete.
@Test
public void testEmptyDelete() throws Exception {
  DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
  admin.create();
  try {
    Transaction tx = txClient.startShort();
    Table myTable = getTable(CONTEXT1, MY_TABLE);
    ((TransactionAware) myTable).startTx(tx);
    myTable.put(R1, C1, V1);
    myTable.put(R1, C2, V2);
    myTable.put(R1, C3, V3);
    // specifying empty columns means to delete nothing
    myTable.delete(R1, new byte[][] {});
    myTable.delete(new Delete(R1, new byte[][] {}));
    myTable.delete(new Delete(R1, ImmutableList.<byte[]>of()));
    myTable.delete(new Delete(Bytes.toString(R1), new String[] {}));
    myTable.delete(new Delete(Bytes.toString(R1), ImmutableList.<String>of()));
    // verify that the above delete calls deleted nothing
    Row row = myTable.get(R1);
    Assert.assertEquals(3, row.getColumns().size());
    Assert.assertArrayEquals(R1, row.getRow());
    Assert.assertArrayEquals(V1, row.get(C1));
    Assert.assertArrayEquals(V2, row.get(C2));
    Assert.assertArrayEquals(V3, row.get(C3));
    // test deletion of a single column
    Delete delete = new Delete(R1);
    Assert.assertNull(delete.getColumns());
    delete.add(C1);
    Assert.assertNotNull(delete.getColumns());
    myTable.delete(delete);
    row = myTable.get(R1);
    Assert.assertEquals(2, row.getColumns().size());
    Assert.assertArrayEquals(R1, row.getRow());
    Assert.assertArrayEquals(V2, row.get(C2));
    Assert.assertArrayEquals(V3, row.get(C3));
    // test deletion of all columns
    myTable.delete(new Delete(R1));
    Assert.assertEquals(0, myTable.get(R1).getColumns().size());
    txClient.abort(tx);
  } finally {
    admin.drop();
  }
}
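Note that testEmptyDelete ends by discarding its writes with txClient.abort(tx). For contrast, here is a minimal sketch of the full commit path, assembled from the calls the other tests on this page make (canCommitOrThrow, commitTx, commitOrThrow, postTxCommit); the helper name putAndCommit is hypothetical, and the txClient/table setup is assumed to match the tests above.

// Sketch (not from the original test): the full Tephra commit handshake,
// which testEmptyDelete skips by aborting the transaction.
private void putAndCommit(Table myTable) throws Exception {
  Transaction tx = txClient.startShort();
  TransactionAware txAware = (TransactionAware) myTable;
  txAware.startTx(tx);
  try {
    myTable.put(R1, C1, V1);
    // detect write conflicts, persist buffered changes, then make the tx visible
    txClient.canCommitOrThrow(tx, txAware.getTxChanges());
    txAware.commitTx();
    txClient.commitOrThrow(tx);
    txAware.postTxCommit();
  } catch (TransactionFailureException e) {
    // on conflict: undo buffered (and possibly persisted) changes, give up the tx
    txAware.rollbackTx();
    txClient.abort(tx);
    throw e;
  }
}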
Use of org.apache.tephra.Transaction in project cdap by caskdata.
The class TableTest, method testMetrics.
private void testMetrics(boolean readless) throws Exception {
  final String tableName = "survive";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  Table table = getTable(CONTEXT1, tableName, props);
  final Map<String, Long> metrics = Maps.newHashMap();
  ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
    @Override
    public void increment(String metricName, long value) {
      Long old = metrics.get(metricName);
      metrics.put(metricName, old == null ? value : old + value);
    }

    @Override
    public void gauge(String metricName, long value) {
      metrics.put(metricName, value);
    }
  });
  // Note that we don't need to finish the tx for metrics to be reported
  Transaction tx0 = txClient.startShort();
  ((TransactionAware) table).startTx(tx0);
  int writes = 0;
  int reads = 0;
  table.put(new Put(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  // note: will not write anything, as the expected value will not match
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, writes, ++reads);
  table.increment(new Increment(R2, C2, 1L));
  if (readless) {
    verifyDatasetMetrics(metrics, ++writes, reads);
  } else {
    verifyDatasetMetrics(metrics, ++writes, ++reads);
  }
  table.incrementAndGet(new Increment(R2, C2, 1L));
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  table.get(new Get(R1, C1, V1));
  verifyDatasetMetrics(metrics, writes, ++reads);
  Scanner scanner = table.scan(new Scan(null, null));
  while (scanner.next() != null) {
    verifyDatasetMetrics(metrics, writes, ++reads);
  }
  table.delete(new Delete(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  // drop table
  admin.drop();
}
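The verifyDatasetMetrics helper is not shown on this page. A plausible sketch follows; the metric names "dataset.store.writes" and "dataset.store.reads" are assumptions (the real test would take them from CDAP's constants):

// Hypothetical sketch of the verifyDatasetMetrics helper used above.
private void verifyDatasetMetrics(Map<String, Long> metrics, long writes, long reads) {
  // the metric names below are assumed, not taken from the CDAP source
  Assert.assertEquals(writes, metrics.getOrDefault("dataset.store.writes", 0L).longValue());
  Assert.assertEquals(reads, metrics.getOrDefault("dataset.store.reads", 0L).longValue());
}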
Use of org.apache.tephra.Transaction in project cdap by caskdata.
The class TableTest, method testScanWithFuzzyRowFilter.
@Test
public void testScanWithFuzzyRowFilter() throws Exception {
  DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
  admin.create();
  try {
    Transaction tx1 = txClient.startShort();
    Table table = getTable(CONTEXT1, MY_TABLE);
    ((TransactionAware) table).startTx(tx1);
    // write data
    byte[] abc = { 'a', 'b', 'c' };
    for (byte b1 : abc) {
      for (byte b2 : abc) {
        for (byte b3 : abc) {
          for (byte b4 : abc) {
            table.put(new Put(new byte[] { b1, b2, b3, b4 }).add(C1, V1));
          }
        }
      }
    }
    // we should have 81 (3^4) rows now
    Assert.assertEquals(81, countRows(table));
    // check that the filter works against data written in the same tx
    verifyScanWithFuzzyRowFilter(table);
    // commit the tx, start a new one, and verify the scan again against "persisted" data
    txClient.canCommitOrThrow(tx1, ((TransactionAware) table).getTxChanges());
    Assert.assertTrue(((TransactionAware) table).commitTx());
    txClient.commitOrThrow(tx1);
    ((TransactionAware) table).postTxCommit();
    Transaction tx2 = txClient.startShort();
    ((TransactionAware) table).startTx(tx2);
    verifyScanWithFuzzyRowFilter(table);
  } finally {
    admin.drop();
  }
}
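verifyScanWithFuzzyRowFilter is also not shown. A sketch of what it plausibly checks, assuming a FuzzyRowFilter with HBase-style masks (mask byte 0 = position must match, 1 = position is fuzzy) and a Scan constructor that accepts a filter; both class shapes are assumptions here:

// Hypothetical sketch: match every 4-byte row whose 2nd and 4th bytes are 'b',
// i.e. the pattern ?b?b, which selects 3 * 3 = 9 of the 81 rows written above.
private static void verifyScanWithFuzzyRowFilter(Table table) throws Exception {
  FuzzyRowFilter filter = new FuzzyRowFilter(ImmutableList.of(
    ImmutablePair.of(new byte[] { '?', 'b', '?', 'b' }, new byte[] { 1, 0, 1, 0 })));
  Scanner scanner = table.scan(new Scan(null, null, filter));
  int count = 0;
  Row row;
  while ((row = scanner.next()) != null) {
    Assert.assertEquals('b', row.getRow()[1]);
    Assert.assertEquals('b', row.getRow()[3]);
    count++;
  }
  Assert.assertEquals(9, count);
}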
Use of org.apache.tephra.Transaction in project cdap by caskdata.
The class TableTest, method testRollingBackPersistedChanges.
@Test
public void testRollingBackPersistedChanges() throws Exception {
  DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
  admin.create();
  try {
    // write and commit one row/column
    Transaction tx0 = txClient.startShort();
    Table myTable0 = getTable(CONTEXT1, MY_TABLE);
    ((TransactionAware) myTable0).startTx(tx0);
    myTable0.put(R2, a(C2), a(V2));
    txClient.canCommitOrThrow(tx0, ((TransactionAware) myTable0).getTxChanges());
    Assert.assertTrue(((TransactionAware) myTable0).commitTx());
    txClient.commitOrThrow(tx0);
    ((TransactionAware) myTable0).postTxCommit();
    Transaction tx1 = txClient.startShort();
    Table myTable1 = getTable(CONTEXT1, MY_TABLE);
    ((TransactionAware) myTable1).startTx(tx1);
    // write r1->c1,v1 but do not commit
    myTable1.put(R1, a(C1), a(V1));
    // also overwrite the value from tx0
    myTable1.put(R2, a(C2), a(V3));
    // verify that the changes are visible inside the tx
    TableAssert.assertRow(a(C1, V1), myTable1.get(R1, a(C1)));
    // persist the changes
    Assert.assertTrue(((TransactionAware) myTable1).commitTx());
    // let's pretend that we still hit a conflict when finalizing the tx, even after
    // persisting the changes, so we roll them back
    Assert.assertTrue(((TransactionAware) myTable1).rollbackTx());
    // abort the tx; its changes were rolled back, so it need not be invalidated
    txClient.abort(tx1);
    // start a new tx
    Transaction tx2 = txClient.startShort();
    Table myTable2 = getTable(CONTEXT1, MY_TABLE);
    ((TransactionAware) myTable2).startTx(tx2);
    // verify that the rolled-back changes are not visible
    TableAssert.assertRow(a(), myTable2.get(R1, a(C1)));
    // verify that we still see the previously committed value
    TableAssert.assertRow(a(C2, V2), myTable2.get(R2, a(C2)));
  } finally {
    admin.drop();
  }
}
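The manual commitTx/rollbackTx choreography above is exactly what Tephra's TransactionExecutor automates. A sketch, under the assumption that the same txClient and table are wired into a DefaultTransactionExecutor:

// Sketch: the same write, with conflict handling delegated to Tephra.
// On a conflict, execute() rolls back the table's changes and throws
// TransactionFailureException, leaving previously committed data intact.
TransactionExecutor executor =
  new DefaultTransactionExecutor(txClient, (TransactionAware) myTable1);
executor.execute(() -> myTable1.put(R1, a(C1), a(V1)));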
Use of org.apache.tephra.Transaction in project cdap by caskdata.
The class TableTest, method testBatchWritableKeyIsIgnored.
@Test
public void testBatchWritableKeyIsIgnored() throws Exception {
  String tableName = "batchWritableTable";
  getTableAdmin(CONTEXT1, tableName).create();
  try {
    // write in a transaction, three times, with key = null, q, a; always a Put with row = a
    Transaction tx = txClient.startShort();
    Table table = getTable(CONTEXT1, tableName);
    ((TransactionAware) table).startTx(tx);
    table.write(null, new Put("a").add("x", "x"));
    table.write(new byte[] { 'q' }, new Put("a").add("y", "y"));
    table.write(new byte[] { 'a' }, new Put("a").add("z", "z"));
    txClient.canCommitOrThrow(tx, ((TransactionAware) table).getTxChanges());
    ((TransactionAware) table).commitTx();
    txClient.commitOrThrow(tx);
    // validate that all writes went to row a, and that row q was never written
    tx = txClient.startShort();
    ((TransactionAware) table).startTx(tx);
    Assert.assertTrue(table.get(new Get("q")).isEmpty());
    Row row = table.get(new Get("a"));
    Assert.assertEquals(3, row.getColumns().size());
    Assert.assertEquals("x", row.getString("x"));
    Assert.assertEquals("y", row.getString("y"));
    Assert.assertEquals("z", row.getString("z"));
    ((TransactionAware) table).commitTx();
    txClient.abort(tx);
  } finally {
    getTableAdmin(CONTEXT1, tableName).drop();
  }
}
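The write(key, put) method exists because Table implements CDAP's BatchWritable interface (so a table can serve as the output of a batch job), and the key is ignored because a Put already names its row. A short sketch of the equivalence the test demonstrates; the example values are illustrative:

// these two calls behave identically: the BatchWritable key is ignored,
// and the row comes from the Put itself
table.write(Bytes.toBytes("ignored"), new Put("a").add("x", "x"));
table.put(new Put("a").add("x", "x"));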