Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata: class PartitionedFileSetTest, method before().
@Before
public void before() throws Exception {
  txClient = new InMemoryTxSystemClient(dsFrameworkUtil.getTxManager());
  dsFrameworkUtil.createInstance("partitionedFileSet", pfsInstance,
    PartitionedFileSetProperties.builder()
      .setPartitioning(PARTITIONING_1)
      .setTablePermissions(tablePermissions)
      .setBasePath("testDir")
      .setFilePermissions(fsPermissions)
      .setFileGroup(group)
      .build());
  pfsBaseLocation = ((PartitionedFileSet) dsFrameworkUtil.getInstance(pfsInstance))
    .getEmbeddedFileSet().getBaseLocation();
  Assert.assertTrue(pfsBaseLocation.exists());
}
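For context, a test would typically drive this fixture through Tephra's TransactionExecutor, using the txClient created in before(). A minimal sketch, assuming PARTITIONING_1 declares a single string field named "s" (that field name, its value, and the relative path are illustrative assumptions, not taken from the listing):

  // Minimal sketch: add a partition transactionally via the txClient from before().
  // The field name "s", its value, and the relative path are assumed for illustration.
  final PartitionedFileSet pfs = (PartitionedFileSet) dsFrameworkUtil.getInstance(pfsInstance);
  TransactionExecutor executor = new DefaultTransactionExecutor(
    txClient, ImmutableList.of((TransactionAware) pfs));
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      pfs.addPartition(PartitionKey.builder().addStringField("s", "x").build(), "some/relative/path");
    }
  });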
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata: class TableTest, method before().
@Before
public void before() {
  Configuration txConf = HBaseConfiguration.create();
  TransactionManager txManager = new TransactionManager(txConf);
  txManager.startAndWait();
  txClient = new InMemoryTxSystemClient(txManager);
}
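Since this fixture starts its own TransactionManager, a matching teardown is needed to release it. A minimal counterpart sketch, assuming the manager is kept in a field (the listing above assigns it to a local variable, so this is an assumed refactoring):

  // Assumed companion teardown: requires keeping the TransactionManager in a
  // field rather than the local variable used above.
  @After
  public void after() {
    if (txManager != null) {
      txManager.stopAndWait();  // shut down the in-memory transaction manager
    }
  }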
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata: class DatasetOpExecutorServiceTest, method testRest().
@Test
public void testRest() throws Exception {
  // check non-existence with 404
  testAdminOp(bob, "exists", 404, null);
  // add instance, should automatically create an instance
  dsFramework.addInstance("table", bob, DatasetProperties.EMPTY);
  testAdminOp(bob, "exists", 200, true);
  testAdminOp("bob", "exists", 404, null);
  // check truncate
  final Table table = dsFramework.getDataset(bob, DatasetDefinition.NO_ARGUMENTS, null);
  Assert.assertNotNull(table);
  TransactionExecutor txExecutor = new DefaultTransactionExecutor(
    new InMemoryTxSystemClient(txManager),
    ImmutableList.of((TransactionAware) table));
  // write something to the table
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      table.put(new Put("key1", "col1", "val1"));
    }
  });
  // verify that we can read the data
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Assert.assertEquals("val1", table.get(new Get("key1", "col1")).getString("col1"));
    }
  });
  testAdminOp(bob, "truncate", 200, null);
  // verify that the data is no longer there
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Assert.assertTrue(table.get(new Get("key1", "col1")).isEmpty());
    }
  });
  // check upgrade
  testAdminOp(bob, "upgrade", 200, null);
  // drop and check non-existence
  dsFramework.deleteInstance(bob);
  testAdminOp(bob, "exists", 404, null);
}
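For reference, the DefaultTransactionExecutor used above drives the standard Tephra client-side commit protocol. A simplified sketch of the manual equivalent for a single TransactionAware (an illustration of the protocol only; the real executor also handles retries and exceptions):

  // Simplified sketch of what DefaultTransactionExecutor.execute(...) does for
  // one TransactionAware; retry and exception handling omitted.
  TransactionSystemClient txClient = new InMemoryTxSystemClient(txManager);
  TransactionAware txAware = (TransactionAware) table;
  Transaction tx = txClient.startShort();
  txAware.startTx(tx);
  table.put(new Put("key1", "col1", "val1"));
  if (txClient.canCommit(tx, txAware.getTxChanges()) && txAware.commitTx() && txClient.commit(tx)) {
    txAware.postTxCommit();   // everything committed: clear client-side state
  } else {
    txAware.rollbackTx();     // conflict or failure: undo client-side changes
    txClient.abort(tx);
  }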
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata: class CubeDatasetTest, method testTxRetryOnFailure().
@Test
public void testTxRetryOnFailure() throws Exception {
  // This test ensures that no non-transactional cache is used in the cube dataset. For that, it
  // 1) simulates a transaction conflict for the first write to the cube
  // 2) attempts to write again, and writes successfully
  // 3) uses a second cube instance to read the result
  //
  // If the cube used a non-transactional cache, it would fill its entity mappings in the first tx and only
  // use them to write data. Hence, when reading, there would be no mapping in the entity table to decode,
  // as the first tx that wrote it is not visible (it was aborted on conflict).
  Aggregation agg1 = new DefaultAggregation(ImmutableList.of("dim1", "dim2", "dim3"));
  int resolution = 1;
  Cube cube1 = getCubeInternal("concurrCube", new int[] { resolution }, ImmutableMap.of("agg1", agg1));
  Cube cube2 = getCubeInternal("concurrCube", new int[] { resolution }, ImmutableMap.of("agg1", agg1));
  Configuration txConf = HBaseConfiguration.create();
  TransactionManager txManager = new TransactionManager(txConf);
  txManager.startAndWait();
  try {
    TransactionSystemClient txClient = new InMemoryTxSystemClient(txManager);
    // 1) write, then abort after commitTx() to simulate a conflict detected at commit time
    Transaction tx = txClient.startShort();
    ((TransactionAware) cube1).startTx(tx);
    writeInc(cube1, "metric1", 1, 1, "1", "1", "1");
    ((TransactionAware) cube1).commitTx();
    txClient.abort(tx);
    ((TransactionAware) cube1).rollbackTx();
    // 2) write successfully
    tx = txClient.startShort();
    ((TransactionAware) cube1).startTx(tx);
    writeInc(cube1, "metric1", 1, 1, "1", "1", "1");
    // pretend we checked for conflicts and there were none: persist and commit
    ((TransactionAware) cube1).commitTx();
    txClient.commit(tx);
    ((TransactionAware) cube1).postTxCommit();
    // 3) read using a different cube instance
    tx = txClient.startShort();
    ((TransactionAware) cube2).startTx(tx);
    verifyCountQuery(cube2, 0, 2, resolution, "metric1", AggregationFunction.SUM,
                     new HashMap<String, String>(), new ArrayList<String>(),
                     ImmutableList.of(new TimeSeries("metric1", new HashMap<String, String>(),
                                                     timeValues(1, 1))));
    // the read tx made no changes; commit it to release it
    ((TransactionAware) cube2).commitTx();
    txClient.commit(tx);
    ((TransactionAware) cube2).postTxCommit();
  } finally {
    txManager.stopAndWait();
  }
}
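Note that step 1 short-circuits conflict detection by calling abort() directly. In the full protocol a conflict would surface through canCommit(); a simplified sketch of that branch (illustrative, not part of this test):

  // Sketch: where a genuine conflict would be detected. canCommit() returns
  // false when a concurrently committed tx overlaps this tx's change set.
  Transaction tx = txClient.startShort();
  TransactionAware txCube = (TransactionAware) cube1;
  txCube.startTx(tx);
  writeInc(cube1, "metric1", 1, 1, "1", "1", "1");
  if (!txClient.canCommit(tx, txCube.getTxChanges())) {
    txCube.rollbackTx();
    txClient.abort(tx);  // conflict: abort, then retry from startShort() if desired
  } else {
    txCube.commitTx();
    txClient.commit(tx);
    txCube.postTxCommit();
  }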
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata: class DynamicDatasetCacheTest, method init().
@BeforeClass
public static void init() throws DatasetManagementException, IOException {
  dsFramework = dsFrameworkUtil.getFramework();
  dsFramework.addModule(NAMESPACE.datasetModule("testDataset"), new TestDatasetModule());
  dsFramework.addModule(NAMESPACE2.datasetModule("testDataset"), new TestDatasetModule());
  txClient = new InMemoryTxSystemClient(dsFrameworkUtil.getTxManager());
  dsFrameworkUtil.createInstance("testDataset", NAMESPACE.dataset("a"), DatasetProperties.EMPTY);
  dsFrameworkUtil.createInstance("testDataset", NAMESPACE.dataset("b"), DatasetProperties.EMPTY);
  dsFrameworkUtil.createInstance("testDataset", NAMESPACE.dataset("c"), DatasetProperties.EMPTY);
  dsFrameworkUtil.createInstance("testDataset", NAMESPACE2.dataset("a2"), DatasetProperties.EMPTY);
  dsFrameworkUtil.createInstance("testDataset", NAMESPACE2.dataset("c2"), DatasetProperties.EMPTY);
}
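A class-level fixture like this is typically paired with an @AfterClass that removes what it created. A minimal counterpart sketch (assumed, not part of the listing; it presumes dsFrameworkUtil.deleteInstance(...) mirrors createInstance(...)):

  // Assumed companion cleanup: drop the instances created in init(), then the modules.
  @AfterClass
  public static void tearDown() throws Exception {
    for (DatasetId id : ImmutableList.of(
        NAMESPACE.dataset("a"), NAMESPACE.dataset("b"), NAMESPACE.dataset("c"),
        NAMESPACE2.dataset("a2"), NAMESPACE2.dataset("c2"))) {
      dsFrameworkUtil.deleteInstance(id);
    }
    dsFramework.deleteModule(NAMESPACE.datasetModule("testDataset"));
    dsFramework.deleteModule(NAMESPACE2.datasetModule("testDataset"));
  }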