use of io.cdap.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
the class DatasetDefinitionRegistryWithDefaultModules, method testIndexedTableReconfigure.
@Test
public void testIndexedTableReconfigure() throws IncompatibleUpdateException {
  DatasetDefinition indexedTableDef = registry.get(IndexedTable.class.getName());
  Assert.assertTrue(indexedTableDef instanceof Reconfigurable);
  DatasetProperties props = TableProperties.builder()
    .setReadlessIncrementSupport(false)
    .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, "a,b,c")
    .build();
  DatasetSpecification spec = indexedTableDef.configure("idxtb", props);
  DatasetProperties compat = TableProperties.builder()
    .setReadlessIncrementSupport(true) // turning on is ok
    .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, "c,b,a")
    .build();
  spec = ((Reconfigurable) indexedTableDef).reconfigure("idxtb", compat, spec);
  DatasetProperties incompat = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, "a,d")
    .build();
  try {
    ((Reconfigurable) indexedTableDef).reconfigure("idxtb", incompat, spec);
    Assert.fail("reconfigure should have thrown exception");
  } catch (IncompatibleUpdateException e) {
    // expected
  }
  incompat = TableProperties.builder()
    .setReadlessIncrementSupport(false) // turning off is not ok
    .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, "a,b,c")
    .build();
  try {
    ((Reconfigurable) indexedTableDef).reconfigure("idxtb", incompat, spec);
    Assert.fail("reconfigure should have thrown exception");
  } catch (IncompatibleUpdateException e) {
    // expected
  }
}
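The two compatibility rules this test exercises can be captured in a pair of small checks: reordering index columns keeps the same column set, and readless increment support may only be turned on, never off (increments already written in readless form would otherwise become unreadable). A minimal sketch of those checks in plain Java; the helper names sameColumnSet and readlessChangeOk are hypothetical, not CDAP's actual implementation:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class ReconfigureChecks {
  // "a,b,c" -> "c,b,a" is compatible (same column set); "a,b,c" -> "a,d" is not
  static boolean sameColumnSet(String oldColumns, String newColumns) {
    Set<String> oldSet = new HashSet<>(Arrays.asList(oldColumns.split(",")));
    Set<String> newSet = new HashSet<>(Arrays.asList(newColumns.split(",")));
    return oldSet.equals(newSet);
  }

  // turning readless increments on is compatible; turning them off is not
  static boolean readlessChangeOk(boolean oldReadless, boolean newReadless) {
    return newReadless || !oldReadless;
  }
}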
use of io.cdap.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
the class DatasetDefinitionRegistryWithDefaultModules, method testCompositeDatasetReconfigure.
// tests that CompositeDatasetDefinition correctly delegates reconfigure() to its embedded types
@Test
public void testCompositeDatasetReconfigure() throws IncompatibleUpdateException {
  CompositeDatasetDefinition composite =
    new CompositeDatasetDefinition("composite", "pedantic", new PedanticDatasetDefinition("pedantic")) {
      @Override
      public Dataset getDataset(DatasetContext datasetContext, DatasetSpecification spec,
                                Map arguments, ClassLoader classLoader) {
        return null;
      }
    };
  DatasetSpecification spec = composite.configure("nn", DatasetProperties.EMPTY);
  DatasetSpecification respec = composite.reconfigure("nn", DatasetProperties.EMPTY, spec);
  Assert.assertEquals(spec, respec);
  try {
    composite.reconfigure("nn", DatasetProperties.builder().add("immutable", "x").build(), spec);
    Assert.fail("reconfigure should have thrown exception");
  } catch (IncompatibleUpdateException e) {
    // expected
  }
}
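The delegation being tested follows a simple dispatch: for each embedded definition, call reconfigure() if the delegate implements Reconfigurable, otherwise fall back to configure(). A sketch of that dispatch for a single delegate (the method name is hypothetical; CDAP's CompositeDatasetDefinition applies this to every embedded type):

static DatasetSpecification reconfigureDelegate(DatasetDefinition<?, ?> delegate, String name,
                                                DatasetProperties newProperties,
                                                DatasetSpecification currentSpec)
    throws IncompatibleUpdateException {
  if (delegate instanceof Reconfigurable) {
    // the delegate decides compatibility itself and may throw,
    // as PedanticDatasetDefinition does for the "immutable" property above
    return ((Reconfigurable) delegate).reconfigure(name, newProperties, currentSpec);
  }
  // a delegate that is not Reconfigurable accepts any new properties
  return delegate.configure(name, newProperties);
}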
use of io.cdap.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
the class TestDatasetDefinition, method getDataset.
@Override
public TestDataset getDataset(DatasetContext datasetContext, DatasetSpecification spec,
                              Map<String, String> arguments, ClassLoader classLoader) throws IOException {
  DatasetSpecification kvTableSpec = spec.getSpecification("kv");
  KeyValueTable table = tableDef.getDataset(datasetContext, kvTableSpec, DatasetDefinition.NO_ARGUMENTS, classLoader);
  return new TestDataset(spec, table, arguments);
}
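For spec.getSpecification("kv") to return anything, the definition's configure() must have embedded a sub-specification under that name. A hypothetical counterpart configure(), reusing the tableDef field from the snippet above; this is a sketch, not the actual TestDatasetDefinition source:

@Override
public DatasetSpecification configure(String instanceName, DatasetProperties properties) {
  return DatasetSpecification.builder(instanceName, getName())
    .properties(properties.getProperties())
    // record the embedded KeyValueTable spec under the name "kv"
    .datasets(tableDef.configure("kv", properties))
    .build();
}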
use of io.cdap.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
the class DatasetsUtilTest, method testFix.
private void testFix(String type, DatasetProperties props) {
  DatasetDefinition def = DatasetFrameworkTestUtil.getDatasetDefinition(inMemoryDatasetFramework, NamespaceId.DEFAULT, type);
  Assert.assertNotNull(def);
  DatasetSpecification spec = def.configure("nn", props);
  Map<String, String> originalProperties = DatasetsUtil.fixOriginalProperties(spec).getOriginalProperties();
  Assert.assertEquals(props.getProperties(), originalProperties);
}
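A call site would pass a registered dataset type name and the properties to round-trip through fixOriginalProperties(); the type name and property key below are illustrative:

testFix("table", DatasetProperties.EMPTY);
testFix("table", DatasetProperties.builder().add("key", "value").build());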
use of io.cdap.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
the class HBaseTableTest, method testCachedEncodedTransaction.
@Test
public void testCachedEncodedTransaction() throws Exception {
  String tableName = "testEncodedTxTable";
  DatasetProperties props = DatasetProperties.EMPTY;
  getTableAdmin(CONTEXT1, tableName, props).create();
  DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
  // use a transaction codec that counts the number of times encode() is called
  final AtomicInteger encodeCount = new AtomicInteger();
  final TransactionCodec codec = new TransactionCodec() {
    @Override
    public byte[] encode(Transaction tx) throws IOException {
      encodeCount.incrementAndGet();
      return super.encode(tx);
    }
  };
  // use a table util that creates an HTable that validates the encoded tx on each get
  final AtomicReference<Transaction> txRef = new AtomicReference<>();
  HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {
    @Override
    public org.apache.hadoop.hbase.client.Table createTable(Configuration conf, TableId tableId) throws IOException {
      org.apache.hadoop.hbase.client.Table table = super.createTable(conf, tableId);
      return new DelegatingTable(table) {
        @Override
        public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.get(get);
        }

        @Override
        public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
          for (org.apache.hadoop.hbase.client.Get get : gets) {
            Assert.assertEquals(txRef.get().getTransactionId(),
                                codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          }
          return super.get(gets);
        }

        @Override
        public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.getScanner(scan);
        }
      };
    }
  };
  HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(),
                                    cConf, TEST_HBASE.getConfiguration(), util, codec);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();

  // test all operations: only the first one encodes
  Transaction tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  Assert.assertEquals(0, encodeCount.get());
  table.get(b("row"));
  Assert.assertEquals(1, encodeCount.get());
  table.get(ImmutableList.of(new Get("a"), new Get("b")));
  Assert.assertEquals(1, encodeCount.get());
  Scanner scanner = table.scan(new Scan(null, null));
  Assert.assertEquals(1, encodeCount.get());
  scanner.close();
  table.increment(b("z"), b("z"), 0L);
  Assert.assertEquals(1, encodeCount.get());
  table.commitTx();
  table.postTxCommit();

  // test that for the next tx, we encode again
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(2, encodeCount.get());
  table.commitTx();

  // test that we encode again, even if postTxCommit was not called
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(3, encodeCount.get());
  table.commitTx();
  table.rollbackTx();
  // test that rollback does not encode the tx
  Assert.assertEquals(3, encodeCount.get());

  // test that we encode again if the previous tx rolled back
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(4, encodeCount.get());
  table.commitTx();
  table.close();
  Assert.assertEquals(4, encodeCount.get());
}
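The assertions above pin down the caching behavior: HBaseTable encodes the transaction once per startTx() and reuses the bytes for every get and scan within that transaction. A standalone sketch of the same idea, assuming Tephra's TransactionCodec; this is illustrative, not HBaseTable's actual field layout:

import java.io.IOException;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;

final class CachingTxEncoder {
  private final TransactionCodec codec = new TransactionCodec();
  private long cachedTxId = -1L;
  private byte[] cachedBytes;

  // encode() runs only on the first use per transaction; later calls reuse the bytes
  byte[] encodedTx(Transaction tx) throws IOException {
    if (cachedBytes == null || cachedTxId != tx.getTransactionId()) {
      cachedTxId = tx.getTransactionId();
      cachedBytes = codec.encode(tx);
    }
    return cachedBytes;
  }
}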