Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class HBaseTableTest, method testScannerCache:
private void testScannerCache(int rowsExpected, String tableName, @Nullable String property,
                              @Nullable String argument, @Nullable String scanArgument) throws Exception {
  // Now scan and sleep for a while after each result
  Transaction tx = txClient.startShort();
  DatasetProperties props = property == null ? DatasetProperties.EMPTY :
    DatasetProperties.of(ImmutableMap.of(HConstants.HBASE_CLIENT_SCANNER_CACHING, property));
  Map<String, String> arguments = argument == null ? Collections.<String, String>emptyMap() :
    ImmutableMap.of(HConstants.HBASE_CLIENT_SCANNER_CACHING, argument);
  Scan scan = new Scan(null, null);
  if (scanArgument != null) {
    scan.setProperty(HConstants.HBASE_CLIENT_SCANNER_CACHING, scanArgument);
  }
  try (Table table = getTable(CONTEXT1, tableName, props, arguments)) {
    ((TransactionAware) table).startTx(tx);
    Scanner scanner = table.scan(scan);
    int scanCount = 0;
    try {
      while (scanner.next() != null) {
        scanCount++;
        TimeUnit.MILLISECONDS.sleep(10);
      }
      scanner.close();
    } finally {
      LOG.info("Scanned {} rows.", scanCount);
      txClient.abort(tx);
    }
    Assert.assertEquals(rowsExpected, scanCount);
  }
}
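A minimal sketch of how this helper might be driven from a test case. The table name, row count, and caching values below are assumptions, and the rows are presumed to have been written before scanning; helpers such as getTableAdmin and getTable come from the surrounding test class.

// Hypothetical driver for the helper above; names and values are illustrative.
@Test
public void testScannerCacheOverrides() throws Exception {
  String tableName = "testCacheTable";
  // scanner caching configured as a dataset property at creation time
  DatasetProperties props =
    DatasetProperties.of(ImmutableMap.of(HConstants.HBASE_CLIENT_SCANNER_CACHING, "100"));
  getTableAdmin(CONTEXT1, tableName, props).create();
  // ... write the 1000 rows the scans below expect, in a committed transaction ...
  testScannerCache(1000, tableName, "100", null, null);   // dataset property only
  testScannerCache(1000, tableName, "100", "500", null);  // runtime argument overrides the property
  testScannerCache(1000, tableName, "100", "500", "250"); // per-scan setting overrides both
}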
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class HBaseTableTest, method testCachedEncodedTransaction:
@Test
public void testCachedEncodedTransaction() throws Exception {
  String tableName = "testEncodedTxTable";
  DatasetProperties props = DatasetProperties.EMPTY;
  getTableAdmin(CONTEXT1, tableName, props).create();
  DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
  // use a transaction codec that counts the number of times encode() is called
  final AtomicInteger encodeCount = new AtomicInteger();
  final TransactionCodec codec = new TransactionCodec() {
    @Override
    public byte[] encode(Transaction tx) throws IOException {
      encodeCount.incrementAndGet();
      return super.encode(tx);
    }
  };
  // use a table util that creates an HTable that validates the encoded tx on each get
  final AtomicReference<Transaction> txRef = new AtomicReference<>();
  HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {
    @Override
    public org.apache.hadoop.hbase.client.Table createTable(Configuration conf, TableId tableId) throws IOException {
      org.apache.hadoop.hbase.client.Table table = super.createTable(conf, tableId);
      return new DelegatingTable(table) {
        @Override
        public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.get(get);
        }

        @Override
        public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
          for (org.apache.hadoop.hbase.client.Get get : gets) {
            Assert.assertEquals(txRef.get().getTransactionId(),
                                codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          }
          return super.get(gets);
        }

        @Override
        public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.getScanner(scan);
        }
      };
    }
  };
  HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(),
                                    cConf, TEST_HBASE.getConfiguration(), util, codec);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  // test all operations: only the first one encodes
  Transaction tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  Assert.assertEquals(0, encodeCount.get());
  table.get(b("row"));
  Assert.assertEquals(1, encodeCount.get());
  table.get(ImmutableList.of(new Get("a"), new Get("b")));
  Assert.assertEquals(1, encodeCount.get());
  Scanner scanner = table.scan(new Scan(null, null));
  Assert.assertEquals(1, encodeCount.get());
  scanner.close();
  table.increment(b("z"), b("z"), 0L);
  Assert.assertEquals(1, encodeCount.get());
  table.commitTx();
  table.postTxCommit();
  // test that for the next tx, we encode again
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(2, encodeCount.get());
  table.commitTx();
  // test that we encode again, even if postTxCommit was not called
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(3, encodeCount.get());
  table.commitTx();
  table.rollbackTx();
  // test that rollback does not encode the tx
  Assert.assertEquals(3, encodeCount.get());
  // test that we encode again if the previous tx rolled back
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(4, encodeCount.get());
  table.commitTx();
  table.close();
  Assert.assertEquals(4, encodeCount.get());
}
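The assertions above verify that HBaseTable reuses the encoded transaction bytes for every operation in the same transaction, re-encoding only when a new transaction starts. Conceptually the behavior amounts to memoizing the codec output per transaction; the sketch below is illustrative only, not CDAP's actual implementation.

// Illustrative memoization of the encoded transaction (hypothetical helper,
// not the real HBaseTable code).
private Transaction cachedTx;
private byte[] cachedEncodedTx;

private byte[] encodedTx(TransactionCodec codec, Transaction tx) throws IOException {
  if (cachedEncodedTx == null || cachedTx != tx) {
    cachedTx = tx;
    cachedEncodedTx = codec.encode(tx);  // encode() runs only once per transaction
  }
  return cachedEncodedTx;
}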
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class PartitionedFileSetDefinition, method reconfigure:
@Override
public DatasetSpecification reconfigure(String instanceName, DatasetProperties properties,
                                        DatasetSpecification currentSpec) throws IncompatibleUpdateException {
  // validate that the partitioning is not changing
  Partitioning oldPartitioning = PartitionedFileSetProperties.getPartitioning(currentSpec.getProperties());
  Partitioning newPartitioning = PartitionedFileSetProperties.getPartitioning(properties.getProperties());
  Preconditions.checkNotNull(oldPartitioning, "Existing dataset has no partitioning");
  Preconditions.checkNotNull(newPartitioning, "New properties do not contain partitioning");
  if (!Iterators.elementsEqual(oldPartitioning.getFields().entrySet().iterator(),
                               newPartitioning.getFields().entrySet().iterator())) {
    throw new IncompatibleUpdateException(
      String.format("Partitioning cannot be changed. Existing: %s, new: %s", oldPartitioning, newPartitioning));
  }
  Map<String, String> pfsProperties = new HashMap<>(properties.getProperties());
  // define the columns for indexing on the partitionsTable
  DatasetProperties indexedTableProperties = DatasetProperties.builder()
    .addAll(properties.getProperties())
    .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, INDEXED_COLS)
    .build();
  // only set the default base path property if the default was set the last time it was configured,
  // and no base path is in the current properties.
  DatasetSpecification currentFileSpec = currentSpec.getSpecification(FILESET_NAME);
  DatasetProperties.Builder newFileProperties = DatasetProperties.builder().addAll(properties.getProperties());
  String useNameAsBasePathDefault = currentSpec.getProperty(NAME_AS_BASE_PATH_DEFAULT);
  if (Boolean.parseBoolean(useNameAsBasePathDefault)
      && !properties.getProperties().containsKey(FileSetProperties.BASE_PATH)) {
    newFileProperties.add(FileSetProperties.BASE_PATH, instanceName);
    pfsProperties.put(NAME_AS_BASE_PATH_DEFAULT, Boolean.TRUE.toString());
  }
  return DatasetSpecification.builder(instanceName, getName())
    .properties(pfsProperties)
    .datasets(AbstractDatasetDefinition.reconfigure(filesetDef, FILESET_NAME,
                                                    newFileProperties.build(), currentFileSpec),
              AbstractDatasetDefinition.reconfigure(indexedTableDef, PARTITION_TABLE_NAME,
                                                    indexedTableProperties,
                                                    currentSpec.getSpecification(PARTITION_TABLE_NAME)))
    .build();
}
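For context, the partitioning that reconfigure() compares travels inside the dataset properties. A minimal sketch of how such properties are typically built; the field names and base path are illustrative, not taken from the original code.

// Illustrative properties for a partitioned file set; field names are assumptions.
Partitioning partitioning = Partitioning.builder()
  .addStringField("region")
  .addIntField("year")
  .build();
DatasetProperties newProps = PartitionedFileSetProperties.builder()
  .setPartitioning(partitioning)
  .setBasePath("sales/v2")
  .build();
// An update whose partitioning matches the existing spec is accepted; changing the
// partition fields makes reconfigure() throw IncompatibleUpdateException.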
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class AdminApp, method performAdmin:
// this will get called from the worker, also from a custom workflow action
static void performAdmin(RuntimeContext context) {
  Admin admin = context.getAdmin();
  Map<String, String> args = context.getRuntimeArguments();
  try {
    // if invoked with dropAll=true, clean up all datasets (a, b, c, d)
    if ("true".equals(args.get("dropAll"))) {
      for (String name : new String[] { "a", "b", "c", "d" }) {
        if (admin.datasetExists(name)) {
          admin.dropDataset(name);
        }
      }
    } else {
      // create a, update b with /extra in base path, truncate c, drop d
      admin.createDataset("a", Table.class.getName(), DatasetProperties.EMPTY);
      String type = admin.getDatasetType("b");
      Assert.assertEquals(FileSet.class.getName(), type);
      DatasetProperties bProps = admin.getDatasetProperties("b");
      String base = bProps.getProperties().get("base.path");
      Assert.assertNotNull(base);
      String newBase = args.get("new.base.path");
      DatasetProperties newBProps = ((FileSetProperties.Builder) FileSetProperties.builder().addAll(bProps.getProperties()))
        .setDataExternal(true)
        .setBasePath(newBase)
        .build();
      admin.updateDataset("b", newBProps);
      admin.truncateDataset("c");
      admin.dropDataset("d");
    }
  } catch (DatasetManagementException e) {
    throw Throwables.propagate(e);
  }
}
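The helper takes any program's RuntimeContext, so it can be shared between the worker and the workflow action mentioned in the comment above. A minimal sketch of a worker that delegates to it; AdminApp defines its own programs, so the class name here is only an assumption.

// Hypothetical worker wiring for the helper above (class name is an assumption).
public static class AdminWorker extends AbstractWorker {
  @Override
  public void run() {
    // runtime arguments such as dropAll=true or new.base.path=/tmp/extra
    // select which branch of performAdmin executes
    performAdmin(getContext());
  }
}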
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class AppWithCube, method configure:
@Override
public void configure() {
  DatasetProperties props = DatasetProperties.builder()
    .add("dataset.cube.resolutions", "1,60")
    .add("dataset.cube.aggregation.agg1.dimensions", "user,action")
    .add("dataset.cube.aggregation.agg1.requiredDimensions", "user,action")
    .build();
  createDataset(CUBE_NAME, Cube.class, props);
  addService(SERVICE_NAME, new CubeHandler());
}
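The Cube created above aggregates facts by user and action at 1-second and 60-second resolutions. A minimal sketch of writing a fact to it, for example from the service handler; the dimension values and the way the dataset is obtained are assumptions.

// Illustrative write against the Cube configured above (values are assumptions).
Cube cube = getContext().getDataset(CUBE_NAME);
long ts = System.currentTimeMillis() / 1000;
cube.add(new CubeFact(ts)
  .addDimensionValue("user", "alice")
  .addDimensionValue("action", "view")
  .addMeasurement("count", MeasureType.COUNTER, 1L));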