Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by cdapio.
From class HBaseTableTest, method testColumnFamily:
@Test
public void testColumnFamily() throws Exception {
  DatasetProperties props = TableProperties.builder().setColumnFamily("t").build();
  String tableName = "testcf";
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  try (BufferingTable table = getTable(CONTEXT1, tableName, props);
       BufferingTable table2 = getTable(CONTEXT1, tableName, props)) {
    TransactionSystemClient txClient = new DetachedTxSystemClient();
    TransactionExecutor executor = new DefaultTransactionExecutor(txClient, table);
    executor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        table.put(new Put("row", "column", "testValue"));
      }
    });
    executor = new DefaultTransactionExecutor(txClient, table2);
    executor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Assert.assertEquals("testValue", table2.get(new Get("row", "column")).getString("column"));
      }
    });
    // Verify the column family name
    TableId hTableId = hBaseTableUtil.createHTableId(new NamespaceId(CONTEXT1.getNamespaceId()), tableName);
    HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(TEST_HBASE.getHBaseAdmin(), hTableId);
    HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes("t"));
    Assert.assertNotNull(hcd);
    Assert.assertEquals("t", hcd.getNameAsString());
  }
}
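TableProperties.builder() here is the convenience API that folds table-specific settings into the generic key-value map carried by DatasetProperties. A minimal standalone sketch of that relationship, using only calls that appear above (the exact property key the column family lands under is version-dependent, so treat the printed output as illustrative):

import io.cdap.cdap.api.dataset.DatasetProperties;
import io.cdap.cdap.api.dataset.table.TableProperties;

import java.util.Map;

public class ColumnFamilyPropsSketch {
  public static void main(String[] args) {
    // Build the same properties the test passes to getTableAdmin()/getTable()
    DatasetProperties props = TableProperties.builder().setColumnFamily("t").build();
    // DatasetProperties is ultimately a string-to-string map; dump it to see
    // which key the column family setting lands under
    for (Map.Entry<String, String> e : props.getProperties().entrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue());
    }
  }
}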
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by cdapio.
From class HBaseTableTest, method testEnforceTxLifetime:
@Test
public void testEnforceTxLifetime() throws Exception {
  String tableName = "enforce-tx-lifetime";
  DatasetProperties datasetProperties = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .setConflictDetection(ConflictDetection.COLUMN)
    .build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
  admin.create();
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName())
    .properties(datasetProperties.getProperties())
    .build();
  try {
    final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                            cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
    table.commitTx();
    table.postTxCommit();
    table.close();
    CConfiguration testCConf = CConfiguration.copy(cConf);
    // No mutations on tables using testCConf will succeed.
    testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // A put should fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.put(b("row2"), b("col1"), b("val1"));
        }
      });
      // A delete should also fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"));
        }
      });
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"), b("col1"));
        }
      });
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
    // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
  } finally {
    admin.drop();
    admin.close();
  }
}
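Two helpers used above, b(...) and assertTxFail(...), are private members of HBaseTableTest and are not part of this excerpt. A plausible reconstruction, assuming b is a Bytes.toBytes shorthand and assertTxFail runs the mutation in a fresh short transaction and expects the commit to be rejected once the max lifetime of 0 makes every transaction too old:

import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionSystemClient;
import org.junit.Assert;

// Hypothetical shapes for the two helpers; the real ones live inside HBaseTableTest.
// The HBaseTable import (from the cdap-data-fabric module) is omitted here.
abstract class TxLifetimeHelpersSketch {

  // shorthand used throughout these tests, presumably just Bytes.toBytes(...)
  static byte[] b(String s) {
    return Bytes.toBytes(s);
  }

  // Assumed behavior: apply the mutation inside a fresh short transaction and
  // expect the commit to fail because the configured max tx lifetime is exceeded.
  static void assertTxFail(TransactionSystemClient txClient, HBaseTable table, Runnable op) throws Exception {
    Transaction tx = txClient.startShort();
    table.startTx(tx);
    op.run();
    try {
      table.commitTx();
      Assert.fail("Expected the mutation to fail due to exceeded transaction lifetime");
    } catch (IOException e) {
      // expected
    } finally {
      // discard the buffered writes so the next assertion starts fresh
      table.rollbackTx();
    }
  }
}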
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by cdapio.
From class HBaseTableTest, method testCachedEncodedTransaction:
@Test
public void testCachedEncodedTransaction() throws Exception {
  String tableName = "testEncodedTxTable";
  DatasetProperties props = DatasetProperties.EMPTY;
  getTableAdmin(CONTEXT1, tableName, props).create();
  DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
  // use a transaction codec that counts the number of times encode() is called
  final AtomicInteger encodeCount = new AtomicInteger();
  final TransactionCodec codec = new TransactionCodec() {
    @Override
    public byte[] encode(Transaction tx) throws IOException {
      encodeCount.incrementAndGet();
      return super.encode(tx);
    }
  };
  // use a table util that creates an HTable that validates the encoded tx on each get
  final AtomicReference<Transaction> txRef = new AtomicReference<>();
  HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {
    @Override
    public org.apache.hadoop.hbase.client.Table createTable(Configuration conf, TableId tableId) throws IOException {
      org.apache.hadoop.hbase.client.Table table = super.createTable(conf, tableId);
      return new DelegatingTable(table) {
        @Override
        public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.get(get);
        }

        @Override
        public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
          for (org.apache.hadoop.hbase.client.Get get : gets) {
            Assert.assertEquals(txRef.get().getTransactionId(),
                                codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          }
          return super.get(gets);
        }

        @Override
        public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
          Assert.assertEquals(txRef.get().getTransactionId(),
                              codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
          return super.getScanner(scan);
        }
      };
    }
  };
  HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(),
                                    cConf, TEST_HBASE.getConfiguration(), util, codec);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  // test all operations: only the first one encodes
  Transaction tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  Assert.assertEquals(0, encodeCount.get());
  table.get(b("row"));
  Assert.assertEquals(1, encodeCount.get());
  table.get(ImmutableList.of(new Get("a"), new Get("b")));
  Assert.assertEquals(1, encodeCount.get());
  Scanner scanner = table.scan(new Scan(null, null));
  Assert.assertEquals(1, encodeCount.get());
  scanner.close();
  table.increment(b("z"), b("z"), 0L);
  Assert.assertEquals(1, encodeCount.get());
  table.commitTx();
  table.postTxCommit();
  // test that for the next tx, we encode again
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(2, encodeCount.get());
  table.commitTx();
  // test that we encode again, even if postTxCommit was not called
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(3, encodeCount.get());
  table.commitTx();
  table.rollbackTx();
  // test that rollback does not encode the tx
  Assert.assertEquals(3, encodeCount.get());
  // test that we encode again if the previous tx rolled back
  tx = txSystemClient.startShort();
  txRef.set(tx);
  table.startTx(tx);
  table.get(b("row"));
  Assert.assertEquals(4, encodeCount.get());
  table.commitTx();
  table.close();
  Assert.assertEquals(4, encodeCount.get());
}
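The encode-count assertions pin down a caching contract: HBaseTable encodes the current transaction at most once, on the first read that needs to attach it, reuses those bytes for the rest of the transaction, and discards them when a new transaction starts or the current one rolls back. A self-contained illustration of that pattern (class and field names here are made up, not HBaseTable's real internals):

import java.io.IOException;

import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;

// Illustrative stand-in for the caching the test asserts; hypothetical names.
final class TxEncodingCacheSketch {
  private final TransactionCodec txCodec = new TransactionCodec();
  private Transaction tx;
  private byte[] encodedTx;

  void startTx(Transaction tx) {
    this.tx = tx;
    // a new transaction invalidates the cached encoding
    this.encodedTx = null;
  }

  void rollbackTx() {
    // rolling back also discards the cached encoding, without re-encoding
    this.encodedTx = null;
  }

  byte[] getEncodedTx() throws IOException {
    if (encodedTx == null) {
      // encode once per transaction ...
      encodedTx = txCodec.encode(tx);
    }
    // ... then reuse the bytes as the attribute for every get/scan/increment
    return encodedTx;
  }
}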
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by cdapio.
From class DatasetInstanceService, method create:
/**
 * Creates a dataset instance.
 *
 * @param namespaceId the namespace to create the dataset instance in
 * @param name the name of the new dataset instance
 * @param props the properties for the new dataset instance
 * @throws NamespaceNotFoundException if the specified namespace was not found
 * @throws DatasetAlreadyExistsException if a dataset with the same name already exists
 * @throws DatasetTypeNotFoundException if the dataset type was not found
 * @throws UnauthorizedException if perimeter security and authorization are enabled, and the current user does not
 *   have {@link StandardPermission#UPDATE} privilege on the dataset instance's namespace
 */
void create(String namespaceId, String name, DatasetInstanceConfiguration props) throws Exception {
  NamespaceId namespace = ConversionHelpers.toNamespaceId(namespaceId);
  DatasetId datasetId = ConversionHelpers.toDatasetInstanceId(namespaceId, name);
  Principal requestingUser = authenticationContext.getPrincipal();
  String ownerPrincipal = props.getOwnerPrincipal();
  // need to enforce on the principal id if impersonation is involved
  KerberosPrincipalId effectiveOwner = SecurityUtil.getEffectiveOwner(ownerAdmin, namespace, ownerPrincipal);
  if (DatasetsUtil.isUserDataset(datasetId)) {
    LOG.trace("Authorizing impersonation for dataset {}", name);
    if (effectiveOwner != null) {
      accessEnforcer.enforce(effectiveOwner, requestingUser, AccessPermission.SET_OWNER);
    }
    accessEnforcer.enforce(datasetId, requestingUser, StandardPermission.CREATE);
    LOG.trace("Authorized impersonation for dataset {}", name);
  }
  LOG.trace("Ensuring existence of namespace {} for dataset {}", namespace, name);
  ensureNamespaceExists(namespace);
  LOG.trace("Ensured existence of namespace {} for dataset {}", namespace, name);
  LOG.trace("Retrieving instance metadata from MDS for dataset {}", name);
  DatasetSpecification existing = instanceManager.get(datasetId);
  if (existing != null) {
    throw new DatasetAlreadyExistsException(datasetId);
  }
  LOG.trace("Retrieved instance metadata from MDS for dataset {}", name);
  // for creation, we need enforcement for dataset type for user dataset, but bypass for system datasets
  DatasetTypeMeta typeMeta = getTypeInfo(namespace, props.getTypeName(), !DatasetsUtil.isUserDataset(datasetId));
  if (typeMeta == null) {
    // Type not found in the instance's namespace and the system namespace. Bail out.
    throw new DatasetTypeNotFoundException(ConversionHelpers.toDatasetTypeId(namespace, props.getTypeName()));
  }
  LOG.info("Creating dataset {}.{}, type name: {}, properties: {}",
           namespaceId, name, props.getTypeName(), props.getProperties());
  // store the owner principal first, since it is used to impersonate the dataset creation;
  // the dataset is already known not to exist, so there is no existing owner to check
  if (ownerPrincipal != null) {
    LOG.trace("Adding owner for dataset {}", name);
    KerberosPrincipalId owner = new KerberosPrincipalId(ownerPrincipal);
    ownerAdmin.add(datasetId, owner);
    LOG.trace("Added owner {} for dataset {}", owner, name);
  }
  try {
    DatasetProperties datasetProperties = DatasetProperties.builder()
      .addAll(props.getProperties())
      .setDescription(props.getDescription())
      .build();
    LOG.trace("Calling op executor service to configure dataset {}", name);
    DatasetCreationResponse response = opExecutorClient.create(datasetId, typeMeta, datasetProperties);
    LOG.trace("Received spec and metadata from op executor service for dataset {}: {}", name, response);
    LOG.trace("Adding instance metadata for dataset {}", name);
    DatasetSpecification spec = response.getSpec();
    instanceManager.add(namespace, spec);
    LOG.trace("Added instance metadata for dataset {}", name);
    metaCache.invalidate(datasetId);
    LOG.trace("Publishing audit for creation of dataset {}", name);
    publishAudit(datasetId, AuditType.CREATE);
    LOG.trace("Published audit for creation of dataset {}", name);
    SystemMetadata metadata = response.getMetadata();
    LOG.trace("Publishing system metadata for creation of dataset {}: {}", name, metadata);
    publishMetadata(datasetId, metadata);
    LOG.trace("Published system metadata for creation of dataset {}", name);
    // Enable explore
    enableExplore(datasetId, spec, props);
  } catch (Exception e) {
    // there was a problem in creating the dataset instance, so delete the owner if it got added earlier;
    // safe to call for entities that do not have an owner
    ownerAdmin.delete(datasetId);
    throw e;
  }
}
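The builder chain on the success path above is where the HTTP-level configuration is turned into platform-level DatasetProperties. A standalone sketch of that conversion, using the same builder calls (the property key and description value are made up for illustration):

import io.cdap.cdap.api.dataset.DatasetProperties;

import java.util.HashMap;
import java.util.Map;

public class PropsConversionSketch {
  public static void main(String[] args) {
    // stand-in for DatasetInstanceConfiguration.getProperties()
    Map<String, String> requestProps = new HashMap<>();
    requestProps.put("dataset.table.ttl", "86400"); // hypothetical property key
    // same conversion the service performs before calling the op executor
    DatasetProperties datasetProperties = DatasetProperties.builder()
      .addAll(requestProps)
      .setDescription("my dataset") // stand-in for props.getDescription()
      .build();
    System.out.println(datasetProperties.getProperties());
  }
}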
Use of io.cdap.cdap.api.dataset.DatasetProperties in project cdap by cdapio.
From class DatasetAdminOpHTTPHandler, method update:
@POST
@Path("/data/datasets/{name}/admin/update")
public void update(FullHttpRequest request, HttpResponder responder,
                   @PathParam("namespace-id") String namespaceId,
                   @PathParam("name") String name) throws Exception {
  propagateUserId(request);
  InternalDatasetUpdateParams params =
    GSON.fromJson(request.content().toString(StandardCharsets.UTF_8), InternalDatasetUpdateParams.class);
  Preconditions.checkArgument(params.getProperties() != null, "Missing required 'instanceProps' parameter.");
  Preconditions.checkArgument(params.getTypeMeta() != null, "Missing required 'typeMeta' parameter.");
  Preconditions.checkArgument(params.getExistingSpec() != null, "Missing required 'existingSpec' parameter.");
  DatasetProperties props = params.getProperties();
  DatasetSpecification existing = params.getExistingSpec();
  DatasetTypeMeta typeMeta = params.getTypeMeta();
  try {
    DatasetId instanceId = new DatasetId(namespaceId, name);
    DatasetCreationResponse response = datasetAdminService.createOrUpdate(instanceId, typeMeta, props, existing);
    responder.sendJson(HttpResponseStatus.OK, GSON.toJson(response));
  } catch (IncompatibleUpdateException e) {
    throw new ConflictException(e.getMessage());
  }
}
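The handler deserializes the request body with Gson into InternalDatasetUpdateParams. Judging only from the precondition messages, the top-level JSON fields are presumably named instanceProps, typeMeta, and existingSpec; a hypothetical body is sketched below (field names and nested shapes are assumptions, defined for real by InternalDatasetUpdateParams):

public class UpdateRequestSketch {
  public static void main(String[] args) {
    // Hypothetical JSON body for POST /data/datasets/{name}/admin/update,
    // inferred from the precondition messages above; the exact field names
    // and nested shapes may differ.
    String body = "{\n"
      + "  \"instanceProps\": { \"properties\": { \"dataset.table.ttl\": \"86400\" } },\n"
      + "  \"typeMeta\": { \"name\": \"table\" },\n"
      + "  \"existingSpec\": { \"name\": \"myTable\", \"type\": \"table\" }\n"
      + "}";
    System.out.println(body);
  }
}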