Use of co.cask.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
The class HiveExploreTableTestRun, method testNoOpOnMissingSchema.
@Test
public void testNoOpOnMissingSchema() throws Exception {
  DatasetId datasetId = NAMESPACE_ID.dataset("noschema");
  datasetFramework.addInstance(Table.class.getName(), datasetId, DatasetProperties.EMPTY);
  try {
    DatasetSpecification spec = datasetFramework.getDatasetSpec(datasetId);
    Assert.assertEquals(QueryHandle.NO_OP, exploreTableManager.enableDataset(datasetId, spec, false));
  } finally {
    datasetFramework.deleteInstance(datasetId);
  }
}
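For comparison, a DatasetSpecification does not have to be fetched from the DatasetFramework as above; it can also be assembled directly with its builder, as the HBase examples further down do. A minimal sketch reusing only builder calls shown on this page; the wrapper class and method names are illustrative, not part of the project:

import co.cask.cdap.api.dataset.DatasetSpecification;
import co.cask.cdap.api.dataset.table.Table;

public class DatasetSpecificationSketch {

  // Sketch: build a specification by hand instead of fetching it from the
  // DatasetFramework. Like the "noschema" instance above, it carries no
  // schema property, so enableDataset(...) would be expected to return
  // QueryHandle.NO_OP for it.
  public static DatasetSpecification buildNoSchemaSpec() {
    return DatasetSpecification.builder("noschema", Table.class.getName()).build();
  }
}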
Use of co.cask.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
The class HiveExploreStructuredRecordTestRun, method testRecordScannableAndWritableIsOK.
@Test
public void testRecordScannableAndWritableIsOK() throws Exception {
  DatasetId instanceId = NAMESPACE_ID.dataset("tabul");
  datasetFramework.addInstance("TableWrapper", instanceId,
    DatasetProperties.builder()
      .add(DatasetProperties.SCHEMA,
           Schema.recordOf("intRecord", Schema.Field.of("x", Schema.of(Schema.Type.STRING))).toString())
      .build());
  DatasetSpecification spec = datasetFramework.getDatasetSpec(instanceId);
  try {
    exploreTableManager.enableDataset(instanceId, spec, false);
    runCommand(NAMESPACE_ID, "describe dataset_tabul", true,
      Lists.newArrayList(
        new ColumnDesc("col_name", "STRING", 1, "from deserializer"),
        new ColumnDesc("data_type", "STRING", 2, "from deserializer"),
        new ColumnDesc("comment", "STRING", 3, "from deserializer")),
      Lists.newArrayList(new QueryResult(Lists.<Object>newArrayList("x", "string", "from deserializer"))));
  } finally {
    datasetFramework.deleteInstance(instanceId);
  }
}
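The schema string passed to addInstance above is built inline; here is a minimal sketch that constructs the same single-field record schema separately, using only the Schema and DatasetProperties calls that appear in the test. The import paths are taken from the CDAP API, and the wrapper class and method names are illustrative:

import co.cask.cdap.api.data.schema.Schema;
import co.cask.cdap.api.dataset.DatasetProperties;

public class ExploreSchemaPropertiesSketch {

  // Sketch: the same record schema the test serializes into the SCHEMA
  // dataset property of the "TableWrapper" instance.
  public static DatasetProperties buildTabulProperties() {
    Schema intRecord = Schema.recordOf(
      "intRecord",
      Schema.Field.of("x", Schema.of(Schema.Type.STRING)));
    return DatasetProperties.builder()
      .add(DatasetProperties.SCHEMA, intRecord.toString())
      .build();
  }
}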
Use of co.cask.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
The class HBaseTableTest, method testEnforceTxLifetime.
@Test
public void testEnforceTxLifetime() throws Exception {
  String tableName = "enforce-tx-lifetime";
  DatasetProperties datasetProperties = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .setConflictDetection(ConflictDetection.COLUMN)
    .build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
  admin.create();
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName())
    .properties(datasetProperties.getProperties())
    .build();
  try {
    final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                            cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
    table.commitTx();
    table.postTxCommit();
    table.close();
    CConfiguration testCConf = CConfiguration.copy(cConf);
    // No mutations on tables using testCConf will succeed.
    testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // A put should fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.put(b("row2"), b("col1"), b("val1"));
        }
      });
      // A delete should also fail
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"));
        }
      });
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.delete(b("row1"), b("col1"));
        }
      });
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
    // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
    try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"),
                                                     testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
      // So should an increment
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.increment(b("inc1"), b("col1"), 10);
        }
      });
      // incrementAndGet gets converted to a put internally
      assertTxFail(txSystemClient, failTable, new Runnable() {
        @Override
        public void run() {
          failTable.incrementAndGet(b("inc1"), b("col1"), 10);
        }
      });
    }
  } finally {
    admin.drop();
    admin.close();
  }
}
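The assertTxFail helper used throughout this test is not shown in the excerpt. Below is a hedged sketch of what such a helper could look like, assuming Tephra's TransactionSystemClient/TransactionAware contract and that flushing the buffered mutation fails when the configured maximum transaction lifetime is exceeded; the method body and exception handling are assumptions, not the project's actual helper:

// Hypothetical sketch; requires the Tephra Transaction and TransactionSystemClient
// classes (org.apache.tephra, or co.cask.tephra in older CDAP versions) plus org.junit.Assert.
private void assertTxFail(TransactionSystemClient txClient, final HBaseTable table, Runnable op) throws Exception {
  Transaction tx = txClient.startShort();
  table.startTx(tx);
  // Buffer the mutation inside the transaction.
  op.run();
  try {
    // With CFG_TX_MAX_LIFETIME set to 0, flushing the buffered mutation is
    // expected to be rejected by the table.
    table.commitTx();
    Assert.fail("Expected the mutation to fail because the transaction lifetime was exceeded");
  } catch (Exception e) {
    // expected
  } finally {
    table.rollbackTx();
  }
}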
Use of co.cask.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
The class HBaseTableTest, method testTTL.
@Test
public void testTTL() throws Exception {
  // for the purpose of this test it is fine not to configure ttl when creating table: we want to see if it
  // applies on reading
  int ttl = 1;
  String ttlTable = "ttl";
  String noTtlTable = "nottl";
  DatasetProperties props = TableProperties.builder().setTTL(ttl).build();
  getTableAdmin(CONTEXT1, ttlTable, props).create();
  DatasetSpecification ttlTableSpec = DatasetSpecification.builder(ttlTable, HBaseTable.class.getName())
    .properties(props.getProperties())
    .build();
  HBaseTable table = new HBaseTable(CONTEXT1, ttlTableSpec, Collections.<String, String>emptyMap(),
                                    cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
  DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
  Transaction tx = txSystemClient.startShort();
  table.startTx(tx);
  table.put(b("row1"), b("col1"), b("val1"));
  table.commitTx();
  TimeUnit.MILLISECONDS.sleep(1010);
  tx = txSystemClient.startShort();
  table.startTx(tx);
  table.put(b("row2"), b("col2"), b("val2"));
  table.commitTx();
  // now, we should not see first as it should have expired, but see the last one
  tx = txSystemClient.startShort();
  table.startTx(tx);
  byte[] val = table.get(b("row1"), b("col1"));
  if (val != null) {
    LOG.info("Unexpected value " + Bytes.toStringBinary(val));
  }
  Assert.assertNull(val);
  Assert.assertArrayEquals(b("val2"), table.get(b("row2"), b("col2")));
  // test a table with no TTL
  DatasetProperties props2 = TableProperties.builder().setTTL(Tables.NO_TTL).build();
  getTableAdmin(CONTEXT1, noTtlTable, props2).create();
  DatasetSpecification noTtlTableSpec = DatasetSpecification.builder(noTtlTable, HBaseTable.class.getName())
    .properties(props2.getProperties())
    .build();
  HBaseTable table2 = new HBaseTable(CONTEXT1, noTtlTableSpec, Collections.<String, String>emptyMap(),
                                     cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  table2.put(b("row1"), b("col1"), b("val1"));
  table2.commitTx();
  TimeUnit.SECONDS.sleep(2);
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  table2.put(b("row2"), b("col2"), b("val2"));
  table2.commitTx();
  // if ttl is -1 (unlimited), it should see both
  tx = txSystemClient.startShort();
  table2.startTx(tx);
  Assert.assertArrayEquals(b("val1"), table2.get(b("row1"), b("col1")));
  Assert.assertArrayEquals(b("val2"), table2.get(b("row2"), b("col2")));
}
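The TTL configured above travels with the DatasetSpecification as a plain string property. A minimal sketch of building such a spec and dumping the properties it carries, using only calls shown on this page; the exact key under which the TTL is stored is not shown here, so the sketch prints the whole map rather than assuming a constant name (class and method names are illustrative):

import java.util.Map;

import co.cask.cdap.api.dataset.DatasetSpecification;
import co.cask.cdap.api.dataset.table.Table;
import co.cask.cdap.api.dataset.table.TableProperties;

public class TtlSpecSketch {

  public static void main(String[] args) {
    // Same pattern as the test: TableProperties -> DatasetSpecification.
    DatasetSpecification ttlSpec = DatasetSpecification.builder("ttl", Table.class.getName())
      .properties(TableProperties.builder().setTTL(1).build().getProperties())
      .build();
    // Print every property the spec carries; the TTL setting is one of them.
    for (Map.Entry<String, String> entry : ttlSpec.getProperties().entrySet()) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }
  }
}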
Use of co.cask.cdap.api.dataset.DatasetSpecification in project cdap by caskdata.
The class ExploreExecutorHttpHandler, method updateDataset.
/**
 * Update ad-hoc exploration of a dataset instance.
 */
@POST
@Path("datasets/{dataset}/update")
@AuditPolicy(AuditDetail.REQUEST_BODY)
public void updateDataset(FullHttpRequest request, HttpResponder responder,
                          @PathParam("namespace-id") String namespace,
                          @PathParam("dataset") String datasetName) throws BadRequestException {
  final DatasetId datasetId = new DatasetId(namespace, datasetName);
  try {
    UpdateExploreParameters params = readUpdateParameters(request);
    final DatasetSpecification oldSpec = params.getOldSpec();
    final DatasetSpecification datasetSpec = params.getNewSpec();
    QueryHandle handle;
    if (oldSpec.equals(datasetSpec)) {
      handle = QueryHandle.NO_OP;
    } else {
      handle = impersonator.doAs(datasetId, new Callable<QueryHandle>() {
        @Override
        public QueryHandle call() throws Exception {
          return exploreTableManager.updateDataset(datasetId, datasetSpec, oldSpec);
        }
      });
    }
    JsonObject json = new JsonObject();
    json.addProperty("handle", handle.getHandle());
    responder.sendJson(HttpResponseStatus.OK, json.toString());
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
  } catch (ExploreException e) {
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Error updating explore on dataset " + datasetId);
  } catch (SQLException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, "SQL exception while trying to update explore on dataset " + datasetId);
  } catch (UnsupportedTypeException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST,
                         "Schema for dataset " + datasetId + " is not supported for exploration: " + e.getMessage());
  } catch (Throwable e) {
    LOG.error("Got exception:", e);
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, e.getMessage());
  }
}
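The readUpdateParameters helper invoked at the top of the handler is not included in this excerpt. A hedged sketch of what it could look like, assuming the request body is a JSON-encoded UpdateExploreParameters parsed with Gson; the parsing details and error messages are assumptions, not the project's actual implementation:

// Hypothetical sketch; requires com.google.gson.Gson, com.google.gson.JsonSyntaxException
// and java.nio.charset.StandardCharsets in addition to the handler's existing imports.
private UpdateExploreParameters readUpdateParameters(FullHttpRequest request) throws BadRequestException {
  String body = request.content().toString(StandardCharsets.UTF_8);
  try {
    UpdateExploreParameters params = new Gson().fromJson(body, UpdateExploreParameters.class);
    if (params == null) {
      throw new BadRequestException("Request body must contain the old and new dataset specifications");
    }
    return params;
  } catch (JsonSyntaxException e) {
    throw new BadRequestException("Unable to parse request body: " + e.getMessage());
  }
}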