Use of co.cask.cdap.data.dataset.SystemDatasetInstantiator in project cdap by caskdata.
The class ExploreExecutorHttpHandler, method doPartitionOperation.
private void doPartitionOperation(FullHttpRequest request, HttpResponder responder, DatasetId datasetId,
                                  PartitionOperation partitionOperation) {
  try (SystemDatasetInstantiator datasetInstantiator = datasetInstantiatorFactory.create()) {
    Dataset dataset;
    try {
      dataset = datasetInstantiator.getDataset(datasetId);
    } catch (Exception e) {
      LOG.error("Exception instantiating dataset {}.", datasetId, e);
      responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR,
                           "Exception instantiating dataset " + datasetId);
      return;
    }
    try {
      if (!(dataset instanceof PartitionedFileSet)) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "not a partitioned dataset.");
        return;
      }
      Partitioning partitioning = ((PartitionedFileSet) dataset).getPartitioning();
      Reader reader = new InputStreamReader(new ByteBufInputStream(request.content()));
      Map<String, String> properties = GSON.fromJson(reader, new TypeToken<Map<String, String>>() { }.getType());
      PartitionKey partitionKey;
      try {
        partitionKey = PartitionedFileSetArguments.getOutputPartitionKey(properties, partitioning);
      } catch (Exception e) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "invalid partition key: " + e.getMessage());
        return;
      }
      if (partitionKey == null) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "no partition key was given.");
        return;
      }
      QueryHandle handle = partitionOperation.submitOperation(partitionKey, properties);
      if (handle == null) {
        return;
      }
      JsonObject json = new JsonObject();
      json.addProperty("handle", handle.getHandle());
      responder.sendJson(HttpResponseStatus.OK, json.toString());
    } finally {
      Closeables.closeQuietly(dataset);
    }
  } catch (Throwable e) {
    LOG.error("Got exception:", e);
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, e.getMessage());
  }
}
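The pattern worth isolating here is the pairing of two lifetimes: the SystemDatasetInstantiator is scoped to the request by try-with-resources, while the Dataset it produces must be closed separately (here via Closeables.closeQuietly in the finally block). A minimal sketch of that pattern on its own, assuming the same datasetInstantiatorFactory field as the handler above; the helper name openPartitionedFileSet is hypothetical:

// Hypothetical helper: instantiate, type-check, and hand ownership to the caller.
// Assumes a datasetInstantiatorFactory field like the one used in the handler above.
private PartitionedFileSet openPartitionedFileSet(DatasetId datasetId) throws Exception {
  try (SystemDatasetInstantiator instantiator = datasetInstantiatorFactory.create()) {
    Dataset dataset = instantiator.getDataset(datasetId);
    if (!(dataset instanceof PartitionedFileSet)) {
      // Close the wrongly-typed dataset before failing; the caller never sees it.
      Closeables.closeQuietly(dataset);
      throw new IllegalArgumentException(datasetId + " is not a partitioned file set");
    }
    // The instantiator can be closed here; the dataset stays open and the
    // caller becomes responsible for closing it.
    return (PartitionedFileSet) dataset;
  }
}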
Use of co.cask.cdap.data.dataset.SystemDatasetInstantiator in project cdap by caskdata.
The class ExploreTableManager, method updateDataset.
/**
 * Update ad-hoc exploration on the given dataset by altering the corresponding Hive table. If exploration has
 * not been enabled on the dataset, this will fail. Assumes the dataset actually exists.
 *
 * @param datasetId the ID of the dataset to update
 * @param spec the new specification for the dataset
 * @param oldSpec the previous specification for the dataset
 * @return query handle for updating the Hive table for the dataset
 * @throws IllegalArgumentException if some required dataset property like schema is not set
 * @throws UnsupportedTypeException if the schema of the dataset is not compatible with Hive
 * @throws ExploreException if there was an exception submitting the alter table statement
 * @throws SQLException if there was a problem with the alter table statement
 * @throws DatasetNotFoundException if the dataset had to be instantiated, but could not be found
 * @throws ClassNotFoundException if there was a missing class when instantiating the dataset
 */
public QueryHandle updateDataset(DatasetId datasetId, DatasetSpecification spec, DatasetSpecification oldSpec)
  throws IllegalArgumentException, ExploreException, SQLException, UnsupportedTypeException,
  DatasetNotFoundException, ClassNotFoundException {
  String tableName = tableNaming.getTableName(datasetId, spec.getProperties());
  String databaseName = ExploreProperties.getExploreDatabaseName(spec.getProperties());
  String oldTableName = tableNaming.getTableName(datasetId, oldSpec.getProperties());
  String oldDatabaseName = ExploreProperties.getExploreDatabaseName(oldSpec.getProperties());
  try {
    exploreService.getTableInfo(datasetId.getNamespace(), oldDatabaseName, oldTableName);
  } catch (TableNotFoundException e) {
    // the old table does not exist, but the new spec may be explorable, so attempt to enable it
    return enableDataset(datasetId, spec, false);
  }
  List<String> alterStatements;
  if (!(oldTableName.equals(tableName) && Objects.equals(oldDatabaseName, databaseName))) {
    // database/table name changed. All we can do is disable the old table and enable the new one.
    alterStatements = new ArrayList<>();
    String disableStatement = generateDisableStatement(datasetId, oldSpec);
    if (disableStatement != null) {
      alterStatements.add(disableStatement);
    }
    String enableStatement = generateEnableStatement(datasetId, spec, false);
    if (enableStatement != null) {
      alterStatements.add(enableStatement);
    }
  } else {
    try (SystemDatasetInstantiator datasetInstantiator = datasetInstantiatorFactory.create()) {
      Dataset dataset = datasetInstantiator.getDataset(datasetId);
      try {
        alterStatements = generateAlterStatements(datasetId, tableName, dataset, spec, oldSpec);
      } finally {
        Closeables.closeQuietly(dataset);
      }
    } catch (IOException e) {
      LOG.error("Exception instantiating dataset {}.", datasetId, e);
      throw new ExploreException("Exception while trying to instantiate dataset " + datasetId);
    }
  }
  LOG.trace("alter statements for update: {}", alterStatements);
  if (alterStatements == null || alterStatements.isEmpty()) {
    return QueryHandle.NO_OP;
  }
  if (alterStatements.size() == 1) {
    return exploreService.execute(datasetId.getParent(), alterStatements.get(0));
  }
  return exploreService.execute(datasetId.getParent(), alterStatements.toArray(new String[alterStatements.size()]));
}
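The branching rule is the heart of this method: a changed Hive database or table name cannot be expressed as ALTER statements, so the old table is disabled and the new one enabled, while an unchanged name allows in-place alters generated from the instantiated dataset. A sketch that isolates that decision using the same naming calls as above; the helper name requiresRecreate is hypothetical:

// Hypothetical helper capturing the rename check from updateDataset:
// a changed table or database name forces disable-plus-enable instead of ALTER.
private boolean requiresRecreate(DatasetId datasetId, DatasetSpecification spec, DatasetSpecification oldSpec) {
  String tableName = tableNaming.getTableName(datasetId, spec.getProperties());
  String oldTableName = tableNaming.getTableName(datasetId, oldSpec.getProperties());
  String databaseName = ExploreProperties.getExploreDatabaseName(spec.getProperties());
  String oldDatabaseName = ExploreProperties.getExploreDatabaseName(oldSpec.getProperties());
  // Objects.equals handles the database name being null (the default database).
  return !(oldTableName.equals(tableName) && Objects.equals(oldDatabaseName, databaseName));
}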
Use of co.cask.cdap.data.dataset.SystemDatasetInstantiator in project cdap by caskdata.
The class CoreSchedulerServiceTest, method beforeClass.
@BeforeClass
public static void beforeClass() throws Throwable {
  AppFabricTestBase.beforeClass();
  scheduler = getInjector().getInstance(Scheduler.class);
  if (scheduler instanceof Service) {
    ((Service) scheduler).startAndWait();
  }
  messagingService = getInjector().getInstance(MessagingService.class);
  store = getInjector().getInstance(Store.class);
  DynamicDatasetCache datasetCache = new MultiThreadDatasetCache(
    new SystemDatasetInstantiator(getInjector().getInstance(DatasetFramework.class)),
    getTxClient(), NamespaceId.SYSTEM, ImmutableMap.<String, String>of(), null, null);
  transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(datasetCache, Schedulers.SUBSCRIBER_TX_TIMEOUT_SECONDS),
    RetryStrategies.retryOnConflict(20, 100));
}
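Transactions.createTransactionalWithRetry wraps the cache-backed Transactional so that transactions failing on conflicts are retried, here up to 20 times with a 100 ms delay. A minimal sketch of how tests typically drive such a Transactional through CDAP's TxRunnable callback; the dataset name and cell values are illustrative, and the checked TransactionFailureException is assumed to be declared by the enclosing test method:

// Each execute(...) call runs in its own transaction; the DatasetContext hands
// out dataset instances bound to that transaction, retried on conflict.
transactional.execute(context -> {
  Table table = context.getDataset("scheduler.meta");  // illustrative dataset name
  table.put(new Put("some-row").add("some-column", "some-value"));
});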
Use of co.cask.cdap.data.dataset.SystemDatasetInstantiator in project cdap by caskdata.
The class WorkerProgramRunnerTest, method beforeClass.
@BeforeClass
public static void beforeClass() throws IOException {
  // We only run long-running transactions here. Set the tx timeout to a ridiculously
  // low value to verify that long-running transactions actually bypass that timeout.
  CConfiguration conf = CConfiguration.create();
  conf.setInt(TxConstants.Manager.CFG_TX_TIMEOUT, 1);
  conf.setInt(TxConstants.Manager.CFG_TX_CLEANUP_INTERVAL, 2);
  Injector injector = AppFabricTestHelper.getInjector(conf);
  txService = injector.getInstance(TransactionManager.class);
  txExecutorFactory = injector.getInstance(TransactionExecutorFactory.class);
  dsFramework = injector.getInstance(DatasetFramework.class);
  datasetCache = new SingleThreadDatasetCache(
    new SystemDatasetInstantiator(dsFramework, WorkerProgramRunnerTest.class.getClassLoader(), null),
    injector.getInstance(TransactionSystemClient.class), NamespaceId.DEFAULT,
    DatasetDefinition.NO_ARGUMENTS, null, null);
  metricStore = injector.getInstance(MetricStore.class);
  txService.startAndWait();
}
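With the SingleThreadDatasetCache and the TransactionExecutorFactory in place, a test body can borrow a dataset from the cache and run against it inside an explicit transaction. A sketch under the assumption that a key-value dataset named "kv" was previously created through dsFramework; names and values are illustrative, and the executor's checked exceptions are assumed to be declared by the test method:

// Datasets from the cache come wired up as TransactionAware, so they can be
// handed straight to a transaction executor.
KeyValueTable kv = datasetCache.getDataset("kv");  // assumes a deployed "kv" dataset
TransactionExecutor executor = txExecutorFactory.createExecutor(ImmutableList.of((TransactionAware) kv));
executor.execute(() -> kv.write("key", "value"));  // runs inside a single transaction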
Use of co.cask.cdap.data.dataset.SystemDatasetInstantiator in project cdap by caskdata.
The class MapReduceRunnerTestBase, method beforeClass.
@BeforeClass
public static void beforeClass() throws Exception {
  CConfiguration conf = CConfiguration.create();
  // Allow subclasses to override the following two parameters via system properties.
  Integer txTimeout = Integer.getInteger(TxConstants.Manager.CFG_TX_TIMEOUT);
  if (txTimeout != null) {
    conf.setInt(TxConstants.Manager.CFG_TX_TIMEOUT, txTimeout);
  }
  Integer txCleanupInterval = Integer.getInteger(TxConstants.Manager.CFG_TX_CLEANUP_INTERVAL);
  if (txCleanupInterval != null) {
    conf.setInt(TxConstants.Manager.CFG_TX_CLEANUP_INTERVAL, txCleanupInterval);
  }
  injector = AppFabricTestHelper.getInjector(conf, new AbstractModule() {
    @Override
    protected void configure() {
      bind(StreamFileWriterFactory.class).to(LocationStreamFileWriterFactory.class);
    }
  });
  txService = injector.getInstance(TransactionManager.class);
  txExecutorFactory = injector.getInstance(TransactionExecutorFactory.class);
  dsFramework = injector.getInstance(DatasetFramework.class);
  datasetCache = new SingleThreadDatasetCache(
    new SystemDatasetInstantiator(dsFramework, MapReduceRunnerTestBase.class.getClassLoader(), null),
    injector.getInstance(TransactionSystemClient.class), NamespaceId.DEFAULT,
    DatasetDefinition.NO_ARGUMENTS, null, null);
  metricStore = injector.getInstance(MetricStore.class);
  txService.startAndWait();
  streamHandler = injector.getInstance(StreamHandler.class);
  // Always create the default namespace.
  injector.getInstance(NamespaceAdmin.class).create(NamespaceMeta.DEFAULT);
}
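Because both values are read with Integer.getInteger, the overrides arrive as JVM system properties, which is what the comment about subclasses refers to: the property must be set before this beforeClass runs. One way a subclass might do that is sketched below; the subclass name and the 5-second timeout are illustrative:

// Hypothetical subclass overriding the tx timeout read by the base class.
public class ShortTxTimeoutRunnerTest extends MapReduceRunnerTestBase {
  static {
    // A static initializer runs when the class is loaded, before any @BeforeClass
    // method (including the base class's) executes, so the property is visible
    // in time. Passing -D<property>=5 on the JVM command line works as well.
    System.setProperty(TxConstants.Manager.CFG_TX_TIMEOUT, "5");
  }
}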