use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
the class HBaseQueueAdmin method createStateStoreDataset.
private DatasetId createStateStoreDataset(String namespace) throws IOException {
  try {
    DatasetId stateStoreId = getStateStoreId(namespace);
    DatasetProperties configProperties =
      TableProperties.builder().setColumnFamily(QueueEntryRow.COLUMN_FAMILY).build();
    DatasetsUtil.createIfNotExists(datasetFramework, stateStoreId,
                                   HBaseQueueDatasetModule.STATE_STORE_TYPE_NAME, configProperties);
    return stateStoreId;
  } catch (DatasetManagementException e) {
    throw new IOException(e);
  }
}
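The wrap-and-rethrow at the end is a common way to surface DatasetManagementException through an IOException-only API. Below is a minimal sketch of the same pattern for an ordinary Table instance; it assumes the surrounding class holds a DatasetFramework field named datasetFramework, and the helper name, namespace/name arguments, and type-name choice are illustrative only.

// Sketch only: create a Table-type dataset if it is missing and convert the checked
// DatasetManagementException into an IOException for IO-centric callers.
// ensureTable and its arguments are hypothetical, not part of HBaseQueueAdmin.
private DatasetId ensureTable(String namespace, String name) throws IOException {
  try {
    DatasetId tableId = new DatasetId(namespace, name);
    DatasetsUtil.createIfNotExists(datasetFramework, tableId,
                                   Table.class.getName(), DatasetProperties.EMPTY);
    return tableId;
  } catch (DatasetManagementException e) {
    throw new IOException(e); // keep the original cause for diagnostics
  }
}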
use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
the class ExistingEntitySystemMetadataWriter method writeSystemMetadataForDatasets.
private void writeSystemMetadataForDatasets(NamespaceId namespace, DatasetFramework dsFramework)
  throws DatasetManagementException, IOException, NamespaceNotFoundException {
  SystemDatasetInstantiatorFactory systemDatasetInstantiatorFactory =
    new SystemDatasetInstantiatorFactory(locationFactory, dsFramework, cConf);
  try (SystemDatasetInstantiator systemDatasetInstantiator = systemDatasetInstantiatorFactory.create()) {
    for (DatasetSpecificationSummary summary : dsFramework.getInstances(namespace)) {
      final DatasetId dsInstance = namespace.dataset(summary.getName());
      DatasetProperties dsProperties = DatasetProperties.of(summary.getProperties());
      String dsType = summary.getType();
      Dataset dataset = null;
      try {
        try {
          dataset = impersonator.doAs(dsInstance, new Callable<Dataset>() {
            @Override
            public Dataset call() throws Exception {
              return systemDatasetInstantiator.getDataset(dsInstance);
            }
          });
        } catch (Exception e) {
          LOG.warn("Exception while instantiating dataset {}", dsInstance, e);
        }
        SystemMetadataWriter writer =
          new DatasetSystemMetadataWriter(metadataStore, dsInstance, dsProperties, dataset,
                                          dsType, summary.getDescription());
        writer.write();
      } finally {
        if (dataset != null) {
          dataset.close();
        }
      }
    }
  }
}
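Since Dataset extends Closeable and Impersonator.doAs takes a Callable, the anonymous class and the manual close can be written more compactly on Java 8+. A minimal sketch under that assumption, reusing the variable names from the method above:

// Sketch only (Java 8+): the impersonated instantiation as a lambda; dsInstance,
// impersonator and systemDatasetInstantiator are the same names as in the method above.
Dataset dataset = null;
try {
  try {
    dataset = impersonator.doAs(dsInstance, () -> systemDatasetInstantiator.getDataset(dsInstance));
  } catch (Exception e) {
    LOG.warn("Exception while instantiating dataset {}", dsInstance, e);
  }
  // ... write the system metadata as in the method above ...
} finally {
  if (dataset != null) {
    dataset.close();
  }
}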
use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
the class RemoteDatasetFrameworkTest method testSystemNamespace.
// Note: Cannot have these system namespace restrictions in system namespace since we use it internally in
// DatasetMetaTable util to add modules to system namespace. However, we should definitely impose these restrictions
// in RemoteDatasetFramework.
@Test
public void testSystemNamespace() throws DatasetManagementException {
  DatasetFramework framework = getFramework();
  // Adding module to system namespace should fail
  try {
    framework.addModule(NamespaceId.SYSTEM.datasetModule("keyValue"), new SingleTypeModule(SimpleKVTable.class));
    Assert.fail("Should not be able to add a module to system namespace");
  } catch (DatasetManagementException e) {
    // expected
  }
  try {
    framework.deleteModule(NamespaceId.SYSTEM.datasetModule("orderedTable-memory"));
    Assert.fail("Should not be able to delete a default module.");
  } catch (DatasetManagementException e) {
    // expected
  }
  try {
    framework.deleteAllModules(NamespaceId.SYSTEM);
    Assert.fail("Should not be able to delete modules from system namespace");
  } catch (DatasetManagementException e) {
    // expected
  }
}
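The three try/fail/catch blocks have the same shape and could be collapsed into a small helper. A minimal sketch; expectDatasetManagementException and the DatasetOp interface are hypothetical names, not part of the CDAP test code.

// Sketch only: a helper that asserts an operation fails with DatasetManagementException.
@FunctionalInterface
interface DatasetOp {
  void run() throws Exception;
}

private void expectDatasetManagementException(String failureMessage, DatasetOp op) throws Exception {
  try {
    op.run();
    Assert.fail(failureMessage);
  } catch (DatasetManagementException e) {
    // expected
  }
}

// Usage, equivalent to the first block in the test above:
// expectDatasetManagementException("Should not be able to add a module to system namespace",
//   () -> framework.addModule(NamespaceId.SYSTEM.datasetModule("keyValue"),
//                             new SingleTypeModule(SimpleKVTable.class)));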
use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
the class DatasetBasedTimeScheduleStore method upgrade.
/**
* Method to add version to row key in SchedulerStore.
*
* @throws InterruptedException
* @throws IOException
* @throws DatasetManagementException
*/
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
  while (true) {
    try {
      initializeScheduleTable();
      break;
    } catch (Exception ex) {
      // Expected if the cdap services are not up.
      TimeUnit.SECONDS.sleep(10);
    }
  }
  if (isUpgradeComplete()) {
    LOG.info("{} is already upgraded.", NAME);
    return;
  }
  final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
  final AtomicInteger tries = new AtomicInteger(0);
  LOG.info("Starting upgrade of {}.", NAME);
  while (!isUpgradeComplete()) {
    sleepTimeInSecs.set(60);
    try {
      factory.createExecutor(ImmutableList.of((TransactionAware) table))
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() {
            upgradeJobs(table);
            upgradeTriggers(table);
            // Upgrade is complete. Mark that app version upgrade is complete in the table.
            table.put(APP_VERSION_UPGRADE_KEY, COLUMN, Bytes.toBytes(ProjectInfo.getVersion().toString()));
          }
        });
    } catch (TransactionFailureException e) {
      if (e instanceof TransactionConflictException) {
        LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
        sleepTimeInSecs.set(10);
      } else {
        LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
        sleepTimeInSecs.set(60);
      }
    }
    if (tries.incrementAndGet() > 500) {
      LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
      return;
    }
    TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
  }
  LOG.info("Upgrade of {} is complete.", NAME);
}
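The retry behaviour above (10 seconds after a transaction conflict, 60 seconds after any other failure, at most 500 attempts) can be read as a single policy. A minimal sketch of that policy as a reusable helper; retryTransactional is a hypothetical name, and the helper returns on the first successful commit instead of looping on isUpgradeComplete().

// Sketch only: the retry policy of upgrade() distilled; factory, table and LOG are the
// same fields as above, and the delays and attempt limit mirror the constants in upgrade().
private boolean retryTransactional(TransactionExecutor.Subroutine work) throws InterruptedException {
  for (int attempt = 1; attempt <= 500; attempt++) {
    int delaySecs = 60;
    try {
      factory.createExecutor(ImmutableList.of((TransactionAware) table)).execute(work);
      return true; // committed
    } catch (TransactionFailureException e) {
      if (e instanceof TransactionConflictException) {
        delaySecs = 10; // conflicts are cheap to retry quickly
        LOG.debug("Transaction conflict on attempt {}. Retrying.", attempt, e);
      } else {
        LOG.error("Transaction failed on attempt {}. Retrying after delay.", attempt, e);
      }
    }
    TimeUnit.SECONDS.sleep(delaySecs);
  }
  return false; // gave up after 500 attempts
}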
use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
the class ExternalDatasets method makeTrackable.
/**
 * If the input is an external source, an external dataset is created for tracking purposes and returned.
 * If the input is a regular dataset or a stream, it is already trackable, hence the same input is returned.
 *
 * @param admin {@link Admin} used to create the external dataset
 * @param input input to be tracked
 * @return an {@link Input} backed by an external dataset if the input is an external source, otherwise the same
 *         input that was passed in
 */
public static Input makeTrackable(Admin admin, Input input) {
  // If input is not an external source, return the same input as it can be tracked by itself.
  if (!(input instanceof Input.InputFormatProviderInput)) {
    return input;
  }
  // Input is an external source, create an external dataset so that it can be tracked.
  String inputName = input.getName();
  InputFormatProvider inputFormatProvider = ((Input.InputFormatProviderInput) input).getInputFormatProvider();
  Map<String, String> inputFormatConfiguration = inputFormatProvider.getInputFormatConfiguration();
  // A dataset-backed input format provider can be tracked by itself without creating an external dataset.
  if (inputFormatProvider instanceof Dataset) {
    return input;
  }
  try {
    // Create an external dataset for the input format for lineage tracking
    Map<String, String> arguments = new HashMap<>();
    arguments.put("input.format.class", inputFormatProvider.getInputFormatClassName());
    arguments.putAll(inputFormatConfiguration);
    if (!admin.datasetExists(inputName)) {
      // Note: the dataset properties are the same as the arguments because we cannot identify them
      // separately; they are mixed up in a single configuration object (CDAP-5674).
      // Also, the properties of the external dataset created will contain runtime arguments for the same reason.
      admin.createDataset(inputName, EXTERNAL_DATASET_TYPE, DatasetProperties.of(arguments));
    } else {
      // Check if the external dataset name clashes with an existing CDAP Dataset
      String datasetType = admin.getDatasetType(inputName);
      if (!EXTERNAL_DATASET_TYPE.equals(datasetType)) {
        throw new IllegalArgumentException(
          "An external source cannot have the same name as an existing CDAP Dataset instance " + inputName);
      }
    }
    return Input.ofDataset(inputName, Collections.unmodifiableMap(arguments)).alias(input.getAlias());
  } catch (DatasetManagementException e) {
    throw Throwables.propagate(e);
  }
}
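A hedged usage sketch: an external InputFormatProvider can be routed through makeTrackable before it is registered with a program context, so the external dataset used for lineage is created up front. The admin instance is assumed to come from the program context, and the provider field and input name below are illustrative assumptions.

// Sketch only: wrap an external input so its lineage can be tracked.
// myInputFormatProvider is a hypothetical InputFormatProvider implementation.
Input external = Input.of("externalSource", myInputFormatProvider);
Input trackable = ExternalDatasets.makeTrackable(admin, external);
// 'trackable' is now a dataset-backed Input and can be registered with the program context.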