Use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
The class DefaultStore, method upgrade().
/**
* Method to add version in DefaultStore.
*
* @throws InterruptedException
* @throws IOException
* @throws DatasetManagementException
*/
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
  // If upgrade is already complete, then simply return.
  if (isUpgradeComplete()) {
    LOG.info("{} is already upgraded.", NAME);
    return;
  }
  final AtomicInteger maxRows = new AtomicInteger(1000);
  final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
  LOG.info("Starting upgrade of {}.", NAME);
  // to check whether they need to do additional scans to accommodate old data formats.
  while (!isUpgradeComplete()) {
    sleepTimeInSecs.set(60);
    try {
      Transactions.execute(transactional, new TxCallable<Void>() {
        @Override
        public Void call(DatasetContext context) throws Exception {
          AppMetadataStore store = getAppMetadataStore(context);
          boolean upgradeComplete = store.upgradeVersionKeys(maxRows.get());
          if (upgradeComplete) {
            store.setUpgradeComplete(APP_VERSION_UPGRADE_KEY);
          }
          return null;
        }
      });
    } catch (TransactionFailureException e) {
      if (e instanceof TransactionConflictException) {
        LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
        sleepTimeInSecs.set(10);
      } else if (e instanceof TransactionNotInProgressException) {
        int currMaxRows = maxRows.get();
        if (currMaxRows > 500) {
          maxRows.decrementAndGet();
        } else {
          LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
          return;
        }
        sleepTimeInSecs.set(10);
        LOG.debug("Upgrade step faced a Transaction Timeout exception. "
                    + "Reducing the number of max rows to : {} and retrying the operation now.", maxRows.get(), e);
      } else {
        LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
        sleepTimeInSecs.set(60);
      }
    }
    TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
  }
  LOG.info("Upgrade of {} is complete.", NAME);
}
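The interesting part of this snippet is the retry policy: a transaction conflict only triggers a quick retry, a transaction timeout shrinks the batch size before retrying, and any other failure backs off for a full minute. Below is a minimal, standalone sketch of that policy with the CDAP and Tephra types replaced by hypothetical stand-ins (BatchStep, ConflictException, TimeoutException) so it compiles on its own; it illustrates the pattern and is not the project's code.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class UpgradeRetrySketch {

  /** Hypothetical batch step: returns true once all rows have been migrated. */
  interface BatchStep {
    boolean run(int maxRows) throws Exception;
  }

  /** Stand-in for a transaction conflict (another writer touched the same rows). */
  static class ConflictException extends Exception { }

  /** Stand-in for a transaction timeout (the batch was too large to commit in time). */
  static class TimeoutException extends Exception { }

  public static void runWithRetries(BatchStep step) throws InterruptedException {
    AtomicInteger maxRows = new AtomicInteger(1000);
    while (true) {
      int sleepSecs = 60;
      try {
        if (step.run(maxRows.get())) {
          return;                    // upgrade finished
        }
      } catch (ConflictException e) {
        sleepSecs = 10;              // lost a race; retry soon with the same batch size
      } catch (TimeoutException e) {
        if (maxRows.get() <= 500) {
          return;                    // give up once the batch-size floor is reached
        }
        maxRows.decrementAndGet();   // shrink the batch and retry soon
        sleepSecs = 10;
      } catch (Exception e) {
        sleepSecs = 60;              // unknown failure; retry after a longer delay
      }
      TimeUnit.SECONDS.sleep(sleepSecs);
    }
  }
}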
Use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
The class AdminApp, method performAdmin().
// this will get called from the worker, also from a custom workflow action
static void performAdmin(RuntimeContext context) {
  Admin admin = context.getAdmin();
  Map<String, String> args = context.getRuntimeArguments();
  try {
    // if invoked with dropAll=true, clean up all datasets (a, b, c, d)
    if ("true".equals(args.get("dropAll"))) {
      for (String name : new String[] { "a", "b", "c", "d" }) {
        if (admin.datasetExists(name)) {
          admin.dropDataset(name);
        }
      }
    } else {
      // create a, update b with /extra in base path, truncate c, drop d
      admin.createDataset("a", Table.class.getName(), DatasetProperties.EMPTY);
      String type = admin.getDatasetType("b");
      Assert.assertEquals(FileSet.class.getName(), type);
      DatasetProperties bProps = admin.getDatasetProperties("b");
      String base = bProps.getProperties().get("base.path");
      Assert.assertNotNull(base);
      String newBase = args.get("new.base.path");
      DatasetProperties newBProps = ((FileSetProperties.Builder) FileSetProperties.builder()
        .addAll(bProps.getProperties()))
        .setDataExternal(true)
        .setBasePath(newBase)
        .build();
      admin.updateDataset("b", newBProps);
      admin.truncateDataset("c");
      admin.dropDataset("d");
    }
  } catch (DatasetManagementException e) {
    Throwables.propagate(e);
  }
}
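The Admin methods used here (datasetExists, createDataset, getDatasetType, getDatasetProperties, updateDataset, truncateDataset, dropDataset) all declare DatasetManagementException, which is why the whole block is wrapped in a single try/catch. As a small illustration of the same API, here is a hypothetical helper (not part of the snippet above) that creates a Table dataset only when it does not already exist, so the call is safe to repeat:

import co.cask.cdap.api.Admin;
import co.cask.cdap.api.dataset.DatasetManagementException;
import co.cask.cdap.api.dataset.DatasetProperties;
import co.cask.cdap.api.dataset.table.Table;

public final class DatasetAdminHelper {

  private DatasetAdminHelper() { }

  /**
   * Creates a Table dataset with default properties unless it already exists.
   * Returns true if the dataset was created, false if it was already there.
   */
  public static boolean ensureTableDataset(Admin admin, String name) throws DatasetManagementException {
    if (admin.datasetExists(name)) {
      return false;
    }
    admin.createDataset(name, Table.class.getName(), DatasetProperties.EMPTY);
    return true;
  }
}

The dropAll branch above uses the same guard inline: checking datasetExists before dropDataset keeps the cleanup from failing on datasets that were never created.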
Use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
The class DatasetBasedStreamSizeScheduleStore, method upgrade().
/**
* Method to add version in StreamSizeSchedule row key in SchedulerStore.
*
* @throws InterruptedException
* @throws IOException
* @throws DatasetManagementException
*/
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
  // Wait until the store is initialized
  // Use a new instance of table since Table is not thread safe
  Table metaTable = null;
  while (metaTable == null) {
    try {
      metaTable = tableUtil.getMetaTable();
    } catch (Exception e) {
      // ignore exception
    }
    TimeUnit.SECONDS.sleep(10);
  }
  if (isUpgradeComplete()) {
    LOG.info("{} is already upgraded.", NAME);
    return;
  }
  final AtomicInteger maxNumberUpdateRows = new AtomicInteger(1000);
  final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
  LOG.info("Starting upgrade of {}.", NAME);
  while (true) {
    sleepTimeInSecs.set(60);
    try {
      if (executeUpgradeInTransaction(metaTable, maxNumberUpdateRows)) {
        break;
      }
    } catch (TransactionFailureException e) {
      if (e instanceof TransactionConflictException) {
        LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
        sleepTimeInSecs.set(10);
      } else if (e instanceof TransactionNotInProgressException) {
        int currMaxRows = maxNumberUpdateRows.get();
        if (currMaxRows > 500) {
          maxNumberUpdateRows.decrementAndGet();
        } else {
          LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
          return;
        }
        sleepTimeInSecs.set(10);
        LOG.debug("Upgrade step faced a Transaction Timeout exception. "
                    + "Current number of max update rows is set to : {} and retrying the operation now.", maxNumberUpdateRows.get(), e);
      } else {
        LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
        sleepTimeInSecs.set(60);
      }
    }
    TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
  }
  LOG.info("Upgrade of {} is complete.", NAME);
}
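The initialization wait at the top of this method is a simple poll loop: keep calling tableUtil.getMetaTable(), swallow failures, and sleep between attempts until a table comes back. A generic version of that idea is sketched below using only standard Java types (the class and method names are illustrative, not CDAP API); unlike the snippet, it skips the final sleep once the resource has been obtained.

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public final class PollUntilAvailable {

  private PollUntilAvailable() { }

  /** Polls the supplier until it returns a non-null value, sleeping between failed attempts. */
  public static <T> T await(Callable<T> supplier, long sleepSeconds) throws InterruptedException {
    T value = null;
    while (value == null) {
      try {
        value = supplier.call();
      } catch (Exception e) {
        // not ready yet; fall through and retry after the sleep below
      }
      if (value == null) {
        TimeUnit.SECONDS.sleep(sleepSeconds);
      }
    }
    return value;
  }
}

With a helper like this, the wait loop above would collapse to something like Table metaTable = PollUntilAvailable.await(tableUtil::getMetaTable, 10).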
Use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
The class PreviewDatasetFramework, method getDataset().
@Nullable
@Override
public <T extends Dataset> T getDataset(final DatasetId datasetInstanceId, final Map<String, String> arguments,
                                         @Nullable final ClassLoader classLoader,
                                         final DatasetClassLoaderProvider classLoaderProvider,
                                         @Nullable final Iterable<? extends EntityId> owners,
                                         final AccessType accessType) throws DatasetManagementException, IOException {
  Principal principal = authenticationContext.getPrincipal();
  try {
    AuthorizationEnforcer enforcer;
    final boolean isUserDataset = DatasetsUtil.isUserDataset(datasetInstanceId);
    // enforce authorization only for datasets from the real space
    if (isUserDataset && actualDatasetFramework.hasInstance(datasetInstanceId)) {
      enforcer = authorizationEnforcer;
    } else {
      enforcer = NOOP_ENFORCER;
    }
    return DefaultDatasetRuntimeContext.execute(
      enforcer, NOOP_DATASET_ACCESS_RECORDER, principal, datasetInstanceId, null,
      new Callable<T>() {
        @Override
        public T call() throws Exception {
          if (isUserDataset && actualDatasetFramework.hasInstance(datasetInstanceId)) {
            return actualDatasetFramework.getDataset(datasetInstanceId, arguments, classLoader,
                                                     classLoaderProvider, owners, accessType);
          }
          return localDatasetFramework.getDataset(datasetInstanceId, arguments, classLoader,
                                                  classLoaderProvider, owners, accessType);
        }
      });
  } catch (IOException | DatasetManagementException e) {
    throw e;
  } catch (Exception e) {
    throw new DatasetManagementException("Failed to create dataset instance: " + datasetInstanceId, e);
  }
}
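The catch blocks are worth noting: IOException and DatasetManagementException are rethrown untouched because they are already part of the method's contract, while every other exception coming out of the Callable is wrapped in a new DatasetManagementException carrying the dataset id in its message. The standalone sketch below shows that translation idiom in isolation; WrappedException is a hypothetical stand-in for DatasetManagementException.

import java.io.IOException;
import java.util.concurrent.Callable;

public final class ExceptionTranslation {

  /** Hypothetical stand-in for DatasetManagementException. */
  static class WrappedException extends Exception {
    WrappedException(String message, Throwable cause) {
      super(message, cause);
    }
  }

  private ExceptionTranslation() { }

  /** Runs the callable, rethrowing the declared exception types as-is and wrapping everything else. */
  static <T> T callTranslated(Callable<T> callable, String what) throws IOException, WrappedException {
    try {
      return callable.call();
    } catch (IOException | WrappedException e) {
      throw e;                                   // already one of the declared types
    } catch (Exception e) {
      throw new WrappedException("Failed to execute: " + what, e);
    }
  }
}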
Use of co.cask.cdap.api.dataset.DatasetManagementException in project cdap by caskdata.
The class TimePartitionedFileSetTest, method validateInputPaths().
/**
 * Validates that the input format configuration of the tpfs, when instantiated with (time + start * minutes) as
 * the input start time and (time + end * minutes) as the input end time, returns the expected list of paths.
 */
private void validateInputPaths(long time, long start, long end, final String... expected)
  throws IOException, DatasetManagementException, InterruptedException, TransactionFailureException {
  Map<String, String> arguments = Maps.newHashMap();
  TimePartitionedFileSetArguments.setInputStartTime(arguments, time + start * MINUTE);
  TimePartitionedFileSetArguments.setInputEndTime(arguments, time + end * MINUTE);
  final TimePartitionedFileSet tpfs = dsFrameworkUtil.getInstance(TPFS_INSTANCE, arguments);
  TransactionAware txAwareDataset = (TransactionAware) tpfs;
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Map<String, String> inputConfig = tpfs.getInputFormatConfiguration();
      String inputs = inputConfig.get(FileInputFormat.INPUT_DIR);
      Assert.assertNotNull(inputs);
      if (expected.length == 0) {
        Assert.assertTrue(inputs.isEmpty());
        return;
      }
      String[] inputPaths = inputs.split(",");
      Assert.assertEquals(expected.length, inputPaths.length);
      // order is not guaranteed.
      Arrays.sort(expected);
      Arrays.sort(inputPaths);
      for (int i = 0; i < expected.length; i++) {
        // every input path is absolute, whereas expected paths are relative
        Assert.assertTrue("path #" + i + " does not match", inputPaths[i].endsWith(expected[i]));
      }
    }
  });
}
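The final loop compares expected and actual paths order-insensitively and only by suffix, since the configured input paths are absolute while the expected values are relative. Pulled out as a reusable JUnit helper, that check might look like the hypothetical sketch below, which mirrors the snippet's logic:

import java.util.Arrays;
import org.junit.Assert;

final class PathAssertions {

  private PathAssertions() { }

  /** Asserts that each actual path ends with the matching expected suffix, ignoring order. */
  static void assertPathsEndWith(String[] expectedSuffixes, String[] actualPaths) {
    Assert.assertEquals(expectedSuffixes.length, actualPaths.length);
    // sort copies so the comparison does not depend on the order in which the paths were produced
    String[] expected = expectedSuffixes.clone();
    String[] actual = actualPaths.clone();
    Arrays.sort(expected);
    Arrays.sort(actual);
    for (int i = 0; i < expected.length; i++) {
      Assert.assertTrue("path #" + i + " does not match", actual[i].endsWith(expected[i]));
    }
  }
}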