use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
the class DatasetTypeManager method deleteModule.
/**
 * Deletes the specified dataset module.
 * @param datasetModuleId {@link DatasetModuleId} of the dataset module to delete
 * @return true if deleted successfully, false if the module didn't exist (nothing to delete)
 * @throws DatasetModuleConflictException when other modules depend on the specified one, in which case
 *         the deletion does NOT happen
 */
public boolean deleteModule(final DatasetModuleId datasetModuleId) throws DatasetModuleConflictException {
  LOG.info("Deleting module {}", datasetModuleId);
  try {
    final DatasetTypeMDS datasetTypeMDS = datasetCache.getDataset(DatasetMetaTableUtil.META_TABLE_NAME);
    final DatasetInstanceMDS datasetInstanceMDS = datasetCache.getDataset(DatasetMetaTableUtil.INSTANCE_TABLE_NAME);
    return txExecutorFactory.createExecutor(datasetCache).execute(new Callable<Boolean>() {
      @Override
      public Boolean call() throws DatasetModuleConflictException, IOException {
        final DatasetModuleMeta module = datasetTypeMDS.getModule(datasetModuleId);
        if (module == null) {
          return false;
        }
        // cannot delete while other modules use this one
        if (!module.getUsedByModules().isEmpty()) {
          String msg = String.format("Cannot delete module %s: other modules depend on it. Delete them first", module);
          throw new DatasetModuleConflictException(msg);
        }
        Collection<DatasetSpecification> instances =
          datasetInstanceMDS.getByTypes(datasetModuleId.getParent(), ImmutableSet.copyOf(module.getTypes()));
        // cannot delete while dataset instances use types from this module
        if (!instances.isEmpty()) {
          String msg = String.format("Cannot delete module %s: other instances depend on it. Delete them first", module);
          throw new DatasetModuleConflictException(msg);
        }
        // remove this module from the "usedBy" lists of the modules it uses
        for (String usedModuleName : module.getUsesModules()) {
          DatasetModuleId usedModuleId = new DatasetModuleId(datasetModuleId.getNamespace(), usedModuleName);
          // not using getModuleWithFallback here because we want to know the namespace in which usedModule was
          // found, so we can overwrite it in the MDS in the appropriate namespace
          DatasetModuleMeta usedModule = datasetTypeMDS.getModule(usedModuleId);
          // if the usedModule is not found in the current namespace, try finding it in the system namespace
          if (usedModule == null) {
            usedModuleId = NamespaceId.SYSTEM.datasetModule(usedModuleName);
            usedModule = datasetTypeMDS.getModule(usedModuleId);
            Preconditions.checkState(usedModule != null, "Could not find a module %s that the module %s uses.",
                                     usedModuleName, datasetModuleId.getEntityName());
          }
          usedModule.removeUsedByModule(datasetModuleId.getEntityName());
          datasetTypeMDS.writeModule(usedModuleId.getParent(), usedModule);
        }
        datasetTypeMDS.deleteModule(datasetModuleId);
        try {
          // also delete the module jar
          Location moduleJarLocation = impersonator.doAs(datasetModuleId, new Callable<Location>() {
            @Override
            public Location call() throws Exception {
              return Locations.getLocationFromAbsolutePath(locationFactory, module.getJarLocationPath());
            }
          });
          if (!moduleJarLocation.delete()) {
            LOG.debug("Could not delete dataset module archive");
          }
        } catch (Exception e) {
          // the only checked exception the callable can throw is IOException
          Throwables.propagateIfInstanceOf(e, IOException.class);
          throw Throwables.propagate(e);
        }
        return true;
      }
    });
  } catch (TransactionFailureException e) {
    if (e.getCause() instanceof DatasetModuleConflictException) {
      throw (DatasetModuleConflictException) e.getCause();
    }
    throw Throwables.propagate(e);
  } catch (Exception e) {
    LOG.error("Operation failed", e);
    throw Throwables.propagate(e);
  }
}
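For orientation, here is a minimal, hypothetical caller sketch (imports elided, as elsewhere on this page; the typeManager instance would normally arrive via dependency injection, and the namespace and module names are made up):

DatasetModuleId moduleId = new NamespaceId("myspace").datasetModule("myModule");
try {
  if (!typeManager.deleteModule(moduleId)) {
    LOG.info("Module {} did not exist; nothing to delete", moduleId);
  }
} catch (DatasetModuleConflictException e) {
  // other modules or dataset instances still depend on this module;
  // the dependents must be deleted first, then the call retried
}

Note how the TransactionFailureException raised inside the executor is unwrapped back into a DatasetModuleConflictException, so callers only ever see the domain-level exception.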
use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
the class DatasetTypeManager method deleteModules.
/**
* Deletes all modules in a namespace, other than system.
* Presumes that the namespace has already been checked to be non-system.
*
* @param namespaceId the {@link NamespaceId} to delete modules from.
*/
public void deleteModules(final NamespaceId namespaceId) throws DatasetModuleConflictException {
  Preconditions.checkArgument(namespaceId != null && !NamespaceId.SYSTEM.equals(namespaceId),
                              "Cannot delete modules from system namespace");
  LOG.info("Deleting all modules from namespace {}", namespaceId);
  try {
    final DatasetTypeMDS datasetTypeMDS = datasetCache.getDataset(DatasetMetaTableUtil.META_TABLE_NAME);
    final DatasetInstanceMDS datasetInstanceMDS = datasetCache.getDataset(DatasetMetaTableUtil.INSTANCE_TABLE_NAME);
    txExecutorFactory.createExecutor(datasetCache).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws DatasetModuleConflictException, IOException {
        final Set<String> typesToDelete = new HashSet<String>();
        final List<Location> moduleLocations = new ArrayList<>();
        final Collection<DatasetModuleMeta> modules = datasetTypeMDS.getModules(namespaceId);
        try {
          impersonator.doAs(namespaceId, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
              for (DatasetModuleMeta module : modules) {
                typesToDelete.addAll(module.getTypes());
                moduleLocations.add(Locations.getLocationFromAbsolutePath(locationFactory, module.getJarLocationPath()));
              }
              return null;
            }
          });
        } catch (Exception e) {
          // the callable throws no checked exceptions
          throw Throwables.propagate(e);
        }
        // check whether any dataset instances use types from these modules
        Collection<DatasetSpecification> instances = datasetInstanceMDS.getByTypes(namespaceId, typesToDelete);
        // cannot delete while dataset instances depend on these types
        if (!instances.isEmpty()) {
          throw new DatasetModuleConflictException("Cannot delete all modules: existing dataset instances depend on it. Delete them first");
        }
        datasetTypeMDS.deleteModules(namespaceId);
        // delete the module locations
        for (Location moduleLocation : moduleLocations) {
          if (!moduleLocation.delete()) {
            LOG.debug("Could not delete dataset module archive - {}", moduleLocation);
          }
        }
      }
    });
  } catch (TransactionFailureException e) {
    if (e.getCause() instanceof DatasetModuleConflictException) {
      throw (DatasetModuleConflictException) e.getCause();
    }
    LOG.error("Failed to delete all modules from namespace {}", namespaceId);
    throw Throwables.propagate(e);
  } catch (Exception e) {
    LOG.error("Operation failed", e);
    throw Throwables.propagate(e);
  }
}
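A hedged usage sketch (the typeManager instance and namespace name are assumptions; note that the Preconditions guard makes a call with the system namespace fail fast with an IllegalArgumentException):

try {
  typeManager.deleteModules(new NamespaceId("myspace"));
} catch (DatasetModuleConflictException e) {
  // dataset instances in the namespace still use types from these modules;
  // delete those instances first, then retry
}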
use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
the class InMemoryStreamFileWriterFactory method create.
@Override
public FileWriter<StreamEvent> create(StreamConfig config, int generation) throws IOException {
  final QueueProducer producer = queueClientFactory.createProducer(QueueName.fromStream(config.getStreamId()));
  final List<TransactionAware> txAwares = Lists.newArrayList();
  if (producer instanceof TransactionAware) {
    txAwares.add((TransactionAware) producer);
  }
  final TransactionExecutor txExecutor = executorFactory.createExecutor(txAwares);
  // adapt the QueueProducer to the FileWriter interface
  return new FileWriter<StreamEvent>() {
    private final List<StreamEvent> events = Lists.newArrayList();

    @Override
    public void append(StreamEvent event) throws IOException {
      events.add(event);
    }

    @Override
    public void appendAll(Iterator<? extends StreamEvent> events) throws IOException {
      Iterators.addAll(this.events, events);
    }

    @Override
    public void close() throws IOException {
      producer.close();
    }

    @Override
    public void flush() throws IOException {
      try {
        txExecutor.execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            for (StreamEvent event : events) {
              producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
            }
            events.clear();
          }
        });
      } catch (TransactionFailureException e) {
        throw new IOException(e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
      }
    }
  };
}
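A minimal sketch of how the returned writer might be used (the writerFactory and config objects are assumed to exist; the single-argument StreamEvent constructor taking the event body is also an assumption):

FileWriter<StreamEvent> writer = writerFactory.create(config, 0);
try {
  writer.append(new StreamEvent(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8))));
  writer.flush(); // enqueues all buffered events in a single transaction
} finally {
  writer.close();
}

Because flush() runs the enqueue loop inside one transaction, a TransactionFailureException surfaces to the caller as a plain IOException, matching the FileWriter contract.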
use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
the class FileMetadataCleaner method scanAndGetFilesToDelete.
/**
 * Scans for metadata in the new format whose log retention has expired.
 * @param tillTime time until which files will be deleted
 * @param transactionTimeout transaction timeout to use for scanning and deleting entries
 * @return list of {@link DeletedEntry} - used to get the files to delete whose metadata has already been deleted
 */
public List<DeletedEntry> scanAndGetFilesToDelete(final long tillTime, final int transactionTimeout) {
  final List<DeletedEntry> toDelete = new ArrayList<>();
  // the CDAPLogAppender check guarantees transactionTimeout is greater than TX_TIMEOUT_DISCOUNT_SECS
  final int cutOffTransactionTime = transactionTimeout - TX_TIMEOUT_DISCOUNT_SECS;
  try {
    transactional.execute(transactionTimeout, new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        Table table = LoggingStoreTableUtil.getMetadataTable(context, datasetManager);
        Stopwatch stopwatch = new Stopwatch().start();
        byte[] startRowKey = NEW_ROW_KEY_PREFIX;
        byte[] endRowKey = NEW_ROW_KEY_PREFIX_END;
        boolean reachedEnd = false;
        while (!reachedEnd) {
          try (Scanner scanner = table.scan(startRowKey, endRowKey)) {
            while (stopwatch.elapsedTime(TimeUnit.SECONDS) < cutOffTransactionTime) {
              Row row = scanner.next();
              if (row == null) {
                // the scanner returned null, so we have reached the end
                reachedEnd = true;
                break;
              }
              byte[] rowkey = row.getRow();
              // in the new format, the file creation time is the last 8 bytes of the row key
              long creationTime = Bytes.toLong(rowkey, rowkey.length - Bytes.SIZEOF_LONG, Bytes.SIZEOF_LONG);
              if (creationTime <= tillTime) {
                // expired - can be deleted
                toDelete.add(new DeletedEntry(rowkey,
                                              Bytes.toString(row.get(LoggingStoreTableUtil.META_TABLE_COLUMN_KEY))));
              } else {
                // update the start row key based on the logging context and start a new scan
                startRowKey = Bytes.add(NEW_ROW_KEY_PREFIX, getNextContextStartKey(rowkey));
                break;
              }
            }
          }
        }
      }
    });
  } catch (TransactionFailureException e) {
    LOG.warn("Got Exception while scanning metadata table", e);
    // on exception we have no reliable metadata, so file deletion should be skipped
    return new ArrayList<>();
  }
  if (!toDelete.isEmpty()) {
    // delete old metadata whenever there are expired entries to delete in the new format,
    // though the first such call will already delete all of the old metadata
    scanAndDeleteOldMetaData(transactionTimeout, cutOffTransactionTime);
    // delete the metadata entries in toDelete and get the list of file locations
    return deleteNewMetadataEntries(toDelete, transactionTimeout, cutOffTransactionTime);
  }
  // toDelete is empty, so it is safe to return it as-is
  return toDelete;
}
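A hedged sketch of a retention sweep built on this method (the cleaner instance, the seven-day retention window, and the 30-second timeout are all assumptions for illustration):

// delete metadata for files older than seven days, with a 30-second transaction timeout
long tillTime = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
List<DeletedEntry> deleted = cleaner.scanAndGetFilesToDelete(tillTime, 30);
for (DeletedEntry entry : deleted) {
  // each entry identifies a log file whose metadata was just removed;
  // the caller is now responsible for deleting the file itself
}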
use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
the class AbstractContext method execute.
/**
* Execute in a transaction with optional retry on conflict.
*/
public void execute(final TxRunnable runnable, boolean retryOnConflict) throws TransactionFailureException {
  ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(getClass().getClassLoader());
  try {
    Transactional txnl = retryOnConflict
      ? Transactions.createTransactionalWithRetry(transactional, RetryStrategies.retryOnConflict(20, 100))
      : transactional;
    txnl.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(getProgramInvocationClassLoader());
        try {
          runnable.run(context);
        } finally {
          ClassLoaders.setContextClassLoader(oldClassLoader);
        }
      }
    });
  } finally {
    ClassLoaders.setContextClassLoader(oldClassLoader);
  }
}
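A brief caller sketch (the context instance and dataset name are illustrative; the Put builder is the standard CDAP Table write):

context.execute(new TxRunnable() {
  @Override
  public void run(DatasetContext ctx) throws Exception {
    Table table = ctx.getDataset("myTable");
    table.put(new Put("rowKey").add("column", "value"));
  }
}, true); // retry on conflict, up to 20 attempts with 100 ms delay per the strategy above

With retryOnConflict set to true, conflicts are retried transparently and only non-conflict failures propagate as TransactionFailureException.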