Use of co.cask.cdap.api.TxRunnable in project cdap by caskdata.
In class ArtifactStore, method write:
/**
 * Write the artifact and its metadata to the store. Once added, artifacts cannot be changed unless they are
 * snapshot versions.
 *
 * @param artifactId the id of the artifact to add
 * @param artifactMeta the metadata for the artifact
 * @param artifactContentSupplier the supplier for the input stream of the contents of the artifact
 * @param entityImpersonator the impersonator to use when copying the artifact contents to the filesystem
 * @return detail about the newly added artifact
 * @throws WriteConflictException if the artifact is already currently being written
 * @throws ArtifactAlreadyExistsException if a non-snapshot version of the artifact already exists
 * @throws IOException if there was an exception persisting the artifact contents to the filesystem,
 * or persisting the artifact metadata to the metastore
 */
public ArtifactDetail write(final Id.Artifact artifactId, final ArtifactMeta artifactMeta,
                            final InputSupplier<? extends InputStream> artifactContentSupplier,
                            EntityImpersonator entityImpersonator)
  throws WriteConflictException, ArtifactAlreadyExistsException, IOException {
  // if we're not a snapshot version, check that the artifact doesn't exist already.
  final ArtifactCell artifactCell = new ArtifactCell(artifactId);
  if (!artifactId.getVersion().isSnapshot()) {
    try {
      transactional.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext context) throws Exception {
          if (getMetaTable(context).get(artifactCell.rowkey, artifactCell.column) != null) {
            throw new ArtifactAlreadyExistsException(artifactId.toEntityId());
          }
        }
      });
    } catch (TransactionFailureException e) {
      throw Transactions.propagate(e, ArtifactAlreadyExistsException.class, IOException.class);
    }
  }

  final Location destination;
  try {
    destination = copyFileToDestination(artifactId, artifactContentSupplier, entityImpersonator);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
  // now try and write the metadata for the artifact
  try {
    transactional.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        // re-check that the metadata doesn't exist, since somebody else may have written
        // the artifact while we were copying it to the filesystem.
        Table metaTable = getMetaTable(context);
        byte[] existingMetaBytes = metaTable.get(artifactCell.rowkey, artifactCell.column);
        boolean isSnapshot = artifactId.getVersion().isSnapshot();
        if (existingMetaBytes != null && !isSnapshot) {
          // non-snapshot artifacts are immutable. If there is existing metadata, stop here.
          throw new ArtifactAlreadyExistsException(artifactId.toEntityId());
        }
        ArtifactData data = new ArtifactData(destination, artifactMeta);
        // any existing metadata at this point belongs to a snapshot that is being overwritten;
        // clean up its old jar and delete its plugin and app rows.
        if (existingMetaBytes != null) {
          deleteMeta(metaTable, artifactId, existingMetaBytes);
        }
        // write the new artifact metadata
        writeMeta(metaTable, artifactId, data);
      }
    });
    return new ArtifactDetail(new ArtifactDescriptor(artifactId.toArtifactId(), destination), artifactMeta);
  } catch (TransactionConflictException e) {
    destination.delete();
    throw new WriteConflictException(artifactId);
  } catch (TransactionFailureException e) {
    destination.delete();
    throw Transactions.propagate(e, ArtifactAlreadyExistsException.class, IOException.class);
  }
}
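
The notable pattern here is that the existence check is repeated inside the second transaction: the file copy happens outside any transaction, so another writer may have added the artifact in the meantime, and only a check that runs in the same transaction as writeMeta can rule that out. Below is a minimal sketch of that check-then-write shape, assuming a Transactional instance and a hypothetical Table dataset named "meta" (neither name comes from ArtifactStore):

import co.cask.cdap.api.Transactional;
import co.cask.cdap.api.Transactionals;
import co.cask.cdap.api.TxRunnable;
import co.cask.cdap.api.data.DatasetContext;
import co.cask.cdap.api.dataset.table.Table;

public class ImmutableRowWriter {
  private final Transactional transactional;

  public ImmutableRowWriter(Transactional transactional) {
    this.transactional = transactional;
  }

  // The existence check and the put run in one transaction, so a concurrent
  // writer cannot slip in between them; a conflict fails the whole transaction.
  public void writeOnce(final byte[] row, final byte[] col, final byte[] value) {
    Transactionals.execute(transactional, new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        Table table = context.getDataset("meta"); // hypothetical dataset name
        if (table.get(row, col) != null) {
          throw new IllegalStateException("cell already exists and is immutable");
        }
        table.put(row, col, value);
      }
    });
  }
}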
Use of co.cask.cdap.api.TxRunnable in project cdap by caskdata.
In class MapReduceRuntimeService, method beforeSubmit:
/**
 * For pre-3.5 MapReduce programs, calls the {@link MapReduce#beforeSubmit(MapReduceContext)} method.
 * For MapReduce programs created after 3.5, calls the initialize method of the {@link ProgramLifecycle}.
 * This method also sets up the Input/Output within the same transaction.
 */
@SuppressWarnings("unchecked")
private void beforeSubmit(final Job job) throws Exception {
  // AbstractMapReduce implements a final initialize(context) and requires subclasses to
  // implement initialize(), whereas programs that directly implement MapReduce have
  // the option to override initialize(context) (if they implement ProgramLifecycle)
  final TransactionControl txControl =
    mapReduce instanceof AbstractMapReduce
      ? Transactions.getTransactionControl(TransactionControl.IMPLICIT, AbstractMapReduce.class,
                                           mapReduce, "initialize")
      : mapReduce instanceof ProgramLifecycle
        ? Transactions.getTransactionControl(TransactionControl.IMPLICIT, MapReduce.class,
                                             mapReduce, "initialize", MapReduceContext.class)
        : TransactionControl.IMPLICIT;
  if (TransactionControl.EXPLICIT == txControl) {
    doInitialize(job);
  } else {
    Transactionals.execute(context, new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        doInitialize(job);
      }
    }, Exception.class);
  }

  ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(job.getConfiguration().getClassLoader());
  try {
    // set input/outputs info, and get one of the configured mapper's TypeToken
    TypeToken<?> mapperTypeToken = setInputsIfNeeded(job);
    setOutputsIfNeeded(job);
    setOutputClassesIfNeeded(job, mapperTypeToken);
    setMapOutputClassesIfNeeded(job, mapperTypeToken);
  } finally {
    ClassLoaders.setContextClassLoader(oldClassLoader);
  }
}
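
The txControl lookup above resolves the transaction policy declared on the program's initialize method; a program that declares EXPLICIT control gets doInitialize(job) called outside a transaction. A hedged sketch of a program that would take that branch, assuming CDAP's @TransactionPolicy annotation is what Transactions.getTransactionControl inspects (the class and dataset names here are made up):

import co.cask.cdap.api.annotation.TransactionControl;
import co.cask.cdap.api.annotation.TransactionPolicy;
import co.cask.cdap.api.data.batch.Input;
import co.cask.cdap.api.data.batch.Output;
import co.cask.cdap.api.mapreduce.AbstractMapReduce;

public class LineCountMapReduce extends AbstractMapReduce {

  // EXPLICIT opts out of the implicit transaction, so the runtime service
  // calls doInitialize(job) directly rather than wrapping it in a TxRunnable.
  @Override
  @TransactionPolicy(TransactionControl.EXPLICIT)
  protected void initialize() throws Exception {
    getContext().addInput(Input.ofDataset("lines"));    // hypothetical dataset
    getContext().addOutput(Output.ofDataset("counts")); // hypothetical dataset
  }
}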
Use of co.cask.cdap.api.TxRunnable in project cdap by caskdata.
In class TxLookupProvider, method executeLookup:
@Nullable
private <T, R> R executeLookup(final String table, final Map<String, String> arguments,
                               final Function<Lookup<T>, R> func) {
  final AtomicReference<R> result = new AtomicReference<>();
  Transactionals.execute(tx, new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      Lookup<T> lookup = getLookup(table, context.getDataset(table, arguments));
      result.set(func.apply(lookup));
    }
  });
  return result.get();
}
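
Because TxRunnable.run returns void, the Function's result is carried out of the transaction through an AtomicReference and only read back after the transaction commits. A hedged sketch of a delegating helper inside the same class, assuming Guava's com.google.common.base.Function, java.util.Collections, and a Lookup<T> that exposes T lookup(String key) as in CDAP's ETL Lookup API:

public String lookupSingle(final String table, final String key) {
  // func runs inside the transaction; the String result survives the commit
  return this.<String, String>executeLookup(table, Collections.<String, String>emptyMap(),
    new Function<Lookup<String>, String>() {
      @Override
      public String apply(Lookup<String> lookup) {
        return lookup.lookup(key); // assumes Lookup<T> exposes T lookup(String)
      }
    });
}

lookupSingle is a made-up name; the point is that executeLookup turns a void transactional callback into a value-returning call.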