Use of org.apache.tephra.Transaction in project cdap by caskdata.
Class MapReduceTaskContextProvider, method createCacheLoader:
/**
* Creates a {@link CacheLoader} for the task context cache.
*/
private CacheLoader<ContextCacheKey, BasicMapReduceTaskContext> createCacheLoader(final Injector injector) {
    final DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
    final DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    final SecureStore secureStore = injector.getInstance(SecureStore.class);
    final SecureStoreManager secureStoreManager = injector.getInstance(SecureStoreManager.class);
    final MessagingService messagingService = injector.getInstance(MessagingService.class);
    // Multiple instances of BasicMapReduceTaskContext can share the same program.
    final AtomicReference<Program> programRef = new AtomicReference<>();
    return new CacheLoader<ContextCacheKey, BasicMapReduceTaskContext>() {

        @Override
        public BasicMapReduceTaskContext load(ContextCacheKey key) throws Exception {
            TaskAttemptID taskAttemptId = key.getTaskAttemptID();
            // taskAttemptId could be null if used from an org.apache.hadoop.mapreduce.Partitioner or
            // from an org.apache.hadoop.io.RawComparator, in which case we can get the JobId from the conf.
            // Note that the JobId isn't in the conf for the OutputCommitter#setupJob method, in which case
            // we use the taskAttemptId.
            Path txFile = MainOutputCommitter.getTxFile(key.getConfiguration(),
                                                        taskAttemptId != null ? taskAttemptId.getJobID() : null);
            FileSystem fs = txFile.getFileSystem(key.getConfiguration());
            Preconditions.checkArgument(fs.exists(txFile));
            Transaction tx;
            try (FSDataInputStream txFileInputStream = fs.open(txFile)) {
                byte[] txByteArray = ByteStreams.toByteArray(txFileInputStream);
                tx = new TransactionCodec().decode(txByteArray);
            }
            MapReduceContextConfig contextConfig = new MapReduceContextConfig(key.getConfiguration());
            MapReduceClassLoader classLoader = MapReduceClassLoader.getFromConfiguration(key.getConfiguration());
            Program program = programRef.get();
            if (program == null) {
                // Creation of a Program is relatively cheap, so just create one and do a compare-and-set.
                programRef.compareAndSet(null, createProgram(contextConfig, classLoader.getProgramClassLoader()));
                program = programRef.get();
            }
            WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
            DatasetFramework programDatasetFramework = workflowInfo == null
                ? datasetFramework
                : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo,
                                                                           program.getApplicationSpecification());
            // Setup dataset framework context, if required
            if (programDatasetFramework instanceof ProgramContextAware) {
                ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
                ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
            }
            MapReduceSpecification spec = program.getApplicationSpecification().getMapReduce().get(program.getName());
            MetricsCollectionService metricsCollectionService = null;
            MapReduceMetrics.TaskType taskType = null;
            String taskId = null;
            ProgramOptions options = contextConfig.getProgramOptions();
            // taskAttemptId is null when invoked from a Partitioner or a RawComparator
            if (taskAttemptId != null) {
                taskId = taskAttemptId.getTaskID().toString();
                if (MapReduceMetrics.TaskType.hasType(taskAttemptId.getTaskType())) {
                    taskType = MapReduceMetrics.TaskType.from(taskAttemptId.getTaskType());
                    // if this is not for a mapper or a reducer, we don't need the metrics collection service
                    metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
                    options = new SimpleProgramOptions(options.getProgramId(), options.getArguments(),
                        new BasicArguments(RuntimeArguments.extractScope(
                            "task", taskType.toString().toLowerCase(),
                            contextConfig.getProgramOptions().getUserArguments().asMap())),
                        options.isDebug());
                }
            }
            CConfiguration cConf = injector.getInstance(CConfiguration.class);
            TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
            return new BasicMapReduceTaskContext(program, options, cConf, taskType, taskId, spec,
                workflowInfo, discoveryServiceClient, metricsCollectionService, txClient, tx,
                programDatasetFramework, classLoader.getPluginInstantiator(),
                contextConfig.getLocalizedResources(), secureStore, secureStoreManager,
                authorizationEnforcer, authenticationContext, messagingService, mapReduceClassLoader);
        }
    };
}
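The snippet above hands a Transaction from the MapReduce driver to each task by serializing it with Tephra's TransactionCodec into a file on the job's file system. A minimal sketch of that round trip, assuming only a TransactionSystemClient wired up elsewhere (class and method names here are illustrative, not from the CDAP source):

import java.io.IOException;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;
import org.apache.tephra.TransactionSystemClient;

public final class TxHandoffSketch {
    public static Transaction roundTrip(TransactionSystemClient txClient) throws IOException {
        TransactionCodec codec = new TransactionCodec();
        Transaction tx = txClient.startShort();
        // What the driver would write into the tx file...
        byte[] encoded = codec.encode(tx);
        // ...and what each task reads back, as in load() above.
        Transaction decoded = codec.decode(encoded);
        txClient.abort(tx); // sketch only: release the transaction
        return decoded;
    }
}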
Use of org.apache.tephra.Transaction in project phoenix by apache.
Class TephraTransactionContext, method getVisibilityLevel:
@Override
public PhoenixVisibilityLevel getVisibilityLevel() {
    Transaction tx = getCurrentTransaction();
    assert (tx != null);
    VisibilityLevel visibilityLevel = tx.getVisibilityLevel();
    PhoenixVisibilityLevel phoenixVisibilityLevel;
    switch (visibilityLevel) {
        case SNAPSHOT:
            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT;
            break;
        case SNAPSHOT_EXCLUDE_CURRENT:
            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
            break;
        case SNAPSHOT_ALL:
            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_ALL;
            break;
        default:
            phoenixVisibilityLevel = null;
    }
    return phoenixVisibilityLevel;
}
Use of org.apache.tephra.Transaction in project phoenix by apache.
Class TephraTransactionContext, method setVisibilityLevel:
@Override
public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
    VisibilityLevel tephraVisibilityLevel = null;
    switch (visibilityLevel) {
        case SNAPSHOT:
            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT;
            break;
        case SNAPSHOT_EXCLUDE_CURRENT:
            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
            break;
        case SNAPSHOT_ALL:
            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL;
            break;
        default:
            assert (false);
    }
    Transaction tx = getCurrentTransaction();
    assert (tx != null);
    tx.setVisibility(tephraVisibilityLevel);
}
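Both methods above are thin translations between Phoenix's and Tephra's visibility enums. A minimal sketch of using the Tephra side directly, saving and restoring the level around a read that must see the transaction's own uncommitted writes (tx is assumed to come from a running transaction context):

import org.apache.tephra.Transaction;
import org.apache.tephra.Transaction.VisibilityLevel;

VisibilityLevel previous = tx.getVisibilityLevel();
tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
try {
    // ... reads that should observe this transaction's own uncommitted writes ...
} finally {
    tx.setVisibility(previous); // restore the original visibility level
}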
Use of org.apache.tephra.Transaction in project cdap by caskdata.
Class TransactionHttpHandler, method invalidList:
@Path("/transactions/invalid")
@GET
public void invalidList(HttpRequest request, HttpResponder responder,
                        @QueryParam("limit") @DefaultValue("-1") int limit) {
    Transaction tx = txClient.startShort();
    txClient.abort(tx);
    long[] invalids = tx.getInvalids();
    if (limit == -1) {
        responder.sendJson(HttpResponseStatus.OK, GSON.toJson(invalids));
        return;
    }
    responder.sendJson(HttpResponseStatus.OK,
        GSON.toJson(Arrays.copyOf(invalids, Math.min(limit, invalids.length))));
}
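The handler obtains the invalid list by starting a throwaway short transaction, whose snapshot carries the current invalid set, and aborting it immediately. The same pattern in isolation, assuming an injected TransactionSystemClient named txClient:

Transaction tx = txClient.startShort();
try {
    long[] invalids = tx.getInvalids(); // ids of invalidated transactions in this snapshot
    System.out.println("invalid transactions: " + invalids.length);
} finally {
    txClient.abort(tx); // nothing was written; just release the transaction
}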
Use of org.apache.tephra.Transaction in project phoenix by apache.
Class PhoenixIndexMetaData, method getIndexMetaData:
private static IndexMetaDataCache getIndexMetaData(RegionCoprocessorEnvironment env,
        Map<String, byte[]> attributes) throws IOException {
    if (attributes == null) {
        return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    }
    byte[] uuid = attributes.get(PhoenixIndexCodec.INDEX_UUID);
    if (uuid == null) {
        return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    }
    byte[] md = attributes.get(PhoenixIndexCodec.INDEX_PROTO_MD);
    boolean useProto = md != null;
    if (md == null) {
        md = attributes.get(PhoenixIndexCodec.INDEX_MD);
    }
    byte[] txState = attributes.get(BaseScannerRegionObserver.TX_STATE);
    if (md != null) {
        final List<IndexMaintainer> indexMaintainers = IndexMaintainer.deserialize(md, useProto);
        final Transaction txn = MutationState.decodeTransaction(txState);
        return new IndexMetaDataCache() {

            @Override
            public void close() throws IOException {
            }

            @Override
            public List<IndexMaintainer> getIndexMaintainers() {
                return indexMaintainers;
            }

            @Override
            public Transaction getTransaction() {
                return txn;
            }
        };
    } else {
        byte[] tenantIdBytes = attributes.get(PhoenixRuntime.TENANT_ID_ATTRIB);
        ImmutableBytesPtr tenantId = tenantIdBytes == null ? null : new ImmutableBytesPtr(tenantIdBytes);
        TenantCache cache = GlobalCache.getTenantCache(env, tenantId);
        IndexMetaDataCache indexCache = (IndexMetaDataCache) cache.getServerCache(new ImmutableBytesPtr(uuid));
        if (indexCache == null) {
            String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion()
                    + " host=" + env.getRegionServerServices().getServerName();
            SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND)
                    .setMessage(msg).build().buildException();
            // will not return
            ServerUtil.throwIOException("Index update failed", e);
        }
        return indexCache;
    }
}
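When the index metadata travels with the mutation, the transaction state arrives as the serialized bytes of the BaseScannerRegionObserver.TX_STATE attribute and is decoded with MutationState.decodeTransaction, as above. A minimal sketch of inspecting the decoded transaction, assuming txState holds those attribute bytes:

Transaction txn = MutationState.decodeTransaction(txState);
long readPointer = txn.getReadPointer();   // snapshot the mutation reads at
long writePointer = txn.getWritePointer(); // version its writes are tagged with
boolean selfVisible = txn.isVisible(writePointer); // whether its own writes are visible at the current level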