Use of org.apache.hadoop.hbase.util.NonceKey in project hbase by apache.
The class ProcedureTestingUtility, method submitProcedure.
public static <TEnv> long submitProcedure(ProcedureExecutor<TEnv> procExecutor, Procedure proc,
    final long nonceGroup, final long nonce) {
  final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce);
  // registerNonce returns a negative value when the nonce was not yet associated with a procedure
  long procId = procExecutor.registerNonce(nonceKey);
  assertFalse(procId >= 0);
  return procExecutor.submitProcedure(proc, nonceKey);
}
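As a rough usage sketch (not taken from the HBase sources), a test could submit a procedure with an explicit nonce through this helper and then verify that re-registering the same NonceKey resolves to the procId that was just assigned; procExecutor and TestProcedure are assumed to come from the test fixture:

long nonceGroup = 123;
long nonce = 2222;
long procId = ProcedureTestingUtility.submitProcedure(
    procExecutor, new TestProcedure(), nonceGroup, nonce);
// a second registration of the same nonce returns the procId reserved above,
// which is how duplicate client submissions are de-duplicated
NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce);
assertEquals(procId, procExecutor.registerNonce(nonceKey));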
Use of org.apache.hadoop.hbase.util.NonceKey in project hbase by apache.
The class ProcedureUtil, method convertToProcedure.
/**
 * Helper to convert the protobuf procedure.
 * <p/>
 * Used by ProcedureStore implementations.
 * <p/>
 * TODO: OPTIMIZATION: some of the fields never change during execution (e.g. className,
 * procId, parentId, ...). We could split them into 'data' and 'state', and the store could take
 * advantage of that by persisting the 'data' part only on insert().
 */
public static Procedure<?> convertToProcedure(ProcedureProtos.Procedure proto) throws IOException {
  // Procedure from class name
  Procedure<?> proc = newProcedure(proto.getClassName());
  // set fields
  proc.setProcId(proto.getProcId());
  proc.setState(proto.getState());
  proc.setSubmittedTime(proto.getSubmittedTime());
  proc.setLastUpdate(proto.getLastUpdate());
  if (proto.hasParentId()) {
    proc.setParentProcId(proto.getParentId());
  }
  if (proto.hasOwner()) {
    proc.setOwner(proto.getOwner());
  }
  if (proto.hasTimeout()) {
    proc.setTimeout(proto.getTimeout());
  }
  if (proto.getStackIdCount() > 0) {
    proc.setStackIndexes(proto.getStackIdList());
  }
  if (proto.hasException()) {
    assert proc.getState() == ProcedureProtos.ProcedureState.FAILED
        || proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK
        : "The procedure must be failed (waiting to rollback) or rolledback";
    proc.setFailure(RemoteProcedureException.fromProto(proto.getException()));
  }
  if (proto.hasResult()) {
    proc.setResult(proto.getResult().toByteArray());
  }
  if (proto.getNonce() != HConstants.NO_NONCE) {
    proc.setNonceKey(new NonceKey(proto.getNonceGroup(), proto.getNonce()));
  }
  if (proto.getLocked()) {
    proc.lockedWhenLoading();
  }
  if (proto.getBypass()) {
    proc.bypass(null);
  }
  ProcedureStateSerializer serializer = null;
  if (proto.getStateMessageCount() > 0) {
    serializer = new StateSerializer(proto.toBuilder());
  } else if (proto.hasStateData()) {
    InputStream inputStream = proto.getStateData().newInput();
    serializer = new CompatStateSerializer(inputStream);
  }
  if (serializer != null) {
    proc.deserializeStateData(serializer);
  }
  return proc;
}
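A hedged round-trip sketch of how this converter pairs with ProcedureUtil.convertToProtoProcedure; SomeProcedure is a placeholder for any concrete Procedure implementation, and the snippet is assumed to run where the protected setNonceKey used above is visible (e.g. the same package):

SomeProcedure original = new SomeProcedure();
original.setNonceKey(new NonceKey(nonceGroup, nonce)); // same setter the converter above uses
ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(original);
Procedure<?> restored = ProcedureUtil.convertToProcedure(proto);
// identity fields survive the round trip, including the NonceKey rebuilt from the
// persisted nonceGroup/nonce pair
assert restored.getProcId() == original.getProcId();
assert restored.getNonceKey().equals(original.getNonceKey());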
Use of org.apache.hadoop.hbase.util.NonceKey in project hbase by apache.
The class CompletedProcedureCleaner, method periodicExecute.
@Override
protected void periodicExecute(final TEnvironment env) {
  if (completed.isEmpty()) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No completed procedures to cleanup.");
    }
    return;
  }
  final long evictTtl =
      conf.getInt(ProcedureExecutor.EVICT_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_EVICT_TTL);
  final long evictAckTtl = conf.getInt(ProcedureExecutor.EVICT_ACKED_TTL_CONF_KEY,
      ProcedureExecutor.DEFAULT_ACKED_EVICT_TTL);
  final int batchSize = conf.getInt(BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE);
  final long[] batchIds = new long[batchSize];
  int batchCount = 0;
  final long now = EnvironmentEdgeManager.currentTime();
  final Iterator<Map.Entry<Long, CompletedProcedureRetainer<TEnvironment>>> it =
      completed.entrySet().iterator();
  while (it.hasNext() && store.isRunning()) {
    final Map.Entry<Long, CompletedProcedureRetainer<TEnvironment>> entry = it.next();
    final CompletedProcedureRetainer<TEnvironment> retainer = entry.getValue();
    final Procedure<?> proc = retainer.getProcedure();
    IdLock.Entry lockEntry;
    try {
      lockEntry = procExecutionLock.getLockEntry(proc.getProcId());
    } catch (IOException e) {
      // can only happen if interrupted, so not a big deal to propagate it
      throw new UncheckedIOException(e);
    }
    try {
      // TODO: Select TTL based on Procedure type
      if (retainer.isExpired(now, evictTtl, evictAckTtl)) {
        // Failed procedures aren't persisted in WAL.
        if (!(proc instanceof FailedProcedure)) {
          batchIds[batchCount++] = entry.getKey();
          if (batchCount == batchIds.length) {
            store.delete(batchIds, 0, batchCount);
            batchCount = 0;
          }
        }
        final NonceKey nonceKey = proc.getNonceKey();
        if (nonceKey != null) {
          nonceKeysToProcIdsMap.remove(nonceKey);
        }
        it.remove();
        LOG.trace("Evict completed {}", proc);
      }
    } finally {
      procExecutionLock.releaseLockEntry(lockEntry);
    }
  }
  if (batchCount > 0) {
    store.delete(batchIds, 0, batchCount);
  }
  // let the store do some cleanup work, i.e., delete the place marker used to preserve the max
  // procedure id.
  store.cleanup();
}
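The two TTLs read at the top of this method are ordinary Configuration values, so tuning them is a matter of setting the corresponding keys on the configuration handed to the ProcedureExecutor. A hedged sketch follows; the millisecond values are arbitrary examples, not the HBase defaults:

Configuration conf = HBaseConfiguration.create();
// keep completed procedures around for 5 minutes before they become evictable
conf.setInt(ProcedureExecutor.EVICT_TTL_CONF_KEY, 5 * 60 * 1000);
// procedures already acknowledged by the client may be evicted after 1 minute
conf.setInt(ProcedureExecutor.EVICT_ACKED_TTL_CONF_KEY, 60 * 1000);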
Use of org.apache.hadoop.hbase.util.NonceKey in project hbase by apache.
The class TestProcedureNonce, method testSetFailureResultForNonce.
@Test
public void testSetFailureResultForNonce() throws IOException {
  final long nonceGroup = 234;
  final long nonce = 55555;
  // check and register the request nonce
  final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce);
  assertFalse(procExecutor.registerNonce(nonceKey) >= 0);
  procExecutor.setFailureResultForNonce(nonceKey, "testProc", User.getCurrent(),
      new IOException("test failure"));
  final long procId = procExecutor.registerNonce(nonceKey);
  Procedure<?> result = procExecutor.getResult(procId);
  ProcedureTestingUtility.assertProcFailed(result);
}
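The pattern this test exercises can be sketched in a hedged form (the pre-flight check, procedure name, and nonce variables below are assumptions, not HBase master code): when validation fails before any procedure is submitted, recording the failure against the nonce lets a client retry carrying the same nonce observe the same error instead of re-running the operation.

NonceKey key = procExecutor.createNonceKey(clientNonceGroup, clientNonce);
if (procExecutor.registerNonce(key) < 0) {
  try {
    preFlightCheck(); // hypothetical validation that may throw IOException
  } catch (IOException e) {
    // record the failure so a retry with the same nonce resolves to this result
    procExecutor.setFailureResultForNonce(key, "someProc", User.getCurrent(), e);
    throw e;
  }
}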
Use of org.apache.hadoop.hbase.util.NonceKey in project hbase by apache.
The class ServerNonceManager, method getMvccFromOperationContext.
/**
 * Return the write point of the previously succeeded operation.
 * @param group Nonce group.
 * @param nonce Nonce.
 * @return the write point of the previously succeeded operation, or Long.MAX_VALUE if there is none.
 */
public long getMvccFromOperationContext(long group, long nonce) {
  if (nonce == HConstants.NO_NONCE) {
    return Long.MAX_VALUE;
  }
  NonceKey nk = new NonceKey(group, nonce);
  OperationContext result = nonces.get(nk);
  return result == null ? Long.MAX_VALUE : result.getMvcc();
}
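The counterpart call is addMvccToOperationContext, which records the write point once a nonce-protected mutation succeeds. A hedged pairing sketch, with nm an existing ServerNonceManager instance and the mvcc value purely illustrative:

long mvccWritePoint = 42L; // hypothetical write number of the completed mutation
nm.addMvccToOperationContext(group, nonce, mvccWritePoint);
// a retried duplicate carrying the same nonce can then read as of that point
long readPoint = nm.getMvccFromOperationContext(group, nonce); // 42 here
// Long.MAX_VALUE would mean no recorded operation for this nonce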