Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
The class MultiRowMutationProcessor, method process.
@Override
public void process(long now, HRegion region, List<Mutation> mutationsToApply, WALEdit walEdit) throws IOException {
byte[] byteNow = Bytes.toBytes(now);
// Check mutations
for (Mutation m : this.mutations) {
if (m instanceof Put) {
Map<byte[], List<Cell>> familyMap = m.getFamilyCellMap();
region.checkFamilies(familyMap.keySet());
region.checkTimestamps(familyMap, now);
region.updateCellTimestamps(familyMap.values(), byteNow);
} else if (m instanceof Delete) {
Delete d = (Delete) m;
region.prepareDelete(d);
region.prepareDeleteTimestamps(d, d.getFamilyCellMap(), byteNow);
} else {
throw new DoNotRetryIOException("Action must be Put or Delete. But was: " + m.getClass().getName());
}
mutationsToApply.add(m);
}
// Apply edits to a single WALEdit
for (Mutation m : mutations) {
for (List<Cell> cells : m.getFamilyCellMap().values()) {
boolean writeToWAL = m.getDurability() != Durability.SKIP_WAL;
for (Cell cell : cells) {
if (writeToWAL)
walEdit.add(cell);
}
}
}
}
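MultiRowMutationProcessor backs MultiRowMutationEndpoint, which lets a client apply Puts and Deletes to several rows atomically, provided all rows live in the same region. Below is a minimal client-side sketch of invoking that endpoint; it assumes the endpoint coprocessor is loaded on the table and uses the generated protobuf stubs (MutateRowsRequest, MultiRowMutationService) of this HBase line, so exact names may vary by release.
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
Delete delete = new Delete(Bytes.toBytes("row2"));
MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, put));
mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, delete));
// Route the RPC through the region that hosts "row1"; all rows must be co-located.
CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row1"));
MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
// Anything other than Put/Delete would fail with the DoNotRetryIOException seen above.
service.mutateRows(null, mrmBuilder.build());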
Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
The class RSRpcServices, method doBatchOp.
/**
 * Execute a list of Put/Delete mutations.
 *
 * @param builder the RegionActionResult builder that collects a per-action result or exception
 * @param region the region to apply the mutations to
 * @param quota the operation quota that each mutation is charged against
 * @param mutations the list of actions to execute
 * @param cells the CellScanner carrying the cells referenced by the actions
 */
private void doBatchOp(final RegionActionResult.Builder builder, final Region region, final OperationQuota quota, final List<ClientProtos.Action> mutations, final CellScanner cells) {
Mutation[] mArray = new Mutation[mutations.size()];
long before = EnvironmentEdgeManager.currentTime();
boolean batchContainsPuts = false, batchContainsDelete = false;
try {
int i = 0;
for (ClientProtos.Action action : mutations) {
MutationProto m = action.getMutation();
Mutation mutation;
if (m.getMutateType() == MutationType.PUT) {
mutation = ProtobufUtil.toPut(m, cells);
batchContainsPuts = true;
} else {
mutation = ProtobufUtil.toDelete(m, cells);
batchContainsDelete = true;
}
mArray[i++] = mutation;
quota.addMutation(mutation);
}
if (!region.getRegionInfo().isMetaTable()) {
regionServer.cacheFlusher.reclaimMemStoreMemory();
}
OperationStatus[] codes = region.batchMutate(mArray, HConstants.NO_NONCE, HConstants.NO_NONCE);
for (i = 0; i < codes.length; i++) {
int index = mutations.get(i).getIndex();
Exception e = null;
switch(codes[i].getOperationStatusCode()) {
case BAD_FAMILY:
e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
builder.addResultOrException(getResultOrException(e, index));
break;
case SANITY_CHECK_FAILURE:
e = new FailedSanityCheckException(codes[i].getExceptionMsg());
builder.addResultOrException(getResultOrException(e, index));
break;
case SUCCESS:
builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), index));
break;
default:
e = new DoNotRetryIOException(codes[i].getExceptionMsg());
builder.addResultOrException(getResultOrException(e, index));
break;
}
}
} catch (IOException ie) {
for (int i = 0; i < mutations.size(); i++) {
builder.addResultOrException(getResultOrException(ie, mutations.get(i).getIndex()));
}
}
if (regionServer.metricsRegionServer != null) {
long after = EnvironmentEdgeManager.currentTime();
if (batchContainsPuts) {
regionServer.metricsRegionServer.updatePut(after - before);
}
if (batchContainsDelete) {
regionServer.metricsRegionServer.updateDelete(after - before);
}
}
}
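For context, the mutations that doBatchOp unpacks typically originate from a client-side Table.batch call, which ships mixed Puts and Deletes to the server in one RPC. A minimal sketch, assuming an open Connection and a table named "t1" (both hypothetical):
static void sendBatch(Connection connection) throws IOException, InterruptedException {
  List<Row> actions = new ArrayList<>();
  Put put = new Put(Bytes.toBytes("row1"));
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  actions.add(put);
  actions.add(new Delete(Bytes.toBytes("row2")));
  Object[] results = new Object[actions.size()];
  try (Table table = connection.getTable(TableName.valueOf("t1"))) {
    // Each results slot ends up holding a Result on success or a Throwable on
    // failure, mirroring the per-index ResultOrException built by doBatchOp.
    table.batch(actions, results);
  }
}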
Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
The class HRegion, method processRowsWithLocks.
@Override
public void processRowsWithLocks(RowProcessor<?, ?> processor, long timeout, long nonceGroup, long nonce) throws IOException {
for (byte[] row : processor.getRowsToLock()) {
checkRow(row, "processRowsWithLocks");
}
if (!processor.readOnly()) {
checkReadOnly();
}
checkResources();
startRegionOperation();
WALEdit walEdit = new WALEdit();
// STEP 1. Run pre-process hook
preProcess(processor, walEdit);
// Short circuit the read only case
if (processor.readOnly()) {
try {
long now = EnvironmentEdgeManager.currentTime();
doProcessRowWithTimeout(processor, now, this, null, null, timeout);
processor.postProcess(this, walEdit, true);
} finally {
closeRegionOperation();
}
return;
}
boolean locked = false;
List<RowLock> acquiredRowLocks = null;
List<Mutation> mutations = new ArrayList<>();
Collection<byte[]> rowsToLock = processor.getRowsToLock();
// This is assigned by mvcc either explicitly in the below or in the guts of the WAL append
// when it assigns the edit a sequence id (a.k.a. the mvcc write number).
WriteEntry writeEntry = null;
MemstoreSize memstoreSize = new MemstoreSize();
try {
boolean success = false;
try {
// STEP 2. Acquire the row lock(s)
acquiredRowLocks = new ArrayList<>(rowsToLock.size());
for (byte[] row : rowsToLock) {
// Attempt to lock all involved rows, throw if any lock times out
// use a writer lock for mixed reads and writes
acquiredRowLocks.add(getRowLockInternal(row, false));
}
// STEP 3. Region lock
lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size());
locked = true;
long now = EnvironmentEdgeManager.currentTime();
// STEP 4. Let the processor scan the rows, generate mutations and add waledits
doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout);
if (!mutations.isEmpty()) {
// STEP 5. Call the preBatchMutate hook
processor.preBatchMutate(this, walEdit);
// STEP 6. Append and sync if walEdit has data to write out.
if (!walEdit.isEmpty()) {
writeEntry = doWALAppend(walEdit, getEffectiveDurability(processor.useDurability()), processor.getClusterIds(), now, nonceGroup, nonce);
} else {
// We are here if WAL is being skipped.
writeEntry = this.mvcc.begin();
}
// STEP 7. Apply to memstore
long sequenceId = writeEntry.getWriteNumber();
for (Mutation m : mutations) {
// Handle any tag based cell features.
// TODO: Do we need to call rewriteCellTags down in applyToMemstore()? Why not before
// so tags go into WAL?
rewriteCellTags(m.getFamilyCellMap(), m);
for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) {
Cell cell = cellScanner.current();
if (walEdit.isEmpty()) {
// If walEdit is empty, we put nothing in WAL. WAL stamps Cells with sequence id.
// If no WAL, need to stamp it here.
CellUtil.setSequenceId(cell, sequenceId);
}
applyToMemstore(getHStore(cell), cell, memstoreSize);
}
}
// STEP 8. call postBatchMutate hook
processor.postBatchMutate(this);
// STEP 9. Complete mvcc.
mvcc.completeAndWait(writeEntry);
writeEntry = null;
// STEP 10. Release region lock
if (locked) {
this.updatesLock.readLock().unlock();
locked = false;
}
// STEP 11. Release row lock(s)
releaseRowLocks(acquiredRowLocks);
}
success = true;
} finally {
// Call complete rather than completeAndWait; if writeEntry is still non-null here, an error likely occurred.
if (writeEntry != null)
mvcc.complete(writeEntry);
if (locked) {
this.updatesLock.readLock().unlock();
}
// release locks if some were acquired but another timed out
releaseRowLocks(acquiredRowLocks);
}
// STEP 12. Run post-process hook
processor.postProcess(this, walEdit, success);
} finally {
closeRegionOperation();
if (!mutations.isEmpty()) {
long newSize = this.addAndGetMemstoreSize(memstoreSize);
requestFlushIfNeeded(newSize);
}
}
}
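A RowProcessor such as the MultiRowMutationProcessor shown earlier is what gets handed to this method, usually from an endpoint running inside the region. A hedged sketch of such a call, assuming the four-argument overload above, MultiRowMutationProcessor's (mutations, rowsToLock) constructor, and a 60-second timeout:
Collection<byte[]> rowsToLock = Arrays.asList(Bytes.toBytes("row1"), Bytes.toBytes("row2"));
List<Mutation> mutations = new ArrayList<>();
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
mutations.add(put);
mutations.add(new Delete(Bytes.toBytes("row2")));
MultiRowMutationProcessor processor = new MultiRowMutationProcessor(mutations, rowsToLock);
// No nonce bookkeeping for this call; both rows are locked before the edits apply.
region.processRowsWithLocks(processor, 60000L, HConstants.NO_NONCE, HConstants.NO_NONCE);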
Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
The class HRegion, method checkBatchOp.
private boolean checkBatchOp(BatchOperation<?> batchOp, final int lastIndexExclusive, final Map<byte[], List<Cell>>[] familyMaps, final long now, final ObservedExceptionsInBatch observedExceptions) throws IOException {
boolean skip = false;
// Skip anything that "ran" already
if (batchOp.retCodeDetails[lastIndexExclusive].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
return true;
}
Mutation mutation = batchOp.getMutation(lastIndexExclusive);
Map<byte[], List<Cell>> familyMap = mutation.getFamilyCellMap();
// store the family map reference to allow for mutations
familyMaps[lastIndexExclusive] = familyMap;
try {
checkAndPrepareMutation(mutation, batchOp.isInReplay(), familyMap, now);
} catch (NoSuchColumnFamilyException nscf) {
final String msg = "No such column family in batch mutation. ";
if (observedExceptions.hasSeenNoSuchFamily()) {
LOG.warn(msg + nscf.getMessage());
} else {
LOG.warn(msg, nscf);
observedExceptions.sawNoSuchFamily();
}
batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(OperationStatusCode.BAD_FAMILY, nscf.getMessage());
skip = true;
} catch (FailedSanityCheckException fsce) {
final String msg = "Batch Mutation did not pass sanity check. ";
if (observedExceptions.hasSeenFailedSanityCheck()) {
LOG.warn(msg + fsce.getMessage());
} else {
LOG.warn(msg, fsce);
observedExceptions.sawFailedSanityCheck();
}
batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(OperationStatusCode.SANITY_CHECK_FAILURE, fsce.getMessage());
skip = true;
} catch (WrongRegionException we) {
final String msg = "Batch mutation had a row that does not belong to this region. ";
if (observedExceptions.hasSeenWrongRegion()) {
LOG.warn(msg + we.getMessage());
} else {
LOG.warn(msg, we);
observedExceptions.sawWrongRegion();
}
batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(OperationStatusCode.SANITY_CHECK_FAILURE, we.getMessage());
skip = true;
}
return skip;
}
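ObservedExceptionsInBatch exists purely to keep batch logging bounded: the first failure of each kind in a batch is logged with its stack trace, and repeats log the message alone. A generic sketch of the same log-once idiom using a plain flag (names hypothetical, not the HBase helper itself; assumes a LOG field as in the code above):
private boolean seenNoSuchFamily = false;

void warnNoSuchFamilyOnce(NoSuchColumnFamilyException e) {
  final String msg = "No such column family in batch mutation. ";
  if (seenNoSuchFamily) {
    // Repeat within this batch: log the message only, no stack trace.
    LOG.warn(msg + e.getMessage());
  } else {
    // First sighting: log the full stack trace once, then suppress.
    LOG.warn(msg, e);
    seenNoSuchFamily = true;
  }
}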
Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
The class HRegion, method doMiniBatchMutate.
/**
* Called to do a piece of the batch that came in to {@link #batchMutate(Mutation[], long, long)}.
* In here we also handle replay of edits on region recovery.
* @return Change in size brought about by applying <code>batchOp</code>
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK", justification = "Findbugs seems to be confused on this.")
@SuppressWarnings("unchecked")
// TODO: This needs a rewrite. Doesn't have to be this long. St.Ack 20160120
private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
boolean replay = batchOp.isInReplay();
long currentNonceGroup = HConstants.NO_NONCE;
long currentNonce = HConstants.NO_NONCE;
WALEdit walEdit = null;
boolean locked = false;
// reference family maps directly so coprocessors can mutate them if desired
Map<byte[], List<Cell>>[] familyMaps = new Map[batchOp.operations.length];
// We try to set up a batch in the range [firstIndex,lastIndexExclusive)
int firstIndex = batchOp.nextIndexToProcess;
int lastIndexExclusive = firstIndex;
boolean success = false;
int noOfPuts = 0;
int noOfDeletes = 0;
WriteEntry writeEntry = null;
int cellCount = 0;
// Keep track of the locks we hold so we can release them in the finally clause.
List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.operations.length);
MemstoreSize memstoreSize = new MemstoreSize();
final ObservedExceptionsInBatch observedExceptions = new ObservedExceptionsInBatch();
try {
// STEP 1. Try to acquire as many locks as we can, and ensure we acquire at least one.
int numReadyToWrite = 0;
long now = EnvironmentEdgeManager.currentTime();
while (lastIndexExclusive < batchOp.operations.length) {
if (checkBatchOp(batchOp, lastIndexExclusive, familyMaps, now, observedExceptions)) {
lastIndexExclusive++;
continue;
}
Mutation mutation = batchOp.getMutation(lastIndexExclusive);
// If we haven't got any rows in our batch, we should block to get the next one.
RowLock rowLock = null;
try {
rowLock = getRowLockInternal(mutation.getRow(), true);
} catch (TimeoutIOException e) {
// We will retry on other exceptions, but we should stop if we time out.
throw e;
} catch (IOException ioe) {
LOG.warn("Failed getting lock, row=" + Bytes.toStringBinary(mutation.getRow()), ioe);
}
if (rowLock == null) {
// Stop acquiring more rows for this batch
break;
} else {
acquiredRowLocks.add(rowLock);
}
lastIndexExclusive++;
numReadyToWrite++;
if (replay) {
for (List<Cell> cells : mutation.getFamilyCellMap().values()) {
cellCount += cells.size();
}
}
}
// We've now grabbed as many mutations off the list as we can
// STEP 2. Update any LATEST_TIMESTAMP timestamps
// We should record the timestamp only after we have acquired the rowLock,
// otherwise, newer puts/deletes are not guaranteed to have a newer timestamp
now = EnvironmentEdgeManager.currentTime();
byte[] byteNow = Bytes.toBytes(now);
// Nothing to put/delete -- an exception in the above such as NoSuchColumnFamily?
if (numReadyToWrite <= 0) {
return;
}
for (int i = firstIndex; !replay && i < lastIndexExclusive; i++) {
// skip invalid
if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
// lastIndexExclusive was incremented above.
continue;
}
Mutation mutation = batchOp.getMutation(i);
if (mutation instanceof Put) {
updateCellTimestamps(familyMaps[i].values(), byteNow);
noOfPuts++;
} else {
prepareDeleteTimestamps(mutation, familyMaps[i], byteNow);
noOfDeletes++;
}
rewriteCellTags(familyMaps[i], mutation);
WALEdit fromCP = batchOp.walEditsFromCoprocessors[i];
if (fromCP != null) {
cellCount += fromCP.size();
}
if (getEffectiveDurability(mutation.getDurability()) != Durability.SKIP_WAL) {
for (List<Cell> cells : familyMaps[i].values()) {
cellCount += cells.size();
}
}
}
lock(this.updatesLock.readLock(), numReadyToWrite);
locked = true;
// calling the pre CP hook for batch mutation
if (!replay && coprocessorHost != null) {
MiniBatchOperationInProgress<Mutation> miniBatchOp = new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(), batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
if (coprocessorHost.preBatchMutate(miniBatchOp)) {
return;
} else {
for (int i = firstIndex; i < lastIndexExclusive; i++) {
if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
// lastIndexExclusive was incremented above.
continue;
}
// we pass (i - firstIndex) below since the call expects a relative index
Mutation[] cpMutations = miniBatchOp.getOperationsFromCoprocessors(i - firstIndex);
if (cpMutations == null) {
continue;
}
Mutation mutation = batchOp.getMutation(i);
boolean skipWal = getEffectiveDurability(mutation.getDurability()) == Durability.SKIP_WAL;
// Else Coprocessor added more Mutations corresponding to the Mutation at this index.
for (int j = 0; j < cpMutations.length; j++) {
Mutation cpMutation = cpMutations[j];
Map<byte[], List<Cell>> cpFamilyMap = cpMutation.getFamilyCellMap();
checkAndPrepareMutation(cpMutation, replay, cpFamilyMap, now);
// Acquire row locks. If not, the whole batch will fail.
acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true));
// Returned mutations from the coprocessor correspond to the Mutation at index i. We can
// directly add the cells from those mutations to the familyMaps of this mutation;
// they will get added to the memstore later.
mergeFamilyMaps(familyMaps[i], cpFamilyMap);
// The durability of the returned mutation is replaced by that of the original mutation;
// if the original mutation uses SKIP_WAL, don't count the cells of the returned mutation.
if (!skipWal) {
for (List<Cell> cells : cpFamilyMap.values()) {
cellCount += cells.size();
}
}
}
}
}
}
// STEP 3. Build WAL edit
walEdit = new WALEdit(cellCount, replay);
Durability durability = Durability.USE_DEFAULT;
for (int i = firstIndex; i < lastIndexExclusive; i++) {
// Skip puts that were determined to be invalid during preprocessing
if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
continue;
}
Mutation m = batchOp.getMutation(i);
Durability tmpDur = getEffectiveDurability(m.getDurability());
if (tmpDur.ordinal() > durability.ordinal()) {
durability = tmpDur;
}
// we use durability of the original mutation for the mutation passed by CP.
if (tmpDur == Durability.SKIP_WAL) {
recordMutationWithoutWal(m.getFamilyCellMap());
continue;
}
long nonceGroup = batchOp.getNonceGroup(i);
long nonce = batchOp.getNonce(i);
// In replay, the batch may contain multiple nonces. If so, write a WALEdit for each.
// Given how nonces are originally written, these should be contiguous.
// They don't have to be, it will still work, just write more WALEdits than needed.
if (nonceGroup != currentNonceGroup || nonce != currentNonce) {
// Write what we have so far for nonces out to WAL
appendCurrentNonces(m, replay, walEdit, now, currentNonceGroup, currentNonce);
walEdit = new WALEdit(cellCount, replay);
currentNonceGroup = nonceGroup;
currentNonce = nonce;
}
// Add WAL edits by CP
WALEdit fromCP = batchOp.walEditsFromCoprocessors[i];
if (fromCP != null) {
for (Cell cell : fromCP.getCells()) {
walEdit.add(cell);
}
}
addFamilyMapToWALEdit(familyMaps[i], walEdit);
}
// STEP 4. Append the final edit to WAL and sync.
Mutation mutation = batchOp.getMutation(firstIndex);
WALKey walKey = null;
long txid;
if (replay) {
// use wal key from the original
walKey = new WALKey(this.getRegionInfo().getEncodedNameAsBytes(), this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc);
walKey.setOrigLogSeqNum(batchOp.getReplaySequenceId());
if (!walEdit.isEmpty()) {
txid = this.wal.append(this.getRegionInfo(), walKey, walEdit, true);
if (txid != 0) {
sync(txid, durability);
}
}
} else {
try {
if (!walEdit.isEmpty()) {
// Not in replay: create a fresh WALKey (with this region's replication scope) for the append.
walKey = new WALKey(this.getRegionInfo().getEncodedNameAsBytes(), this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc, this.getReplicationScope());
// TODO: Use the doAppend methods below... complicated by the replay stuff above.
txid = this.wal.append(this.getRegionInfo(), walKey, walEdit, true);
if (txid != 0) {
sync(txid, durability);
}
if (writeEntry == null) {
// if MVCC not preassigned, wait here until assigned
writeEntry = walKey.getWriteEntry();
}
}
} catch (IOException ioe) {
if (walKey != null && writeEntry == null) {
// the writeEntry is not preassigned and error occurred during append or sync
mvcc.complete(walKey.getWriteEntry());
}
throw ioe;
}
}
if (walKey == null) {
// If no walKey, then not in replay and skipping WAL or some such. Begin an MVCC transaction
// to get sequence id.
writeEntry = mvcc.begin();
}
// STEP 5. Write back to memstore
for (int i = firstIndex; i < lastIndexExclusive; i++) {
if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
continue;
}
// We need to update the sequence id for following reasons.
// 1) If the op is in replay mode, FSWALEntry#stampRegionSequenceId won't stamp sequence id.
// 2) If no WAL, FSWALEntry won't be used
// we use durability of the original mutation for the mutation passed by CP.
boolean updateSeqId = replay || batchOp.getMutation(i).getDurability() == Durability.SKIP_WAL;
if (updateSeqId) {
this.updateSequenceId(familyMaps[i].values(), replay ? batchOp.getReplaySequenceId() : writeEntry.getWriteNumber());
}
applyFamilyMapToMemstore(familyMaps[i], memstoreSize);
}
// calling the post CP hook for batch mutation
if (!replay && coprocessorHost != null) {
MiniBatchOperationInProgress<Mutation> miniBatchOp = new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(), batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
coprocessorHost.postBatchMutate(miniBatchOp);
}
// STEP 6. Complete mvcc.
if (replay) {
this.mvcc.advanceTo(batchOp.getReplaySequenceId());
} else {
// writeEntry won't be null if not in replay mode
mvcc.completeAndWait(writeEntry);
writeEntry = null;
}
// STEP 7. Release row locks, etc.
if (locked) {
this.updatesLock.readLock().unlock();
locked = false;
}
releaseRowLocks(acquiredRowLocks);
for (int i = firstIndex; i < lastIndexExclusive; i++) {
if (batchOp.retCodeDetails[i] == OperationStatus.NOT_RUN) {
batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
}
}
// STEP 8. Run coprocessor post hooks. This should be done after the WAL is
// synced so that the coprocessor contract is adhered to.
if (!replay && coprocessorHost != null) {
for (int i = firstIndex; i < lastIndexExclusive; i++) {
// only for successful puts
if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
continue;
}
Mutation m = batchOp.getMutation(i);
if (m instanceof Put) {
coprocessorHost.postPut((Put) m, walEdit, m.getDurability());
} else {
coprocessorHost.postDelete((Delete) m, walEdit, m.getDurability());
}
}
}
success = true;
} finally {
// Call complete rather than completeAndWait; if writeEntry is still non-null here, an error likely occurred during append/sync.
if (writeEntry != null)
mvcc.complete(writeEntry);
this.addAndGetMemstoreSize(memstoreSize);
if (locked) {
this.updatesLock.readLock().unlock();
}
releaseRowLocks(acquiredRowLocks);
if (noOfPuts > 0) {
// There were some Puts in the batch.
if (this.metricsRegion != null) {
this.metricsRegion.updatePut();
}
}
if (noOfDeletes > 0) {
// There were some Deletes in the batch.
if (this.metricsRegion != null) {
this.metricsRegion.updateDelete();
}
}
if (!success) {
for (int i = firstIndex; i < lastIndexExclusive; i++) {
if (batchOp.retCodeDetails[i].getOperationStatusCode() == OperationStatusCode.NOT_RUN) {
batchOp.retCodeDetails[i] = OperationStatus.FAILURE;
}
}
}
if (coprocessorHost != null && !batchOp.isInReplay()) {
// call the coprocessor hook to do any finalization steps
// after the put is done
MiniBatchOperationInProgress<Mutation> miniBatchOp = new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(), batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
coprocessorHost.postBatchMutateIndispensably(miniBatchOp, success);
}
batchOp.nextIndexToProcess = lastIndexExclusive;
}
}
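doMiniBatchMutate is driven from batchMutate(Mutation[], long, long), which keeps calling it until nextIndexToProcess reaches the end of the operations array. A hedged sketch of that entry point as server-side code (a test or coprocessor) might use it, consuming the OperationStatus array the same way doBatchOp does above:
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
Mutation[] batch = new Mutation[] { put, new Delete(Bytes.toBytes("row2")) };
OperationStatus[] statuses = region.batchMutate(batch, HConstants.NO_NONCE, HConstants.NO_NONCE);
for (OperationStatus status : statuses) {
  // SUCCESS, BAD_FAMILY, SANITY_CHECK_FAILURE, or FAILURE per mutation.
  System.out.println(status.getOperationStatusCode());
}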