Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
Class RSRpcServices, method mutate.
/**
 * Mutate data in a table.
 *
 * @param rpcc the RPC controller
 * @param request the mutate request
 * @throws ServiceException if the mutation fails
 */
@Override
public MutateResponse mutate(final RpcController rpcc, final MutateRequest request)
    throws ServiceException {
  // The RPC controller is how we bring in data via the back door; it is un-protobuf'ed data.
  // It is also the conduit via which we pass back data.
  HBaseRpcController controller = (HBaseRpcController) rpcc;
  CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
  OperationQuota quota = null;
  RpcCallContext context = RpcServer.getCurrentCall();
  // Clear the scanner so we are not holding on to its reference across the call.
  if (controller != null) {
    controller.setCellScanner(null);
  }
  try {
    checkOpen();
    requestCount.increment();
    rpcMutateRequestCount.increment();
    Region region = getRegion(request.getRegion());
    MutateResponse.Builder builder = MutateResponse.newBuilder();
    MutationProto mutation = request.getMutation();
    if (!region.getRegionInfo().isMetaTable()) {
      regionServer.cacheFlusher.reclaimMemStoreMemory();
    }
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    Result r = null;
    Boolean processed = null;
    MutationType type = mutation.getMutateType();
    quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
    switch (type) {
      case APPEND:
        // TODO: this doesn't actually check anything.
        r = append(region, quota, mutation, cellScanner, nonceGroup);
        break;
      case INCREMENT:
        // TODO: this doesn't actually check anything.
        r = increment(region, quota, mutation, cellScanner, nonceGroup);
        break;
      case PUT:
        Put put = ProtobufUtil.toPut(mutation, cellScanner);
        quota.addMutation(put);
        if (request.hasCondition()) {
          Condition condition = request.getCondition();
          byte[] row = condition.getRow().toByteArray();
          byte[] family = condition.getFamily().toByteArray();
          byte[] qualifier = condition.getQualifier().toByteArray();
          CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
          ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
          if (region.getCoprocessorHost() != null) {
            processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier,
                compareOp, comparator, put);
          }
          if (processed == null) {
            boolean result = region.checkAndMutate(row, family, qualifier, compareOp,
                comparator, put, true);
            if (region.getCoprocessorHost() != null) {
              result = region.getCoprocessorHost().postCheckAndPut(row, family, qualifier,
                  compareOp, comparator, put, result);
            }
            processed = result;
          }
        } else {
          region.put(put);
          processed = Boolean.TRUE;
        }
        break;
      case DELETE:
        Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
        quota.addMutation(delete);
        if (request.hasCondition()) {
          Condition condition = request.getCondition();
          byte[] row = condition.getRow().toByteArray();
          byte[] family = condition.getFamily().toByteArray();
          byte[] qualifier = condition.getQualifier().toByteArray();
          CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
          ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
          if (region.getCoprocessorHost() != null) {
            processed = region.getCoprocessorHost().preCheckAndDelete(row, family, qualifier,
                compareOp, comparator, delete);
          }
          if (processed == null) {
            boolean result = region.checkAndMutate(row, family, qualifier, compareOp,
                comparator, delete, true);
            if (region.getCoprocessorHost() != null) {
              result = region.getCoprocessorHost().postCheckAndDelete(row, family, qualifier,
                  compareOp, comparator, delete, result);
            }
            processed = result;
          }
        } else {
          region.delete(delete);
          processed = Boolean.TRUE;
        }
        break;
      default:
        throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
    }
    if (processed != null) {
      builder.setProcessed(processed.booleanValue());
    }
    boolean clientCellBlockSupported = isClientCellBlockSupport(context);
    addResult(builder, r, controller, clientCellBlockSupported);
    if (clientCellBlockSupported) {
      addSize(context, r, null);
    }
    return builder.build();
  } catch (IOException ie) {
    regionServer.checkFileSystem();
    throw new ServiceException(ie);
  } finally {
    if (quota != null) {
      quota.close();
    }
  }
}
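The conditional PUT and DELETE branches above are the server side of the client's checkAndPut/checkAndDelete calls. As a minimal client-side sketch (the connection, table name, and values are illustrative, and this form of the API matches the 1.x-era Table interface; it is deprecated in later versions):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) { // "t1" is illustrative
      byte[] row = Bytes.toBytes("row1");
      byte[] family = Bytes.toBytes("f");
      byte[] qualifier = Bytes.toBytes("q");
      Put put = new Put(row).addColumn(family, qualifier, Bytes.toBytes("new"));
      // On the server this arrives as a MutateRequest carrying a Condition, and the
      // PUT branch above runs preCheckAndPut -> checkAndMutate -> postCheckAndPut.
      boolean applied = table.checkAndPut(row, family, qualifier,
          CompareOp.EQUAL, Bytes.toBytes("old"), put);
      System.out.println("applied=" + applied);
    }
  }
}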
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
Class TestCellBlockBuilder, method getSizedCellScanner.
static CellScanner getSizedCellScanner(final Cell[] cells) {
  int size = -1;
  for (Cell cell : cells) {
    size += CellUtil.estimatedSerializedSizeOf(cell);
  }
  final int totalSize = ClassSize.align(size);
  final CellScanner cellScanner = CellUtil.createCellScanner(cells);
  return new SizedCellScanner() {
    @Override
    public long heapSize() {
      return totalSize;
    }

    @Override
    public Cell current() {
      return cellScanner.current();
    }

    @Override
    public boolean advance() throws IOException {
      return cellScanner.advance();
    }
  };
}
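A minimal usage sketch for this helper, assuming the caller sits in the same package (the helper is package-private); the class name and cell contents are made up:

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.SizedCellScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class SizedCellScannerDemo {
  public static void main(String[] args) throws IOException {
    byte[] b = Bytes.toBytes("x");
    // row, family, qualifier, value -- the contents are arbitrary for the demo
    Cell[] cells = new Cell[] { new KeyValue(b, b, b, b), new KeyValue(b, b, b, b) };
    CellScanner scanner = TestCellBlockBuilder.getSizedCellScanner(cells);
    // The returned scanner also reports the aligned size estimate computed above.
    System.out.println("estimated size: " + ((SizedCellScanner) scanner).heapSize());
    while (scanner.advance()) {
      System.out.println(scanner.current());
    }
  }
}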
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
Class TestHBaseRpcControllerImpl, method testListOfCellScannerables.
@Test
public void testListOfCellScannerables() throws IOException {
  final int count = 10;
  List<CellScannable> cells = new ArrayList<>(count);
  for (int i = 0; i < count; i++) {
    cells.add(createCell(i));
  }
  HBaseRpcController controller = new HBaseRpcControllerImpl(cells);
  CellScanner cellScanner = controller.cellScanner();
  int index = 0;
  for (; cellScanner.advance(); index++) {
    Cell cell = cellScanner.current();
    byte[] indexBytes = Bytes.toBytes(index);
    assertTrue("" + index, Bytes.equals(indexBytes, 0, indexBytes.length,
        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
  }
  assertEquals(count, index);
}
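The createCell(i) helper is defined elsewhere in the test class and is not shown here. A sketch of a method, inside that class, that would satisfy the assertion above (the cell's value bytes equal the index) might look like this; the project's actual implementation may differ:

static CellScannable createCell(final int i) {
  final byte[] bytes = Bytes.toBytes(i);
  // Row, family, qualifier, and value all carry the index bytes; only the
  // value is checked by the test above.
  final Cell cell = new KeyValue(bytes, bytes, bytes, bytes);
  return new CellScannable() {
    @Override
    public CellScanner cellScanner() {
      return CellUtil.createCellScanner(Collections.singletonList(cell));
    }
  };
}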
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
Class HRegion, method processRowsWithLocks.
@Override
public void processRowsWithLocks(RowProcessor<?, ?> processor, long timeout, long nonceGroup,
    long nonce) throws IOException {
  for (byte[] row : processor.getRowsToLock()) {
    checkRow(row, "processRowsWithLocks");
  }
  if (!processor.readOnly()) {
    checkReadOnly();
  }
  checkResources();
  startRegionOperation();
  WALEdit walEdit = new WALEdit();
  // STEP 1. Run the pre-process hook.
  preProcess(processor, walEdit);
  // Short-circuit the read-only case.
  if (processor.readOnly()) {
    try {
      long now = EnvironmentEdgeManager.currentTime();
      doProcessRowWithTimeout(processor, now, this, null, null, timeout);
      processor.postProcess(this, walEdit, true);
    } finally {
      closeRegionOperation();
    }
    return;
  }
  boolean locked = false;
  List<RowLock> acquiredRowLocks = null;
  List<Mutation> mutations = new ArrayList<>();
  Collection<byte[]> rowsToLock = processor.getRowsToLock();
  // This is assigned by mvcc either explicitly below or in the guts of the WAL append
  // when it assigns the edit a sequence id (a.k.a. the mvcc write number).
  WriteEntry writeEntry = null;
  MemstoreSize memstoreSize = new MemstoreSize();
  try {
    boolean success = false;
    try {
      // STEP 2. Acquire the row lock(s).
      acquiredRowLocks = new ArrayList<>(rowsToLock.size());
      for (byte[] row : rowsToLock) {
        // Attempt to lock all involved rows; throw if any lock times out.
        // Use a write lock for mixed reads and writes.
        acquiredRowLocks.add(getRowLockInternal(row, false));
      }
      // STEP 3. Region lock.
      lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size());
      locked = true;
      long now = EnvironmentEdgeManager.currentTime();
      // STEP 4. Let the processor scan the rows, generate mutations, and add WAL edits.
      doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout);
      if (!mutations.isEmpty()) {
        // STEP 5. Call the preBatchMutate hook.
        processor.preBatchMutate(this, walEdit);
        // STEP 6. Append and sync if walEdit has data to write out.
        if (!walEdit.isEmpty()) {
          writeEntry = doWALAppend(walEdit, getEffectiveDurability(processor.useDurability()),
              processor.getClusterIds(), now, nonceGroup, nonce);
        } else {
          // We are here if the WAL is being skipped.
          writeEntry = this.mvcc.begin();
        }
        // STEP 7. Apply to the memstore.
        long sequenceId = writeEntry.getWriteNumber();
        for (Mutation m : mutations) {
          // Handle any tag-based cell features.
          // TODO: Do we need to call rewriteCellTags down in applyToMemstore()? Why not before,
          // so tags go into the WAL?
          rewriteCellTags(m.getFamilyCellMap(), m);
          for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) {
            Cell cell = cellScanner.current();
            if (walEdit.isEmpty()) {
              // If walEdit is empty, we put nothing in the WAL. The WAL stamps Cells with
              // the sequence id; with no WAL, we need to stamp it here.
              CellUtil.setSequenceId(cell, sequenceId);
            }
            applyToMemstore(getHStore(cell), cell, memstoreSize);
          }
        }
        // STEP 8. Call the postBatchMutate hook.
        processor.postBatchMutate(this);
        // STEP 9. Complete mvcc.
        mvcc.completeAndWait(writeEntry);
        writeEntry = null;
        // STEP 10. Release the region lock.
        if (locked) {
          this.updatesLock.readLock().unlock();
          locked = false;
        }
        // STEP 11. Release the row lock(s).
        releaseRowLocks(acquiredRowLocks);
      }
      success = true;
    } finally {
      // Call complete rather than completeAndWait: if writeEntry is still non-null here,
      // we probably hit an error.
      if (writeEntry != null) {
        mvcc.complete(writeEntry);
      }
      if (locked) {
        this.updatesLock.readLock().unlock();
      }
      // Release row locks if some were acquired but another timed out.
      releaseRowLocks(acquiredRowLocks);
    }
    // STEP 12. Run the post-process hook.
    processor.postProcess(this, walEdit, success);
  } finally {
    closeRegionOperation();
    if (!mutations.isEmpty()) {
      long newSize = this.addAndGetMemstoreSize(memstoreSize);
      requestFlushIfNeeded(newSize);
    }
  }
}
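RowProcessor implementations plug into this method. As a rough sketch only (not from the project: the class name, column names, and stubbed Protobuf plumbing are all illustrative, and the exact interface signatures and the WALEdit package location vary across HBase versions), a processor that atomically writes one cell under the row lock might look like:

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import com.google.protobuf.Message;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.BaseRowProcessor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // package location varies by version
import org.apache.hadoop.hbase.util.Bytes;

public class TouchRowProcessor extends BaseRowProcessor<Message, Message> {
  private final byte[] row;

  public TouchRowProcessor(byte[] row) {
    this.row = row;
  }

  @Override
  public Collection<byte[]> getRowsToLock() {
    return Collections.singleton(row);
  }

  @Override
  public boolean readOnly() {
    return false;
  }

  @Override
  public void process(long now, HRegion region, List<Mutation> mutations, WALEdit walEdit)
      throws IOException {
    Put put = new Put(row);
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("touched"), now, Bytes.toBytes(now));
    mutations.add(put); // applied atomically by processRowsWithLocks above
  }

  @Override
  public Message getResult() {
    return null; // no result payload in this sketch
  }

  @Override
  public Message getRequestData() {
    return null; // would carry serialized arguments for remote coprocessor invocation
  }

  @Override
  public void initialize(Message msg) {
    // no request data to deserialize in this sketch
  }
}

A caller would then invoke region.processRowsWithLocks(new TouchRowProcessor(row), timeoutMs, HConstants.NO_NONCE, HConstants.NO_NONCE).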
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
Class TestPutDeleteEtcCellIteration, method testPutIteration.
@Test
public void testPutIteration() throws IOException {
  Put p = new Put(ROW);
  for (int i = 0; i < COUNT; i++) {
    byte[] bytes = Bytes.toBytes(i);
    p.addColumn(bytes, bytes, TIMESTAMP, bytes);
  }
  int index = 0;
  for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance(); ) {
    Cell cell = cellScanner.current();
    byte[] bytes = Bytes.toBytes(index++);
    // The iterated cell should equal the expected KeyValue.
    assertTrue(cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)));
  }
  assertEquals(COUNT, index);
}
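The same iteration pattern covers the other mutation types in this test class. As a sketch, an analogous Delete iteration (the project's actual test may differ in its assertions) could be:

@Test
public void testDeleteIteration() throws IOException {
  Delete d = new Delete(ROW);
  for (int i = 0; i < COUNT; i++) {
    byte[] bytes = Bytes.toBytes(i);
    d.addColumn(bytes, bytes, TIMESTAMP);
  }
  int index = 0;
  for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance(); ) {
    Cell cell = cellScanner.current();
    byte[] bytes = Bytes.toBytes(index++);
    // Each cell should carry the per-iteration family/qualifier bytes.
    assertTrue(CellUtil.matchingQualifier(cell, bytes));
  }
  assertEquals(COUNT, index);
}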