Use of org.apache.hadoop.hbase.ipc.RpcCall in project hbase by apache.
The class RegionScannerImpl, method nextInternal.
private boolean nextInternal(List<Cell> results, ScannerContext scannerContext) throws IOException {
Preconditions.checkArgument(results.isEmpty(), "First parameter should be an empty list");
Preconditions.checkArgument(scannerContext != null, "Scanner context cannot be null");
Optional<RpcCall> rpcCall = RpcServer.getCurrentCall();
// Save the initial progress from the Scanner context in these local variables. The progress
// may need to be reset a few times if rows are being filtered out so we save the initial
// progress.
int initialBatchProgress = scannerContext.getBatchProgress();
long initialSizeProgress = scannerContext.getDataSizeProgress();
long initialHeapSizeProgress = scannerContext.getHeapSizeProgress();
// Used to check time limit
LimitScope limitScope = LimitScope.BETWEEN_CELLS;
// Loop until we produce a result row, reach a stop row, or exhaust the region,
// and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow).
while (true) {
resetProgress(scannerContext, initialBatchProgress, initialSizeProgress, initialHeapSizeProgress);
checkClientDisconnect(rpcCall);
// Check for thread interrupt status in case we have been signaled from
// #interruptRegionOperation.
region.checkInterrupt();
// Let's see what we have in the storeHeap.
Cell current = this.storeHeap.peek();
boolean shouldStop = shouldStop(current);
// When hasFilterRow is true it means that all the cells for a particular row must be
// read before a filtering decision can be made. This means that filters where hasFilterRow
// is true run the risk of encountering out-of-memory errors when they are applied to a
// table that has very large rows.
boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow();
// If filter#hasFilterRow is true, partial results are not allowed since allowing them
// would prevent the filters from being evaluated. Thus, change the scope of any limits
// that could potentially create partial results to LimitScope.BETWEEN_ROWS so that those
// limits are not reached mid-row.
if (hasFilterRow) {
if (LOG.isTraceEnabled()) {
LOG.trace("filter#hasFilterRow is true which prevents partial results from being " + " formed. Changing scope of limits that may create partials");
}
scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS);
scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS);
limitScope = LimitScope.BETWEEN_ROWS;
}
if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) {
if (hasFilterRow) {
throw new IncompatibleFilterException("Filter whose hasFilterRow() returns true is incompatible with scans that must stop mid-row because of a limit. ScannerContext: " + scannerContext);
}
return true;
}
// Check if we were getting data from the joinedHeap and hit the limit.
// If not, then it's the main path - getting results from storeHeap.
if (joinedContinuationRow == null) {
// First, check if we are at a stop row. If so, there are no more results.
if (shouldStop) {
if (hasFilterRow) {
filter.filterRowCells(results);
}
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
// Technically, if we hit limits before on this row, we don't need this call.
if (filterRowKey(current)) {
incrementCountOfRowsFilteredMetric(scannerContext);
// early check, see HBASE-16296
if (isFilterDoneInternal()) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
// Typically the count of rows scanned is incremented inside #populateResult. However,
// here we are filtering a row based purely on its row key, preventing us from calling
// #populateResult. Thus, perform the necessary increment of the rows scanned metric here.
incrementCountOfRowsScannedMetric(scannerContext);
boolean moreRows = nextRow(scannerContext, current);
if (!moreRows) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
results.clear();
// Read nothing as the rowkey was filtered, but still need to check time limit
if (scannerContext.checkTimeLimit(limitScope)) {
return true;
}
continue;
}
// Ok, we are good, let's try to get some results from the main heap.
populateResult(results, this.storeHeap, scannerContext, current);
if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) {
if (hasFilterRow) {
throw new IncompatibleFilterException("Filter whose hasFilterRow() returns true is incompatible with scans that must stop mid-row because of a limit. ScannerContext: " + scannerContext);
}
return true;
}
// Check for thread interrupt status in case we have been signaled from
// #interruptRegionOperation.
region.checkInterrupt();
Cell nextKv = this.storeHeap.peek();
shouldStop = shouldStop(nextKv);
// Save that the row was empty before filters were applied to it.
final boolean isEmptyRow = results.isEmpty();
// We have the part of the row necessary for filtering (all of it, usually).
// First filter with the filterRow(List).
FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED;
if (hasFilterRow) {
ret = filter.filterRowCellsWithRet(results);
// We don't know how the results have changed after being filtered. Must set progress
// according to contents of results now.
if (scannerContext.getKeepProgress()) {
scannerContext.setProgress(initialBatchProgress, initialSizeProgress, initialHeapSizeProgress);
} else {
scannerContext.clearProgress();
}
scannerContext.incrementBatchProgress(results.size());
for (Cell cell : results) {
scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), cell.heapSize());
}
}
if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) {
incrementCountOfRowsFilteredMetric(scannerContext);
results.clear();
boolean moreRows = nextRow(scannerContext, current);
if (!moreRows) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
// This row was totally filtered out. If this is NOT the last row,
// we should continue on. Otherwise, nothing else to do.
if (!shouldStop) {
// Read nothing as the cells were filtered, but still need to check the time limit
if (scannerContext.checkTimeLimit(limitScope)) {
return true;
}
continue;
}
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
// Ok, we are done with storeHeap for this row. Now we may need to fetch additional,
// non-essential data into the row. These values are not needed for the filter to work,
// so we postpone their fetch to (possibly) reduce the amount of data loaded from disk.
if (this.joinedHeap != null) {
boolean mayHaveData = joinedHeapMayHaveData(current);
if (mayHaveData) {
joinedContinuationRow = current;
populateFromJoinedHeap(results, scannerContext);
if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) {
return true;
}
}
}
} else {
// Populating from the joined heap was stopped by limits, populate some more.
populateFromJoinedHeap(results, scannerContext);
if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) {
return true;
}
}
// We may have just called populateFromJoinedHeap and hit the limits. If that is
// the case, we need to call it again on the next next() invocation.
if (joinedContinuationRow != null) {
return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues();
}
// Finally, we are done with both joinedHeap and storeHeap.
// Double check to prevent empty rows from appearing in the result. It could be
// the case when SingleColumnValueExcludeFilter is used.
if (results.isEmpty()) {
incrementCountOfRowsFilteredMetric(scannerContext);
boolean moreRows = nextRow(scannerContext, current);
if (!moreRows) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
if (!shouldStop) {
continue;
}
}
if (shouldStop) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
} else {
return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues();
}
}
}
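For reference, the checkClientDisconnect call at the top of the loop follows the pattern sketched below: a scan bound to a dead client should be aborted rather than allowed to keep consuming handler time. This is a minimal approximation, not the exact HBase implementation; it assumes RpcCall#disconnectSince() returns a negative value while the client is still connected and the elapsed milliseconds once it has disconnected.

import java.util.Optional;

import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;

public class DisconnectCheckSketch {

  // Abort early if the caller has already gone away, so a long-running scan
  // does not keep producing results that nobody will read.
  static void checkClientDisconnect(Optional<RpcCall> rpcCall)
      throws CallerDisconnectedException {
    if (rpcCall.isPresent()) {
      long afterTime = rpcCall.get().disconnectSince();
      if (afterTime >= 0) {
        throw new CallerDisconnectedException(
            "Aborting scan, caller disconnected " + afterTime + " ms ago");
      }
    }
  }

  static void scanLoop() throws CallerDisconnectedException {
    // The current call is bound to the handler thread once, outside the loop.
    Optional<RpcCall> rpcCall = RpcServer.getCurrentCall();
    while (true) {
      checkClientDisconnect(rpcCall);
      // ... read the next batch of cells ...
      break; // placeholder so the sketch terminates
    }
  }
}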
Use of org.apache.hadoop.hbase.ipc.RpcCall in project hbase by apache.
The class SlowLogQueueService, method consumeEventFromDisruptor.
/**
* This implementation is specific to slow log events. It consumes slow log events from the
* disruptor and inserts records into the EvictingQueue.
*
* @param namedQueuePayload namedQueue payload from disruptor ring buffer
*/
@Override
public void consumeEventFromDisruptor(NamedQueuePayload namedQueuePayload) {
if (!isOnlineLogProviderEnabled) {
return;
}
if (!(namedQueuePayload instanceof RpcLogDetails)) {
LOG.warn("SlowLogQueueService: NamedQueuePayload is not of type RpcLogDetails.");
return;
}
final RpcLogDetails rpcLogDetails = (RpcLogDetails) namedQueuePayload;
final RpcCall rpcCall = rpcLogDetails.getRpcCall();
final String clientAddress = rpcLogDetails.getClientAddress();
final long responseSize = rpcLogDetails.getResponseSize();
final String className = rpcLogDetails.getClassName();
final TooSlowLog.SlowLogPayload.Type type = getLogType(rpcLogDetails);
if (type == null) {
return;
}
Descriptors.MethodDescriptor methodDescriptor = rpcCall.getMethod();
Message param = rpcLogDetails.getParam();
long receiveTime = rpcCall.getReceiveTime();
long startTime = rpcCall.getStartTime();
long endTime = EnvironmentEdgeManager.currentTime();
int processingTime = (int) (endTime - startTime);
int qTime = (int) (startTime - receiveTime);
final SlowLogParams slowLogParams = ProtobufUtil.getSlowLogParams(param);
int numGets = 0;
int numMutations = 0;
int numServiceCalls = 0;
if (param instanceof ClientProtos.MultiRequest) {
ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest) param;
for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) {
for (ClientProtos.Action action : regionAction.getActionList()) {
if (action.hasMutation()) {
numMutations++;
}
if (action.hasGet()) {
numGets++;
}
if (action.hasServiceCall()) {
numServiceCalls++;
}
}
}
}
final String userName = rpcCall.getRequestUserName().orElse(StringUtils.EMPTY);
final String methodDescriptorName = methodDescriptor != null ? methodDescriptor.getName() : StringUtils.EMPTY;
TooSlowLog.SlowLogPayload slowLogPayload = TooSlowLog.SlowLogPayload.newBuilder()
  .setCallDetails(methodDescriptorName + "(" + param.getClass().getName() + ")")
  .setClientAddress(clientAddress)
  .setMethodName(methodDescriptorName)
  .setMultiGets(numGets)
  .setMultiMutations(numMutations)
  .setMultiServiceCalls(numServiceCalls)
  .setParam(slowLogParams != null ? slowLogParams.getParams() : StringUtils.EMPTY)
  .setProcessingTime(processingTime)
  .setQueueTime(qTime)
  .setRegionName(slowLogParams != null ? slowLogParams.getRegionName() : StringUtils.EMPTY)
  .setResponseSize(responseSize)
  .setServerClass(className)
  .setStartTime(startTime)
  .setType(type)
  .setUserName(userName)
  .build();
slowLogQueue.add(slowLogPayload);
if (isSlowLogTableEnabled) {
if (!slowLogPayload.getRegionName().startsWith("hbase:slowlog")) {
slowLogPersistentService.addToQueueForSysTable(slowLogPayload);
}
}
}
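The timing arithmetic above is the part worth isolating: getReceiveTime() marks when the request arrived at the server, getStartTime() marks when a handler began executing it, and the current time marks completion, so queue time and processing time fall out as two subtractions. A condensed sketch under those assumptions; the Timings holder is hypothetical, not an HBase type.

import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class RpcTimingSketch {

  // Illustrative holder for the two derived latencies.
  static final class Timings {
    final int queueTimeMs;      // waiting for a handler: receive -> start
    final int processingTimeMs; // executing in the handler: start -> now

    Timings(int queueTimeMs, int processingTimeMs) {
      this.queueTimeMs = queueTimeMs;
      this.processingTimeMs = processingTimeMs;
    }
  }

  static Timings timingsFor(RpcCall rpcCall) {
    long receiveTime = rpcCall.getReceiveTime();
    long startTime = rpcCall.getStartTime();
    long endTime = EnvironmentEdgeManager.currentTime();
    return new Timings((int) (startTime - receiveTime), (int) (endTime - startTime));
  }
}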
Use of org.apache.hadoop.hbase.ipc.RpcCall in project hbase by apache.
The class HRegion, method getRowLockInternal.
// will be overridden in tests
protected RowLock getRowLockInternal(byte[] row, boolean readLock, RowLock prevRowLock) throws IOException {
// create an object to use as a key in the row lock map
HashedBytes rowKey = new HashedBytes(row);
RowLockContext rowLockContext = null;
RowLockImpl result = null;
boolean success = false;
try {
// TODO: do we need to add a time component here?
while (result == null) {
rowLockContext = computeIfAbsent(lockedRows, rowKey, () -> new RowLockContext(rowKey));
// This can fail if the context is concurrently cleaned up, in which case the new
// lock is null and the loop retries with a fresh context.
if (readLock) {
// For read lock, if the caller has locked the same row previously, it will not try
// to acquire the same read lock. It simply returns the previous row lock.
RowLockImpl prevRowLockImpl = (RowLockImpl) prevRowLock;
if ((prevRowLockImpl != null) && (prevRowLockImpl.getLock() == rowLockContext.readWriteLock.readLock())) {
success = true;
return prevRowLock;
}
result = rowLockContext.newReadLock();
} else {
result = rowLockContext.newWriteLock();
}
}
int timeout = rowLockWaitDuration;
boolean reachDeadlineFirst = false;
Optional<RpcCall> call = RpcServer.getCurrentCall();
if (call.isPresent()) {
long deadline = call.get().getDeadline();
if (deadline < Long.MAX_VALUE) {
int timeToDeadline = (int) (deadline - EnvironmentEdgeManager.currentTime());
if (timeToDeadline <= this.rowLockWaitDuration) {
reachDeadlineFirst = true;
timeout = timeToDeadline;
}
}
}
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
String message = "Timed out waiting for lock for row: " + rowKey + " in region " + getRegionInfo().getEncodedName();
if (reachDeadlineFirst) {
throw new TimeoutIOException(message);
} else {
// If timeToDeadline is larger than rowLockWaitDuration, we cannot drop the request.
throw new IOException(message);
}
}
rowLockContext.setThreadName(Thread.currentThread().getName());
success = true;
return result;
} catch (InterruptedException ie) {
if (LOG.isDebugEnabled()) {
LOG.debug("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, getRegionInfo().getRegionNameAsString());
}
throw throwOnInterrupt(ie);
} catch (Error error) {
// The maximum lock count for a read lock is 64K (hardcoded); when that maximum is
// reached, tryLock throws an Error rather than an exception. Catch it here and wrap
// it in an IOException so the caller can go ahead and process the minibatch with the
// locks it did acquire.
LOG.warn("Failed to get row lock for {}, in region {}, cause: {}", Bytes.toStringBinary(row), getRegionInfo().getRegionNameAsString(), error);
IOException ioe = new IOException(error);
throw ioe;
} finally {
// Clean up the counts just in case this was the thing keeping the context alive.
if (!success && rowLockContext != null) {
rowLockContext.cleanUp();
}
}
}
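The deadline handling in the middle of this method is the reusable idea: a configured wait is capped by the remaining RPC deadline, because blocking past the moment the client gives up only wastes a handler thread. A condensed sketch of just that capping logic; the helper name is hypothetical.

import java.util.Optional;

import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DeadlineAwareTimeoutSketch {

  // Cap a configured wait (e.g. rowLockWaitDuration) by the time remaining
  // until the current RPC's deadline. getDeadline() is Long.MAX_VALUE when
  // no deadline was set, so the configured value is used unchanged then.
  static int effectiveTimeoutMs(int configuredTimeoutMs) {
    int timeout = configuredTimeoutMs;
    Optional<RpcCall> call = RpcServer.getCurrentCall();
    if (call.isPresent()) {
      long deadline = call.get().getDeadline();
      if (deadline < Long.MAX_VALUE) {
        int timeToDeadline = (int) (deadline - EnvironmentEdgeManager.currentTime());
        if (timeToDeadline <= timeout) {
          timeout = timeToDeadline; // the deadline wins; a timeout here is fail-fast
        }
      }
    }
    return timeout;
  }
}

A result of zero or less means the deadline has effectively passed, which is why the method above treats timeout <= 0 the same as a failed tryLock.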
Use of org.apache.hadoop.hbase.ipc.RpcCall in project hbase by apache.
The class TestRSRpcServices, method testRegionScannerHolderToString.
/**
* Simple test that the toString on RegionScannerHolder works.
* Just creates one and calls #toString on it.
*/
@Test
public void testRegionScannerHolderToString() throws UnknownHostException {
RpcCall call = Mockito.mock(RpcCall.class);
int port = 1234;
Mockito.when(call.getRemotePort()).thenReturn(port);
InetAddress address = InetAddress.getLocalHost();
Mockito.when(call.getRemoteAddress()).thenReturn(address);
Optional<String> userName = Optional.ofNullable("test");
Mockito.when(call.getRequestUserName()).thenReturn(userName);
RpcServer.setCurrentCall(call);
String clientIpAndPort = RSRpcServices.getRemoteClientIpAndPort();
String userNameTest = RSRpcServices.getUserName();
assertEquals("test", userNameTest);
HRegion region = Mockito.mock(HRegion.class);
Mockito.when(region.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
RSRpcServices.RegionScannerHolder rsh = new RSRpcServices.RegionScannerHolder(null, region, null, null, false, false, clientIpAndPort, userNameTest);
LOG.info("rsh: {}", rsh);
}
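Because RpcServer.setCurrentCall installs the call in a thread-local, a test that never clears it can leak the mock into later tests running on the same thread. A sketch of a safer arrangement; the helper is hypothetical, and it assumes that passing null to setCurrentCall clears the thread-local.

import java.net.InetAddress;
import java.util.Optional;

import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.mockito.Mockito;

public class MockCurrentCallSketch {

  // Run the body with a mocked current call bound to this thread, then clear it.
  static void withMockCall(Runnable body) throws Exception {
    RpcCall call = Mockito.mock(RpcCall.class);
    Mockito.when(call.getRemotePort()).thenReturn(1234);
    Mockito.when(call.getRemoteAddress()).thenReturn(InetAddress.getLocalHost());
    Mockito.when(call.getRequestUserName()).thenReturn(Optional.of("test"));
    RpcServer.setCurrentCall(call);
    try {
      body.run(); // code under test reads RpcServer.getCurrentCall() here
    } finally {
      RpcServer.setCurrentCall(null); // do not leak the mock into later tests
    }
  }
}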
Use of org.apache.hadoop.hbase.ipc.RpcCall in project hbase by apache.
The class HRegion, method getRowLockInternal (older variant).
protected RowLock getRowLockInternal(byte[] row, boolean readLock) throws IOException {
// create an object to use as a key in the row lock map
HashedBytes rowKey = new HashedBytes(row);
RowLockContext rowLockContext = null;
RowLockImpl result = null;
TraceScope traceScope = null;
// If we're tracing, start a span to show how long this took.
if (Trace.isTracing()) {
traceScope = Trace.startSpan("HRegion.getRowLock");
traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock ? "readLock" : "writeLock"));
}
try {
// TODO: do we need to add a time component here?
while (result == null) {
rowLockContext = computeIfAbsent(lockedRows, rowKey, () -> new RowLockContext(rowKey));
// This can fail if the context is concurrently cleaned up, in which case the new
// lock is null and the loop retries with a fresh context.
if (readLock) {
result = rowLockContext.newReadLock();
} else {
result = rowLockContext.newWriteLock();
}
}
int timeout = rowLockWaitDuration;
boolean reachDeadlineFirst = false;
RpcCall call = RpcServer.getCurrentCall();
if (call != null && call.getDeadline() < Long.MAX_VALUE) {
int timeToDeadline = (int) (call.getDeadline() - System.currentTimeMillis());
if (timeToDeadline <= this.rowLockWaitDuration) {
reachDeadlineFirst = true;
timeout = timeToDeadline;
}
}
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
if (traceScope != null) {
traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
}
result = null;
// Clean up the counts just in case this was the thing keeping the context alive.
rowLockContext.cleanUp();
String message = "Timed out waiting for lock for row: " + rowKey + " in region " + getRegionInfo().getEncodedName();
if (reachDeadlineFirst) {
throw new TimeoutIOException(message);
} else {
// If timeToDeadline is larger than rowLockWaitDuration, we cannot drop the request.
throw new IOException(message);
}
}
rowLockContext.setThreadName(Thread.currentThread().getName());
return result;
} catch (InterruptedException ie) {
LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
InterruptedIOException iie = new InterruptedIOException();
iie.initCause(ie);
if (traceScope != null) {
traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
}
Thread.currentThread().interrupt();
throw iie;
} finally {
if (traceScope != null) {
traceScope.close();
}
}
}
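This older variant uses the HTrace 3 API (org.apache.htrace) that later HBase versions removed. The span lifecycle it follows is worth seeing in isolation: start a span only when a sampler picked this request, annotate at interesting points, and always close the scope in finally. A minimal sketch assuming the HTrace 3 classes shown in the snippet.

import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TraceScopeSketch {

  static void tracedOperation() {
    TraceScope traceScope = null;
    // Only pay the tracing cost when a sampler selected this request.
    if (Trace.isTracing()) {
      traceScope = Trace.startSpan("SomeComponent.someOperation");
    }
    try {
      if (traceScope != null) {
        traceScope.getSpan().addTimelineAnnotation("reached interesting point");
      }
      // ... the operation itself ...
    } finally {
      if (traceScope != null) {
        traceScope.close(); // always close, or spans leak and never get reported
      }
    }
  }
}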