Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.
The class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInRelationshipTypeIndex:
@Test
void corruptionInRelationshipTypeIndex() throws Exception {
    MutableObject<Long> rootNode = new MutableObject<>();
    Path relationshipTypeScanStoreFile = relationshipTypeScanStoreFile();
    corruptIndexes(readOnly(), (tree, inspection) -> {
        rootNode.setValue(inspection.getRootNode());
        tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
    }, relationshipTypeScanStoreFile);

    ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance());

    assertFalse(result.isSuccessful());
    assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
    assertResultContainsMessage(result, "Number of inconsistent RELATIONSHIP_TYPE_SCAN_DOCUMENT records: 1");
}
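This test, and the two variants that follow, rely on the same trick: a Java lambda may only capture effectively final locals, so the root node id discovered inside the corruptIndexes callback is smuggled out through a MutableObject holder rather than by reassigning a local variable. Below is a minimal, self-contained sketch of that capture pattern; inspectTree is a hypothetical stand-in for corruptIndexes, and the plain commons-lang3 coordinate is used instead of the Beam-repackaged one.

import org.apache.commons.lang3.mutable.MutableObject;

import java.util.function.Consumer;

public class LambdaCaptureSketch {
    // Hypothetical stand-in for corruptIndexes: invokes a callback with a discovered id.
    static void inspectTree(Consumer<Long> inspection) {
        inspection.accept(42L); // pretend 42 is the root node id found during inspection
    }

    public static void main(String[] args) {
        // A plain `long rootNode` reassigned inside the lambda would not compile:
        // captured locals must be effectively final. The holder reference stays final,
        // but its contents may change.
        MutableObject<Long> rootNode = new MutableObject<>();
        inspectTree(rootNode::setValue);
        System.out.println("root node = " + rootNode.getValue()); // prints: root node = 42
    }
}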
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.
The class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInCountsStore:
@Test
void corruptionInCountsStore() throws Exception {
    MutableObject<Long> rootNode = new MutableObject<>();
    Path countsStoreFile = countsStoreFile();
    final LayoutBootstrapper countsLayoutBootstrapper = (indexFile, pageCache, meta) -> new CountsLayout();
    corruptIndexes(fs, readOnly(), (tree, inspection) -> {
        rootNode.setValue(inspection.getRootNode());
        tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
    }, countsLayoutBootstrapper, countsStoreFile);

    ConsistencyFlags flags = new ConsistencyFlags(false, false, true);
    ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance(), flags);

    assertFalse(result.isSuccessful());
    assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
    assertResultContainsMessage(result, "Number of inconsistent COUNTS records: 1");
}
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.
The class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInIndexStatisticsStore:
@Test
void corruptionInIndexStatisticsStore() throws Exception {
    MutableObject<Long> rootNode = new MutableObject<>();
    Path indexStatisticsStoreFile = indexStatisticsStoreFile();
    corruptIndexes(readOnly(), (tree, inspection) -> {
        rootNode.setValue(inspection.getRootNode());
        tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
    }, indexStatisticsStoreFile);

    ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance());

    assertFalse(result.isSuccessful());
    assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
    assertResultContainsMessage(result, "Number of inconsistent INDEX_STATISTICS records: 1");
}
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.mutable.MutableObject in project hbase by apache.
The class RSRpcServices, method scan:
/**
 * Scan data in a table.
 *
 * @param controller the RPC controller
 * @param request the scan request
 * @throws ServiceException
 */
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request) throws ServiceException {
    if (controller != null && !(controller instanceof HBaseRpcController)) {
        throw new UnsupportedOperationException("We only do HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
    }
    if (!request.hasScannerId() && !request.hasScan()) {
        throw new ServiceException(new DoNotRetryIOException("Missing required input: scannerId or scan"));
    }
    try {
        checkOpen();
    } catch (IOException e) {
        if (request.hasScannerId()) {
            String scannerName = toScannerName(request.getScannerId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Server shutting down and client tried to access missing scanner " + scannerName);
            }
            final LeaseManager leaseManager = server.getLeaseManager();
            if (leaseManager != null) {
                try {
                    leaseManager.cancelLease(scannerName);
                } catch (LeaseException le) {
                    // No problem, ignore
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Unable to cancel lease of scanner. It could already be closed.");
                    }
                }
            }
        }
        throw new ServiceException(e);
    }
    requestCount.increment();
    rpcScanRequestCount.increment();
    RegionScannerHolder rsh;
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    String scannerName;
    try {
        if (request.hasScannerId()) {
            // Downstream projects such as AsyncHBase in OpenTSDB need this value; see HBASE-18000
            // for more details.
            long scannerId = request.getScannerId();
            builder.setScannerId(scannerId);
            scannerName = toScannerName(scannerId);
            rsh = getRegionScanner(request);
        } else {
            Pair<String, RegionScannerHolder> scannerNameAndRSH = newRegionScanner(request, builder);
            scannerName = scannerNameAndRSH.getFirst();
            rsh = scannerNameAndRSH.getSecond();
        }
    } catch (IOException e) {
        if (e == SCANNER_ALREADY_CLOSED) {
            // The old client will still send a close request to us. Just ignore it and return.
            return builder.build();
        }
        throw new ServiceException(e);
    }
    if (rsh.fullRegionScan) {
        rpcFullScanRequestCount.increment();
    }
    HRegion region = rsh.r;
    LeaseManager.Lease lease;
    try {
        // Remove the lease while the request is being processed on the server; this protects
        // against the case where processing takes longer than the lease expiration time.
        lease = server.getLeaseManager().removeLease(scannerName);
    } catch (LeaseException e) {
        throw new ServiceException(e);
    }
    if (request.hasRenew() && request.getRenew()) {
        // add the lease back and return
        addScannerLeaseBack(lease);
        try {
            checkScanNextCallSeq(request, rsh);
        } catch (OutOfOrderScannerNextException e) {
            throw new ServiceException(e);
        }
        return builder.build();
    }
    OperationQuota quota;
    try {
        quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
    } catch (IOException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    try {
        checkScanNextCallSeq(request, rsh);
    } catch (OutOfOrderScannerNextException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    // Now we have increased the next call sequence. If we give the client an error, the retry
    // will never succeed, so we'd better close the scanner and return a DoNotRetryIOException;
    // the client will then open a new scanner.
    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
    // this is scan.getCaching
    int rows;
    if (request.hasNumberOfRows()) {
        rows = request.getNumberOfRows();
    } else {
        rows = closeScanner ? 0 : 1;
    }
    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
    // now let's do the real scan
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    RegionScanner scanner = rsh.s;
    // This is the row limit for this scan; if the number of rows reaches this value, we will
    // close the scanner.
    int limitOfRows;
    if (request.hasLimitOfRows()) {
        limitOfRows = request.getLimitOfRows();
    } else {
        limitOfRows = -1;
    }
    MutableObject<Object> lastBlock = new MutableObject<>();
    boolean scannerClosed = false;
    try {
        List<Result> results = new ArrayList<>(Math.min(rows, 512));
        if (rows > 0) {
            boolean done = false;
            // Call coprocessor. Get region info from scanner.
            if (region.getCoprocessorHost() != null) {
                Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
                if (!results.isEmpty()) {
                    for (Result r : results) {
                        lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
                    }
                }
                if (bypass != null && bypass.booleanValue()) {
                    done = true;
                }
            }
            if (!done) {
                scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows, results, builder, lastBlock, context);
            } else {
                builder.setMoreResultsInRegion(!results.isEmpty());
            }
        } else {
            // This is an open-scanner call with numberOfRows = 0, so set moreResultsInRegion to true.
            builder.setMoreResultsInRegion(true);
        }
        quota.addScanResult(results);
        addResults(builder, results, (HBaseRpcController) controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()), isClientCellBlockSupport(context));
        if (scanner.isFilterDone() && results.isEmpty()) {
            // If the scanner's filter - if any - is done with the scan, only set moreResults to
            // false if the result list is empty. This is used to keep compatible with the old
            // scan implementation, where we just ignore the returned results if moreResults is
            // false. Can remove the isEmpty check after we get rid of the old implementation.
            builder.setMoreResults(false);
        }
        // Later we may close the scanner depending on this flag, so here we need to make sure we
        // have already set this flag.
        assert builder.hasMoreResultsInRegion();
        // We only set moreResults to false above, so set it to true if we have not set it yet.
        if (!builder.hasMoreResults()) {
            builder.setMoreResults(true);
        }
        if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
            // Record the last cell of the last result if it is a partial result.
            // We need this to calculate the complete rows we have returned to the client, as
            // mayHaveMoreCellsInRow being true does not mean that there will be extra cells for
            // the current row. We may filter out all the remaining cells for the current row and
            // just return the cells of the next row when calling RegionScanner.nextRaw, so here
            // we need to check for a row change.
            Result lastResult = results.get(results.size() - 1);
            if (lastResult.mayHaveMoreCellsInRow()) {
                rsh.rowOfLastPartialResult = lastResult.getRow();
            } else {
                rsh.rowOfLastPartialResult = null;
            }
        }
        if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
            scannerClosed = true;
            closeScanner(region, scanner, scannerName, context);
        }
        return builder.build();
    } catch (IOException e) {
        try {
            // the scanner is closed here
            scannerClosed = true;
            // The scanner state might be left in a dirty state, so we will tell the client to
            // fail this RPC and close the scanner while opening up another one from the start of
            // the row that the client has last seen.
            closeScanner(region, scanner, scannerName, context);
            // If it is already a DoNotRetryIOException, rethrow it as-is; see the ClientScanner
            // code for how it deals with these special exceptions.
            if (e instanceof DoNotRetryIOException) {
                throw e;
            }
            // If it is a FileNotFoundException, wrap it in a DoNotRetryIOException. This avoids
            // the retry in ClientScanner.
            if (e instanceof FileNotFoundException) {
                throw new DoNotRetryIOException(e);
            }
            // The scanner is already closed, so throw a special exception instead of the raw
            // IOException to save an RPC.
            if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
                // 1.4.0+ clients know how to handle ScannerResetException
                throw new ScannerResetException("Scanner is closed on the server-side", e);
            } else {
                // Older clients do not know about SRE. Just throw USE, which they will handle.
                throw new UnknownScannerException("Throwing UnknownScannerException to reset the client" + " scanner state for clients older than 1.3.", e);
            }
        } catch (IOException ioe) {
            throw new ServiceException(ioe);
        }
    } finally {
        if (!scannerClosed) {
            // The closeCallBack will be set in closeScanner, so here we only care about the
            // shippedCallback.
            if (context != null) {
                context.setCallBack(rsh.shippedCallback);
            } else {
                // When context != null, adding back the lease is done in the callback set above;
                // with no context we add it back directly.
                addScannerLeaseBack(lease);
            }
        }
        quota.close();
    }
}
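Within scan above, the MutableObject<Object> lastBlock acts as an in-out parameter: both the coprocessor loop and the inner scan call update the same holder, so size accounting can tell whether consecutive results share a backing block without any method signature having to return that state. Below is a reduced sketch of the pattern, not HBase's implementation: addSize here is a hypothetical stand-in that mimics the shape (but not the logic) of RSRpcServices.addSize.

import org.apache.commons.lang3.mutable.MutableObject;

import java.util.List;

public class LastBlockSketch {
    // Hypothetical size accounting: returns the block backing the result so that
    // consecutive results in the same block are not double-counted by the caller.
    static Object addSize(Object result, Object lastBlock) {
        Object currentBlock = result; // pretend each result carries its backing block
        if (currentBlock != lastBlock) {
            System.out.println("accounting new block for " + result);
        }
        return currentBlock;
    }

    // Helper and caller share one holder, so updates made here survive the return.
    static void accountResults(List<Object> results, MutableObject<Object> lastBlock) {
        for (Object r : results) {
            lastBlock.setValue(addSize(r, lastBlock.getValue()));
        }
    }

    public static void main(String[] args) {
        MutableObject<Object> lastBlock = new MutableObject<>();
        accountResults(List.of("r1", "r2"), lastBlock);
        System.out.println("last block = " + lastBlock.getValue());
    }
}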
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.mutable.MutableObject in project hbase by apache.
The class RegionReplicationSink, method send:
private void send() {
    List<SinkEntry> toSend = new ArrayList<>();
    long totalSize = 0L;
    boolean hasMetaEdit = false;
    for (SinkEntry entry;;) {
        entry = entries.poll();
        if (entry == null) {
            break;
        }
        toSend.add(entry);
        totalSize += entry.size;
        hasMetaEdit |= entry.edit.isMetaEdit();
        if (toSend.size() >= batchCountCapacity || totalSize >= batchSizeCapacity) {
            break;
        }
    }
    int toSendReplicaCount = regionReplication - 1 - failedReplicas.size();
    if (toSendReplicaCount <= 0) {
        return;
    }
    long rpcTimeoutNsToUse;
    long operationTimeoutNsToUse;
    if (!hasMetaEdit) {
        rpcTimeoutNsToUse = rpcTimeoutNs;
        operationTimeoutNsToUse = operationTimeoutNs;
    } else {
        rpcTimeoutNsToUse = metaEditRpcTimeoutNs;
        operationTimeoutNsToUse = metaEditOperationTimeoutNs;
    }
    sending = true;
    List<WAL.Entry> walEntries = toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList());
    AtomicInteger remaining = new AtomicInteger(toSendReplicaCount);
    Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
    for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
        if (failedReplicas.contains(replicaId)) {
            continue;
        }
        MutableObject<Throwable> error = new MutableObject<>();
        replica2Error.put(replicaId, error);
        RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId);
        FutureUtils.addListener(conn.replicate(replica, walEntries, retries, rpcTimeoutNsToUse, operationTimeoutNsToUse), (r, e) -> {
            error.setValue(e);
            if (remaining.decrementAndGet() == 0) {
                onComplete(toSend, replica2Error);
            }
        });
    }
}
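Here send pre-allocates one MutableObject<Throwable> per live replica before firing the asynchronous replicate calls; each completion listener writes its outcome into its own slot, and the last listener to finish, detected via the AtomicInteger countdown, hands the whole error map to onComplete. Below is a self-contained sketch of that fan-out pattern, using CompletableFuture and a hypothetical replicate method in place of conn.replicate and FutureUtils.addListener.

import org.apache.commons.lang3.mutable.MutableObject;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutErrorSketch {
    // Hypothetical stand-in for conn.replicate: fails for replica 2, succeeds otherwise.
    static CompletableFuture<Void> replicate(int replicaId) {
        return replicaId == 2
            ? CompletableFuture.failedFuture(new RuntimeException("replica " + replicaId + " down"))
            : CompletableFuture.completedFuture(null);
    }

    public static void main(String[] args) {
        int replicas = 3;
        AtomicInteger remaining = new AtomicInteger(replicas);
        Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
        for (int replicaId = 1; replicaId <= replicas; replicaId++) {
            // One pre-allocated slot per replica; each listener writes only to its own
            // slot, so the map itself needs no synchronization after this loop.
            MutableObject<Throwable> error = new MutableObject<>();
            replica2Error.put(replicaId, error);
            replicate(replicaId).whenComplete((r, e) -> {
                error.setValue(e); // null on success
                // The last listener to complete reports the collected outcomes.
                if (remaining.decrementAndGet() == 0) {
                    replica2Error.forEach((id, err) -> System.out.println(
                        "replica " + id + ": " + (err.getValue() == null ? "ok" : err.getValue())));
                }
            });
        }
    }
}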