Use of org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.
Class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInIndexStatisticsStore.
@Test
void corruptionInIndexStatisticsStore() throws Exception {
MutableObject<Long> rootNode = new MutableObject<>();
Path indexStatisticsStoreFile = indexStatisticsStoreFile();
corruptIndexes(readOnly(), (tree, inspection) -> {
rootNode.setValue(inspection.getRootNode());
tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
}, indexStatisticsStoreFile);
ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance());
assertFalse(result.isSuccessful());
assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
assertResultContainsMessage(result, "Number of inconsistent INDEX_STATISTICS records: 1");
}
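The test above relies on a common MutableObject idiom: a local variable referenced from a lambda must be effectively final, so a holder object is captured instead and its contents are mutated from inside the lambda. Below is a minimal, self-contained sketch of that capture pattern using only commons-lang3 and the JDK; the class name and data are illustrative and unrelated to the Neo4j APIs.

import java.util.List;
import org.apache.commons.lang3.mutable.MutableObject;

public class LambdaCaptureSketch {
    public static void main(String[] args) {
        // The lambda cannot reassign a local variable, but it can mutate the holder it captures.
        MutableObject<String> longest = new MutableObject<>();
        List.of("a", "bb", "ccc").forEach(s -> {
            if (longest.getValue() == null || s.length() > longest.getValue().length()) {
                longest.setValue(s);
            }
        });
        // Prints "ccc": the value written inside the lambda is visible to the caller afterwards.
        System.out.println(longest.getValue());
    }
}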
Use of org.apache.commons.lang3.mutable.MutableObject in project hbase by apache.
Class RSRpcServices, method scan.
/**
* Scan data in a table.
*
* @param controller the RPC controller
* @param request the scan request
* @throws ServiceException
*/
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request) throws ServiceException {
if (controller != null && !(controller instanceof HBaseRpcController)) {
throw new UnsupportedOperationException("We only do " + "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
}
if (!request.hasScannerId() && !request.hasScan()) {
throw new ServiceException(new DoNotRetryIOException("Missing required input: scannerId or scan"));
}
try {
checkOpen();
} catch (IOException e) {
if (request.hasScannerId()) {
String scannerName = toScannerName(request.getScannerId());
if (LOG.isDebugEnabled()) {
LOG.debug("Server shutting down and client tried to access missing scanner " + scannerName);
}
final LeaseManager leaseManager = server.getLeaseManager();
if (leaseManager != null) {
try {
leaseManager.cancelLease(scannerName);
} catch (LeaseException le) {
// No problem, ignore
if (LOG.isTraceEnabled()) {
LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
}
}
}
}
throw new ServiceException(e);
}
requestCount.increment();
rpcScanRequestCount.increment();
RegionScannerHolder rsh;
ScanResponse.Builder builder = ScanResponse.newBuilder();
String scannerName;
try {
if (request.hasScannerId()) {
// The downstream projects such as AsyncHBase in OpenTSDB need this value. See HBASE-18000
// for more details.
long scannerId = request.getScannerId();
builder.setScannerId(scannerId);
scannerName = toScannerName(scannerId);
rsh = getRegionScanner(request);
} else {
Pair<String, RegionScannerHolder> scannerNameAndRSH = newRegionScanner(request, builder);
scannerName = scannerNameAndRSH.getFirst();
rsh = scannerNameAndRSH.getSecond();
}
} catch (IOException e) {
if (e == SCANNER_ALREADY_CLOSED) {
// the old client will still send a close request to us. Just ignore it and return.
return builder.build();
}
throw new ServiceException(e);
}
if (rsh.fullRegionScan) {
rpcFullScanRequestCount.increment();
}
HRegion region = rsh.r;
LeaseManager.Lease lease;
try {
// Remove the lease while the request is being processed in the server; this protects against the
// case where processing takes longer than the lease expiration time (removeLease returns null if
// no lease is found).
lease = server.getLeaseManager().removeLease(scannerName);
} catch (LeaseException e) {
throw new ServiceException(e);
}
if (request.hasRenew() && request.getRenew()) {
// add back and return
addScannerLeaseBack(lease);
try {
checkScanNextCallSeq(request, rsh);
} catch (OutOfOrderScannerNextException e) {
throw new ServiceException(e);
}
return builder.build();
}
OperationQuota quota;
try {
quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
} catch (IOException e) {
addScannerLeaseBack(lease);
throw new ServiceException(e);
}
try {
checkScanNextCallSeq(request, rsh);
} catch (OutOfOrderScannerNextException e) {
addScannerLeaseBack(lease);
throw new ServiceException(e);
}
// Now we have increased the next call sequence. If we give the client an error, the retry will
// never succeed. So we'd better close the scanner and return a DoNotRetryIOException to the
// client, and then the client will open a new scanner.
boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
// this is scan.getCaching
int rows;
if (request.hasNumberOfRows()) {
rows = request.getNumberOfRows();
} else {
rows = closeScanner ? 0 : 1;
}
RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
// now let's do the real scan.
long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
RegionScanner scanner = rsh.s;
// This is the row limit for this scan; if the number of rows reaches this value, we will
// close the scanner.
int limitOfRows;
if (request.hasLimitOfRows()) {
limitOfRows = request.getLimitOfRows();
} else {
limitOfRows = -1;
}
MutableObject<Object> lastBlock = new MutableObject<>();
boolean scannerClosed = false;
try {
List<Result> results = new ArrayList<>(Math.min(rows, 512));
if (rows > 0) {
boolean done = false;
// Call coprocessor. Get region info from scanner.
if (region.getCoprocessorHost() != null) {
Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
if (!results.isEmpty()) {
for (Result r : results) {
lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
}
}
if (bypass != null && bypass.booleanValue()) {
done = true;
}
}
if (!done) {
scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows, results, builder, lastBlock, context);
} else {
builder.setMoreResultsInRegion(!results.isEmpty());
}
} else {
// This is an open-scanner call with numberOfRows = 0, so set moreResultsInRegion to true.
builder.setMoreResultsInRegion(true);
}
quota.addScanResult(results);
addResults(builder, results, (HBaseRpcController) controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()), isClientCellBlockSupport(context));
if (scanner.isFilterDone() && results.isEmpty()) {
// If the scanner's filter - if any - is done with the scan, only set moreResults to false
// when results is empty. This is used to keep compatibility with the old scan implementation,
// where the returned results are simply ignored when moreResults is false. The isEmpty check
// can be removed once the old implementation is gone.
builder.setMoreResults(false);
}
// We may close the scanner below based on moreResultsInRegion, so by this point we must
// have already set this flag.
assert builder.hasMoreResultsInRegion();
// moreResults is only set to false in the code above, so set it to true if it has not been set
// yet.
if (!builder.hasMoreResults()) {
builder.setMoreResults(true);
}
if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
// Record the last cell of the last result if it is a partial result
// We need this to calculate the complete rows we have returned to client as the
// mayHaveMoreCellsInRow is true does not mean that there will be extra cells for the
// current row. We may filter out all the remaining cells for the current row and just
// return the cells of the nextRow when calling RegionScanner.nextRaw. So here we need to
// check for row change.
Result lastResult = results.get(results.size() - 1);
if (lastResult.mayHaveMoreCellsInRow()) {
rsh.rowOfLastPartialResult = lastResult.getRow();
} else {
rsh.rowOfLastPartialResult = null;
}
}
if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
scannerClosed = true;
closeScanner(region, scanner, scannerName, context);
}
return builder.build();
} catch (IOException e) {
try {
// scanner is closed here
scannerClosed = true;
// The scanner state might be left in a dirty state, so we will tell the Client to
// fail this RPC and close the scanner while opening up another one from the start of
// row that the client has last seen.
closeScanner(region, scanner, scannerName, context);
// If the exception is already a DoNotRetryIOException, rethrow it as-is so it is surfaced to
// the client. See ClientScanner code to see how it deals with these special exceptions.
if (e instanceof DoNotRetryIOException) {
throw e;
}
// If it is a FileNotFoundException, wrap it as a
// DoNotRetryIOException. This can avoid the retry in ClientScanner.
if (e instanceof FileNotFoundException) {
throw new DoNotRetryIOException(e);
}
// The scanner is already closed, so instead of rethrowing the raw IOException we throw
// a special exception to save an RPC.
if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
// 1.4.0+ clients know how to handle
throw new ScannerResetException("Scanner is closed on the server-side", e);
} else {
// older clients do not know about SRE. Just throw USE, which they will handle
throw new UnknownScannerException("Throwing UnknownScannerException to reset the client" + " scanner state for clients older than 1.3.", e);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
} finally {
if (!scannerClosed) {
// the closeCallBack will be set in closeScanner so here we only care about shippedCallback
if (context != null) {
context.setCallBack(rsh.shippedCallback);
} else {
// context is null here, so add the lease back ourselves; when context != null this is
// handled by the shippedCallback set above.
addScannerLeaseBack(lease);
}
}
quota.close();
}
}
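In the scan method, lastBlock is a MutableObject<Object> passed into the nested scan(...) call so that the helper can both read the previously accounted block and publish the latest one back to the caller, effectively an in/out parameter. The sketch below shows that pattern in isolation; addSizes and the String payloads are hypothetical stand-ins, not HBase APIs.

import java.util.List;
import org.apache.commons.lang3.mutable.MutableObject;

public class InOutParameterSketch {
    // The holder lets the helper read the previous value and publish a new one to the caller.
    static long addSizes(List<String> rows, MutableObject<String> lastSeen) {
        long total = 0;
        for (String row : rows) {
            total += row.length();
            lastSeen.setValue(row);
        }
        return total;
    }

    public static void main(String[] args) {
        MutableObject<String> lastSeen = new MutableObject<>();
        long size = addSizes(List.of("r1", "row2", "longest-row"), lastSeen);
        // The caller observes the last value written by the helper.
        System.out.println(size + " chars, last row: " + lastSeen.getValue());
    }
}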
Use of org.apache.commons.lang3.mutable.MutableObject in project hbase by apache.
Class RegionReplicationSink, method send.
private void send() {
List<SinkEntry> toSend = new ArrayList<>();
long totalSize = 0L;
boolean hasMetaEdit = false;
for (SinkEntry entry; ; ) {
entry = entries.poll();
if (entry == null) {
break;
}
toSend.add(entry);
totalSize += entry.size;
hasMetaEdit |= entry.edit.isMetaEdit();
if (toSend.size() >= batchCountCapacity || totalSize >= batchSizeCapacity) {
break;
}
}
int toSendReplicaCount = regionReplication - 1 - failedReplicas.size();
if (toSendReplicaCount <= 0) {
return;
}
long rpcTimeoutNsToUse;
long operationTimeoutNsToUse;
if (!hasMetaEdit) {
rpcTimeoutNsToUse = rpcTimeoutNs;
operationTimeoutNsToUse = operationTimeoutNs;
} else {
rpcTimeoutNsToUse = metaEditRpcTimeoutNs;
operationTimeoutNsToUse = metaEditOperationTimeoutNs;
}
sending = true;
List<WAL.Entry> walEntries = toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList());
AtomicInteger remaining = new AtomicInteger(toSendReplicaCount);
Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
if (failedReplicas.contains(replicaId)) {
continue;
}
MutableObject<Throwable> error = new MutableObject<>();
replica2Error.put(replicaId, error);
RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId);
FutureUtils.addListener(conn.replicate(replica, walEntries, retries, rpcTimeoutNsToUse, operationTimeoutNsToUse), (r, e) -> {
error.setValue(e);
if (remaining.decrementAndGet() == 0) {
onComplete(toSend, replica2Error);
}
});
}
}
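Here each replica gets its own MutableObject<Throwable>, registered in a map before the asynchronous replicate call is issued; the completion callback records the per-replica outcome, and the last callback to finish triggers onComplete with the collected errors. The following sketch reproduces that bookkeeping with a plain CompletableFuture standing in for the replication future; the task ids, the simulated failure, and the printing are illustrative only.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.mutable.MutableObject;

public class PerTaskErrorSketch {
    public static void main(String[] args) {
        int tasks = 3;
        Map<Integer, MutableObject<Throwable>> taskErrors = new HashMap<>();
        AtomicInteger remaining = new AtomicInteger(tasks);
        CompletableFuture<Void> allDone = new CompletableFuture<>();
        for (int i = 0; i < tasks; i++) {
            MutableObject<Throwable> error = new MutableObject<>();
            taskErrors.put(i, error);
            final int id = i;
            CompletableFuture.runAsync(() -> {
                if (id == 1) {
                    // Simulate one failing task.
                    throw new IllegalStateException("task " + id + " failed");
                }
            }).whenComplete((ignored, e) -> {
                // Each callback records its own outcome in its pre-registered holder.
                error.setValue(e);
                if (remaining.decrementAndGet() == 0) {
                    allDone.complete(null);
                }
            });
        }
        allDone.join();
        taskErrors.forEach((id, err) -> System.out.println(
            "task " + id + ": " + (err.getValue() == null ? "ok" : err.getValue().getMessage())));
    }
}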
Use of org.apache.commons.lang3.mutable.MutableObject in project druid by druid-io.
Class VectorGroupByEngineIteratorTest, method testCreateOneGrouperAndCloseItWhenClose.
@Test
public void testCreateOneGrouperAndCloseItWhenClose() throws IOException {
final Interval interval = TestIndex.DATA_INTERVAL;
final AggregatorFactory factory = new DoubleSumAggregatorFactory("index", "index");
final GroupByQuery query = GroupByQuery.builder().setDataSource(QueryRunnerTestHelper.DATA_SOURCE).setGranularity(QueryRunnerTestHelper.DAY_GRAN).setInterval(interval).setDimensions(new DefaultDimensionSpec("market", null, null)).setAggregatorSpecs(factory).build();
final StorageAdapter storageAdapter = new QueryableIndexStorageAdapter(TestIndex.getMMappedTestIndex());
final ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[4096]);
final VectorCursor cursor = storageAdapter.makeVectorCursor(Filters.toFilter(query.getDimFilter()), interval, query.getVirtualColumns(), false, QueryContexts.getVectorSize(query), null);
final List<GroupByVectorColumnSelector> dimensions = query.getDimensions().stream().map(dimensionSpec -> ColumnProcessors.makeVectorProcessor(dimensionSpec, GroupByVectorColumnProcessorFactory.instance(), cursor.getColumnSelectorFactory())).collect(Collectors.toList());
final MutableObject<VectorGrouper> grouperCaptor = new MutableObject<>();
final VectorGroupByEngineIterator iterator = new VectorGroupByEngineIterator(query, new GroupByQueryConfig(), storageAdapter, cursor, interval, dimensions, byteBuffer, null) {
@Override
VectorGrouper makeGrouper() {
grouperCaptor.setValue(Mockito.spy(super.makeGrouper()));
return grouperCaptor.getValue();
}
};
iterator.close();
Mockito.verify(grouperCaptor.getValue()).close();
}
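The Druid test captures a reference to an object that is created deep inside the iterator by overriding its factory method and stashing the (spied) instance in a MutableObject, so the test can later verify that close() was called on it. Below is a stripped-down sketch of the same capture-from-an-override trick without Mockito or the Druid classes; Worker, Resource, and makeResource are hypothetical names.

import org.apache.commons.lang3.mutable.MutableObject;

public class CaptureFromOverrideSketch {
    static class Resource implements AutoCloseable {
        boolean closed;
        @Override
        public void close() {
            closed = true;
        }
    }

    static class Worker {
        Resource makeResource() {
            return new Resource();
        }
        void run() {
            try (Resource r = makeResource()) {
                // work with r
            }
        }
    }

    public static void main(String[] args) {
        // Capture the internally created Resource so the caller can assert on it afterwards.
        MutableObject<Resource> captured = new MutableObject<>();
        Worker worker = new Worker() {
            @Override
            Resource makeResource() {
                captured.setValue(super.makeResource());
                return captured.getValue();
            }
        };
        worker.run();
        // Prints "closed = true": the captured instance is the one the Worker actually used.
        System.out.println("closed = " + captured.getValue().closed);
    }
}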
Use of org.apache.commons.lang3.mutable.MutableObject in project xm-ms-entity by xm-online.
Class XmEntityResourceExtendedIntTest, method testAttachmentStartDate.
@Test
@Transactional
public void testAttachmentStartDate() throws Exception {
Attachment attachment = new Attachment().typeKey("A").name("1");
XmEntity entity = new XmEntity().name(" ").key(randomUUID()).typeKey("TEST_DELETE").attachments(asSet(attachment));
MutableObject<Instant> startDate = new MutableObject<>();
MutableObject<XmEntity> entityHolder = new MutableObject<>();
byte[] content = TestUtil.convertObjectToJsonBytes(entity);
restXmEntityMockMvc.perform(post("/api/xm-entities").contentType(TestUtil.APPLICATION_JSON_UTF8).content(content)).andDo(r -> entityHolder.setValue(readValue(r))).andDo(r -> log.info(r.getResponse().getContentAsString())).andExpect(status().is2xxSuccessful());
Long id = entityHolder.getValue().getId();
restXmEntityMockMvc.perform(get("/api/xm-entities/" + id)).andDo(r -> startDate.setValue(readValue(r).getAttachments().iterator().next().getStartDate())).andDo(r -> log.info(r.getResponse().getContentAsString())).andExpect(status().is2xxSuccessful());
assertNotNull(startDate.getValue());
content = TestUtil.convertObjectToJsonBytes(entityHolder.getValue());
restXmEntityMockMvc.perform(put("/api/xm-entities").contentType(TestUtil.APPLICATION_JSON_UTF8).content(content)).andDo(r -> log.info(r.getResponse().getContentAsString())).andExpect(status().is2xxSuccessful());
restXmEntityMockMvc.perform(get("/api/xm-entities/" + id)).andDo(r -> log.info(r.getResponse().getContentAsString())).andDo(r -> assertEquals(startDate.getValue(), readValue(r).getAttachments().iterator().next().getStartDate())).andExpect(status().is2xxSuccessful());
}
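This MockMvc test uses MutableObject holders (entityHolder, startDate) to carry values out of andDo callbacks from one request into the next, since each callback only sees the response inside its own scope. The sketch below shows that carry-a-value-between-callback-steps pattern with a toy callback API; performRequest and the id parsing are hypothetical, not MockMvc.

import java.util.function.Consumer;
import org.apache.commons.lang3.mutable.MutableObject;

public class CarryBetweenCallbacksSketch {
    // A toy callback-style API: the caller only sees the response inside the handler it passes in.
    static void performRequest(String response, Consumer<String> handler) {
        handler.accept(response);
    }

    public static void main(String[] args) {
        MutableObject<String> createdId = new MutableObject<>();
        // Step 1: capture a value produced inside the first callback ...
        performRequest("id=42", response -> createdId.setValue(response.substring(3)));
        // Step 2: ... and reuse it to build the next request.
        performRequest("GET /entities/" + createdId.getValue(), System.out::println);
    }
}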