
Example 46 with MutableObject

Use of org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.

The class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInRelationshipTypeIndex.

@Test
void corruptionInRelationshipTypeIndex() throws Exception {
    MutableObject<Long> rootNode = new MutableObject<>();
    Path relationshipTypeScanStoreFile = relationshipTypeScanStoreFile();
    corruptIndexes(readOnly(), (tree, inspection) -> {
        rootNode.setValue(inspection.getRootNode());
        tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
    }, relationshipTypeScanStoreFile);
    ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance());
    assertFalse(result.isSuccessful());
    assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
    assertResultContainsMessage(result, "Number of inconsistent RELATIONSHIP_TYPE_SCAN_DOCUMENT records: 1");
}
Also used : Path(java.nio.file.Path) MutableObject(org.apache.commons.lang3.mutable.MutableObject) Test(org.junit.jupiter.api.Test)
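
These GBPTree corruption tests all follow the same pattern: the corruption callback runs as a lambda, and a lambda can only read effectively-final locals, so a MutableObject<Long> carries the inspected root node id back out to the assertions. Below is a minimal sketch of that capture pattern, using a plain java.util.function.Consumer as a stand-in for the Neo4j callback (the class name and the value 42 are made up for illustration):

import java.util.function.Consumer;

import org.apache.commons.lang3.mutable.MutableObject;

class CaptureFromLambda {
    public static void main(String[] args) {
        // The local reference stays effectively final; only the value it holds changes.
        MutableObject<Long> rootNode = new MutableObject<>();
        Consumer<Long> inspection = id -> rootNode.setValue(id);
        inspection.accept(42L);
        System.out.println("captured root node: " + rootNode.getValue());
    }
}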

Example 47 with MutableObject

Use of org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.

The class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInCountsStore.

@Test
void corruptionInCountsStore() throws Exception {
    MutableObject<Long> rootNode = new MutableObject<>();
    Path countsStoreFile = countsStoreFile();
    final LayoutBootstrapper countsLayoutBootstrapper = (indexFile, pageCache, meta) -> new CountsLayout();
    corruptIndexes(fs, readOnly(), (tree, inspection) -> {
        rootNode.setValue(inspection.getRootNode());
        tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
    }, countsLayoutBootstrapper, countsStoreFile);
    ConsistencyFlags flags = new ConsistencyFlags(false, false, true);
    ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance(), flags);
    assertFalse(result.isSuccessful());
    assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
    assertResultContainsMessage(result, "Number of inconsistent COUNTS records: 1");
}
Also used : Path(java.nio.file.Path) BeforeEach(org.junit.jupiter.api.BeforeEach) DatabaseReadOnlyChecker.readOnly(org.neo4j.configuration.helpers.DatabaseReadOnlyChecker.readOnly) GBPTreeCorruption.pageSpecificCorruption(org.neo4j.index.internal.gbptree.GBPTreeCorruption.pageSpecificCorruption) CursorContext(org.neo4j.io.pagecache.context.CursorContext) Config(org.neo4j.configuration.Config) NullLogProvider(org.neo4j.logging.NullLogProvider) InspectingVisitor(org.neo4j.index.internal.gbptree.InspectingVisitor) DatabaseLayout(org.neo4j.io.layout.DatabaseLayout) DEFAULT_DATABASE_NAME(org.neo4j.configuration.GraphDatabaseSettings.DEFAULT_DATABASE_NAME) TestInstance(org.junit.jupiter.api.TestInstance) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) NATIVE30(org.neo4j.configuration.GraphDatabaseSettings.SchemaIndex.NATIVE30) BeforeAll(org.junit.jupiter.api.BeforeAll) MutableObject(org.apache.commons.lang3.mutable.MutableObject) Transaction(org.neo4j.graphdb.Transaction) Path(java.nio.file.Path) UncloseableDelegatingFileSystemAbstraction(org.neo4j.io.fs.UncloseableDelegatingFileSystemAbstraction) DatabaseReadOnlyChecker.writable(org.neo4j.configuration.helpers.DatabaseReadOnlyChecker.writable) TestDirectory(org.neo4j.test.rule.TestDirectory) String.format(java.lang.String.format) GraphDatabaseAPI(org.neo4j.kernel.internal.GraphDatabaseAPI) Test(org.junit.jupiter.api.Test) List(java.util.List) NATIVE_BTREE10(org.neo4j.configuration.GraphDatabaseSettings.SchemaIndex.NATIVE_BTREE10) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Writer(java.io.Writer) DatabaseReadOnlyChecker(org.neo4j.configuration.helpers.DatabaseReadOnlyChecker) DatabaseManagementService(org.neo4j.dbms.api.DatabaseManagementService) IOUtils.readLines(org.apache.commons.io.IOUtils.readLines) NONE(org.neo4j.internal.helpers.progress.ProgressMonitorFactory.NONE) ImmutableLongList(org.eclipse.collections.api.list.primitive.ImmutableLongList) GBPTreeInspection(org.neo4j.index.internal.gbptree.GBPTreeInspection) Arrays(org.bouncycastle.util.Arrays) Label(org.neo4j.graphdb.Label) GraphDatabaseSettings(org.neo4j.configuration.GraphDatabaseSettings) LogProvider(org.neo4j.logging.LogProvider) NULL(org.neo4j.io.pagecache.tracing.PageCacheTracer.NULL) JobSchedulerFactory.createInitialisedScheduler(org.neo4j.kernel.impl.scheduler.JobSchedulerFactory.createInitialisedScheduler) GBPTreeBootstrapper(org.neo4j.index.internal.gbptree.GBPTreeBootstrapper) Node(org.neo4j.graphdb.Node) ArrayList(java.util.ArrayList) GBPTree(org.neo4j.index.internal.gbptree.GBPTree) TestDatabaseManagementServiceBuilder(org.neo4j.test.TestDatabaseManagementServiceBuilder) GraphDatabaseService(org.neo4j.graphdb.GraphDatabaseService) SchemaLayouts(org.neo4j.kernel.impl.index.schema.SchemaLayouts) CountsLayout(org.neo4j.internal.counts.CountsLayout) GBPTreeCorruption(org.neo4j.index.internal.gbptree.GBPTreeCorruption) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) JobScheduler(org.neo4j.scheduler.JobScheduler) FileHandle(org.neo4j.io.fs.FileHandle) DEFAULT(org.neo4j.consistency.checking.full.ConsistencyFlags.DEFAULT) UTF_8(java.nio.charset.StandardCharsets.UTF_8) StringWriter(java.io.StringWriter) DatabaseManagementServiceBuilder(org.neo4j.dbms.api.DatabaseManagementServiceBuilder) IOException(java.io.IOException) IndexDirectoryStructure(org.neo4j.kernel.api.index.IndexDirectoryStructure) ProgressMonitorFactory(org.neo4j.internal.helpers.progress.ProgressMonitorFactory) 
GBPTreePointerType(org.neo4j.index.internal.gbptree.GBPTreePointerType) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ConsistencyCheckIncompleteException(org.neo4j.consistency.checking.full.ConsistencyCheckIncompleteException) DefaultFileSystemAbstraction(org.neo4j.io.fs.DefaultFileSystemAbstraction) EphemeralFileSystemAbstraction(org.neo4j.io.fs.EphemeralFileSystemAbstraction) NO_FLUSH_ON_CLOSE(org.neo4j.index.internal.gbptree.GBPTreeOpenOptions.NO_FLUSH_ON_CLOSE) GraphDatabaseSettings.neo4j_home(org.neo4j.configuration.GraphDatabaseSettings.neo4j_home) ConsistencyFlags(org.neo4j.consistency.checking.full.ConsistencyFlags) LayoutBootstrapper(org.neo4j.index.internal.gbptree.LayoutBootstrapper) FileSystemAbstraction(org.neo4j.io.fs.FileSystemAbstraction) LayoutBootstrapper(org.neo4j.index.internal.gbptree.LayoutBootstrapper) ConsistencyFlags(org.neo4j.consistency.checking.full.ConsistencyFlags) CountsLayout(org.neo4j.internal.counts.CountsLayout) MutableObject(org.apache.commons.lang3.mutable.MutableObject) Test(org.junit.jupiter.api.Test)

Example 48 with MutableObject

Use of org.apache.commons.lang3.mutable.MutableObject in project neo4j by neo4j.

The class ConsistencyCheckWithCorruptGBPTreeIT, method corruptionInIndexStatisticsStore.

@Test
void corruptionInIndexStatisticsStore() throws Exception {
    MutableObject<Long> rootNode = new MutableObject<>();
    Path indexStatisticsStoreFile = indexStatisticsStoreFile();
    corruptIndexes(readOnly(), (tree, inspection) -> {
        rootNode.setValue(inspection.getRootNode());
        tree.unsafe(pageSpecificCorruption(rootNode.getValue(), GBPTreeCorruption.broken(GBPTreePointerType.leftSibling())), CursorContext.NULL);
    }, indexStatisticsStoreFile);
    ConsistencyCheckService.Result result = runConsistencyCheck(NullLogProvider.getInstance());
    assertFalse(result.isSuccessful());
    assertResultContainsMessage(result, "Index inconsistency: Broken pointer found in tree node " + rootNode.getValue() + ", pointerType='left sibling'");
    assertResultContainsMessage(result, "Number of inconsistent INDEX_STATISTICS records: 1");
}
Also used : Path(java.nio.file.Path) MutableObject(org.apache.commons.lang3.mutable.MutableObject) Test(org.junit.jupiter.api.Test)
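
corruptIndexes(...) itself is a helper of the test class and is not shown in these snippets; judging from the call sites, it opens each GBPTree file, builds an inspection of it, and hands both to the caller's lambda. The sketch below is only a hypothetical harness with that shape; Tree, Inspection, and the hard-coded root id are invented for illustration and are not the Neo4j API:

import java.util.function.BiConsumer;

import org.apache.commons.lang3.mutable.MutableObject;

class CorruptionHarnessSketch {
    // Hypothetical stand-ins for the GBPTree and its inspection result.
    static final class Tree {}
    static final class Inspection {
        final long rootNode;
        Inspection(long rootNode) { this.rootNode = rootNode; }
    }

    // Rough shape suggested by the call sites: open the tree, inspect it,
    // then let the test decide which corruption to apply.
    static void corruptIndexes(BiConsumer<Tree, Inspection> corruption) {
        corruption.accept(new Tree(), new Inspection(7L));
    }

    public static void main(String[] args) {
        MutableObject<Long> rootNode = new MutableObject<>();
        corruptIndexes((tree, inspection) -> rootNode.setValue(inspection.rootNode));
        System.out.println("root node seen by the lambda: " + rootNode.getValue());
    }
}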

Example 49 with MutableObject

Use of org.apache.commons.lang3.mutable.MutableObject in project hbase by apache.

The class RSRpcServices, method scan.

/**
 * Scan data in a table.
 *
 * @param controller the RPC controller
 * @param request the scan request
 * @throws ServiceException wrapping any I/O or lease error raised while processing the scan
 */
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request) throws ServiceException {
    if (controller != null && !(controller instanceof HBaseRpcController)) {
        throw new UnsupportedOperationException("We only do " + "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
    }
    if (!request.hasScannerId() && !request.hasScan()) {
        throw new ServiceException(new DoNotRetryIOException("Missing required input: scannerId or scan"));
    }
    try {
        checkOpen();
    } catch (IOException e) {
        if (request.hasScannerId()) {
            String scannerName = toScannerName(request.getScannerId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Server shutting down and client tried to access missing scanner " + scannerName);
            }
            final LeaseManager leaseManager = server.getLeaseManager();
            if (leaseManager != null) {
                try {
                    leaseManager.cancelLease(scannerName);
                } catch (LeaseException le) {
                    // No problem, ignore
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
                    }
                }
            }
        }
        throw new ServiceException(e);
    }
    requestCount.increment();
    rpcScanRequestCount.increment();
    RegionScannerHolder rsh;
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    String scannerName;
    try {
        if (request.hasScannerId()) {
            // The downstream projects such as AsyncHBase in OpenTSDB need this value. See HBASE-18000
            // for more details.
            long scannerId = request.getScannerId();
            builder.setScannerId(scannerId);
            scannerName = toScannerName(scannerId);
            rsh = getRegionScanner(request);
        } else {
            Pair<String, RegionScannerHolder> scannerNameAndRSH = newRegionScanner(request, builder);
            scannerName = scannerNameAndRSH.getFirst();
            rsh = scannerNameAndRSH.getSecond();
        }
    } catch (IOException e) {
        if (e == SCANNER_ALREADY_CLOSED) {
            // An old client may still send a close request for a scanner we have already closed
            // automatically. Just ignore it and return.
            return builder.build();
        }
        throw new ServiceException(e);
    }
    if (rsh.fullRegionScan) {
        rpcFullScanRequestCount.increment();
    }
    HRegion region = rsh.r;
    LeaseManager.Lease lease;
    try {
        // Remove the lease while it's being processed in the server; protects against the case
        // where processing of the request takes longer than the lease expiration time.
        lease = server.getLeaseManager().removeLease(scannerName);
    } catch (LeaseException e) {
        throw new ServiceException(e);
    }
    if (request.hasRenew() && request.getRenew()) {
        // add back and return
        addScannerLeaseBack(lease);
        try {
            checkScanNextCallSeq(request, rsh);
        } catch (OutOfOrderScannerNextException e) {
            throw new ServiceException(e);
        }
        return builder.build();
    }
    OperationQuota quota;
    try {
        quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
    } catch (IOException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    try {
        checkScanNextCallSeq(request, rsh);
    } catch (OutOfOrderScannerNextException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    // Now we have increased the next call sequence. If we give the client an error, the retry will
    // never succeed, so we'd better close the scanner and return a DoNotRetryIOException to the
    // client, and then the client will try to open a new scanner.
    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
    // this is scan.getCaching
    int rows;
    if (request.hasNumberOfRows()) {
        rows = request.getNumberOfRows();
    } else {
        rows = closeScanner ? 0 : 1;
    }
    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
    // now let's do the real scan.
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    RegionScanner scanner = rsh.s;
    // This is the limit of rows for this scan; if the number of rows reaches this value, we will
    // close the scanner.
    int limitOfRows;
    if (request.hasLimitOfRows()) {
        limitOfRows = request.getLimitOfRows();
    } else {
        limitOfRows = -1;
    }
    MutableObject<Object> lastBlock = new MutableObject<>();
    boolean scannerClosed = false;
    try {
        List<Result> results = new ArrayList<>(Math.min(rows, 512));
        if (rows > 0) {
            boolean done = false;
            // Call coprocessor. Get region info from scanner.
            if (region.getCoprocessorHost() != null) {
                Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
                if (!results.isEmpty()) {
                    for (Result r : results) {
                        lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
                    }
                }
                if (bypass != null && bypass.booleanValue()) {
                    done = true;
                }
            }
            if (!done) {
                scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows, results, builder, lastBlock, context);
            } else {
                builder.setMoreResultsInRegion(!results.isEmpty());
            }
        } else {
            // This is an open-scanner call with numberOfRows = 0, so set more results in region to true.
            builder.setMoreResultsInRegion(true);
        }
        quota.addScanResult(results);
        addResults(builder, results, (HBaseRpcController) controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()), isClientCellBlockSupport(context));
        if (scanner.isFilterDone() && results.isEmpty()) {
            // If the scanner's filter - if any - is done with the scan,
            // only set moreResults to false if the results are empty. This is used to stay compatible
            // with the old scan implementation where we just ignore the returned results if moreResults
            // is false. Can remove the isEmpty check after we get rid of the old implementation.
            builder.setMoreResults(false);
        }
        // We may close the scanner later depending on this flag, so make sure it has
        // already been set by this point.
        assert builder.hasMoreResultsInRegion();
        // We only set moreResults to false in the code above, so set it to true if it has not been
        // set yet.
        if (!builder.hasMoreResults()) {
            builder.setMoreResults(true);
        }
        if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
            // Record the last cell of the last result if it is a partial result
            // We need this to calculate the complete rows we have returned to client as the
            // mayHaveMoreCellsInRow is true does not mean that there will be extra cells for the
            // current row. We may filter out all the remaining cells for the current row and just
            // return the cells of the nextRow when calling RegionScanner.nextRaw. So here we need to
            // check for row change.
            Result lastResult = results.get(results.size() - 1);
            if (lastResult.mayHaveMoreCellsInRow()) {
                rsh.rowOfLastPartialResult = lastResult.getRow();
            } else {
                rsh.rowOfLastPartialResult = null;
            }
        }
        if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
            scannerClosed = true;
            closeScanner(region, scanner, scannerName, context);
        }
        return builder.build();
    } catch (IOException e) {
        try {
            // scanner is closed here
            scannerClosed = true;
            // The scanner state might be left in a dirty state, so we will tell the Client to
            // fail this RPC and close the scanner while opening up another one from the start of
            // row that the client has last seen.
            closeScanner(region, scanner, scannerName, context);
            // Rethrow a DoNotRetryIOException as-is so it reaches the client unchanged. See the
            // ClientScanner code to see how it deals with these special exceptions.
            if (e instanceof DoNotRetryIOException) {
                throw e;
            }
            // Wrap a FileNotFoundException in a DoNotRetryIOException. This can avoid the retry in
            // ClientScanner.
            if (e instanceof FileNotFoundException) {
                throw new DoNotRetryIOException(e);
            }
            // The scanner is already closed on the server side, so for newer clients we throw
            // a special exception to save an RPC.
            if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
                // 1.4.0+ clients know how to handle
                throw new ScannerResetException("Scanner is closed on the server-side", e);
            } else {
                // older clients do not know about SRE. Just throw USE, which they will handle
                throw new UnknownScannerException("Throwing UnknownScannerException to reset the client" + " scanner state for clients older than 1.3.", e);
            }
        } catch (IOException ioe) {
            throw new ServiceException(ioe);
        }
    } finally {
        if (!scannerClosed) {
            // the closeCallBack will be set in closeScanner so here we only care about shippedCallback
            if (context != null) {
                context.setCallBack(rsh.shippedCallback);
            } else {
                // When context != null, adding back the lease will be done in callback set above.
                addScannerLeaseBack(lease);
            }
        }
        quota.close();
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) MutableObject(org.apache.commons.lang3.mutable.MutableObject) RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) Lease(org.apache.hadoop.hbase.regionserver.LeaseManager.Lease) ScanResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) MutableObject(org.apache.commons.lang3.mutable.MutableObject)
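
In scan() above, lastBlock is a MutableObject<Object> because the "last seen block" has to be threaded through both the coprocessor loop and the nested scan(...) call: addSize (not shown here) returns the block backing the last cell, which lets the next iteration avoid double-counting a block shared by consecutive results. The sketch below only illustrates that threading; the addSize stand-in and the use of interned strings as "blocks" are simplified placeholders, not the HBase implementation:

import java.util.List;

import org.apache.commons.lang3.mutable.MutableObject;

class ResultSizeAccountingSketch {
    // Placeholder for HBase's addSize: returns the "block" backing this result so the
    // caller can skip re-counting it when the next result points at the same block.
    static Object addSize(Object lastBlock, String result) {
        Object block = result.intern(); // pretend interned strings are shared blocks
        if (block != lastBlock) {
            System.out.println("counting block for: " + result);
        }
        return block;
    }

    public static void main(String[] args) {
        MutableObject<Object> lastBlock = new MutableObject<>();
        for (String r : List.of("row1", "row1", "row2")) {
            lastBlock.setValue(addSize(lastBlock.getValue(), r));
        }
    }
}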

Example 50 with MutableObject

Use of org.apache.commons.lang3.mutable.MutableObject in project hbase by apache.

The class RegionReplicationSink, method send.

private void send() {
    List<SinkEntry> toSend = new ArrayList<>();
    long totalSize = 0L;
    boolean hasMetaEdit = false;
    for (SinkEntry entry; ; ) {
        entry = entries.poll();
        if (entry == null) {
            break;
        }
        toSend.add(entry);
        totalSize += entry.size;
        hasMetaEdit |= entry.edit.isMetaEdit();
        if (toSend.size() >= batchCountCapacity || totalSize >= batchSizeCapacity) {
            break;
        }
    }
    int toSendReplicaCount = regionReplication - 1 - failedReplicas.size();
    if (toSendReplicaCount <= 0) {
        return;
    }
    long rpcTimeoutNsToUse;
    long operationTimeoutNsToUse;
    if (!hasMetaEdit) {
        rpcTimeoutNsToUse = rpcTimeoutNs;
        operationTimeoutNsToUse = operationTimeoutNs;
    } else {
        rpcTimeoutNsToUse = metaEditRpcTimeoutNs;
        operationTimeoutNsToUse = metaEditOperationTimeoutNs;
    }
    sending = true;
    List<WAL.Entry> walEntries = toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList());
    AtomicInteger remaining = new AtomicInteger(toSendReplicaCount);
    Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
    for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
        if (failedReplicas.contains(replicaId)) {
            continue;
        }
        MutableObject<Throwable> error = new MutableObject<>();
        replica2Error.put(replicaId, error);
        RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId);
        FutureUtils.addListener(conn.replicate(replica, walEntries, retries, rpcTimeoutNsToUse, operationTimeoutNsToUse), (r, e) -> {
            error.setValue(e);
            if (remaining.decrementAndGet() == 0) {
                onComplete(toSend, replica2Error);
            }
        });
    }
}
Also used : FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) FlushAction(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction) Preconditions(org.apache.hbase.thirdparty.com.google.common.base.Preconditions) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) IntHashSet(org.agrona.collections.IntHashSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StringUtils(org.apache.hadoop.util.StringUtils) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) WAL(org.apache.hadoop.hbase.wal.WAL) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) MutableObject(org.apache.commons.lang3.mutable.MutableObject) ServerCall(org.apache.hadoop.hbase.ipc.ServerCall) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) Logger(org.slf4j.Logger) Set(java.util.Set) RegionReplicaUtil(org.apache.hadoop.hbase.client.RegionReplicaUtil) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) CellUtil(org.apache.hadoop.hbase.CellUtil) List(java.util.List) FutureUtils(org.apache.hadoop.hbase.util.FutureUtils) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Optional(java.util.Optional) Queue(java.util.Queue) ArrayDeque(java.util.ArrayDeque) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MutableObject(org.apache.commons.lang3.mutable.MutableObject)
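
In send() above, each live replica gets its own MutableObject<Throwable>: the replication callback records that replica's failure (or leaves it null on success), and the shared AtomicInteger counts down so the completion handler fires exactly once, after every replica has reported back. Below is a self-contained sketch of the same fan-out pattern, with CompletableFuture standing in for conn.replicate and a print standing in for onComplete; the simulated failure of replica 2 is made up for illustration:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.lang3.mutable.MutableObject;

class FanOutErrorCollectionSketch {
    public static void main(String[] args) {
        int replicaCount = 3;
        Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
        AtomicInteger remaining = new AtomicInteger(replicaCount);
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        for (int replicaId = 1; replicaId <= replicaCount; replicaId++) {
            MutableObject<Throwable> error = new MutableObject<>();
            replica2Error.put(replicaId, error);
            final int id = replicaId; // lambdas need an effectively-final copy
            futures.add(CompletableFuture
                .runAsync(() -> {
                    if (id == 2) {
                        throw new RuntimeException("replica " + id + " failed");
                    }
                })
                .whenComplete((r, e) -> {
                    // Record this replica's outcome; a null value means success.
                    error.setValue(e);
                    if (remaining.decrementAndGet() == 0) {
                        System.out.println("all replicas responded, errors: " + replica2Error);
                    }
                }));
        }
        try {
            // Only needed in this demo so the JVM does not exit before the callbacks run.
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
        } catch (Exception expected) {
            // Replica 2's simulated failure propagates here; it is already recorded above.
        }
    }
}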

Aggregations

MutableObject (org.apache.commons.lang3.mutable.MutableObject): 111 usages
ILogicalExpression (org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression): 60 usages
LogicalVariable (org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable): 58 usages
ArrayList (java.util.ArrayList): 55 usages
Mutable (org.apache.commons.lang3.mutable.Mutable): 55 usages
VariableReferenceExpression (org.apache.hyracks.algebricks.core.algebra.expressions.VariableReferenceExpression): 52 usages
ILogicalOperator (org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator): 49 usages
ScalarFunctionCallExpression (org.apache.hyracks.algebricks.core.algebra.expressions.ScalarFunctionCallExpression): 28 usages
Pair (org.apache.hyracks.algebricks.common.utils.Pair): 27 usages
AssignOperator (org.apache.hyracks.algebricks.core.algebra.operators.logical.AssignOperator): 23 usages
List (java.util.List): 22 usages
AbstractFunctionCallExpression (org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression): 21 usages
ConstantExpression (org.apache.hyracks.algebricks.core.algebra.expressions.ConstantExpression): 18 usages
ILogicalPlan (org.apache.hyracks.algebricks.core.algebra.base.ILogicalPlan): 17 usages
AggregateFunctionCallExpression (org.apache.hyracks.algebricks.core.algebra.expressions.AggregateFunctionCallExpression): 14 usages
Test (org.junit.Test): 14 usages
GbyVariableExpressionPair (org.apache.asterix.lang.common.expression.GbyVariableExpressionPair): 13 usages
UnnestingFunctionCallExpression (org.apache.hyracks.algebricks.core.algebra.expressions.UnnestingFunctionCallExpression): 13 usages
NestedTupleSourceOperator (org.apache.hyracks.algebricks.core.algebra.operators.logical.NestedTupleSourceOperator): 13 usages
HashSet (java.util.HashSet): 12 usages