
Example 36 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From class ServerCacheClient, method removeServerCache.

/**
     * Remove the cached table from all region servers
     * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
     * @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
     * @throws SQLException
     * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
     */
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    Throwable lastThrowable = null;
    TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
    final PTable cacheUsingTable = cacheUsingTableRef.getTable();
    byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
    HTableInterface iterateOverTable = services.getTable(tableName);
    try {
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /*
         * Allow for the possibility that the region we used to decide where to send our cache
         * has split and been relocated to another region server *after* we sent it, but before
         * we removed it. To accommodate this, we iterate through the current metadata boundaries
         * and remove the cache once for each server that we originally sent to.
         */
        if (LOG.isDebugEnabled()) {
            LOG.debug(addCustomAnnotations("Removing Cache " + Bytes.toStringBinary(cacheId) + " from servers.", connection));
        }
        for (HRegionLocation entry : locations) {
            if (remainingOnServers.contains(entry)) {
                // Call once per server
                try {
                    byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                    iterateOverTable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {

                        @Override
                        public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                            ServerRpcController controller = new ServerRpcController();
                            BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback = new BlockingRpcCallback<RemoveServerCacheResponse>();
                            RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                            final byte[] tenantIdBytes;
                            if (cacheUsingTable.isMultiTenant()) {
                                try {
                                    tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                } catch (SQLException e) {
                                    throw new IOException(e);
                                }
                            } else {
                                tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                            }
                            if (tenantIdBytes != null) {
                                builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                            }
                            builder.setCacheId(ByteStringer.wrap(cacheId));
                            instance.removeServerCache(controller, builder.build(), rpcCallback);
                            if (controller.getFailedOn() != null) {
                                throw controller.getFailedOn();
                            }
                            return rpcCallback.get();
                        }
                    });
                    remainingOnServers.remove(entry);
                } catch (Throwable t) {
                    lastThrowable = t;
                    LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
                }
            }
        }
        if (!remainingOnServers.isEmpty()) {
            LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
        }
    } finally {
        Closeables.closeQuietly(iterateOverTable);
    }
}
Also used:
RemoveServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse)
SQLException (java.sql.SQLException)
ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService)
IOException (java.io.IOException)
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController)
PTable (org.apache.phoenix.schema.PTable)
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch)
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback)
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
TableRef (org.apache.phoenix.schema.TableRef)
HashSet (java.util.HashSet)
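The reusable shape in this example is the acquire/release discipline around the table handle: the HTableInterface comes from ConnectionQueryServices.getTable(...) and is always released in a finally block via closeQuietly, so a cleanup failure cannot mask the primary exception. A minimal sketch of that shape, assuming an already-open PhoenixConnection and the same Phoenix Closeables utility the example relies on (the helper name withPhysicalTable is hypothetical):

import java.sql.SQLException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.util.Closeables;

public class TableHandleSketch {

    // Hypothetical helper: acquire a table handle from the connection's
    // query services, work against it, and always release it.
    static void withPhysicalTable(PhoenixConnection connection, byte[] physicalName) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        HTableInterface table = services.getTable(physicalName);
        try {
            // ... issue coprocessor calls or raw HBase operations here ...
        } finally {
            // Close quietly so a cleanup failure cannot mask the primary
            // exception, mirroring the finally block in the example above.
            Closeables.closeQuietly(table);
        }
    }
}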

Example 37 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From class HashJoinPlan, method iterator.

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = delegate.getContext().getScan();
    }
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
    if (joinInfo != null) {
        hashClient = hashClient != null ? hashClient : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }
    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<ServerCache>() {

            @Override
            public ServerCache call() throws Exception {
                ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
                return cache;
            }

            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }
    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            ServerCache result = futures.get(i).get();
            if (result != null) {
                dependencies.add(result);
            }
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (firstException == null) {
                firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException();
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
            }
        }
    }
    if (firstException != null) {
        SQLCloseables.closeAllQuietly(dependencies);
        throw firstException;
    }
    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
        }
    }
    if (joinInfo != null) {
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }
    ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }
    return iterator;
}
Also used:
ServerCache (org.apache.phoenix.cache.ServerCacheClient.ServerCache)
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)
SQLException (java.sql.SQLException)
FilterResultIterator (org.apache.phoenix.iterate.FilterResultIterator)
ResultIterator (org.apache.phoenix.iterate.ResultIterator)
JobCallable (org.apache.phoenix.job.JobManager.JobCallable)
HashCacheClient (org.apache.phoenix.join.HashCacheClient)
PTable (org.apache.phoenix.schema.PTable)
StatementContext (org.apache.phoenix.compile.StatementContext)
SelectStatement (org.apache.phoenix.parse.SelectStatement)
AtomicLong (java.util.concurrent.atomic.AtomicLong)
Expression (org.apache.phoenix.expression.Expression)
LiteralExpression (org.apache.phoenix.expression.LiteralExpression)
InListExpression (org.apache.phoenix.expression.InListExpression)
RowValueConstructorExpression (org.apache.phoenix.expression.RowValueConstructorExpression)
SQLParser (org.apache.phoenix.parse.SQLParser)
ExecutorService (java.util.concurrent.ExecutorService)
Future (java.util.concurrent.Future)
ParseNode (org.apache.phoenix.parse.ParseNode)
ExecutionException (java.util.concurrent.ExecutionException)
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
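Stripped of the Phoenix types, the scheduling skeleton here is general: submit one task per sub-plan to the executor obtained from ConnectionQueryServices.getExecutor(), then drain every future, remembering only the first failure so all submitted work is observed before anything is thrown. A minimal sketch of that skeleton using only JDK types; the class and method names are illustrative, not Phoenix API:

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public class FanOutSketch {

    // Submit all tasks, then wait for each one; remember the first failure
    // but keep draining so every future is observed before throwing.
    static <T> List<T> runAll(ExecutorService executor, List<Callable<T>> tasks) throws SQLException {
        List<Future<T>> futures = new ArrayList<>(tasks.size());
        for (Callable<T> task : tasks) {
            futures.add(executor.submit(task));
        }
        List<T> results = new ArrayList<>(tasks.size());
        SQLException firstException = null;
        for (int i = 0; i < futures.size(); i++) {
            try {
                results.add(futures.get(i).get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                if (firstException == null) {
                    firstException = new SQLException("Sub task [" + i + "] interrupted.", e);
                }
            } catch (ExecutionException e) {
                if (firstException == null) {
                    firstException = new SQLException("Sub task [" + i + "] failed.", e.getCause());
                }
            }
        }
        if (firstException != null) {
            throw firstException; // surface the first failure only after all futures settle
        }
        return results;
    }
}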

Example 38 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From class BaseResultIterators, method getIterators.

/**
     * Executes the scan in parallel across all regions, blocking until all scans are complete.
     * @return the result iterators for the scan of each region
     */
@Override
public List<PeekingResultIterator> getIterators() throws SQLException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this, ScanUtil.getCustomAnnotations(scan)));
    }
    boolean isReverse = ScanUtil.isReversed(scan);
    boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;
    final ConnectionQueryServices services = context.getConnection().getQueryServices();
    // Convert the statement's query timeout into an absolute deadline
    final long startTime = System.currentTimeMillis();
    final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis();
    int numScans = size();
    // Capture all iterators so that if something goes wrong, we close them all
    // The iterators list is based on the submission of work, so it may not
    // contain them all (for example if work was rejected from the queue)
    Queue<PeekingResultIterator> allIterators = new ConcurrentLinkedQueue<>();
    List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numScans);
    ScanWrapper previousScan = new ScanWrapper(null);
    return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse, maxQueryEndTime, splits.size(), previousScan);
}
Also used:
ArrayList (java.util.ArrayList)
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue)
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
Hint (org.apache.phoenix.parse.HintNode.Hint)
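A detail worth noting is that the method turns the statement's relative timeout into an absolute deadline (maxQueryEndTime) once, up front, so every later scan submission compares against a single fixed instant instead of re-reading a timeout. A minimal sketch of that conversion; the 60-second value is purely illustrative:

public class QueryDeadlineSketch {

    /** Convert a relative timeout into an absolute wall-clock deadline. */
    static long deadlineFor(long queryTimeoutMillis) {
        return System.currentTimeMillis() + queryTimeoutMillis;
    }

    /** True once the deadline has passed; cheap enough to check per scan. */
    static boolean isExpired(long deadline) {
        return System.currentTimeMillis() > deadline;
    }

    public static void main(String[] args) {
        long deadline = deadlineFor(60_000L); // e.g. a 60s statement timeout
        System.out.println("expired: " + isExpired(deadline));
    }
}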

Aggregations

ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 38
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Connection (java.sql.Connection): 14
SQLException (java.sql.SQLException): 12
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 9
PTable (org.apache.phoenix.schema.PTable): 9
Test (org.junit.Test): 9
ResultSet (java.sql.ResultSet): 8
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 8
ArrayList (java.util.ArrayList): 7
Properties (java.util.Properties): 7
PreparedStatement (java.sql.PreparedStatement): 5
Put (org.apache.hadoop.hbase.client.Put): 5
Hint (org.apache.phoenix.parse.HintNode.Hint): 5
Scan (org.apache.hadoop.hbase.client.Scan): 4
MutationState (org.apache.phoenix.execute.MutationState): 4
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 4
DelegateConnectionQueryServices (org.apache.phoenix.query.DelegateConnectionQueryServices): 4
PColumn (org.apache.phoenix.schema.PColumn): 4
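The counts above point at the standard entry path: most call sites reach ConnectionQueryServices by unwrapping a plain java.sql.Connection into a PhoenixConnection. A minimal sketch of that path, assuming a reachable cluster behind the illustrative JDBC URL:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class UnwrapSketch {

    public static void main(String[] args) throws SQLException {
        // The URL is illustrative; substitute your own ZooKeeper quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // PhoenixConnection implements java.sql.Connection, so the
            // standard JDBC unwrap mechanism reaches the Phoenix internals.
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            ConnectionQueryServices services = pconn.getQueryServices();
            // From here the examples above apply: services.getTable(...),
            // services.getAllTableRegions(...), services.getExecutor(), etc.
        }
    }
}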