Example 1 with JobCallable

Use of org.apache.phoenix.job.JobManager.JobCallable in project phoenix by apache.

From the class ServerCacheClient, method addServerCache.

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /*
     * Execute the EndPoint in parallel on each server to send the compressed hash cache.
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
Also used: SQLException (java.sql.SQLException), SQLCloseable (org.apache.phoenix.util.SQLCloseable), Closeable (java.io.Closeable), ArrayList (java.util.ArrayList), JobCallable (org.apache.phoenix.job.JobManager.JobCallable), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController), PTable (org.apache.phoenix.schema.PTable), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback), HashSet (java.util.HashSet), ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService), MemoryChunk (org.apache.phoenix.memory.MemoryManager.MemoryChunk), AddServerCacheRequest (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest), IOException (java.io.IOException), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), ServerCacheFactory (org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), AddServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
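
Taken together, the example shows the complete JobCallable contract: call() does the work, getJobId() names the round-robin group under which JobManager time-slices the job, and getTaskExecutionMetric() supplies the metrics sink. Below is a minimal, self-contained sketch of that contract; the class name CountJob, the readMetrics and tableName parameters, and the placeholder computation are hypothetical, and the metrics holder is constructed the same way the iterator examples below construct theirs.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.monitoring.ReadMetricQueue;
import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;

public class CountJob {

    public Future<Long> submit(ExecutorService executor, ReadMetricQueue readMetrics, String tableName) {
        // Built the same way SerialIterators and ParallelIterators build theirs.
        final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(readMetrics, tableName);
        return executor.submit(new JobCallable<Long>() {

            @Override
            public Long call() throws Exception {
                // Placeholder work; the real examples run coprocessor RPCs or scans here.
                return 42L;
            }

            @Override
            public Object getJobId() {
                // Callables sharing a job id are grouped and time-sliced together.
                return CountJob.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return taskMetrics;
            }
        });
    }
}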

Example 2 with JobCallable

Use of org.apache.phoenix.job.JobManager.JobCallable in project phoenix by apache.

From the class SerialIterators, method submitWork.

@Override
protected void submitWork(final List<List<Scan>> nestedScans, List<List<Pair<Scan, Future<PeekingResultIterator>>>> nestedFutures, final Queue<PeekingResultIterator> allIterators, int estFlattenedSize, boolean isReverse, final ParallelScanGrouper scanGrouper) {
    ExecutorService executor = context.getConnection().getQueryServices().getExecutor();
    final String tableName = tableRef.getTable().getPhysicalName().getString();
    final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(context.getReadMetricsQueue(), tableName);
    final PhoenixConnection conn = context.getConnection();
    final long renewLeaseThreshold = conn.getQueryServices().getRenewLeaseThresholdMilliSeconds();
    int expectedListSize = nestedScans.size() * 10;
    List<Scan> flattenedScans = Lists.newArrayListWithExpectedSize(expectedListSize);
    for (List<Scan> list : nestedScans) {
        flattenedScans.addAll(list);
    }
    if (!flattenedScans.isEmpty()) {
        if (isReverse) {
            flattenedScans = Lists.reverse(flattenedScans);
        }
        final List<Scan> finalScans = flattenedScans;
        Future<PeekingResultIterator> future = executor.submit(Tracing.wrap(new JobCallable<PeekingResultIterator>() {

            @Override
            public PeekingResultIterator call() throws Exception {
                PeekingResultIterator itr = new SerialIterator(finalScans, tableName, renewLeaseThreshold, offset);
                return itr;
            }

            /**
             * Defines the grouping for round robin behavior. All threads spawned to process
             * this scan will be grouped together and time sliced with other simultaneously
             * executing parallel scans.
             */
            @Override
            public Object getJobId() {
                return SerialIterators.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return taskMetrics;
            }
        }, "Serial scanner for table: " + tableRef.getTable().getPhysicalName().getString()));
        // Add our singleton Future which will execute serially
        nestedFutures.add(Collections.singletonList(new Pair<Scan, Future<PeekingResultIterator>>(flattenedScans.get(0), future)));
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), JobCallable (org.apache.phoenix.job.JobManager.JobCallable), ExecutorService (java.util.concurrent.ExecutorService), Scan (org.apache.hadoop.hbase.client.Scan), TaskExecutionMetricsHolder (org.apache.phoenix.monitoring.TaskExecutionMetricsHolder), Pair (org.apache.hadoop.hbase.util.Pair)
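
SerialIterators differs from the first example by routing the callable through Tracing.wrap, which ties the work to a trace span named by the description string (effectively a pass-through when tracing is not enabled). A minimal sketch of that wrapping, assuming the org.apache.phoenix.trace.util.Tracing helper and the wrap(callable, description) overload used above; the class name, table name, and placeholder work are hypothetical.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
import org.apache.phoenix.trace.util.Tracing;

public class TracedJob {

    public Future<String> submit(ExecutorService executor, final TaskExecutionMetricsHolder taskMetrics) {
        // Tracing.wrap decorates the callable so that, when tracing is on,
        // the work executes inside a span labeled with the description.
        return executor.submit(Tracing.wrap(new JobCallable<String>() {

            @Override
            public String call() throws Exception {
                return "done"; // placeholder work
            }

            @Override
            public Object getJobId() {
                return TracedJob.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return taskMetrics;
            }
        }, "Serial scanner for table: MY_TABLE"));
    }
}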

Example 3 with JobCallable

Use of org.apache.phoenix.job.JobManager.JobCallable in project phoenix by apache.

From the class HashJoinPlan, method iterator.

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = delegate.getContext().getScan();
    }
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
    if (joinInfo != null) {
        hashClient = hashClient != null ? hashClient : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }
    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<ServerCache>() {

            @Override
            public ServerCache call() throws Exception {
                ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
                return cache;
            }

            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }
    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            ServerCache result = futures.get(i).get();
            if (result != null) {
                dependencies.add(result);
            }
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (firstException == null) {
                firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException();
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
            }
        }
    }
    if (firstException != null) {
        SQLCloseables.closeAllQuietly(dependencies);
        throw firstException;
    }
    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
        }
    }
    if (joinInfo != null) {
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }
    ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }
    return iterator;
}
Also used: ServerCache (org.apache.phoenix.cache.ServerCacheClient.ServerCache), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), SQLException (java.sql.SQLException), FilterResultIterator (org.apache.phoenix.iterate.FilterResultIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), JobCallable (org.apache.phoenix.job.JobManager.JobCallable), HashCacheClient (org.apache.phoenix.join.HashCacheClient), PTable (org.apache.phoenix.schema.PTable), StatementContext (org.apache.phoenix.compile.StatementContext), SelectStatement (org.apache.phoenix.parse.SelectStatement), AtomicLong (java.util.concurrent.atomic.AtomicLong), Expression (org.apache.phoenix.expression.Expression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), InListExpression (org.apache.phoenix.expression.InListExpression), RowValueConstructorExpression (org.apache.phoenix.expression.RowValueConstructorExpression), SQLParser (org.apache.phoenix.parse.SQLParser), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), ParseNode (org.apache.phoenix.parse.ParseNode), ExecutionException (java.util.concurrent.ExecutionException), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
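
A detail worth copying from this example is the gather loop: every future is drained even after a failure, the interrupt flag is restored on InterruptedException, and only the first exception is re-thrown once all sub-plans have been collected (dependencies are closed quietly before the throw). A stripped-down sketch of the same pattern in plain Java, with all names hypothetical:

import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class FirstFailureGather {

    // Waits on every future, remembers only the first failure, and
    // re-throws it after all sub-tasks have been drained.
    static <T> void awaitAll(List<Future<T>> futures) throws SQLException {
        SQLException firstException = null;
        for (int i = 0; i < futures.size(); i++) {
            try {
                futures.get(i).get();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                if (firstException == null) {
                    firstException = new SQLException("Sub plan [" + i + "] execution interrupted.", e);
                }
            } catch (ExecutionException e) {
                if (firstException == null) {
                    firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
                }
            }
        }
        if (firstException != null) {
            throw firstException;
        }
    }
}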

Example 4 with JobCallable

Use of org.apache.phoenix.job.JobManager.JobCallable in project phoenix by apache.

From the class ParallelIterators, method submitWork.

@Override
protected void submitWork(final List<List<Scan>> nestedScans, List<List<Pair<Scan, Future<PeekingResultIterator>>>> nestedFutures, final Queue<PeekingResultIterator> allIterators, int estFlattenedSize, final boolean isReverse, ParallelScanGrouper scanGrouper) throws SQLException {
    // Pre-populate nestedFutures lists so that we can shuffle the scans
    // and add the future to the right nested list. By shuffling the scans
    // we get better utilization of the cluster since our thread executor
    // will spray the scans across machines as opposed to targeting a
    // single one since the scans are in row key order.
    ExecutorService executor = context.getConnection().getQueryServices().getExecutor();
    List<ScanLocator> scanLocations = Lists.newArrayListWithExpectedSize(estFlattenedSize);
    for (int i = 0; i < nestedScans.size(); i++) {
        List<Scan> scans = nestedScans.get(i);
        int numScans = scans.size();
        List<Pair<Scan, Future<PeekingResultIterator>>> futures = Lists.newArrayListWithExpectedSize(numScans);
        nestedFutures.add(futures);
        for (int j = 0; j < numScans; j++) {
            Scan scan = nestedScans.get(i).get(j);
            scanLocations.add(new ScanLocator(scan, i, j, j == 0, (j == numScans - 1)));
            // placeholder
            futures.add(null);
        }
    }
    // Shuffle so that we start execution across many machines
    // before we fill up the thread pool
    Collections.shuffle(scanLocations);
    ReadMetricQueue readMetrics = context.getReadMetricsQueue();
    final String physicalTableName = tableRef.getTable().getPhysicalName().getString();
    int numScans = scanLocations.size();
    context.getOverallQueryMetrics().updateNumParallelScans(numScans);
    GLOBAL_NUM_PARALLEL_SCANS.update(numScans);
    final long renewLeaseThreshold = context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
    boolean isRequestMetricsEnabled = readMetrics.isRequestMetricsEnabled();
    for (final ScanLocator scanLocation : scanLocations) {
        final Scan scan = scanLocation.getScan();
        final ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, physicalTableName, scan, isRequestMetricsEnabled);
        final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(readMetrics, physicalTableName);
        final TableResultIterator tableResultItr = context.getConnection().getTableResultIteratorFactory().newIterator(mutationState, tableRef, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper);
        context.getConnection().addIteratorForLeaseRenewal(tableResultItr);
        Future<PeekingResultIterator> future = executor.submit(Tracing.wrap(new JobCallable<PeekingResultIterator>() {

            @Override
            public PeekingResultIterator call() throws Exception {
                long startTime = System.currentTimeMillis();
                if (logger.isDebugEnabled()) {
                    // Note: startTime is captured on the line above, so the elapsed
                    // time logged here is effectively zero; the entry mainly records
                    // which scan this worker picked up.
                    logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
                }
                PeekingResultIterator iterator = iteratorFactory.newIterator(context, tableResultItr, scan, physicalTableName, ParallelIterators.this.plan);
                if (initFirstScanOnly) {
                    if ((!isReverse && scanLocation.isFirstScan()) || (isReverse && scanLocation.isLastScan())) {
                        // Fill the scanner's cache. This helps reduce latency since we are parallelizing the I/O needed.
                        iterator.peek();
                    }
                } else {
                    iterator.peek();
                }
                allIterators.add(iterator);
                return iterator;
            }

            /**
             * Defines the grouping for round robin behavior. All threads spawned to process
             * this scan will be grouped together and time sliced with other simultaneously
             * executing parallel scans.
             */
            @Override
            public Object getJobId() {
                return ParallelIterators.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return taskMetrics;
            }
        }, "Parallel scanner for table: " + tableRef.getTable().getPhysicalName().getString()));
        // Add our future in the right place so that we can concatenate the
        // results of the inner futures versus merge sorting across all of them.
        nestedFutures.get(scanLocation.getOuterListIndex()).set(scanLocation.getInnerListIndex(), new Pair<Scan, Future<PeekingResultIterator>>(scan, future));
    }
}
Also used: ReadMetricQueue (org.apache.phoenix.monitoring.ReadMetricQueue), JobCallable (org.apache.phoenix.job.JobManager.JobCallable), ScanMetricsHolder (org.apache.phoenix.monitoring.ScanMetricsHolder), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), Scan (org.apache.hadoop.hbase.client.Scan), TaskExecutionMetricsHolder (org.apache.phoenix.monitoring.TaskExecutionMetricsHolder), Pair (org.apache.hadoop.hbase.util.Pair)
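
The bookkeeping here is easy to miss: the nested futures lists are pre-filled with null placeholders, the scan submission order is shuffled to spread load across region servers, and each future is set back into its original slot so downstream code can still concatenate or merge-sort results in row-key order. A stripped-down sketch of the same placeholder-then-set pattern with plain integers, all names hypothetical:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

final class ShuffledSubmit {

    static List<Future<Integer>> submitShuffled(ExecutorService executor, List<Integer> tasks) {
        // Pre-populate with null placeholders so each future can be set
        // back into its original slot after the shuffled submission.
        List<Future<Integer>> futures = new ArrayList<Future<Integer>>(Collections.<Future<Integer>>nCopies(tasks.size(), null));
        List<Integer> order = new ArrayList<Integer>();
        for (int i = 0; i < tasks.size(); i++) {
            order.add(i);
        }
        // Shuffle the submission order, not the result order.
        Collections.shuffle(order);
        for (final int slot : order) {
            final int value = tasks.get(slot);
            futures.set(slot, executor.submit(() -> value * value)); // placeholder work
        }
        return futures; // still in the original (row-key) order
    }
}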

Aggregations

ExecutorService (java.util.concurrent.ExecutorService): 4 examples
JobCallable (org.apache.phoenix.job.JobManager.JobCallable): 4 examples
Future (java.util.concurrent.Future): 3 examples
SQLException (java.sql.SQLException): 2 examples
Scan (org.apache.hadoop.hbase.client.Scan): 2 examples
Pair (org.apache.hadoop.hbase.util.Pair): 2 examples
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 2 examples
TaskExecutionMetricsHolder (org.apache.phoenix.monitoring.TaskExecutionMetricsHolder): 2 examples
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 2 examples
PTable (org.apache.phoenix.schema.PTable): 2 examples
Closeable (java.io.Closeable): 1 example
IOException (java.io.IOException): 1 example
ArrayList (java.util.ArrayList): 1 example
HashSet (java.util.HashSet): 1 example
ExecutionException (java.util.concurrent.ExecutionException): 1 example
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1 example
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 1 example
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 1 example
BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 1 example
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 1 example