
Example 6 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class RoundRobinResultIteratorIT, the method setupTableForSplit:

private static int setupTableForSplit(String tableName) throws Exception {
    int batchSize = 25;
    int maxFileSize = 1024 * 10;
    int payLoadSize = 1024;
    String payload;
    StringBuilder buf = new StringBuilder();
    for (int i = 0; i < payLoadSize; i++) {
        buf.append('a');
    }
    payload = buf.toString();
    int MIN_CHAR = 'a';
    int MAX_CHAR = 'z';
    Connection conn = getConnection();
    conn.createStatement().execute("CREATE TABLE " + tableName + "(" + "a VARCHAR PRIMARY KEY, b VARCHAR) " + HTableDescriptor.MAX_FILESIZE + "=" + maxFileSize + "," + " SALT_BUCKETS = " + NUM_SALT_BUCKETS);
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
    int rowCount = 0;
    for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
        for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
            String pk = Character.toString((char) c1) + Character.toString((char) c2);
            stmt.setString(1, pk);
            stmt.setString(2, payload);
            stmt.execute();
            rowCount++;
            if (rowCount % batchSize == 0) {
                conn.commit();
            }
        }
    }
    conn.commit();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.flush(tableName);
    } finally {
        admin.close();
    }
    conn.close();
    return rowCount;
}
Also used : HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PreparedStatement(java.sql.PreparedStatement) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
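
Every one of these test setups reaches the HBase administrative API the same way: unwrap the java.sql.Connection to a PhoenixConnection, ask it for its ConnectionQueryServices, and borrow an HBaseAdmin from it. A minimal sketch of that idiom on its own, assuming a local Phoenix cluster (the helper name, JDBC URL, and table name are placeholders, not values from the test above):

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

// Hypothetical helper showing the unwrap idiom in isolation.
static void flushTable(String tableName) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        // Unwrap the JDBC connection to Phoenix's own connection type to reach ConnectionQueryServices
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            // Force the memstore to disk so later splits and scans see HFiles rather than in-memory data
            admin.flush(tableName);
        } finally {
            admin.close();
        }
    }
}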

Example 7 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class ServerCacheClient, the method addServerCache:

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /**
         * Execute EndPoint in parallel on each server to send compressed hash cache 
         */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuider.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                         * Defines the grouping for round robin behavior.  All threads spawned to process
                         * this scan will be grouped together and time sliced with other simultaneously
                         * executing parallel scans.
                         */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
Also used : SQLException(java.sql.SQLException) SQLCloseable(org.apache.phoenix.util.SQLCloseable) Closeable(java.io.Closeable) ArrayList(java.util.ArrayList) JobCallable(org.apache.phoenix.job.JobManager.JobCallable) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) PTable(org.apache.phoenix.schema.PTable) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) HashSet(java.util.HashSet) ServerCachingService(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService) MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) AddServerCacheRequest(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest) IOException(java.io.IOException) SQLException(java.sql.SQLException) IOException(java.io.IOException) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ServerCacheFactory(org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) AddServerCacheResponse(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
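
The core of addServerCache is a fan-out: one JobCallable is submitted per region server whose regions intersect the scan's key ranges, each future is awaited with the configured thread timeout (QueryServices.THREAD_TIMEOUT_MS_ATTRIB), and all outstanding work is cancelled if anything fails. A stripped-down sketch of that fan-out-and-wait skeleton in plain Java, with the per-server coprocessor RPC replaced by a hypothetical stand-in (the executor, method names, and list of servers are illustrative, not the Phoenix API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for the per-server coprocessor RPC made in the code above.
static boolean sendCacheTo(String server) {
    return true;
}

// Simplified fan-out-and-wait skeleton: one task per target server, each awaited with a
// timeout, and everything cancelled if any call fails before success is recorded.
static boolean sendToAllServers(List<String> servers, long timeoutMs) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(1, servers.size()));
    List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>(servers.size());
    boolean success = false;
    try {
        for (final String server : servers) {
            futures.add(executor.submit(new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                    return sendCacheTo(server);
                }
            }));
        }
        for (Future<Boolean> future : futures) {
            // Bound each wait, as addServerCache does with the thread timeout
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        success = true;
    } finally {
        if (!success) {
            for (Future<Boolean> future : futures) {
                future.cancel(true);
            }
        }
        executor.shutdown();
    }
    return success;
}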

Example 8 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class DeleteCompiler, the method deleteRows:

private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
    PTable table = targetTableRef.getTable();
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
    List<Map<ImmutableBytesPtr, RowMutationState>> indexMutations = null;
    // If there are index tables to maintain, we delete the rows from both the index tables and
    // the data table through a single query to save executing an additional one.
    if (!indexTableRefs.isEmpty()) {
        indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
        for (int i = 0; i < indexTableRefs.size(); i++) {
            indexMutations.add(Maps.<ImmutableBytesPtr, RowMutationState>newHashMapWithExpectedSize(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        int rowCount = 0;
        while (rs.next()) {
            // allocate new as this is a key in a Map
            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
            // If the source and target tables are the same, the row key from the scan can be used
            // directly; there's no translation required.
            if (sourceTableRef.equals(targetTableRef)) {
                rs.getCurrentRow().getKey(ptr);
            } else {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i + 1 - offset);
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                table.newKey(ptr, values);
            }
            // When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
            // row key will already have its value. 
            mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            for (int i = 0; i < indexTableRefs.size(); i++) {
                // allocate new as this is a key in a Map
                ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
                rs.getCurrentRow().getKey(indexPtr);
                indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < indexTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (indexMutations != null) {
                    indexMutations.clear();
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < indexTableRefs.size(); i++) {
            // To prevent the counting of these index rows, we have a negative for remainingRows.
            MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) PName(org.apache.phoenix.schema.PName) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) Map(java.util.Map) HashMap(java.util.HashMap) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
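
deleteRows accumulates one RowMutationState per deleted row and, when auto commit is on, pushes the accumulated mutations to the server every batchSize rows so the client never holds more than the configured maximum. The same batch-and-commit rhythm can be reproduced over plain JDBC; a minimal sketch, assuming a hypothetical MY_TABLE with a single VARCHAR primary key (the JDBC URL, table name, and batch size are placeholders, not values taken from DeleteCompiler):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Minimal sketch of batched deletes over plain JDBC.
static int deleteInBatches(Iterable<String> keys) throws Exception {
    int batchSize = 1000;
    int rowCount = 0;
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        conn.setAutoCommit(false);
        try (PreparedStatement stmt = conn.prepareStatement("DELETE FROM MY_TABLE WHERE PK = ?")) {
            for (String key : keys) {
                stmt.setString(1, key);
                stmt.execute();
                rowCount++;
                // Flush a full batch to the server, mirroring the periodic send() in deleteRows()
                if (rowCount % batchSize == 0) {
                    conn.commit();
                }
            }
        }
        // Commit whatever is left in the final partial batch
        conn.commit();
    }
    return rowCount;
}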

Example 9 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class TestUtil, the method doMajorCompaction:

/**
     * Runs a major compaction, and then waits until the compaction is complete before returning.
     *
     * @param tableName name of the table to be compacted
     */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {
    tableName = SchemaUtil.normalizeIdentifier(tableName);
    // We simply write a marker row, request a major compaction, and then wait until the marker
    // row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction();
    }
    try (HTableInterface htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
        Put put = new Put(markerRowKey);
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
        HBaseAdmin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(tableName);
        hbaseAdmin.majorCompact(tableName);
        hbaseAdmin.close();
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
            try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOG.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOG.info("Compaction done: " + compactionDone);
            // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(tableName);
                hbaseAdmin.majorCompact(tableName);
                hbaseAdmin.close();
            }
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTable(org.apache.phoenix.schema.PTable) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
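
Because doMajorCompaction only returns once a raw scan can no longer see the marker row, a caller can rely on deleted cells having been physically removed when it completes. A hedged sketch of how a test might invoke it after deleting data (the JDBC URL, table name, and row key are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.TestUtil;

// Hypothetical caller: delete a row, then block until a major compaction has purged it.
static void compactAfterDelete() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        conn.createStatement().execute("DELETE FROM MY_TABLE WHERE PK = 'k1'");
        conn.commit();
        // Returns only after the helper's raw scan shows the deleted marker row is gone
        TestUtil.doMajorCompaction(conn, "MY_TABLE");
    }
}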

Example 10 with ConnectionQueryServices

Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class SkipScanAfterManualSplitIT, the method initTable:

private static void initTable(String tableName) throws Exception {
    Connection conn = getConnection();
    conn.createStatement().execute("CREATE TABLE " + tableName + "(" + "a VARCHAR PRIMARY KEY, b VARCHAR) " + HTableDescriptor.MAX_FILESIZE + "=" + MAX_FILESIZE + "," + " SALT_BUCKETS = 4");
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
    int rowCount = 0;
    for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
        for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
            String pk = Character.toString((char) c1) + Character.toString((char) c2);
            stmt.setString(1, pk);
            stmt.setString(2, PAYLOAD);
            stmt.execute();
            rowCount++;
            if (rowCount % BATCH_SIZE == 0) {
                conn.commit();
            }
        }
    }
    conn.commit();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.flush(tableName);
    } finally {
        admin.close();
    }
    conn.close();
}
Also used : HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PreparedStatement(java.sql.PreparedStatement) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
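
As the class name suggests, SkipScanAfterManualSplitIT follows this load with manual region splits before running its skip-scan queries. A hedged sketch of forcing such a split through the same ConnectionQueryServices handle (the split point is a placeholder; on a salted table the real split key would also carry the leading salt byte):

import java.sql.Connection;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

// Hypothetical follow-up to initTable(): split a region at an explicit key so the table
// ends up with more regions than salt buckets.
static void splitTable(Connection conn, String tableName) throws Exception {
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.split(Bytes.toBytes(tableName), Bytes.toBytes("m"));
    } finally {
        admin.close();
    }
}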

Aggregations

ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 38 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23 usages
Connection (java.sql.Connection): 14 usages
SQLException (java.sql.SQLException): 12 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 9 usages
PTable (org.apache.phoenix.schema.PTable): 9 usages
Test (org.junit.Test): 9 usages
ResultSet (java.sql.ResultSet): 8 usages
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 8 usages
ArrayList (java.util.ArrayList): 7 usages
Properties (java.util.Properties): 7 usages
PreparedStatement (java.sql.PreparedStatement): 5 usages
Put (org.apache.hadoop.hbase.client.Put): 5 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 5 usages
Scan (org.apache.hadoop.hbase.client.Scan): 4 usages
MutationState (org.apache.phoenix.execute.MutationState): 4 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4 usages
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 4 usages
DelegateConnectionQueryServices (org.apache.phoenix.query.DelegateConnectionQueryServices): 4 usages
PColumn (org.apache.phoenix.schema.PColumn): 4 usages