
Example 71 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

The class DeleteCompiler, method deleteRows:

private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
    PTable table = targetTableRef.getTable();
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
    List<Map<ImmutableBytesPtr, RowMutationState>> indexMutations = null;
    // If index table refs were passed in, we delete rows from both the index tables and
    // the data table through a single query to save executing an additional one.
    if (!indexTableRefs.isEmpty()) {
        indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
        for (int i = 0; i < indexTableRefs.size(); i++) {
            indexMutations.add(Maps.<ImmutableBytesPtr, RowMutationState>newHashMapWithExpectedSize(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        int rowCount = 0;
        while (rs.next()) {
            // allocate new as this is a key in a Map
            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
            // When the source and target tables match, the row key can be used directly, so
            // there's no translation required.
            if (sourceTableRef.equals(targetTableRef)) {
                rs.getCurrentRow().getKey(ptr);
            } else {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i + 1 - offset);
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                table.newKey(ptr, values);
            }
            // When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
            // row key will already have its value. 
            mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            for (int i = 0; i < indexTableRefs.size(); i++) {
                // allocate new as this is a key in a Map
                ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
                rs.getCurrentRow().getKey(indexPtr);
                indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < indexTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (indexMutations != null) {
                    indexMutations.clear();
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < indexTableRefs.size(); i++) {
            // To prevent the counting of these index rows, we have a negative for remainingRows.
            MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) PName(org.apache.phoenix.schema.PName) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) Map(java.util.Map) HashMap(java.util.HashMap) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
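
A minimal client-side sketch of how this delete path is typically reached. The JDBC URL, table name, and column name below are placeholders, not part of the Phoenix source above; with auto-commit enabled, deleteRows() commits a batch every batchSize rows as it iterates.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import org.apache.phoenix.jdbc.PhoenixConnection;

public class DeleteWithAutoCommitSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL; adjust to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // With auto-commit on, deleteRows() flushes mutations every batchSize rows.
            conn.setAutoCommit(true);
            // Unwrapping exposes Phoenix-specific state such as the mutate batch size.
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            System.out.println("Mutate batch size: " + pconn.getMutateBatchSize());
            try (Statement stmt = conn.createStatement()) {
                // Placeholder table and column names.
                stmt.executeUpdate("DELETE FROM MY_TABLE WHERE CREATED_DATE < CURRENT_DATE() - 30");
            }
        }
    }
}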

Example 72 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

The class DropSequenceCompiler, method compile:

public MutationPlan compile(final DropSequenceStatement sequence) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final MetaDataClient client = new MetaDataClient(connection);
    final StatementContext context = new StatementContext(statement);
    return new BaseMutationPlan(context, operation) {

        @Override
        public MutationState execute() throws SQLException {
            return client.dropSequence(sequence);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("DROP SEQUENCE"));
        }
    };
}
Also used : MetaDataClient(org.apache.phoenix.schema.MetaDataClient) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection)
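
A minimal sketch of the SQL that exercises this compile path; the sequence name and JDBC URL are placeholders. A DROP SEQUENCE issued through plain JDBC is turned by DropSequenceCompiler.compile() into a BaseMutationPlan whose execute() delegates to MetaDataClient.dropSequence().

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropSequenceSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("CREATE SEQUENCE IF NOT EXISTS my_sequence");
            // Compiled by DropSequenceCompiler and executed via MetaDataClient.dropSequence().
            stmt.executeUpdate("DROP SEQUENCE IF EXISTS my_sequence");
        }
    }
}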

Example 73 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

The class TestUtil, method doMajorCompaction:

/**
 * Runs a major compaction, and then waits until the compaction is complete before returning.
 *
 * @param conn connection used to write the marker row and drive the compaction
 * @param tableName name of the table to be compacted
 */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {
    tableName = SchemaUtil.normalizeIdentifier(tableName);
    // We simply write a marker row, request a major compaction, and then wait until the marker
    // row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction();
    }
    try (HTableInterface htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
        Put put = new Put(markerRowKey);
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
        HBaseAdmin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(tableName);
        hbaseAdmin.majorCompact(tableName);
        hbaseAdmin.close();
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
            try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOG.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOG.info("Compaction done: " + compactionDone);
            // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(tableName);
                hbaseAdmin.majorCompact(tableName);
                hbaseAdmin.close();
            }
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTable(org.apache.phoenix.schema.PTable) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
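
A hypothetical test snippet showing how doMajorCompaction() might be invoked; the JDBC URL and table name are placeholders. The helper only returns once the raw scan above no longer finds the marker row, so assertions that rely on physically removed cells can safely follow the call.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.TestUtil;

public class MajorCompactionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL; in Phoenix tests this usually comes from the test base class.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Blocks until the marker row written by the helper disappears from a raw scan.
            TestUtil.doMajorCompaction(conn, "MY_TABLE");
        }
    }
}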

Example 74 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

The class TestUtil, method getGuidePostsList:

public static Collection<GuidePostsInfo> getGuidePostsList(Connection conn, String tableName, String pkCol, byte[] lowerRange, byte[] upperRange, String whereClauseSuffix) throws SQLException {
    String whereClauseStart = (lowerRange == null && upperRange == null ? "" : " WHERE " + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + (upperRange != null ? (pkCol + " < ?") : "")));
    String whereClause = whereClauseSuffix == null ? whereClauseStart : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) : (" AND " + whereClauseSuffix);
    String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + tableName + whereClause;
    PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class);
    if (lowerRange != null) {
        pstmt.setBytes(1, lowerRange);
    }
    if (upperRange != null) {
        pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange);
    }
    pstmt.execute();
    TableRef tableRef = pstmt.getQueryPlan().getTableRef();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = tableRef.getTable();
    GuidePostsInfo info = pconn.getQueryServices().getTableStats(new GuidePostsKey(table.getName().getBytes(), SchemaUtil.getEmptyColumnFamily(table)));
    return Collections.singletonList(info);
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) GuidePostsKey(org.apache.phoenix.schema.stats.GuidePostsKey) GuidePostsInfo(org.apache.phoenix.schema.stats.GuidePostsInfo) PhoenixPreparedStatement(org.apache.phoenix.jdbc.PhoenixPreparedStatement) TableRef(org.apache.phoenix.schema.TableRef) PTable(org.apache.phoenix.schema.PTable)
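
A sketch of a caller, with placeholder table and primary-key column names. Passing null for both ranges and the where-clause suffix makes the helper run a plain NO_INDEX count over the table before reading the guideposts; running UPDATE STATISTICS first is an assumed test-setup step, not something the helper does itself.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Collection;
import org.apache.phoenix.schema.stats.GuidePostsInfo;
import org.apache.phoenix.util.TestUtil;

public class GuidePostsSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Refresh statistics so the guideposts reflect current data (assumed setup step).
            stmt.execute("UPDATE STATISTICS MY_TABLE");
            // No lower/upper range and no extra WHERE clause: the helper counts the whole table.
            Collection<GuidePostsInfo> guidePosts =
                    TestUtil.getGuidePostsList(conn, "MY_TABLE", "PK_COL", null, null, null);
            System.out.println("Guidepost entries: " + guidePosts.size());
        }
    }
}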

Example 75 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

The class TestUtil, method clearMetaDataCache:

public static void clearMetaDataCache(Connection conn) throws Throwable {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    HTableInterface htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearCacheResponse>() {

        @Override
        public ClearCacheResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<ClearCacheResponse> rpcCallback = new BlockingRpcCallback<ClearCacheResponse>();
            ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
            instance.clearCache(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) ClearCacheResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) IOException(java.io.IOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController)
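
A minimal usage sketch, assuming a test that changes schema through one connection and wants stale server-side metadata dropped before other connections read it; the JDBC URL is a placeholder. The helper issues the ClearCache coprocessor call shown above against every region of SYSTEM.CATALOG.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.TestUtil;

public class ClearMetaDataCacheSketch {
    public static void main(String[] args) throws Throwable {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Invokes MetaDataService.clearCache() on each SYSTEM.CATALOG region.
            TestUtil.clearMetaDataCache(conn);
        }
    }
}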

Aggregations

PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 216
Test (org.junit.Test): 111
Connection (java.sql.Connection): 57
Properties (java.util.Properties): 52
PTable (org.apache.phoenix.schema.PTable): 52
Scan (org.apache.hadoop.hbase.client.Scan): 51
PhoenixPreparedStatement (org.apache.phoenix.jdbc.PhoenixPreparedStatement): 48
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 47
PTableKey (org.apache.phoenix.schema.PTableKey): 43
ResultSet (java.sql.ResultSet): 41
PreparedStatement (java.sql.PreparedStatement): 40
SQLException (java.sql.SQLException): 40
Filter (org.apache.hadoop.hbase.filter.Filter): 29
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 29
RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter): 28
TestUtil.multiEncodedKVFilter (org.apache.phoenix.util.TestUtil.multiEncodedKVFilter): 28
TestUtil.singleKVFilter (org.apache.phoenix.util.TestUtil.singleKVFilter): 28
Statement (java.sql.Statement): 19
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17
PColumn (org.apache.phoenix.schema.PColumn): 17
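
The pairing of Connection, Properties, and PhoenixConnection at the top of these counts reflects the setup most of the examples share: open a JDBC connection with custom properties and unwrap it to the Phoenix class. A minimal sketch, assuming the standard jdbc:phoenix URL form and the TenantId connection property (PhoenixRuntime.TENANT_ID_ATTRIB); the URL and tenant value are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import org.apache.phoenix.jdbc.PhoenixConnection;

public class UnwrapPhoenixConnectionSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Optional: scope the connection to a single tenant (placeholder tenant id).
        props.setProperty("TenantId", "tenant1");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            // unwrap() gives access to Phoenix-specific APIs used throughout the examples above.
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            System.out.println("Tenant: " + pconn.getTenantId());
        }
    }
}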