Example 21 with PTable

Use of org.apache.phoenix.schema.PTable in project phoenix by apache.

From the class BaseResultIterators, the method initializeScan:

private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // Only take the single-family shortcut below when the table has exactly
            // one declared column family; otherwise the projected family might
            // not match the actual column families of the table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables, 
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value unless the column family containing it has
                        // been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
        if (cols > 0 && keyOnlyFilter
                && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN)
                && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount()
                && plan.getGroupBy().isOrderPreserving()
                && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan, new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // we use this flag on the server side to determine which value column qualifier to use in the key value we return from server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When analyzing the table, there is no lookup of key values being done,
        // so there is no point setting the qualifier ranges.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
Also used: NavigableSet (java.util.NavigableSet), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), DistinctPrefixFilter (org.apache.phoenix.filter.DistinctPrefixFilter), PColumnFamily (org.apache.phoenix.schema.PColumnFamily), PTable (org.apache.phoenix.schema.PTable), Hint (org.apache.phoenix.parse.HintNode.Hint), StatementContext (org.apache.phoenix.compile.StatementContext), RowProjector (org.apache.phoenix.compile.RowProjector), FilterableStatement (org.apache.phoenix.parse.FilterableStatement), PageFilter (org.apache.hadoop.hbase.filter.PageFilter), TableRef (org.apache.phoenix.schema.TableRef)
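The ScanUtil.andFilterAtBeginning and ScanUtil.andFilterAtEnd calls above chain a new filter onto whatever filter the Scan already carries. Below is a minimal sketch of what such composition could look like using only stock HBase classes; the andFilterAtEnd helper here is a stand-in for illustration, not Phoenix's actual implementation.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;

public class ScanFilterSketch {

    // Append a filter so that both it and any existing filter must pass.
    static void andFilterAtEnd(Scan scan, Filter filter) {
        Filter existing = scan.getFilter();
        if (existing == null) {
            scan.setFilter(filter);
        } else {
            scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, filter));
        }
    }

    public static void main(String[] args) {
        Scan scan = new Scan();
        andFilterAtEnd(scan, new FirstKeyOnlyFilter()); // only the first KeyValue per row
        andFilterAtEnd(scan, new PageFilter(100));      // cap rows returned per region
        System.out.println(scan.getFilter());
    }
}

Note that PageFilter limits rows per region server rather than globally, which is one reason initializeScan treats it as a per-scan limit (perScanLimit) rather than as the query's final LIMIT.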

Example 22 with PTable

Use of org.apache.phoenix.schema.PTable in project phoenix by apache.

From the class PhoenixIndexFailurePolicy, the method getLocalIndexNames:

private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref, Collection<Mutation> mutations) throws IOException {
    Set<String> indexTableNames = new HashSet<String>(1);
    PhoenixConnection conn = null;
    try {
        conn = QueryUtil.getConnectionOnServer(this.env.getConfiguration()).unwrap(PhoenixConnection.class);
        PTable dataTable = PhoenixRuntime.getTableNoCache(conn, ref.getTableName());
        List<PTable> indexes = dataTable.getIndexes();
        // The first active local index is used to get the view id from the index mutation row key.
        PTable localIndex = null;
        Map<ImmutableBytesWritable, String> localIndexNames = new HashMap<ImmutableBytesWritable, String>();
        for (PTable index : indexes) {
            if (index.getIndexType() == IndexType.LOCAL && index.getIndexState() == PIndexState.ACTIVE) {
                if (localIndex == null)
                    localIndex = index;
                localIndexNames.put(new ImmutableBytesWritable(MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId())), index.getName().getString());
            }
        }
        if (localIndex == null) {
            return Collections.emptySet();
        }
        IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
        HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
        int offset = regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo.getStartKey().length;
        byte[] viewId = null;
        for (Mutation mutation : mutations) {
            viewId = indexMaintainer.getViewIndexIdFromIndexRowKey(new ImmutableBytesWritable(mutation.getRow(), offset, mutation.getRow().length - offset));
            String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
            indexTableNames.add(indexTableName);
        }
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                throw new IOException(e);
            }
        }
    }
    return indexTableNames;
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), HashMap (java.util.HashMap), SQLException (java.sql.SQLException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), IOException (java.io.IOException), PTable (org.apache.phoenix.schema.PTable), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Mutation (org.apache.hadoop.hbase.client.Mutation), HashSet (java.util.HashSet)
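The try/finally plumbing above exists only to close the connection and surface SQLException as IOException. A sketch of the same control flow with try-with-resources, assuming a plain JDBC URL for illustration (the original instead unwraps a server-side PhoenixConnection via QueryUtil.getConnectionOnServer):

import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class CloseSketch {

    // Connection is AutoCloseable, so try-with-resources closes it on every
    // path; a SQLException from the body or from close() becomes an IOException.
    static void withConnection(String url) throws IOException {
        try (Connection conn = DriverManager.getConnection(url)) {
            // ... resolve the data table and scan its local indexes here ...
        } catch (SQLException e) {
            throw new IOException(e);
        }
    }
}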

Example 23 with PTable

Use of org.apache.phoenix.schema.PTable in project phoenix by apache.

From the class BaseResultIterators, the method getGuidePosts:

private GuidePostsInfo getGuidePosts() throws SQLException {
    if (!useStats() || !StatisticsUtil.isStatsEnabled(TableName.valueOf(physicalTableName))) {
        return GuidePostsInfo.NO_GUIDEPOST;
    }
    TreeSet<byte[]> whereConditions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (Pair<byte[], byte[]> where : context.getWhereConditionColumns()) {
        byte[] cf = where.getFirst();
        if (cf != null) {
            whereConditions.add(cf);
        }
    }
    PTable table = getTable();
    byte[] defaultCF = SchemaUtil.getEmptyColumnFamily(getTable());
    byte[] cf = null;
    if (!table.getColumnFamilies().isEmpty() && !whereConditions.isEmpty()) {
        for (Pair<byte[], byte[]> where : context.getWhereConditionColumns()) {
            byte[] whereCF = where.getFirst();
            if (Bytes.compareTo(defaultCF, whereCF) == 0) {
                cf = defaultCF;
                break;
            }
        }
        if (cf == null) {
            cf = context.getWhereConditionColumns().get(0).getFirst();
        }
    }
    if (cf == null) {
        cf = defaultCF;
    }
    GuidePostsKey key = new GuidePostsKey(physicalTableName, cf);
    return context.getConnection().getQueryServices().getTableStats(key);
}
Also used: GuidePostsKey (org.apache.phoenix.schema.stats.GuidePostsKey), TreeSet (java.util.TreeSet), PTable (org.apache.phoenix.schema.PTable)
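The column-family selection above boils down to a three-step precedence: use the table's default (empty) column family if any WHERE condition references it, otherwise fall back to a WHERE column family, otherwise the default. A self-contained sketch of that precedence; chooseFamily is a hypothetical name, not a Phoenix API:

import java.util.Arrays;
import java.util.List;

public class GuidePostFamilySketch {

    // Mirror the precedence in getGuidePosts(): prefer the default CF when the
    // WHERE clause touches it, else the first non-null WHERE CF, else the default.
    static byte[] chooseFamily(byte[] defaultCF, List<byte[]> whereFamilies) {
        for (byte[] cf : whereFamilies) {
            if (cf != null && Arrays.equals(defaultCF, cf)) {
                return defaultCF;
            }
        }
        for (byte[] cf : whereFamilies) {
            if (cf != null) {
                return cf;
            }
        }
        return defaultCF;
    }

    public static void main(String[] args) {
        byte[] picked = chooseFamily("0".getBytes(),
                Arrays.asList("CF1".getBytes(), "0".getBytes()));
        System.out.println(new String(picked)); // prints "0": the default CF wins
    }
}

Since a GuidePostsKey is scoped to one column family, picking a family the query actually reads keeps the returned statistics relevant to the scan being parallelized.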

Example 24 with PTable

Use of org.apache.phoenix.schema.PTable in project phoenix by apache.

From the class ConnectionQueryServicesImpl, the method metaDataMutated:

/**
 * Ensures that metaData mutations are handled in the correct order.
 */
private PMetaData metaDataMutated(PName tenantId, String tableName, long tableSeqNum, Mutator mutator) throws SQLException {
    synchronized (latestMetaDataLock) {
        throwConnectionClosedIfNullMetaData();
        PMetaData metaData = latestMetaData;
        PTable table;
        long endTime = System.currentTimeMillis() + DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS;
        while (true) {
            try {
                try {
                    table = metaData.getTableRef(new PTableKey(tenantId, tableName)).getTable();
                    /* If the table is at the prior sequence number, then we're good to go.
                     * We know if we've got this far, that the server validated the mutations,
                     * so we'd just need to wait until the other connection that mutated the same
                     * table is processed.
                     */
                    if (table.getSequenceNumber() + 1 == tableSeqNum) {
                        // TODO: assert that timeStamp is bigger than the table timeStamp?
                        mutator.mutate(metaData);
                        break;
                    } else if (table.getSequenceNumber() >= tableSeqNum) {
                        logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
                        break;
                    }
                } catch (TableNotFoundException e) {
                    // Table not in the client cache yet; fall through and wait for it.
                }
                long waitTime = endTime - System.currentTimeMillis();
                // We waited long enough - just remove the table from the cache,
                // and the next time it's used it'll be pulled over from the server.
                if (waitTime <= 0) {
                    logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS / 1000) + " seconds for " + tableName);
                    // There will never be a parentTableName here, as that would only
                    // be non-null for an index, and we never add/remove columns from an index.
                    metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
                    break;
                }
                latestMetaDataLock.wait(waitTime);
            } catch (InterruptedException e) {
                // restore the interrupt status
                Thread.currentThread().interrupt();
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
            }
        }
        latestMetaData = metaData;
        latestMetaDataLock.notifyAll();
        return metaData;
    }
}
Also used: TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException), PMetaData (org.apache.phoenix.schema.PMetaData), KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder), PTableKey (org.apache.phoenix.schema.PTableKey), PTable (org.apache.phoenix.schema.PTable)
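The while loop above is the standard timed-wait-under-a-monitor pattern: re-check the condition after every wakeup and recompute the remaining wait, so spurious wakeups cannot extend the deadline. A stripped-down sketch of the same pattern:

public class TimedWaitSketch {

    private final Object lock = new Object();
    private boolean ready = false;

    // Wait up to timeoutMs for the condition, tolerating spurious wakeups by
    // recomputing the remaining time on every pass, as metaDataMutated does.
    boolean awaitReady(long timeoutMs) throws InterruptedException {
        long endTime = System.currentTimeMillis() + timeoutMs;
        synchronized (lock) {
            while (!ready) {
                long waitTime = endTime - System.currentTimeMillis();
                if (waitTime <= 0) {
                    return false; // deadline passed; give up, like the cache-eviction branch
                }
                lock.wait(waitTime);
            }
            return true;
        }
    }

    void signalReady() {
        synchronized (lock) {
            ready = true;
            lock.notifyAll(); // wake all waiters, mirroring latestMetaDataLock.notifyAll()
        }
    }
}

On InterruptedException the original restores the interrupt status with Thread.currentThread().interrupt() before converting to a SQLException, which is the conventional way to avoid swallowing an interrupt.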

Example 25 with PTable

Use of org.apache.phoenix.schema.PTable in project phoenix by apache.

From the class ConnectionQueryServicesImpl, the method dropTable:

@Override
public MetaDataMutationResult dropTable(final List<Mutation> tableMetaData, final PTableType tableType, final boolean cascade) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes);
    final MetaDataMutationResult result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropTableRequest.Builder builder = DropTableRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setTableType(tableType.getSerializedValue());
            builder.setCascade(cascade);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropTable(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    final MutationCode code = result.getMutationCode();
    switch(code) {
        case TABLE_ALREADY_EXISTS:
            ReadOnlyProps props = this.getProps();
            boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
            PTable table = result.getTable();
            if (dropMetadata) {
                flushParentPhysicalTable(table);
                dropTables(result.getTableNamesToDelete());
            } else {
                invalidateTableStats(result.getTableNamesToDelete());
            }
            long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData);
            if (tableType == PTableType.TABLE) {
                byte[] physicalName = table.getPhysicalName().getBytes();
                ensureViewIndexTableDropped(physicalName, timestamp);
                ensureLocalIndexTableDropped(physicalName, timestamp);
                tableStatsCache.invalidateAll(table);
            }
            break;
        default:
            break;
    }
    return result;
}
Also used: MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder), IOException (java.io.IOException), PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException), ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController), MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto), MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode), PTable (org.apache.phoenix.schema.PTable), ReadOnlyProps (org.apache.phoenix.util.ReadOnlyProps), MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch (org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback), Mutation (org.apache.hadoop.hbase.client.Mutation), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
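SchemaUtil.getVarChars at the top of dropTable splits the first mutation's row key into tenant id, schema name, and table name, which Phoenix stores as zero-byte-separated VARCHARs. A sketch of that decomposition; splitVarChars is a hypothetical helper and assumes the 0x00 separator byte Phoenix uses in row keys:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RowKeySketch {

    // Split a row key of zero-byte-separated VARCHAR components, e.g.
    // tenantId 0x00 schemaName 0x00 tableName, into 'expected' parts.
    static byte[][] splitVarChars(byte[] rowKey, int expected) {
        List<byte[]> parts = new ArrayList<>(expected);
        int start = 0;
        for (int i = 0; i < rowKey.length && parts.size() < expected - 1; i++) {
            if (rowKey[i] == 0) {
                parts.add(Arrays.copyOfRange(rowKey, start, i));
                start = i + 1;
            }
        }
        parts.add(Arrays.copyOfRange(rowKey, start, rowKey.length));
        return parts.toArray(new byte[0][]);
    }

    public static void main(String[] args) {
        // A leading separator means an empty tenant id, as for a global table.
        byte[][] parts = splitVarChars("\u0000MYSCHEMA\u0000MYTABLE".getBytes(), 3);
        System.out.println(parts[0].length);      // 0 (no tenant id)
        System.out.println(new String(parts[1])); // MYSCHEMA
        System.out.println(new String(parts[2])); // MYTABLE
    }
}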

Aggregations

PTable (org.apache.phoenix.schema.PTable): 153 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 63 usages
PTableKey (org.apache.phoenix.schema.PTableKey): 48 usages
PColumn (org.apache.phoenix.schema.PColumn): 47 usages
Connection (java.sql.Connection): 35 usages
TableRef (org.apache.phoenix.schema.TableRef): 29 usages
SQLException (java.sql.SQLException): 28 usages
ArrayList (java.util.ArrayList): 28 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 28 usages
Test (org.junit.Test): 27 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 24 usages
Expression (org.apache.phoenix.expression.Expression): 24 usages
Scan (org.apache.hadoop.hbase.client.Scan): 21 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 21 usages
Properties (java.util.Properties): 20 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 17 usages
ColumnRef (org.apache.phoenix.schema.ColumnRef): 16 usages
IOException (java.io.IOException): 15 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 14 usages
PName (org.apache.phoenix.schema.PName): 14 usages