Example 31 with QueryPlan

Use of org.apache.phoenix.compile.QueryPlan in project phoenix by apache.

The class SqlQueryToColumnInfoFunction, method apply.

@Override
public List<ColumnInfo> apply(String sqlQuery) {
    Preconditions.checkNotNull(sqlQuery);
    Connection connection = null;
    List<ColumnInfo> columnInfos = null;
    try {
        connection = ConnectionUtil.getInputConnection(this.configuration);
        final Statement statement = connection.createStatement();
        final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
        final QueryPlan queryPlan = pstmt.compileQuery(sqlQuery);
        final List<? extends ColumnProjector> projectedColumns = queryPlan.getProjector().getColumnProjectors();
        // Lists.transform returns a lazy view over projectedColumns; it never
        // touches the connection, so it remains usable after close().
        columnInfos = Lists.transform(projectedColumns, new Function<ColumnProjector, ColumnInfo>() {

            @Override
            public ColumnInfo apply(final ColumnProjector columnProjector) {
                return new ColumnInfo(columnProjector.getName(), columnProjector.getExpression().getDataType().getSqlType());
            }
        });
    } catch (SQLException e) {
        LOG.error(String.format("Error [%s] parsing SELECT query [%s]", e.getMessage(), sqlQuery));
        throw new RuntimeException(e);
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException sqle) {
                LOG.error("Error closing connection!!");
                throw new RuntimeException(sqle);
            }
        }
    }
    return columnInfos;
}
Also used : Function(com.google.common.base.Function) SQLException(java.sql.SQLException) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Statement(java.sql.Statement) Connection(java.sql.Connection) ColumnInfo(org.apache.phoenix.util.ColumnInfo) QueryPlan(org.apache.phoenix.compile.QueryPlan) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) ColumnProjector(org.apache.phoenix.compile.ColumnProjector)
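
For reference, here is a minimal standalone sketch of the same compile-without-execute idiom. The JDBC URL "jdbc:phoenix:localhost" and the table MY_TABLE are hypothetical; the point is that compileQuery() resolves the projection without issuing a scan, so column names and SQL types are available before the query ever runs.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.compile.ColumnProjector;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixStatement;

public class DescribeSelect {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PhoenixStatement pstmt = conn.createStatement().unwrap(PhoenixStatement.class);
            // Compilation alone resolves the projection; no scan is issued yet.
            QueryPlan plan = pstmt.compileQuery("SELECT col1, col2 FROM MY_TABLE");
            for (ColumnProjector cp : plan.getProjector().getColumnProjectors()) {
                System.out.println(cp.getName() + " -> SQL type "
                        + cp.getExpression().getDataType().getSqlType());
            }
        }
    }
}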

Example 32 with QueryPlan

Use of org.apache.phoenix.compile.QueryPlan in project phoenix by apache.

The class MetaDataEndpointImpl, method createTable.

@Override
public void createTable(RpcController controller, CreateTableRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[][] rowKeyMetaData = new byte[3][];
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        byte[] parentSchemaName = null;
        byte[] parentTableName = null;
        PTableType tableType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
        byte[] parentTableKey = null;
        Mutation viewPhysicalTableRow = null;
        if (tableType == PTableType.VIEW) {
            byte[][] parentSchemaTableNames = new byte[2][];
            /*
                 * For a view, we lock the base physical table row. For a mapped view, there is 
                 * no link present to the physical table. So the viewPhysicalTableRow is null
                 * in that case.
                 */
            viewPhysicalTableRow = getPhysicalTableForView(tableMetadata, parentSchemaTableNames);
            parentSchemaName = parentSchemaTableNames[0];
            parentTableName = parentSchemaTableNames[1];
            if (parentTableName != null) {
                parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentSchemaName, parentTableName);
            }
        } else if (tableType == PTableType.INDEX) {
            parentSchemaName = schemaName;
            /* 
                 * For an index we lock the parent table's row which could be a physical table or a view.
                 * If the parent table is a physical table, then the tenantIdBytes is empty because
                 * we allow creating an index with a tenant connection only if the parent table is a view.
                 */
            parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
            parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName);
        }
        Region region = env.getRegion();
        List<RowLock> locks = Lists.newArrayList();
        // Place a lock using key for the table to be created
        byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        try {
            acquireLock(region, tableKey, locks);
            // If the table key resides outside the region, return without doing anything
            MetaDataMutationResult result = checkTableKeyInRegion(tableKey, region);
            if (result != null) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            ImmutableBytesPtr parentCacheKey = null;
            PTable parentTable = null;
            if (parentTableName != null) {
                // Check if the parent table resides in the same region. If not, don't worry about locking the parent table row
                // or loading the parent table. For a view, the parent table that needs to be locked is the base physical table.
                // For an index on view, the view header row needs to be locked. 
                result = checkTableKeyInRegion(parentTableKey, region);
                if (result == null) {
                    acquireLock(region, parentTableKey, locks);
                    parentCacheKey = new ImmutableBytesPtr(parentTableKey);
                    parentTable = loadTable(env, parentTableKey, parentCacheKey, clientTimeStamp, clientTimeStamp);
                    if (parentTable == null || isTableDeleted(parentTable)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    long parentTableSeqNumber;
                    if (tableType == PTableType.VIEW && viewPhysicalTableRow != null && request.hasClientVersion()) {
                        // Starting 4.5, the client passes the sequence number of the physical table in the table metadata.
                        parentTableSeqNumber = MetaDataUtil.getSequenceNumber(viewPhysicalTableRow);
                    } else if (tableType == PTableType.VIEW && !request.hasClientVersion()) {
                        // Before 4.5, due to a bug, the parent table key wasn't available.
                        // So don't do anything and prevent the exception from being thrown.
                        parentTableSeqNumber = parentTable.getSequenceNumber();
                    } else {
                        parentTableSeqNumber = MetaDataUtil.getParentSequenceNumber(tableMetadata);
                    }
                    // If parent table isn't at the expected sequence number, then return
                    if (parentTable.getSequenceNumber() != parentTableSeqNumber) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.CONCURRENT_TABLE_MUTATION);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setTable(PTableImpl.toProto(parentTable));
                        done.run(builder.build());
                        return;
                    }
                }
            }
            // Load child table next
            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
            // Get as of latest timestamp so we can detect if we have a newer table that already
            // exists without making an additional query
            PTable table = loadTable(env, tableKey, cacheKey, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
            if (table != null) {
                if (table.getTimeStamp() < clientTimeStamp) {
                    // If the table is older than the client timestamp and it's deleted,
                    // continue with the create
                    if (!isTableDeleted(table)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.setTable(PTableImpl.toProto(table));
                        done.run(builder.build());
                        return;
                    }
                } else {
                    builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    builder.setTable(PTableImpl.toProto(table));
                    done.run(builder.build());
                    return;
                }
            }
            // Add the ROW_KEY_ORDER_OPTIMIZABLE header cell for anything other than a view;
            // what a view sends over depends on its base physical table.
            if (tableType != PTableType.VIEW) {
                UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp);
            }
            // If the parent table of the view has the auto partition sequence name attribute,
            // modify the tableMetadata and set the view statement and partition column correctly
            if (parentTable != null && parentTable.getAutoPartitionSeqName() != null) {
                long autoPartitionNum = 1;
                try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                    Statement stmt = connection.createStatement()) {
                    String seqName = parentTable.getAutoPartitionSeqName();
                    // Not going through the standard route of using statement.execute() as that code path
                    // is blocked if the metadata hasn't been upgraded to the new minor release.
                    String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s", seqName);
                    PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class);
                    QueryPlan plan = ps.compileQuery(seqNextValueSql);
                    ResultIterator resultIterator = plan.iterator();
                    PhoenixResultSet rs = ps.newResultSet(resultIterator, plan.getProjector(), plan.getContext());
                    rs.next();
                    autoPartitionNum = rs.getLong(1);
                } catch (SequenceNotFoundException e) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                PColumn autoPartitionCol = parentTable.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable));
                if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                builder.setAutoPartitionNum(autoPartitionNum);
                // set the VIEW STATEMENT column of the header row
                Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
                NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
                List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                Cell cell = cells.get(0);
                String autoPartitionWhere = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum);
                String hbaseVersion = VersionInfo.getVersion();
                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion);
                MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr);
                byte[] value = ptr.copyBytesIfNecessary();
                byte[] viewStatement = null;
                // if we have an existing where clause add the auto partition where clause to it
                if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) {
                    viewStatement = Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere));
                } else {
                    viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
                }
                Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
                cells.add(viewStatementCell);
                // set the IS_VIEW_REFERENCED column of the auto partition column row
                Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
                familyCellMap = autoPartitionPut.getFamilyCellMap();
                cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                cell = cells.get(0);
                PDataType dataType = autoPartitionCol.getDataType();
                Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                byte[] bytes = new byte[dataType.getByteSize() + 1];
                dataType.toBytes(val, bytes, 0);
                Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
                cells.add(viewConstantCell);
            }
            Short indexId = null;
            if (request.hasAllocateIndexId() && request.getAllocateIndexId()) {
                String tenantIdStr = tenantIdBytes.length == 0 ? null : Bytes.toString(tenantIdBytes);
                try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class)) {
                    PName physicalName = parentTable.getPhysicalName();
                    int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets();
                    SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, physicalName, nSequenceSaltBuckets, parentTable.isNamespaceMapped());
                    // TODO: Review. The sequence used to be created at (SCN-1/LATEST_TIMESTAMP) and
                    // incremented at the client at max(SCN, dataTable.getTimestamp()), but it seems we
                    // should always use LATEST_TIMESTAMP to avoid connections with and without an SCN
                    // seeing wrong sequence values.
                    long sequenceTimestamp = HConstants.LATEST_TIMESTAMP;
                    try {
                        connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp);
                    } catch (SequenceAlreadyExistsException e) {
                    }
                    long[] seqValues = new long[1];
                    SQLException[] sqlExceptions = new SQLException[1];
                    connection.getQueryServices().incrementSequences(Collections.singletonList(new SequenceAllocation(key, 1)), HConstants.LATEST_TIMESTAMP, seqValues, sqlExceptions);
                    if (sqlExceptions[0] != null) {
                        throw sqlExceptions[0];
                    }
                    long seqValue = seqValues[0];
                    if (seqValue > Short.MAX_VALUE) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        done.run(builder.build());
                        return;
                    }
                    Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
                    NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
                    List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
                    Cell cell = cells.get(0);
                    PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                    Object val = dataType.toObject(seqValue, PLong.INSTANCE);
                    byte[] bytes = new byte[dataType.getByteSize() + 1];
                    dataType.toBytes(val, bytes, 0);
                    Cell indexIdCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_INDEX_ID_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
                    cells.add(indexIdCell);
                    indexId = (short) seqValue;
                }
            }
            // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
            // system table. Basically, we get all the locks that we don't already hold for all the
            // tableMetadata rows. This ensures we don't have deadlock situations (ensuring
            // primary and then index table locks are held, in that order). For now, we just don't support
            // indexing on the system table. This is an issue because of the way we manage batch mutation
            // in the Indexer.
            region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Invalidate the cache - the next getTable call will add it
            // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            if (parentCacheKey != null) {
                metaDataCache.invalidate(parentCacheKey);
            }
            metaDataCache.invalidate(cacheKey);
            // Get timeStamp from mutations - the above method sets it if it's unset
            long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
            if (indexId != null) {
                builder.setViewIndexId(indexId);
            }
            builder.setMutationTime(currentTimeStamp);
            done.run(builder.build());
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("createTable failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) KeyValue(org.apache.hadoop.hbase.KeyValue) SQLException(java.sql.SQLException) SequenceAlreadyExistsException(org.apache.phoenix.schema.SequenceAlreadyExistsException) ByteString(com.google.protobuf.ByteString) QueryPlan(org.apache.phoenix.compile.QueryPlan) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) PDataType(org.apache.phoenix.schema.types.PDataType) SequenceKey(org.apache.phoenix.schema.SequenceKey) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) SequenceNotFoundException(org.apache.phoenix.schema.SequenceNotFoundException) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Statement(java.sql.Statement) PTableType(org.apache.phoenix.schema.PTableType) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) GenericKeyValueBuilder(org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder) SequenceAllocation(org.apache.phoenix.schema.SequenceAllocation) Put(org.apache.hadoop.hbase.client.Put) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) PName(org.apache.phoenix.schema.PName) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation)
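
The QueryPlan usage buried in the middle of createTable is worth isolating: the endpoint reads the next auto-partition sequence value by compiling "SELECT NEXT VALUE FOR <seq>" and driving the plan's iterator directly, bypassing Statement.execute(). A minimal sketch of that idiom follows; connection acquisition and the sequence name are left to the caller and are hypothetical here.

import java.sql.Connection;
import java.sql.Statement;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.jdbc.PhoenixStatement;

public class NextSequenceValue {
    // Returns the next value of the named sequence without going through execute().
    public static long next(Connection conn, String seqName) throws Exception {
        try (Statement stmt = conn.createStatement()) {
            PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class);
            QueryPlan plan = ps.compileQuery(String.format("SELECT NEXT VALUE FOR %s", seqName));
            ResultIterator iterator = plan.iterator();
            // newResultSet wires the iterator to the plan's projector and context.
            PhoenixResultSet rs = ps.newResultSet(iterator, plan.getProjector(), plan.getContext());
            if (!rs.next()) {
                throw new IllegalStateException("Sequence " + seqName + " returned no row");
            }
            return rs.getLong(1);
        }
    }
}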

Example 33 with QueryPlan

Use of org.apache.phoenix.compile.QueryPlan in project phoenix by apache.

The class QueryOptimizer, method optimize.

public QueryPlan optimize(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException {
    QueryCompiler compiler = new QueryCompiler(statement, select, resolver, targetColumns, parallelIteratorFactory, new SequenceManager(statement));
    QueryPlan dataPlan = compiler.compile();
    return optimize(dataPlan, statement, targetColumns, parallelIteratorFactory);
}
Also used : QueryCompiler(org.apache.phoenix.compile.QueryCompiler) QueryPlan(org.apache.phoenix.compile.QueryPlan) SequenceManager(org.apache.phoenix.compile.SequenceManager)
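
From the client side, this compile-then-optimize path is reachable through PhoenixStatement.optimizeQuery(). A minimal sketch follows; the URL, table, and the assumption that an index on v2 covers v1 are hypothetical. Printing the chosen table's name shows whether the optimizer substituted an index plan for the data plan.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixStatement;

public class WhichPlan {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
            QueryPlan plan = stmt.optimizeQuery("SELECT v1 FROM MY_TABLE WHERE v2 = 'a'");
            // If an index on v2 covers v1, the chosen table below is the index table.
            System.out.println(plan.getTableRef().getTable().getName().getString());
        }
    }
}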

Example 34 with QueryPlan

Use of org.apache.phoenix.compile.QueryPlan in project phoenix by apache.

The class QueryOptimizer, method addPlan.

private static QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, boolean isHinted) throws SQLException {
    int nColumns = dataPlan.getProjector().getColumnCount();
    String tableAlias = dataPlan.getTableRef().getTableAlias();
    // double quote in case it's case sensitive
    String alias = tableAlias == null ? null : '"' + tableAlias + '"';
    String schemaName = index.getParentSchemaName().getString();
    schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"';
    String tableName = '"' + index.getTableName().getString() + '"';
    TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName));
    SelectStatement indexSelect = FACTORY.select(select, table);
    ColumnResolver resolver = FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
    // We will or will not do tuple projection according to the data plan.
    boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED;
    // Check index state of now potentially updated index table to make sure it's active
    if (PIndexState.ACTIVE.equals(resolver.getTables().get(0).getTable().getIndexState())) {
        try {
            // translate nodes that match expressions that are indexed to the associated column parse node
            indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes()));
            QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected);
            QueryPlan plan = compiler.compile();
            // If the query has no where clause and some projected columns are missing from a local
            // index, each index row would need the missing columns fetched from the data table, which
            // amounts to a full scan of both, so we skip the index. If the index covers all projected
            // columns, then we can use the index even if the query doesn't have a where clause.
            if (index.getIndexType() == IndexType.LOCAL && indexSelect.getWhere() == null && !plan.getContext().getDataColumns().isEmpty()) {
                return null;
            }
            // Checking the column count handles the wildcard case correctly, since then the index
            // must contain all columns from the data table to be able to be used.
            if (plan.getTableRef().getTable().getIndexState() == PIndexState.ACTIVE) {
                if (plan.getProjector().getColumnCount() == nColumns) {
                    return plan;
                } else if (index.getIndexType() == IndexType.GLOBAL) {
                    String schemaNameStr = index.getSchemaName() == null ? null : index.getSchemaName().getString();
                    String tableNameStr = index.getTableName() == null ? null : index.getTableName().getString();
                    throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
                }
            }
        } catch (ColumnNotFoundException e) {
            /* Means that a column is being used that's not in our index.
                 * Since we currently don't keep stats, we don't know the selectivity of the index.
                 * For now, if this is a hinted plan, we will try rewriting the query as a subquery;
                 * otherwise we just don't use this index (as opposed to trying to join back from
                 * the index table to the data table).
                 */
            SelectStatement dataSelect = (SelectStatement) dataPlan.getStatement();
            ParseNode where = dataSelect.getWhere();
            if (isHinted && where != null) {
                StatementContext context = new StatementContext(statement, resolver);
                WhereConditionRewriter whereRewriter = new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
                where = where.accept(whereRewriter);
                if (where != null) {
                    PTable dataTable = dataPlan.getTableRef().getTable();
                    List<PColumn> pkColumns = dataTable.getPKColumns();
                    List<AliasedNode> aliasedNodes = Lists.<AliasedNode>newArrayListWithExpectedSize(pkColumns.size());
                    List<ParseNode> nodes = Lists.<ParseNode>newArrayListWithExpectedSize(pkColumns.size());
                    boolean isSalted = dataTable.getBucketNum() != null;
                    boolean isTenantSpecific = dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null;
                    int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0);
                    for (int i = posOffset; i < pkColumns.size(); i++) {
                        PColumn column = pkColumns.get(i);
                        String indexColName = IndexUtil.getIndexColumnName(column);
                        ParseNode indexColNode = new ColumnParseNode(null, '"' + indexColName + '"', indexColName);
                        PDataType indexColType = IndexUtil.getIndexColumnDataType(column);
                        PDataType dataColType = column.getDataType();
                        if (indexColType != dataColType) {
                            indexColNode = FACTORY.cast(indexColNode, dataColType, null, null);
                        }
                        aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
                        nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
                    }
                    SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, null, null, indexSelect.getBindCount(), false, indexSelect.hasSequence(), Collections.<SelectStatement>emptyList(), indexSelect.getUdfParseNodes());
                    ParseNode outerWhere = FACTORY.in(nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes), FACTORY.subquery(innerSelect, false), false, true);
                    ParseNode extractedCondition = whereRewriter.getExtractedCondition();
                    if (extractedCondition != null) {
                        outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
                    }
                    HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] { Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION }), FACTORY.hint("NO_INDEX"));
                    SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
                    ColumnResolver queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                    query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
                    queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
                    query = StatementNormalizer.normalize(query, queryResolver);
                    QueryPlan plan = new QueryCompiler(statement, query, queryResolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected).compile();
                    return plan;
                }
            }
        }
    }
    return null;
}
Also used : QueryCompiler(org.apache.phoenix.compile.QueryCompiler) QueryPlan(org.apache.phoenix.compile.QueryPlan) Hint(org.apache.phoenix.parse.HintNode.Hint) PTable(org.apache.phoenix.schema.PTable) StatementContext(org.apache.phoenix.compile.StatementContext) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) PDataType(org.apache.phoenix.schema.types.PDataType) ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode) HintNode(org.apache.phoenix.parse.HintNode) TableNode(org.apache.phoenix.parse.TableNode) AndParseNode(org.apache.phoenix.parse.AndParseNode) ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) IndexExpressionParseNodeRewriter(org.apache.phoenix.parse.IndexExpressionParseNodeRewriter) ColumnResolver(org.apache.phoenix.compile.ColumnResolver)
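
The hinted-subquery branch above is easiest to observe from the outside with EXPLAIN: when an INDEX hint names an index that does not cover a referenced column, the query is rewritten as a semi-join back to the data table instead of being rejected. A minimal sketch, with hypothetical table and index names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ExplainHintedIndex {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // v3 is assumed not to be covered by MY_IDX, forcing the join-back rewrite.
            ResultSet rs = stmt.executeQuery(
                    "EXPLAIN SELECT /*+ INDEX(MY_TABLE MY_IDX) */ v3 FROM MY_TABLE WHERE v2 = 'a'");
            while (rs.next()) {
                System.out.println(rs.getString(1)); // one plan step per row
            }
        }
    }
}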

Example 35 with QueryPlan

Use of org.apache.phoenix.compile.QueryPlan in project phoenix by apache.

The class PhoenixInputFormat, method getSplits.

@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
    final Configuration configuration = context.getConfiguration();
    final QueryPlan queryPlan = getQueryPlan(context, configuration);
    final List<KeyRange> allSplits = queryPlan.getSplits();
    final List<InputSplit> splits = generateSplits(queryPlan, allSplits, configuration);
    return splits;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) KeyRange(org.apache.phoenix.query.KeyRange) QueryPlan(org.apache.phoenix.compile.QueryPlan) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
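
For context, getSplits() is driven by the job configuration that PhoenixMapReduceUtil.setInput() populates; each resulting InputSplit carries a slice of the compiled query plan's key ranges. A minimal sketch of that wiring, with a hypothetical table and a stub DBWritable:

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;

public class SplitsJob {
    // Stub record type describing the two selected columns.
    public static class IdValueWritable implements DBWritable {
        long id;
        String v1;
        @Override
        public void readFields(ResultSet rs) throws SQLException {
            id = rs.getLong(1);
            v1 = rs.getString(2);
        }
        @Override
        public void write(PreparedStatement ps) throws SQLException {
            ps.setLong(1, id);
            ps.setString(2, v1);
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "phoenix-read");
        // Stores the SELECT in the job configuration and selects PhoenixInputFormat,
        // whose getSplits() (shown above) turns the plan's key ranges into splits.
        PhoenixMapReduceUtil.setInput(job, IdValueWritable.class, "MY_TABLE",
                "SELECT id, v1 FROM MY_TABLE");
        // Mapper, output configuration, and job.waitForCompletion(true) would follow in a real job.
    }
}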

Aggregations

QueryPlan (org.apache.phoenix.compile.QueryPlan)36 Connection (java.sql.Connection)19 PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement)19 PreparedStatement (java.sql.PreparedStatement)12 ResultSet (java.sql.ResultSet)11 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)11 Test (org.junit.Test)11 Statement (java.sql.Statement)8 SQLException (java.sql.SQLException)7 PTable (org.apache.phoenix.schema.PTable)7 Date (java.sql.Date)6 Properties (java.util.Properties)6 ResultIterator (org.apache.phoenix.iterate.ResultIterator)5 ArrayList (java.util.ArrayList)4 List (java.util.List)4 KeyRange (org.apache.phoenix.query.KeyRange)4 PColumn (org.apache.phoenix.schema.PColumn)4 IOException (java.io.IOException)3 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)3 Pair (org.apache.hadoop.hbase.util.Pair)3