Example 1 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From class BaseResultIterators, method initializeScan:

private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // FIXME: we only enter this if the number of column families is 1 because otherwise
            // local indexes break, because it appears that the column families in the PTable do
            // not match the actual column families of the table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables, 
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value column unless the column family containing it has
                        // been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
        if (cols > 0 && keyOnlyFilter
                && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN)
                && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount()
                && plan.getGroupBy().isOrderPreserving()
                && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan,
                    new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // we use this flag on the server side to determine which value column qualifier to use in the key value we return from server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When analyzing the table, there is no look up of key values being done,
        // so there is no point setting the range.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
Also used: NavigableSet(java.util.NavigableSet) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DistinctPrefixFilter(org.apache.phoenix.filter.DistinctPrefixFilter) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) StatementContext(org.apache.phoenix.compile.StatementContext) RowProjector(org.apache.phoenix.compile.RowProjector) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) TableRef(org.apache.phoenix.schema.TableRef)
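
The noteworthy part of this example is how much work initializeScan pushes into the Scan itself: a FirstKeyOnlyFilter when no key value columns are referenced, a PageFilter for the per-scan limit, and a DistinctPrefixFilter for order-preserving prefixes. As a minimal sketch of the first two, here is the equivalent composition with plain HBase APIs; the class name KeyOnlyScanSketch and the standalone FilterList are illustrative, whereas Phoenix's ScanUtil.andFilterAtBeginning/andFilterAtEnd helpers additionally merge the new filter with any filter already set on the Scan.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;

public class KeyOnlyScanSketch {
    public static Scan buildScan(long perScanLimit) {
        Scan scan = new Scan();
        // FirstKeyOnlyFilter: return only the first KeyValue of each row, which is
        // enough when no key value columns are referenced (the keyOnlyFilter case above).
        // PageFilter: stop after perScanLimit rows per region server, which is how
        // a per-scan limit is pushed down.
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
                new FirstKeyOnlyFilter(),
                new PageFilter(perScanLimit)));
        return scan;
    }
}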

Example 2 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From class ColumnRef, method newColumnExpression:

public Expression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) throws SQLException {
    PTable table = tableRef.getTable();
    PColumn column = this.getColumn();
    String displayName = tableRef.getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive);
    if (SchemaUtil.isPKColumn(column)) {
        return new RowKeyColumnExpression(column, new RowKeyValueAccessor(table.getPKColumns(), pkSlotPosition), displayName);
    }
    if (table.getType() == PTableType.PROJECTED || table.getType() == PTableType.SUBQUERY) {
        return new ProjectedColumnExpression(column, table, displayName);
    }
    Expression expression = table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS ? new SingleCellColumnExpression(column, displayName, table.getEncodingScheme()) : new KeyValueColumnExpression(column, displayName);
    if (column.getExpressionStr() != null) {
        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
        PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
        StatementContext context = new StatementContext(new PhoenixStatement(conn));
        ExpressionCompiler compiler = new ExpressionCompiler(context);
        ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression();
        Expression defaultExpression = defaultParseNode.accept(compiler);
        if (!ExpressionUtil.isNull(defaultExpression, new ImmutableBytesWritable())) {
            return new DefaultValueExpression(Arrays.asList(expression, defaultExpression));
        }
    }
    return expression;
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) StatementContext(org.apache.phoenix.compile.StatementContext) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) DefaultValueExpression(org.apache.phoenix.expression.function.DefaultValueExpression) SQLParser(org.apache.phoenix.parse.SQLParser) ParseNode(org.apache.phoenix.parse.ParseNode) ExpressionCompiler(org.apache.phoenix.compile.ExpressionCompiler)
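
Worth noting is the pattern used to compile the column's DEFAULT expression: a connectionless Phoenix JDBC URL yields a PhoenixConnection with just enough machinery to build a StatementContext. A minimal sketch of that pattern in isolation, assuming only what the example above already uses (the class name ExpressionCompileSketch is hypothetical):

import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.phoenix.compile.ExpressionCompiler;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.util.PhoenixRuntime;

public class ExpressionCompileSketch {
    // Compile a standalone SQL expression string into a Phoenix Expression,
    // using the same connectionless pattern as newColumnExpression above.
    public static Expression compile(String expressionStr) throws SQLException {
        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR
                + PhoenixRuntime.CONNECTIONLESS;
        // The example above likewise leaves this connectionless connection unclosed.
        PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
        StatementContext context = new StatementContext(new PhoenixStatement(conn));
        ParseNode node = new SQLParser(expressionStr).parseExpression();
        return node.accept(new ExpressionCompiler(context));
    }
}

Because no ColumnResolver is attached to this context, the pattern fits constant expressions such as DEFAULT values; expressions that reference columns need a resolver-backed context, as in the next example.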

Example 3 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From class IndexUtil, method rewriteViewStatement:

/**
     * Rewrite a view statement to be valid against an index.
     * @param conn connection used to compile the rewritten statement
     * @param index the index table the statement is rewritten against
     * @param table the data table the view statement was originally written against
     * @param viewStatement the view statement to rewrite, may be null
     * @return the rewritten view statement, or null if viewStatement is null
     * @throws SQLException if the rewritten WHERE clause fails to compile
     */
public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, String viewStatement) throws SQLException {
    if (viewStatement == null) {
        return null;
    }
    SelectStatement select = new SQLParser(viewStatement).parseQuery();
    ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
    SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver);
    ParseNode whereNode = translatedSelect.getWhere();
    PhoenixStatement statement = new PhoenixStatement(conn);
    TableRef indexTableRef = new TableRef(index) {

        @Override
        public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
            return '"' + ref.getColumn().getName().getString() + '"';
        }
    };
    ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef);
    StatementContext context = new StatementContext(statement, indexResolver);
    // Compile to ensure validity
    WhereCompiler.compile(context, whereNode);
    StringBuilder buf = new StringBuilder();
    whereNode.toSQL(indexResolver, buf);
    return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), buf.toString());
}
Also used: SelectStatement(org.apache.phoenix.parse.SelectStatement) SQLParser(org.apache.phoenix.parse.SQLParser) ParseNode(org.apache.phoenix.parse.ParseNode) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) TableRef(org.apache.phoenix.schema.TableRef) StatementContext(org.apache.phoenix.compile.StatementContext)
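
The idiom here is compile-to-validate: the translated WHERE parse node is compiled only to prove it is legal against the index, then serialized back to SQL text with toSQL. A stripped-down sketch of that round trip against a single table, assuming the same StatementContext(PhoenixStatement, ColumnResolver) constructor and WhereCompiler.compile(StatementContext, ParseNode) overload the example uses (the class name WhereValidateSketch and the null guard are illustrative):

import java.sql.SQLException;

import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.compile.WhereCompiler;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

public class WhereValidateSketch {
    // Parse a query, compile its WHERE clause against the table's resolver to
    // prove it is valid, then regenerate it as SQL text.
    public static String validateWhere(PhoenixConnection conn, PTable table, String query)
            throws SQLException {
        SelectStatement select = new SQLParser(query).parseQuery();
        ParseNode where = select.getWhere();
        if (where == null) {
            return null; // nothing to validate (guard not present in the example above)
        }
        ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
        StatementContext context = new StatementContext(new PhoenixStatement(conn), resolver);
        WhereCompiler.compile(context, where); // throws if the WHERE clause is invalid
        StringBuilder buf = new StringBuilder();
        where.toSQL(resolver, buf);
        return buf.toString();
    }
}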

Example 4 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From class PhoenixDatabaseMetaData, method getColumns:

@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
    StringBuilder buf = new StringBuilder("select \n " + TENANT_ID + " " + TABLE_CAT + // use this for tenant id
            "," + TABLE_SCHEM +
            "," + TABLE_NAME +
            " ," + COLUMN_NAME +
            "," + ExternalSqlTypeIdFunction.NAME + "(" + DATA_TYPE + ") AS " + DATA_TYPE +
            "," + SqlTypeNameFunction.NAME + "(" + DATA_TYPE + ") AS " + TYPE_NAME +
            "," + COLUMN_SIZE +
            "," + BUFFER_LENGTH +
            "," + DECIMAL_DIGITS +
            "," + NUM_PREC_RADIX +
            "," + NULLABLE +
            "," + REMARKS +
            "," + COLUMN_DEF +
            "," + SQL_DATA_TYPE +
            "," + SQL_DATETIME_SUB +
            "," + CHAR_OCTET_LENGTH +
            "," + "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + ORDINAL_POSITION + "-1 ELSE " + ORDINAL_POSITION + " END AS " + ORDINAL_POSITION +
            "," + "CASE " + NULLABLE + " WHEN " + DatabaseMetaData.attributeNoNulls + " THEN '" + Boolean.FALSE.toString() + "' WHEN " + DatabaseMetaData.attributeNullable + " THEN '" + Boolean.TRUE.toString() + "' END AS " + IS_NULLABLE +
            "," + SCOPE_CATALOG +
            "," + SCOPE_SCHEMA +
            "," + SCOPE_TABLE +
            "," + SOURCE_DATA_TYPE +
            "," + IS_AUTOINCREMENT +
            "," + ARRAY_SIZE +
            "," + COLUMN_FAMILY +
            "," + DATA_TYPE + " " + TYPE_ID + // raw type id for potential internal consumption
            "," + VIEW_CONSTANT +
            "," + MULTI_TENANT +
            "," + "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + KEY_SEQ + "-1 ELSE " + KEY_SEQ + " END AS " + KEY_SEQ +
            " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
            "(" + TENANT_POS_SHIFT + " BOOLEAN)");
    StringBuilder where = new StringBuilder();
    addTenantIdFilter(where, catalog);
    if (schemaPattern != null) {
        appendConjunction(where);
        where.append(TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'"));
    }
    if (tableNamePattern != null && tableNamePattern.length() > 0) {
        appendConjunction(where);
        where.append(TABLE_NAME + " like '" + StringUtil.escapeStringConstant(tableNamePattern) + "'");
    }
    // Allow a "." in columnNamePattern for column family match
    String colPattern = null;
    if (columnNamePattern != null && columnNamePattern.length() > 0) {
        String cfPattern = null;
        int index = columnNamePattern.indexOf('.');
        if (index <= 0) {
            colPattern = columnNamePattern;
        } else {
            cfPattern = columnNamePattern.substring(0, index);
            if (columnNamePattern.length() > index + 1) {
                colPattern = columnNamePattern.substring(index + 1);
            }
        }
        if (cfPattern != null && cfPattern.length() > 0) {
            // if null or empty, will pick up all columns
            // Will pick up only KV columns
            appendConjunction(where);
            where.append(COLUMN_FAMILY + " like '" + StringUtil.escapeStringConstant(cfPattern) + "'");
        }
        if (colPattern != null && colPattern.length() > 0) {
            appendConjunction(where);
            where.append(COLUMN_NAME + " like '" + StringUtil.escapeStringConstant(colPattern) + "'");
        }
    }
    if (colPattern == null || colPattern.length() == 0) {
        appendConjunction(where);
        where.append(COLUMN_NAME + " is not null");
        appendConjunction(where);
        where.append(LINK_TYPE + " is null");
    }
    boolean isTenantSpecificConnection = connection.getTenantId() != null;
    if (isTenantSpecificConnection) {
        buf.append(" where (" + where + ") OR (" + COLUMN_FAMILY + " is null AND " + COLUMN_NAME + " is null)");
    } else {
        buf.append(" where " + where);
    }
    buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + SYSTEM_CATALOG_ALIAS + "." + ORDINAL_POSITION);
    Statement stmt;
    if (isTenantSpecificConnection) {
        stmt = connection.createStatement(new PhoenixStatementFactory() {

            @Override
            public PhoenixStatement newStatement(PhoenixConnection connection) {
                return new PhoenixStatement(connection) {

                    @Override
                    public PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector, StatementContext context) throws SQLException {
                        return new PhoenixResultSet(new TenantColumnFilteringIterator(iterator, projector), projector, context);
                    }
                };
            }
        });
    } else {
        stmt = connection.createStatement();
    }
    return stmt.executeQuery(buf.toString());
}
Also used: Statement(java.sql.Statement) MaterializedResultIterator(org.apache.phoenix.iterate.MaterializedResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DelegateResultIterator(org.apache.phoenix.iterate.DelegateResultIterator) StatementContext(org.apache.phoenix.compile.StatementContext) RowProjector(org.apache.phoenix.compile.RowProjector)
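
From the caller's perspective all of this machinery sits behind the standard JDBC metadata API, and the tenant-specific column filtering is invisible. A minimal, hypothetical usage sketch (the URL, schema, and table values are placeholders):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class GetColumnsSketch {
    public static void printColumns(String url, String schema, String table) throws SQLException {
        // A tenant-specific Phoenix connection would transparently route through the
        // overridden newResultSet above; a global connection uses a plain statement.
        try (Connection conn = DriverManager.getConnection(url)) {
            DatabaseMetaData md = conn.getMetaData();
            // null catalog; a null column pattern returns all columns
            try (ResultSet rs = md.getColumns(null, schema, table, null)) {
                while (rs.next()) {
                    System.out.println(rs.getString("COLUMN_NAME") + " "
                            + rs.getString("TYPE_NAME"));
                }
            }
        }
    }
}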

Example 5 with StatementContext

Use of org.apache.phoenix.compile.StatementContext in project phoenix by apache.

From class PhoenixRecordReader, method initialize:

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    final PhoenixInputSplit pSplit = (PhoenixInputSplit) split;
    final List<Scan> scans = pSplit.getScans();
    try {
        List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
        StatementContext ctx = queryPlan.getContext();
        ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
        String tableName = queryPlan.getTableRef().getTable().getPhysicalName().getString();
        String snapshotName = this.configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY);
        // Clear the table region boundary cache to make sure long running jobs stay up to date
        byte[] tableNameBytes = queryPlan.getTableRef().getTable().getPhysicalName().getBytes();
        ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices();
        services.clearTableRegionCache(tableNameBytes);
        long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
        boolean isRequestMetricsEnabled = readMetrics.isRequestMetricsEnabled();
        for (Scan scan : scans) {
            // For MR, skip the region boundary check exception if we encounter a split. ref: PHOENIX-2599
            scan.setAttribute(BaseScannerRegionObserver.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true));
            PeekingResultIterator peekingResultIterator;
            ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan, isRequestMetricsEnabled);
            if (snapshotName != null) {
                // result iterator to read snapshots
                final TableSnapshotResultIterator tableSnapshotResultIterator = new TableSnapshotResultIterator(configuration, scan, scanMetricsHolder);
                peekingResultIterator = LookAheadResultIterator.wrap(tableSnapshotResultIterator);
            } else {
                final TableResultIterator tableResultIterator = new TableResultIterator(queryPlan.getContext().getConnection().getMutationState(), scan, scanMetricsHolder, renewScannerLeaseThreshold, queryPlan, MapReduceParallelScanGrouper.getInstance());
                peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator);
            }
            iterators.add(peekingResultIterator);
        }
        ResultIterator iterator = queryPlan.useRoundRobinIterator() ? RoundRobinResultIterator.newIterator(iterators, queryPlan) : ConcatResultIterator.newIterator(iterators);
        if (queryPlan.getContext().getSequenceManager().getSequenceCount() > 0) {
            iterator = new SequenceResultIterator(iterator, queryPlan.getContext().getSequenceManager());
        }
        this.resultIterator = iterator;
        // Clone the row projector as it's not thread safe and would be used simultaneously by
        // multiple threads otherwise.
        this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
    } catch (SQLException e) {
        LOG.error(String.format(" Error [%s] initializing PhoenixRecordReader. ", e.getMessage()));
        Throwables.propagate(e);
    }
}
Also used: ReadMetricQueue(org.apache.phoenix.monitoring.ReadMetricQueue) SQLException(java.sql.SQLException) ScanMetricsHolder(org.apache.phoenix.monitoring.ScanMetricsHolder) StatementContext(org.apache.phoenix.compile.StatementContext) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) Scan(org.apache.hadoop.hbase.client.Scan) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
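
Note that initialize only wires up the iterator chain; rows are actually pulled by whatever drives the reader, normally the MapReduce framework. A schematic driver loop using only the standard Hadoop RecordReader contract (the class name RecordReaderDriverSketch is illustrative, and the split and context would be supplied by the framework):

import java.io.IOException;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class RecordReaderDriverSketch {
    public static <K, V> long drain(RecordReader<K, V> reader, InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException {
        reader.initialize(split, context); // builds the iterator chain shown above
        long count = 0;
        try {
            while (reader.nextKeyValue()) { // advances the underlying ResultIterator
                V value = reader.getCurrentValue(); // would be handed to the mapper
                count++;
            }
        } finally {
            reader.close();
        }
        return count;
    }
}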

Aggregations

StatementContext (org.apache.phoenix.compile.StatementContext): 19 usages
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 10 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 8 usages
ParseNode (org.apache.phoenix.parse.ParseNode): 7 usages
SQLException (java.sql.SQLException): 6 usages
ColumnResolver (org.apache.phoenix.compile.ColumnResolver): 6 usages
Expression (org.apache.phoenix.expression.Expression): 6 usages
ArrayList (java.util.ArrayList): 5 usages
SQLParser (org.apache.phoenix.parse.SQLParser): 5 usages
PTable (org.apache.phoenix.schema.PTable): 5 usages
Scan (org.apache.hadoop.hbase.client.Scan): 4 usages
RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression): 4 usages
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 4 usages
PDataType (org.apache.phoenix.schema.types.PDataType): 4 usages
PreparedStatement (java.sql.PreparedStatement): 3 usages
List (java.util.List): 3 usages
RowProjector (org.apache.phoenix.compile.RowProjector): 3 usages
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 3 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 3 usages
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 3 usages