
Example 1 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

From class AggregatePlan, method newIterator:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (groupBy.isEmpty()) {
        UngroupedAggregateRegionObserver.serializeIntoScan(scan);
    } else {
        // Set attribute with serialized expressions for coprocessor
        GroupedAggregateRegionObserver.serializeIntoScan(scan, groupBy.getScanAttribName(), groupBy.getKeyExpressions());
        if (limit != null && orderBy.getOrderByExpressions().isEmpty() && having == null && ((statement.isDistinct() && !statement.isAggregate()) || (!statement.isDistinct() && (context.getAggregationManager().isEmpty() || BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS.equals(groupBy.getScanAttribName()))))) {
            /*
             * Optimization to early exit from the scan for a GROUP BY or DISTINCT with a LIMIT.
             * We may exit early according to the LIMIT specified if the query has:
             * 1) No ORDER BY clause (or the ORDER BY was optimized out). We cannot exit
             *    early if there's an ORDER BY because the first group may be found last
             *    in the scan.
             * 2) No HAVING clause, since we execute the HAVING on the client side. The LIMIT
             *    needs to be evaluated *after* the HAVING.
             * 3) DISTINCT clause with no GROUP BY. We cannot exit early if there's a
             *    GROUP BY, as the GROUP BY is processed on the client side post aggregation
             *    if a DISTINCT has a GROUP BY. Otherwise, since there are no aggregate
             *    functions in a DISTINCT, we can exit early regardless of whether the
             *    groups are in row key order or unordered.
             * 4) GROUP BY clause with no aggregate functions. This is in the same category
             *    as (3). If we're using aggregate functions, we need to look at all the
             *    rows, as otherwise we'd exit early with incorrect aggregate function
             *    calculations.
             * 5) GROUP BY clause along the pk axis, as the rows are processed in row key
             *    order, so we can early exit, even when aggregate functions are used, as
             *    the rows in the group are contiguous.
             */
            scan.setAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT, PInteger.INSTANCE.toBytes(limit + (offset == null ? 0 : offset)));
        }
    }
    BaseResultIterators iterators = isSerial ? new SerialIterators(this, null, null, wrapParallelIteratorFactory(), scanGrouper, scan) : new ParallelIterators(this, null, wrapParallelIteratorFactory(), scan, false);
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    AggregatingResultIterator aggResultIterator;
    // No need to merge sort for ungrouped aggregation
    if (groupBy.isEmpty() || groupBy.isUngroupedAggregate()) {
        aggResultIterator = new UngroupedAggregatingResultIterator(new ConcatResultIterator(iterators), aggregators);
    // If salted or local index we still need a merge sort as we'll potentially have multiple group by keys that aren't contiguous.
    } else if (groupBy.isOrderPreserving() && !(this.getTableRef().getTable().getBucketNum() != null || this.getTableRef().getTable().getIndexType() == IndexType.LOCAL)) {
        aggResultIterator = new RowKeyOrderedAggregateResultIterator(iterators, aggregators);
    } else {
        aggResultIterator = new GroupedAggregatingResultIterator(new MergeSortRowKeyResultIterator(iterators, 0, this.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY), aggregators);
    }
    if (having != null) {
        aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having);
    }
    if (statement.isDistinct() && statement.isAggregate()) {
        // Dedup on client if select distinct and aggregation
        aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector());
    }
    ResultIterator resultScanner = aggResultIterator;
    if (orderBy.getOrderByExpressions().isEmpty()) {
        if (offset != null) {
            resultScanner = new OffsetResultIterator(aggResultIterator, offset);
        }
        if (limit != null) {
            resultScanner = new LimitingResultIterator(resultScanner, limit);
        }
    } else {
        int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
        resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, orderBy.getOrderByExpressions(), thresholdBytes, limit, offset);
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager());
    }
    return resultScanner;
}
Also used: ParallelIterators, SerialIterators, BaseResultIterators, ConcatResultIterator, MergeSortRowKeyResultIterator, RowKeyOrderedAggregateResultIterator, AggregatingResultIterator, UngroupedAggregatingResultIterator, GroupedAggregatingResultIterator, FilterAggregatingResultIterator, DistinctAggregatingResultIterator, OrderedAggregatingResultIterator, OrderedResultIterator, OffsetResultIterator, LimitingResultIterator, SequenceResultIterator, SpoolingResultIterator, PeekingResultIterator, ResultIterator (all from org.apache.phoenix.iterate)
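
The pipeline above is built by stacking decorators: each wrapper (concat, merge sort, filter, offset, limit, sequences) adds one post-processing step over the iterator it delegates to. Below is a minimal sketch of that pattern, assuming the DelegateResultIterator base class takes the wrapped iterator in its constructor (as the anonymous subclass in Example 2 suggests); SimpleLimitIterator is a hypothetical name, illustrative rather than Phoenix source.

import java.sql.SQLException;

import org.apache.phoenix.iterate.DelegateResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.schema.tuple.Tuple;

// Toy decorator in the spirit of LimitingResultIterator: stop returning
// tuples once the limit is reached, delegating everything else.
public class SimpleLimitIterator extends DelegateResultIterator {
    private final int limit;
    private int count;

    public SimpleLimitIterator(ResultIterator delegate, int limit) {
        super(delegate);
        this.limit = limit;
    }

    @Override
    public Tuple next() throws SQLException {
        if (count >= limit) {
            return null; // Phoenix iterators signal exhaustion with null
        }
        Tuple tuple = super.next();
        if (tuple != null) {
            count++;
        }
        return tuple;
    }
}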

Example 2 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

From class BaseQueryPlan, method iterator:

public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return correct aggregates even if there are
     * no rows to be scanned.
     */
    if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    if (statement.getHint().hasHint(Hint.SMALL)) {
        scan.setSmall(true);
    }
    PhoenixConnection connection = context.getConnection();
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // If we haven't resolved the time at the beginning of compilation, don't
            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
            scn = tableRef.getTimeStamp();
            if (scn == QueryConstants.UNSET_TIMESTAMP) {
                scn = HConstants.LATEST_TIMESTAMP;
            }
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes. 
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If any referenced data columns are not present in the index, we need
        // to join back to the data table to retrieve them.
        if (!dataColumns.isEmpty()) {
            // Set data columns to be joined back from the data table.
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory factory = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(factory.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be joined back from the data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan);
    iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {

        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies);
            }
        }
    };
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc), TracingIterator (org.apache.phoenix.trace), ResultIterator, DelegateResultIterator (org.apache.phoenix.iterate), TraceScope (org.apache.htrace), IOException (java.io), PTable, PColumn, KeyValueSchema, TableRef (org.apache.phoenix.schema), Hint (org.apache.phoenix.parse.HintNode.Hint), ParseNodeFactory (org.apache.phoenix.parse), TimeRange (org.apache.hadoop.hbase.io)
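
A hedged usage sketch of draining the iterator this method returns. The drain method and its QueryPlan argument are hypothetical; the null-means-exhausted convention matches the assertions in Examples 4 and 5 below.

// Fragment: drain a plan's results, closing the iterator (and thus any
// dependencies hooked into close()) when done.
void drain(QueryPlan plan) throws SQLException {
    ResultIterator iterator = plan.iterator();
    try {
        for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
            // process the tuple...
        }
    } finally {
        iterator.close();
    }
}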

Example 3 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

From class PhoenixDatabaseMetaData, method getColumns:

@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
    StringBuilder buf = new StringBuilder("select \n " + TENANT_ID + " " + TABLE_CAT + // use this for tenant id
    "," + TABLE_SCHEM + "," + TABLE_NAME + " ," + COLUMN_NAME + "," + ExternalSqlTypeIdFunction.NAME + "(" + DATA_TYPE + ") AS " + DATA_TYPE + "," + SqlTypeNameFunction.NAME + "(" + DATA_TYPE + ") AS " + TYPE_NAME + "," + COLUMN_SIZE + "," + BUFFER_LENGTH + "," + DECIMAL_DIGITS + "," + NUM_PREC_RADIX + "," + NULLABLE + "," + REMARKS + "," + COLUMN_DEF + "," + SQL_DATA_TYPE + "," + SQL_DATETIME_SUB + "," + CHAR_OCTET_LENGTH + "," + "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + ORDINAL_POSITION + "-1 ELSE " + ORDINAL_POSITION + " END AS " + ORDINAL_POSITION + "," + "CASE " + NULLABLE + " WHEN " + DatabaseMetaData.attributeNoNulls + " THEN '" + Boolean.FALSE.toString() + "' WHEN " + DatabaseMetaData.attributeNullable + " THEN '" + Boolean.TRUE.toString() + "' END AS " + IS_NULLABLE + "," + SCOPE_CATALOG + "," + SCOPE_SCHEMA + "," + SCOPE_TABLE + "," + SOURCE_DATA_TYPE + "," + IS_AUTOINCREMENT + "," + ARRAY_SIZE + "," + COLUMN_FAMILY + "," + DATA_TYPE + " " + TYPE_ID + // raw type id for potential internal consumption
    "," + VIEW_CONSTANT + "," + MULTI_TENANT + "," + "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + KEY_SEQ + "-1 ELSE " + KEY_SEQ + " END AS " + KEY_SEQ + " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(" + TENANT_POS_SHIFT + " BOOLEAN)");
    StringBuilder where = new StringBuilder();
    addTenantIdFilter(where, catalog);
    if (schemaPattern != null) {
        appendConjunction(where);
        where.append(TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'"));
    }
    if (tableNamePattern != null && tableNamePattern.length() > 0) {
        appendConjunction(where);
        where.append(TABLE_NAME + " like '" + StringUtil.escapeStringConstant(tableNamePattern) + "'");
    }
    // Allow a "." in columnNamePattern for column family match
    String colPattern = null;
    if (columnNamePattern != null && columnNamePattern.length() > 0) {
        String cfPattern = null;
        int index = columnNamePattern.indexOf('.');
        if (index <= 0) {
            colPattern = columnNamePattern;
        } else {
            cfPattern = columnNamePattern.substring(0, index);
            if (columnNamePattern.length() > index + 1) {
                colPattern = columnNamePattern.substring(index + 1);
            }
        }
        // If cfPattern is null or empty, all column families are picked up;
        // a non-empty family pattern restricts the match to KV columns in it.
        if (cfPattern != null && cfPattern.length() > 0) {
            appendConjunction(where);
            where.append(COLUMN_FAMILY + " like '" + StringUtil.escapeStringConstant(cfPattern) + "'");
        }
        if (colPattern != null && colPattern.length() > 0) {
            appendConjunction(where);
            where.append(COLUMN_NAME + " like '" + StringUtil.escapeStringConstant(colPattern) + "'");
        }
    }
    if (colPattern == null || colPattern.length() == 0) {
        appendConjunction(where);
        where.append(COLUMN_NAME + " is not null");
        appendConjunction(where);
        where.append(LINK_TYPE + " is null");
    }
    boolean isTenantSpecificConnection = connection.getTenantId() != null;
    if (isTenantSpecificConnection) {
        buf.append(" where (" + where + ") OR (" + COLUMN_FAMILY + " is null AND " + COLUMN_NAME + " is null)");
    } else {
        buf.append(" where " + where);
    }
    buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + SYSTEM_CATALOG_ALIAS + "." + ORDINAL_POSITION);
    Statement stmt;
    if (isTenantSpecificConnection) {
        stmt = connection.createStatement(new PhoenixStatementFactory() {

            @Override
            public PhoenixStatement newStatement(PhoenixConnection connection) {
                return new PhoenixStatement(connection) {

                    @Override
                    public PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector, StatementContext context) throws SQLException {
                        return new PhoenixResultSet(new TenantColumnFilteringIterator(iterator, projector), projector, context);
                    }
                };
            }
        });
    } else {
        stmt = connection.createStatement();
    }
    return stmt.executeQuery(buf.toString());
}
Also used: Statement (java.sql), MaterializedResultIterator, ResultIterator, DelegateResultIterator (org.apache.phoenix.iterate), StatementContext, RowProjector (org.apache.phoenix.compile)
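
TenantColumnFilteringIterator is a private helper in this class; its general shape is the pull-and-skip idiom sketched below. This is a minimal sketch, not the Phoenix class itself: the FilteringResultIterator name and the java.util.function.Predicate parameter are illustrative assumptions.

import java.sql.SQLException;
import java.util.function.Predicate;

import org.apache.phoenix.iterate.DelegateResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.schema.tuple.Tuple;

// Hypothetical filtering decorator: pull from the delegate and skip tuples
// the predicate rejects, preserving the null end-of-results convention.
class FilteringResultIterator extends DelegateResultIterator {
    private final Predicate<Tuple> keep;

    FilteringResultIterator(ResultIterator delegate, Predicate<Tuple> keep) {
        super(delegate);
        this.keep = keep;
    }

    @Override
    public Tuple next() throws SQLException {
        Tuple tuple;
        do {
            tuple = super.next();
        } while (tuple != null && !keep.test(tuple));
        return tuple;
    }
}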

Example 4 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

From class CorrelatePlanTest, method testCorrelatePlan:

private void testCorrelatePlan(Object[][] leftRelation, Object[][] rightRelation, int leftCorrelColumn, int rightCorrelColumn, JoinType type, Object[][] expectedResult, Integer offset) throws SQLException {
    TableRef leftTable = createProjectedTableFromLiterals(leftRelation[0]);
    TableRef rightTable = createProjectedTableFromLiterals(rightRelation[0]);
    String varName = "$cor0";
    RuntimeContext runtimeContext = new RuntimeContextImpl();
    runtimeContext.defineCorrelateVariable(varName, leftTable);
    QueryPlan leftPlan = newLiteralResultIterationPlan(leftRelation, offset);
    QueryPlan rightPlan = newLiteralResultIterationPlan(rightRelation, offset);
    Expression columnExpr = new ColumnRef(rightTable, rightCorrelColumn).newColumnExpression();
    Expression fieldAccess = new CorrelateVariableFieldAccessExpression(runtimeContext, varName, new ColumnRef(leftTable, leftCorrelColumn).newColumnExpression());
    Expression filter = ComparisonExpression.create(CompareOp.EQUAL, Arrays.asList(columnExpr, fieldAccess), CONTEXT.getTempPtr(), false);
    rightPlan = new ClientScanPlan(CONTEXT, SelectStatement.SELECT_ONE, rightTable, RowProjector.EMPTY_PROJECTOR, null, null, filter, OrderBy.EMPTY_ORDER_BY, rightPlan);
    PTable joinedTable = JoinCompiler.joinProjectedTables(leftTable.getTable(), rightTable.getTable(), type);
    CorrelatePlan correlatePlan = new CorrelatePlan(leftPlan, rightPlan, varName, type, false, runtimeContext, joinedTable, leftTable.getTable(), rightTable.getTable(), leftTable.getTable().getColumns().size());
    ResultIterator iter = correlatePlan.iterator();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (Object[] row : expectedResult) {
        Tuple next = iter.next();
        assertNotNull(next);
        for (int i = 0; i < row.length; i++) {
            PColumn column = joinedTable.getColumns().get(i);
            boolean eval = new ProjectedColumnExpression(column, joinedTable, column.getName().getString()).evaluate(next, ptr);
            Object o = eval ? column.getDataType().toObject(ptr) : null;
            assertEquals(row[i], o);
        }
    }
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io), ResultIterator (org.apache.phoenix.iterate), QueryPlan (org.apache.phoenix.compile), PTable, PColumn, ColumnRef, TableRef (org.apache.phoenix.schema), Expression, LiteralExpression, ComparisonExpression, ProjectedColumnExpression, CorrelateVariableFieldAccessExpression (org.apache.phoenix.expression), Tuple, SingleKeyValueTuple (org.apache.phoenix.schema.tuple)
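
A hedged sketch of how a test might invoke this helper. The relations and expected rows are made-up illustrative data; the first row of each relation defines the projected table's columns via createProjectedTableFromLiterals, and JoinType is assumed to come from org.apache.phoenix.parse.JoinTableNode.

// Illustrative data only: correlate column 0 of each relation.
Object[][] left = { { 1, "a" }, { 2, "b" } };
Object[][] right = { { 1, "A" }, { 3, "C" } };
Object[][] expected = { { 1, "a", 1, "A" } };
testCorrelatePlan(left, right, 0, 0, JoinType.Inner, expected, null);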

Example 5 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by apache.

From class UnnestArrayPlanTest, method testUnnestArrays:

private void testUnnestArrays(PArrayDataType arrayType, List<Object[]> arrays, boolean withOrdinality) throws Exception {
    PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
    List<Tuple> tuples = toTuples(arrayType, arrays);
    LiteralResultIterationPlan subPlan = new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE, TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, null, OrderBy.EMPTY_ORDER_BY, null);
    LiteralExpression dummy = LiteralExpression.newConstant(null, arrayType);
    RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0);
    UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality);
    PName colName = PNameFactory.newName("ELEM");
    PColumn elemColumn = new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes());
    colName = PNameFactory.newName("IDX");
    PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes()) : null;
    List<PColumn> columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn);
    ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString());
    ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ResultIterator iterator = plan.iterator();
    for (Object[] o : flatten(arrays)) {
        Tuple tuple = iterator.next();
        assertNotNull(tuple);
        assertTrue(elemExpr.evaluate(tuple, ptr));
        Object elem = baseType.toObject(ptr);
        assertEquals(o[0], elem);
        if (withOrdinality) {
            assertTrue(indexExpr.evaluate(tuple, ptr));
            Object index = PInteger.INSTANCE.toObject(ptr);
            assertEquals(o[1], index);
        }
    }
    assertNull(iterator.next());
}
Also used: PColumnImpl, RowKeyValueAccessor, PColumn, PName (org.apache.phoenix.schema), ImmutableBytesWritable (org.apache.hadoop.hbase.io), LiteralExpression, ProjectedColumnExpression, RowKeyColumnExpression (org.apache.phoenix.expression), ResultIterator (org.apache.phoenix.iterate), PDataType (org.apache.phoenix.schema.types), Tuple, SingleKeyValueTuple (org.apache.phoenix.schema.tuple)
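
The assertNull(iterator.next()) check at the end of the test relies on the contract every ResultIterator must honor: next() returns null once results are exhausted. Below is a minimal sketch of implementing the interface directly, analogous in spirit to MaterializedResultIterator, assuming the interface consists of next(), close(), and explain(List<String>); InMemoryResultIterator is a hypothetical name.

import java.sql.SQLException;
import java.util.Iterator;
import java.util.List;

import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.schema.tuple.Tuple;

// Hypothetical in-memory iterator: serve pre-built tuples and return null
// when exhausted, which is exactly what assertNull(iterator.next()) checks.
class InMemoryResultIterator implements ResultIterator {
    private final Iterator<Tuple> tuples;

    InMemoryResultIterator(List<Tuple> tuples) {
        this.tuples = tuples.iterator();
    }

    @Override
    public Tuple next() throws SQLException {
        return tuples.hasNext() ? tuples.next() : null;
    }

    @Override
    public void close() throws SQLException {
        // nothing to release for in-memory results
    }

    @Override
    public void explain(List<String> planSteps) {
        planSteps.add("IN-MEMORY TUPLES");
    }
}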

Aggregations

ResultIterator (org.apache.phoenix.iterate): 26 usages
SequenceResultIterator (org.apache.phoenix.iterate): 9 usages
List (java.util): 8 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io): 8 usages
LimitingResultIterator (org.apache.phoenix.iterate): 8 usages
OffsetResultIterator (org.apache.phoenix.iterate): 8 usages
SQLException (java.sql): 7 usages
ConcatResultIterator (org.apache.phoenix.iterate): 7 usages
PColumn (org.apache.phoenix.schema): 7 usages
TableRef (org.apache.phoenix.schema): 7 usages
PhoenixConnection (org.apache.phoenix.jdbc): 6 usages
LiteralExpression (org.apache.phoenix.expression): 5 usages
DelegateResultIterator (org.apache.phoenix.iterate): 5 usages
ParallelIterators (org.apache.phoenix.iterate): 5 usages
SpoolingResultIterator (org.apache.phoenix.iterate): 5 usages
PTable (org.apache.phoenix.schema): 5 usages
Tuple (org.apache.phoenix.schema.tuple): 5 usages
ArrayList (java.util): 4 usages
QueryPlan (org.apache.phoenix.compile): 4 usages
StatementContext (org.apache.phoenix.compile): 4 usages