
Example 1 with ServerAggregators

Use of org.apache.phoenix.expression.aggregator.ServerAggregators in project phoenix by apache.

From the class QueryCompilerTest, method testCountAggregatorFirst.

@Test
public void testCountAggregatorFirst() throws Exception {
    String[] queries = new String[] {
            "SELECT sum(2.5),organization_id FROM atable GROUP BY organization_id,entity_id",
            "SELECT avg(a_integer) FROM atable GROUP BY organization_id,substr(entity_id,1,3),entity_id",
            "SELECT count(a_string) FROM atable GROUP BY substr(organization_id,1),entity_id",
            "SELECT min('foo') FROM atable GROUP BY entity_id,organization_id",
            "SELECT min('foo'),sum(a_integer),avg(2.5),4.5,max(b_string) FROM atable GROUP BY substr(organization_id,1),entity_id",
            "SELECT sum(2.5) FROM atable",
            "SELECT avg(a_integer) FROM atable",
            "SELECT count(a_string) FROM atable",
            "SELECT min('foo') FROM atable LIMIT 5",
            "SELECT min('foo'),sum(a_integer),avg(2.5),4.5,max(b_string) FROM atable" };
    List<Object> binds = Collections.emptyList();
    String query = null;
    try {
        for (int i = 0; i < queries.length; i++) {
            query = queries[i];
            Scan scan = compileQuery(query, binds);
            ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), null);
            Aggregator aggregator = aggregators.getAggregators()[0];
            assertTrue(aggregator instanceof CountAggregator);
        }
    } catch (Exception e) {
        throw new Exception(query, e);
    }
}
Also used: CountAggregator(org.apache.phoenix.expression.aggregator.CountAggregator) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) Scan(org.apache.hadoop.hbase.client.Scan) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) SQLException(java.sql.SQLException) ColumnAlreadyExistsException(org.apache.phoenix.schema.ColumnAlreadyExistsException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
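
The pattern the test relies on is that a CountAggregator sits first in the server-side aggregator array for every aggregate query it compiles. One likely reason (my reading, not something the test states) is that the row count is the one piece of state several aggregates share; AVG, for instance, can be finished on the client from a partial SUM and COUNT. A minimal, self-contained sketch of that finishing step, with every name hypothetical rather than taken from Phoenix:

import java.math.BigDecimal;
import java.math.MathContext;

// Hypothetical illustration: finishing AVG from the partial SUM/COUNT pairs
// returned by individual regions. None of these names come from Phoenix.
public class AvgFromPartials {

    /** One region's partial aggregate state for a single group. */
    static final class Partial {
        final BigDecimal sum;
        final long count;
        Partial(BigDecimal sum, long count) {
            this.sum = sum;
            this.count = count;
        }
    }

    /** Combine partials from all regions and divide once at the end. */
    static BigDecimal finishAvg(Partial... partials) {
        BigDecimal totalSum = BigDecimal.ZERO;
        long totalCount = 0;
        for (Partial p : partials) {
            totalSum = totalSum.add(p.sum);
            totalCount += p.count;
        }
        if (totalCount == 0) {
            return null; // AVG over an empty group is NULL in SQL
        }
        return totalSum.divide(BigDecimal.valueOf(totalCount), MathContext.DECIMAL64);
    }

    public static void main(String[] args) {
        // Two regions reporting partial SUM/COUNT for the same group key.
        System.out.println(finishAvg(
                new Partial(new BigDecimal("10"), 4),
                new Partial(new BigDecimal("5"), 1))); // prints 3
    }
}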

Example 2 with ServerAggregators

Use of org.apache.phoenix.expression.aggregator.ServerAggregators in project phoenix by apache.

From the class GroupedAggregateRegionObserver, method doPostScannerOpen.

/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list
 * of expressions from the scan and returns the aggregated rows of each group. For example,
 * given the following original rows in the RegionScanner:
 *
 *   KEY   COL1
 *   row1  a
 *   row2  b
 *   row3  a
 *   row4  a
 *
 * the following rows will be returned for COUNT(*):
 *
 *   KEY   COUNT
 *   a     3
 *   b     1
 *
 * The client is required to do a sort and a final aggregation, since multiple rows with the
 * same key may be returned from different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
    boolean keyOrdered = false;
    byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);
    if (expressionBytes == null) {
        expressionBytes = scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
        keyOrdered = true;
    }
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        Region region = c.getEnvironment().getRegion();
        offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
    ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), c.getEnvironment().getConfiguration());
    RegionScanner innerScanner = s;
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
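    // When scanning a local index, or when the client sent a tuple projection without a hash
    // join, wrap the scanner so data table columns are projected before aggregation.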
    if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
        innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier);
    }
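    // An optional limit for the aggregation may be sent as a scan attribute.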
    long limit = Long.MAX_VALUE;
    byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
    if (limitBytes != null) {
        limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
    }
    if (keyOrdered) {
        // already in the required group by key order
        return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
    } else {
        // Otherwise, collect them all up in an in-memory map
        return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
    }
}
Also used: TupleProjector(org.apache.phoenix.execute.TupleProjector) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) Expression(org.apache.phoenix.expression.Expression) HashJoinInfo(org.apache.phoenix.join.HashJoinInfo) Region(org.apache.hadoop.hbase.regionserver.Region) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
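
As the javadoc above points out, each region emits one aggregated row per group it happened to contain, so the client still has to sort the rows and fold together groups that appear in more than one region. A rough sketch of that final merge for COUNT(*), assuming each region hands back (group key, partial count) pairs; the class and method names are illustrative, not Phoenix APIs:

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Hypothetical client-side merge of per-region COUNT(*) rows.
// Using a TreeMap keeps the merged groups sorted by group key.
public class ClientSideGroupByMerge {

    static Map<String, Long> merge(List<Map<String, Long>> regionResults) {
        Map<String, Long> merged = new TreeMap<>();
        for (Map<String, Long> regionResult : regionResults) {
            for (Map.Entry<String, Long> row : regionResult.entrySet()) {
                // The same group key can arrive from several regions;
                // the final count is the sum of the partial counts.
                merged.merge(row.getKey(), row.getValue(), Long::sum);
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        // Region 1 saw groups a and b; region 2 saw group a again.
        Map<String, Long> region1 = Map.of("a", 3L, "b", 1L);
        Map<String, Long> region2 = Map.of("a", 2L);
        System.out.println(merge(List.of(region1, region2))); // {a=5, b=1}
    }
}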

Example 3 with ServerAggregators

Use of org.apache.phoenix.expression.aggregator.ServerAggregators in project phoenix by apache.

From the class QueryCompilerTest, method testOrderByWithNoProjection.

@Test
public void testOrderByWithNoProjection() throws SQLException {
    Connection conn = DriverManager.getConnection(getUrl());
    try {
        conn.createStatement().execute("create table x (id integer primary key, A.i1 integer," + " B.i2 integer)");
        Scan scan = projectQuery("select A.i1 from X group by i1 order by avg(B.i2) " + "desc");
        ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), null);
        assertEquals(2, aggregators.getAggregatorCount());
    } finally {
        conn.close();
    }
}
Also used: Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
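
The assertion of two server aggregators is notable because only A.i1 is projected: the avg(B.i2) used for ordering still has to be computed on the region servers. My assumption (not stated in the test) is that the AVG breaks down into a count plus a sum style aggregator, which is where the count of 2 comes from. Below is a hedged end-to-end sketch of the same query shape against a running Phoenix instance; the JDBC URL and the inserted rows are made up for illustration:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hypothetical end-to-end run of the query shape from the test above.
public class OrderByAggregateExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS x (id INTEGER PRIMARY KEY, A.i1 INTEGER, B.i2 INTEGER)");
            stmt.execute("UPSERT INTO x VALUES (1, 10, 100)");
            stmt.execute("UPSERT INTO x VALUES (2, 10, 300)");
            stmt.execute("UPSERT INTO x VALUES (3, 20, 50)");
            conn.commit();
            // Only A.i1 is projected, but avg(B.i2) is still evaluated
            // server-side so the groups can be ordered by it.
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT A.i1 FROM x GROUP BY i1 ORDER BY AVG(B.i2) DESC")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1)); // expect 10, then 20
                }
            }
        }
    }
}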

Aggregations

ServerAggregators (org.apache.phoenix.expression.aggregator.ServerAggregators) 3
Scan (org.apache.hadoop.hbase.client.Scan) 2
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest) 2
Test (org.junit.Test) 2
Connection (java.sql.Connection) 1
SQLException (java.sql.SQLException) 1
Region (org.apache.hadoop.hbase.regionserver.Region) 1
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner) 1
TupleProjector (org.apache.phoenix.execute.TupleProjector) 1
Expression (org.apache.phoenix.expression.Expression) 1
Aggregator (org.apache.phoenix.expression.aggregator.Aggregator) 1
CountAggregator (org.apache.phoenix.expression.aggregator.CountAggregator) 1
ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference) 1
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 1
IndexMaintainer (org.apache.phoenix.index.IndexMaintainer) 1
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 1
HashJoinInfo (org.apache.phoenix.join.HashJoinInfo) 1
AmbiguousColumnException (org.apache.phoenix.schema.AmbiguousColumnException) 1
ColumnAlreadyExistsException (org.apache.phoenix.schema.ColumnAlreadyExistsException) 1
ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException) 1