Example 6 with ResultTuple

Use of org.apache.phoenix.schema.tuple.ResultTuple in the Apache Phoenix project.

From the class TraceQueryPlan, method iterator:

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    final PhoenixConnection conn = stmt.getConnection();
    if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    return new ResultIterator() {

        @Override
        public void close() throws SQLException {
        }

        @Override
        public Tuple next() throws SQLException {
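            // 'first' is a boolean field of the enclosing TraceQueryPlan, initialized to
            // true, so this anonymous iterator produces at most one row.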
            if (!first)
                return null;
            TraceScope traceScope = conn.getTraceScope();
            if (traceStatement.isTraceOn()) {
                conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
                if (conn.getSampler() == Sampler.NEVER) {
                    closeTraceScope(conn);
                }
                if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
                    traceScope = Tracing.startNewSpan(conn, "Enabling trace");
                    if (traceScope.getSpan() != null) {
                        conn.setTraceScope(traceScope);
                    } else {
                        closeTraceScope(conn);
                    }
                }
            } else {
                closeTraceScope(conn);
                conn.setSampler(Sampler.NEVER);
            }
            if (traceScope == null || traceScope.getSpan() == null)
                return null;
            first = false;
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            ParseNodeFactory factory = new ParseNodeFactory();
            LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
            LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
            expression.evaluate(null, ptr);
            byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
            Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
            List<Cell> cells = new ArrayList<Cell>(1);
            cells.add(cell);
            return new ResultTuple(Result.create(cells));
        }

        private void closeTraceScope(final PhoenixConnection conn) {
            if (conn.getTraceScope() != null) {
                conn.getTraceScope().close();
                conn.setTraceScope(null);
            }
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), ResultIterator (org.apache.phoenix.iterate.ResultIterator), TraceScope (org.apache.htrace.TraceScope), ArrayList (java.util.ArrayList), LiteralParseNode (org.apache.phoenix.parse.LiteralParseNode), List (java.util.List), Cell (org.apache.hadoop.hbase.Cell), ParseNodeFactory (org.apache.phoenix.parse.ParseNodeFactory)
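This example (and Example 10 below) shares one core pattern: evaluate a literal expression, use the encoded bytes as a row key, wrap them in a one-cell Result, and return that as a ResultTuple. A minimal sketch of the pattern in isolation, assuming the value is passed in directly (the helper name is ours; the original routes the value through a ParseNodeFactory literal first):

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Determinism;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.util.ByteUtil;

// Materialize a single computed long (e.g. a trace id) as a one-row ResultTuple.
static ResultTuple singleValueTuple(long value) throws SQLException {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    LiteralExpression expression =
            LiteralExpression.newConstant(value, PLong.INSTANCE, Determinism.ALWAYS);
    expression.evaluate(null, ptr); // writes the encoded value into ptr
    byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
    // The value lives entirely in the row key; family, qualifier and value are empty.
    Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY,
            HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(),
            Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
    List<Cell> cells = new ArrayList<Cell>(1);
    cells.add(cell);
    return new ResultTuple(Result.create(cells));
}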

Example 7 with ResultTuple

Use of org.apache.phoenix.schema.tuple.ResultTuple in the Apache Phoenix project.

From the class MetaDataEndpointImpl, method findChildViews_4_11:

private TableViewFinder findChildViews_4_11(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName) throws IOException {
    Scan scan = new Scan();
    byte[] startRow = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    byte[] stopRow = ByteUtil.nextKey(startRow);
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, CHILD_TABLE_BYTES);
    linkFilter.setFilterIfMissing(true);
    scan.setFilter(linkFilter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES);
    // Original region-only scanner modified due to PHOENIX-1208
    // RegionScanner scanner = region.getScanner(scan);
    // The following *should* work, but doesn't due to HBASE-11837
    // TableName systemCatalogTableName = region.getTableDesc().getTableName();
    // HTableInterface hTable = env.getTable(systemCatalogTableName);
    // These deprecated calls work around the issue
    HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, region.getTableDesc().getTableName().getName());
    try {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<ViewInfo> viewInfoList = Lists.newArrayList();
        ResultScanner scanner = hTable.getScanner(scan);
        try {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                byte[][] rowViewKeyMetaData = new byte[5][];
                getVarChars(result.getRow(), 5, rowViewKeyMetaData);
                byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
                byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
                byte[] viewName = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
                viewInfoList.add(new ViewInfo(viewTenantId, viewSchemaName, viewName));
            }
            TableViewFinder tableViewFinderResult = new TableViewFinder(viewInfoList);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        } finally {
            scanner.close();
        }
    } finally {
        hTable.close();
    }
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), Result (org.apache.hadoop.hbase.client.Result), Scan (org.apache.hadoop.hbase.client.Scan)
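Both catalog scans (this one and Example 9 below) read each row's key through the Tuple interface rather than calling result.getRow() directly. A minimal sketch of that two-step, with a hypothetical helper name:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.tuple.ResultTuple;

// Extract a scanned row's key as a standalone byte[] via the Tuple interface.
static byte[] rowKeyOf(Result result) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    new ResultTuple(result).getKey(ptr); // points ptr at the row-key bytes
    return ptr.copyBytes();              // defensive copy, as in the example above
}

For a plain Result this is equivalent to result.getRow(); the Tuple form matters when the same code must accept any of Phoenix's Tuple implementations.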

Example 8 with ResultTuple

Use of org.apache.phoenix.schema.tuple.ResultTuple in the Apache Phoenix project.

From the class HashJoinRegionScanner, method processResults:

private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
    if (result.isEmpty())
        return;
    Tuple tuple = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : new ResultTuple(Result.create(result));
    // For backward compatibility; in newer versions forceProjection() always returns true.
    if (joinInfo.forceProjection()) {
        tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
    }
    // TODO: fix below Scanner.next() and Scanner.nextRaw() methods as well.
    if (hasBatchLimit)
        throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
    int count = joinInfo.getJoinIds().length;
    boolean cont = true;
    for (int i = 0; i < count; i++) {
        if (!(joinInfo.earlyEvaluation()[i]) || hashCaches[i] == null)
            continue;
        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
        tempTuples[i] = hashCaches[i].get(key);
        JoinType type = joinInfo.getJoinTypes()[i];
        if (((type == JoinType.Inner || type == JoinType.Semi) && tempTuples[i] == null) || (type == JoinType.Anti && tempTuples[i] != null)) {
            cont = false;
            break;
        }
    }
    if (cont) {
        if (projector == null) {
            int dup = 1;
            for (int i = 0; i < count; i++) {
                dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size());
            }
            for (int i = 0; i < dup; i++) {
                resultQueue.offer(tuple);
            }
        } else {
            KeyValueSchema schema = joinInfo.getJoinedSchema();
            if (!joinInfo.forceProjection()) {
                // backward compatibility
                tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
            }
            resultQueue.offer(tuple);
            for (int i = 0; i < count; i++) {
                boolean earlyEvaluation = joinInfo.earlyEvaluation()[i];
                JoinType type = joinInfo.getJoinTypes()[i];
                if (earlyEvaluation && (type == JoinType.Semi || type == JoinType.Anti))
                    continue;
                int j = resultQueue.size();
                while (j-- > 0) {
                    Tuple lhs = resultQueue.poll();
                    if (!earlyEvaluation) {
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
                        tempTuples[i] = hashCaches[i].get(key);
                        if (tempTuples[i] == null) {
                            if (type == JoinType.Inner || type == JoinType.Semi) {
                                continue;
                            } else if (type == JoinType.Anti) {
                                resultQueue.offer(lhs);
                                continue;
                            }
                        }
                    }
                    if (tempTuples[i] == null) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, null, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                        continue;
                    }
                    for (Tuple t : tempTuples[i]) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, t, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                    }
                }
            }
        }
        // apply post-join filter
        Expression postFilter = joinInfo.getPostJoinFilterExpression();
        if (postFilter != null) {
            for (Iterator<Tuple> iter = resultQueue.iterator(); iter.hasNext(); ) {
                Tuple t = iter.next();
                postFilter.reset();
                ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
                try {
                    if (!postFilter.evaluate(t, tempPtr)) {
                        iter.remove();
                        continue;
                    }
                } catch (IllegalDataException e) {
                    iter.remove();
                    continue;
                }
                Boolean b = (Boolean) postFilter.getDataType().toObject(tempPtr);
                if (!b.booleanValue()) {
                    iter.remove();
                }
            }
        }
    }
}
Also used: PositionBasedResultTuple (org.apache.phoenix.schema.tuple.PositionBasedResultTuple), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), JoinType (org.apache.phoenix.parse.JoinTableNode.JoinType), ProjectedValueTuple (org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple), Expression (org.apache.phoenix.expression.Expression), KeyValueSchema (org.apache.phoenix.schema.KeyValueSchema), Tuple (org.apache.phoenix.schema.tuple.Tuple), IllegalDataException (org.apache.phoenix.schema.IllegalDataException)
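The inner loop above (int j = resultQueue.size(); while (j-- > 0)) implements an iterated cross product: each pass drains the tuples queued so far and re-queues their combinations with one join's cached matches, so the queue grows multiplicatively per join. A type-agnostic sketch of just that expansion pattern (the List-based "tuple" and the names are ours, not Phoenix types):

import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Replace every element queued so far with its combinations against each
// join's matches; after the loop the queue holds the full cross product.
static <T> void expand(Deque<List<T>> queue, List<List<T>> matchesPerJoin) {
    for (List<T> matches : matchesPerJoin) {
        int j = queue.size();      // snapshot: expand only pre-existing elements
        while (j-- > 0) {
            List<T> lhs = queue.poll();
            for (T rhs : matches) {
                List<T> joined = new ArrayList<T>(lhs);
                joined.add(rhs);
                queue.offer(joined);
            }
        }
    }
}

Seeding the queue with one tuple and expanding against two match lists of sizes m and n leaves m * n joined tuples, mirroring how processResults multiplies the tempTuples sizes when no projector is set.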

Example 9 with ResultTuple

Use of org.apache.phoenix.schema.tuple.ResultTuple in the Apache Phoenix project.

From the class MetaDataEndpointImpl, method findChildViews_deprecated:

// TODO remove this in 4.13 release 
@Deprecated
private TableViewFinder findChildViews_deprecated(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
    byte[] schemaName = table.getSchemaName().getBytes();
    byte[] tableName = table.getTableName().getBytes();
    boolean isMultiTenant = table.isMultiTenant();
    Scan scan = new Scan();
    // If the table is multi-tenant, child views may exist under any tenant, so the row
    // key cannot be constrained; otherwise all child views share the same tenantId.
    if (!isMultiTenant) {
        byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
        byte[] stopRow = ByteUtil.nextKey(startRow);
        scan.setStartRow(startRow);
        scan.setStopRow(stopRow);
    }
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
    SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, CompareOp.EQUAL, PTableType.VIEW.getSerializedValue().getBytes());
    tableTypeFilter.setFilterIfMissing(false);
    linkFilter.setFilterIfMissing(true);
    byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil.getPhysicalTableName(SchemaUtil.getTableNameAsBytes(schemaName, tableName), table.isNamespaceMapped()).getName());
    SuffixFilter rowFilter = new SuffixFilter(suffix);
    FilterList filter = new FilterList(linkFilter, tableTypeFilter, rowFilter);
    scan.setFilter(filter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
    // Original region-only scanner modified due to PHOENIX-1208
    // RegionScanner scanner = region.getScanner(scan);
    // The following *should* work, but doesn't due to HBASE-11837
    // TableName systemCatalogTableName = region.getTableDesc().getTableName();
    // HTableInterface hTable = env.getTable(systemCatalogTableName);
    // These deprecated calls work around the issue
    HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, region.getTableDesc().getTableName().getName());
    try {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<ViewInfo> viewInfoList = Lists.newArrayList();
        ResultScanner scanner = hTable.getScanner(scan);
        try {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                byte[][] rowKeyMetaData = new byte[3][];
                getVarChars(result.getRow(), 3, rowKeyMetaData);
                byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
                byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
                byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
                viewInfoList.add(new ViewInfo(viewTenantId, viewSchemaName, viewName));
            }
            TableViewFinder tableViewFinderResult = new TableViewFinder(viewInfoList);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        } finally {
            scanner.close();
        }
    } finally {
        hTable.close();
    }
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), FilterList (org.apache.hadoop.hbase.filter.FilterList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), Result (org.apache.hadoop.hbase.client.Result), Scan (org.apache.hadoop.hbase.client.Scan)
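Both findChildViews variants release the scanner and the table with nested try/finally blocks. Since ResultScanner and HTableInterface are both Closeable, the same cleanup order can be had with try-with-resources on Java 7+; a sketch of the scan skeleton under that assumption (helper name ours):

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.tuple.ResultTuple;

// Resources close in reverse declaration order, matching the nested finally blocks.
static void scanChildLinks(HTableInterface hTable, Scan scan) throws IOException {
    try (HTableInterface table = hTable;
         ResultScanner scanner = table.getScanner(scan)) {
        for (Result result = scanner.next(); result != null; result = scanner.next()) {
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            new ResultTuple(result).getKey(ptr);
            // ... decode the row key and collect view info as in the examples above ...
        }
    }
}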

Example 10 with ResultTuple

Use of org.apache.phoenix.schema.tuple.ResultTuple in the Apache Phoenix project.

From the class ListJarsQueryPlan, method iterator:

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    return new ResultIterator() {

        private RemoteIterator<LocatedFileStatus> listFiles = null;

        @Override
        public void close() throws SQLException {
        }

        @Override
        public Tuple next() throws SQLException {
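            // 'first' is a boolean field of the enclosing ListJarsQueryPlan; the file
            // listing is initialized lazily on the first call to next().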
            try {
                if (first) {
                    String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps().get(QueryServices.DYNAMIC_JARS_DIR_KEY);
                    if (dynamicJarsDir == null) {
                        throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY + " is not configured for listing the jars.");
                    }
                    dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/';
                    Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
                    Path dynamicJarsDirPath = new Path(dynamicJarsDir);
                    FileSystem fs = dynamicJarsDirPath.getFileSystem(conf);
                    listFiles = fs.listFiles(dynamicJarsDirPath, true);
                    first = false;
                }
                if (listFiles == null || !listFiles.hasNext())
                    return null;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ParseNodeFactory factory = new ParseNodeFactory();
                LiteralParseNode literal = factory.literal(listFiles.next().getPath().toString());
                LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PVarchar.INSTANCE, Determinism.ALWAYS);
                expression.evaluate(null, ptr);
                byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
                Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
                List<Cell> cells = new ArrayList<Cell>(1);
                cells.add(cell);
                return new ResultTuple(Result.create(cells));
            } catch (IOException e) {
                throw new SQLException(e);
            }
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
}
Also used: Path (org.apache.hadoop.fs.Path), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), Configuration (org.apache.hadoop.conf.Configuration), SQLException (java.sql.SQLException), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), ResultIterator (org.apache.phoenix.iterate.ResultIterator), ArrayList (java.util.ArrayList), IOException (java.io.IOException), LiteralParseNode (org.apache.phoenix.parse.LiteralParseNode), RemoteIterator (org.apache.hadoop.fs.RemoteIterator), FileSystem (org.apache.hadoop.fs.FileSystem), List (java.util.List), Cell (org.apache.hadoop.hbase.Cell), ParseNodeFactory (org.apache.phoenix.parse.ParseNodeFactory)
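Reading the value back out of such a single-cell tuple is symmetric: the jar path lives in the encoded row key, so it decodes with the matching Phoenix type. A short sketch, assuming a tuple produced by the pattern above (helper name ours):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PVarchar;

// Decode the jar path that the plan encoded into the tuple's row key.
static String jarPathOf(Tuple tuple) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    tuple.getKey(ptr);
    return (String) PVarchar.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength());
}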

Aggregations

ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple): 11 uses
Result (org.apache.hadoop.hbase.client.Result): 8 uses
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 6 uses
Scan (org.apache.hadoop.hbase.client.Scan): 6 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 5 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 4 uses
ArrayList (java.util.ArrayList): 3 uses
List (java.util.List): 3 uses
Cell (org.apache.hadoop.hbase.Cell): 3 uses
HTable (org.apache.hadoop.hbase.client.HTable): 3 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 3 uses
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 3 uses
SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression): 3 uses
Tuple (org.apache.phoenix.schema.tuple.Tuple): 3 uses
SQLException (java.sql.SQLException): 2 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 2 uses
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 2 uses
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 2 uses
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 2 uses