
Example 31 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class ProtobufUtil, the method toGet:

/**
   * Convert a protocol buffer Get to a client Get
   *
   * @param proto the protocol buffer Get to convert
   * @return the converted client Get
   * @throws IOException
   */
public static Get toGet(final ClientProtos.Get proto) throws IOException {
    if (proto == null) {
        return null;
    }
    byte[] row = proto.getRow().toByteArray();
    Get get = new Get(row);
    if (proto.hasCacheBlocks()) {
        get.setCacheBlocks(proto.getCacheBlocks());
    }
    if (proto.hasMaxVersions()) {
        get.setMaxVersions(proto.getMaxVersions());
    }
    if (proto.hasStoreLimit()) {
        get.setMaxResultsPerColumnFamily(proto.getStoreLimit());
    }
    if (proto.hasStoreOffset()) {
        get.setRowOffsetPerColumnFamily(proto.getStoreOffset());
    }
    if (proto.getCfTimeRangeCount() > 0) {
        for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
            TimeRange timeRange = protoToTimeRange(cftr.getTimeRange());
            get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange);
        }
    }
    if (proto.hasTimeRange()) {
        TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
        get.setTimeRange(timeRange);
    }
    if (proto.hasFilter()) {
        FilterProtos.Filter filter = proto.getFilter();
        get.setFilter(ProtobufUtil.toFilter(filter));
    }
    for (NameBytesPair attribute : proto.getAttributeList()) {
        get.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    if (proto.getColumnCount() > 0) {
        for (Column column : proto.getColumnList()) {
            byte[] family = column.getFamily().toByteArray();
            if (column.getQualifierCount() > 0) {
                for (ByteString qualifier : column.getQualifierList()) {
                    get.addColumn(family, qualifier.toByteArray());
                }
            } else {
                get.addFamily(family);
            }
        }
    }
    if (proto.hasExistenceOnly() && proto.getExistenceOnly()) {
        get.setCheckExistenceOnly(true);
    }
    if (proto.hasConsistency()) {
        get.setConsistency(toConsistency(proto.getConsistency()));
    }
    if (proto.hasLoadColumnFamiliesOnDemand()) {
        get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
    }
    return get;
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair), Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column), ByteString (com.google.protobuf.ByteString), Get (org.apache.hadoop.hbase.client.Get), FilterProtos (org.apache.hadoop.hbase.protobuf.generated.FilterProtos), HBaseProtos (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos)
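
A minimal round-trip sketch of the conversion above. The row key, version count, and time bounds are hypothetical values; the builders are the generated ones from Client.proto and HBase.proto that the converter itself reads from:

static Get timeBoundedGet() throws IOException {
    ClientProtos.Get proto = ClientProtos.Get.newBuilder()
        .setRow(ByteString.copyFromUtf8("row-1")) // hypothetical row key
        .setMaxVersions(3)
        .setTimeRange(HBaseProtos.TimeRange.newBuilder()
            .setFrom(0L)
            .setTo(System.currentTimeMillis())
            .build())
        .build();
    // toGet mirrors each optional proto field onto the client Get,
    // including the per-column-family and global time ranges.
    return ProtobufUtil.toGet(proto);
}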

Example 32 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class ProtobufUtil, the method toScan:

/**
   * Convert a client Scan to a protocol buffer Scan
   *
   * @param scan the client Scan to convert
   * @return the converted protocol buffer Scan
   * @throws IOException
   */
public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
    ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
    scanBuilder.setCacheBlocks(scan.getCacheBlocks());
    if (scan.getBatch() > 0) {
        scanBuilder.setBatchSize(scan.getBatch());
    }
    if (scan.getMaxResultSize() > 0) {
        scanBuilder.setMaxResultSize(scan.getMaxResultSize());
    }
    if (scan.isSmall()) {
        scanBuilder.setSmall(scan.isSmall());
    }
    if (scan.getAllowPartialResults()) {
        scanBuilder.setAllowPartialResults(scan.getAllowPartialResults());
    }
    Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    if (loadColumnFamiliesOnDemand != null) {
        scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
    }
    scanBuilder.setMaxVersions(scan.getMaxVersions());
    for (Entry<byte[], TimeRange> cftr : scan.getColumnFamilyTimeRange().entrySet()) {
        HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
        b.setColumnFamily(ByteStringer.wrap(cftr.getKey()));
        b.setTimeRange(timeRangeToProto(cftr.getValue()));
        scanBuilder.addCfTimeRange(b);
    }
    TimeRange timeRange = scan.getTimeRange();
    if (!timeRange.isAllTime()) {
        HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
        timeRangeBuilder.setFrom(timeRange.getMin());
        timeRangeBuilder.setTo(timeRange.getMax());
        scanBuilder.setTimeRange(timeRangeBuilder.build());
    }
    Map<String, byte[]> attributes = scan.getAttributesMap();
    if (!attributes.isEmpty()) {
        NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
        for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
            attributeBuilder.setName(attribute.getKey());
            attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
            scanBuilder.addAttribute(attributeBuilder.build());
        }
    }
    byte[] startRow = scan.getStartRow();
    if (startRow != null && startRow.length > 0) {
        scanBuilder.setStartRow(ByteStringer.wrap(startRow));
    }
    byte[] stopRow = scan.getStopRow();
    if (stopRow != null && stopRow.length > 0) {
        scanBuilder.setStopRow(ByteStringer.wrap(stopRow));
    }
    if (scan.hasFilter()) {
        scanBuilder.setFilter(ProtobufUtil.toFilter(scan.getFilter()));
    }
    if (scan.hasFamilies()) {
        Column.Builder columnBuilder = Column.newBuilder();
        for (Map.Entry<byte[], NavigableSet<byte[]>> family : scan.getFamilyMap().entrySet()) {
            columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
            NavigableSet<byte[]> qualifiers = family.getValue();
            columnBuilder.clearQualifier();
            if (qualifiers != null && qualifiers.size() > 0) {
                for (byte[] qualifier : qualifiers) {
                    columnBuilder.addQualifier(ByteStringer.wrap(qualifier));
                }
            }
            scanBuilder.addColumn(columnBuilder.build());
        }
    }
    if (scan.getMaxResultsPerColumnFamily() >= 0) {
        scanBuilder.setStoreLimit(scan.getMaxResultsPerColumnFamily());
    }
    if (scan.getRowOffsetPerColumnFamily() > 0) {
        scanBuilder.setStoreOffset(scan.getRowOffsetPerColumnFamily());
    }
    if (scan.isReversed()) {
        scanBuilder.setReversed(scan.isReversed());
    }
    if (scan.getConsistency() == Consistency.TIMELINE) {
        scanBuilder.setConsistency(toConsistency(scan.getConsistency()));
    }
    if (scan.getCaching() > 0) {
        scanBuilder.setCaching(scan.getCaching());
    }
    long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan);
    if (mvccReadPoint > 0) {
        scanBuilder.setMvccReadPoint(mvccReadPoint);
    }
    if (!scan.includeStartRow()) {
        scanBuilder.setIncludeStartRow(false);
    }
    if (scan.includeStopRow()) {
        scanBuilder.setIncludeStopRow(true);
    }
    if (scan.getReadType() != Scan.ReadType.DEFAULT) {
        scanBuilder.setReadType(toReadType(scan.getReadType()));
    }
    return scanBuilder.build();
}
Also used: NavigableSet (java.util.NavigableSet), ByteString (com.google.protobuf.ByteString), TimeRange (org.apache.hadoop.hbase.io.TimeRange), NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair), Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column), Scan (org.apache.hadoop.hbase.client.Scan), Map (java.util.Map), HashMap (java.util.HashMap)
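
A short usage sketch of toScan worth noting: only a non-default time range is serialized, so an unrestricted Scan leaves the field unset on the wire. This is a minimal illustration, not code from the project:

static void timeRangeOnTheWire() throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(100L, 200L); // half-open interval [100, 200)
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    assert proto.hasTimeRange();   // an explicit range is serialized
    assert proto.getTimeRange().getFrom() == 100L;
    assert proto.getTimeRange().getTo() == 200L;
    // A default Scan covers all time, so toScan omits the field entirely.
    assert !ProtobufUtil.toScan(new Scan()).hasTimeRange();
}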

Example 33 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

From the class ExplainTable, the method explain:

protected void explain(String prefix, List<String> planSteps) {
    StringBuilder buf = new StringBuilder(prefix);
    ScanRanges scanRanges = context.getScanRanges();
    Scan scan = context.getScan();
    if (scan.getConsistency() != Consistency.STRONG) {
        buf.append("TIMELINE-CONSISTENCY ");
    }
    if (hint.hasHint(Hint.SMALL)) {
        buf.append(Hint.SMALL).append(" ");
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        buf.append("REVERSE ");
    }
    if (scanRanges.isEverything()) {
        buf.append("FULL SCAN ");
    } else {
        explainSkipScan(buf);
    }
    buf.append("OVER ").append(tableRef.getTable().getPhysicalName().getString());
    if (!scanRanges.isPointLookup()) {
        appendKeyRanges(buf);
    }
    planSteps.add(buf.toString());
    if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) {
        TimeRange range = context.getScan().getTimeRange();
        planSteps.add("    ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")");
    }
    PageFilter pageFilter = null;
    FirstKeyOnlyFilter firstKeyOnlyFilter = null;
    BooleanExpressionFilter whereFilter = null;
    DistinctPrefixFilter distinctFilter = null;
    Iterator<Filter> filterIterator = ScanUtil.getFilterIterator(scan);
    while (filterIterator.hasNext()) {
        Filter filter = filterIterator.next();
        if (filter instanceof FirstKeyOnlyFilter) {
            firstKeyOnlyFilter = (FirstKeyOnlyFilter) filter;
        } else if (filter instanceof PageFilter) {
            pageFilter = (PageFilter) filter;
        } else if (filter instanceof BooleanExpressionFilter) {
            whereFilter = (BooleanExpressionFilter) filter;
        } else if (filter instanceof DistinctPrefixFilter) {
            distinctFilter = (DistinctPrefixFilter) filter;
        }
    }
    if (whereFilter != null) {
        planSteps.add("    SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + whereFilter.toString());
    } else if (firstKeyOnlyFilter != null) {
        planSteps.add("    SERVER FILTER BY FIRST KEY ONLY");
    }
    if (distinctFilter != null) {
        planSteps.add("    SERVER DISTINCT PREFIX FILTER OVER " + groupBy.getExpressions().toString());
    }
    if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) {
        // with GROUP BY, sort happens client-side
        planSteps.add("    SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
    } else {
        if (offset != null) {
            planSteps.add("    SERVER OFFSET " + offset);
        }
        if (pageFilter != null) {
            planSteps.add("    SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
        }
    }
    Integer groupByLimit = null;
    byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT);
    if (groupByLimitBytes != null) {
        groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes);
    }
    groupBy.explain(planSteps, groupByLimit);
    if (scan.getAttribute(BaseScannerRegionObserver.SPECIFIC_ARRAY_INDEX) != null) {
        planSteps.add("    SERVER ARRAY ELEMENT PROJECTION");
    }
}
Also used: PInteger (org.apache.phoenix.schema.types.PInteger), TimeRange (org.apache.hadoop.hbase.io.TimeRange), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), BooleanExpressionFilter (org.apache.phoenix.filter.BooleanExpressionFilter), DistinctPrefixFilter (org.apache.phoenix.filter.DistinctPrefixFilter), PageFilter (org.apache.hadoop.hbase.filter.PageFilter), Filter (org.apache.hadoop.hbase.filter.Filter), Scan (org.apache.hadoop.hbase.client.Scan), ScanRanges (org.apache.phoenix.compile.ScanRanges)
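
The ROW TIMESTAMP FILTER step above is printed as "[min, max)" because a TimeRange includes its minimum and excludes its maximum. A minimal illustration of that semantic (the throws clause assumes the HBase 1.x TimeRange constructor):

static void halfOpenSemantics() throws IOException {
    TimeRange range = new TimeRange(100L, 200L);
    assert range.withinTimeRange(100L);  // min is inclusive
    assert range.withinTimeRange(199L);
    assert !range.withinTimeRange(200L); // max is exclusive, hence "[min, max)"
}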

Example 34 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

From the class BaseScannerRegionObserver, the method preScannerOpen:

@Override
public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
    byte[] txnScn = scan.getAttribute(TX_SCN);
    if (txnScn != null) {
        TimeRange timeRange = scan.getTimeRange();
        scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn));
    }
    if (isRegionObserverFor(scan)) {
        // In most cases, a scan crossing a region boundary just means the client's
        // cached region boundaries are out of date and can safely be ignored; for
        // local indexes, or when the boundary check is not skipped, we must throw.
        if (!skipRegionBoundaryCheck(scan) || ScanUtil.isLocalIndex(scan)) {
            throwIfScanOutOfRegion(scan, c.getEnvironment().getRegion());
        }
        // Muck with the start/stop row of the scan and set as reversed at the
        // last possible moment. You need to swap the start/stop and make the
        // start exclusive and the stop inclusive.
        ScanUtil.setupReverseScan(scan);
    }
    this.encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);
    this.useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan);
    return s;
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange)
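
A hedged sketch of the client side of the TX_SCN handshake above, assuming TX_SCN is the public attribute key on BaseScannerRegionObserver; 12345L is a hypothetical transaction SCN:

static Scan scnBoundedScan() throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(0L, Long.MAX_VALUE);
    scan.setAttribute(BaseScannerRegionObserver.TX_SCN, Bytes.toBytes(12345L)); // hypothetical SCN
    // After preScannerOpen runs server-side, the effective time range is [0, 12345).
    return scan;
}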

Example 35 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

From the class BaseQueryPlan, the method iterator:

public final ResultIterator iterator(final Map<ImmutableBytesPtr, ServerCache> caches, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    ScanRanges scanRanges = context.getScanRanges();
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (scanRanges == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return getWrappedIterator(caches, ResultIterator.EMPTY_ITERATOR);
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan, caches);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    PhoenixConnection connection = context.getConnection();
    final int smallScanThreshold = connection.getQueryServices().getProps().getInt(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD);
    if (statement.getHint().hasHint(Hint.SMALL) || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold)) {
        scan.setSmall(true);
    }
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = scanRanges.getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // Always use latest timestamp unless scn is set or transactional (see PHOENIX-4089)
            scn = HConstants.LATEST_TIMESTAMP;
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes.
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If any projected data columns are not present in the index, they must be
        // joined back from the data table.
        if (!dataColumns.isEmpty()) {
            // Set data columns to be joined back from the data table.
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Serialize the data-table columns to join back.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan, caches);
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), TracingIterator (org.apache.phoenix.trace.TracingIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), DelegateResultIterator (org.apache.phoenix.iterate.DelegateResultIterator), TraceScope (org.apache.htrace.TraceScope), IOException (java.io.IOException), ScanRanges (org.apache.phoenix.compile.ScanRanges), PTable (org.apache.phoenix.schema.PTable), Hint (org.apache.phoenix.parse.HintNode.Hint), PColumn (org.apache.phoenix.schema.PColumn), TimeRange (org.apache.hadoop.hbase.io.TimeRange), KeyValueSchema (org.apache.phoenix.schema.KeyValueSchema), TableRef (org.apache.phoenix.schema.TableRef), ParseNodeFactory (org.apache.phoenix.parse.ParseNodeFactory)
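
The time-range intersection is the step that can short-circuit the whole query: if the row-timestamp range, the scan's existing range, and the SCN have no overlap, the plan returns an empty iterator without touching the server. A simplified sketch of the idea, not Phoenix's actual ScanUtil.intersectTimeRange (which handles SCN defaults and edge cases more carefully):

static TimeRange intersect(TimeRange a, TimeRange b, long scn) throws IOException {
    long min = Math.max(a.getMin(), b.getMin());
    // Treat the SCN as an inclusive upper bound; TimeRange maxima are exclusive.
    long scnBound = (scn == HConstants.LATEST_TIMESTAMP) ? scn : scn + 1;
    long max = Math.min(Math.min(a.getMax(), b.getMax()), scnBound);
    return min < max ? new TimeRange(min, max) : null; // null: nothing to scan
}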

Aggregations

TimeRange (org.apache.hadoop.hbase.io.TimeRange): 45 usages
Test (org.junit.Test): 11
Map (java.util.Map): 10
Get (org.apache.hadoop.hbase.client.Get): 10
Scan (org.apache.hadoop.hbase.client.Scan): 10
Cell (org.apache.hadoop.hbase.Cell): 8
NavigableSet (java.util.NavigableSet): 7
NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair): 7
HashMap (java.util.HashMap): 6
Filter (org.apache.hadoop.hbase.filter.Filter): 6
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6
ByteString (com.google.protobuf.ByteString): 5
ArrayList (java.util.ArrayList): 5
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 5
Put (org.apache.hadoop.hbase.client.Put): 5
List (java.util.List): 4
Increment (org.apache.hadoop.hbase.client.Increment): 4
Result (org.apache.hadoop.hbase.client.Result): 4
Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column): 4
Column (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Column): 4