Example 6 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class TestTimeRangeTracker, method testExtreme:

@Test
public void testExtreme() {
    TimeRange tr = new TimeRange();
    assertTrue(tr.includesTimeRange(new TimeRange()));
    TimeRangeTracker trt = new TimeRangeTracker();
    assertFalse(trt.includesTimeRange(new TimeRange()));
    trt.includeTimestamp(1);
    trt.includeTimestamp(10);
    assertTrue(trt.includesTimeRange(new TimeRange()));
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), Test (org.junit.Test)
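The test leans on two properties of these classes: a no-argument TimeRange spans all of time, and a fresh TimeRangeTracker matches nothing until it has recorded at least one timestamp. A minimal sketch of the TimeRange side of that, assuming the same public constructors the test uses; the timestamps are illustrative, and includesTimeRange is an overlap check rather than strict containment:

static void timeRangeBasics() throws IOException {
    // No-arg constructor: [0, Long.MAX_VALUE), i.e. "all time".
    TimeRange allTime = new TimeRange();
    // Explicit bounds: [1, 10); the maximum timestamp is exclusive.
    TimeRange bounded = new TimeRange(1L, 10L);

    // Point checks against the bounded range (run with -ea to enable asserts).
    assert bounded.withinTimeRange(1L);
    assert bounded.withinTimeRange(9L);
    assert !bounded.withinTimeRange(10L); // max is exclusive

    // An all-time range overlaps any other range, which is what the tracker
    // assertion above reports once timestamps 1 and 10 have been included.
    assert allTime.includesTimeRange(bounded);
}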

Example 7 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class ProtobufUtil, method toGet:

/**
   * Create a protocol buffer Get based on a client Get.
   *
   * @param get the client Get
   * @return a protocol buffer Get
   * @throws IOException
   */
public static ClientProtos.Get toGet(final Get get) throws IOException {
    ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
    builder.setRow(ByteStringer.wrap(get.getRow()));
    builder.setCacheBlocks(get.getCacheBlocks());
    builder.setMaxVersions(get.getMaxVersions());
    if (get.getFilter() != null) {
        builder.setFilter(ProtobufUtil.toFilter(get.getFilter()));
    }
    for (Entry<byte[], TimeRange> cftr : get.getColumnFamilyTimeRange().entrySet()) {
        HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
        b.setColumnFamily(ByteStringer.wrap(cftr.getKey()));
        b.setTimeRange(timeRangeToProto(cftr.getValue()));
        builder.addCfTimeRange(b);
    }
    TimeRange timeRange = get.getTimeRange();
    if (!timeRange.isAllTime()) {
        HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
        timeRangeBuilder.setFrom(timeRange.getMin());
        timeRangeBuilder.setTo(timeRange.getMax());
        builder.setTimeRange(timeRangeBuilder.build());
    }
    Map<String, byte[]> attributes = get.getAttributesMap();
    if (!attributes.isEmpty()) {
        NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
        for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
            attributeBuilder.setName(attribute.getKey());
            attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
            builder.addAttribute(attributeBuilder.build());
        }
    }
    if (get.hasFamilies()) {
        Column.Builder columnBuilder = Column.newBuilder();
        Map<byte[], NavigableSet<byte[]>> families = get.getFamilyMap();
        for (Map.Entry<byte[], NavigableSet<byte[]>> family : families.entrySet()) {
            NavigableSet<byte[]> qualifiers = family.getValue();
            columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
            columnBuilder.clearQualifier();
            if (qualifiers != null && qualifiers.size() > 0) {
                for (byte[] qualifier : qualifiers) {
                    columnBuilder.addQualifier(ByteStringer.wrap(qualifier));
                }
            }
            builder.addColumn(columnBuilder.build());
        }
    }
    if (get.getMaxResultsPerColumnFamily() >= 0) {
        builder.setStoreLimit(get.getMaxResultsPerColumnFamily());
    }
    if (get.getRowOffsetPerColumnFamily() > 0) {
        builder.setStoreOffset(get.getRowOffsetPerColumnFamily());
    }
    if (get.isCheckExistenceOnly()) {
        builder.setExistenceOnly(true);
    }
    if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
        builder.setConsistency(toConsistency(get.getConsistency()));
    }
    Boolean loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
    if (loadColumnFamiliesOnDemand != null) {
        builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
    }
    return builder.build();
}
Also used: NavigableSet (java.util.NavigableSet), ByteString (com.google.protobuf.ByteString), TimeRange (org.apache.hadoop.hbase.io.TimeRange), NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair), Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column), Get (org.apache.hadoop.hbase.client.Get), Map (java.util.Map), HashMap (java.util.HashMap)
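A hedged usage sketch for the converter above: build a client Get that carries both a scan-wide time range and a per-column-family override, then convert it. The row key, family, and timestamps are illustrative values, not taken from the HBase sources:

static ClientProtos.Get buildProtoGet() throws IOException {
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");

    Get get = new Get(row);
    // Scan-wide range: only cell versions with 100 <= ts < 200 are returned.
    get.setTimeRange(100L, 200L);
    // Per-family override; toGet() serializes this into the CfTimeRange entries.
    get.setColumnFamilyTimeRange(cf, 150L, 200L);

    // The builder logic above copies both ranges, plus attributes,
    // columns and limits, into the protobuf message.
    return ProtobufUtil.toGet(get);
}

Because toGet only sets the top-level time_range field when the range is not "all time" (the isAllTime() check above), an unconstrained Get produces a protobuf message with no time_range at all.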

Example 8 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class ProtobufUtil, method toScan:

/**
   * Convert a protocol buffer Scan to a client Scan.
   *
   * @param proto the protocol buffer Scan to convert
   * @return the converted client Scan
   * @throws IOException
   */
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
    byte[] startRow = HConstants.EMPTY_START_ROW;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    boolean includeStartRow = true;
    boolean includeStopRow = false;
    if (proto.hasStartRow()) {
        startRow = proto.getStartRow().toByteArray();
    }
    if (proto.hasStopRow()) {
        stopRow = proto.getStopRow().toByteArray();
    }
    if (proto.hasIncludeStartRow()) {
        includeStartRow = proto.getIncludeStartRow();
    }
    if (proto.hasIncludeStopRow()) {
        includeStopRow = proto.getIncludeStopRow();
    }
    Scan scan = new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
    if (proto.hasCacheBlocks()) {
        scan.setCacheBlocks(proto.getCacheBlocks());
    }
    if (proto.hasMaxVersions()) {
        scan.setMaxVersions(proto.getMaxVersions());
    }
    if (proto.hasStoreLimit()) {
        scan.setMaxResultsPerColumnFamily(proto.getStoreLimit());
    }
    if (proto.hasStoreOffset()) {
        scan.setRowOffsetPerColumnFamily(proto.getStoreOffset());
    }
    if (proto.hasLoadColumnFamiliesOnDemand()) {
        scan.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
    }
    if (proto.getCfTimeRangeCount() > 0) {
        for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
            TimeRange timeRange = protoToTimeRange(cftr.getTimeRange());
            scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange);
        }
    }
    if (proto.hasTimeRange()) {
        TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
        scan.setTimeRange(timeRange);
    }
    if (proto.hasFilter()) {
        FilterProtos.Filter filter = proto.getFilter();
        scan.setFilter(ProtobufUtil.toFilter(filter));
    }
    if (proto.hasBatchSize()) {
        scan.setBatch(proto.getBatchSize());
    }
    if (proto.hasMaxResultSize()) {
        scan.setMaxResultSize(proto.getMaxResultSize());
    }
    if (proto.hasSmall()) {
        scan.setSmall(proto.getSmall());
    }
    if (proto.hasAllowPartialResults()) {
        scan.setAllowPartialResults(proto.getAllowPartialResults());
    }
    for (NameBytesPair attribute : proto.getAttributeList()) {
        scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    if (proto.getColumnCount() > 0) {
        for (Column column : proto.getColumnList()) {
            byte[] family = column.getFamily().toByteArray();
            if (column.getQualifierCount() > 0) {
                for (ByteString qualifier : column.getQualifierList()) {
                    scan.addColumn(family, qualifier.toByteArray());
                }
            } else {
                scan.addFamily(family);
            }
        }
    }
    if (proto.hasReversed()) {
        scan.setReversed(proto.getReversed());
    }
    if (proto.hasConsistency()) {
        scan.setConsistency(toConsistency(proto.getConsistency()));
    }
    if (proto.hasCaching()) {
        scan.setCaching(proto.getCaching());
    }
    if (proto.hasMvccReadPoint()) {
        PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint());
    }
    if (scan.isSmall()) {
        scan.setReadType(Scan.ReadType.PREAD);
    } else if (proto.hasReadType()) {
        scan.setReadType(toReadType(proto.getReadType()));
    }
    return scan;
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair), Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column), ByteString (com.google.protobuf.ByteString), FilterProtos (org.apache.hadoop.hbase.protobuf.generated.FilterProtos), Scan (org.apache.hadoop.hbase.client.Scan), HBaseProtos (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos)
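The same ProtobufUtil class also offers the opposite direction, toScan(Scan), so a round trip is a convenient way to see which attributes, including the time range, survive serialization. A minimal sketch with illustrative keys and timestamps, assuming the non-shaded org.apache.hadoop.hbase.protobuf.ProtobufUtil used in this example:

static Scan roundTripScan() throws IOException {
    Scan scan = new Scan()
            .withStartRow(Bytes.toBytes("row-0"), true)
            .withStopRow(Bytes.toBytes("row-9"), false);
    // Only cells with 100 <= ts < 200 are considered.
    scan.setTimeRange(100L, 200L);
    scan.setCaching(100);

    // Client Scan -> protobuf Scan -> client Scan again; the converter above
    // restores the time range via protoToTimeRange(proto.getTimeRange()).
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    return ProtobufUtil.toScan(proto);
}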

Example 9 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class StoreFileScanner, method shouldUseScanner:

@Override
public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
    // if the file has no entries, no need to validate or create a scanner.
    byte[] cf = store.getFamily().getName();
    TimeRange timeRange = scan.getColumnFamilyTimeRange().get(cf);
    if (timeRange == null) {
        timeRange = scan.getTimeRange();
    }
    return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS)
            && reader.passesKeyRangeFilter(scan)
            && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf));
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange)
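The heart of this method is resolving the effective time range for one column family: use the per-family override when the scan carries one, otherwise fall back to the scan-wide range. A hypothetical helper that isolates just that lookup (resolveTimeRange is not an HBase method, only an illustration of the pattern):

// Hypothetical helper, not part of HBase: picks the time range that
// applies to the given column family for this scan.
static TimeRange resolveTimeRange(Scan scan, byte[] columnFamily) {
    TimeRange perFamily = scan.getColumnFamilyTimeRange().get(columnFamily);
    return perFamily != null ? perFamily : scan.getTimeRange();
}

The reader-level checks (passesTimerangeFilter, passesKeyRangeFilter, passesBloomFilter) then let an entire store file be skipped when its metadata cannot intersect that range.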

Example 10 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

From the class BaseQueryPlan, method iterator:

public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    if (statement.getHint().hasHint(Hint.SMALL)) {
        scan.setSmall(true);
    }
    PhoenixConnection connection = context.getConnection();
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // If we haven't resolved the time at the beginning of compilation, don't
            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
            scn = tableRef.getTimeStamp();
            if (scn == QueryConstants.UNSET_TIMESTAMP) {
                scn = HConstants.LATEST_TIMESTAMP;
            }
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes. 
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If any columns being projected are not present in the local index,
        // they need to be joined back from the data table.
        if (!dataColumns.isEmpty()) {
            // Set data columns to be join back from data table.
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be join back from data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan);
    iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {

        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies);
            }
        }
    };
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), TracingIterator (org.apache.phoenix.trace.TracingIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), DelegateResultIterator (org.apache.phoenix.iterate.DelegateResultIterator), TraceScope (org.apache.htrace.TraceScope), IOException (java.io.IOException), PTable (org.apache.phoenix.schema.PTable), Hint (org.apache.phoenix.parse.HintNode.Hint), PColumn (org.apache.phoenix.schema.PColumn), TimeRange (org.apache.hadoop.hbase.io.TimeRange), KeyValueSchema (org.apache.phoenix.schema.KeyValueSchema), TableRef (org.apache.phoenix.schema.TableRef), ParseNodeFactory (org.apache.phoenix.parse.ParseNodeFactory)
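The TimeRange-specific part of this plan sits in the middle: for non-transactional tables, Phoenix intersects the ROW_TIMESTAMP column's range with whatever time range is already on the scan, capped at the SCN, and returns ResultIterator.EMPTY_ITERATOR when the intersection is empty. ScanUtil.intersectTimeRange is Phoenix-internal; the sketch below is only a simplified illustration of that intersection idea, not its actual implementation:

// Simplified illustration: intersect two [min, max) time ranges and cap the
// result at an upper bound (the SCN). A null result means the ranges cannot
// overlap, mirroring the EMPTY_ITERATOR short-circuit above.
static TimeRange intersectCapped(TimeRange a, TimeRange b, long upperBound) throws IOException {
    long min = Math.max(a.getMin(), b.getMin());
    long max = Math.min(Math.min(a.getMax(), b.getMax()), upperBound);
    return min < max ? new TimeRange(min, max) : null;
}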

Aggregations

TimeRange (org.apache.hadoop.hbase.io.TimeRange): 45
Test (org.junit.Test): 11
Map (java.util.Map): 10
Get (org.apache.hadoop.hbase.client.Get): 10
Scan (org.apache.hadoop.hbase.client.Scan): 10
Cell (org.apache.hadoop.hbase.Cell): 8
NavigableSet (java.util.NavigableSet): 7
NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair): 7
HashMap (java.util.HashMap): 6
Filter (org.apache.hadoop.hbase.filter.Filter): 6
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6
ByteString (com.google.protobuf.ByteString): 5
ArrayList (java.util.ArrayList): 5
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 5
Put (org.apache.hadoop.hbase.client.Put): 5
List (java.util.List): 4
Increment (org.apache.hadoop.hbase.client.Increment): 4
Result (org.apache.hadoop.hbase.client.Result): 4
Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column): 4
Column (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Column): 4