Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by Apache.
Class ProtobufUtil, method toGet.
/**
* Convert a protocol buffer Get to a client Get
*
* @param proto the protocol buffer Get to convert
* @return the converted client Get
* @throws IOException
*/
public static Get toGet(final ClientProtos.Get proto) throws IOException {
if (proto == null)
return null;
byte[] row = proto.getRow().toByteArray();
Get get = new Get(row);
if (proto.hasCacheBlocks()) {
get.setCacheBlocks(proto.getCacheBlocks());
}
if (proto.hasMaxVersions()) {
get.setMaxVersions(proto.getMaxVersions());
}
if (proto.hasStoreLimit()) {
get.setMaxResultsPerColumnFamily(proto.getStoreLimit());
}
if (proto.hasStoreOffset()) {
get.setRowOffsetPerColumnFamily(proto.getStoreOffset());
}
if (proto.getCfTimeRangeCount() > 0) {
for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
TimeRange timeRange = protoToTimeRange(cftr.getTimeRange());
get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange);
}
}
if (proto.hasTimeRange()) {
TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
get.setTimeRange(timeRange);
}
if (proto.hasFilter()) {
FilterProtos.Filter filter = proto.getFilter();
get.setFilter(ProtobufUtil.toFilter(filter));
}
for (NameBytesPair attribute : proto.getAttributeList()) {
get.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
}
if (proto.getColumnCount() > 0) {
for (Column column : proto.getColumnList()) {
byte[] family = column.getFamily().toByteArray();
if (column.getQualifierCount() > 0) {
for (ByteString qualifier : column.getQualifierList()) {
get.addColumn(family, qualifier.toByteArray());
}
} else {
get.addFamily(family);
}
}
}
if (proto.hasExistenceOnly() && proto.getExistenceOnly()) {
get.setCheckExistenceOnly(true);
}
if (proto.hasConsistency()) {
get.setConsistency(toConsistency(proto.getConsistency()));
}
if (proto.hasLoadColumnFamiliesOnDemand()) {
get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
}
return get;
}
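A minimal round-trip sketch of how this converter might be exercised. It assumes the companion ProtobufUtil.toGet(Get) overload (client to protobuf) from the same class and the unshaded org.apache.hadoop.hbase.protobuf package; the row key and version count below are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Build a client Get carrying a TimeRange and a version limit.
    Get get = new Get(Bytes.toBytes("row-1"));   // "row-1" is an illustrative row key
    get.setTimeRange(0L, 1000L);                 // surfaces as proto.getTimeRange()
    get.setMaxVersions(3);                       // surfaces as proto.getMaxVersions()

    // Client -> protobuf (assumed reverse overload), then protobuf -> client
    // via the toGet(ClientProtos.Get) method shown above.
    ClientProtos.Get proto = ProtobufUtil.toGet(get);
    Get roundTripped = ProtobufUtil.toGet(proto);
    System.out.println(roundTripped.getTimeRange().getMax());  // expected: 1000
  }
}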
Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by Apache.
Class ProtobufUtil, method toScan.
/**
* Convert a client Scan to a protocol buffer Scan
*
* @param scan the client Scan to convert
* @return the converted protocol buffer Scan
* @throws IOException
*/
public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
scanBuilder.setCacheBlocks(scan.getCacheBlocks());
if (scan.getBatch() > 0) {
scanBuilder.setBatchSize(scan.getBatch());
}
if (scan.getMaxResultSize() > 0) {
scanBuilder.setMaxResultSize(scan.getMaxResultSize());
}
if (scan.isSmall()) {
scanBuilder.setSmall(scan.isSmall());
}
if (scan.getAllowPartialResults()) {
scanBuilder.setAllowPartialResults(scan.getAllowPartialResults());
}
Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
if (loadColumnFamiliesOnDemand != null) {
scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
}
scanBuilder.setMaxVersions(scan.getMaxVersions());
for (Entry<byte[], TimeRange> cftr : scan.getColumnFamilyTimeRange().entrySet()) {
HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
b.setColumnFamily(ByteStringer.wrap(cftr.getKey()));
b.setTimeRange(timeRangeToProto(cftr.getValue()));
scanBuilder.addCfTimeRange(b);
}
TimeRange timeRange = scan.getTimeRange();
if (!timeRange.isAllTime()) {
HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
timeRangeBuilder.setFrom(timeRange.getMin());
timeRangeBuilder.setTo(timeRange.getMax());
scanBuilder.setTimeRange(timeRangeBuilder.build());
}
Map<String, byte[]> attributes = scan.getAttributesMap();
if (!attributes.isEmpty()) {
NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
attributeBuilder.setName(attribute.getKey());
attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
scanBuilder.addAttribute(attributeBuilder.build());
}
}
byte[] startRow = scan.getStartRow();
if (startRow != null && startRow.length > 0) {
scanBuilder.setStartRow(ByteStringer.wrap(startRow));
}
byte[] stopRow = scan.getStopRow();
if (stopRow != null && stopRow.length > 0) {
scanBuilder.setStopRow(ByteStringer.wrap(stopRow));
}
if (scan.hasFilter()) {
scanBuilder.setFilter(ProtobufUtil.toFilter(scan.getFilter()));
}
if (scan.hasFamilies()) {
Column.Builder columnBuilder = Column.newBuilder();
for (Map.Entry<byte[], NavigableSet<byte[]>> family : scan.getFamilyMap().entrySet()) {
columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
NavigableSet<byte[]> qualifiers = family.getValue();
columnBuilder.clearQualifier();
if (qualifiers != null && qualifiers.size() > 0) {
for (byte[] qualifier : qualifiers) {
columnBuilder.addQualifier(ByteStringer.wrap(qualifier));
}
}
scanBuilder.addColumn(columnBuilder.build());
}
}
if (scan.getMaxResultsPerColumnFamily() >= 0) {
scanBuilder.setStoreLimit(scan.getMaxResultsPerColumnFamily());
}
if (scan.getRowOffsetPerColumnFamily() > 0) {
scanBuilder.setStoreOffset(scan.getRowOffsetPerColumnFamily());
}
if (scan.isReversed()) {
scanBuilder.setReversed(scan.isReversed());
}
if (scan.getConsistency() == Consistency.TIMELINE) {
scanBuilder.setConsistency(toConsistency(scan.getConsistency()));
}
if (scan.getCaching() > 0) {
scanBuilder.setCaching(scan.getCaching());
}
long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan);
if (mvccReadPoint > 0) {
scanBuilder.setMvccReadPoint(mvccReadPoint);
}
if (!scan.includeStartRow()) {
scanBuilder.setIncludeStartRow(false);
}
if (scan.includeStopRow()) {
scanBuilder.setIncludeStopRow(true);
}
if (scan.getReadType() != Scan.ReadType.DEFAULT) {
scanBuilder.setReadType(toReadType(scan.getReadType()));
}
return scanBuilder.build();
}
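A short sketch of driving this converter from the client side, assuming the same unshaded package layout; the family, qualifier, and bounds are illustrative. Because the time range below is not the all-time default, toScan() is expected to populate the protobuf TimeRange field.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanToProtoSketch {
  public static void main(String[] args) throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(0L, 5000L);                              // non-default range
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));  // one column
    scan.setCaching(100);

    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    System.out.println(proto.hasTimeRange());  // expected: true
    System.out.println(proto.getCaching());    // expected: 100
  }
}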
Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by Apache.
Class ExplainTable, method explain.
protected void explain(String prefix, List<String> planSteps) {
StringBuilder buf = new StringBuilder(prefix);
ScanRanges scanRanges = context.getScanRanges();
Scan scan = context.getScan();
if (scan.getConsistency() != Consistency.STRONG) {
buf.append("TIMELINE-CONSISTENCY ");
}
if (hint.hasHint(Hint.SMALL)) {
buf.append(Hint.SMALL).append(" ");
}
if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
buf.append("REVERSE ");
}
if (scanRanges.isEverything()) {
buf.append("FULL SCAN ");
} else {
explainSkipScan(buf);
}
buf.append("OVER ").append(tableRef.getTable().getPhysicalName().getString());
if (!scanRanges.isPointLookup()) {
appendKeyRanges(buf);
}
planSteps.add(buf.toString());
if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) {
TimeRange range = context.getScan().getTimeRange();
planSteps.add(" ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")");
}
PageFilter pageFilter = null;
FirstKeyOnlyFilter firstKeyOnlyFilter = null;
BooleanExpressionFilter whereFilter = null;
DistinctPrefixFilter distinctFilter = null;
Iterator<Filter> filterIterator = ScanUtil.getFilterIterator(scan);
if (filterIterator.hasNext()) {
do {
Filter filter = filterIterator.next();
if (filter instanceof FirstKeyOnlyFilter) {
firstKeyOnlyFilter = (FirstKeyOnlyFilter) filter;
} else if (filter instanceof PageFilter) {
pageFilter = (PageFilter) filter;
} else if (filter instanceof BooleanExpressionFilter) {
whereFilter = (BooleanExpressionFilter) filter;
} else if (filter instanceof DistinctPrefixFilter) {
distinctFilter = (DistinctPrefixFilter) filter;
}
} while (filterIterator.hasNext());
}
if (whereFilter != null) {
planSteps.add(" SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + whereFilter.toString());
} else if (firstKeyOnlyFilter != null) {
planSteps.add(" SERVER FILTER BY FIRST KEY ONLY");
}
if (distinctFilter != null) {
planSteps.add(" SERVER DISTINCT PREFIX FILTER OVER " + groupBy.getExpressions().toString());
}
if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) {
// with GROUP BY, sort happens client-side
planSteps.add(" SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
} else {
if (offset != null) {
planSteps.add(" SERVER OFFSET " + offset);
}
if (pageFilter != null) {
planSteps.add(" SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
}
}
Integer groupByLimit = null;
byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT);
if (groupByLimitBytes != null) {
groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes);
}
groupBy.explain(planSteps, groupByLimit);
if (scan.getAttribute(BaseScannerRegionObserver.SPECIFIC_ARRAY_INDEX) != null) {
planSteps.add(" SERVER ARRAY ELEMENT PROJECTION");
}
}
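The ROW TIMESTAMP FILTER line above is built directly from the scan's TimeRange. A hedged sketch of that formatting, with an illustrative scan and bounds (the max bound is exclusive, hence the closing parenthesis):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.TimeRange;

public class RowTimestampPlanStepSketch {
  public static void main(String[] args) throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(1000L, 2000L);  // illustrative row_timestamp bounds
    TimeRange range = scan.getTimeRange();
    String planStep = " ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")";
    System.out.println(planStep);     // " ROW TIMESTAMP FILTER [1000, 2000)"
  }
}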
Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by Apache.
Class BaseScannerRegionObserver, method preScannerOpen.
@Override
public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
byte[] txnScn = scan.getAttribute(TX_SCN);
if (txnScn != null) {
TimeRange timeRange = scan.getTimeRange();
scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn));
}
if (isRegionObserverFor(scan)) {
// Reject scans that extend beyond this region unless the boundary check is explicitly
// skipped; for local index scans the check is always enforced, since stale client-side
// region boundary information cannot be ignored there.
if (!skipRegionBoundaryCheck(scan) || ScanUtil.isLocalIndex(scan)) {
throwIfScanOutOfRegion(scan, c.getEnvironment().getRegion());
}
// Muck with the start/stop row of the scan and set as reversed at the
// last possible moment. You need to swap the start/stop and make the
// start exclusive and the stop inclusive.
ScanUtil.setupReverseScan(scan);
}
this.encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);
this.useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan);
return s;
}
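A hedged sketch of the TX_SCN handling at the top of this hook: cap the scan's upper time bound with a long value carried in a scan attribute. The attribute name "tx.scn.example" is hypothetical and stands in for the TX_SCN constant; the rest uses only stock HBase Scan and Bytes calls.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;

public class CapScanUpperBoundSketch {
  // Cap the scan's max timestamp with the value of a hypothetical attribute, keeping the min.
  static void capUpperBound(Scan scan) throws IOException {
    byte[] upperBoundBytes = scan.getAttribute("tx.scn.example");  // hypothetical attribute name
    if (upperBoundBytes != null) {
      TimeRange current = scan.getTimeRange();
      scan.setTimeRange(current.getMin(), Bytes.toLong(upperBoundBytes));
    }
  }
}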
Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by Apache.
Class BaseQueryPlan, method iterator.
public final ResultIterator iterator(final Map<ImmutableBytesPtr, ServerCache> caches, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
if (scan == null) {
scan = context.getScan();
}
ScanRanges scanRanges = context.getScanRanges();
/*
* For aggregate queries, we still need to let the AggregationPlan
* proceed so that we can return proper aggregates even if there are no
* rows to be scanned.
*/
if (scanRanges == ScanRanges.NOTHING && !getStatement().isAggregate()) {
return getWrappedIterator(caches, ResultIterator.EMPTY_ITERATOR);
}
if (tableRef == TableRef.EMPTY_TABLE_REF) {
return newIterator(scanGrouper, scan, caches);
}
// Set miscellaneous scan attributes. This is the last chance to set them before we
// clone the scan for each parallelized chunk.
TableRef tableRef = context.getCurrentTable();
PTable table = tableRef.getTable();
if (dynamicFilter != null) {
WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
}
if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
ScanUtil.setReversed(scan);
// Hack for working around PHOENIX-3121 and HBASE-16296.
// TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
int scannerCacheSize = context.getStatement().getFetchSize();
if (limit != null && limit % scannerCacheSize == 0) {
scan.setCaching(scannerCacheSize + 1);
}
}
PhoenixConnection connection = context.getConnection();
final int smallScanThreshold = connection.getQueryServices().getProps().getInt(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD);
if (statement.getHint().hasHint(Hint.SMALL) || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold)) {
scan.setSmall(true);
}
// set read consistency
if (table.getType() != PTableType.SYSTEM) {
scan.setConsistency(connection.getConsistency());
}
// TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
if (!table.isTransactional()) {
// Get the time range of row_timestamp column
TimeRange rowTimestampRange = scanRanges.getRowTimestampRange();
// Get the already existing time range on the scan.
TimeRange scanTimeRange = scan.getTimeRange();
Long scn = connection.getSCN();
if (scn == null) {
// Always use latest timestamp unless scn is set or transactional (see PHOENIX-4089)
scn = HConstants.LATEST_TIMESTAMP;
}
try {
TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
if (timeRangeToUse == null) {
return ResultIterator.EMPTY_ITERATOR;
}
scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
byte[] tenantIdBytes;
if (table.isMultiTenant()) {
tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
} else {
tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
}
ScanUtil.setTenantId(scan, tenantIdBytes);
String customAnnotations = LogUtil.customAnnotationsToString(connection);
ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
// Set local index related scan attributes.
if (table.getIndexType() == IndexType.LOCAL) {
ScanUtil.setLocalIndex(scan);
Set<PColumn> dataColumns = context.getDataColumns();
// If data columns need to be joined back from the data table, set the columns to be
// projected and their key value schema, plus the index maintainer and any view
// constants; a projected column that is not present in the index would require
// skipping this plan.
if (!dataColumns.isEmpty()) {
// Set data columns to be joined back from the data table.
PTable parentTable = context.getCurrentTable().getTable();
String parentSchemaName = parentTable.getParentSchemaName().getString();
String parentTableName = parentTable.getParentTableName().getString();
final ParseNodeFactory FACTORY = new ParseNodeFactory();
// TODO: is it necessary to re-resolve the table?
TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
PTable dataTable = dataTableRef.getTable();
// Set data columns to be joined back from the data table.
serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
// Set key value schema of the data columns.
serializeSchemaIntoScan(scan, schema);
// Set index maintainer of the local index.
serializeIndexMaintainerIntoScan(scan, dataTable);
// Set view constants if they exist.
serializeViewConstantsIntoScan(scan, dataTable);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
}
ResultIterator iterator = newIterator(scanGrouper, scan, caches);
if (LOG.isDebugEnabled()) {
LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
}
// wrap the iterator so we start/end tracing as we expect
TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}