Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by Apache.
The class ScanRanges, method getDescTimeRange.
public static TimeRange getDescTimeRange(KeyRange lowestKeyRange, KeyRange highestKeyRange, Field f) throws IOException {
    boolean lowerUnbound = lowestKeyRange.lowerUnbound();
    boolean lowerInclusive = lowestKeyRange.isLowerInclusive();
    boolean upperUnbound = highestKeyRange.upperUnbound();
    boolean upperInclusive = highestKeyRange.isUpperInclusive();
    PDataCodec codec = PLong.INSTANCE.getCodec();
    // Timestamps are stored descending, so decode with SortOrder.DESC.
    // The -1 is a placeholder; it is never used when the edge is unbound.
    long low = lowerUnbound ? -1 : codec.decodeLong(lowestKeyRange.getLowerRange(), 0, SortOrder.DESC);
    long high = upperUnbound ? -1 : codec.decodeLong(highestKeyRange.getUpperRange(), 0, SortOrder.DESC);
    long newHigh;
    long newLow;
    if (!lowerUnbound && !upperUnbound) {
        // Both edges bound. The DESC encoding inverts the range: the key-range
        // lower bound decodes to the largest timestamp and becomes the time-range
        // upper bound, and vice versa. TimeRange treats its minimum as inclusive
        // and its maximum as exclusive, hence the increments below.
        newHigh = lowerInclusive ? safelyIncrement(low) : low;
        newLow = upperInclusive ? high : safelyIncrement(high);
        return new TimeRange(newLow, newHigh);
    } else if (!lowerUnbound && upperUnbound) {
        // Only the lower key edge is bound: it caps the time range from above.
        newHigh = lowerInclusive ? safelyIncrement(low) : low;
        newLow = 0;
        return new TimeRange(newLow, newHigh);
    } else if (lowerUnbound && !upperUnbound) {
        // Only the upper key edge is bound: it caps the time range from below.
        newLow = upperInclusive ? high : safelyIncrement(high);
        newHigh = HConstants.LATEST_TIMESTAMP;
        return new TimeRange(newLow, newHigh);
    } else {
        // Fully unbound: scan all timestamps.
        newLow = 0;
        newHigh = HConstants.LATEST_TIMESTAMP;
        return new TimeRange(newLow, newHigh);
    }
}
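The helper safelyIncrement is referenced above but not shown on this page. A plausible sketch, not the Phoenix source, assuming its job is to add one without overflowing past the largest long value:

private static long safelyIncrement(long value) {
    // Hypothetical sketch: bump the timestamp by one so an inclusive key bound
    // maps to an exclusive TimeRange maximum, but never overflow Long.MAX_VALUE.
    return value < Long.MAX_VALUE ? value + 1 : value;
}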
Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by Apache.
The class StoreFileScanner, method shouldUseScanner.
@Override
public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {
    // if the file has no entries, no need to validate or create a scanner.
    byte[] cf = store.getColumnFamilyDescriptor().getName();
    TimeRange timeRange = scan.getColumnFamilyTimeRange().get(cf);
    if (timeRange == null) {
        timeRange = scan.getTimeRange();
    }
    return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS)
        && reader.passesKeyRangeFilter(scan)
        && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf));
}
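For context on the lookup above: a client can set a scan-wide time range and override it per column family, and the store file scanner prefers the per-family range when one exists. A minimal client-side sketch, with the family name and bounds chosen for illustration:

static Scan buildScan() throws IOException {
    Scan scan = new Scan();
    // Scan-wide default: only cells with 0 <= ts < 100 are considered.
    scan.setTimeRange(0L, 100L);
    // Narrower window just for family "cf1"; other families keep the default.
    scan.setColumnFamilyTimeRange(Bytes.toBytes("cf1"), 50L, 60L);
    return scan;
}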
Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by Apache.
The class ProtobufUtil, method toAppend.
/**
 * Convert a protocol buffer Mutate to an Append.
 * @param proto the protocol buffer Mutate to convert
 * @param cellScanner the cell scanner carrying the mutation's cells, if any
 * @return the converted client Append
 * @throws IOException if the protocol buffer cannot be converted
 */
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.APPEND : type.name();
    Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()),
        Append::add, proto, cellScanner);
    if (proto.hasTimeRange()) {
        TimeRange timeRange = toTimeRange(proto.getTimeRange());
        append.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    return append;
}
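The time range carried through here is the one a client attaches to an Append to limit which existing cells are read when computing the appended value. A sketch of the client side, with hypothetical row, family, and qualifier names:

static Append appendWithTimeRange() {
    Append append = new Append(Bytes.toBytes("row1"));
    append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
    // Only cells with timestamps in [0, now) are consulted when locating
    // the current value to append to.
    append.setTimeRange(0L, System.currentTimeMillis());
    return append;
}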
Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by Apache.
The class ProtobufUtil, method toScan.
/**
 * Convert a protocol buffer Scan to a client Scan.
 * @param proto the protocol buffer Scan to convert
 * @return the converted client Scan
 * @throws IOException if the protocol buffer cannot be converted
 */
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
    byte[] startRow = HConstants.EMPTY_START_ROW;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    boolean includeStartRow = true;
    boolean includeStopRow = false;
    if (proto.hasStartRow()) {
        startRow = proto.getStartRow().toByteArray();
    }
    if (proto.hasStopRow()) {
        stopRow = proto.getStopRow().toByteArray();
    }
    if (proto.hasIncludeStartRow()) {
        includeStartRow = proto.getIncludeStartRow();
    }
    if (proto.hasIncludeStopRow()) {
        includeStopRow = proto.getIncludeStopRow();
    } else {
        // old client without this flag, we should consider start=end as a get.
        if (ClientUtil.areScanStartRowAndStopRowEqual(startRow, stopRow)) {
            includeStopRow = true;
        }
    }
    Scan scan = new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
    if (proto.hasCacheBlocks()) {
        scan.setCacheBlocks(proto.getCacheBlocks());
    }
    if (proto.hasMaxVersions()) {
        scan.readVersions(proto.getMaxVersions());
    }
    if (proto.hasStoreLimit()) {
        scan.setMaxResultsPerColumnFamily(proto.getStoreLimit());
    }
    if (proto.hasStoreOffset()) {
        scan.setRowOffsetPerColumnFamily(proto.getStoreOffset());
    }
    if (proto.hasLoadColumnFamiliesOnDemand()) {
        scan.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
    }
    if (proto.getCfTimeRangeCount() > 0) {
        for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
            TimeRange timeRange = toTimeRange(cftr.getTimeRange());
            scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), timeRange.getMax());
        }
    }
    if (proto.hasTimeRange()) {
        TimeRange timeRange = toTimeRange(proto.getTimeRange());
        scan.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    if (proto.hasFilter()) {
        FilterProtos.Filter filter = proto.getFilter();
        scan.setFilter(ProtobufUtil.toFilter(filter));
    }
    if (proto.hasBatchSize()) {
        scan.setBatch(proto.getBatchSize());
    }
    if (proto.hasMaxResultSize()) {
        scan.setMaxResultSize(proto.getMaxResultSize());
    }
    if (proto.hasAllowPartialResults()) {
        scan.setAllowPartialResults(proto.getAllowPartialResults());
    }
    for (NameBytesPair attribute : proto.getAttributeList()) {
        scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    if (proto.getColumnCount() > 0) {
        for (Column column : proto.getColumnList()) {
            byte[] family = column.getFamily().toByteArray();
            if (column.getQualifierCount() > 0) {
                for (ByteString qualifier : column.getQualifierList()) {
                    scan.addColumn(family, qualifier.toByteArray());
                }
            } else {
                scan.addFamily(family);
            }
        }
    }
    if (proto.hasReversed()) {
        scan.setReversed(proto.getReversed());
    }
    if (proto.hasConsistency()) {
        scan.setConsistency(toConsistency(proto.getConsistency()));
    }
    if (proto.hasCaching()) {
        scan.setCaching(proto.getCaching());
    }
    if (proto.hasMvccReadPoint()) {
        PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint());
    }
    if (proto.hasReadType()) {
        scan.setReadType(toReadType(proto.getReadType()));
    }
    if (proto.getNeedCursorResult()) {
        scan.setNeedCursorResult(true);
    }
    return scan;
}
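ProtobufUtil also offers the opposite conversion, serializing a client Scan into its protocol buffer form, so the two methods can round-trip a scan. A short sketch, assuming the shaded ClientProtos classes are on the classpath and using illustrative row keys:

static Scan roundTrip() throws IOException {
    Scan original = new Scan()
        .withStartRow(Bytes.toBytes("a"))
        .withStopRow(Bytes.toBytes("z"));
    original.setTimeRange(1000L, 2000L);
    // Serialize to protobuf and back; the restored Scan carries the same
    // row bounds, flags, and time range as the original.
    ClientProtos.Scan proto = ProtobufUtil.toScan(original);
    return ProtobufUtil.toScan(proto);
}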
Use of org.apache.hadoop.hbase.io.TimeRange in project hbase by Apache.
The class ProtobufUtil, method toCheckAndMutate.
public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, List<Mutation> mutations) throws IOException {
    assert mutations.size() > 0;
    byte[] row = condition.getRow().toByteArray();
    CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row);
    Filter filter = condition.hasFilter() ? ProtobufUtil.toFilter(condition.getFilter()) : null;
    if (filter != null) {
        builder.ifMatches(filter);
    } else {
        builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(),
            CompareOperator.valueOf(condition.getCompareType().name()),
            ProtobufUtil.toComparator(condition.getComparator()).getValue());
    }
    TimeRange timeRange = condition.hasTimeRange()
        ? ProtobufUtil.toTimeRange(condition.getTimeRange())
        : TimeRange.allTime();
    builder.timeRange(timeRange);
    try {
        if (mutations.size() == 1) {
            Mutation m = mutations.get(0);
            if (m instanceof Put) {
                return builder.build((Put) m);
            } else if (m instanceof Delete) {
                return builder.build((Delete) m);
            } else if (m instanceof Increment) {
                return builder.build((Increment) m);
            } else if (m instanceof Append) {
                return builder.build((Append) m);
            } else {
                throw new DoNotRetryIOException("Unsupported mutate type: " + m.getClass().getSimpleName().toUpperCase());
            }
        } else {
            return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations));
        }
    } catch (IllegalArgumentException e) {
        throw new DoNotRetryIOException(e.getMessage());
    }
}
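The TimeRange here scopes the condition check: only cells inside the range are examined when evaluating whether the mutation should apply. A client-side sketch of building such a request, with hypothetical row, family, qualifier, and value names:

static CheckAndMutate guardedPut() {
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");
    Put put = new Put(row).addColumn(family, qualifier, Bytes.toBytes("new"));
    // Only cells with timestamps in [0, 1000) are considered when checking
    // whether the current value equals "expected".
    return CheckAndMutate.newBuilder(row)
        .ifEquals(family, qualifier, Bytes.toBytes("expected"))
        .timeRange(TimeRange.between(0L, 1000L))
        .build(put);
}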