Use of org.apache.hadoop.hbase.CompareOperator in project hbase by apache.
The class DependentColumnFilter, method parseFrom:
/**
* @param pbBytes A pb serialized {@link DependentColumnFilter} instance
* @return An instance of {@link DependentColumnFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see #toByteArray
*/
public static DependentColumnFilter parseFrom(final byte[] pbBytes)
    throws DeserializationException {
  FilterProtos.DependentColumnFilter proto;
  try {
    proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  final CompareOperator valueCompareOp =
      CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
  ByteArrayComparable valueComparator = null;
  try {
    if (proto.getCompareFilter().hasComparator()) {
      valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
    }
  } catch (IOException ioe) {
    throw new DeserializationException(ioe);
  }
  return new DependentColumnFilter(
      proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null,
      proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null,
      proto.getDropDependentColumn(), valueCompareOp, valueComparator);
}
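A minimal round-trip sketch of the above, using standard HBase client classes; the family "cf", qualifier "q", and value "v" below are illustrative placeholders, not from the excerpt:

  // Hedged sketch: serialize a DependentColumnFilter and restore it via parseFrom.
  // All column names and the value are made-up placeholders.
  DependentColumnFilter original = new DependentColumnFilter(
      Bytes.toBytes("cf"),                       // dependent column family (placeholder)
      Bytes.toBytes("q"),                        // dependent column qualifier (placeholder)
      false,                                     // do not drop the dependent column from results
      CompareOperator.EQUAL,
      new BinaryComparator(Bytes.toBytes("v"))); // value check on the dependent column
  byte[] serialized = original.toByteArray();    // what arrives as pbBytes on the server
  DependentColumnFilter restored = DependentColumnFilter.parseFrom(serialized);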
Use of org.apache.hadoop.hbase.CompareOperator in project hbase by apache.
The class FamilyFilter, method parseFrom:
/**
* @param pbBytes A pb serialized {@link FamilyFilter} instance
* @return An instance of {@link FamilyFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see #toByteArray
*/
public static FamilyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
  FilterProtos.FamilyFilter proto;
  try {
    proto = FilterProtos.FamilyFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  final CompareOperator valueCompareOp =
      CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
  ByteArrayComparable valueComparator = null;
  try {
    if (proto.getCompareFilter().hasComparator()) {
      valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
    }
  } catch (IOException ioe) {
    throw new DeserializationException(ioe);
  }
  return new FamilyFilter(valueCompareOp, valueComparator);
}
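The same round-trip applies here. A brief sketch, where the family name "cf" is a placeholder and scan is assumed to be an existing Scan:

  // Hedged sketch: a FamilyFilter built from a CompareOperator plus comparator,
  // attached to a scan, then round-tripped through its protobuf form.
  FamilyFilter familyFilter =
      new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("cf")));
  scan.setFilter(familyFilter);                   // restrict the scan to the matching family
  byte[] serialized = familyFilter.toByteArray(); // what arrives as pbBytes on the server
  FamilyFilter restored = FamilyFilter.parseFrom(serialized);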
Use of org.apache.hadoop.hbase.CompareOperator in project hbase by apache.
The class MultiRowMutationEndpoint, method matches:
private boolean matches(Region region, ClientProtos.Condition condition) throws IOException {
  byte[] row = condition.getRow().toByteArray();
  Filter filter = null;
  byte[] family = null;
  byte[] qualifier = null;
  CompareOperator op = null;
  ByteArrayComparable comparator = null;
  if (condition.hasFilter()) {
    filter = ProtobufUtil.toFilter(condition.getFilter());
  } else {
    family = condition.getFamily().toByteArray();
    qualifier = condition.getQualifier().toByteArray();
    op = CompareOperator.valueOf(condition.getCompareType().name());
    comparator = ProtobufUtil.toComparator(condition.getComparator());
  }
  TimeRange timeRange = condition.hasTimeRange()
      ? ProtobufUtil.toTimeRange(condition.getTimeRange())
      : TimeRange.allTime();
  Get get = new Get(row);
  if (family != null) {
    checkFamily(region, family);
    get.addColumn(family, qualifier);
  }
  if (filter != null) {
    get.setFilter(filter);
  }
  if (timeRange != null) {
    get.setTimeRange(timeRange.getMin(), timeRange.getMax());
  }
  boolean matches = false;
  try (RegionScanner scanner = region.getScanner(new Scan(get))) {
    // NOTE: Please don't use HRegion.get() instead,
    // because it will copy cells to heap. See HBASE-26036
    List<Cell> result = new ArrayList<>();
    scanner.next(result);
    if (filter != null) {
      if (!result.isEmpty()) {
        matches = true;
      }
    } else {
      boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
      if (result.isEmpty() && valueIsNull) {
        matches = true;
      } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) {
        matches = true;
      } else if (result.size() == 1 && !valueIsNull) {
        Cell kv = result.get(0);
        int compareResult = PrivateCellUtil.compareValue(kv, comparator);
        matches = matches(op, compareResult);
      }
    }
  }
  return matches;
}
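The final branch delegates to a private matches(op, compareResult) helper that is not shown in this excerpt. A plausible sketch of that mapping follows, mirroring how HBase translates a CompareOperator into a decision on a compareTo result; treat it as illustrative rather than the endpoint's verbatim code:

  // Illustrative sketch of a matches(op, compareResult) helper: compareResult is
  // the comparator's compareTo outcome against the cell value, and each operator
  // accepts the sign(s) it names.
  private static boolean matches(final CompareOperator op, final int compareResult) {
    switch (op) {
      case LESS:             return compareResult < 0;
      case LESS_OR_EQUAL:    return compareResult <= 0;
      case EQUAL:            return compareResult == 0;
      case NOT_EQUAL:        return compareResult != 0;
      case GREATER_OR_EQUAL: return compareResult >= 0;
      case GREATER:          return compareResult > 0;
      default:
        throw new RuntimeException("Unknown Compare op " + op.name());
    }
  }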
Use of org.apache.hadoop.hbase.CompareOperator in project gora by apache.
The class DefaultFactory, method createFilter:
@Override
public org.apache.hadoop.hbase.filter.Filter createFilter(Filter<K, T> filter, HBaseStore<K, T> store) {
  if (filter instanceof FilterList) {
    FilterList<K, T> filterList = (FilterList<K, T>) filter;
    org.apache.hadoop.hbase.filter.FilterList hbaseFilter =
        new org.apache.hadoop.hbase.filter.FilterList(Operator.valueOf(filterList.getOperator().name()));
    for (Filter<K, T> rowFitler : filterList.getFilters()) {
      FilterFactory<K, T> factory = getHbaseFitlerUtil().getFactory(rowFitler);
      if (factory == null) {
        LOG.warn("HBase remote filter factory not yet implemented for "
            + rowFitler.getClass().getCanonicalName());
        return null;
      }
      org.apache.hadoop.hbase.filter.Filter hbaseRowFilter = factory.createFilter(rowFitler, store);
      if (hbaseRowFilter != null) {
        hbaseFilter.addFilter(hbaseRowFilter);
      }
    }
    return hbaseFilter;
  } else if (filter instanceof SingleFieldValueFilter) {
    SingleFieldValueFilter<K, T> fieldFilter = (SingleFieldValueFilter<K, T>) filter;
    HBaseColumn column = store.getMapping().getColumn(fieldFilter.getFieldName());
    CompareOperator compareOp = getCompareOp(fieldFilter.getFilterOp());
    byte[] family = column.getFamily();
    byte[] qualifier = column.getQualifier();
    byte[] value = HBaseByteInterface.toBytes(fieldFilter.getOperands().get(0));
    SingleColumnValueFilter hbaseFilter =
        new SingleColumnValueFilter(family, qualifier, compareOp, value);
    hbaseFilter.setFilterIfMissing(fieldFilter.isFilterIfMissing());
    return hbaseFilter;
  } else if (filter instanceof MapFieldValueFilter) {
    MapFieldValueFilter<K, T> mapFilter = (MapFieldValueFilter<K, T>) filter;
    HBaseColumn column = store.getMapping().getColumn(mapFilter.getFieldName());
    CompareOperator compareOp = getCompareOp(mapFilter.getFilterOp());
    byte[] family = column.getFamily();
    byte[] qualifier = HBaseByteInterface.toBytes(mapFilter.getMapKey());
    byte[] value = HBaseByteInterface.toBytes(mapFilter.getOperands().get(0));
    SingleColumnValueFilter hbaseFilter =
        new SingleColumnValueFilter(family, qualifier, compareOp, value);
    hbaseFilter.setFilterIfMissing(mapFilter.isFilterIfMissing());
    return hbaseFilter;
  } else {
    LOG.warn("HBase remote filter not yet implemented for " + filter.getClass().getCanonicalName());
    return null;
  }
}
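For the SingleFieldValueFilter branch, the factory ultimately hands HBase a SingleColumnValueFilter keyed on a CompareOperator. A hedged sketch of the equivalent filter built by hand; the family, qualifier, and value are illustrative placeholders, and scan is an assumed existing Scan:

  // Hedged sketch of the HBase-side filter the factory produces.
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(
      Bytes.toBytes("info"),               // column family (placeholder)
      Bytes.toBytes("age"),                // column qualifier (placeholder)
      CompareOperator.GREATER_OR_EQUAL,
      Bytes.toBytes(18L));                 // comparison value (placeholder)
  scvf.setFilterIfMissing(true);           // mirror fieldFilter.isFilterIfMissing()
  scan.setFilter(scvf);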
Use of org.apache.hadoop.hbase.CompareOperator in project hbase by apache.
The class HRegion, method checkAndMutateInternal:
private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutate,
    long nonceGroup, long nonce) throws IOException {
  byte[] row = checkAndMutate.getRow();
  Filter filter = null;
  byte[] family = null;
  byte[] qualifier = null;
  CompareOperator op = null;
  ByteArrayComparable comparator = null;
  if (checkAndMutate.hasFilter()) {
    filter = checkAndMutate.getFilter();
  } else {
    family = checkAndMutate.getFamily();
    qualifier = checkAndMutate.getQualifier();
    op = checkAndMutate.getCompareOp();
    comparator = new BinaryComparator(checkAndMutate.getValue());
  }
  TimeRange timeRange = checkAndMutate.getTimeRange();
  Mutation mutation = null;
  RowMutations rowMutations = null;
  if (checkAndMutate.getAction() instanceof Mutation) {
    mutation = (Mutation) checkAndMutate.getAction();
  } else {
    rowMutations = (RowMutations) checkAndMutate.getAction();
  }
  if (mutation != null) {
    checkMutationType(mutation);
    checkRow(mutation, row);
  } else {
    checkRow(rowMutations, row);
  }
  checkReadOnly();
  // TODO, add check for value length also move this check to the client
  checkResources();
  startRegionOperation();
  try {
    Get get = new Get(row);
    if (family != null) {
      checkFamily(family);
      get.addColumn(family, qualifier);
    }
    if (filter != null) {
      get.setFilter(filter);
    }
    if (timeRange != null) {
      get.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    // Lock row - note that doBatchMutate will relock this row if called
    checkRow(row, "doCheckAndRowMutate");
    RowLock rowLock = getRowLock(get.getRow(), false, null);
    try {
      if (this.getCoprocessorHost() != null) {
        CheckAndMutateResult result =
            getCoprocessorHost().preCheckAndMutateAfterRowLock(checkAndMutate);
        if (result != null) {
          return result;
        }
      }
      // NOTE: We used to wait here until mvcc caught up: mvcc.await();
      // Supposition is that now all changes are done under row locks, then when we go to read,
      // we'll get the latest on this row.
      boolean matches = false;
      long cellTs = 0;
      try (RegionScanner scanner = getScanner(new Scan(get))) {
        // NOTE: Please don't use HRegion.get() instead,
        // because it will copy cells to heap. See HBASE-26036
        List<Cell> result = new ArrayList<>(1);
        scanner.next(result);
        if (filter != null) {
          if (!result.isEmpty()) {
            matches = true;
            cellTs = result.get(0).getTimestamp();
          }
        } else {
          boolean valueIsNull =
              comparator.getValue() == null || comparator.getValue().length == 0;
          if (result.isEmpty() && valueIsNull) {
            matches = op != CompareOperator.NOT_EQUAL;
          } else if (result.size() > 0 && valueIsNull) {
            matches = (result.get(0).getValueLength() == 0) == (op != CompareOperator.NOT_EQUAL);
            cellTs = result.get(0).getTimestamp();
          } else if (result.size() == 1) {
            Cell kv = result.get(0);
            cellTs = kv.getTimestamp();
            int compareResult = PrivateCellUtil.compareValue(kv, comparator);
            matches = matches(op, compareResult);
          }
        }
      }
      // If matches, perform the mutation or the rowMutations
      if (matches) {
        // We have acquired the row lock already. If the system clock is NOT monotonically
        // non-decreasing (see HBASE-14070) we should make sure that the mutation has a
        // larger timestamp than what was observed via Get. doBatchMutate already does this, but
        // there is no way to pass the cellTs. See HBASE-14054.
        long now = EnvironmentEdgeManager.currentTime();
        // ensure write is not eclipsed
        long ts = Math.max(now, cellTs);
        byte[] byteTs = Bytes.toBytes(ts);
        if (mutation != null) {
          if (mutation instanceof Put) {
            updateCellTimestamps(mutation.getFamilyCellMap().values(), byteTs);
          }
          // And else 'delete' is not needed since it already does a second get, and sets the
          // timestamp from get (see prepareDeleteTimestamps).
        } else {
          for (Mutation m : rowMutations.getMutations()) {
            if (m instanceof Put) {
              updateCellTimestamps(m.getFamilyCellMap().values(), byteTs);
            }
          }
          // And else 'delete' is not needed since it already does a second get, and sets the
          // timestamp from get (see prepareDeleteTimestamps).
        }
        // All edits for the given row (across all column families) must happen atomically.
        Result r;
        if (mutation != null) {
          r = mutate(mutation, true, nonceGroup, nonce).getResult();
        } else {
          r = mutateRow(rowMutations, nonceGroup, nonce);
        }
        this.checkAndMutateChecksPassed.increment();
        return new CheckAndMutateResult(true, r);
      }
      this.checkAndMutateChecksFailed.increment();
      return new CheckAndMutateResult(false, null);
    } finally {
      rowLock.release();
    }
  } finally {
    closeRegionOperation();
  }
}
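From the client side, the path into checkAndMutateInternal starts with the CheckAndMutate builder. A minimal sketch, assuming an existing Table named table; the row key, column, and values are placeholder bytes:

  // Hedged client-side sketch: apply a Put only if the current value of cf:q
  // equals "old", evaluated server-side under the row lock.
  byte[] row = Bytes.toBytes("row1");                  // placeholder row key
  Put put = new Put(row)
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new"));
  CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
      .ifMatches(Bytes.toBytes("cf"), Bytes.toBytes("q"),
          CompareOperator.EQUAL, Bytes.toBytes("old")) // the CompareOperator condition
      .build(put);
  CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
  boolean mutationApplied = result.isSuccess();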