
Example 6 with CompareOp

Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project phoenix by apache.

The class RoundDateExpression, method newKeyPart.

/**
 * Form the key range from the key to the key right before or at the
 * next rounded value.
 */
@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {

        private final List<Expression> extractNodes = Collections.<Expression>singletonList(RoundDateExpression.this);

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }

        @Override
        public List<Expression> getExtractNodes() {
            return extractNodes;
        }

        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            PDataType type = getColumn().getDataType();
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            rhs.evaluate(null, ptr);
            byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr);
            // No need to take into account SortOrder, because ROUND
            // always forces the value to be in ascending order
            PDataCodec codec = getKeyRangeCodec(type);
            int offset = ByteUtil.isInclusive(op) ? 1 : 0;
            long value = codec.decodeLong(key, 0, SortOrder.getDefault());
            byte[] nextKey = new byte[type.getByteSize()];
            switch(op) {
                case EQUAL:
                    // If the RHS value is not on a rounding boundary, no rounded
                    // value can ever equal it, so the range is degenerate.
                    if (value % divBy != 0) {
                        return KeyRange.EMPTY_RANGE;
                    }
                    codec.encodeLong(value + divBy, nextKey, 0);
                    return type.getKeyRange(key, true, nextKey, false);
                case GREATER:
                case GREATER_OR_EQUAL:
                    codec.encodeLong((value + divBy - offset) / divBy * divBy, nextKey, 0);
                    return type.getKeyRange(nextKey, true, KeyRange.UNBOUND, false);
                case LESS:
                case LESS_OR_EQUAL:
                    codec.encodeLong((value + divBy - (1 - offset)) / divBy * divBy, nextKey, 0);
                    return type.getKeyRange(KeyRange.UNBOUND, false, nextKey, false);
                default:
                    return childPart.getKeyRange(op, rhs);
            }
        }

        @Override
        public PTable getTable() {
            return childPart.getTable();
        }
    };
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), PDataCodec (org.apache.phoenix.schema.types.PDataType.PDataCodec), PDataType (org.apache.phoenix.schema.types.PDataType), Expression (org.apache.phoenix.expression.Expression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), KeyPart (org.apache.phoenix.compile.KeyPart), List (java.util.List), CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
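The boundary arithmetic in the GREATER and LESS branches is the subtle part: integer division snaps the decoded value to a multiple of divBy, and offset shifts the result by one for inclusive operators. The following standalone sketch (not Phoenix code; DIV_BY and the sample values are assumptions for illustration) traces that math.

// Standalone sketch of the boundary arithmetic above; DIV_BY and the
// sample timestamps are assumptions, not Phoenix code.
public class RoundBoundarySketch {

    static final long DIV_BY = 24L * 60 * 60 * 1000; // one day in millis

    // GREATER / GREATER_OR_EQUAL lower bound: rounds value up to the next
    // multiple of DIV_BY; offset = 1 (inclusive) keeps an exact multiple as-is.
    static long lowerBound(long value, boolean inclusive) {
        int offset = inclusive ? 1 : 0;
        return (value + DIV_BY - offset) / DIV_BY * DIV_BY;
    }

    // LESS / LESS_OR_EQUAL upper bound, mirroring the (1 - offset) term.
    static long upperBound(long value, boolean inclusive) {
        int offset = inclusive ? 1 : 0;
        return (value + DIV_BY - (1 - offset)) / DIV_BY * DIV_BY;
    }

    public static void main(String[] args) {
        long boundary = 3 * DIV_BY;          // exactly on a day boundary
        long midDay = boundary + DIV_BY / 2; // halfway into the next day
        System.out.println(lowerBound(boundary, true));  // 3 * DIV_BY: >= keeps an exact multiple
        System.out.println(lowerBound(boundary, false)); // 4 * DIV_BY: > skips past it
        System.out.println(lowerBound(midDay, true));    // 4 * DIV_BY: mid-interval rounds up
    }
}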

Example 7 with CompareOp

Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project phoenix by apache.

The class RTrimFunction, method newKeyPart.

@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {

        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            byte[] lowerRange = KeyRange.UNBOUND;
            byte[] upperRange = KeyRange.UNBOUND;
            boolean lowerInclusive = true;
            boolean upperInclusive = false;
            PDataType type = getColumn().getDataType();
            SortOrder sortOrder = getColumn().getSortOrder();
            switch(op) {
                case LESS_OR_EQUAL:
                    lowerInclusive = false;
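                    // fall through: LESS_OR_EQUAL shares the EQUAL handling below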
                case EQUAL:
                    upperRange = evaluateExpression(rhs);
                    if (op == CompareOp.EQUAL) {
                        lowerRange = upperRange;
                    }
                    if (sortOrder == SortOrder.ASC || !getTable().rowKeyOrderOptimizable()) {
                        upperRange = Arrays.copyOf(upperRange, upperRange.length + 1);
                        upperRange[upperRange.length - 1] = StringUtil.SPACE_UTF8;
                        ByteUtil.nextKey(upperRange, upperRange.length);
                    } else {
                        upperInclusive = true;
                        if (op == CompareOp.LESS_OR_EQUAL) {
                            // Nothing else needed for LESS_OR_EQUAL: the inclusive upper bound
                            // will be the RHS value.
                            break;
                        }
                        /*
                         * Somewhat tricky to get the range correct for the DESC equality case.
                         * The lower range is the RHS value followed by any number of inverted spaces.
                         * We need to add a zero byte as the lower range will have an \xFF byte
                         * appended to it and otherwise we'd skip past any rows where there is more
                         * than one space following the RHS.
                         * The upper range should span up to and including the RHS value. We need
                         * to add our own \xFF as otherwise this will look like a degenerate query
                         * since the lower would be bigger than the upper range.
                         */
                        lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 2);
                        lowerRange[lowerRange.length - 2] = StringUtil.INVERTED_SPACE_UTF8;
                        lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE;
                        upperRange = Arrays.copyOf(upperRange, upperRange.length + 1);
                        upperRange[upperRange.length - 1] = QueryConstants.DESC_SEPARATOR_BYTE;
                    }
                    break;
                default:
                    // TODO: Is this ok for DESC?
                    return childPart.getKeyRange(op, rhs);
            }
            Integer length = getColumn().getMaxLength();
            if (type.isFixedWidth() && length != null) {
                // Pad the bounds to the fixed column width, since padded rows sort
                // *after* rows with no padding.
                if (lowerRange != KeyRange.UNBOUND) {
                    lowerRange = type.pad(lowerRange, length, SortOrder.ASC);
                }
                if (upperRange != KeyRange.UNBOUND) {
                    upperRange = type.pad(upperRange, length, SortOrder.ASC);
                }
            }
            return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive);
        }

        @Override
        public List<Expression> getExtractNodes() {
            // We cannot extract the node, as we may have false positives with trailing
            // non blank characters such as 'foo  bar' where the RHS constant is 'foo'.
            return Collections.<Expression>emptyList();
        }

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }

        @Override
        public PTable getTable() {
            return childPart.getTable();
        }
    };
}
Also used: PDataType (org.apache.phoenix.schema.types.PDataType), Expression (org.apache.phoenix.expression.Expression), KeyPart (org.apache.phoenix.compile.KeyPart), SortOrder (org.apache.phoenix.schema.SortOrder), CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
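The ASC branch builds its exclusive upper bound by appending a space byte and then incrementing the key. The standalone sketch below traces why that bound captures every space-padded variant of the RHS; the simplified nextKey here is an assumption standing in for org.apache.phoenix.util.ByteUtil.nextKey.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Standalone sketch of the ASC upper-bound construction above; the
// simplified nextKey below is an assumption standing in for
// org.apache.phoenix.util.ByteUtil.nextKey.
public class RTrimBoundSketch {

    // Increment the last byte of key, carrying left on 0xFF overflow.
    static void nextKey(byte[] key, int length) {
        int i = length - 1;
        while (i >= 0 && ++key[i] == 0) { // byte wraps 0xFF -> 0x00: carry left
            i--;
        }
    }

    public static void main(String[] args) {
        byte[] rhs = "foo".getBytes(StandardCharsets.UTF_8);
        byte[] upper = Arrays.copyOf(rhs, rhs.length + 1);
        upper[upper.length - 1] = 0x20;   // trailing space, as in the code above
        nextKey(upper, upper.length);     // 0x20 -> 0x21, giving exclusive bound "foo!"
        // "foo", "foo ", "foo  ", ... all sort below "foo!", so the range
        // ["foo", "foo!") matches every space-padded variant of the RHS.
        System.out.println(new String(upper, StandardCharsets.UTF_8)); // foo!
    }
}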

Example 8 with CompareOp

Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project hbase by apache.

The class SingleColumnValueFilter, method parseFrom.

/**
   * @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
   * @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
   * @see #toByteArray
   */
public static SingleColumnValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
    FilterProtos.SingleColumnValueFilter proto;
    try {
        proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
        throw new DeserializationException(e);
    }
    final CompareOp compareOp = CompareOp.valueOf(proto.getCompareOp().name());
    final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator;
    try {
        comparator = ProtobufUtil.toComparator(proto.getComparator());
    } catch (IOException ioe) {
        throw new DeserializationException(ioe);
    }
    return new SingleColumnValueFilter(proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null, compareOp, comparator, proto.getFilterIfMissing(), proto.getLatestVersionOnly());
}
Also used: InvalidProtocolBufferException (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException), FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos), CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp), IOException (java.io.IOException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
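Since parseFrom is the inverse of toByteArray, a filter survives a round trip through its protobuf bytes with the CompareOp intact. A minimal sketch against the HBase 1.x client API; the family, qualifier, and value are placeholder names.

import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Round-trip sketch against the HBase 1.x client API; family, qualifier,
// and value are placeholders.
public class ScvfRoundTrip {
    public static void main(String[] args) throws Exception {
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                Bytes.toBytes("cf"),        // column family
                Bytes.toBytes("q"),         // qualifier
                CompareOp.EQUAL,            // the CompareOp being serialized
                Bytes.toBytes("expected")); // value, wrapped in a BinaryComparator
        byte[] pb = filter.toByteArray();   // protobuf serialization
        SingleColumnValueFilter copy = SingleColumnValueFilter.parseFrom(pb);
        System.out.println(copy.getOperator()); // EQUAL
    }
}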

Example 9 with CompareOp

Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project hbase by apache.

The class SingleColumnValueExcludeFilter, method parseFrom.

/**
   * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
   * @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code>
   * @throws DeserializationException
   * @see #toByteArray
   */
public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
    FilterProtos.SingleColumnValueExcludeFilter proto;
    try {
        proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
        throw new DeserializationException(e);
    }
    FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter();
    final CompareOp compareOp = CompareOp.valueOf(parentProto.getCompareOp().name());
    final ByteArrayComparable comparator;
    try {
        comparator = ProtobufUtil.toComparator(parentProto.getComparator());
    } catch (IOException ioe) {
        throw new DeserializationException(ioe);
    }
    return new SingleColumnValueExcludeFilter(parentProto.hasColumnFamily() ? parentProto.getColumnFamily().toByteArray() : null, parentProto.hasColumnQualifier() ? parentProto.getColumnQualifier().toByteArray() : null, compareOp, comparator, parentProto.getFilterIfMissing(), parentProto.getLatestVersionOnly());
}
Also used: InvalidProtocolBufferException (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException), FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos), CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp), IOException (java.io.IOException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
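In use, the exclude variant gates rows with the same CompareOp semantics but strips the tested cell from the returned row. A minimal usage sketch, assuming the HBase 1.x client API (column names and value are placeholders):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Usage sketch (HBase 1.x API; column names and value are placeholders):
// keep rows where cf:q equals "expected", but omit cf:q itself from results.
public class ExcludeFilterScan {
    public static Scan buildScan() {
        SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter(
                Bytes.toBytes("cf"), Bytes.toBytes("q"),
                CompareOp.EQUAL, Bytes.toBytes("expected"));
        filter.setFilterIfMissing(true); // also skip rows lacking cf:q entirely
        Scan scan = new Scan();
        scan.setFilter(filter);
        return scan;
    }
}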

Example 10 with CompareOp

Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project hbase by apache.

The class RSRpcServices, method mutate.

/**
   * Mutate data in a table.
   *
   * @param rpcc the RPC controller
   * @param request the mutate request
   * @throws ServiceException
   */
@Override
public MutateResponse mutate(final RpcController rpcc, final MutateRequest request) throws ServiceException {
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    OperationQuota quota = null;
    RpcCallContext context = RpcServer.getCurrentCall();
    // Clear scanner so we are not holding on to reference across call.
    if (controller != null) {
        controller.setCellScanner(null);
    }
    try {
        checkOpen();
        requestCount.increment();
        rpcMutateRequestCount.increment();
        Region region = getRegion(request.getRegion());
        MutateResponse.Builder builder = MutateResponse.newBuilder();
        MutationProto mutation = request.getMutation();
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
        long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
        Result r = null;
        Boolean processed = null;
        MutationType type = mutation.getMutateType();
        quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
        switch(type) {
            case APPEND:
                // TODO: this doesn't actually check anything.
                r = append(region, quota, mutation, cellScanner, nonceGroup);
                break;
            case INCREMENT:
                // TODO: this doesn't actually check anything.
                r = increment(region, quota, mutation, cellScanner, nonceGroup);
                break;
            case PUT:
                Put put = ProtobufUtil.toPut(mutation, cellScanner);
                quota.addMutation(put);
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    if (region.getCoprocessorHost() != null) {
                        processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier, compareOp, comparator, put);
                    }
                    if (processed == null) {
                        boolean result = region.checkAndMutate(row, family, qualifier, compareOp, comparator, put, true);
                        if (region.getCoprocessorHost() != null) {
                            result = region.getCoprocessorHost().postCheckAndPut(row, family, qualifier, compareOp, comparator, put, result);
                        }
                        processed = result;
                    }
                } else {
                    region.put(put);
                    processed = Boolean.TRUE;
                }
                break;
            case DELETE:
                Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
                quota.addMutation(delete);
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    if (region.getCoprocessorHost() != null) {
                        processed = region.getCoprocessorHost().preCheckAndDelete(row, family, qualifier, compareOp, comparator, delete);
                    }
                    if (processed == null) {
                        boolean result = region.checkAndMutate(row, family, qualifier, compareOp, comparator, delete, true);
                        if (region.getCoprocessorHost() != null) {
                            result = region.getCoprocessorHost().postCheckAndDelete(row, family, qualifier, compareOp, comparator, delete, result);
                        }
                        processed = result;
                    }
                } else {
                    region.delete(delete);
                    processed = Boolean.TRUE;
                }
                break;
            default:
                throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
        }
        if (processed != null) {
            builder.setProcessed(processed.booleanValue());
        }
        boolean clientCellBlockSupported = isClientCellBlockSupport(context);
        addResult(builder, r, controller, clientCellBlockSupported);
        if (clientCellBlockSupported) {
            addSize(context, r, null);
        }
        return builder.build();
    } catch (IOException ie) {
        regionServer.checkFileSystem();
        throw new ServiceException(ie);
    } finally {
        if (quota != null) {
            quota.close();
        }
    }
}
Also used: RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext), Condition (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition), Delete (org.apache.hadoop.hbase.client.Delete), MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), HBaseIOException (org.apache.hadoop.hbase.HBaseIOException), CellScanner (org.apache.hadoop.hbase.CellScanner), MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto), Put (org.apache.hadoop.hbase.client.Put), RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult), Result (org.apache.hadoop.hbase.client.Result), MutateResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse), HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController), ByteArrayComparable (org.apache.hadoop.hbase.filter.ByteArrayComparable), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
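The PUT and DELETE Condition branches above are the server side of the client's checkAndPut and checkAndDelete calls: the CompareOp arrives inside the request's Condition message and is decoded with CompareOp.valueOf. A client-side sketch of the call that reaches the PUT branch, assuming the HBase 1.x Table API (table, row, and column names are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.util.Bytes;

// Client-side sketch (HBase 1.x API; table, row, and column names are
// placeholders) of the call that arrives at the PUT-with-Condition branch.
public class CheckAndPutSketch {
    public static boolean conditionalPut(Connection conn) throws java.io.IOException {
        try (Table table = conn.getTable(TableName.valueOf("t1"))) {
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new"));
            // Apply the Put only if cf:q currently equals "old"; the CompareOp
            // travels in the request's Condition and is decoded server-side.
            return table.checkAndPut(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
                    Bytes.toBytes("q"), CompareOp.EQUAL, Bytes.toBytes("old"), put);
        }
    }
}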

Aggregations

CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp): 14 uses
IOException (java.io.IOException): 4 uses
ByteArrayComparable (org.apache.hadoop.hbase.filter.ByteArrayComparable): 4 uses
Expression (org.apache.phoenix.expression.Expression): 4 uses
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 3 uses
KeyPart (org.apache.phoenix.compile.KeyPart): 3 uses
PDataType (org.apache.phoenix.schema.types.PDataType): 3 uses
InterruptedIOException (java.io.InterruptedIOException): 2 uses
List (java.util.List): 2 uses
SchemaPath (org.apache.drill.common.expression.SchemaPath): 2 uses
CellScanner (org.apache.hadoop.hbase.CellScanner): 2 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2 uses
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2 uses
DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException): 2 uses
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 2 uses
Filter (org.apache.hadoop.hbase.filter.Filter): 2 uses
NullComparator (org.apache.hadoop.hbase.filter.NullComparator): 2 uses
RegexStringComparator (org.apache.hadoop.hbase.filter.RegexStringComparator): 2 uses
RowFilter (org.apache.hadoop.hbase.filter.RowFilter): 2 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 2 uses