Search in sources:

Example 36 with TimeRange

use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class TestIncrementTimeRange, method checkHTableInterfaceMethods:

/**
 * Exercises put/increment/batch through the Table interface and verifies that
 * the TimeRange supplied on each Increment is observed by the coprocessor
 * (MyObserver records the TimeRange it saw in its static tr10/tr2 fields).
 *
 * @throws Exception if any table operation fails
 */
private void checkHTableInterfaceMethods() throws Exception {
    long time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
    checkRowValue(ROW_A, Bytes.toBytes(1L));

    time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    TimeRange range10 = TimeRange.between(1, time + 10);
    hTableInterface.increment(new Increment(ROW_A)
        .addColumn(TEST_FAMILY, qualifierCol1, 10L)
        .setTimeRange(range10.getMin(), range10.getMax()));
    checkRowValue(ROW_A, Bytes.toBytes(11L));
    // The TimeRange seen server-side must match what the client sent.
    assertEquals(MyObserver.tr10.getMin(), range10.getMin());
    assertEquals(MyObserver.tr10.getMax(), range10.getMax());

    time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    TimeRange range2 = TimeRange.between(1, time + 20);
    List<Row> actions = Arrays.asList(new Row[] {
        new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
            .setTimeRange(range2.getMin(), range2.getMax()),
        new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
            .setTimeRange(range2.getMin(), range2.getMax()) });
    // Single results array; the original pointlessly aliased it through a
    // second variable (results3 -> results1).
    Object[] results = new Object[actions.size()];
    hTableInterface.batch(actions, results);
    assertEquals(MyObserver.tr2.getMin(), range2.getMin());
    assertEquals(MyObserver.tr2.getMax(), range2.getMax());
    for (Object r : results) {
        assertTrue(r instanceof Result);
    }
    // 1 (put) + 10 (increment) + 2 + 2 (batched increments) = 15.
    checkRowValue(ROW_A, Bytes.toBytes(15L));
    hTableInterface.close();
}
Also used : TimeRange(org.apache.hadoop.hbase.io.TimeRange) Increment(org.apache.hadoop.hbase.client.Increment) Row(org.apache.hadoop.hbase.client.Row) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result)

Example 37 with TimeRange

use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class RemoteHTable, method get:

@Override
public Result get(Get get) throws IOException {
    // Translate the Get (row, families, time range, max versions) into a
    // REST row specification and fetch the matching results.
    TimeRange tr = get.getTimeRange();
    String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), tr.getMin(), tr.getMax(),
        get.getMaxVersions());
    if (get.getFilter() != null) {
        LOG.warn("filters not supported on gets");
    }
    Result[] results = getResults(spec);
    // A Get should yield at most one row; warn (but still answer) otherwise.
    if (results.length == 0) {
        return new Result();
    }
    if (results.length > 1) {
        LOG.warn("too many results for get (" + results.length + ")");
    }
    return results[0];
}
Also used : TimeRange(org.apache.hadoop.hbase.io.TimeRange) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult)

Example 38 with TimeRange

use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class HRegion, method checkAndMutateInternal:

/**
 * Atomically evaluates a condition on a row (either a filter match or a
 * column-value comparison) and, if it holds, applies the requested Mutation
 * or RowMutations under the row lock.
 *
 * @param checkAndMutate the condition plus the action to apply on success
 * @param nonceGroup nonce group for idempotent retry handling
 * @param nonce nonce for idempotent retry handling
 * @return a result whose success flag reflects whether the condition matched;
 *         on success it may also carry the mutation's Result
 * @throws IOException on validation failure or any storage error
 */
private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutate, long nonceGroup, long nonce) throws IOException {
    byte[] row = checkAndMutate.getRow();
    // The request carries either a Filter OR a (family, qualifier, op, value)
    // comparison — never both; unpack whichever form is present.
    Filter filter = null;
    byte[] family = null;
    byte[] qualifier = null;
    CompareOperator op = null;
    ByteArrayComparable comparator = null;
    if (checkAndMutate.hasFilter()) {
        filter = checkAndMutate.getFilter();
    } else {
        family = checkAndMutate.getFamily();
        qualifier = checkAndMutate.getQualifier();
        op = checkAndMutate.getCompareOp();
        comparator = new BinaryComparator(checkAndMutate.getValue());
    }
    TimeRange timeRange = checkAndMutate.getTimeRange();
    // The action is either a single Mutation or a RowMutations batch.
    Mutation mutation = null;
    RowMutations rowMutations = null;
    if (checkAndMutate.getAction() instanceof Mutation) {
        mutation = (Mutation) checkAndMutate.getAction();
    } else {
        rowMutations = (RowMutations) checkAndMutate.getAction();
    }
    // Validate before taking any locks: the action must target the same row
    // the condition is checked on.
    if (mutation != null) {
        checkMutationType(mutation);
        checkRow(mutation, row);
    } else {
        checkRow(rowMutations, row);
    }
    checkReadOnly();
    // TODO, add check for value length also move this check to the client
    checkResources();
    startRegionOperation();
    try {
        // Build the Get used to evaluate the condition.
        Get get = new Get(row);
        if (family != null) {
            checkFamily(family);
            get.addColumn(family, qualifier);
        }
        if (filter != null) {
            get.setFilter(filter);
        }
        if (timeRange != null) {
            get.setTimeRange(timeRange.getMin(), timeRange.getMax());
        }
        // Lock row - note that doBatchMutate will relock this row if called
        checkRow(row, "doCheckAndRowMutate");
        RowLock rowLock = getRowLock(get.getRow(), false, null);
        try {
            // Give coprocessors a chance to short-circuit the whole operation
            // now that the row lock is held.
            if (this.getCoprocessorHost() != null) {
                CheckAndMutateResult result = getCoprocessorHost().preCheckAndMutateAfterRowLock(checkAndMutate);
                if (result != null) {
                    return result;
                }
            }
            // NOTE: We used to wait here until mvcc caught up: mvcc.await();
            // Supposition is that now all changes are done under row locks, then when we go to read,
            // we'll get the latest on this row.
            boolean matches = false;
            // Timestamp of the cell that satisfied the condition; used below
            // to guarantee the subsequent write is not eclipsed by it.
            long cellTs = 0;
            try (RegionScanner scanner = getScanner(new Scan(get))) {
                // NOTE: Please don't use HRegion.get() instead,
                // because it will copy cells to heap. See HBASE-26036
                List<Cell> result = new ArrayList<>(1);
                scanner.next(result);
                if (filter != null) {
                    // Filter form: any surviving cell means the condition holds.
                    if (!result.isEmpty()) {
                        matches = true;
                        cellTs = result.get(0).getTimestamp();
                    }
                } else {
                    // Comparison form: a null/empty expected value means
                    // "cell absent or empty" (inverted for NOT_EQUAL).
                    boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
                    if (result.isEmpty() && valueIsNull) {
                        matches = op != CompareOperator.NOT_EQUAL;
                    } else if (result.size() > 0 && valueIsNull) {
                        matches = (result.get(0).getValueLength() == 0) == (op != CompareOperator.NOT_EQUAL);
                        cellTs = result.get(0).getTimestamp();
                    } else if (result.size() == 1) {
                        Cell kv = result.get(0);
                        cellTs = kv.getTimestamp();
                        int compareResult = PrivateCellUtil.compareValue(kv, comparator);
                        matches = matches(op, compareResult);
                    }
                }
            }
            // If matches, perform the mutation or the rowMutations
            if (matches) {
                // We have acquired the row lock already. If the system clock is NOT monotonically
                // non-decreasing (see HBASE-14070) we should make sure that the mutation has a
                // larger timestamp than what was observed via Get. doBatchMutate already does this, but
                // there is no way to pass the cellTs. See HBASE-14054.
                long now = EnvironmentEdgeManager.currentTime();
                // ensure write is not eclipsed
                long ts = Math.max(now, cellTs);
                byte[] byteTs = Bytes.toBytes(ts);
                if (mutation != null) {
                    if (mutation instanceof Put) {
                        updateCellTimestamps(mutation.getFamilyCellMap().values(), byteTs);
                    }
                // And else 'delete' is not needed since it already does a second get, and sets the
                // timestamp from get (see prepareDeleteTimestamps).
                } else {
                    for (Mutation m : rowMutations.getMutations()) {
                        if (m instanceof Put) {
                            updateCellTimestamps(m.getFamilyCellMap().values(), byteTs);
                        }
                    }
                // And else 'delete' is not needed since it already does a second get, and sets the
                // timestamp from get (see prepareDeleteTimestamps).
                }
                // All edits for the given row (across all column families) must happen atomically.
                Result r;
                if (mutation != null) {
                    r = mutate(mutation, true, nonceGroup, nonce).getResult();
                } else {
                    r = mutateRow(rowMutations, nonceGroup, nonce);
                }
                this.checkAndMutateChecksPassed.increment();
                return new CheckAndMutateResult(true, r);
            }
            this.checkAndMutateChecksFailed.increment();
            return new CheckAndMutateResult(false, null);
        } finally {
            rowLock.release();
        }
    } finally {
        closeRegionOperation();
    }
}
Also used : CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) ArrayList(java.util.ArrayList) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Put(org.apache.hadoop.hbase.client.Put) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) CompareOperator(org.apache.hadoop.hbase.CompareOperator) TimeRange(org.apache.hadoop.hbase.io.TimeRange) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) Filter(org.apache.hadoop.hbase.filter.Filter) Get(org.apache.hadoop.hbase.client.Get) Scan(org.apache.hadoop.hbase.client.Scan) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell)

Example 39 with TimeRange

use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class TestAppendTimeRange, method testHTableInterfaceMethods:

@Test
public void testHTableInterfaceMethods() throws Exception {
    // Drives append/batch through the Table interface and checks that the
    // TimeRange on each Append is the one the coprocessor observed.
    try (Table table = util.createTable(TableName.valueOf(name.getMethodName()), TEST_FAMILY)) {
        table.put(new Put(ROW).addColumn(TEST_FAMILY, QUAL, VALUE));
        long now = EnvironmentEdgeManager.currentTime();
        mee.setValue(now);
        table.put(new Put(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("a")));
        checkRowValue(table, ROW, Bytes.toBytes("a"));

        now = EnvironmentEdgeManager.currentTime();
        mee.setValue(now);
        TimeRange appendRange = TimeRange.between(1, now + 10);
        table.append(new Append(ROW)
            .addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b"))
            .setTimeRange(appendRange.getMin(), appendRange.getMax()));
        checkRowValue(table, ROW, Bytes.toBytes("ab"));
        // Server-side observer must have seen exactly the range we sent.
        assertEquals(MyObserver.tr10.getMin(), appendRange.getMin());
        assertEquals(MyObserver.tr10.getMax(), appendRange.getMax());

        now = EnvironmentEdgeManager.currentTime();
        mee.setValue(now);
        TimeRange batchRange = TimeRange.between(1, now + 20);
        List<Row> batchActions = Arrays.asList(new Row[] {
            new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c"))
                .setTimeRange(batchRange.getMin(), batchRange.getMax()),
            new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c"))
                .setTimeRange(batchRange.getMin(), batchRange.getMax()) });
        Object[] outcomes = new Object[batchActions.size()];
        table.batch(batchActions, outcomes);
        assertEquals(MyObserver.tr2.getMin(), batchRange.getMin());
        assertEquals(MyObserver.tr2.getMax(), batchRange.getMax());
        for (Object outcome : outcomes) {
            assertTrue(outcome instanceof Result);
        }
        checkRowValue(table, ROW, Bytes.toBytes("abcc"));
    }
}
Also used : TimeRange(org.apache.hadoop.hbase.io.TimeRange) Table(org.apache.hadoop.hbase.client.Table) Append(org.apache.hadoop.hbase.client.Append) Row(org.apache.hadoop.hbase.client.Row) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 40 with TimeRange

use of org.apache.hadoop.hbase.io.TimeRange in project hbase by apache.

From the class TestSerialization, method testScan:

@Test
public void testScan() throws Exception {
    // Round-trips a Scan through its protobuf form and verifies that row
    // bounds, columns, max versions, time range and filters all survive.
    byte[] startRow = Bytes.toBytes("startRow");
    byte[] stopRow = Bytes.toBytes("stopRow");
    byte[] fam = Bytes.toBytes("fam");
    byte[] qf1 = Bytes.toBytes("qf1");
    long ts = EnvironmentEdgeManager.currentTime();
    int maxVersions = 2;
    Scan scan = new Scan().withStartRow(startRow).withStopRow(stopRow);
    scan.addColumn(fam, qf1);
    scan.setTimeRange(ts, ts + 1);
    scan.readVersions(maxVersions);
    ClientProtos.Scan scanProto = ProtobufUtil.toScan(scan);
    Scan desScan = ProtobufUtil.toScan(scanProto);
    assertTrue(Bytes.equals(scan.getStartRow(), desScan.getStartRow()));
    assertTrue(Bytes.equals(scan.getStopRow(), desScan.getStopRow()));
    assertEquals(scan.getCacheBlocks(), desScan.getCacheBlocks());
    // Every family/column added to the original must survive the round trip.
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) {
        assertTrue(desScan.getFamilyMap().containsKey(entry.getKey()));
        Set<byte[]> desSet = desScan.getFamilyMap().get(entry.getKey());
        for (byte[] column : entry.getValue()) {
            assertTrue(desSet.contains(column));
        }
    }
    // Check max versions and time range BEFORE reassigning 'scan' for the
    // filter test. The original code ran the filter sub-test inside the
    // family-map loop, clobbering scan/desScan, so these assertions compared
    // the new filter-scan against its own round-trip (trivially true) instead
    // of the scan configured above.
    assertEquals(scan.getMaxVersions(), desScan.getMaxVersions());
    TimeRange tr = scan.getTimeRange();
    TimeRange desTr = desScan.getTimeRange();
    assertEquals(tr.getMax(), desTr.getMax());
    assertEquals(tr.getMin(), desTr.getMin());
    // Test filters are serialized properly.
    scan = new Scan().withStartRow(startRow);
    final String name = "testScan";
    byte[] prefix = Bytes.toBytes(name);
    scan.setFilter(new PrefixFilter(prefix));
    scanProto = ProtobufUtil.toScan(scan);
    desScan = ProtobufUtil.toScan(scanProto);
    Filter f = desScan.getFilter();
    assertTrue(f instanceof PrefixFilter);
}
Also used : NavigableSet(java.util.NavigableSet) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) TimeRange(org.apache.hadoop.hbase.io.TimeRange) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) PrefixFilter(org.apache.hadoop.hbase.filter.PrefixFilter) Filter(org.apache.hadoop.hbase.filter.Filter) Scan(org.apache.hadoop.hbase.client.Scan) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) Map(java.util.Map) Test(org.junit.Test)

Aggregations

TimeRange (org.apache.hadoop.hbase.io.TimeRange)45 Test (org.junit.Test)11 Map (java.util.Map)10 Get (org.apache.hadoop.hbase.client.Get)10 Scan (org.apache.hadoop.hbase.client.Scan)10 Cell (org.apache.hadoop.hbase.Cell)8 NavigableSet (java.util.NavigableSet)7 NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)7 HashMap (java.util.HashMap)6 Filter (org.apache.hadoop.hbase.filter.Filter)6 NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair)6 ByteString (com.google.protobuf.ByteString)5 ArrayList (java.util.ArrayList)5 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)5 Put (org.apache.hadoop.hbase.client.Put)5 List (java.util.List)4 Increment (org.apache.hadoop.hbase.client.Increment)4 Result (org.apache.hadoop.hbase.client.Result)4 Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column)4 Column (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Column)4