Example 71 with KeyValue

Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.

From the class TestUserScanQueryMatcher, method testMatch_ExpiredExplicit.

/**
   * Verify that {@link ScanQueryMatcher} only skips expired KeyValue instances and does not exit
   * from the row early (which would skip later, non-expired KeyValues). This version mimics a Get
   * with explicitly specified column qualifiers.
   * @throws IOException
   */
@Test
public void testMatch_ExpiredExplicit() throws IOException {
    long testTTL = 1000;
    MatchCode[] expected = new MatchCode[] {
        ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
        ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL,
        ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
        ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL,
        ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW,
        ScanQueryMatcher.MatchCode.DONE };
    long now = EnvironmentEdgeManager.currentTime();
    UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
        new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE, 0, rowComparator),
        get.getFamilyMap().get(fam2), now - testTTL, now, null);
    KeyValue[] kvs = new KeyValue[] {
        new KeyValue(row1, fam2, col1, now - 100, data),
        new KeyValue(row1, fam2, col2, now - 50, data),
        // expired: 5000 ms old, beyond the 1000 ms TTL
        new KeyValue(row1, fam2, col3, now - 5000, data),
        new KeyValue(row1, fam2, col4, now - 500, data),
        // expired: 10000 ms old, beyond the 1000 ms TTL
        new KeyValue(row1, fam2, col5, now - 10000, data),
        // first cell of the next row; the matcher should answer DONE here
        new KeyValue(row2, fam1, col1, now - 10, data) };
    KeyValue k = kvs[0];
    qm.setToNewRow(k);
    List<MatchCode> actual = new ArrayList<>(kvs.length);
    for (KeyValue kv : kvs) {
        actual.add(qm.match(kv));
    }
    assertEquals(expected.length, actual.size());
    for (int i = 0; i < expected.length; i++) {
        LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
        assertEquals(expected[i], actual.get(i));
    }
}
Also used: MatchCode(org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode), KeyValue(org.apache.hadoop.hbase.KeyValue), ArrayList(java.util.ArrayList), ScanInfo(org.apache.hadoop.hbase.regionserver.ScanInfo), Test(org.junit.Test)
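
As a quick aside, the expiry decision above is plain timestamp arithmetic. A minimal sketch, assuming the usual org.apache.hadoop.hbase imports (illustrative only, not part of the test):

// A cell is expired once its timestamp is older than now - TTL. With the test's
// 1000 ms TTL, the cells written at now - 5000 and now - 10000 are expired.
long ttl = 1000;
long now = EnvironmentEdgeManager.currentTime();
long oldestUnexpiredTs = now - ttl;

KeyValue fresh = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("fam2"),
    Bytes.toBytes("col2"), now - 50, Bytes.toBytes("data"));
KeyValue stale = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("fam2"),
    Bytes.toBytes("col3"), now - 5000, Bytes.toBytes("data"));

boolean freshExpired = fresh.getTimestamp() < oldestUnexpiredTs; // false
boolean staleExpired = stale.getTimestamp() < oldestUnexpiredTs; // true
// Per the javadoc above, an expired cell is skipped with a seek to the next
// column rather than ending the scan of the row early.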

Example 72 with KeyValue

Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.

From the class AbstractTestProtobufLog, method doRead.

/**
   * Appends entries to the WAL and then reads them back.
   * @param withTrailer If true, the WAL writer is closed before reading so that a trailer is
   *          appended to the WAL. Otherwise, reading starts right after the sync call, which
   *          means the reader is not aware of the trailer. In that scenario, if the reader
   *          tries to read the trailer in its next() call, ProtobufLogReader returns false.
   * @throws IOException
   */
private void doRead(boolean withTrailer) throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "tempwal");
    // delete the log if it already exists (test-only cleanup)
    fs.delete(path, true);
    W writer = null;
    ProtobufLogReader reader = null;
    try {
        HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        HTableDescriptor htd = new HTableDescriptor(tableName);
        fs.mkdirs(dir);
        // Write log in pb format.
        writer = createWriter(path);
        for (int i = 0; i < recordCount; ++i) {
            WALKey key = new WALKey(hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
            WALEdit edit = new WALEdit();
            for (int j = 0; j < columnCount; ++j) {
                if (i == 0) {
                    htd.addFamily(new HColumnDescriptor("column" + j));
                }
                String value = i + "" + j;
                edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
            }
            append(writer, new WAL.Entry(key, edit));
        }
        sync(writer);
        if (withTrailer) {
            writer.close();
        }
        // Now read the log using standard means.
        reader = (ProtobufLogReader) wals.createReader(fs, path);
        if (withTrailer) {
            assertNotNull(reader.trailer);
        } else {
            assertNull(reader.trailer);
        }
        for (int i = 0; i < recordCount; ++i) {
            WAL.Entry entry = reader.next();
            assertNotNull(entry);
            assertEquals(columnCount, entry.getEdit().size());
            assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
            assertEquals(tableName, entry.getKey().getTablename());
            int idx = 0;
            for (Cell val : entry.getEdit().getCells()) {
                assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(), val.getRowLength()));
                String value = i + "" + idx;
                assertArrayEquals(Bytes.toBytes(value), CellUtil.cloneValue(val));
                idx++;
            }
        }
        WAL.Entry entry = reader.next();
        assertNull(entry);
    } finally {
        if (writer != null) {
            writer.close();
        }
        if (reader != null) {
            reader.close();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path), KeyValue(org.apache.hadoop.hbase.KeyValue), WAL(org.apache.hadoop.hbase.wal.WAL), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), WALKey(org.apache.hadoop.hbase.wal.WALKey), TableName(org.apache.hadoop.hbase.TableName), Cell(org.apache.hadoop.hbase.Cell)
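
For context, each WAL entry written above pairs a WALKey with a WALEdit that accumulates KeyValues. A minimal sketch of that pairing, assuming the same HBase version as the test (the family and qualifier names here are invented for illustration):

// One WAL entry = one WALKey (region, table, sequence id, timestamp) plus one
// WALEdit holding the mutation's cells. Mirrors the write loop above.
byte[] row = Bytes.toBytes("row");
long ts = System.currentTimeMillis();

WALEdit edit = new WALEdit();
edit.add(new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q0"), ts, Bytes.toBytes("00")));
edit.add(new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q1"), ts, Bytes.toBytes("01")));
// edit.size() == 2; the read loop above asserts the same count per entry via
// entry.getEdit().size().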

Example 73 with KeyValue

Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.

From the class TestScanDeleteTracker, method testDeletedByDelete.

@Test
public void testDeletedByDelete() {
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), Bytes.toBytes("qualifier"), timestamp, KeyValue.Type.Delete);
    sdt.add(kv);
    DeleteResult ret = sdt.isDeleted(kv);
    assertEquals(DeleteResult.VERSION_DELETED, ret);
}
Also used: KeyValue(org.apache.hadoop.hbase.KeyValue), DeleteResult(org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult), Test(org.junit.Test)

Example 74 with KeyValue

Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.

From the class TestScanDeleteTracker, method testDeleteColumnDelete.

@Test
public void testDeleteColumnDelete() {
    byte[] qualifier = Bytes.toBytes("qualifier");
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), qualifier, timestamp, KeyValue.Type.DeleteColumn);
    sdt.add(kv);
    qualifier = Bytes.toBytes("qualifier1");
    kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), qualifier, timestamp, KeyValue.Type.Delete);
    sdt.add(kv);
    DeleteResult ret = sdt.isDeleted(kv);
    assertEquals(DeleteResult.VERSION_DELETED, ret);
}
Also used: KeyValue(org.apache.hadoop.hbase.KeyValue), DeleteResult(org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult), Test(org.junit.Test)
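
The two tests above exercise different delete markers. The distinction, sketched with plain KeyValues (illustrative only; this relies on standard HBase delete semantics rather than code from the tests):

// Type.Delete masks only the version at exactly its timestamp; Type.DeleteColumn
// masks every version of the qualifier at or below its timestamp.
byte[] row = Bytes.toBytes("row");
byte[] fam = Bytes.toBytes("f");

KeyValue versionDelete = new KeyValue(row, fam, Bytes.toBytes("qualifier"),
    100L, KeyValue.Type.Delete);       // masks "qualifier" at ts 100 only
KeyValue columnDelete = new KeyValue(row, fam, Bytes.toBytes("qualifier1"),
    100L, KeyValue.Type.DeleteColumn); // masks "qualifier1" at ts <= 100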

Example 75 with KeyValue

Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.

From the class TestScanDeleteTracker, method testDeleteKeepDelete.

// Tests the newer behavior where a Delete for a specific timestamp is saved by the
// tracker. This assertion could have been appended to the first test, but it is
// kept separate for clarity.
@Test
public void testDeleteKeepDelete() {
    byte[] qualifier = Bytes.toBytes("qualifier");
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), qualifier, timestamp, KeyValue.Type.Delete);
    sdt.add(kv);
    sdt.isDeleted(kv);
    assertEquals(false, sdt.isEmpty());
}
Also used: KeyValue(org.apache.hadoop.hbase.KeyValue), Test(org.junit.Test)
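
Why the tracker must stay non-empty: a Delete for one specific timestamp cannot mask other versions of the same qualifier, so the marker has to be retained for later comparisons. A hedged sketch of the semantics (not taken from the test):

// A version Delete at ts 100 masks only the ts-100 cell; a ts-99 cell of the
// same qualifier is still visible, so the tracker keeps the marker after the
// first isDeleted() lookup, which is why isEmpty() is false above.
byte[] row = Bytes.toBytes("row");
byte[] fam = Bytes.toBytes("f");
byte[] qual = Bytes.toBytes("qualifier");

KeyValue deleteAt100 = new KeyValue(row, fam, qual, 100L, KeyValue.Type.Delete);
KeyValue putAt100 = new KeyValue(row, fam, qual, 100L, Bytes.toBytes("v"));
KeyValue putAt99 = new KeyValue(row, fam, qual, 99L, Bytes.toBytes("v"));
// putAt100 is masked by deleteAt100; putAt99 is not.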

Aggregations

Usage counts across the indexed sources:

KeyValue (org.apache.hadoop.hbase.KeyValue): 552
Test (org.junit.Test): 289
Cell (org.apache.hadoop.hbase.Cell): 193
ArrayList (java.util.ArrayList): 172
Put (org.apache.hadoop.hbase.client.Put): 98
Scan (org.apache.hadoop.hbase.client.Scan): 85
Result (org.apache.hadoop.hbase.client.Result): 70
Configuration (org.apache.hadoop.conf.Configuration): 64
Path (org.apache.hadoop.fs.Path): 55
ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag): 36
Tag (org.apache.hadoop.hbase.Tag): 35
ByteBuffer (java.nio.ByteBuffer): 34
List (java.util.List): 34
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 34
IOException (java.io.IOException): 32
TableName (org.apache.hadoop.hbase.TableName): 32
TreeMap (java.util.TreeMap): 29
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 28
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 28
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 27