
Example 1 with Type

Use of org.apache.hadoop.hbase.KeyValue.Type in project hbase by apache.

From the class TestIndividualBytesFieldCell, the method testConstructorAndVerify:

@BeforeClass
public static void testConstructorAndVerify() {
    // Immutable inputs
    byte[] row = Bytes.toBytes("immutable-row");
    byte[] family = Bytes.toBytes("immutable-family");
    byte[] qualifier = Bytes.toBytes("immutable-qualifier");
    byte[] value = Bytes.toBytes("immutable-value");
    byte[] tags = Bytes.toBytes("immutable-tags");
    // Other inputs
    long timestamp = 5000L;
    long seqId = 0L;
    Type type = KeyValue.Type.Put;
    // ic0 and kv0 are static fields of the test class, shared with the other test methods.
    ic0 = new IndividualBytesFieldCell(row, family, qualifier, timestamp, type, seqId, value, tags);
    kv0 = new KeyValue(row, family, qualifier, timestamp, type, value, tags);
    // Verify that no local copy is made for row, family, qualifier, value or tags.
    assertTrue(ic0.getRowArray() == row);
    assertTrue(ic0.getFamilyArray() == family);
    assertTrue(ic0.getQualifierArray() == qualifier);
    assertTrue(ic0.getValueArray() == value);
    assertTrue(ic0.getTagsArray() == tags);
    // Verify others.
    assertEquals(timestamp, ic0.getTimestamp());
    assertEquals(seqId, ic0.getSequenceId());
    assertEquals(type.getCode(), ic0.getTypeByte());
    // Verify offsets of backing byte arrays are always 0.
    assertEquals(0, ic0.getRowOffset());
    assertEquals(0, ic0.getFamilyOffset());
    assertEquals(0, ic0.getQualifierOffset());
    assertEquals(0, ic0.getValueOffset());
    assertEquals(0, ic0.getTagsOffset());
}
Also used: Type (org.apache.hadoop.hbase.KeyValue.Type), BeforeClass (org.junit.BeforeClass)
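
A minimal standalone sketch of what the no-copy assertions above imply in practice: because IndividualBytesFieldCell keeps references to the caller's arrays rather than copying them, later mutations of those arrays are visible through the cell. This is an assumed snippet, not part of TestIndividualBytesFieldCell; it presumes only the constructor shape shown above and hbase-common on the classpath (note that IndividualBytesFieldCell is an internal, audience-private HBase class).

import org.apache.hadoop.hbase.IndividualBytesFieldCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class SharedBackingArraysSketch {
    public static void main(String[] args) {
        byte[] row = Bytes.toBytes("r1");
        byte[] value = Bytes.toBytes("v1");
        // Same constructor shape as in the test above; the cell wraps the arrays without copying.
        IndividualBytesFieldCell cell = new IndividualBytesFieldCell(
                row, Bytes.toBytes("f"), Bytes.toBytes("q"),
                5000L, KeyValue.Type.Put, 0L, value, null);
        // The backing array is shared, so mutating it is visible through the cell.
        value[0] = 'X';
        System.out.println(cell.getValueArray() == value);        // true: same reference
        System.out.println(Bytes.toString(cell.getValueArray())); // prints "X1"
    }
}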

Example 2 with Type

Use of org.apache.hadoop.hbase.KeyValue.Type in project hbase by apache.

From the class TestIndividualBytesFieldCell, the method testNullFamilyQualifierValueTags:

/**
   * Verify getXXXArray() and getXXXLength() when family/qualifier/value/tags are null.
   * Should have the same behaviors as {@link KeyValue}.
   */
@Test
public void testNullFamilyQualifierValueTags() {
    byte[] row = Bytes.toBytes("row1");
    long timestamp = 5000L;
    long seqId = 0L;
    Type type = KeyValue.Type.Put;
    // Test when following fields are null.
    byte[] family = null;
    byte[] qualifier = null;
    byte[] value = null;
    byte[] tags = null;
    Cell ic1 = new IndividualBytesFieldCell(row, family, qualifier, timestamp, type, seqId, value, tags);
    Cell kv1 = new KeyValue(row, family, qualifier, timestamp, type, value, tags);
    byte[] familyArrayInKV = Bytes.copy(kv1.getFamilyArray(), kv1.getFamilyOffset(), kv1.getFamilyLength());
    byte[] qualifierArrayInKV = Bytes.copy(kv1.getQualifierArray(), kv1.getQualifierOffset(), kv1.getQualifierLength());
    byte[] valueArrayInKV = Bytes.copy(kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength());
    byte[] tagsArrayInKV = Bytes.copy(kv1.getTagsArray(), kv1.getTagsOffset(), kv1.getTagsLength());
    // getXXXArray() for family, qualifier, value and tags is supposed to return an empty byte array, rather than null.
    assertArrayEquals(familyArrayInKV, ic1.getFamilyArray());
    assertArrayEquals(qualifierArrayInKV, ic1.getQualifierArray());
    assertArrayEquals(valueArrayInKV, ic1.getValueArray());
    assertArrayEquals(tagsArrayInKV, ic1.getTagsArray());
    // getXXXLength() for family, qualifier, value and tags are supposed to return 0.
    assertEquals(kv1.getFamilyLength(), ic1.getFamilyLength());
    assertEquals(kv1.getQualifierLength(), ic1.getQualifierLength());
    assertEquals(kv1.getValueLength(), ic1.getValueLength());
    assertEquals(kv1.getTagsLength(), ic1.getTagsLength());
}
Also used: Type (org.apache.hadoop.hbase.KeyValue.Type), Test (org.junit.Test)

Example 3 with Type

Use of org.apache.hadoop.hbase.KeyValue.Type in project hbase by apache.

From the class TestCellUtil, the method testToString1:

@Test
public void testToString1() {
    String row = "test.row";
    String family = "test.family";
    String qualifier = "test.qualifier";
    long timestamp = 42;
    Type type = Type.Put;
    String value = "test.value";
    long seqId = 1042;
    Cell cell = CellUtil.createCell(Bytes.toBytes(row), Bytes.toBytes(family), Bytes.toBytes(qualifier), timestamp, type.getCode(), Bytes.toBytes(value), seqId);
    String nonVerbose = CellUtil.toString(cell, false);
    String verbose = CellUtil.toString(cell, true);
    System.out.println("nonVerbose=" + nonVerbose);
    System.out.println("verbose=" + verbose);
    Assert.assertEquals(String.format("%s/%s:%s/%d/%s/vlen=%s/seqid=%s", row, family, qualifier, timestamp, type.toString(), Bytes.toBytes(value).length, seqId), nonVerbose);
    Assert.assertEquals(String.format("%s/%s:%s/%d/%s/vlen=%s/seqid=%s/%s", row, family, qualifier, timestamp, type.toString(), Bytes.toBytes(value).length, seqId, value), verbose);
// TODO: test with tags
}
Also used: Type (org.apache.hadoop.hbase.KeyValue.Type), Test (org.junit.Test)

Example 4 with Type

Use of org.apache.hadoop.hbase.KeyValue.Type in project phoenix by apache.

From the class CoveredColumnIndexer, the method getIndexUpdateForFilteredRows:

@Override
public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(Collection<KeyValue> filtered, IndexMetaData indexMetaData) throws IOException {
    // stores all the return values
    IndexUpdateManager updateMap = new IndexUpdateManager(indexMetaData);
    // batch the updates by row so they stay ordered and are easier to process
    Collection<Batch> batches = batchByRow(filtered);
    for (Batch batch : batches) {
        Cell curKV = batch.getKvs().iterator().next();
        Put p = new Put(curKV.getRowArray(), curKV.getRowOffset(), curKV.getRowLength());
        for (Cell kv : batch.getKvs()) {
            // we only need to cleanup Put entries
            byte type = kv.getTypeByte();
            Type t = KeyValue.Type.codeToType(type);
            if (!t.equals(Type.Put)) {
                continue;
            }
            // add the kv independently
            p.add(kv);
        }
        // do the usual thing as for deletes
        Collection<Batch> timeBatch = createTimestampBatchesFromMutation(p);
        LocalTableState state = new LocalTableState(env, localTable, p);
        for (Batch entry : timeBatch) {
            // just set the timestamp on the table - it already has all the future state
            state.setCurrentTimestamp(entry.getTimestamp());
            this.addDeleteUpdatesToMap(updateMap, state, entry.getTimestamp(), indexMetaData);
        }
    }
    return updateMap.toMap();
}
Also used: Type (org.apache.hadoop.hbase.KeyValue.Type), IndexUpdateManager (org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager), Batch (org.apache.phoenix.hbase.index.covered.Batch), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), LocalTableState (org.apache.phoenix.hbase.index.covered.LocalTableState)
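
The decision in the loop above hinges on mapping a cell's raw type byte back to the KeyValue.Type enum, the same round trip Example 5 below uses to spot DeleteFamily markers. A minimal standalone sketch of that round trip follows; it is an assumed snippet (not part of CoveredColumnIndexer) and only needs hbase-common on the classpath.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;

public class TypeCodeRoundTripSketch {
    public static void main(String[] args) {
        // Each cell stores its type as a single byte; getCode() yields that byte for an enum constant.
        byte putCode = Type.Put.getCode();
        // codeToType() is the inverse mapping used above to decide whether a cell is a Put.
        Type decoded = KeyValue.Type.codeToType(putCode);
        System.out.println(decoded == Type.Put);                     // true
        // Delete markers carry distinct codes, e.g. DeleteFamily as checked in Example 5.
        System.out.println(Type.DeleteFamily.getCode() == putCode);  // false
    }
}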

Example 5 with Type

Use of org.apache.hadoop.hbase.KeyValue.Type in project phoenix by apache.

From the class MetaDataEndpointImpl, the method buildDeletedTable:

private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    for (Cell kv : results) {
        KeyValue.Type type = Type.codeToType(kv.getTypeByte());
        if (type == Type.DeleteFamily) {
            // Row was deleted
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PTable table = newDeletedTableMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, table);
            return table;
        }
    }
    return null;
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Scan (org.apache.hadoop.hbase.client.Scan), Type (org.apache.hadoop.hbase.KeyValue.Type), Cell (org.apache.hadoop.hbase.Cell), PTable (org.apache.phoenix.schema.PTable)

Aggregations

Type (org.apache.hadoop.hbase.KeyValue.Type) 5
Cell (org.apache.hadoop.hbase.Cell) 2
Test (org.junit.Test) 2
KeyValue (org.apache.hadoop.hbase.KeyValue) 1
Put (org.apache.hadoop.hbase.client.Put) 1
Scan (org.apache.hadoop.hbase.client.Scan) 1
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) 1
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner) 1
Batch (org.apache.phoenix.hbase.index.covered.Batch) 1
LocalTableState (org.apache.phoenix.hbase.index.covered.LocalTableState) 1
IndexUpdateManager (org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager) 1
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 1
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity) 1
PTable (org.apache.phoenix.schema.PTable) 1
BeforeClass (org.junit.BeforeClass) 1