
Example 56 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestStore defines the method testStoreUsesConfigurationFromHcdAndHtd.

@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
    final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
    long anyValue = 10;
    // We'll check that it uses correct config and propagates it appropriately by going thru
    // the simplest "real" path I can find - "throttleCompaction", which just checks whether
    // a number we pass in is higher than some config value, inside compactionPolicy.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong(CONFIG_KEY, anyValue);
    init(name.getMethodName() + "-xml", conf);
    Assert.assertTrue(store.throttleCompaction(anyValue + 1));
    Assert.assertFalse(store.throttleCompaction(anyValue));
    // HTD overrides XML.
    --anyValue;
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
    init(name.getMethodName() + "-htd", conf, htd, hcd);
    Assert.assertTrue(store.throttleCompaction(anyValue + 1));
    Assert.assertFalse(store.throttleCompaction(anyValue));
    // HCD overrides them both.
    --anyValue;
    hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
    init(name.getMethodName() + "-hcd", conf, htd, hcd);
    Assert.assertTrue(store.throttleCompaction(anyValue + 1));
    Assert.assertFalse(store.throttleCompaction(anyValue));
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), CompactionConfiguration (org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Test (org.junit.Test)
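
The test above exercises HBase's configuration layering: a key set on the HColumnDescriptor overrides the same key on the HTableDescriptor, which in turn overrides the cluster XML. A minimal standalone sketch of that precedence using the same descriptor classes; the table name "demo" and family name "info" are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class ConfigPrecedenceSketch {
    public static void main(String[] args) {
        final String key = "hbase.regionserver.thread.compaction.throttle";
        // Cluster/XML level: lowest precedence.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong(key, 10L);
        // Table level: overrides the XML value for this table's regions.
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
        htd.setConfiguration(key, "9");
        // Column-family level: highest precedence for this family's stores.
        HColumnDescriptor hcd = new HColumnDescriptor("info");
        hcd.setConfiguration(key, "8");
        htd.addFamily(hcd);
        // The region server composes these when it opens a Store; the family
        // value wins, then the table value, then the XML value.
        System.out.println("xml=" + conf.getLong(key, -1)
            + " table=" + htd.getConfigurationValue(key)
            + " family=" + hcd.getConfigurationValue(key));
    }
}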

Example 57 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestStoreFile defines the method bloomWriteRead.

private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception {
    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
    Path f = writer.getPath();
    long now = System.currentTimeMillis();
    for (int i = 0; i < 2000; i += 2) {
        String row = String.format(localFormatter, i);
        KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(), "col".getBytes(), now, "value".getBytes());
        writer.append(kv);
    }
    writer.close();
    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
    reader.loadFileInfo();
    reader.loadBloomfilter();
    StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
    // check false positives rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
        String row = String.format(localFormatter, i);
        TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
        columns.add("family:col".getBytes());
        Scan scan = new Scan(row.getBytes(), row.getBytes());
        scan.addColumn("family".getBytes(), "family:col".getBytes());
        Store store = mock(Store.class);
        HColumnDescriptor hcd = mock(HColumnDescriptor.class);
        when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
        when(store.getFamily()).thenReturn(hcd);
        boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
        if (i % 2 == 0) {
            if (!exists)
                falseNeg++;
        } else {
            if (exists)
                falsePos++;
        }
    }
    // evict because we are about to delete the file
    reader.close(true);
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + maxFalsePos + ")", falsePos <= maxFalsePos);
}
Also used: Path (org.apache.hadoop.fs.Path), KeyValue (org.apache.hadoop.hbase.KeyValue), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), TreeSet (java.util.TreeSet), Scan (org.apache.hadoop.hbase.client.Scan)
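
In the loop above only even-numbered rows are written, so when all 2000 rows are probed, the absent odd rows measure the bloom filter's false-positive rate while the even rows must never be reported absent; the final assertion allows 2 * 2000 * err false positives, a deliberately loose bound since only 1000 probes actually hit absent rows. A minimal sketch of how a row-level bloom filter and its target error rate are configured, assuming the BloomFilterFactory constant and BloomType enum; the family name is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.BloomFilterFactory;

public class BloomConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Target false-positive rate applied when store file blooms are written.
        conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0.01f);
        // ROW blooms key on the row; ROWCOL would key on row plus column.
        HColumnDescriptor hcd = new HColumnDescriptor("family");
        hcd.setBloomFilterType(BloomType.ROW);
        // The generous bound used by the test above.
        float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0f);
        System.out.println("max allowed false positives = " + (int) (2 * 2000 * err));
    }
}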

Example 58 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestTags defines the method testFlushAndCompactionwithCombinations.

@Test
public void testFlushAndCompactionwithCombinations() throws Exception {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] fam = Bytes.toBytes("info");
    byte[] row = Bytes.toBytes("rowa");
    // column names
    byte[] qual = Bytes.toBytes("qual");
    byte[] row1 = Bytes.toBytes("rowb");
    byte[] row2 = Bytes.toBytes("rowc");
    byte[] rowd = Bytes.toBytes("rowd");
    byte[] rowe = Bytes.toBytes("rowe");
    Table table = null;
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        HColumnDescriptor colDesc = new HColumnDescriptor(fam);
        colDesc.setBlockCacheEnabled(true);
        colDesc.setDataBlockEncoding(encoding);
        desc.addFamily(colDesc);
        Admin admin = TEST_UTIL.getAdmin();
        admin.createTable(desc);
        try {
            table = TEST_UTIL.getConnection().getTable(tableName);
            Put put = new Put(row);
            byte[] value = Bytes.toBytes("value");
            put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
            int bigTagLen = Short.MAX_VALUE - 5;
            put.setAttribute("visibility", new byte[bigTagLen]);
            table.put(put);
            Put put1 = new Put(row1);
            byte[] value1 = Bytes.toBytes("1000dfsdf");
            put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
            table.put(put1);
            admin.flush(tableName);
            // We are lacking an API for confirming that a flush request has
            // completed, similar to the compaction state check below. Just
            // sleep for a short time. We won't be able to confirm flush
            // completion, but the test won't hang now or in the future if the
            // default compaction policy causes a compaction between the flush
            // and when we go to confirm it.
            Thread.sleep(1000);
            put1 = new Put(row2);
            value1 = Bytes.toBytes("1000dfsdf");
            put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
            table.put(put1);
            admin.flush(tableName);
            Thread.sleep(1000);
            Put put2 = new Put(rowd);
            byte[] value2 = Bytes.toBytes("1000dfsdf");
            put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
            table.put(put2);
            put2 = new Put(rowe);
            value2 = Bytes.toBytes("1000dfsddfdf");
            put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
            put.setAttribute("visibility", Bytes.toBytes("ram"));
            table.put(put2);
            admin.flush(tableName);
            Thread.sleep(1000);
            TestCoprocessorForTags.checkTagPresence = true;
            Scan s = new Scan(row);
            s.setCaching(1);
            ResultScanner scanner = table.getScanner(s);
            try {
                Result next = null;
                while ((next = scanner.next()) != null) {
                    CellScanner cellScanner = next.cellScanner();
                    cellScanner.advance();
                    Cell current = cellScanner.current();
                    if (CellUtil.matchingRow(current, row)) {
                        assertEquals(1, TestCoprocessorForTags.tags.size());
                        Tag tag = TestCoprocessorForTags.tags.get(0);
                        assertEquals(bigTagLen, tag.getValueLength());
                    } else {
                        assertEquals(0, TestCoprocessorForTags.tags.size());
                    }
                }
            } finally {
                if (scanner != null) {
                    scanner.close();
                }
                TestCoprocessorForTags.checkTagPresence = false;
            }
            while (admin.getCompactionState(tableName) != CompactionState.NONE) {
                Thread.sleep(10);
            }
            TestCoprocessorForTags.checkTagPresence = true;
            scanner = table.getScanner(s);
            try {
                Result next = null;
                while ((next = scanner.next()) != null) {
                    CellScanner cellScanner = next.cellScanner();
                    cellScanner.advance();
                    Cell current = cellScanner.current();
                    if (CellUtil.matchingRow(current, row)) {
                        assertEquals(1, TestCoprocessorForTags.tags.size());
                        Tag tag = TestCoprocessorForTags.tags.get(0);
                        assertEquals(bigTagLen, tag.getValueLength());
                    } else {
                        assertEquals(0, TestCoprocessorForTags.tags.size());
                    }
                }
            } finally {
                if (scanner != null) {
                    scanner.close();
                }
                TestCoprocessorForTags.checkTagPresence = false;
            }
        } finally {
            if (table != null) {
                table.close();
            }
            // delete the table
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }
    }
}
Also used: DataBlockEncoding (org.apache.hadoop.hbase.io.encoding.DataBlockEncoding), Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Admin (org.apache.hadoop.hbase.client.Admin), CellScanner (org.apache.hadoop.hbase.CellScanner), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), Scan (org.apache.hadoop.hbase.client.Scan), ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag), Tag (org.apache.hadoop.hbase.Tag), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
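
The outer loop above recreates the table once per DataBlockEncoding value so the oversized-tag behaviour is verified under every block encoder. A minimal sketch of just that per-encoding descriptor setup; the table and family names are placeholders:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class EncodingSketch {
    public static void main(String[] args) {
        // One table descriptor per encoding, mirroring the loop in the test above.
        for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_" + encoding));
            HColumnDescriptor colDesc = new HColumnDescriptor("info");
            colDesc.setBlockCacheEnabled(true);
            colDesc.setDataBlockEncoding(encoding);
            desc.addFamily(colDesc);
            System.out.println(encoding + " -> " + colDesc.getDataBlockEncoding());
        }
    }
}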

Example 59 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestTags defines the method testTagsWithAppendAndIncrement.

@Test
public void testTagsWithAppendAndIncrement() throws Exception {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] f = Bytes.toBytes("f");
    byte[] q = Bytes.toBytes("q");
    byte[] row1 = Bytes.toBytes("r1");
    byte[] row2 = Bytes.toBytes("r2");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(f);
    desc.addFamily(colDesc);
    TEST_UTIL.getAdmin().createTable(desc);
    Table table = null;
    try {
        table = TEST_UTIL.getConnection().getTable(tableName);
        Put put = new Put(row1);
        byte[] v = Bytes.toBytes(2L);
        put.addColumn(f, q, v);
        put.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.put(put);
        Increment increment = new Increment(row1);
        increment.addColumn(f, q, 1L);
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        ResultScanner scanner = table.getScanner(new Scan());
        Result result = scanner.next();
        KeyValue kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        List<Tag> tags = TestCoprocessorForTags.tags;
        assertEquals(3L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(1, tags.size());
        assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        increment = new Increment(row1);
        increment.add(new KeyValue(row1, f, q, 1234L, v));
        increment.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan());
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(5L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(2, tags.size());
        // We cannot assume the ordering of tags
        List<String> tagValues = new ArrayList<>();
        for (Tag tag : tags) {
            tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
        }
        assertTrue(tagValues.contains("tag1"));
        assertTrue(tagValues.contains("tag2"));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        put = new Put(row2);
        v = Bytes.toBytes(2L);
        put.addColumn(f, q, v);
        table.put(put);
        increment = new Increment(row2);
        increment.add(new KeyValue(row2, f, q, 1234L, v));
        increment.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.increment(increment);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().setStartRow(row2));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(4L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
        assertEquals(1, tags.size());
        assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        // Test Append
        byte[] row3 = Bytes.toBytes("r3");
        put = new Put(row3);
        put.addColumn(f, q, Bytes.toBytes("a"));
        put.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.put(put);
        Append append = new Append(row3);
        append.add(f, q, Bytes.toBytes("b"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().setStartRow(row3));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(1, tags.size());
        assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        append = new Append(row3);
        append.add(new KeyValue(row3, f, q, 1234L, v));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().setStartRow(row3));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(2, tags.size());
        // We cannot assume the ordering of tags
        tagValues.clear();
        for (Tag tag : tags) {
            tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
        }
        assertTrue(tagValues.contains("tag1"));
        assertTrue(tagValues.contains("tag2"));
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        byte[] row4 = Bytes.toBytes("r4");
        put = new Put(row4);
        put.addColumn(f, q, Bytes.toBytes("a"));
        table.put(put);
        append = new Append(row4);
        append.add(new KeyValue(row4, f, q, 1234L, v));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
        TestCoprocessorForTags.checkTagPresence = true;
        scanner = table.getScanner(new Scan().setStartRow(row4));
        result = scanner.next();
        kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
        tags = TestCoprocessorForTags.tags;
        assertEquals(1, tags.size());
        assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
    } finally {
        TestCoprocessorForTags.checkTagPresence = false;
        TestCoprocessorForTags.tags = null;
        if (table != null) {
            table.close();
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), KeyValue (org.apache.hadoop.hbase.KeyValue), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), Append (org.apache.hadoop.hbase.client.Append), Increment (org.apache.hadoop.hbase.client.Increment), Scan (org.apache.hadoop.hbase.client.Scan), ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag), Tag (org.apache.hadoop.hbase.Tag), Test (org.junit.Test)
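
The "visibility" attribute set on these mutations only becomes a cell tag because the test installs TestCoprocessorForTags; a stock cluster would ignore the attribute. With that assumption, a minimal sketch of the Increment and Append calls the test builds, with the second qualifier added here for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementAppendSketch {
    // Bumps a counter cell, tagging the increment via a mutation attribute,
    // then appends a suffix to a separate text cell in the same row.
    static void mutate(Table table, byte[] row, byte[] family,
            byte[] counterQual, byte[] textQual) throws IOException {
        Increment increment = new Increment(row);
        increment.addColumn(family, counterQual, 1L);
        // Attributes travel with the RPC; in the test above it is the
        // TestCoprocessorForTags coprocessor that turns "visibility" into a tag.
        increment.setAttribute("visibility", Bytes.toBytes("tag1"));
        table.increment(increment);

        Append append = new Append(row);
        append.add(family, textQual, Bytes.toBytes("-suffix"));
        append.setAttribute("visibility", Bytes.toBytes("tag2"));
        table.append(append);
    }
}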

Example 60 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestTags defines the method testFlushAndCompactionWithoutTags.

@Test
public void testFlushAndCompactionWithoutTags() throws Exception {
    Table table = null;
    try {
        TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
        byte[] fam = Bytes.toBytes("info");
        byte[] row = Bytes.toBytes("rowa");
        // column names
        byte[] qual = Bytes.toBytes("qual");
        byte[] row1 = Bytes.toBytes("rowb");
        byte[] row2 = Bytes.toBytes("rowc");
        HTableDescriptor desc = new HTableDescriptor(tableName);
        HColumnDescriptor colDesc = new HColumnDescriptor(fam);
        colDesc.setBlockCacheEnabled(true);
        // colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
        colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
        desc.addFamily(colDesc);
        Admin admin = TEST_UTIL.getAdmin();
        admin.createTable(desc);
        table = TEST_UTIL.getConnection().getTable(tableName);
        Put put = new Put(row);
        byte[] value = Bytes.toBytes("value");
        put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
        table.put(put);
        admin.flush(tableName);
        // We are lacking an API for confirming that a flush request has
        // completed, similar to the compaction state check below. Just
        // sleep for a short time. We won't be able to confirm flush
        // completion, but the test won't hang now or in the future if the
        // default compaction policy causes a compaction between the flush
        // and when we go to confirm it.
        Thread.sleep(1000);
        Put put1 = new Put(row1);
        byte[] value1 = Bytes.toBytes("1000dfsdf");
        put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
        table.put(put1);
        admin.flush(tableName);
        Thread.sleep(1000);
        Put put2 = new Put(row2);
        byte[] value2 = Bytes.toBytes("1000dfsdf");
        put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
        table.put(put2);
        admin.flush(tableName);
        Thread.sleep(1000);
        Scan s = new Scan(row);
        ResultScanner scanner = table.getScanner(s);
        try {
            Result[] next = scanner.next(3);
            for (Result result : next) {
                CellScanner cellScanner = result.cellScanner();
                cellScanner.advance();
                Cell current = cellScanner.current();
                assertEquals(0, current.getTagsLength());
            }
        } finally {
            if (scanner != null)
                scanner.close();
        }
        admin.compact(tableName);
        while (admin.getCompactionState(tableName) != CompactionState.NONE) {
            Thread.sleep(10);
        }
        s = new Scan(row);
        scanner = table.getScanner(s);
        try {
            Result[] next = scanner.next(3);
            for (Result result : next) {
                CellScanner cellScanner = result.cellScanner();
                cellScanner.advance();
                Cell current = cellScanner.current();
                assertEquals(0, current.getTagsLength());
            }
        } finally {
            if (scanner != null) {
                scanner.close();
            }
        }
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Admin (org.apache.hadoop.hbase.client.Admin), CellScanner (org.apache.hadoop.hbase.CellScanner), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
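
The flushes above are followed by fixed sleeps, and the compaction is confirmed by polling Admin.getCompactionState until it reports NONE. A minimal sketch of that flush, compact, and wait pattern, assuming the org.apache.hadoop.hbase.client.CompactionState enum used by these tests:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

public class CompactionWaitSketch {
    // Flushes memstores to store files, requests a compaction, then polls
    // until no compaction is reported for the table.
    static void flushAndCompact(Admin admin, TableName tableName)
            throws IOException, InterruptedException {
        admin.flush(tableName);
        admin.compact(tableName);
        while (admin.getCompactionState(tableName) != CompactionState.NONE) {
            Thread.sleep(10);
        }
    }
}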

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 679
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 561
Test (org.junit.Test): 358
TableName (org.apache.hadoop.hbase.TableName): 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137
Put (org.apache.hadoop.hbase.client.Put): 132
Table (org.apache.hadoop.hbase.client.Table): 118
IOException (java.io.IOException): 112
Admin (org.apache.hadoop.hbase.client.Admin): 112
Path (org.apache.hadoop.fs.Path): 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 74
ArrayList (java.util.ArrayList): 66
Configuration (org.apache.hadoop.conf.Configuration): 65
Connection (org.apache.hadoop.hbase.client.Connection): 52
Scan (org.apache.hadoop.hbase.client.Scan): 50
Result (org.apache.hadoop.hbase.client.Result): 45
FileSystem (org.apache.hadoop.fs.FileSystem): 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42
Connection (java.sql.Connection): 41
Properties (java.util.Properties): 38