
Example 86 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

In class TestFilterFromRegionSide, method setUpBeforeClass:

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    // add one column family per configured test family
    for (byte[] family : FAMILIES) {
        HColumnDescriptor hcd = new HColumnDescriptor(family);
        htd.addFamily(hcd);
    }
    // create a standalone region (with its own WAL) in the test data directory and load the test data
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    REGION = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    for (Put put : createPuts(ROWS, FAMILIES, QUALIFIERS, VALUE)) {
        REGION.put(put);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BeforeClass(org.junit.BeforeClass)
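
As a companion to the bare HColumnDescriptor above, here is a minimal sketch of how a family descriptor is commonly tuned before being added to a table descriptor. The table name, family name, and every setting value are illustrative assumptions, not taken from the test; the extra imports needed are org.apache.hadoop.hbase.io.compress.Compression and org.apache.hadoop.hbase.regionserver.BloomType.

private static HTableDescriptor buildExampleDescriptor() {
    // Illustrative values only; pick settings appropriate to the workload.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
    // keep up to three versions of each cell
    hcd.setMaxVersions(3);
    // expire cells after one week (TTL is given in seconds)
    hcd.setTimeToLive(7 * 24 * 60 * 60);
    // on-disk compression and a row-level bloom filter for this family
    hcd.setCompressionType(Compression.Algorithm.SNAPPY);
    hcd.setBloomFilterType(BloomType.ROW);
    htd.addFamily(hcd);
    return htd;
}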

Example 87 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

In class TestRegionObserverInterface, method testCompactionOverride:

/**
   * Tests overriding compaction handling via coprocessor hooks
   * @throws Exception
   */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
    final TableName compactTable = TableName.valueOf(name.getMethodName());
    Admin admin = util.getAdmin();
    if (admin.tableExists(compactTable)) {
        admin.disableTable(compactTable);
        admin.deleteTable(compactTable);
    }
    HTableDescriptor htd = new HTableDescriptor(compactTable);
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);
    Table table = util.getConnection().getTable(compactTable);
    for (long i = 1; i <= 10; i++) {
        byte[] iBytes = Bytes.toBytes(i);
        Put put = new Put(iBytes);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(A, A, iBytes);
        table.put(put);
    }
    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
    // force a compaction
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    // wait for flush
    for (int i = 0; i < 10; i++) {
        if (compactor.lastFlush >= ts) {
            break;
        }
        Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");
    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    // wait for compaction
    for (int i = 0; i < 30; i++) {
        if (compactor.lastCompaction >= ts) {
            break;
        }
        Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
    // only even rows should remain
    ResultScanner scanner = table.getScanner(new Scan());
    try {
        for (long i = 2; i <= 10; i += 2) {
            Result r = scanner.next();
            assertNotNull(r);
            assertFalse(r.isEmpty());
            byte[] iBytes = Bytes.toBytes(i);
            assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
            assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
        }
    } finally {
        scanner.close();
    }
    table.close();
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
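
The test registers its coprocessor with the single-argument addCoprocessor overload. Below is a minimal sketch of the overload that also takes a priority and per-coprocessor configuration; the observer class name and the property key/value are hypothetical, and the extra imports assumed are java.util.HashMap, java.util.Map and java.io.IOException.

private static HTableDescriptor describeTableWithObserver(TableName tableName) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("a")));
    // Hypothetical observer class and property, for illustration only.
    Map<String, String> observerConf = new HashMap<>();
    observerConf.put("example.threshold", "2");
    // A null jar path means the class must already be on the region server classpath.
    htd.addCoprocessor("org.example.ExampleRegionObserver", null, Coprocessor.PRIORITY_USER, observerConf);
    return htd;
}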

Example 88 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

In class TestFilterWrapper, method createTable:

private static void createTable() {
    assertNotNull("HBaseAdmin is not initialized successfully.", admin);
    if (admin != null) {
        HTableDescriptor desc = new HTableDescriptor(name);
        HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1"));
        desc.addFamily(coldef);
        try {
            admin.createTable(desc);
            assertTrue("Fail to create the table", admin.tableExists(name));
        } catch (IOException e) {
            // if creation throws, fail the test with the exception attached
            assertNull("Exception found while creating table", e);
        }
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
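
A matching teardown would disable and then delete the table; the sketch below is a minimal illustration under the assumption that the same admin and name fields used in createTable are in scope.

private static void deleteTable() throws IOException {
    // Symmetric cleanup: a table must be disabled before it can be deleted.
    if (admin != null && admin.tableExists(name)) {
        admin.disableTable(name);
        admin.deleteTable(name);
    }
}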

Example 89 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

In class TestHFileOutputFormat2, method testColumnFamilySettings:

/**
   * Test that {@link HFileOutputFormat2} RecordWriter uses compression and
   * bloom filter settings from the column family descriptor
   */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testColumnFamilySettings() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, Cell> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("testColumnFamilySettings");
    // Setup table descriptor
    Table table = Mockito.mock(Table.class);
    RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    Mockito.doReturn(htd).when(table).getTableDescriptor();
    for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) {
        htd.addFamily(hcd);
    }
    // set up the table to return some mock keys
    setupMockStartKeys(regionLocator);
    try {
        // partial map red setup to get an operational writer for testing
        // We turn off the sequence file compression, because DefaultCodec
        // pollutes the GZip codec pool with an incompatible compressor.
        conf.set("io.seqfile.compression.type", "NONE");
        conf.set("hbase.fs.tmp.dir", dir.toString());
        // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs
        conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, false);
        Job job = new Job(conf, "testLocalMRIncrementalLoad");
        job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
        setupRandomGeneratorMapper(job, false);
        HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
        FileOutputFormat.setOutputPath(job, dir);
        context = createTestTaskAttemptContext(job);
        HFileOutputFormat2 hof = new HFileOutputFormat2();
        writer = hof.getRecordWriter(context);
        // write out random rows
        writeRandomKeyValues(writer, context, htd.getFamiliesKeys(), ROWSPERSPLIT);
        writer.close(context);
        // Make sure that a directory was created for every CF
        FileSystem fs = dir.getFileSystem(conf);
        // commit so that the filesystem has one directory per column family
        hof.getOutputCommitter(context).commitTask(context);
        hof.getOutputCommitter(context).commitJob(context);
        FileStatus[] families = FSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
        assertEquals(htd.getFamilies().size(), families.length);
        for (FileStatus f : families) {
            String familyStr = f.getPath().getName();
            HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(familyStr));
            // verify that the compression on this file matches the configured
            // compression
            Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
            Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), conf);
            Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
            byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
            if (bloomFilter == null)
                bloomFilter = Bytes.toBytes("NONE");
            assertEquals("Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)));
            assertEquals("Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", hcd.getCompressionType(), reader.getFileContext().getCompression());
        }
    } finally {
        dir.getFileSystem(conf).delete(dir, true);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Reader(org.apache.hadoop.hbase.io.hfile.HFile.Reader) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Job(org.apache.hadoop.mapreduce.Job) Cell(org.apache.hadoop.hbase.Cell) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) FSUtils(org.apache.hadoop.hbase.util.FSUtils) Ignore(org.junit.Ignore) Test(org.junit.Test)
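
The compression and bloom settings that the test reads back originate from the HColumnDescriptor instances handed to configureIncrementalLoad. The sketch below shows that configuration step in isolation, using the same (Job, HTableDescriptor, RegionLocator) overload as the test; the table name, family name, and codec choices are assumptions, and the extra imports would be org.apache.hadoop.hbase.io.encoding.DataBlockEncoding, org.apache.hadoop.hbase.regionserver.BloomType, org.apache.hadoop.hbase.io.compress.Compression and java.io.IOException.

private static void configureBulkLoad(Job job, RegionLocator regionLocator) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    // Per-family choices are illustrative; configureIncrementalLoad copies them into the job configuration.
    HColumnDescriptor infoFamily = new HColumnDescriptor(Bytes.toBytes("info"));
    infoFamily.setCompressionType(Compression.Algorithm.GZ);
    infoFamily.setBloomFilterType(BloomType.ROW);
    infoFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    htd.addFamily(infoFamily);
    HFileOutputFormat2.configureIncrementalLoad(job, htd, regionLocator);
}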

Example 90 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

In class TestImportExport, method testWithDeletes:

@Test
public void testWithDeletes() throws Exception {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE));
    UTIL.getAdmin().createTable(desc);
    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
        Put p = new Put(ROW1);
        p.addColumn(FAMILYA, QUAL, now, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
        t.put(p);
        Delete d = new Delete(ROW1, now + 3);
        t.delete(d);
        d = new Delete(ROW1);
        d.addColumns(FAMILYA, QUAL, now + 2);
        t.delete(d);
    }
    // The trailing "1000" is the maximum number of key versions per key to export.
    String[] args = new String[] { "-D" + Export.RAW_SCAN + "=true", name.getMethodName(), FQ_OUTPUT_DIR, "1000" };
    assertTrue(runExport(args));
    final String IMPORT_TABLE = name.getMethodName() + "import";
    desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
    desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE));
    UTIL.getAdmin().createTable(desc);
    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
        args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR };
        assertTrue(runImport(args));
        Scan s = new Scan();
        s.setMaxVersions();
        s.setRaw(true);
        ResultScanner scanner = t.getScanner(s);
        Result r = scanner.next();
        Cell[] res = r.rawCells();
        assertTrue(CellUtil.isDeleteFamily(res[0]));
        assertEquals(now + 4, res[1].getTimestamp());
        assertEquals(now + 3, res[2].getTimestamp());
        assertTrue(CellUtil.isDelete(res[3]));
        assertEquals(now + 2, res[4].getTimestamp());
        assertEquals(now + 1, res[5].getTimestamp());
        assertEquals(now, res[6].getTimestamp());
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
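
The verification step depends on a raw, all-versions scan so that delete markers show up in the results. A standalone sketch of that scan pattern follows; the connection handling and table name are assumptions, with java.io.IOException and org.apache.hadoop.hbase.client.Connection as the extra imports.

private static void printRawCells(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("example_table"))) {
        Scan scan = new Scan();
        // return every stored version and keep delete markers in the results
        scan.setMaxVersions();
        scan.setRaw(true);
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                for (Cell cell : result.rawCells()) {
                    // CellUtil.isDelete(cell) identifies the delete markers surfaced by the raw scan
                    System.out.println(cell + " delete=" + CellUtil.isDelete(cell));
                }
            }
        }
    }
}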

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 679
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 561
Test (org.junit.Test): 358
TableName (org.apache.hadoop.hbase.TableName): 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137
Put (org.apache.hadoop.hbase.client.Put): 132
Table (org.apache.hadoop.hbase.client.Table): 118
IOException (java.io.IOException): 112
Admin (org.apache.hadoop.hbase.client.Admin): 112
Path (org.apache.hadoop.fs.Path): 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 74
ArrayList (java.util.ArrayList): 66
Configuration (org.apache.hadoop.conf.Configuration): 65
Connection (org.apache.hadoop.hbase.client.Connection): 52
Scan (org.apache.hadoop.hbase.client.Scan): 50
Result (org.apache.hadoop.hbase.client.Result): 45
FileSystem (org.apache.hadoop.fs.FileSystem): 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42
Connection (java.sql.Connection): 41
Properties (java.util.Properties): 38