Example 61 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestFIFOCompactionPolicy, method testSanityCheckMinVersion.

@Test
public void testSanityCheckMinVersion() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
    TEST_UTIL.startMiniCluster(1);
    Admin admin = TEST_UTIL.getAdmin();
    TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, FIFOCompactionPolicy.class.getName());
    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName());
    HColumnDescriptor colDesc = new HColumnDescriptor(family);
    // TTL of 1 second
    colDesc.setTimeToLive(1);
    colDesc.setMinVersions(1);
    desc.addFamily(colDesc);
    try {
        admin.createTable(desc);
        Assert.fail("Expected table creation to be rejected by the FIFO compaction sanity check");
    } catch (Exception e) {
        // Expected: FIFO compaction requires the column family to keep MIN_VERSIONS at 0.
    } finally {
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) DisabledRegionSplitPolicy(org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy) Admin(org.apache.hadoop.hbase.client.Admin) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
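
For contrast, a column family that keeps MIN_VERSIONS at its default of 0 and sets a finite TTL should pass the same sanity check. The sketch below is illustrative only; the table name and TTL value are assumptions, and it reuses the family field, the Admin handle, and the imports from the test above.

// Minimal sketch, assuming the same cluster setup as testSanityCheckMinVersion.
// FIFO compaction expects a finite TTL and MIN_VERSIONS left at 0 on the column family.
HTableDescriptor okDesc = new HTableDescriptor(TableName.valueOf("fifo-ok"));
okDesc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, FIFOCompactionPolicy.class.getName());
okDesc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName());
HColumnDescriptor okCol = new HColumnDescriptor(family);
okCol.setTimeToLive(60 * 60 * 24); // finite TTL; setMinVersions is intentionally not called
okDesc.addFamily(okCol);
admin.createTable(okDesc); // expected to succeed, unlike the MIN_VERSIONS > 0 case above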

Example 62 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestFIFOCompactionPolicy, method prepareData.

private Store prepareData() throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, FIFOCompactionPolicy.class.getName());
    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, DisabledRegionSplitPolicy.class.getName());
    HColumnDescriptor colDesc = new HColumnDescriptor(family);
    // TTL of 1 second
    colDesc.setTimeToLive(1);
    desc.addFamily(colDesc);
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    Random rand = new Random();
    TimeOffsetEnvironmentEdge edge = (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            byte[] value = new byte[128 * 1024];
            rand.nextBytes(value);
            table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
        }
        // Flush once per outer iteration so each store file covers its own time window.
        admin.flush(tableName);
        // Advance the injected clock past the 1 second TTL.
        edge.increment(1001);
    }
    return getStoreWithName(tableName);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Random(java.util.Random) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) DisabledRegionSplitPolicy(org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy) Admin(org.apache.hadoop.hbase.client.Admin) TimeOffsetEnvironmentEdge(org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
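
The cast to TimeOffsetEnvironmentEdge only succeeds because the test class installs that edge before prepareData runs. A hypothetical setup sketch (the actual wiring in TestFIFOCompactionPolicy may differ) would be:

// Hypothetical sketch: install a controllable clock once for the test class so that
// prepareData's cast succeeds and edge.increment(...) can fast-forward past the TTL
// without real sleeps.
@BeforeClass
public static void setUpClock() {
    EnvironmentEdgeManager.injectEdge(new TimeOffsetEnvironmentEdge());
}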

Example 63 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestStore, method testDeleteExpiredStoreFiles.

/*
   * @param minVersions the MIN_VERSIONS for the column family
   */
public void testDeleteExpiredStoreFiles(int minVersions) throws Exception {
    int storeFileNum = 4;
    int ttl = 4;
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(edge);
    Configuration conf = HBaseConfiguration.create();
    // Enable the expired store file deletion
    conf.setBoolean("hbase.store.delete.expired.storefile", true);
    // Set the compaction threshold higher to avoid normal compactions.
    conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 5);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMinVersions(minVersions);
    hcd.setTimeToLive(ttl);
    init(name.getMethodName() + "-" + minVersions, conf, hcd);
    long storeTtl = this.store.getScanInfo().getTtl();
    long sleepTime = storeTtl / storeFileNum;
    long timeStamp;
    // Each store file covers roughly (storeTtl / storeFileNum) ms of data.
    for (int i = 1; i <= storeFileNum; i++) {
        LOG.info("Adding some data for the store file #" + i);
        timeStamp = EnvironmentEdgeManager.currentTime();
        this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null), null);
        this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null), null);
        this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null), null);
        flush(i);
        edge.incrementTime(sleepTime);
    }
    // Verify the total number of store files
    Assert.assertEquals(storeFileNum, this.store.getStorefiles().size());
    // No compaction will run because of the raised threshold; the last store file is kept even after it expires.
    for (int i = 1; i <= storeFileNum - 1; i++) {
        // verify the expired store file.
        assertNull(this.store.requestCompaction());
        Collection<StoreFile> sfs = this.store.getStorefiles();
        // Ensure i files are gone.
        if (minVersions == 0) {
            assertEquals(storeFileNum - i, sfs.size());
            // Ensure only non-expired files remain.
            for (StoreFile sf : sfs) {
                assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl));
            }
        } else {
            assertEquals(storeFileNum, sfs.size());
        }
        // Let the next store file expire.
        edge.incrementTime(sleepTime);
    }
    assertNull(this.store.requestCompaction());
    Collection<StoreFile> sfs = this.store.getStorefiles();
    // Assert the last expired file is not removed.
    if (minVersions == 0) {
        assertEquals(1, sfs.size());
    }
    long ts = sfs.iterator().next().getReader().getMaxTimestamp();
    assertTrue(ts < (edge.currentTime() - storeTtl));
    for (StoreFile sf : sfs) {
        sf.closeReader(true);
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) CompactionConfiguration(org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge)
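
This is a parameterized helper rather than a test in itself; in the test class it is presumably driven by @Test entry points covering both the MIN_VERSIONS = 0 and MIN_VERSIONS = 1 cases, roughly like the hypothetical sketch below.

// Hypothetical driver sketch (the actual wrapper in TestStore may differ):
// with MIN_VERSIONS = 0 expired store files may be dropped, with MIN_VERSIONS = 1 they must be kept.
@Test
public void testDeleteExpiredStoreFiles() throws Exception {
    testDeleteExpiredStoreFiles(0);
    testDeleteExpiredStoreFiles(1);
}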

Example 64 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class TestStoreFile, method testEmptyStoreFileRestrictKeyRanges.

@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
    StoreFileReader reader = mock(StoreFileReader.class);
    Store store = mock(Store.class);
    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
    byte[] cf = Bytes.toBytes("ty");
    when(hcd.getName()).thenReturn(cf);
    when(store.getFamily()).thenReturn(hcd);
    StoreFileScanner scanner = new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true);
    Scan scan = new Scan();
    scan.setColumnFamilyTimeRange(cf, 0, 1);
    assertFalse(scanner.shouldUseScanner(scan, store, 0));
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
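
The mocks above exercise the server-side shouldUseScanner check directly; a client expresses the same per-family time window on the Scan it sends. A minimal, illustrative usage follows; the Connection named connection, the table name, and the extra Result/ResultScanner imports are assumptions, not part of the test.

// Illustrative client-side sketch: restrict one family to the time window [0, 1),
// so stores whose files fall entirely outside it can be skipped on the server.
Scan scan = new Scan();
scan.setColumnFamilyTimeRange(Bytes.toBytes("ty"), 0L, 1L);
try (Table table = connection.getTable(TableName.valueOf("some-table"));
     ResultScanner rs = table.getScanner(scan)) {
    for (Result r : rs) {
        // process rows; with such a narrow window nothing is normally expected back
    }
}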

Example 65 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

From the class MobSnapshotTestingUtils, method createMobTable.

private static void createMobTable(final HBaseTestingUtility util, final TableName tableName, final byte[][] splitKeys, int regionReplication, final byte[]... families) throws IOException, InterruptedException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setRegionReplication(regionReplication);
    for (byte[] family : families) {
        HColumnDescriptor hcd = new HColumnDescriptor(family);
        hcd.setMobEnabled(true);
        hcd.setMobThreshold(0L);
        htd.addFamily(hcd);
    }
    util.getAdmin().createTable(htd, splitKeys);
    SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
    assertEquals((splitKeys.length + 1) * regionReplication, util.getAdmin().getTableRegions(tableName).size());
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
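
A caller supplies the split keys, the desired region replication, and the families to be MOB-enabled. A hypothetical invocation is sketched below; the HBaseTestingUtility named TEST_UTIL, the table name, and the family names are made up for illustration.

// Hypothetical call sketch: one split key yields two regions per replica, so with
// regionReplication = 2 the assertion above expects (1 + 1) * 2 = 4 regions in total.
byte[][] splitKeys = new byte[][] { Bytes.toBytes("m") };
createMobTable(TEST_UTIL, TableName.valueOf("mob-snapshot-test"), splitKeys, 2,
    Bytes.toBytes("f1"), Bytes.toBytes("f2"));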

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 679
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 561
Test (org.junit.Test): 358
TableName (org.apache.hadoop.hbase.TableName): 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137
Put (org.apache.hadoop.hbase.client.Put): 132
Table (org.apache.hadoop.hbase.client.Table): 118
IOException (java.io.IOException): 112
Admin (org.apache.hadoop.hbase.client.Admin): 112
Path (org.apache.hadoop.fs.Path): 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 74
ArrayList (java.util.ArrayList): 66
Configuration (org.apache.hadoop.conf.Configuration): 65
Connection (org.apache.hadoop.hbase.client.Connection): 52
Scan (org.apache.hadoop.hbase.client.Scan): 50
Result (org.apache.hadoop.hbase.client.Result): 45
FileSystem (org.apache.hadoop.fs.FileSystem): 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42
Connection (java.sql.Connection): 41
Properties (java.util.Properties): 38