Search in sources :

Example 1 with IncrementingEnvironmentEdge

use of org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge in project hbase by apache.

In the class TestStore, the method testDeleteExpiredStoreFiles:

/**
 * Verifies removal of expired store files (hbase.store.delete.expired.storefile).
 * Four store files are written whose max timestamps are staggered by ttl/4 using an
 * injected controllable clock; the clock is then advanced so that one file crosses
 * the TTL boundary per iteration. With MIN_VERSIONS == 0 the expired files are
 * archived; with MIN_VERSIONS &gt; 0 all files must be retained. In either case the
 * newest file is kept even after it has expired.
 *
 * @param minVersions the MIN_VERSIONS for the column family
 */
public void testDeleteExpiredStoreFiles(int minVersions) throws Exception {
    int storeFileNum = 4;
    // TTL in seconds; the store's ScanInfo reports it in millis (storeTtl below).
    int ttl = 4;
    // Controllable clock so files can be "aged" without sleeping.
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(edge);
    Configuration conf = HBaseConfiguration.create();
    // Enable the expired store file deletion.
    conf.setBoolean("hbase.store.delete.expired.storefile", true);
    // Set the compaction threshold higher than storeFileNum so no normal
    // compaction can merge the files; only expired-file cleanup may remove them.
    conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 5);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMinVersions(minVersions);
    hcd.setTimeToLive(ttl);
    init(name.getMethodName() + "-" + minVersions, conf, hcd);
    long storeTtl = this.store.getScanInfo().getTtl();
    long sleepTime = storeTtl / storeFileNum;
    long timeStamp;
    // Write and flush storeFileNum files; consecutive files' cell timestamps
    // differ by sleepTime (= storeTtl / storeFileNum), so they will later
    // expire one at a time as the clock advances.
    for (int i = 1; i <= storeFileNum; i++) {
        LOG.info("Adding some data for the store file #" + i);
        timeStamp = EnvironmentEdgeManager.currentTime();
        this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null), null);
        this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null), null);
        this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null), null);
        flush(i);
        edge.incrementTime(sleepTime);
    }
    // Verify the total number of store files.
    Assert.assertEquals(storeFileNum, this.store.getStorefiles().size());
    // There will be no compaction due to the threshold above. The last (newest)
    // file is never removed, hence only storeFileNum - 1 iterations.
    for (int i = 1; i <= storeFileNum - 1; i++) {
        // No compaction is selected (returns null), but the request archives
        // any store file whose max timestamp is entirely past the TTL.
        assertNull(this.store.requestCompaction());
        Collection<StoreFile> sfs = this.store.getStorefiles();
        // Ensure i files are gone.
        if (minVersions == 0) {
            assertEquals(storeFileNum - i, sfs.size());
            // Ensure only non-expired files remain.
            for (StoreFile sf : sfs) {
                assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl));
            }
        } else {
            // MIN_VERSIONS > 0 forbids dropping expired files, so all remain.
            assertEquals(storeFileNum, sfs.size());
        }
        // Let the next store file expire.
        edge.incrementTime(sleepTime);
    }
    assertNull(this.store.requestCompaction());
    Collection<StoreFile> sfs = this.store.getStorefiles();
    // Assert the last expired file is not removed: with MIN_VERSIONS == 0 exactly
    // one (the newest) file survives, and it is already past its TTL.
    if (minVersions == 0) {
        assertEquals(1, sfs.size());
    }
    long ts = sfs.iterator().next().getReader().getMaxTimestamp();
    assertTrue(ts < (edge.currentTime() - storeTtl));
    for (StoreFile sf : sfs) {
        sf.closeReader(true);
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) CompactionConfiguration(org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge)

Example 2 with IncrementingEnvironmentEdge

use of org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge in project hbase by apache.

In the class TestHRegion, the method testCellTTLs:

/**
 * Exercises cell-level TTLs (carried in TTL tags) together with the column
 * family TTL, using an injected IncrementingEnvironmentEdge so the test
 * controls the clock. NOTE(review): the injected edge presumably also advances
 * time slightly on each currentTime() call, so the T+N markers below are
 * approximate — confirm against IncrementingEnvironmentEdge.
 */
@Test
public void testCellTTLs() throws IOException {
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    final byte[] row = Bytes.toBytes("testRow");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    // Column family TTL: 10 seconds
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(10).build()).build();
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    // Cell TTLs travel as tags, which require at least this HFile format version.
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    region = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(), TEST_UTIL.getDataTestDir(), conf, tableDescriptor);
    assertNotNull(region);
    long now = EnvironmentEdgeManager.currentTime();
    // q1: timestamp T+0, cell TTL 5 s -> expires ~T+5
    region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { // TTL tags specify ts in milliseconds
    new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    // q2: timestamp T+0, family TTL 10 s -> expires ~T+10
    region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
    // q3: timestamp just before T+10, cell TTL 5 s -> expires ~T+15
    region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { // TTL tags specify ts in milliseconds
    new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    // q4: timestamp just before T+10, family TTL 10 s -> expires ~T+20
    region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
    // Flush so we are sure store scanning gets this right
    region.flush(true);
    // A query at time T+0 should return all cells
    Result r = region.get(new Get(row));
    assertNotNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Advance to ~T+5: q1's 5 s cell TTL has elapsed
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Advance to ~T+10: q2's 10 s family TTL has elapsed
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Advance to ~T+15: q3 (ts ~T+10, cell TTL 5 s) has expired
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // Advance 10 s to ~T+25 (well past q4's ~T+20 expiry): everything is gone
    edge.incrementTime(10000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNull(r.getValue(fam1, q4));
    // Fun with disappearing increments
    // Start at 1 (written ~T+25, subject to the 10 s family TTL)
    region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
    r = region.get(new Get(row));
    byte[] val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // Increment with a TTL of 5 seconds
    Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
    incr.setTTL(5000);
    // 2
    region.increment(incr);
    // New value should be 2
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(2L, Bytes.toLong(val));
    // Advance to ~T+30: the increment's 5 s TTL cell expires
    edge.incrementTime(5000);
    // Value should be back to 1
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // Advance to ~T+35
    edge.incrementTime(5000);
    // Original value written at ~T+25 should be gone now via the 10 s family TTL
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 3 with IncrementingEnvironmentEdge

use of org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge in project hbase by apache.

In the class TestThriftHttpServer, the method setUpBeforeClass:

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Relax table sanity checking for the test tables, and run the Thrift
    // server over HTTP.
    TEST_UTIL.getConfiguration().setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, false);
    TEST_UTIL.getConfiguration().setBoolean(Constants.USE_HTTP_CONF_KEY, true);
    TEST_UTIL.startMiniCluster();
    // Inject a clock that moves forward on every read; otherwise successive
    // puts could carry identical timestamps and silently overwrite each other.
    IncrementingEnvironmentEdge tickingClock = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(tickingClock);
}
Also used : IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge) BeforeClass(org.junit.BeforeClass)

Example 4 with IncrementingEnvironmentEdge

use of org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge in project hbase by apache.

In the class TestThriftServerCmdLine, the method setUpBeforeClass:

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Relax table sanity checking so the test tables can be created.
    TEST_UTIL.getConfiguration().setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, false);
    TEST_UTIL.startMiniCluster();
    // Inject a clock that moves forward on every read; otherwise successive
    // puts could carry identical timestamps and silently overwrite each other.
    IncrementingEnvironmentEdge tickingClock = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(tickingClock);
}
Also used : IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge) BeforeClass(org.junit.BeforeClass)

Example 5 with IncrementingEnvironmentEdge

use of org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge in project hbase by apache.

In the class TestRegionServerReportForDuty, the method testReportForDutyWithEnvironmentEdge:

/**
 * Tests region server reportForDuty with a non-default environment edge
 * installed before the master and region server start.
 */
@Test
public void testReportForDutyWithEnvironmentEdge() throws Exception {
    // Use a random unique port so the master does not collide with other tests.
    cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtil.randomFreePort());
    // Zero out dispatch and retry delays so RPC requests are sent immediately.
    cluster.getConfiguration().setInt("hbase.procedure.remote.dispatcher.delay.msec", 0);
    cluster.getConfiguration().setLong("hbase.regionserver.rpc.retry.interval", 0);
    // Exactly one region server is required for startup to proceed.
    cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 1);
    cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
    // Swap in the non-default, incrementing clock before any server starts.
    EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
    master = cluster.addMaster();
    rs = cluster.addRegionServer();
    LOG.debug("Starting master: " + master.getMaster().getServerName());
    master.start();
    rs.start();
    waitForClusterOnline(master);
}
Also used : IncrementingEnvironmentEdge(org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge) Test(org.junit.Test)

Aggregations

IncrementingEnvironmentEdge (org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge)10 Put (org.apache.hadoop.hbase.client.Put)5 Test (org.junit.Test)5 ArrayList (java.util.ArrayList)3 Configuration (org.apache.hadoop.conf.Configuration)3 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)3 KeyValue (org.apache.hadoop.hbase.KeyValue)3 CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)3 Get (org.apache.hadoop.hbase.client.Get)3 Result (org.apache.hadoop.hbase.client.Result)3 Table (org.apache.hadoop.hbase.client.Table)2 CompactionConfiguration (org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration)2 BeforeClass (org.junit.BeforeClass)2 ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag)1 Cell (org.apache.hadoop.hbase.Cell)1 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)1 Delete (org.apache.hadoop.hbase.client.Delete)1 Increment (org.apache.hadoop.hbase.client.Increment)1 Scan (org.apache.hadoop.hbase.client.Scan)1 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)1