Search in sources:

Example 1 with SpaceQuotaSnapshotPredicate

use of org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate in project hbase by apache.

From the class TestSnapshotQuotaObserverChore, method testSnapshotSize.

@Test
public void testSnapshotSize() throws Exception {
    // Create a table with five regions and cap it at 1GB; violations reject inserts
    TableName tn1 = helper.createTableWithRegions(5);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));
    // Write some data (256KB) and flush it so it lands in HFiles
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);
    // Sum the on-disk HFile bytes across every store of every region of tn1
    final long snapshotSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream().flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            // Reported usage must exactly match the HFile bytes computed above
            return snapshot.getUsage() == snapshotSize;
        }
    });
    // Create a snapshot on the table
    final String snapshotName = tn1 + "snapshot";
    admin.snapshot(new SnapshotDescription(snapshotName, tn1, SnapshotType.SKIPFLUSH));
    // Get the snapshots the chore considers in need of a size computation
    Multimap<TableName, String> snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals("Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size());
    // Get the size of our snapshot, aggregated per namespace
    Map<String, Long> namespaceSnapshotSizes = testChore.computeSnapshotSizes(snapshotsToCompute);
    assertEquals(1, namespaceSnapshotSizes.size());
    Long size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString());
    assertNotNull(size);
    // The snapshot should take up no space since the table refers to it completely
    assertEquals(0, size.longValue());
    // Write some more data, flush it, and then major_compact the table
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);
    TEST_UTIL.compact(tn1, true);
    // Test table should reflect its original size since ingest was deterministic
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        // On-disk size of tn1's regions, captured once when this predicate is constructed
        private final long regionSize = TEST_UTIL.getMiniHBaseCluster().getRegions(tn1).stream().flatMap(r -> r.getStores().stream()).mapToLong(HStore::getHFilesSize).sum();

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            LOG.debug("Current usage=" + snapshot.getUsage() + " snapshotSize=" + snapshotSize);
            // The usage of table space consists of region size and snapshot size;
            // closeInSize allows a 1KB tolerance in the comparison
            return closeInSize(snapshot.getUsage(), snapshotSize + regionSize, SpaceQuotaHelperForTests.ONE_KILOBYTE);
        }
    });
    // Wait for no compacted files on the regions of our table
    TEST_UTIL.waitFor(30_000, new NoFilesToDischarge(TEST_UTIL.getMiniHBaseCluster(), tn1));
    // Still should see only one snapshot
    snapshotsToCompute = testChore.getSnapshotsToComputeSize();
    assertEquals("Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size());
    namespaceSnapshotSizes = testChore.computeSnapshotSizes(snapshotsToCompute);
    assertEquals(1, namespaceSnapshotSizes.size());
    size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString());
    assertNotNull(size);
    // The snapshot should take up the size the table originally took up:
    // the compaction rewrote the live files, so the snapshot now owns the originals
    assertEquals(snapshotSize, size.longValue());
}
Also used : SpaceQuotaSnapshotPredicate(org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate) TableName(org.apache.hadoop.hbase.TableName) AtomicLong(java.util.concurrent.atomic.AtomicLong) SnapshotDescription(org.apache.hadoop.hbase.client.SnapshotDescription) NoFilesToDischarge(org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge) IOException(java.io.IOException) Test(org.junit.Test)

Example 2 with SpaceQuotaSnapshotPredicate

use of org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate in project hbase by apache.

From the class TestSnapshotQuotaObserverChore, method testRemovedSnapshots.

@Test
public void testRemovedSnapshots() throws Exception {
    // Create a single-region table and cap it at 1GB; violations reject inserts
    TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));
    // Write some data and flush it
    // 256 KB
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    // Captures the usage observed by the predicate below, for later comparisons
    final AtomicReference<Long> lastSeenSize = new AtomicReference<>();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            lastSeenSize.set(snapshot.getUsage());
            // 230KB threshold leaves headroom below the 256KB that was written
            return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
        }
    });
    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));
    // Snapshot size has to be 0 as the snapshot shares the data with the table
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            // Read the recorded size for this snapshot straight from the quota table
            Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
            Result r = quotaTable.get(g);
            if (r == null || r.isEmpty()) {
                return false;
            }
            r.advance();
            Cell c = r.current();
            return QuotaTableUtil.parseSnapshotSize(c) == 0;
        }
    });
    // Total usage has to remain same as what we saw before taking a snapshot
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            return snapshot.getUsage() == lastSeenSize.get();
        }
    });
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);
    // Now the snapshot size has to equal the previous total size
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
            Result r = quotaTable.get(g);
            if (r == null || r.isEmpty()) {
                return false;
            }
            r.advance();
            Cell c = r.current();
            // The compaction result file has an additional compaction event tracker
            return lastSeenSize.get() == QuotaTableUtil.parseSnapshotSize(c);
        }
    });
    // The total size now has to be equal/more than double of prev total size
    // as double the number of store files exist now.
    final AtomicReference<Long> sizeAfterCompaction = new AtomicReference<>();
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            sizeAfterCompaction.set(snapshot.getUsage());
            return snapshot.getUsage() >= 2 * lastSeenSize.get();
        }
    });
    // Delete the snapshot
    admin.deleteSnapshot(snapshotName1);
    // Total size has to come down to prev total size - snapshot size (which was removed)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            return snapshot.getUsage() == (sizeAfterCompaction.get() - lastSeenSize.get());
        }
    });
}
Also used : Table(org.apache.hadoop.hbase.client.Table) AtomicReference(java.util.concurrent.atomic.AtomicReference) SnapshotDescription(org.apache.hadoop.hbase.client.SnapshotDescription) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) SpaceQuotaSnapshotPredicate(org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate) TableName(org.apache.hadoop.hbase.TableName) Get(org.apache.hadoop.hbase.client.Get) AtomicLong(java.util.concurrent.atomic.AtomicLong) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)

Example 3 with SpaceQuotaSnapshotPredicate

use of org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate in project hbase by apache.

From the class TestSnapshotQuotaObserverChore, method testBucketingFilesToSnapshots.

@Test
public void testBucketingFilesToSnapshots() throws Exception {
    // Create a single-region table and cap it at 1GB; violations reject inserts
    TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));
    // Write some data (256KB) and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);
    // Captures the usage observed by the predicate below, for later comparisons
    final AtomicReference<Long> lastSeenSize = new AtomicReference<>();
    // Wait for the Master chore to run to see the usage (with a fudge factor)
    TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            lastSeenSize.set(snapshot.getUsage());
            // 230KB threshold leaves headroom below the 256KB that was written
            return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
        }
    });
    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);
    // Make sure that the snapshot owns the size
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            // Read the recorded size for the first snapshot from the quota table
            Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1);
            Result r = quotaTable.get(g);
            if (r == null || r.isEmpty()) {
                return false;
            }
            r.advance();
            Cell c = r.current();
            // The compaction result file has an additional compaction event tracker
            return lastSeenSize.get() <= QuotaTableUtil.parseSnapshotSize(c);
        }
    });
    // Create another snapshot on the table
    final String snapshotName2 = tn1 + "snapshot2";
    admin.snapshot(new SnapshotDescription(snapshotName2, tn1, SnapshotType.SKIPFLUSH));
    // Major compact the table to force a rewrite
    TEST_UTIL.compact(tn1, true);
    // Make sure that the snapshot owns the size
    TEST_UTIL.waitFor(30_000, new Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            // Read the recorded size for the second snapshot from the quota table
            Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName2);
            Result r = quotaTable.get(g);
            if (r == null || r.isEmpty()) {
                return false;
            }
            r.advance();
            Cell c = r.current();
            // The compaction result file has an additional compaction event tracker
            return lastSeenSize.get() <= QuotaTableUtil.parseSnapshotSize(c);
        }
    });
    // The namespace total should cover both snapshots: at least twice the observed usage
    Get g = QuotaTableUtil.createGetNamespaceSnapshotSize(tn1.getNamespaceAsString());
    Result r = quotaTable.get(g);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    r.advance();
    long size = QuotaTableUtil.parseSnapshotSize(r.current());
    assertTrue(lastSeenSize.get() * 2 <= size);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) AtomicReference(java.util.concurrent.atomic.AtomicReference) SnapshotDescription(org.apache.hadoop.hbase.client.SnapshotDescription) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) SpaceQuotaSnapshotPredicate(org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate) TableName(org.apache.hadoop.hbase.TableName) Get(org.apache.hadoop.hbase.client.Get) AtomicLong(java.util.concurrent.atomic.AtomicLong) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)

Example 4 with SpaceQuotaSnapshotPredicate

use of org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate in project hbase by apache.

From the class TestSpaceQuotasWithSnapshots, method testRematerializedTablesDoNoInheritSpace.

@Test
public void testRematerializedTablesDoNoInheritSpace() throws Exception {
    TableName sourceTable = helper.createTableWithRegions(1);
    TableName clonedTable = helper.getNextTableName();
    LOG.info("Writing data");
    // Both the source table and the future clone get a 1GB NO_INSERTS quota.
    QuotaSettings sourceQuota = QuotaSettingsFactory.limitTableSpace(sourceTable, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(sourceQuota);
    QuotaSettings cloneQuota = QuotaSettingsFactory.limitTableSpace(clonedTable, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(cloneQuota);
    // Ingest 2MB into the source table.
    final long bytesWritten = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(sourceTable, bytesWritten);
    LOG.info("Waiting until table size reflects written data");
    // Block until the master's quota snapshot reflects at least what we wrote.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, sourceTable) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot quotaSnapshot) throws Exception {
            return quotaSnapshot.getUsage() >= bytesWritten;
        }
    });
    // Let the reported usage settle before recording it.
    waitForStableQuotaSize(conn, sourceTable, null);
    // The true on-disk footprint after the first round of writes.
    final long observedInitialSize = conn.getAdmin().getCurrentSpaceQuotaSnapshot(sourceTable).getUsage();
    LOG.info("Initial table size was " + observedInitialSize);
    LOG.info("Snapshot the table");
    final String sourceSnapshotName = sourceTable.toString() + "_snapshot1";
    admin.snapshot(sourceSnapshotName, sourceTable);
    admin.cloneSnapshot(sourceSnapshotName, clonedTable);
    // Add a second round of data to the source table under a different qualifier,
    // then flush so it reaches disk.
    helper.writeData(sourceTable, bytesWritten, "q2");
    admin.flush(sourceTable);
    // Once the source's usage doubles, fresh region size reports have reached the master.
    TEST_UTIL.waitFor(30_000, 1_000, new SpaceQuotaSnapshotPredicate(conn, sourceTable) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot quotaSnapshot) throws Exception {
            return quotaSnapshot.getUsage() >= observedInitialSize * 2;
        }
    });
    // Reports were sent by our RS, so the clone's recorded usage must be zero.
    SpaceQuotaSnapshot cloneQuotaSnapshot = (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(clonedTable);
    assertNotNull(cloneQuotaSnapshot);
    assertEquals(0, cloneQuotaSnapshot.getUsage());
    // A major compaction forces the clone to rewrite -- and thus own -- its files.
    TEST_UTIL.compact(clonedTable, true);
    // Afterwards the clone should be charged at least the original table's size;
    // the compaction result file also carries an extra compaction event tracker.
    TEST_UTIL.waitFor(30_000, 1_000, new SpaceQuotaSnapshotPredicate(conn, clonedTable) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot quotaSnapshot) throws Exception {
            return quotaSnapshot.getUsage() >= observedInitialSize;
        }
    });
}
Also used : SpaceQuotaSnapshotPredicate(org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate) TableName(org.apache.hadoop.hbase.TableName) IOException(java.io.IOException) Test(org.junit.Test)

Example 5 with SpaceQuotaSnapshotPredicate

use of org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate in project hbase by apache.

From the class TestLowLatencySpaceQuotas, method testFlushes.

@Test
public void testFlushes() throws Exception {
    TableName table = helper.createTableWithRegions(1);
    // Cap the table at 1GB; exceeding the quota rejects new inserts.
    QuotaSettings quota = QuotaSettingsFactory.limitTableSpace(table, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(quota);
    // Ingest 2MB of data.
    final long bytesWritten = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(table, bytesWritten);
    // Force the memstore contents out to HFiles.
    admin.flush(table);
    // The flush alone should surface the size increase to the quota machinery,
    // even though no filesystem scan has happened.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, table) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot quotaSnapshot) throws Exception {
            return quotaSnapshot.getUsage() >= bytesWritten;
        }
    });
}
Also used : SpaceQuotaSnapshotPredicate(org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)

Aggregations

SpaceQuotaSnapshotPredicate (org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate)12 TableName (org.apache.hadoop.hbase.TableName)11 Test (org.junit.Test)11 IOException (java.io.IOException)7 AtomicLong (java.util.concurrent.atomic.AtomicLong)6 Result (org.apache.hadoop.hbase.client.Result)3 SnapshotDescription (org.apache.hadoop.hbase.client.SnapshotDescription)3 Table (org.apache.hadoop.hbase.client.Table)3 AtomicReference (java.util.concurrent.atomic.AtomicReference)2 Cell (org.apache.hadoop.hbase.Cell)2 Get (org.apache.hadoop.hbase.client.Get)2 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)2 List (java.util.List)1 Map (java.util.Map)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 FileStatus (org.apache.hadoop.fs.FileStatus)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 Path (org.apache.hadoop.fs.Path)1 ServerName (org.apache.hadoop.hbase.ServerName)1 NoFilesToDischarge (org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge)1