
Example 11 with RegionServerThread

Use of org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread in project hbase by apache.

From the class TestQuotaThrottle, method triggerCacheRefresh.

private void triggerCacheRefresh(boolean bypass, boolean userLimiter, boolean tableLimiter, boolean nsLimiter, final TableName... tables) throws Exception {
    envEdge.incValue(2 * REFRESH_TIME);
    for (RegionServerThread rst : TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
        RegionServerQuotaManager quotaManager = rst.getRegionServer().getRegionServerQuotaManager();
        QuotaCache quotaCache = quotaManager.getQuotaCache();
        quotaCache.triggerCacheRefresh();
        // sleep for cache update
        Thread.sleep(250);
        for (TableName table : tables) {
            quotaCache.getTableLimiter(table);
        }
        boolean isUpdated = false;
        while (!isUpdated) {
            quotaCache.triggerCacheRefresh();
            isUpdated = true;
            for (TableName table : tables) {
                boolean isBypass = true;
                if (userLimiter) {
                    isBypass &= quotaCache.getUserLimiter(User.getCurrent().getUGI(), table).isBypass();
                }
                if (tableLimiter) {
                    isBypass &= quotaCache.getTableLimiter(table).isBypass();
                }
                if (nsLimiter) {
                    isBypass &= quotaCache.getNamespaceLimiter(table.getNamespaceAsString()).isBypass();
                }
                if (isBypass != bypass) {
                    envEdge.incValue(100);
                    isUpdated = false;
                    break;
                }
            }
        }
        LOG.debug("QuotaCache");
        LOG.debug(quotaCache.getNamespaceQuotaCache());
        LOG.debug(quotaCache.getTableQuotaCache());
        LOG.debug(quotaCache.getUserQuotaCache());
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread)
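
For context, a hedged sketch of how a test might drive this helper after changing a table quota. The Admin, QuotaSettingsFactory, and ThrottleType calls are the public HBase quota API, but the wrapper method name, the limit, and the TEST_UTIL.getAdmin() accessor are illustrative assumptions, not code from TestQuotaThrottle.

private void throttleTableAndWait(final TableName tableName) throws Exception {
    // Assumption: TEST_UTIL exposes an Admin for the mini cluster.
    Admin admin = TEST_UTIL.getAdmin();
    // Throttle the table to 6 requests per minute, then wait until every
    // region server's QuotaCache reports a non-bypass table limiter.
    admin.setQuota(QuotaSettingsFactory.throttleTable(tableName, ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
    triggerCacheRefresh(false, false, true, false, tableName);
    // Remove the throttle and wait until the table limiter is back to bypass.
    admin.setQuota(QuotaSettingsFactory.unthrottleTable(tableName));
    triggerCacheRefresh(true, false, true, false, tableName);
}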

Example 12 with RegionServerThread

Use of org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread in project hbase by apache.

From the class TestVisibilityLabels, method testVisibilityLabelsOnRSRestart.

@Test(timeout = 60 * 1000)
public void testVisibilityLabelsOnRSRestart() throws Exception {
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
    for (RegionServerThread rsThread : regionServerThreads) {
        rsThread.getRegionServer().abort("Aborting ");
    }
    // Start one new RS
    RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
    waitForLabelsRegionAvailability(rs.getRegionServer());
    try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
        Scan s = new Scan();
        s.setAuthorizations(new Authorizations(SECRET));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertTrue(next.length == 1);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) RegionActionResult(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
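
The createTableAndWriteDataWithLabels helper is not shown in this excerpt. Below is a minimal reconstruction of the write side it implies, using the standard cell-visibility client API; the family name, qualifier, and row-key scheme are assumptions, not the test's actual helper.

static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps) throws Exception {
    // Assumed column layout; the real helper in TestVisibilityLabels may differ.
    byte[] fam = Bytes.toBytes("info");
    byte[] qual = Bytes.toBytes("qual");
    Table table = TEST_UTIL.createTable(tableName, fam);
    int i = 1;
    for (String labelExp : labelExps) {
        Put put = new Put(Bytes.toBytes("row" + i));
        put.addColumn(fam, qual, Bytes.toBytes("value" + i));
        // Attach the per-cell visibility expression, e.g. "(secret|confidential)" or "private".
        put.setCellVisibility(new CellVisibility(labelExp));
        table.put(put);
        i++;
    }
    return table;
}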

Example 13 with RegionServerThread

Use of org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread in project hbase by apache.

From the class TestVisibilityLabels, method testVisibilityLabelsOnKillingOfRSContainingLabelsTable.

@Test
public void testVisibilityLabelsOnKillingOfRSContainingLabelsTable() throws Exception {
    List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
    int liveRS = 0;
    for (RegionServerThread rsThreads : regionServerThreads) {
        if (!rsThreads.getRegionServer().isAborted()) {
            liveRS++;
        }
    }
    if (liveRS == 1) {
        TEST_UTIL.getHBaseCluster().startRegionServer();
    }
    Thread t1 = new Thread() {

        public void run() {
            List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
            for (RegionServerThread rsThread : regionServerThreads) {
                List<Region> onlineRegions = rsThread.getRegionServer().getOnlineRegions(LABELS_TABLE_NAME);
                if (onlineRegions.size() > 0) {
                    rsThread.getRegionServer().abort("Aborting ");
                    killedRS = true;
                    break;
                }
            }
        }
    };
    t1.start();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    Thread t = new Thread() {

        public void run() {
            try {
                while (!killedRS) {
                    Thread.sleep(1);
                }
                createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE);
            } catch (Exception e) {
                // Ignored: the write may fail transiently while the labels region is unavailable.
            }
        }
    };
    t.start();
    regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
    while (!killedRS) {
        Thread.sleep(10);
    }
    regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
    for (RegionServerThread rsThread : regionServerThreads) {
        while (true) {
            if (!rsThread.getRegionServer().isAborted()) {
                List<Region> onlineRegions = rsThread.getRegionServer().getOnlineRegions(LABELS_TABLE_NAME);
                if (onlineRegions.size() > 0) {
                    break;
                } else {
                    Thread.sleep(10);
                }
            } else {
                break;
            }
        }
    }
    TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
    t.join();
    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
        Scan s = new Scan();
        s.setAuthorizations(new Authorizations(SECRET));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertTrue(next.length == 1);
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) IOException(java.io.IOException) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) RegionActionResult(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) Region(org.apache.hadoop.hbase.regionserver.Region) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
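
The Thread.sleep polling on killedRS above can also be written with the Waiter utility that appears in the Aggregations list below. A hedged sketch of the same wait, assuming killedRS is a volatile boolean field on the test class and TEST_UTIL is the shared HBaseTestingUtility:

// Equivalent to: while (!killedRS) { Thread.sleep(10); }, but with a bounded timeout.
TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
        return killedRS;
    }
});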

Example 14 with RegionServerThread

Use of org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread in project hbase by apache.

From the class TestCorruptedRegionStoreFile, method evictHFileCache.

private void evictHFileCache(final Path hfile) throws Exception {
    for (RegionServerThread rst : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
        HRegionServer rs = rst.getRegionServer();
        rs.getCacheConfig().getBlockCache().evictBlocksByHfileName(hfile.getName());
    }
}
Also used : RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread)
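
evictBlocksByHfileName returns the number of blocks it evicted, so the helper can also report what it actually removed. A small variant of the method above; only the logging line is new, and its wording (plus the assumption that the test class has a LOG field) is mine:

private void evictHFileCache(final Path hfile) throws Exception {
    for (RegionServerThread rst : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
        HRegionServer rs = rst.getRegionServer();
        // evictBlocksByHfileName() returns how many cached blocks were dropped.
        int evicted = rs.getCacheConfig().getBlockCache().evictBlocksByHfileName(hfile.getName());
        LOG.debug("Evicted " + evicted + " blocks of " + hfile.getName() + " from " + rs.getServerName());
    }
}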

Example 15 with RegionServerThread

Use of org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread in project hbase by apache.

From the class TestRegionReplicaFailover, method testSecondaryRegionKillWhilePrimaryIsAcceptingWrites.

/**
   * Tests the case where there are 3 region replicas and the primary is continuously accepting
   * new writes while one of the secondaries is killed. Verification is done for both of the
   * secondary replicas.
   */
@Test(timeout = 120000)
public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
        Table table = connection.getTable(htd.getTableName());
        Admin admin = connection.getAdmin()) {
        // start a thread to do the loading of primary
        // start with some base
        HTU.loadNumericRows(table, fam, 0, 1000);
        admin.flush(table.getName());
        HTU.loadNumericRows(table, fam, 1000, 2000);
        final AtomicReference<Throwable> ex = new AtomicReference<>(null);
        final AtomicBoolean done = new AtomicBoolean(false);
        final AtomicInteger key = new AtomicInteger(2000);
        Thread loader = new Thread() {

            @Override
            public void run() {
                while (!done.get()) {
                    try {
                        HTU.loadNumericRows(table, fam, key.get(), key.get() + 1000);
                        key.addAndGet(1000);
                    } catch (Throwable e) {
                        ex.compareAndSet(null, e);
                    }
                }
            }
        };
        loader.start();
        Thread aborter = new Thread() {

            @Override
            public void run() {
                try {
                    boolean aborted = false;
                    for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
                        for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
                            if (r.getRegionInfo().getReplicaId() == 1) {
                                LOG.info("Aborting region server hosting secondary region replica");
                                rs.getRegionServer().abort("for test");
                                aborted = true;
                            }
                        }
                    }
                    assertTrue(aborted);
                } catch (Throwable e) {
                    ex.compareAndSet(null, e);
                }
            }

        };
        aborter.start();
        aborter.join();
        done.set(true);
        loader.join();
        assertNull(ex.get());
        // assert that the test is working as designed
        assertTrue(key.get() > 1000);
        LOG.info("Loaded up to key :" + key.get());
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 0, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 1, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 2, 30000);
    }
    // restart the region server
    HTU.getMiniHBaseCluster().startRegionServer();
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Table(org.apache.hadoop.hbase.client.Table) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Connection(org.apache.hadoop.hbase.client.Connection) AtomicReference(java.util.concurrent.atomic.AtomicReference) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Admin(org.apache.hadoop.hbase.client.Admin) Test(org.junit.Test)
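
The per-replica checks above go through a verifyNumericRowsWithTimeout helper that is not shown here. The replica-targeted read it implies uses the public Consistency and replica-id client API; the row key and assertion below are illustrative assumptions:

// Read one row from a specific region replica instead of the primary.
Get get = new Get(Bytes.toBytes(String.valueOf(42)));
get.addFamily(fam);
get.setConsistency(Consistency.TIMELINE);  // allow the read to be served by a secondary
get.setReplicaId(1);                       // 0 = primary, 1 and 2 = the secondaries in this test
Result result = table.get(get);
assertFalse(result.isEmpty());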

Aggregations

RegionServerThread (org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread): 34 uses
Test (org.junit.Test): 24 uses
Table (org.apache.hadoop.hbase.client.Table): 22 uses
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 15 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 14 uses
IOException (java.io.IOException): 13 uses
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 12 uses
Ignore (org.junit.Ignore): 11 uses
TableName (org.apache.hadoop.hbase.TableName): 9 uses
Waiter (org.apache.hadoop.hbase.Waiter): 8 uses
Result (org.apache.hadoop.hbase.client.Result): 8 uses
OperationConflictException (org.apache.hadoop.hbase.exceptions.OperationConflictException): 8 uses
ServerNotRunningYetException (org.apache.hadoop.hbase.ipc.ServerNotRunningYetException): 8 uses
ArrayList (java.util.ArrayList): 7 uses
TimeoutException (java.util.concurrent.TimeoutException): 7 uses
RetriesExhaustedWithDetailsException (org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException): 7 uses
RegionInRecoveryException (org.apache.hadoop.hbase.exceptions.RegionInRecoveryException): 7 uses
Path (org.apache.hadoop.fs.Path): 6 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 5 uses
ServerName (org.apache.hadoop.hbase.ServerName): 5 uses