Use of org.apache.hadoop.hbase.executor.ExecutorService in project hbase by apache.
The class RegionServicesForStores, method getInMemoryCompactionPool:
ThreadPoolExecutor getInMemoryCompactionPool() {
  if (rsServices != null) {
    ExecutorService executorService = rsServices.getExecutorService();
    ExecutorConfig config = executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.RS_IN_MEMORY_COMPACTION)
        .setCorePoolSize(inMemoryPoolSize);
    return executorService.getExecutorLazily(config);
  } else {
    // this could only happen in tests
    return getInMemoryCompactionPoolForTest();
  }
}
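ExecutorConfig is a non-static inner class of ExecutorService, hence the executorService.new ExecutorConfig() syntax; getExecutorLazily creates the named pool on first use and returns the cached instance on later calls. A minimal caller-side sketch (the regionServicesForStores variable and the task body are assumptions for illustration, not part of the snippet above):

// Hedged sketch: hand one unit of in-memory compaction work to the pool.
// Only getInMemoryCompactionPool() comes from the code above.
ThreadPoolExecutor pool = regionServicesForStores.getInMemoryCompactionPool();
pool.execute(() -> {
  // ... in-memory compaction work for one store would go here ...
});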
Use of org.apache.hadoop.hbase.executor.ExecutorService in project hbase by apache.
The class TestExecutorStatusChore, method testMetricsCollect:
@Test
public void testMetricsCollect() throws Exception {
  int maxThreads = 5;
  int maxTries = 10;
  int sleepInterval = 1000;
  Server mockedServer = mock(Server.class);
  when(mockedServer.getConfiguration()).thenReturn(HBaseConfiguration.create());
  // Start an executor service pool with max 5 threads.
  ExecutorService executorService = new ExecutorService("unit_test");
  executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_PARALLEL_SEEK)
      .setCorePoolSize(maxThreads));
  MetricsRegionServerSource serverSource = CompatibilitySingletonFactory
      .getInstance(MetricsRegionServerSourceFactory.class).createServer(null);
  assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
  ExecutorStatusChore statusChore =
      new ExecutorStatusChore(60000, mockedServer, executorService, serverSource);
  AtomicBoolean lock = new AtomicBoolean(true);
  AtomicInteger counter = new AtomicInteger(0);
  for (int i = 0; i < maxThreads + 1; i++) {
    executorService.submit(
        new TestEventHandler(mockedServer, EventType.RS_PARALLEL_SEEK, lock, counter));
  }
  // Each TestEventHandler increments the counter when it starts.
  int tries = 0;
  while (counter.get() < maxThreads && tries < maxTries) {
    LOG.info("Waiting for all event handlers to start...");
    Thread.sleep(sleepInterval);
    tries++;
  }
  // Assert that the pool is at max threads.
  assertEquals(maxThreads, counter.get());
  statusChore.chore();
  Pair<Long, Long> executorStatus = statusChore.getExecutorStatus("RS_PARALLEL_SEEK");
  // Running handlers.
  assertEquals(maxThreads, executorStatus.getFirst().intValue());
  // Pending (queued) tasks.
  assertEquals(1, executorStatus.getSecond().intValue());
  // Now release the handlers blocked on the lock.
  synchronized (lock) {
    lock.set(false);
    lock.notifyAll();
  }
  executorService.shutdown();
}
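TestEventHandler itself is not shown on this page. A plausible sketch, assuming it extends org.apache.hadoop.hbase.executor.EventHandler and implements the start-then-park behavior the test depends on (increment the counter on entry, then wait on the lock until the test flips it to false and notifies):

public static class TestEventHandler extends EventHandler {
  private final AtomicBoolean lock;
  private final AtomicInteger counter;

  public TestEventHandler(Server server, EventType eventType,
      AtomicBoolean lock, AtomicInteger counter) {
    super(server, eventType);
    this.lock = lock;
    this.counter = counter;
  }

  @Override
  public void process() throws IOException {
    // Signal the test that this handler occupies a pool thread...
    counter.incrementAndGet();
    // ...then park until the test sets lock to false and calls notifyAll().
    synchronized (lock) {
      while (lock.get()) {
        try {
          lock.wait();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
      }
    }
  }
}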
Use of org.apache.hadoop.hbase.executor.ExecutorService in project hbase by apache.
The class TestSplitLogWorker, method setup:
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "split-log-worker-tests", null);
  ds = new DummyServer(zkw, conf);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().splitLogZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().splitLogZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().splitLogZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().rsZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().rsZNode), not(is(-1)));
  SplitLogCounters.resetCounters();
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS)
      .setCorePoolSize(10));
}
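The matching teardown is not shown; a minimal counterpart sketch, assuming the usual JUnit @After convention and the fields initialized above:

@After
public void teardown() throws Exception {
  // Release the resources created in setup, in reverse order.
  if (executorService != null) {
    executorService.shutdown();
  }
  if (zkw != null) {
    zkw.close();
  }
  TEST_UTIL.shutdownMiniZKCluster();
}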
Use of org.apache.hadoop.hbase.executor.ExecutorService in project hbase by apache.
The class TestHRegionReplayEvents, method setUp:
@Before
public void setUp() throws Exception {
  CONF = TEST_UTIL.getConfiguration();
  dir = TEST_UTIL.getDataTestDir("TestHRegionReplayEvents").toString();
  method = name.getMethodName();
  tableName = Bytes.toBytes(method);
  rootDir = new Path(dir + method);
  TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(method));
  for (byte[] family : families) {
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
  }
  htd = builder.build();
  long time = EnvironmentEdgeManager.currentTime();
  ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
  primaryHri = RegionInfoBuilder.newBuilder(htd.getTableName())
      .setRegionId(time).setReplicaId(0).build();
  secondaryHri = RegionInfoBuilder.newBuilder(htd.getTableName())
      .setRegionId(time).setReplicaId(1).build();
  WALFactory wals = TestHRegion.createWALFactory(CONF, rootDir);
  walPrimary = wals.getWAL(primaryHri);
  walSecondary = wals.getWAL(secondaryHri);
  rss = mock(RegionServerServices.class);
  when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
  when(rss.getConfiguration()).thenReturn(CONF);
  when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(CONF));
  String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER.toString();
  ExecutorService es = new ExecutorService(string);
  es.startExecutorService(es.new ExecutorConfig()
      .setCorePoolSize(1)
      .setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER));
  when(rss.getExecutorService()).thenReturn(es);
  primaryRegion = HRegion.createHRegion(primaryHri, rootDir, CONF, htd, walPrimary);
  primaryRegion.close();
  List<HRegion> regions = new ArrayList<>();
  regions.add(primaryRegion);
  Mockito.doReturn(regions).when(rss).getRegions();
  primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
  secondaryRegion = HRegion.openHRegion(secondaryHri, htd, null, CONF, rss, null);
  reader = null;
}
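The corresponding tearDown is omitted on this page; a plausible sketch, assuming the regions and reader created above need to be released (closeRegionAndWAL is the HBaseTestingUtility helper commonly used for this; its use here is an assumption):

@After
public void tearDown() throws Exception {
  if (reader != null) {
    reader.close();
  }
  // This test helper closes the region together with its WAL.
  if (primaryRegion != null) {
    HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
  }
  if (secondaryRegion != null) {
    secondaryRegion.close();
  }
  EnvironmentEdgeManager.reset();
}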
Use of org.apache.hadoop.hbase.executor.ExecutorService in project hbase by apache.
The class TestHRegionReplayEvents, method setup (an older variant of the same setup, built on the since-deprecated HTableDescriptor, HColumnDescriptor, and HRegionInfo APIs):
@Before
public void setup() throws IOException {
  TEST_UTIL = HBaseTestingUtility.createLocalHTU();
  CONF = TEST_UTIL.getConfiguration();
  dir = TEST_UTIL.getDataTestDir("TestHRegionReplayEvents").toString();
  method = name.getMethodName();
  tableName = Bytes.toBytes(method);
  rootDir = new Path(dir + method);
  TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
  htd = new HTableDescriptor(TableName.valueOf(method));
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  time = System.currentTimeMillis();
  primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
      HConstants.EMPTY_END_ROW, false, time, 0);
  secondaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
      HConstants.EMPTY_END_ROW, false, time, 1);
  wals = TestHRegion.createWALFactory(CONF, rootDir);
  walPrimary = wals.getWAL(primaryHri.getEncodedNameAsBytes(),
      primaryHri.getTable().getNamespace());
  walSecondary = wals.getWAL(secondaryHri.getEncodedNameAsBytes(),
      secondaryHri.getTable().getNamespace());
  rss = mock(RegionServerServices.class);
  when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
  when(rss.getConfiguration()).thenReturn(CONF);
  when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(CONF));
  String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER.toString();
  ExecutorService es = new ExecutorService(string);
  es.startExecutorService(string + "-" + string, 1);
  when(rss.getExecutorService()).thenReturn(es);
  primaryRegion = HRegion.createHRegion(primaryHri, rootDir, CONF, htd, walPrimary);
  primaryRegion.close();
  List<Region> regions = new ArrayList<>();
  regions.add(primaryRegion);
  when(rss.getOnlineRegions()).thenReturn(regions);
  primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
  secondaryRegion = HRegion.openHRegion(secondaryHri, htd, null, CONF, rss, null);
  reader = null;
}
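Note the older executor API in this variant: startExecutorService(String name, int maxThreads) takes a pool name and thread count directly, whereas the earlier snippets use the fluent ExecutorConfig form. Both appear on this page; side by side (the arguments are taken from the snippets above):

// Older signature: plain name plus thread count, as in this variant.
es.startExecutorService(string + "-" + string, 1);

// Newer signature: a fluent ExecutorConfig, as in the earlier snippets.
es.startExecutorService(es.new ExecutorConfig()
    .setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER)
    .setCorePoolSize(1));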