use of java.util.concurrent.ExecutorService in project hadoop by apache.
the class TestInMemorySCMStore method testAddResourceRefAddResourceConcurrency.
@Test
public void testAddResourceRefAddResourceConcurrency() throws Exception {
  startEmptyStore();
  final String key = "key1";
  final String fileName = "foo.jar";
  final String user = "user";
  final ApplicationId id = createAppId(1, 1L);
  // add the resource and add the resource ref at the same time
  ExecutorService exec = HadoopExecutors.newFixedThreadPool(2);
  final CountDownLatch start = new CountDownLatch(1);
  Callable<String> addKeyTask = new Callable<String>() {
    @Override
    public String call() throws Exception {
      start.await();
      return store.addResource(key, fileName);
    }
  };
  Callable<String> addAppIdTask = new Callable<String>() {
    @Override
    public String call() throws Exception {
      start.await();
      return store.addResourceReference(key, new SharedCacheResourceReference(id, user));
    }
  };
  Future<String> addAppIdFuture = exec.submit(addAppIdTask);
  Future<String> addKeyFuture = exec.submit(addKeyTask);
  // start them at the same time
  start.countDown();
  // get the results
  String addKeyResult = addKeyFuture.get();
  String addAppIdResult = addAppIdFuture.get();
  assertEquals(fileName, addKeyResult);
  System.out.println("addAppId() result: " + addAppIdResult);
  // it may be null or the fileName depending on the timing
  assertTrue(addAppIdResult == null || addAppIdResult.equals(fileName));
  exec.shutdown();
}
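The test races store.addResource() against store.addResourceReference() by gating both callables on one CountDownLatch. A minimal JDK-only sketch of the same gate-and-race pattern, with a ConcurrentHashMap standing in for the store (names and behavior here are illustrative, not the Hadoop API):

import java.util.concurrent.*;

public class GateAndRace {
  public static void main(String[] args) throws Exception {
    final ConcurrentMap<String, String> store = new ConcurrentHashMap<>();
    ExecutorService exec = Executors.newFixedThreadPool(2);
    final CountDownLatch start = new CountDownLatch(1);

    // Both tasks block on the latch so neither gets a head start.
    Future<String> addKey = exec.submit(() -> {
      start.await();
      store.putIfAbsent("key1", "foo.jar");
      return store.get("key1");            // always "foo.jar"
    });
    Future<String> readKey = exec.submit(() -> {
      start.await();
      return store.get("key1");            // null or "foo.jar", depending on timing
    });

    start.countDown();                     // release both tasks at once
    System.out.println("addKey:  " + addKey.get());
    System.out.println("readKey: " + readKey.get());
    exec.shutdown();
  }
}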
use of java.util.concurrent.ExecutorService in project hbase by apache.
the class TestClientNoCluster method run.
@Override
public int run(String[] arg0) throws Exception {
  int errCode = 0;
  // TODO: Make command options.
  // How many servers to fake.
  final int servers = 1;
  // How many regions to put on the faked servers.
  final int regions = 100000;
  // How many 'keys' in the faked regions.
  final long namespaceSpan = 50000000;
  // How long to pause after doing a put; make this long if you want to fake a struggling
  // server.
  final long multiPause = 0;
  // Check that the args make basic sense.
  if ((namespaceSpan < regions) || (regions < servers)) {
    throw new IllegalArgumentException("namespaceSpan=" + namespaceSpan + " must be > regions="
        + regions + " which must be > servers=" + servers);
  }
  // Put the many-servers, many-regions faking connection in place.
  getConf().set("hbase.client.connection.impl", ManyServersManyRegionsConnection.class.getName());
  // Use the simple kv registry rather than zk.
  getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName());
  // When to report failures. The default is to report the 10th. This setting means we'll see a
  // log line every time an exception is thrown -- usually RegionTooBusyException when we have
  // more than hbase.test.multi.too.many requests outstanding at any time.
  getConf().setInt("hbase.client.start.log.errors.counter", 0);
  // Ugly, but this is the only way to pass configs into the ManyServersManyRegionsConnection class.
  getConf().setInt("hbase.test.regions", regions);
  getConf().setLong("hbase.test.namespace.span", namespaceSpan);
  getConf().setLong("hbase.test.servers", servers);
  getConf().set("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE));
  getConf().setLong("hbase.test.multi.pause.when.done", multiPause);
  // Let there be ten outstanding requests at a time before we throw RegionTooBusyException.
  getConf().setInt("hbase.test.multi.too.many", 10);
  final int clients = 2;
  // Have them all share the same connection so they all share the same instance of
  // ManyServersManyRegionsConnection, so I can keep an eye on how many requests hit each server.
  final ExecutorService pool = Executors.newCachedThreadPool(Threads.getNamedThreadFactory("p"));
  // Executors.newFixedThreadPool(servers * 10, Threads.getNamedThreadFactory("p"));
  // Share a connection so I can keep counts in the 'server' on concurrency.
  final Connection sharedConnection = ConnectionFactory.createConnection(getConf());
  try {
    Thread[] ts = new Thread[clients];
    for (int j = 0; j < ts.length; j++) {
      final int id = j;
      ts[j] = new Thread("" + j) {
        final Configuration c = getConf();

        @Override
        public void run() {
          try {
            cycle(id, c, sharedConnection);
          } catch (IOException e) {
            e.printStackTrace();
          }
        }
      };
      ts[j].start();
    }
    for (int j = 0; j < ts.length; j++) {
      ts[j].join();
    }
  } finally {
    sharedConnection.close();
  }
  return errCode;
}
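The HBase-specific pieces aside, the concurrency scaffolding is: build a cached pool with a named thread factory, start a handful of clients that share one connection, and wait for them all to finish. A rough JDK-only sketch of that shape, with cycle() and the shared Connection replaced by placeholders:

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

public class SharedResourceLoad {
  public static void main(String[] args) throws Exception {
    // Named threads make it easy to attribute activity in logs and thread dumps.
    AtomicInteger counter = new AtomicInteger();
    ThreadFactory named = r -> new Thread(r, "p-" + counter.incrementAndGet());
    ExecutorService pool = Executors.newCachedThreadPool(named);

    final Object sharedConnection = new Object(); // stand-in for the shared Connection
    int clients = 2;
    CountDownLatch done = new CountDownLatch(clients);
    for (int id = 0; id < clients; id++) {
      final int clientId = id;
      pool.execute(() -> {
        try {
          cycle(clientId, sharedConnection);      // placeholder for the per-client work
        } finally {
          done.countDown();
        }
      });
    }
    done.await();                                 // same effect as joining the client threads
    pool.shutdown();
  }

  static void cycle(int id, Object conn) {
    System.out.println(Thread.currentThread().getName() + " running client " + id);
  }
}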
use of java.util.concurrent.ExecutorService in project hbase by apache.
the class MobUtils method doMobCompaction.
/**
 * Performs the mob compaction.
 * @param conf the Configuration
 * @param fs the file system
 * @param tableName the table to compact
 * @param hcd the column descriptor
 * @param pool the thread pool
 * @param allFiles whether to include all mob files in the compaction
 */
public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
    HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
    throws IOException {
  String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
      PartitionedMobCompactor.class.getName());
  // instantiate the mob compactor.
  MobCompactor compactor = null;
  try {
    compactor = ReflectionUtils.instantiateWithCustomCtor(className,
        new Class[] { Configuration.class, FileSystem.class, TableName.class,
            HColumnDescriptor.class, ExecutorService.class },
        new Object[] { conf, fs, tableName, hcd, pool });
  } catch (Exception e) {
    throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
  }
  // with major compaction in mob-enabled column.
  try {
    lock.acquire();
    compactor.compact(allFiles);
  } catch (Exception e) {
    LOG.error("Failed to compact the mob files for the column " + hcd.getNameAsString()
        + " in the table " + tableName.getNameAsString(), e);
  } finally {
    lock.release();
  }
}
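ReflectionUtils.instantiateWithCustomCtor is an HBase helper; the underlying mechanism is an ordinary reflective constructor lookup with an explicit parameter list. A hedged, JDK-only sketch of that step (the Compactor interface and NoopCompactor class are illustrative stand-ins, not the HBase types):

import java.lang.reflect.Constructor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ReflectiveCtorSketch {
  interface Compactor { void compact(boolean allFiles); }

  public static class NoopCompactor implements Compactor {
    public NoopCompactor(ExecutorService pool) { }
    @Override public void compact(boolean allFiles) {
      System.out.println("compact allFiles=" + allFiles);
    }
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    String className = NoopCompactor.class.getName();   // normally read from configuration
    // Look up the constructor by its exact parameter types, then invoke it.
    Class<?> clazz = Class.forName(className);
    Constructor<?> ctor = clazz.getConstructor(ExecutorService.class);
    Compactor compactor = (Compactor) ctor.newInstance(pool);
    compactor.compact(true);
    pool.shutdown();
  }
}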
use of java.util.concurrent.ExecutorService in project hive by apache.
the class HiveMetaStoreChecker method checkPartitionDirs.
/**
 * Assume that depth is 2, i.e., partition columns are a and b
 * tblPath/a=1              => throw exception
 * tblPath/a=1/file         => throw exception
 * tblPath/a=1/b=2/file     => return a=1/b=2
 * tblPath/a=1/b=2/c=3      => return a=1/b=2
 * tblPath/a=1/b=2/c=3/file => return a=1/b=2
 *
 * @param basePath
 *          Start directory
 * @param allDirs
 *          This set will contain the leaf paths at the end.
 * @param maxDepth
 *          Specify how deep the search goes.
 * @throws IOException
 *           Thrown if we can't get lists from the fs.
 * @throws HiveException
 */
private void checkPartitionDirs(Path basePath, Set<Path> allDirs, int maxDepth)
    throws IOException, HiveException {
  // Here we just reuse the THREAD_COUNT configuration for
  // METASTORE_FS_HANDLER_THREADS_COUNT since this results in better performance.
  // The missing partitions discovered here are later added by the metastore using a
  // thread pool of size METASTORE_FS_HANDLER_THREADS_COUNT. If we used a different pool size
  // here, the smaller of the two pools would become the bottleneck.
  int poolSize = conf.getInt(ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT.varname, 15);
  ExecutorService executor;
  if (poolSize <= 1) {
    LOG.debug("Using single-threaded version of MSCK-GetPaths");
    executor = MoreExecutors.sameThreadExecutor();
  } else {
    LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of threads " + poolSize);
    ThreadFactory threadFactory =
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build();
    executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(poolSize, threadFactory);
  }
  checkPartitionDirs(executor, basePath, allDirs, basePath.getFileSystem(conf), maxDepth);
  executor.shutdown();
}
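The branch picks a same-thread executor for poolSize <= 1 and a daemon-threaded fixed pool otherwise; Guava's MoreExecutors.sameThreadExecutor() simply runs each submitted task on the calling thread. A rough JDK-only sketch of that selection logic, with a simplified stand-in for the Guava direct executor:

import java.util.Collections;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

public class PoolSelection {
  /** Simplified same-thread ExecutorService: execute() runs the task inline. */
  static class DirectExecutorService extends AbstractExecutorService {
    private volatile boolean shutdown;
    @Override public void execute(Runnable command) { command.run(); }
    @Override public void shutdown() { shutdown = true; }
    @Override public List<Runnable> shutdownNow() { shutdown = true; return Collections.emptyList(); }
    @Override public boolean isShutdown() { return shutdown; }
    @Override public boolean isTerminated() { return shutdown; }
    @Override public boolean awaitTermination(long t, TimeUnit u) { return shutdown; }
  }

  static ExecutorService newExecutor(int poolSize) {
    if (poolSize <= 1) {
      return new DirectExecutorService();          // caller's thread does the work
    }
    AtomicInteger n = new AtomicInteger();
    ThreadFactory tf = r -> {
      Thread th = new Thread(r, "MSCK-GetPaths-" + n.getAndIncrement());
      th.setDaemon(true);                          // daemon threads, as in the Hive code
      return th;
    };
    return Executors.newFixedThreadPool(poolSize, tf);
  }

  public static void main(String[] args) throws Exception {
    ExecutorService executor = newExecutor(1);
    Future<String> f = executor.submit(() -> Thread.currentThread().getName());
    System.out.println("ran on: " + f.get());      // prints "main" for the direct executor
    executor.shutdown();
  }
}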
use of java.util.concurrent.ExecutorService in project hbase by apache.
the class TestDistributedLogSplitting method testDelayedDeleteOnFailure.
@Test(timeout = 30000)
public void testDelayedDeleteOnFailure() throws Exception {
  LOG.info("testDelayedDeleteOnFailure");
  startCluster(1);
  final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path logDir = new Path(new Path(FSUtils.getRootDir(conf), HConstants.HREGION_LOGDIR_NAME),
      ServerName.valueOf("x", 1, 1).toString());
  fs.mkdirs(logDir);
  ExecutorService executor = null;
  try {
    final Path corruptedLogFile = new Path(logDir, "x");
    FSDataOutputStream out;
    out = fs.create(corruptedLogFile);
    out.write(0);
    out.write(Bytes.toBytes("corrupted bytes"));
    out.close();
    ZKSplitLogManagerCoordination coordination =
        (ZKSplitLogManagerCoordination) ((BaseCoordinatedStateManager) master
            .getCoordinatedStateManager()).getSplitLogManagerCoordination();
    coordination.setIgnoreDeleteForTesting(true);
    executor = Executors.newSingleThreadExecutor();
    Runnable runnable = new Runnable() {
      @Override
      public void run() {
        try {
          // the logDir is a fake, corrupted one, so the split log worker
          // will finish it quickly with an error, and this call will fail and throw
          // an IOException.
          slm.splitLogDistributed(logDir);
        } catch (IOException ioe) {
          try {
            assertTrue(fs.exists(corruptedLogFile));
            // this call will block waiting for the task to be removed from the
            // tasks map, which is not going to happen since ignoreZKDeleteForTesting
            // is set to true, until it is interrupted.
            slm.splitLogDistributed(logDir);
          } catch (IOException e) {
            assertTrue(Thread.currentThread().isInterrupted());
            return;
          }
          fail("did not get the expected IOException from the 2nd call");
        }
        fail("did not get the expected IOException from the 1st call");
      }
    };
    Future<?> result = executor.submit(runnable);
    try {
      result.get(2000, TimeUnit.MILLISECONDS);
    } catch (TimeoutException te) {
      // it is ok, expected.
    }
    waitForCounter(tot_mgr_wait_for_zk_delete, 0, 1, 10000);
    executor.shutdownNow();
    executor = null;
    // make sure the runnable finished with no exception thrown.
    result.get();
  } finally {
    if (executor != null) {
      // interrupt the thread in case the test fails in the middle.
      // it has no effect if the thread is already terminated.
      executor.shutdownNow();
    }
    fs.delete(logDir, true);
  }
}
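Stripped of the HBase specifics, the test's ExecutorService scaffolding is a reusable pattern: submit a task that is expected to block, confirm via a timed Future.get() that it has not finished, interrupt it with shutdownNow(), then call get() again to check it exited cleanly. A minimal sketch of just that scaffolding:

import java.util.concurrent.*;

public class InterruptBlockedTask {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<?> result = executor.submit(() -> {
      try {
        // Stand-in for the call that blocks until it is interrupted.
        new CountDownLatch(1).await();
      } catch (InterruptedException expected) {
        // shutdownNow() interrupts the worker; exit quietly.
      }
    });
    try {
      result.get(2000, TimeUnit.MILLISECONDS);   // should time out while the task blocks
      throw new AssertionError("task finished unexpectedly");
    } catch (TimeoutException te) {
      // expected: the task is still blocked
    }
    executor.shutdownNow();                      // interrupts the blocked worker thread
    result.get();                                // completes once the task returns after the interrupt
    System.out.println("task handled the interrupt and finished");
  }
}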