Use of java.util.concurrent.ExecutorCompletionService in project hbase by Apache.
The class HStore, method openStoreFiles.
private List<StoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException {
  if (files == null || files.isEmpty()) {
    return new ArrayList<>();
  }
  // initialize the thread pool for opening store files in parallel.
  ThreadPoolExecutor storeFileOpenerThreadPool =
      this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.getColumnFamilyName());
  CompletionService<StoreFile> completionService =
      new ExecutorCompletionService<>(storeFileOpenerThreadPool);
  int totalValidStoreFile = 0;
  for (final StoreFileInfo storeFileInfo : files) {
    // open each store file in parallel
    completionService.submit(new Callable<StoreFile>() {
      @Override
      public StoreFile call() throws IOException {
        StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
        return storeFile;
      }
    });
    totalValidStoreFile++;
  }
  ArrayList<StoreFile> results = new ArrayList<>(files.size());
  IOException ioe = null;
  try {
    for (int i = 0; i < totalValidStoreFile; i++) {
      try {
        Future<StoreFile> future = completionService.take();
        StoreFile storeFile = future.get();
        if (storeFile != null) {
          long length = storeFile.getReader().length();
          this.storeSize += length;
          this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
          if (LOG.isDebugEnabled()) {
            LOG.debug("loaded " + storeFile.toStringDetailed());
          }
          results.add(storeFile);
        }
      } catch (InterruptedException e) {
        if (ioe == null)
          ioe = new InterruptedIOException(e.getMessage());
      } catch (ExecutionException e) {
        if (ioe == null)
          ioe = new IOException(e.getCause());
      }
    }
  } finally {
    storeFileOpenerThreadPool.shutdownNow();
  }
  if (ioe != null) {
    // close StoreFile readers
    boolean evictOnClose = cacheConf != null ? cacheConf.shouldEvictOnClose() : true;
    for (StoreFile file : results) {
      try {
        if (file != null)
          file.closeReader(evictOnClose);
      } catch (IOException e) {
        LOG.warn(e.getMessage());
      }
    }
    throw ioe;
  }
  return results;
}
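The shape of openStoreFiles recurs in every snippet on this page: submit one Callable per unit of work, call take() exactly once per submitted task, and shut the pool down in a finally block. A minimal, self-contained sketch of that pattern; all class and method names below are hypothetical, not HBase API:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelOpenSketch {

  // hypothetical stand-in for an expensive per-item open (e.g. a store file)
  private static String open(String name) {
    return "reader:" + name;
  }

  public static List<String> openAll(List<String> names) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<String> cs = new ExecutorCompletionService<>(pool);
    for (String name : names) {
      cs.submit(() -> open(name)); // one task per item, as in openStoreFiles
    }
    List<String> results = new ArrayList<>(names.size());
    Exception first = null;
    try {
      // take() blocks for the next *completed* task; one take() per submit()
      for (int i = 0; i < names.size(); i++) {
        try {
          results.add(cs.take().get());
        } catch (ExecutionException e) {
          if (first == null) {
            first = e; // remember the first failure, keep draining the rest
          }
        }
      }
    } finally {
      pool.shutdownNow(); // mirrors the finally block in openStoreFiles
    }
    if (first != null) {
      // a real implementation would roll back partial results here,
      // the way openStoreFiles closes the readers it already opened
      throw first;
    }
    return results;
  }

  public static void main(String[] args) throws Exception {
    System.out.println(openAll(List.of("a", "b", "c")));
  }
}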
Use of java.util.concurrent.ExecutorCompletionService in project hbase by Apache.
The class TestIdReadWriteLock, method testMultipleClients.
@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i) {
      ecs.submit(new IdLockTestThread("client_" + i));
    }
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
    LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
    ReferenceType refType = idLock.getReferenceType();
    switch (refType) {
      case WEAK:
        // make sure the entry pool will be cleared after GC and purge call
        assertEquals(0, entryPoolSize);
        break;
      case SOFT:
        // make sure the entry pool won't be cleared when JVM memory is enough,
        // even after GC and purge call
        assertEquals(NUM_IDS, entryPoolSize);
        break;
      default:
        break;
    }
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
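Worth noting here: ecs.take() returns futures in completion order, not submission order, so a failed worker surfaces as soon as it finishes instead of waiting behind slower, earlier-submitted tasks. A small standalone demonstration, with hypothetical names not taken from the test:

import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CompletionOrderSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    try {
      ExecutorCompletionService<String> ecs = new ExecutorCompletionService<>(pool);
      // the slow task is submitted first, the fast one second
      ecs.submit(() -> {
        Thread.sleep(500);
        return "slow";
      });
      ecs.submit(() -> "fast");
      // take() yields "fast" first: completion order, not submission order
      for (int i = 0; i < 2; i++) {
        System.out.println(ecs.take().get());
      }
    } finally {
      pool.shutdown();
      pool.awaitTermination(5, TimeUnit.SECONDS);
    }
  }
}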
Use of java.util.concurrent.ExecutorCompletionService in project hbase by Apache.
The class ModifyRegionUtils, method createRegions.
/**
 * Create a new set of regions on the specified file-system.
 * NOTE: you should add the regions to hbase:meta after this operation.
 *
 * @param exec Thread Pool Executor
 * @param conf {@link Configuration}
 * @param rootDir Root directory for HBase instance
 * @param hTableDescriptor description of the table
 * @param newRegions {@link HRegionInfo} that describes the regions to create
 * @param task {@link RegionFillTask} custom code to populate region after creation
 * @throws IOException if region creation fails
 */
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
    final Configuration conf, final Path rootDir, final HTableDescriptor hTableDescriptor,
    final HRegionInfo[] newRegions, final RegionFillTask task) throws IOException {
  if (newRegions == null)
    return null;
  int regionNumber = newRegions.length;
  CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
  List<HRegionInfo> regionInfos = new ArrayList<>();
  for (final HRegionInfo newRegion : newRegions) {
    completionService.submit(new Callable<HRegionInfo>() {
      @Override
      public HRegionInfo call() throws IOException {
        return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
      }
    });
  }
  try {
    // wait for all regions to finish creation
    for (int i = 0; i < regionNumber; i++) {
      regionInfos.add(completionService.take().get());
    }
  } catch (InterruptedException e) {
    LOG.error("Caught " + e + " during region creation");
    throw new InterruptedIOException(e.getMessage());
  } catch (ExecutionException e) {
    throw new IOException(e);
  }
  return regionInfos;
}
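Unlike openStoreFiles above, this method wraps the ExecutionException itself in the thrown IOException, so the Callable's original IOException survives only as a nested cause. HRegion.doClose below instead unwraps the cause and rethrows it directly. A sketch of that unwrapping idiom, with illustrative names that are not HBase API:

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class UnwrapSketch {

  // hypothetical task that fails the way createRegion might
  static String openRegion() throws IOException {
    throw new IOException("simulated createRegion failure");
  }

  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    ExecutorCompletionService<String> cs = new ExecutorCompletionService<>(pool);
    cs.submit(UnwrapSketch::openRegion);
    try {
      cs.take().get();
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      IOException io = (cause instanceof IOException)
          ? (IOException) cause      // preserve the task's original exception type
          : new IOException(cause);  // wrap anything unexpected
      System.out.println("caught: " + io);
    } finally {
      pool.shutdownNow();
    }
  }
}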
Use of java.util.concurrent.ExecutorCompletionService in project hbase by Apache.
The class TestHFileBlock, method testConcurrentReadingInternals.
protected void testConcurrentReadingInternals()
    throws IOException, InterruptedException, ExecutionException {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<>();
    List<BlockType> types = new ArrayList<>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(true)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTag)
        .withCompression(compressAlgo)
        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, fileSize, meta);
    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr, offsets, types, fileSize));
    }
    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1) + " reader threads finished successfully (algo=" + compressAlgo + ")");
      }
    }
    is.close();
  }
}
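One caveat a reader might notice: exec is declared as Executor, an interface with no shutdown() method, and the test never shuts the pool down, leaving the reader threads to die with the JVM. That is presumably harmless in a test harness, but a more defensive shape of the same pattern (hypothetical names) would be:

import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ReaderPoolSketch {
  public static void main(String[] args) throws Exception {
    // declared as ExecutorService (not Executor) so it can be shut down
    ExecutorService exec = Executors.newFixedThreadPool(4);
    try {
      ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
      for (int i = 0; i < 4; i++) {
        ecs.submit(() -> {
          // stand-in for BlockReaderThread's read-and-verify work
          return Boolean.TRUE;
        });
      }
      for (int i = 0; i < 4; i++) {
        if (!ecs.take().get()) {
          throw new AssertionError("reader failed");
        }
      }
    } finally {
      exec.shutdown();
      exec.awaitTermination(5, TimeUnit.SECONDS);
    }
  }
}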
Use of java.util.concurrent.ExecutorCompletionService in project hbase by Apache.
The class HRegion, method doClose.
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK_EXCEPTION_PATH",
    justification = "I think FindBugs is confused")
private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status)
    throws IOException {
  if (isClosed()) {
    LOG.warn("Region " + this + " already closed");
    return null;
  }
  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor pre-close hooks");
    this.coprocessorHost.preClose(abort);
  }
  status.setStatus("Disabling compacts and flushes for region");
  boolean canFlush = true;
  synchronized (writestate) {
    // Disable compacting and flushing by background threads for this
    // region.
    canFlush = !writestate.readOnly;
    writestate.writesEnabled = false;
    LOG.debug("Closing " + this + ": disabling compactions & flushes");
    waitForFlushesAndCompactions();
  }
  // If not aborting, pre-flush the region before taking the close lock.
  if (!abort && worthPreFlushing() && canFlush) {
    status.setStatus("Pre-flushing region before close");
    LOG.info("Running close preflush of " + getRegionInfo().getRegionNameAsString());
    try {
      internalFlushcache(status);
    } catch (IOException ioe) {
      // Failed to flush the region. Keep going.
      status.setStatus("Failed pre-flush " + this + "; " + ioe.getMessage());
    }
  }
  if (timeoutForWriteLock == null || timeoutForWriteLock == Long.MAX_VALUE) {
    // block waiting for the lock for closing
    // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine
    lock.writeLock().lock();
  } else {
    try {
      boolean succeed = lock.writeLock().tryLock(timeoutForWriteLock, TimeUnit.SECONDS);
      if (!succeed) {
        throw new IOException("Failed to get write lock when closing region");
      }
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
  }
  this.closing.set(true);
  status.setStatus("Disabling writes for close");
  try {
    if (this.isClosed()) {
      status.abort("Already got closed by another process");
      // SplitTransaction handles the null
      return null;
    }
    LOG.debug("Updates disabled for region " + this);
    // Don't flush the cache if we are aborting
    if (!abort && canFlush) {
      int failedFlushCount = 0;
      int flushCount = 0;
      long tmp = 0;
      long remainingSize = this.memstoreDataSize.get();
      while (remainingSize > 0) {
        try {
          internalFlushcache(status);
          if (flushCount > 0) {
            LOG.info("Running extra flush, " + flushCount + " (carrying snapshot?) " + this);
          }
          flushCount++;
          tmp = this.memstoreDataSize.get();
          if (tmp >= remainingSize) {
            failedFlushCount++;
          }
          remainingSize = tmp;
          if (failedFlushCount > 5) {
            // give up and throw, so we do not lose data
            throw new DroppedSnapshotException("Failed clearing memory after " + flushCount
                + " attempts on region: " + Bytes.toStringBinary(getRegionInfo().getRegionName()));
          }
        } catch (IOException ioe) {
          status.setStatus("Failed flush " + this + ", putting online again");
          synchronized (writestate) {
            writestate.writesEnabled = true;
          }
          // Have to throw to upper layers. I can't abort server from here.
          throw ioe;
        }
      }
    }
    Map<byte[], List<StoreFile>> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    if (!stores.isEmpty()) {
      // initialize the thread pool for closing stores in parallel.
      ThreadPoolExecutor storeCloserThreadPool =
          getStoreOpenAndCloseThreadPool("StoreCloserThread-" + getRegionInfo().getRegionNameAsString());
      CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
          new ExecutorCompletionService<>(storeCloserThreadPool);
      // close each store in parallel
      for (final Store store : stores.values()) {
        MemstoreSize flushableSize = store.getSizeToFlush();
        if (!(abort || flushableSize.getDataSize() == 0 || writestate.readOnly)) {
          if (getRegionServerServices() != null) {
            getRegionServerServices().abort("Assertion failed while closing store "
                + getRegionInfo().getRegionNameAsString() + " " + store
                + ". flushableSize expected=0, actual= " + flushableSize
                + ". Current memstoreSize=" + getMemstoreSize() + ". Maybe a coprocessor "
                + "operation failed and left the memstore in a partially updated state.", null);
          }
        }
        completionService.submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
          @Override
          public Pair<byte[], Collection<StoreFile>> call() throws IOException {
            return new Pair<>(store.getFamily().getName(), store.close());
          }
        });
      }
      try {
        for (int i = 0; i < stores.size(); i++) {
          Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
          Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
          List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
          if (familyFiles == null) {
            familyFiles = new ArrayList<>();
            result.put(storeFiles.getFirst(), familyFiles);
          }
          familyFiles.addAll(storeFiles.getSecond());
        }
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
          throw (IOException) cause;
        }
        throw new IOException(cause);
      } finally {
        storeCloserThreadPool.shutdownNow();
      }
    }
    status.setStatus("Writing region close event to WAL");
    if (!abort && wal != null && getRegionServerServices() != null && !writestate.readOnly) {
      writeRegionCloseMarker(wal);
    }
    this.closed.set(true);
    if (!canFlush) {
      this.decrMemstoreSize(new MemstoreSize(memstoreDataSize.get(), getMemstoreHeapSize()));
    } else if (memstoreDataSize.get() != 0) {
      LOG.error("Memstore size is " + memstoreDataSize.get());
    }
    if (coprocessorHost != null) {
      status.setStatus("Running coprocessor post-close hooks");
      this.coprocessorHost.postClose(abort);
    }
    if (this.metricsRegion != null) {
      this.metricsRegion.close();
    }
    if (this.metricsRegionWrapper != null) {
      Closeables.closeQuietly(this.metricsRegionWrapper);
    }
    // stop the compacted hfile discharger
    if (this.compactedFileDischarger != null)
      this.compactedFileDischarger.cancel(true);
    status.markComplete("Closed");
    LOG.info("Closed " + this);
    return result;
  } finally {
    lock.writeLock().unlock();
  }
}
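The store-closing block in the middle of doClose adds one wrinkle to the CompletionService pattern seen so far: each task returns a keyed result (a Pair of family name and closed store files) and the consumer merges results into a TreeMap as they complete. A reduced, hypothetical sketch of that gather-into-map shape, with a stand-in class instead of HBase's Pair:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CloseStoresSketch {

  // hypothetical stand-in for Pair<byte[], Collection<StoreFile>>
  static final class Closed {
    final String family;
    final List<String> files;
    Closed(String family, List<String> files) {
      this.family = family;
      this.files = files;
    }
  }

  public static void main(String[] args) throws Exception {
    List<String> families = List.of("info", "meta", "raw");
    ExecutorService pool = Executors.newFixedThreadPool(families.size());
    CompletionService<Closed> cs = new ExecutorCompletionService<>(pool);
    for (final String family : families) {
      // one close task per store, as in doClose
      cs.submit(() -> new Closed(family, List.of(family + "-file-1")));
    }
    Map<String, List<String>> result = new TreeMap<>();
    try {
      for (int i = 0; i < families.size(); i++) {
        Closed c = cs.take().get();
        // completion order is arbitrary; the TreeMap restores a
        // deterministic key order, like the BYTES_COMPARATOR map above
        result.computeIfAbsent(c.family, k -> new ArrayList<>()).addAll(c.files);
      }
    } finally {
      pool.shutdownNow();
    }
    System.out.println(result); // {info=[info-file-1], meta=[meta-file-1], raw=[raw-file-1]}
  }
}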