Example 36 with ExecutorCompletionService

Use of java.util.concurrent.ExecutorCompletionService in the Apache HBase project.

From the class HStore, method openStoreFiles.

private List<StoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException {
    if (files == null || files.isEmpty()) {
        return new ArrayList<>();
    }
    // initialize the thread pool for opening store files in parallel.
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.getColumnFamilyName());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<>(storeFileOpenerThreadPool);
    int totalValidStoreFile = 0;
    for (final StoreFileInfo storeFileInfo : files) {
        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {

            @Override
            public StoreFile call() throws IOException {
                StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }
    ArrayList<StoreFile> results = new ArrayList<>(files.size());
    IOException ioe = null;
    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            try {
                Future<StoreFile> future = completionService.take();
                StoreFile storeFile = future.get();
                if (storeFile != null) {
                    long length = storeFile.getReader().length();
                    this.storeSize += length;
                    this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("loaded " + storeFile.toStringDetailed());
                    }
                    results.add(storeFile);
                }
            } catch (InterruptedException e) {
                if (ioe == null)
                    ioe = new InterruptedIOException(e.getMessage());
            } catch (ExecutionException e) {
                if (ioe == null)
                    ioe = new IOException(e.getCause());
            }
        }
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }
    if (ioe != null) {
        // close StoreFile readers
        boolean evictOnClose = cacheConf != null ? cacheConf.shouldEvictOnClose() : true;
        for (StoreFile file : results) {
            try {
                if (file != null)
                    file.closeReader(evictOnClose);
            } catch (IOException e) {
                LOG.warn(e.getMessage());
            }
        }
        throw ioe;
    }
    return results;
}
Also used: InterruptedIOException (java.io.InterruptedIOException), ArrayList (java.util.ArrayList), ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService), IOException (java.io.IOException), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), ExecutionException (java.util.concurrent.ExecutionException)
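
The method above is the canonical ExecutorCompletionService loop: submit one Callable per input, call take() exactly as many times as tasks were submitted (futures arrive in completion order, not submission order), and shut the pool down in a finally block. Below is a minimal standalone sketch of that pattern; every name in it is hypothetical and stands in for the HBase-specific pieces.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelOpenSketch {
    public static void main(String[] args) throws Exception {
        List<String> files = List.of("f1", "f2", "f3"); // hypothetical inputs
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<String> cs = new ExecutorCompletionService<>(pool);
        for (String f : files) {
            cs.submit(() -> "opened:" + f); // stand-in for createStoreFileAndReader
        }
        List<String> results = new ArrayList<>(files.size());
        try {
            // one take() per submitted task; futures surface as they complete
            for (int i = 0; i < files.size(); i++) {
                results.add(cs.take().get());
            }
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause());
        } finally {
            pool.shutdownNow(); // mirror the finally-block shutdown above
        }
        System.out.println(results);
    }
}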

Example 37 with ExecutorCompletionService

Use of java.util.concurrent.ExecutorCompletionService in the Apache HBase project.

From the class TestIdReadWriteLock, method testMultipleClients.

@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
        for (int i = 0; i < NUM_THREADS; ++i) ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
        LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
        ReferenceType refType = idLock.getReferenceType();
        switch(refType) {
            case WEAK:
                // make sure the entry pool will be cleared after GC and purge call
                assertEquals(0, entryPoolSize);
                break;
            case SOFT:
                // make sure the entry pool won't be cleared when JVM memory is enough
                // even after GC and purge call
                assertEquals(NUM_IDS, entryPoolSize);
                break;
            default:
                break;
        }
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}
Also used: ExecutorService (java.util.concurrent.ExecutorService), ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService), ReferenceType (org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType), Test (org.junit.Test)
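
Since take() blocks indefinitely, a test like this can hang if a worker deadlocks; the @Test timeout is the only safety net. An alternative is poll(timeout, unit), which returns null on timeout instead of blocking forever. A hedged sketch of that variant, with hypothetical names rather than the HBase test classes:

import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class BoundedWaitSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        try {
            ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
            int tasks = 4;
            for (int i = 0; i < tasks; i++) {
                ecs.submit(() -> Boolean.TRUE); // stand-in for IdLockTestThread
            }
            for (int i = 0; i < tasks; i++) {
                // bounded wait: null means no task completed within the window
                Future<Boolean> f = ecs.poll(10, TimeUnit.SECONDS);
                if (f == null) {
                    throw new IllegalStateException("worker did not finish in time");
                }
                if (!f.get()) {
                    throw new AssertionError("worker reported failure");
                }
            }
        } finally {
            exec.shutdown();
            exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
        }
    }
}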

Example 38 with ExecutorCompletionService

Use of java.util.concurrent.ExecutorCompletionService in the Apache HBase project.

From the class ModifyRegionUtils, method createRegions.

/**
   * Create new set of regions on the specified file-system.
   * NOTE: you should add the regions to hbase:meta after this operation.
   *
   * @param exec Thread Pool Executor
   * @param conf {@link Configuration}
   * @param rootDir Root directory for HBase instance
   * @param hTableDescriptor description of the table
   * @param newRegions {@link HRegionInfo} that describes the regions to create
   * @param task {@link RegionFillTask} custom code to populate region after creation
   * @throws IOException
   */
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec, final Configuration conf, final Path rootDir, final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions, final RegionFillTask task) throws IOException {
    if (newRegions == null)
        return null;
    int regionNumber = newRegions.length;
    CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
    List<HRegionInfo> regionInfos = new ArrayList<>();
    for (final HRegionInfo newRegion : newRegions) {
        completionService.submit(new Callable<HRegionInfo>() {

            @Override
            public HRegionInfo call() throws IOException {
                return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
            }
        });
    }
    try {
        // wait for all regions to finish creation
        for (int i = 0; i < regionNumber; i++) {
            regionInfos.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        LOG.error("Caught " + e + " during region creation");
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        throw new IOException(e);
    }
    return regionInfos;
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), InterruptedIOException (java.io.InterruptedIOException), ArrayList (java.util.ArrayList), ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException)
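
Note that createRegions rethrows on the first failed future and returns, so tasks still queued or running on the pool continue to completion. When that is undesirable, one option is to retain the futures returned by submit() and cancel the stragglers once a failure surfaces. The sketch below illustrates that idea under assumed requirements; it is not HBase's implementation, and all names are illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CancelOnFailureSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        CompletionService<Integer> cs = new ExecutorCompletionService<>(exec);
        List<Future<Integer>> submitted = new ArrayList<>();
        for (int i = 0; i < 8; i++) {
            final int n = i;
            // keep each future so stragglers can be cancelled on failure
            submitted.add(cs.submit(() -> {
                if (n == 3) throw new IllegalStateException("task " + n + " failed");
                return n;
            }));
        }
        List<Integer> done = new ArrayList<>();
        try {
            for (int i = 0; i < submitted.size(); i++) {
                done.add(cs.take().get());
            }
        } catch (ExecutionException e) {
            for (Future<Integer> f : submitted) {
                f.cancel(true); // interrupt running tasks, drop queued ones
            }
            System.out.println("failed: " + e.getCause().getMessage());
        } finally {
            exec.shutdownNow();
        }
        System.out.println("completed before failure: " + done);
    }
}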

Example 39 with ExecutorCompletionService

Use of java.util.concurrent.ExecutorCompletionService in the Apache HBase project.

From the class TestHFileBlock, method testConcurrentReadingInternals.

protected void testConcurrentReadingInternals() throws IOException, InterruptedException, ExecutionException {
    for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
        Random rand = defaultRandom();
        List<Long> offsets = new ArrayList<>();
        List<BlockType> types = new ArrayList<>();
        writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
        FSDataInputStream is = fs.open(path);
        long fileSize = fs.getFileStatus(path).getLen();
        HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).withCompression(compressAlgo).build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, fileSize, meta);
        Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
        for (int i = 0; i < NUM_READER_THREADS; ++i) {
            ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr, offsets, types, fileSize));
        }
        for (int i = 0; i < NUM_READER_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
            if (detailedLogging) {
                LOG.info(String.valueOf(i + 1) + " reader threads finished successfully (algo=" + compressAlgo + ")");
            }
        }
        is.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Compression (org.apache.hadoop.hbase.io.compress.Compression), ArrayList (java.util.ArrayList), ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService), Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm), Executor (java.util.concurrent.Executor), Random (java.util.Random), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)
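
One detail worth flagging: the pool here is typed as Executor, which exposes no shutdown method, so a fresh fixed pool is created for every compression algorithm in the loop and never terminated. Below is a sketch of the same loop shape that keeps the ExecutorService type and shuts the pool down per iteration; the names are hypothetical stand-ins for the HBase test pieces.

import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PerIterationPoolSketch {
    public static void main(String[] args) throws Exception {
        int readers = 3; // stand-in for NUM_READER_THREADS
        for (String algo : new String[] { "NONE", "GZ" }) { // stand-in for COMPRESSION_ALGORITHMS
            ExecutorService exec = Executors.newFixedThreadPool(readers);
            try {
                ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
                for (int i = 0; i < readers; i++) {
                    ecs.submit(() -> Boolean.TRUE); // stand-in for BlockReaderThread
                }
                for (int i = 0; i < readers; i++) {
                    if (!ecs.take().get()) {
                        throw new AssertionError("reader failed (algo=" + algo + ")");
                    }
                }
            } finally {
                // declaring the pool as ExecutorService makes per-iteration shutdown possible
                exec.shutdown();
                exec.awaitTermination(10, TimeUnit.SECONDS);
            }
        }
    }
}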

Example 40 with ExecutorCompletionService

Use of java.util.concurrent.ExecutorCompletionService in the Apache HBase project.

From the class HRegion, method doClose.

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK_EXCEPTION_PATH", justification = "I think FindBugs is confused")
private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status) throws IOException {
    if (isClosed()) {
        LOG.warn("Region " + this + " already closed");
        return null;
    }
    if (coprocessorHost != null) {
        status.setStatus("Running coprocessor pre-close hooks");
        this.coprocessorHost.preClose(abort);
    }
    status.setStatus("Disabling compacts and flushes for region");
    boolean canFlush = true;
    synchronized (writestate) {
        // Disable compacting and flushing by background threads for this
        // region.
        canFlush = !writestate.readOnly;
        writestate.writesEnabled = false;
        LOG.debug("Closing " + this + ": disabling compactions & flushes");
        waitForFlushesAndCompactions();
    }
    // If we were not just flushing, is it worth doing a preflush... one which
    // flushes the cache but under the close flag?
    if (!abort && worthPreFlushing() && canFlush) {
        status.setStatus("Pre-flushing region before close");
        LOG.info("Running close preflush of " + getRegionInfo().getRegionNameAsString());
        try {
            internalFlushcache(status);
        } catch (IOException ioe) {
            // Failed to flush the region. Keep going.
            status.setStatus("Failed pre-flush " + this + "; " + ioe.getMessage());
        }
    }
    if (timeoutForWriteLock == null || timeoutForWriteLock == Long.MAX_VALUE) {
        // block waiting for the lock for closing
        // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine
        lock.writeLock().lock();
    } else {
        try {
            boolean succeed = lock.writeLock().tryLock(timeoutForWriteLock, TimeUnit.SECONDS);
            if (!succeed) {
                throw new IOException("Failed to get write lock when closing region");
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
    this.closing.set(true);
    status.setStatus("Disabling writes for close");
    try {
        if (this.isClosed()) {
            status.abort("Already got closed by another process");
            // SplitTransaction handles the null
            return null;
        }
        LOG.debug("Updates disabled for region " + this);
        // Don't flush the cache if we are aborting
        if (!abort && canFlush) {
            int failedFlushCount = 0;
            int flushCount = 0;
            long tmp = 0;
            long remainingSize = this.memstoreDataSize.get();
            while (remainingSize > 0) {
                try {
                    internalFlushcache(status);
                    if (flushCount > 0) {
                        LOG.info("Running extra flush, " + flushCount + " (carrying snapshot?) " + this);
                    }
                    flushCount++;
                    tmp = this.memstoreDataSize.get();
                    if (tmp >= remainingSize) {
                        failedFlushCount++;
                    }
                    remainingSize = tmp;
                    if (failedFlushCount > 5) {
                        // If we failed 5 times and are unable to clear memory, abort
                        // so we do not lose data
                        throw new DroppedSnapshotException("Failed clearing memory after " + flushCount + " attempts on region: " + Bytes.toStringBinary(getRegionInfo().getRegionName()));
                    }
                } catch (IOException ioe) {
                    status.setStatus("Failed flush " + this + ", putting online again");
                    synchronized (writestate) {
                        writestate.writesEnabled = true;
                    }
                    // Have to throw to upper layers.  I can't abort server from here.
                    throw ioe;
                }
            }
        }
        Map<byte[], List<StoreFile>> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        if (!stores.isEmpty()) {
            // initialize the thread pool for closing stores in parallel.
            ThreadPoolExecutor storeCloserThreadPool = getStoreOpenAndCloseThreadPool("StoreCloserThread-" + getRegionInfo().getRegionNameAsString());
            CompletionService<Pair<byte[], Collection<StoreFile>>> completionService = new ExecutorCompletionService<>(storeCloserThreadPool);
            // close each store in parallel
            for (final Store store : stores.values()) {
                MemstoreSize flushableSize = store.getSizeToFlush();
                if (!(abort || flushableSize.getDataSize() == 0 || writestate.readOnly)) {
                    if (getRegionServerServices() != null) {
                        getRegionServerServices().abort("Assertion failed while closing store " + getRegionInfo().getRegionNameAsString() + " " + store + ". flushableSize expected=0, actual= " + flushableSize + ". Current memstoreSize=" + getMemstoreSize() + ". Maybe a coprocessor " + "operation failed and left the memstore in a partially updated state.", null);
                    }
                }
                completionService.submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {

                    @Override
                    public Pair<byte[], Collection<StoreFile>> call() throws IOException {
                        return new Pair<>(store.getFamily().getName(), store.close());
                    }
                });
            }
            try {
                for (int i = 0; i < stores.size(); i++) {
                    Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
                    Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
                    List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
                    if (familyFiles == null) {
                        familyFiles = new ArrayList<>();
                        result.put(storeFiles.getFirst(), familyFiles);
                    }
                    familyFiles.addAll(storeFiles.getSecond());
                }
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                if (cause instanceof IOException) {
                    throw (IOException) cause;
                }
                throw new IOException(cause);
            } finally {
                storeCloserThreadPool.shutdownNow();
            }
        }
        status.setStatus("Writing region close event to WAL");
        if (!abort && wal != null && getRegionServerServices() != null && !writestate.readOnly) {
            writeRegionCloseMarker(wal);
        }
        this.closed.set(true);
        if (!canFlush) {
            this.decrMemstoreSize(new MemstoreSize(memstoreDataSize.get(), getMemstoreHeapSize()));
        } else if (memstoreDataSize.get() != 0) {
            LOG.error("Memstore size is " + memstoreDataSize.get());
        }
        if (coprocessorHost != null) {
            status.setStatus("Running coprocessor post-close hooks");
            this.coprocessorHost.postClose(abort);
        }
        if (this.metricsRegion != null) {
            this.metricsRegion.close();
        }
        if (this.metricsRegionWrapper != null) {
            Closeables.closeQuietly(this.metricsRegionWrapper);
        }
        // stop the Compacted hfile discharger
        if (this.compactedFileDischarger != null)
            this.compactedFileDischarger.cancel(true);
        status.markComplete("Closed");
        LOG.info("Closed " + this);
        return result;
    } finally {
        lock.writeLock().unlock();
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException), ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService), ArrayList (java.util.ArrayList), AbstractList (java.util.AbstractList), List (java.util.List), ExecutionException (java.util.concurrent.ExecutionException), Pair (org.apache.hadoop.hbase.util.Pair), DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException), TreeMap (java.util.TreeMap), Collection (java.util.Collection), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
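
Because take() hands back futures in completion order, doClose tags each store's result with its column family name and reassembles the map afterwards. Below is a distilled sketch of that keyed-result pattern, using java.util's Map.Entry where HBase uses its own Pair class; the data and names are illustrative.

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class KeyedResultsSketch {
    public static void main(String[] args) throws Exception {
        Map<String, List<String>> stores = Map.of(
                "cf1", List.of("a", "b"), // hypothetical store contents
                "cf2", List.of("c"));
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<Map.Entry<String, List<String>>> cs =
                new ExecutorCompletionService<>(pool);
        for (Map.Entry<String, List<String>> store : stores.entrySet()) {
            // tag each task's result with its key, since completion order is arbitrary
            cs.submit(() -> new SimpleImmutableEntry<>(store.getKey(), store.getValue()));
        }
        Map<String, List<String>> result = new HashMap<>();
        try {
            for (int i = 0; i < stores.size(); i++) {
                Map.Entry<String, List<String>> e = cs.take().get();
                result.put(e.getKey(), e.getValue());
            }
        } finally {
            pool.shutdownNow();
        }
        System.out.println(result);
    }
}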

Aggregations

ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService): 69
ArrayList (java.util.ArrayList): 31
ExecutorService (java.util.concurrent.ExecutorService): 30
ExecutionException (java.util.concurrent.ExecutionException): 27
IOException (java.io.IOException): 24
Test (org.junit.Test): 21
Future (java.util.concurrent.Future): 18
List (java.util.List): 10
InterruptedIOException (java.io.InterruptedIOException): 9
Path (org.apache.hadoop.fs.Path): 8
KieSession (org.kie.api.runtime.KieSession): 8
Callable (java.util.concurrent.Callable): 7
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 7
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 6
EntryPoint (org.kie.api.runtime.rule.EntryPoint): 6
HashMap (java.util.HashMap): 4
Executor (java.util.concurrent.Executor): 4
TimeoutException (java.util.concurrent.TimeoutException): 4
KieBase (org.kie.api.KieBase): 4
Random (java.util.Random): 3