
Example 1 with SimpleThreadPool

Use of org.apache.accumulo.core.util.SimpleThreadPool in the Apache Accumulo project.

From the class LoadFiles, method getThreadPool.

private static synchronized ExecutorService getThreadPool(Master master) {
    if (threadPool == null) {
        int threadPoolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
        ThreadPoolExecutor pool = new SimpleThreadPool(threadPoolSize, "bulk import");
        pool.allowCoreThreadTimeOut(true);
        threadPool = new TraceExecutorService(pool);
    }
    return threadPool;
}
Also used: TraceExecutorService (org.apache.htrace.wrappers.TraceExecutorService), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool)
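
This first example lazily creates one shared pool for bulk imports, turns on core-thread timeout so idle threads are reclaimed, and wraps the pool in a TraceExecutorService for tracing. Below is a minimal sketch of the same lazy-singleton idea using only java.util.concurrent; the pool-size constant and class name are hypothetical stand-ins for the Accumulo configuration lookup and SimpleThreadPool, not Accumulo code.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LazyBulkImportPool {

    // hypothetical stand-in for Property.MASTER_BULK_THREADPOOL_SIZE
    private static final int POOL_SIZE = 4;

    private static ExecutorService threadPool;

    // Lazily create a single shared pool; synchronized so concurrent callers get the same instance.
    public static synchronized ExecutorService getThreadPool() {
        if (threadPool == null) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(POOL_SIZE, POOL_SIZE, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            // Let idle core threads time out, mirroring pool.allowCoreThreadTimeOut(true) above.
            pool.allowCoreThreadTimeOut(true);
            threadPool = pool;
        }
        return threadPool;
    }
}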

Example 2 with SimpleThreadPool

Use of org.apache.accumulo.core.util.SimpleThreadPool in the Apache Accumulo project.

From the class TServerUtils, method createSelfResizingThreadPool.

/**
 * Creates a {@link SimpleThreadPool} which uses {@link SimpleTimer} to inspect the core pool size and number of active threads of the
 * {@link ThreadPoolExecutor} and increase or decrease the core pool size based on activity (excessive or lack thereof).
 *
 * @param serverName
 *          A name to describe the thrift server this executor will service
 * @param executorThreads
 *          The maximum number of threads for the executor
 * @param simpleTimerThreads
 *          The number of threads used to get the {@link SimpleTimer} instance
 * @param timeBetweenThreadChecks
 *          The amount of time, in millis, between attempts to resize the executor thread pool
 * @return A {@link ThreadPoolExecutor} which will resize itself automatically
 */
public static ThreadPoolExecutor createSelfResizingThreadPool(final String serverName, final int executorThreads, int simpleTimerThreads, long timeBetweenThreadChecks) {
    final ThreadPoolExecutor pool = new SimpleThreadPool(executorThreads, "ClientPool");
    // periodically adjust the number of threads we need by checking how busy our threads are
    SimpleTimer.getInstance(simpleTimerThreads).schedule(new Runnable() {

        @Override
        public void run() {
            // the core pool size and active count are sampled without locking, so the numbers can be slightly stale;
            // however, this isn't really an issue, since it adjusts periodically anyway
            if (pool.getCorePoolSize() <= pool.getActiveCount()) {
                int larger = pool.getCorePoolSize() + Math.min(pool.getQueue().size(), 2);
                log.info("Increasing server thread pool size on {} to {}", serverName, larger);
                pool.setMaximumPoolSize(larger);
                pool.setCorePoolSize(larger);
            } else {
                if (pool.getCorePoolSize() > pool.getActiveCount() + 3) {
                    int smaller = Math.max(executorThreads, pool.getCorePoolSize() - 1);
                    if (smaller != pool.getCorePoolSize()) {
                        log.info("Decreasing server thread pool size on {} to {}", serverName, smaller);
                        pool.setCorePoolSize(smaller);
                    }
                }
            }
        }
    }, timeBetweenThreadChecks, timeBetweenThreadChecks);
    return pool;
}
Also used: LoggingRunnable (org.apache.accumulo.fate.util.LoggingRunnable), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool)
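
Given the signature above, a caller sizes the pool for its expected load and then uses it like any other ThreadPoolExecutor; the SimpleTimer task grows or shrinks the core size in the background. The snippet below is an illustrative call only, with made-up server name and values rather than Accumulo's configured defaults.

// Illustrative usage of the self-resizing pool; all values are examples.
ThreadPoolExecutor clientPool = TServerUtils.createSelfResizingThreadPool(
    // serverName, used only in the resize log messages
    "MyThriftServer",
    // executorThreads: the starting core size, which the resize task never shrinks below
    64,
    // simpleTimerThreads used to obtain the shared SimpleTimer instance
    1,
    // timeBetweenThreadChecks, in milliseconds
    1000L);

// Tasks are submitted as with any executor; resizing happens behind the scenes.
clientPool.execute(() -> System.out.println("handling a request"));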

Example 3 with SimpleThreadPool

Use of org.apache.accumulo.core.util.SimpleThreadPool in the Apache Accumulo project.

From the class BulkImport, method prepareBulkImport.

private String prepareBulkImport(Master master, final VolumeManager fs, String dir, Table.ID tableId) throws Exception {
    final Path bulkDir = createNewBulkDir(fs, tableId);
    MetadataTableUtil.addBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
    Path dirPath = new Path(dir);
    FileStatus[] mapFiles = fs.listStatus(dirPath);
    final UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
    SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move");
    List<Future<Exception>> results = new ArrayList<>();
    for (FileStatus file : mapFiles) {
        final FileStatus fileStatus = file;
        results.add(workers.submit(new Callable<Exception>() {

            @Override
            public Exception call() throws Exception {
                try {
                    String[] sa = fileStatus.getPath().getName().split("\\.");
                    String extension = "";
                    if (sa.length > 1) {
                        extension = sa[sa.length - 1];
                        if (!FileOperations.getValidExtensions().contains(extension)) {
                            log.warn("{} does not have a valid extension, ignoring", fileStatus.getPath());
                            return null;
                        }
                    } else {
                        // assume it is a map file
                        extension = Constants.MAPFILE_EXTENSION;
                    }
                    if (extension.equals(Constants.MAPFILE_EXTENSION)) {
                        if (!fileStatus.isDirectory()) {
                            log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                            return null;
                        }
                        if (fileStatus.getPath().getName().equals("_logs")) {
                            log.info("{} is probably a log directory from a map/reduce task, skipping", fileStatus.getPath());
                            return null;
                        }
                        try {
                            FileStatus dataStatus = fs.getFileStatus(new Path(fileStatus.getPath(), MapFile.DATA_FILE_NAME));
                            if (dataStatus.isDirectory()) {
                                log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                                return null;
                            }
                        } catch (FileNotFoundException fnfe) {
                            log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                            return null;
                        }
                    }
                    String newName = "I" + namer.getNextName() + "." + extension;
                    Path newPath = new Path(bulkDir, newName);
                    try {
                        fs.rename(fileStatus.getPath(), newPath);
                        log.debug("Moved {} to {}", fileStatus.getPath(), newPath);
                    } catch (IOException E1) {
                        log.error("Could not move: {} {}", fileStatus.getPath().toString(), E1.getMessage());
                    }
                } catch (Exception ex) {
                    return ex;
                }
                return null;
            }
        }));
    }
    workers.shutdown();
    while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {
    }
    for (Future<Exception> ex : results) {
        if (ex.get() != null) {
            throw ex.get();
        }
    }
    return bulkDir.toString();
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), ArrayList (java.util.ArrayList), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), Callable (java.util.concurrent.Callable), AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), Future (java.util.concurrent.Future), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool)
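
prepareBulkImport has its workers return exceptions as values instead of throwing them inside the pool, waits for the pool to drain, and only then rethrows the first failure on the caller's thread. The following is a stripped-down sketch of that error-collection pattern using a plain Executors pool; the file names and the per-file work are placeholders, not Accumulo code.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class CollectWorkerExceptions {

    public static void main(String[] args) throws Exception {
        ExecutorService workers = Executors.newFixedThreadPool(4);
        List<Future<Exception>> results = new ArrayList<>();
        for (String file : new String[] { "a.rf", "b.rf", "c.rf" }) {
            // Each task returns null on success, or the exception it hit on failure.
            results.add(workers.submit(() -> {
                try {
                    // placeholder for the real work, e.g. renaming the file into the bulk directory
                    System.out.println("processing " + file);
                    return null;
                } catch (Exception ex) {
                    return ex;
                }
            }));
        }
        workers.shutdown();
        // Block until every task has finished, polling as the Accumulo code above does.
        while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {
        }
        // Rethrow the first failure on the calling thread.
        for (Future<Exception> result : results) {
            if (result.get() != null) {
                throw result.get();
            }
        }
    }
}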

Example 4 with SimpleThreadPool

Use of org.apache.accumulo.core.util.SimpleThreadPool in the Apache Accumulo project.

From the class TabletServerLogger, method startLogMaker.

private synchronized void startLogMaker() {
    if (nextLogMaker != null) {
        return;
    }
    nextLogMaker = new SimpleThreadPool(1, "WALog creator");
    nextLogMaker.submit(new LoggingRunnable(log, new Runnable() {

        @Override
        public void run() {
            final ServerResources conf = tserver.getServerConfig();
            final VolumeManager fs = conf.getFileSystem();
            while (!nextLogMaker.isShutdown()) {
                DfsLogger alog = null;
                try {
                    log.debug("Creating next WAL");
                    alog = new DfsLogger(conf, syncCounter, flushCounter);
                    alog.open(tserver.getClientAddressString());
                    String fileName = alog.getFileName();
                    log.debug("Created next WAL " + fileName);
                    tserver.addNewLogMarker(alog);
                    while (!nextLog.offer(alog, 12, TimeUnit.HOURS)) {
                        log.info("Our WAL was not used for 12 hours: {}", fileName);
                    }
                } catch (Exception t) {
                    log.error("Failed to open WAL", t);
                    if (null != alog) {
                        // best effort to close the partially-opened DfsLogger object before trying to create a new one
                        try {
                            alog.close();
                        } catch (Exception e) {
                            log.error("Failed to close WAL after it failed to open", e);
                        }
                        // Try to avoid leaving a bunch of empty WALs lying around
                        try {
                            Path path = alog.getPath();
                            if (fs.exists(path)) {
                                fs.delete(path);
                            }
                        } catch (Exception e) {
                            log.warn("Failed to delete a WAL that failed to open", e);
                        }
                    }
                    try {
                        nextLog.offer(t, 12, TimeUnit.HOURS);
                    } catch (InterruptedException ex) {
                        // ignore
                    }
                }
            }
        }
    }));
}
Also used: Path (org.apache.hadoop.fs.Path), LoggingRunnable (org.apache.accumulo.fate.util.LoggingRunnable), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool), ServerResources (org.apache.accumulo.tserver.log.DfsLogger.ServerResources), IOException (java.io.IOException)
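
startLogMaker keeps a single background thread one step ahead of the tablet server: it opens the next write-ahead log, parks it in the blocking queue nextLog until a consumer takes it, and offers the exception instead when opening fails so the consumer still sees the outcome. Below is a generic sketch of that producer-ahead pattern; the Object placeholder for the log, the one-minute re-offer interval, and the class name are hypothetical, not Accumulo code.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class NextResourceMaker {

    // Holds either the next ready-to-use resource or the exception hit while creating it.
    private final BlockingQueue<Object> nextResource = new ArrayBlockingQueue<>(1);

    private final ExecutorService maker = Executors.newSingleThreadExecutor();

    public void start() {
        maker.submit(() -> {
            while (!maker.isShutdown()) {
                try {
                    // placeholder for opening the next WAL
                    Object resource = new Object();
                    // Block until the consumer takes it, re-offering periodically like the 12-hour loop above.
                    while (!nextResource.offer(resource, 1, TimeUnit.MINUTES)) {
                        System.out.println("prepared resource has not been consumed yet");
                    }
                } catch (Exception e) {
                    try {
                        // surface the failure to the consumer instead of a resource
                        nextResource.offer(e, 1, TimeUnit.MINUTES);
                    } catch (InterruptedException ignored) {
                        // ignore, as the original does
                    }
                }
            }
        });
    }

    // The consumer receives a resource on success or an exception on failure.
    public Object takeNext() throws InterruptedException {
        return nextResource.take();
    }
}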

Example 5 with SimpleThreadPool

Use of org.apache.accumulo.core.util.SimpleThreadPool in the Apache Accumulo project.

From the class DeleteTableDuringSplitIT, method test.

@Test
public void test() throws Exception {
    // 96 invocations, 8 at a time
    int batches = 12, batchSize = 8;
    String[] tableNames = getUniqueNames(batches * batchSize);
    // make a bunch of tables
    for (String tableName : tableNames) {
        getConnector().tableOperations().create(tableName);
    }
    final SortedSet<Text> splits = new TreeSet<>();
    for (byte i = 0; i < 100; i++) {
        splits.add(new Text(new byte[] { 0, 0, i }));
    }
    List<Future<?>> results = new ArrayList<>();
    List<Runnable> tasks = new ArrayList<>();
    SimpleThreadPool es = new SimpleThreadPool(batchSize * 2, "concurrent-api-requests");
    for (String tableName : tableNames) {
        final String finalName = tableName;
        tasks.add(new Runnable() {

            @Override
            public void run() {
                try {
                    getConnector().tableOperations().addSplits(finalName, splits);
                } catch (TableNotFoundException ex) {
                    // expected, ignore
                } catch (Exception ex) {
                    throw new RuntimeException(finalName, ex);
                }
            }
        });
        tasks.add(new Runnable() {

            @Override
            public void run() {
                try {
                    UtilWaitThread.sleep(500);
                    getConnector().tableOperations().delete(finalName);
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        });
    }
    Iterator<Runnable> itr = tasks.iterator();
    for (int batch = 0; batch < batches; batch++) {
        for (int i = 0; i < batchSize; i++) {
            Future<?> f = es.submit(itr.next());
            results.add(f);
            f = es.submit(itr.next());
            results.add(f);
        }
        for (Future<?> f : results) {
            f.get();
        }
        results.clear();
    }
    // Shut down the ES
    List<Runnable> queued = es.shutdownNow();
    Assert.assertTrue("Had more tasks to run", queued.isEmpty());
    Assert.assertFalse("Had more tasks that needed to be submitted", itr.hasNext());
    for (String tableName : tableNames) {
        assertFalse(getConnector().tableOperations().exists(tableName));
    }
}
Also used: ArrayList (java.util.ArrayList), Text (org.apache.hadoop.io.Text), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), TreeSet (java.util.TreeSet), Future (java.util.concurrent.Future), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool), Test (org.junit.Test)
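
The test pits an addSplits call against a delete of the same table, submitting the competing tasks eight table-pairs at a time and joining every Future before starting the next batch, so a failure is tied to the batch that produced it. Here is a bare-bones sketch of that batch-and-join loop with placeholder tasks and illustrative batch sizes (not the test's 12 batches of 8).

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class BatchedRacingTasks {

    public static void main(String[] args) throws Exception {
        // illustrative sizes only
        int batches = 3, batchSize = 4;
        ExecutorService es = Executors.newFixedThreadPool(batchSize * 2);
        List<Future<?>> results = new ArrayList<>();
        for (int batch = 0; batch < batches; batch++) {
            for (int i = 0; i < batchSize; i++) {
                final int id = batch * batchSize + i;
                // two competing tasks per id, standing in for addSplits(...) and delete(...)
                results.add(es.submit(() -> System.out.println("splitting table " + id)));
                results.add(es.submit(() -> System.out.println("deleting table " + id)));
            }
            // Join the whole batch before moving on.
            for (Future<?> f : results) {
                f.get();
            }
            results.clear();
        }
        es.shutdown();
    }
}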

Aggregations

SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool): 9
IOException (java.io.IOException): 4
ArrayList (java.util.ArrayList): 3
TreeSet (java.util.TreeSet): 3
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 3
LoggingRunnable (org.apache.accumulo.fate.util.LoggingRunnable): 3
Path (org.apache.hadoop.fs.Path): 3
Text (org.apache.hadoop.io.Text): 3
Test (org.junit.Test): 3
Future (java.util.concurrent.Future): 2
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 2
Connector (org.apache.accumulo.core.client.Connector): 2
Scanner (org.apache.accumulo.core.client.Scanner): 2
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 2
Key (org.apache.accumulo.core.data.Key): 2
Mutation (org.apache.accumulo.core.data.Mutation): 2
Value (org.apache.accumulo.core.data.Value): 2
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 2
FileNotFoundException (java.io.FileNotFoundException): 1
UnknownHostException (java.net.UnknownHostException): 1