Example 36 with ConcurrentHashMap

Use of java.util.concurrent.ConcurrentHashMap in project storm by apache.

Class PacemakerTest, method testServerCreatePath.

@Test
public void testServerCreatePath() {
    Pacemaker handler = new Pacemaker(new ConcurrentHashMap<>());
    // messageWithRandId is a test fixture helper: it builds the request and
    // stores it, together with its random message id, in the hbMessage and mid fields.
    messageWithRandId(HBServerMessageType.CREATE_PATH, HBMessageData.path("/testpath"));
    HBMessage response = handler.handleMessage(hbMessage, true);
    Assert.assertEquals(mid, response.get_message_id());
    Assert.assertEquals(HBServerMessageType.CREATE_PATH_RESPONSE, response.get_type());
    Assert.assertNull(response.get_data());
}
Also used : Pacemaker(org.apache.storm.pacemaker.Pacemaker) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HBMessage(org.apache.storm.generated.HBMessage) Test(org.junit.Test)
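
A note on the pattern: the test hands Pacemaker a ConcurrentHashMap because heartbeat requests are handled on concurrent server threads, so every path operation must be safe without external locking. Below is a minimal sketch of that idiom using a hypothetical HeartbeatStore class (not Storm's actual implementation): computeIfAbsent makes "create path" idempotent even when several threads race on the same key.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical store illustrating why Pacemaker's state map must be concurrent.
class HeartbeatStore {
    private final ConcurrentMap<String, byte[]> heartbeats = new ConcurrentHashMap<>();

    // computeIfAbsent invokes the mapping function at most once per key,
    // so concurrent CREATE_PATH requests cannot clobber each other.
    void createPath(String path) {
        heartbeats.computeIfAbsent(path, p -> new byte[0]);
    }

    byte[] get(String path) {
        return heartbeats.get(path);
    }
}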

Example 37 with ConcurrentHashMap

Use of java.util.concurrent.ConcurrentHashMap in project hbase by apache.

Class SnapshotInfo, method getSnapshotsFilesMap.

/**
   * Returns the map of store files based on path for all snapshots
   * @param conf the {@link Configuration} to use
   * @param uniqueHFilesArchiveSize pass out the size for store files in archive
   * @param uniqueHFilesSize pass out the size for store files shared
   * @param uniqueHFilesMobSize pass out the size for mob store files shared
   * @return the map of store files
   */
public static Map<Path, Integer> getSnapshotsFilesMap(final Configuration conf, AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, AtomicLong uniqueHFilesMobSize) throws IOException {
    List<SnapshotDescription> snapshotList = getSnapshotList(conf);
    if (snapshotList.isEmpty()) {
        return Collections.emptyMap();
    }
    ConcurrentHashMap<Path, Integer> fileMap = new ConcurrentHashMap<>();
    ExecutorService exec = SnapshotManifest.createExecutor(conf, "SnapshotsFilesMapping");
    try {
        for (final SnapshotDescription snapshot : snapshotList) {
            getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize, uniqueHFilesSize, uniqueHFilesMobSize);
        }
    } finally {
        exec.shutdown();
    }
    return fileMap;
}
Also used : Path(org.apache.hadoop.fs.Path) AtomicLong(java.util.concurrent.atomic.AtomicLong) ExecutorService(java.util.concurrent.ExecutorService) SnapshotDescription(org.apache.hadoop.hbase.client.SnapshotDescription) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
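
The shape here recurs throughout these examples: fan each unit of work out to an ExecutorService while every task writes into one shared ConcurrentHashMap, then shut the pool down and return the map. A minimal, self-contained sketch of that shape, with a hypothetical per-item computation standing in for HBase's per-snapshot file walk:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class ParallelIndexer {
    // Fan the items out to a pool; every task writes into the same map.
    static Map<String, Integer> index(List<String> items) throws InterruptedException {
        ConcurrentHashMap<String, Integer> result = new ConcurrentHashMap<>();
        ExecutorService exec = Executors.newFixedThreadPool(4);
        try {
            for (String item : items) {
                exec.execute(() -> result.put(item, item.length()));
            }
        } finally {
            exec.shutdown();
            // Wait for queued tasks so the returned map is complete.
            exec.awaitTermination(1, TimeUnit.MINUTES);
        }
        return result;
    }
}

The HBase method presumably completes each snapshot's work inside getSnapshotFilesMap before returning; the sketch makes the wait explicit with awaitTermination instead.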

Example 38 with ConcurrentHashMap

Use of java.util.concurrent.ConcurrentHashMap in project crate by crate.

Class ApplySettingsTest, method testOnRefreshSettings.

@Test
public void testOnRefreshSettings() throws Exception {
    ConcurrentHashMap<String, Object> values = new ConcurrentHashMap<>();
    ClusterSettingsExpression.ApplySettings applySettings = new ClusterSettingsExpression.ApplySettings(Settings.EMPTY, values);
    Settings.Builder builder = Settings.builder().put(CrateSettings.STATS_JOBS_LOG_SIZE.settingName(), 1).put(CrateSettings.STATS_ENABLED.settingName(), false).put(CrateSettings.GRACEFUL_STOP_MIN_AVAILABILITY.settingName(), "full").put(CrateSettings.GRACEFUL_STOP_TIMEOUT.settingName(), "1m").put(CrateSettings.DISCOVERY_ZEN_MIN_MASTER_NODES.settingName(), 2);
    Settings settings = builder.build();
    applySettings.onRefreshSettings(settings);
    String name = CrateSettings.STATS_JOBS_LOG_SIZE.settingName();
    assertEquals(values.get(name), settings.getAsInt(name, 0));
    name = CrateSettings.STATS_ENABLED.settingName();
    assertEquals(values.get(name), settings.getAsBoolean(name, true));
    name = CrateSettings.GRACEFUL_STOP_MIN_AVAILABILITY.settingName();
    assertEquals(values.get(name), settings.get(name, "none"));
    name = CrateSettings.GRACEFUL_STOP_TIMEOUT.settingName();
    assertEquals(values.get(name), settings.get(name, "1h"));
    name = CrateSettings.DISCOVERY_ZEN_MIN_MASTER_NODES.settingName();
    assertEquals(values.get(name), settings.getAsInt(name, 2));
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CrateSettings(io.crate.metadata.settings.CrateSettings) Settings(org.elasticsearch.common.settings.Settings) Test(org.junit.Test) CrateUnitTest(io.crate.test.integration.CrateUnitTest)
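
The map under test acts as a live settings cache: a refresh listener writes updated values while other threads read them without locking. A minimal sketch of that pattern under assumed names (SettingsCache, onRefresh — not Crate's actual classes):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical cache mirroring the ApplySettings pattern: one writer thread
// refreshes values, any number of readers query them concurrently.
class SettingsCache {
    private final ConcurrentHashMap<String, Object> values = new ConcurrentHashMap<>();

    // Called by the refresh listener; each put is atomic per key, so readers
    // always see either the old or the new value, never a torn write.
    void onRefresh(Map<String, Object> updated) {
        values.putAll(updated);
    }

    Object get(String name, Object defaultValue) {
        return values.getOrDefault(name, defaultValue);
    }
}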

Example 39 with ConcurrentHashMap

Use of java.util.concurrent.ConcurrentHashMap in project hive by apache.

Class StatsTask, method aggregateStats.

private int aggregateStats(Hive db) {
    StatsAggregator statsAggregator = null;
    int ret = 0;
    StatsCollectionContext scc = null;
    EnvironmentContext environmentContext = null;
    try {
        // Stats setup:
        final Warehouse wh = new Warehouse(conf);
        if (!getWork().getNoStatsAggregator() && !getWork().isNoScanAnalyzeCommand()) {
            try {
                scc = getContext();
                statsAggregator = createStatsAggregator(scc, conf);
            } catch (HiveException e) {
                if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
                    throw e;
                }
                console.printError(ErrorMsg.STATS_SKIPPING_BY_ERROR.getErrorCodedMsg(e.toString()));
            }
        }
        List<Partition> partitions = getPartitionsList(db);
        boolean atomic = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_ATOMIC);
        String tableFullName = table.getDbName() + "." + table.getTableName();
        if (partitions == null) {
            org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
            Map<String, String> parameters = tTable.getParameters();
            // An ACID table will not have accurate stats unless they are set through the analyze command.
            if (work.getTableSpecs() == null && AcidUtils.isAcidTable(table)) {
                StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
            } else if (work.getTableSpecs() != null || (work.getLoadTableDesc() != null && work.getLoadTableDesc().getReplace()) || (work.getLoadFileDesc() != null && !work.getLoadFileDesc().getDestinationCreateTable().isEmpty())) {
                StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
            }
            // non-partitioned tables:
            if (!existStats(parameters) && atomic) {
                return 0;
            }
            // E.g. if a file is being loaded, the old row count is no longer valid
            if (work.isClearAggregatorStats()) {
                // we choose to keep the invalid stats and only change the setting.
                StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
            }
            updateQuickStats(wh, parameters, tTable.getSd());
            if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
                if (statsAggregator != null) {
                    String prefix = getAggregationPrefix(table, null);
                    updateStats(statsAggregator, parameters, prefix, atomic);
                }
                // write table stats to metastore
                if (!getWork().getNoStatsAggregator()) {
                    environmentContext = new EnvironmentContext();
                    environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
                }
            }
            getHive().alterTable(tableFullName, new Table(tTable), environmentContext);
            if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
                console.printInfo("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
            }
            LOG.info("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
        } else {
            // Partitioned table:
            // Need to get the old stats of the partition
            // and update the table stats based on the old and new stats.
            List<Partition> updates = new ArrayList<Partition>();
            // Get the file statuses up-front for all partitions; beneficial on blob storage systems
            final Map<String, FileStatus[]> fileStatusMap = new ConcurrentHashMap<String, FileStatus[]>();
            int poolSize = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 1);
            // In case thread count is set to 0, use single thread.
            poolSize = Math.max(poolSize, 1);
            final ExecutorService pool = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("stats-updater-thread-%d").build());
            final List<Future<Void>> futures = Lists.newLinkedList();
            LOG.debug("Getting file stats of all partitions. threadpool size:" + poolSize);
            try {
                for (final Partition partn : partitions) {
                    final String partitionName = partn.getName();
                    final org.apache.hadoop.hive.metastore.api.Partition tPart = partn.getTPartition();
                    Map<String, String> parameters = tPart.getParameters();
                    if (!existStats(parameters) && atomic) {
                        continue;
                    }
                    futures.add(pool.submit(new Callable<Void>() {

                        @Override
                        public Void call() throws Exception {
                            FileStatus[] partfileStatus = wh.getFileStatusesForSD(tPart.getSd());
                            fileStatusMap.put(partitionName, partfileStatus);
                            return null;
                        }
                    }));
                }
                pool.shutdown();
                for (Future<Void> future : futures) {
                    future.get();
                }
            } catch (InterruptedException e) {
                LOG.debug("Cancelling " + futures.size() + " file stats lookup tasks");
                //cancel other futures
                for (Future future : futures) {
                    future.cancel(true);
                }
                // Fail the query if the stats are supposed to be reliable
                if (work.isStatsReliable()) {
                    ret = 1;
                }
            } finally {
                if (pool != null) {
                    pool.shutdownNow();
                }
                LOG.debug("Finished getting file stats of all partitions");
            }
            for (Partition partn : partitions) {
                //
                // get the old partition stats
                //
                org.apache.hadoop.hive.metastore.api.Partition tPart = partn.getTPartition();
                Map<String, String> parameters = tPart.getParameters();
                if (work.getTableSpecs() == null && AcidUtils.isAcidTable(table)) {
                    StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
                } else if (work.getTableSpecs() != null || (work.getLoadTableDesc() != null && work.getLoadTableDesc().getReplace()) || (work.getLoadFileDesc() != null && !work.getLoadFileDesc().getDestinationCreateTable().isEmpty())) {
                    StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
                }
                // a partition appears in fileStatusMap only if its stats exist; skip it otherwise
                if (!fileStatusMap.containsKey(partn.getName())) {
                    continue;
                }
                // E.g. if a file is being loaded, the old row count is no longer valid
                if (work.isClearAggregatorStats()) {
                    // we choose to keep the invalid stats and only change the setting.
                    StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
                }
                updateQuickStats(parameters, fileStatusMap.get(partn.getName()));
                if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
                    if (statsAggregator != null) {
                        String prefix = getAggregationPrefix(table, partn);
                        updateStats(statsAggregator, parameters, prefix, atomic);
                    }
                    if (!getWork().getNoStatsAggregator()) {
                        environmentContext = new EnvironmentContext();
                        environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
                    }
                }
                updates.add(new Partition(table, tPart));
                if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
                    console.printInfo("Partition " + tableFullName + partn.getSpec() + " stats: [" + toString(parameters) + ']');
                }
                LOG.info("Partition " + tableFullName + partn.getSpec() + " stats: [" + toString(parameters) + ']');
            }
            if (!updates.isEmpty()) {
                db.alterPartitions(tableFullName, updates, environmentContext);
            }
        }
    } catch (Exception e) {
        console.printInfo("[Warning] could not update stats.", "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e));
        // Fail the query if the stats are supposed to be reliable
        if (work.isStatsReliable()) {
            ret = 1;
        }
    } finally {
        if (statsAggregator != null) {
            statsAggregator.closeConnection(scc);
        }
    }
    // ret == 0 indicates success; anything else indicates failure
    return ret;
}
Also used : Warehouse(org.apache.hadoop.hive.metastore.Warehouse) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FileStatus(org.apache.hadoop.fs.FileStatus) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StatsCollectionContext(org.apache.hadoop.hive.ql.stats.StatsCollectionContext) Partition(org.apache.hadoop.hive.ql.metadata.Partition) Table(org.apache.hadoop.hive.ql.metadata.Table) StatsAggregator(org.apache.hadoop.hive.ql.stats.StatsAggregator) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future)
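
The partitioned branch above condenses to a reusable shape: submit one Callable per partition to a bounded pool, let each task write into a shared ConcurrentHashMap, wait on the Futures, and cancel whatever remains if the wait fails. A self-contained sketch of just that shape, with a hypothetical fetchStatus standing in for the Warehouse filesystem call:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class ParallelStatusFetch {
    static Map<String, Long> fetchAll(List<String> partitions) {
        final ConcurrentHashMap<String, Long> statusMap = new ConcurrentHashMap<>();
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, 4));
        List<Future<Void>> futures = new ArrayList<>();
        try {
            for (final String partition : partitions) {
                futures.add(pool.submit(() -> {
                    // each task writes into the shared concurrent map
                    statusMap.put(partition, fetchStatus(partition));
                    return null;
                }));
            }
            for (Future<Void> future : futures) {
                future.get(); // block until done; propagates task failures
            }
        } catch (InterruptedException | ExecutionException e) {
            for (Future<Void> future : futures) {
                future.cancel(true); // cancel outstanding lookups
            }
            if (e instanceof InterruptedException) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
            }
        } finally {
            pool.shutdownNow();
        }
        return statusMap;
    }

    // Hypothetical stand-in for the real per-partition filesystem call.
    private static long fetchStatus(String partition) {
        return partition.length();
    }
}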

Example 40 with ConcurrentHashMap

Use of java.util.concurrent.ConcurrentHashMap in project pinot by linkedin.

Class MCombineGroupByOperator, method combineBlocks.

/**
   * This method combines the result blocks from underlying operators and builds a
   * merged, sorted and trimmed result block.
   * 1. Result blocks from underlying operators are merged concurrently into a
   *   HashMap, with appropriate synchronization. Result blocks themselves are stored
   *   in the specified blocks[].
   *   - The key in this concurrent map is the group-by key, and value is an array of
   *     Objects (one for each aggregation function).
   *   - Synchronization is provided by locking the group-key that is to be modified.
   *
   * 2. The result of the concurrent map is then translated into what is expected by
   *    the broker (List<Map<String, Object>>).
   *
   * 3. This result is then sorted and then trimmed as per 'TOP N' in the brokerRequest.
   *
   * @return IntermediateResultBlock containing the final results from combine operation.
   */
private IntermediateResultsBlock combineBlocks() throws InterruptedException {
    int numOperators = _operators.size();
    final CountDownLatch operatorLatch = new CountDownLatch(numOperators);
    final Map<String, Object[]> resultsMap = new ConcurrentHashMap<>();
    final ConcurrentLinkedQueue<ProcessingException> mergedProcessingExceptions = new ConcurrentLinkedQueue<>();
    List<AggregationInfo> aggregationInfos = _brokerRequest.getAggregationsInfo();
    final AggregationFunctionContext[] aggregationFunctionContexts = AggregationFunctionUtils.getAggregationFunctionContexts(aggregationInfos, null);
    final int numAggregationFunctions = aggregationFunctionContexts.length;
    for (int i = 0; i < numOperators; i++) {
        final int index = i;
        _executorService.execute(new TraceRunnable() {

            @SuppressWarnings("unchecked")
            @Override
            public void runJob() {
                AggregationGroupByResult aggregationGroupByResult;
                try {
                    IntermediateResultsBlock intermediateResultsBlock = (IntermediateResultsBlock) _operators.get(index).nextBlock();
                    // Merge processing exceptions.
                    List<ProcessingException> processingExceptionsToMerge = intermediateResultsBlock.getProcessingExceptions();
                    if (processingExceptionsToMerge != null) {
                        mergedProcessingExceptions.addAll(processingExceptionsToMerge);
                    }
                    // Merge aggregation group-by result.
                    aggregationGroupByResult = intermediateResultsBlock.getAggregationGroupByResult();
                    if (aggregationGroupByResult != null) {
                        // Iterate over the group-by keys, for each key, update the group-by result in the resultsMap.
                        Iterator<GroupKeyGenerator.GroupKey> groupKeyIterator = aggregationGroupByResult.getGroupKeyIterator();
                        while (groupKeyIterator.hasNext()) {
                            GroupKeyGenerator.GroupKey groupKey = groupKeyIterator.next();
                            String groupKeyString = groupKey.getStringKey();
                            // HashCode method might return negative value, make it non-negative
                            int lockIndex = (groupKeyString.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
                            synchronized (LOCKS[lockIndex]) {
                                Object[] results = resultsMap.get(groupKeyString);
                                if (results == null) {
                                    results = new Object[numAggregationFunctions];
                                    for (int j = 0; j < numAggregationFunctions; j++) {
                                        results[j] = aggregationGroupByResult.getResultForKey(groupKey, j);
                                    }
                                    resultsMap.put(groupKeyString, results);
                                } else {
                                    for (int j = 0; j < numAggregationFunctions; j++) {
                                        results[j] = aggregationFunctionContexts[j].getAggregationFunction().merge(results[j], aggregationGroupByResult.getResultForKey(groupKey, j));
                                    }
                                }
                            }
                        }
                    }
                } catch (Exception e) {
                    LOGGER.error("Exception processing CombineGroupBy for index {}, operator {}", index, _operators.get(index).getClass().getName(), e);
                    mergedProcessingExceptions.add(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
                }
                operatorLatch.countDown();
            }
        });
    }
    boolean opCompleted = operatorLatch.await(_timeOutMs, TimeUnit.MILLISECONDS);
    if (!opCompleted) {
        // If this happens, the broker side should already have timed out; just log the error on the server side.
        LOGGER.error("Timed out while combining group-by results, after {}ms.", _timeOutMs);
        return new IntermediateResultsBlock(new TimeoutException("CombineGroupBy timed out."));
    }
    // Trim the results map.
    AggregationGroupByTrimmingService aggregationGroupByTrimmingService = new AggregationGroupByTrimmingService(aggregationFunctionContexts, (int) _brokerRequest.getGroupBy().getTopN());
    List<Map<String, Object>> trimmedResults = aggregationGroupByTrimmingService.trimIntermediateResultsMap(resultsMap);
    IntermediateResultsBlock mergedBlock = new IntermediateResultsBlock(aggregationFunctionContexts, trimmedResults, true);
    // Set the processing exceptions.
    if (!mergedProcessingExceptions.isEmpty()) {
        mergedBlock.setProcessingExceptions(new ArrayList<>(mergedProcessingExceptions));
    }
    // Set the execution statistics.
    ExecutionStatistics executionStatistics = new ExecutionStatistics();
    for (Operator operator : _operators) {
        ExecutionStatistics executionStatisticsToMerge = operator.getExecutionStatistics();
        if (executionStatisticsToMerge != null) {
            executionStatistics.merge(executionStatisticsToMerge);
        }
    }
    mergedBlock.setNumDocsScanned(executionStatistics.getNumDocsScanned());
    mergedBlock.setNumEntriesScannedInFilter(executionStatistics.getNumEntriesScannedInFilter());
    mergedBlock.setNumEntriesScannedPostFilter(executionStatistics.getNumEntriesScannedPostFilter());
    mergedBlock.setNumTotalRawDocs(executionStatistics.getNumTotalRawDocs());
    return mergedBlock;
}
Also used : Operator(com.linkedin.pinot.core.common.Operator) AggregationGroupByTrimmingService(com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByTrimmingService) AggregationGroupByResult(com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByResult) TraceRunnable(com.linkedin.pinot.core.util.trace.TraceRunnable) Iterator(java.util.Iterator) ArrayList(java.util.ArrayList) List(java.util.List) AggregationInfo(com.linkedin.pinot.common.request.AggregationInfo) IntermediateResultsBlock(com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) AggregationFunctionContext(com.linkedin.pinot.core.query.aggregation.AggregationFunctionContext) GroupKeyGenerator(com.linkedin.pinot.core.query.aggregation.groupby.GroupKeyGenerator) ProcessingException(com.linkedin.pinot.common.response.ProcessingException) TimeoutException(java.util.concurrent.TimeoutException) CountDownLatch(java.util.concurrent.CountDownLatch) QueryException(com.linkedin.pinot.common.exception.QueryException) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Map(java.util.Map)
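
The striped LOCKS array exists because a get-then-put sequence on a ConcurrentHashMap is not atomic. On Java 8+ the same per-key atomicity is available directly from the map: compute runs its remapping function atomically for the key, which would replace the synchronized block above. A minimal sketch of that alternative using a generic merge function (not Pinot's aggregation API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BinaryOperator;

class GroupByMerger {
    private final ConcurrentHashMap<String, Object[]> resultsMap = new ConcurrentHashMap<>();

    // compute() executes the remapping function atomically for its key,
    // standing in for the synchronized (LOCKS[lockIndex]) block above.
    void merge(String groupKey, Object[] incoming, BinaryOperator<Object> mergeFn) {
        resultsMap.compute(groupKey, (key, existing) -> {
            if (existing == null) {
                return incoming.clone(); // first result for this group key
            }
            for (int i = 0; i < existing.length; i++) {
                existing[i] = mergeFn.apply(existing[i], incoming[i]);
            }
            return existing;
        });
    }
}

One caveat: compute holds the bin's lock while the function runs, so the merge function should stay cheap; lock striping can still win when merges are expensive.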

Aggregations

ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 420
Map (java.util.Map): 106
Test (org.junit.Test): 102
HashMap (java.util.HashMap): 75
ArrayList (java.util.ArrayList): 73
CountDownLatch (java.util.concurrent.CountDownLatch): 53
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 49
IOException (java.io.IOException): 47
List (java.util.List): 38
Set (java.util.Set): 36
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 33
HashSet (java.util.HashSet): 31
AtomicLong (java.util.concurrent.atomic.AtomicLong): 29
ConcurrentMap (java.util.concurrent.ConcurrentMap): 28
Random (java.util.Random): 25
ExecutorService (java.util.concurrent.ExecutorService): 23
Collection (java.util.Collection): 20
UUID (java.util.UUID): 20
Iterator (java.util.Iterator): 19
Configuration (org.apache.hadoop.conf.Configuration): 17