
Example 71 with EMPTY

use of org.apache.commons.lang3.StringUtils.EMPTY in project apex-core by apache.
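For context, StringUtils.EMPTY is the empty-string constant that commons-lang3 declares as

public static final String EMPTY = "";

Code typically references it (directly or via a static import) as a self-documenting alternative to the bare "" literal; the methods excerpted below come from classes that use the constant, even though the excerpts themselves may not show it directly.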

the class StreamingContainerManager method processEvents.

public int processEvents() {
    for (PTOperator o : reportStats.keySet()) {
        List<OperatorStats> stats = o.stats.listenerStats.poll();
        if (stats != null) {
            // append into single list
            List<OperatorStats> moreStats;
            while ((moreStats = o.stats.listenerStats.poll()) != null) {
                stats.addAll(moreStats);
            }
        }
        o.stats.lastWindowedStats = stats;
        o.stats.operatorResponses = null;
        if (!o.stats.responses.isEmpty()) {
            o.stats.operatorResponses = new ArrayList<>();
            StatsListener.OperatorResponse operatorResponse;
            while ((operatorResponse = o.stats.responses.poll()) != null) {
                o.stats.operatorResponses.add(operatorResponse);
            }
        }
        if (o.stats.lastWindowedStats != null) {
            // call listeners only with a non-empty window list
            if (o.statsListeners != null) {
                plan.onStatusUpdate(o);
            }
        }
        reportStats.remove(o);
    }
    if (!this.shutdownOperators.isEmpty()) {
        synchronized (this.shutdownOperators) {
            Iterator<Map.Entry<Long, Set<PTOperator>>> it = shutdownOperators.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<Long, Set<PTOperator>> windowAndOpers = it.next();
                if (windowAndOpers.getKey().longValue() <= this.committedWindowId || checkDownStreamOperators(windowAndOpers)) {
                    LOG.info("Removing inactive operators at window {} {}", Codec.getStringWindowId(windowAndOpers.getKey()), windowAndOpers.getValue());
                    for (PTOperator oper : windowAndOpers.getValue()) {
                        plan.removeTerminatedPartition(oper);
                    }
                    it.remove();
                }
            }
        }
    }
    if (!eventQueue.isEmpty()) {
        for (PTOperator oper : plan.getAllOperators().values()) {
            if (oper.getState() != PTOperator.State.ACTIVE) {
                LOG.debug("Skipping plan updates due to inactive operator {} {}", oper, oper.getState());
                return 0;
            }
        }
    }
    int count = 0;
    Runnable command;
    while ((command = this.eventQueue.poll()) != null) {
        eventQueueProcessing.set(true);
        try {
            command.run();
            count++;
        } catch (Exception e) {
            // TODO: handle error
            LOG.error("Failed to execute {}", command, e);
        }
        eventQueueProcessing.set(false);
    }
    if (count > 0) {
        try {
            checkpoint();
        } catch (Exception e) {
            throw new RuntimeException("Failed to checkpoint state.", e);
        }
    }
    return count;
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) TreeSet(java.util.TreeSet) LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) OperatorStats(com.datatorrent.api.Stats.OperatorStats) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) NotFoundException(org.apache.hadoop.yarn.webapp.NotFoundException) IOException(java.io.IOException) JSONException(org.codehaus.jettison.json.JSONException) KryoException(com.esotericsoftware.kryo.KryoException) MutableLong(org.apache.commons.lang3.mutable.MutableLong) MovingAverageLong(com.datatorrent.stram.util.MovingAverage.MovingAverageLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap)
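The stats handling at the top of processEvents is a drain-and-merge pass over a concurrent queue: take the first queued batch, then keep polling and appending until the queue is empty, so the listeners see one consolidated list per pass. A minimal, self-contained sketch of that pattern (placeholder types, not the Apex classes) looks like this:

import java.util.List;
import java.util.Queue;

final class StatsDrain {
    /** Drains every queued batch into one list; returns null if nothing was queued. */
    static <T> List<T> drainAndMerge(Queue<List<T>> queue) {
        List<T> merged = queue.poll();
        if (merged != null) {
            List<T> more;
            while ((more = queue.poll()) != null) {
                merged.addAll(more);   // append into a single list, as processEvents does
            }
        }
        return merged;
    }
}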

Example 72 with EMPTY

use of org.apache.commons.lang3.StringUtils.EMPTY in project flink by apache.

the class Scheduler method findInstance.

/**
	 * Tries to find a requested instance. If no such instance is available it will return a non-
	 * local instance. If no such instance exists (all slots occupied), then return null.
	 * 
	 * <p><b>NOTE:</b> This method is not thread-safe, it needs to be synchronized by the caller.</p>
	 *
	 * @param requestedLocations The list of preferred instances. May be null or empty, which indicates that
	 *                           no locality preference exists.   
	 * @param localOnly Flag to indicate whether only one of the exact local instances can be chosen.  
	 */
private Pair<Instance, Locality> findInstance(Iterable<TaskManagerLocation> requestedLocations, boolean localOnly) {
    // drain the queue of newly available instances
    while (this.newlyAvailableInstances.size() > 0) {
        Instance queuedInstance = this.newlyAvailableInstances.poll();
        if (queuedInstance != null) {
            this.instancesWithAvailableResources.put(queuedInstance.getTaskManagerID(), queuedInstance);
        }
    }
    // if nothing is available at all, return null
    if (this.instancesWithAvailableResources.isEmpty()) {
        return null;
    }
    Iterator<TaskManagerLocation> locations = requestedLocations == null ? null : requestedLocations.iterator();
    if (locations != null && locations.hasNext()) {
        while (locations.hasNext()) {
            TaskManagerLocation location = locations.next();
            if (location != null) {
                Instance instance = instancesWithAvailableResources.remove(location.getResourceID());
                if (instance != null) {
                    return new ImmutablePair<Instance, Locality>(instance, Locality.LOCAL);
                }
            }
        }
        // no local instance available
        if (localOnly) {
            return null;
        } else {
            // take the first instance from the instances with resources
            Iterator<Instance> instances = instancesWithAvailableResources.values().iterator();
            Instance instanceToUse = instances.next();
            instances.remove();
            return new ImmutablePair<>(instanceToUse, Locality.NON_LOCAL);
        }
    } else {
        // no location preference, so use some instance
        Iterator<Instance> instances = instancesWithAvailableResources.values().iterator();
        Instance instanceToUse = instances.next();
        instances.remove();
        return new ImmutablePair<>(instanceToUse, Locality.UNCONSTRAINED);
    }
}
Also used : ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) Instance(org.apache.flink.runtime.instance.Instance) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation)
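A hypothetical call site (not taken from the Flink sources) shows how the returned pair is meant to be consumed: null means every slot is occupied, otherwise the pair carries the chosen instance together with the locality that was achieved. Only findInstance, Instance, Locality, Pair and TaskManagerLocation are real names from the example above; the rest is illustration.

private Instance pickInstanceOrNull(Iterable<TaskManagerLocation> preferredLocations) {
    Pair<Instance, Locality> match = findInstance(preferredLocations, false);
    if (match == null) {
        // every slot is occupied; the caller has to queue the request or fail it
        return null;
    }
    Instance instance = match.getLeft();
    Locality locality = match.getRight();   // LOCAL, NON_LOCAL or UNCONSTRAINED
    // ... allocate a slot on 'instance' and record 'locality' for reporting ...
    return instance;
}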

Example 73 with EMPTY

use of org.apache.commons.lang3.StringUtils.EMPTY in project gatk-protected by broadinstitute.

the class CoverageModelParameters method adaptModelToReadCountCollection.

/**
     * This method "adapts" a model to a read count collection in the following sense:
     *
     *     - removes targets that are not included in the model from the read counts collection
     *     - removes targets that are not included in the read count collection from the model
     *     - rearranges model targets in the same order as read count collection targets
     *
     * The modifications are not done in-place and the original input parameters remain intact.
     *
     * @param model a model
     * @param readCounts a read count collection
     * @return a pair of model and read count collection
     */
public static ImmutablePair<CoverageModelParameters, ReadCountCollection> adaptModelToReadCountCollection(@Nonnull final CoverageModelParameters model, @Nonnull final ReadCountCollection readCounts, @Nonnull final Logger logger) {
    logger.info("Adapting model to read counts...");
    Utils.nonNull(model, "The model parameters must be non-null");
    Utils.nonNull(readCounts, "The read count collection must be non-null");
    Utils.nonNull(logger, "The logger must be non-null");
    final List<Target> modelTargetList = model.getTargetList();
    final List<Target> readCountsTargetList = readCounts.targets();
    final Set<Target> mutualTargetSet = Sets.intersection(new HashSet<>(modelTargetList), new HashSet<>(readCountsTargetList));
    final List<Target> mutualTargetList = readCountsTargetList.stream().filter(mutualTargetSet::contains).collect(Collectors.toList());
    logger.info("Number of mutual targets: " + mutualTargetList.size());
    Utils.validateArg(mutualTargetList.size() > 0, "The intersection between model targets and targets from read count" + " collection is empty. Please check that the model is compatible with the given read count" + " collection.");
    if (modelTargetList.size() > mutualTargetList.size()) {
        logger.info("The following targets were dropped from the model: " + Sets.difference(new HashSet<>(modelTargetList), mutualTargetSet).stream().map(Target::getName).collect(Collectors.joining(", ", "[", "]")));
    }
    if (readCountsTargetList.size() > mutualTargetList.size()) {
        logger.info("The following targets were dropped from read counts: " + Sets.difference(new HashSet<>(readCountsTargetList), mutualTargetSet).stream().map(Target::getName).collect(Collectors.joining(", ", "[", "]")));
    }
    /* the targets in {@code subsetReadCounts} follow the original order of targets in {@code readCounts} */
    final ReadCountCollection subsetReadCounts = readCounts.subsetTargets(mutualTargetSet);
    /* fetch original model parameters */
    final INDArray originalModelTargetMeanBias = model.getTargetMeanLogBias();
    final INDArray originalModelTargetUnexplainedVariance = model.getTargetUnexplainedVariance();
    final INDArray originalModelMeanBiasCovariates = model.getMeanBiasCovariates();
    /* re-arrange targets, mean log bias, and target-specific unexplained variance */
    final Map<Target, Integer> modelTargetsToIndexMap = IntStream.range(0, modelTargetList.size()).mapToObj(ti -> ImmutablePair.of(modelTargetList.get(ti), ti)).collect(Collectors.toMap(Pair<Target, Integer>::getLeft, Pair<Target, Integer>::getRight));
    final int[] newTargetIndicesInOriginalModel = mutualTargetList.stream().mapToInt(modelTargetsToIndexMap::get).toArray();
    final INDArray newModelTargetMeanBias = Nd4j.create(new int[] { 1, mutualTargetList.size() });
    final INDArray newModelTargetUnexplainedVariance = Nd4j.create(new int[] { 1, mutualTargetList.size() });
    IntStream.range(0, mutualTargetList.size()).forEach(ti -> {
        newModelTargetMeanBias.put(0, ti, originalModelTargetMeanBias.getDouble(0, newTargetIndicesInOriginalModel[ti]));
        newModelTargetUnexplainedVariance.put(0, ti, originalModelTargetUnexplainedVariance.getDouble(0, newTargetIndicesInOriginalModel[ti]));
    });
    /* if model has bias covariates and/or ARD, re-arrange mean/var of bias covariates as well */
    final INDArray newModelMeanBiasCovariates;
    if (model.isBiasCovariatesEnabled()) {
        newModelMeanBiasCovariates = Nd4j.create(new int[] { mutualTargetList.size(), model.getNumLatents() });
        IntStream.range(0, mutualTargetList.size()).forEach(ti -> {
            newModelMeanBiasCovariates.get(NDArrayIndex.point(ti), NDArrayIndex.all()).assign(originalModelMeanBiasCovariates.get(NDArrayIndex.point(newTargetIndicesInOriginalModel[ti]), NDArrayIndex.all()));
        });
    } else {
        newModelMeanBiasCovariates = null;
    }
    return ImmutablePair.of(new CoverageModelParameters(mutualTargetList, newModelTargetMeanBias, newModelTargetUnexplainedVariance, newModelMeanBiasCovariates, model.getBiasCovariateARDCoefficients()), subsetReadCounts);
}
Also used : IntStream(java.util.stream.IntStream) java.util(java.util) NDArrayIndex(org.nd4j.linalg.indexing.NDArrayIndex) Nd4jIOUtils(org.broadinstitute.hellbender.tools.coveragemodel.nd4jutils.Nd4jIOUtils) Nd4j(org.nd4j.linalg.factory.Nd4j) Collectors(java.util.stream.Collectors) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) ParamUtils(org.broadinstitute.hellbender.utils.param.ParamUtils) Sets(com.google.cloud.dataflow.sdk.repackaged.com.google.common.collect.Sets) Logger(org.apache.logging.log4j.Logger) ReadCountCollection(org.broadinstitute.hellbender.tools.exome.ReadCountCollection) Pair(org.apache.commons.lang3.tuple.Pair) UserException(org.broadinstitute.hellbender.exceptions.UserException) java.io(java.io) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) RandomGeneratorFactory(org.apache.commons.math3.random.RandomGeneratorFactory) Target(org.broadinstitute.hellbender.tools.exome.Target) TargetTableReader(org.broadinstitute.hellbender.tools.exome.TargetTableReader) INDArray(org.nd4j.linalg.api.ndarray.INDArray) TargetWriter(org.broadinstitute.hellbender.tools.exome.TargetWriter) Utils(org.broadinstitute.hellbender.utils.Utils) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Target(org.broadinstitute.hellbender.tools.exome.Target) INDArray(org.nd4j.linalg.api.ndarray.INDArray) ReadCountCollection(org.broadinstitute.hellbender.tools.exome.ReadCountCollection)
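The heart of the re-arrangement above is an index remapping: build a map from each model target to its original column, then walk the mutual targets in read-count order and copy values from the old positions into a new array. A stripped-down sketch of that step using plain Java collections (String stands in for Target, no Nd4j involved) is:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class TargetRemap {
    /** Returns oldValues re-ordered so that entry i corresponds to keptTargets.get(i). */
    static double[] reorder(List<String> originalTargets, List<String> keptTargets, double[] oldValues) {
        Map<String, Integer> indexOf = new HashMap<>();
        for (int i = 0; i < originalTargets.size(); i++) {
            indexOf.put(originalTargets.get(i), i);   // target name -> original column index
        }
        double[] newValues = new double[keptTargets.size()];
        for (int i = 0; i < keptTargets.size(); i++) {
            newValues[i] = oldValues[indexOf.get(keptTargets.get(i))];
        }
        return newValues;
    }
}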

Example 74 with EMPTY

use of org.apache.commons.lang3.StringUtils.EMPTY in project gatk by broadinstitute.

the class CoverageModelParameters method adaptModelToReadCountCollection.

/**
     * This method "adapts" a model to a read count collection in the following sense:
     *
     *     - removes targets that are not included in the model from the read counts collection
     *     - removes targets that are not included in the read count collection from the model
     *     - rearranges model targets in the same order as read count collection targets
     *
     * The modifications are not done in-place and the original input parameters remain intact.
     *
     * @param model a model
     * @param readCounts a read count collection
     * @return a pair of model and read count collection
     */
public static ImmutablePair<CoverageModelParameters, ReadCountCollection> adaptModelToReadCountCollection(@Nonnull final CoverageModelParameters model, @Nonnull final ReadCountCollection readCounts, @Nonnull final Logger logger) {
    logger.info("Adapting model to read counts...");
    Utils.nonNull(model, "The model parameters must be non-null");
    Utils.nonNull(readCounts, "The read count collection must be non-null");
    Utils.nonNull(logger, "The logger must be non-null");
    final List<Target> modelTargetList = model.getTargetList();
    final List<Target> readCountsTargetList = readCounts.targets();
    final Set<Target> mutualTargetSet = Sets.intersection(new HashSet<>(modelTargetList), new HashSet<>(readCountsTargetList));
    final List<Target> mutualTargetList = readCountsTargetList.stream().filter(mutualTargetSet::contains).collect(Collectors.toList());
    logger.info("Number of mutual targets: " + mutualTargetList.size());
    Utils.validateArg(mutualTargetList.size() > 0, "The intersection between model targets and targets from read count" + " collection is empty. Please check that the model is compatible with the given read count" + " collection.");
    if (modelTargetList.size() > mutualTargetList.size()) {
        logger.info("The following targets were dropped from the model: " + Sets.difference(new HashSet<>(modelTargetList), mutualTargetSet).stream().map(Target::getName).collect(Collectors.joining(", ", "[", "]")));
    }
    if (readCountsTargetList.size() > mutualTargetList.size()) {
        logger.info("The following targets were dropped from read counts: " + Sets.difference(new HashSet<>(readCountsTargetList), mutualTargetSet).stream().map(Target::getName).collect(Collectors.joining(", ", "[", "]")));
    }
    /* the targets in {@code subsetReadCounts} follow the original order of targets in {@code readCounts} */
    final ReadCountCollection subsetReadCounts = readCounts.subsetTargets(mutualTargetSet);
    /* fetch original model parameters */
    final INDArray originalModelTargetMeanBias = model.getTargetMeanLogBias();
    final INDArray originalModelTargetUnexplainedVariance = model.getTargetUnexplainedVariance();
    final INDArray originalModelMeanBiasCovariates = model.getMeanBiasCovariates();
    /* re-arrange targets, mean log bias, and target-specific unexplained variance */
    final Map<Target, Integer> modelTargetsToIndexMap = IntStream.range(0, modelTargetList.size()).mapToObj(ti -> ImmutablePair.of(modelTargetList.get(ti), ti)).collect(Collectors.toMap(Pair<Target, Integer>::getLeft, Pair<Target, Integer>::getRight));
    final int[] newTargetIndicesInOriginalModel = mutualTargetList.stream().mapToInt(modelTargetsToIndexMap::get).toArray();
    final INDArray newModelTargetMeanBias = Nd4j.create(new int[] { 1, mutualTargetList.size() });
    final INDArray newModelTargetUnexplainedVariance = Nd4j.create(new int[] { 1, mutualTargetList.size() });
    IntStream.range(0, mutualTargetList.size()).forEach(ti -> {
        newModelTargetMeanBias.put(0, ti, originalModelTargetMeanBias.getDouble(0, newTargetIndicesInOriginalModel[ti]));
        newModelTargetUnexplainedVariance.put(0, ti, originalModelTargetUnexplainedVariance.getDouble(0, newTargetIndicesInOriginalModel[ti]));
    });
    /* if model has bias covariates and/or ARD, re-arrange mean/var of bias covariates as well */
    final INDArray newModelMeanBiasCovariates;
    if (model.isBiasCovariatesEnabled()) {
        newModelMeanBiasCovariates = Nd4j.create(new int[] { mutualTargetList.size(), model.getNumLatents() });
        IntStream.range(0, mutualTargetList.size()).forEach(ti -> {
            newModelMeanBiasCovariates.get(NDArrayIndex.point(ti), NDArrayIndex.all()).assign(originalModelMeanBiasCovariates.get(NDArrayIndex.point(newTargetIndicesInOriginalModel[ti]), NDArrayIndex.all()));
        });
    } else {
        newModelMeanBiasCovariates = null;
    }
    return ImmutablePair.of(new CoverageModelParameters(mutualTargetList, newModelTargetMeanBias, newModelTargetUnexplainedVariance, newModelMeanBiasCovariates, model.getBiasCovariateARDCoefficients()), subsetReadCounts);
}
Also used : IntStream(java.util.stream.IntStream) java.util(java.util) NDArrayIndex(org.nd4j.linalg.indexing.NDArrayIndex) Nd4jIOUtils(org.broadinstitute.hellbender.tools.coveragemodel.nd4jutils.Nd4jIOUtils) Nd4j(org.nd4j.linalg.factory.Nd4j) Collectors(java.util.stream.Collectors) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) ParamUtils(org.broadinstitute.hellbender.utils.param.ParamUtils) Sets(com.google.cloud.dataflow.sdk.repackaged.com.google.common.collect.Sets) Logger(org.apache.logging.log4j.Logger) ReadCountCollection(org.broadinstitute.hellbender.tools.exome.ReadCountCollection) Pair(org.apache.commons.lang3.tuple.Pair) UserException(org.broadinstitute.hellbender.exceptions.UserException) java.io(java.io) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) RandomGeneratorFactory(org.apache.commons.math3.random.RandomGeneratorFactory) Target(org.broadinstitute.hellbender.tools.exome.Target) TargetTableReader(org.broadinstitute.hellbender.tools.exome.TargetTableReader) INDArray(org.nd4j.linalg.api.ndarray.INDArray) TargetWriter(org.broadinstitute.hellbender.tools.exome.TargetWriter) Utils(org.broadinstitute.hellbender.utils.Utils) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Target(org.broadinstitute.hellbender.tools.exome.Target) INDArray(org.nd4j.linalg.api.ndarray.INDArray) ReadCountCollection(org.broadinstitute.hellbender.tools.exome.ReadCountCollection)

Example 75 with EMPTY

use of org.apache.commons.lang3.StringUtils.EMPTY in project hive by apache.

the class DruidStorageHandler method checkLoadStatus.

/**
 * This function checks the load status of Druid segments by polling the Druid coordinator.
 * @param segments list of Druid segments to check
 *
 * @return the number of segments that are not yet loaded.
 */
private int checkLoadStatus(List<DataSegment> segments) {
    final String coordinatorAddress = HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS);
    int maxTries = HiveConf.getIntVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_MAX_TRIES);
    if (maxTries == 0) {
        return segments.size();
    }
    LOG.debug("checking load status from coordinator {}", coordinatorAddress);
    String coordinatorResponse;
    try {
        coordinatorResponse = RetryUtils.retry(() -> DruidStorageHandlerUtils.getURL(getHttpClient(), new URL(String.format("http://%s/status", coordinatorAddress))), input -> input instanceof IOException, maxTries);
    } catch (Exception e) {
        console.printInfo("Will skip waiting for data loading, coordinator unavailable");
        return segments.size();
    }
    if (Strings.isNullOrEmpty(coordinatorResponse)) {
        console.printInfo("Will skip waiting for data loading, empty response from coordinator");
        return segments.size();
    }
    console.printInfo(String.format("Waiting for the loading of [%s] segments", segments.size()));
    long passiveWaitTimeMs = HiveConf.getLongVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_PASSIVE_WAIT_TIME);
    Set<URL> UrlsOfUnloadedSegments = segments.stream().map(dataSegment -> {
        try {
            // Need to make sure that we are using segment identifier
            return new URL(String.format("http://%s/druid/coordinator/v1/datasources/%s/segments/%s", coordinatorAddress, dataSegment.getDataSource(), dataSegment.getIdentifier()));
        } catch (MalformedURLException e) {
            Throwables.propagate(e);
        }
        return null;
    }).collect(Collectors.toSet());
    int numRetries = 0;
    while (numRetries++ < maxTries && !UrlsOfUnloadedSegments.isEmpty()) {
        UrlsOfUnloadedSegments = ImmutableSet.copyOf(Sets.filter(UrlsOfUnloadedSegments, input -> {
            try {
                String result = DruidStorageHandlerUtils.getURL(getHttpClient(), input);
                LOG.debug("Checking segment [{}] response is [{}]", input, result);
                return Strings.isNullOrEmpty(result);
            } catch (IOException e) {
                LOG.error(String.format("Error while checking URL [%s]", input), e);
                return true;
            }
        }));
        try {
            if (!UrlsOfUnloadedSegments.isEmpty()) {
                Thread.sleep(passiveWaitTimeMs);
            }
        } catch (InterruptedException e) {
            // restore the interrupt status (Thread.interrupted() would clear it) before propagating
            Thread.currentThread().interrupt();
            Throwables.propagate(e);
        }
    }
    if (!UrlsOfUnloadedSegments.isEmpty()) {
        // We are not Throwing an exception since it might be a transient issue that is blocking loading
        console.printError(String.format("Wait time exhausted and we have [%s] out of [%s] segments not loaded yet", UrlsOfUnloadedSegments.size(), segments.size()));
    }
    return UrlsOfUnloadedSegments.size();
}
Also used : SQLMetadataConnector(io.druid.metadata.SQLMetadataConnector) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Lifecycle(com.metamx.common.lifecycle.Lifecycle) FileSystem(org.apache.hadoop.fs.FileSystem) URL(java.net.URL) LoggerFactory(org.slf4j.LoggerFactory) HttpClientInit(com.metamx.http.client.HttpClientInit) DerbyConnector(io.druid.metadata.storage.derby.DerbyConnector) StringUtils(org.apache.commons.lang3.StringUtils) AbstractSerDe(org.apache.hadoop.hive.serde2.AbstractSerDe) HttpClientConfig(com.metamx.http.client.HttpClientConfig) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) DruidRecordWriter(org.apache.hadoop.hive.druid.io.DruidRecordWriter) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) OutputFormat(org.apache.hadoop.mapred.OutputFormat) DruidSerDe(org.apache.hadoop.hive.druid.serde.DruidSerDe) SegmentLoadingException(io.druid.segment.loading.SegmentLoadingException) MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils) DefaultHiveAuthorizationProvider(org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider) ImmutableSet(com.google.common.collect.ImmutableSet) DataSegment(io.druid.timeline.DataSegment) Collection(java.util.Collection) HiveMetaHook(org.apache.hadoop.hive.metastore.HiveMetaHook) KerberosHttpClient(org.apache.hadoop.hive.druid.security.KerberosHttpClient) Set(java.util.Set) DefaultHiveMetaHook(org.apache.hadoop.hive.metastore.DefaultHiveMetaHook) MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) SessionState(org.apache.hadoop.hive.ql.session.SessionState) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) List(java.util.List) DerbyMetadataStorage(io.druid.metadata.storage.derby.DerbyMetadataStorage) HttpClient(com.metamx.http.client.HttpClient) MetadataStorageConnectorConfig(io.druid.metadata.MetadataStorageConnectorConfig) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Supplier(com.google.common.base.Supplier) RetryUtils(com.metamx.common.RetryUtils) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Suppliers(com.google.common.base.Suppliers) Constants(org.apache.hadoop.hive.conf.Constants) DruidQueryBasedInputFormat(org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat) Period(org.joda.time.Period) Logger(org.slf4j.Logger) MalformedURLException(java.net.MalformedURLException) HiveConf(org.apache.hadoop.hive.conf.HiveConf) DateTime(org.joda.time.DateTime) Throwables(com.google.common.base.Throwables) IOException(java.io.IOException) Table(org.apache.hadoop.hive.metastore.api.Table) ShutdownHookManager(org.apache.hive.common.util.ShutdownHookManager) JobConf(org.apache.hadoop.mapred.JobConf) HdfsDataSegmentPusherConfig(io.druid.storage.hdfs.HdfsDataSegmentPusherConfig) DruidOutputFormat(org.apache.hadoop.hive.druid.io.DruidOutputFormat) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) 
HiveAuthorizationProvider(org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider) PostgreSQLConnector(io.druid.metadata.storage.postgresql.PostgreSQLConnector) MySQLConnector(io.druid.metadata.storage.mysql.MySQLConnector) MalformedURLException(java.net.MalformedURLException) IOException(java.io.IOException) URL(java.net.URL) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) SegmentLoadingException(io.druid.segment.loading.SegmentLoadingException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) MalformedURLException(java.net.MalformedURLException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException)
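The method above boils down to a bounded poll-and-filter loop: keep the set of segment URLs that still report as unloaded, sleep between passes, and stop once the set is empty or the retry budget is spent. A minimal, generic sketch of that loop (the predicate, wait time and names are placeholders, not the Hive or Druid API) is:

import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

final class PollUntilLoaded {
    /** Returns the items that still fail the check after at most maxTries passes. */
    static <T> Set<T> awaitLoaded(Set<T> pending, Predicate<T> isLoaded, int maxTries, long waitMs)
            throws InterruptedException {
        Set<T> remaining = new HashSet<>(pending);
        int tries = 0;
        while (tries++ < maxTries && !remaining.isEmpty()) {
            remaining.removeIf(isLoaded);   // drop everything that now reports as loaded
            if (!remaining.isEmpty()) {
                Thread.sleep(waitMs);       // passive wait before the next pass
            }
        }
        return remaining;
    }
}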

Aggregations

List (java.util.List) 44 Map (java.util.Map) 42 ArrayList (java.util.ArrayList) 41 StringUtils (org.apache.commons.lang3.StringUtils) 38 Collectors (java.util.stream.Collectors) 37 HashMap (java.util.HashMap) 33 IOException (java.io.IOException) 27 Set (java.util.Set) 25 HashSet (java.util.HashSet) 22 LoggerFactory (org.slf4j.LoggerFactory) 22 Pair (org.apache.commons.lang3.tuple.Pair) 20 Logger (org.slf4j.Logger) 20 Optional (java.util.Optional) 19 Collections (java.util.Collections) 17 ImmutablePair (org.apache.commons.lang3.tuple.ImmutablePair) 17 java.util (java.util) 15 Arrays.asList (java.util.Arrays.asList) 14 Collection (java.util.Collection) 14 Stream (java.util.stream.Stream) 14 Arrays (java.util.Arrays) 12