
Example 1 with Multimap

Use of com.google.common.collect.Multimap in project hive by apache.

From the class CustomPartitionVertex, the method processAllEvents:

private void processAllEvents(String inputName, Multimap<Integer, InputSplit> bucketToGroupedSplitMap, boolean secondLevelGroupingDone) throws IOException {
    int totalInputsCount = 0;
    List<Integer> numSplitsForTask = new ArrayList<Integer>();
    for (Entry<Integer, Collection<InputSplit>> entry : bucketToGroupedSplitMap.asMap().entrySet()) {
        int bucketNum = entry.getKey();
        Collection<InputSplit> initialSplits = entry.getValue();
        finalSplits.addAll(initialSplits);
        for (InputSplit inputSplit : initialSplits) {
            bucketToTaskMap.put(bucketNum, taskCount);
            if (secondLevelGroupingDone) {
                TezGroupedSplit groupedSplit = (TezGroupedSplit) inputSplit;
                numSplitsForTask.add(groupedSplit.getGroupedSplits().size());
                totalInputsCount += groupedSplit.getGroupedSplits().size();
            } else {
                numSplitsForTask.add(1);
                totalInputsCount += 1;
            }
            taskCount++;
        }
    }
    inputNameInputSpecMap.put(inputName, InputSpecUpdate.createPerTaskInputSpecUpdate(numSplitsForTask));
    // Construct the EdgeManager descriptor to be used by all edges which need
    // the routing table.
    EdgeManagerPluginDescriptor hiveEdgeManagerDesc = null;
    if ((vertexType == VertexType.MULTI_INPUT_INITIALIZED_EDGES) || (vertexType == VertexType.INITIALIZED_EDGES)) {
        hiveEdgeManagerDesc = EdgeManagerPluginDescriptor.create(CustomPartitionEdge.class.getName());
        UserPayload payload = getBytePayload(bucketToTaskMap);
        hiveEdgeManagerDesc.setUserPayload(payload);
    }
    // Replace the edge manager for all vertices which have routing type custom.
    for (Entry<String, EdgeProperty> edgeEntry : context.getInputVertexEdgeProperties().entrySet()) {
        if (edgeEntry.getValue().getDataMovementType() == DataMovementType.CUSTOM && edgeEntry.getValue().getEdgeManagerDescriptor().getClassName().equals(CustomPartitionEdge.class.getName())) {
            emMap.put(edgeEntry.getKey(), hiveEdgeManagerDesc);
        }
    }
    LOG.info("Task count is " + taskCount + " for input name: " + inputName);
    List<InputDataInformationEvent> taskEvents = Lists.newArrayListWithCapacity(totalInputsCount);
    // Re-serialize the splits after grouping.
    int count = 0;
    for (InputSplit inputSplit : finalSplits) {
        if (secondLevelGroupingDone) {
            TezGroupedSplit tezGroupedSplit = (TezGroupedSplit) inputSplit;
            for (InputSplit subSplit : tezGroupedSplit.getGroupedSplits()) {
                if (!(subSplit instanceof TezGroupedSplit)) {
                    throw new IOException("Unexpected split type found: " + subSplit.getClass().getCanonicalName());
                }
                MRSplitProto serializedSplit = MRInputHelpers.createSplitProto(subSplit);
                InputDataInformationEvent diEvent = InputDataInformationEvent.createWithSerializedPayload(count, serializedSplit.toByteString().asReadOnlyByteBuffer());
                diEvent.setTargetIndex(count);
                taskEvents.add(diEvent);
            }
        } else {
            MRSplitProto serializedSplit = MRInputHelpers.createSplitProto(inputSplit);
            InputDataInformationEvent diEvent = InputDataInformationEvent.createWithSerializedPayload(count, serializedSplit.toByteString().asReadOnlyByteBuffer());
            diEvent.setTargetIndex(count);
            taskEvents.add(diEvent);
        }
        count++;
    }
    // Set the actual events for the tasks.
    LOG.info("For input name: " + inputName + " task events size is " + taskEvents.size());
    context.addRootInputEvents(inputName, taskEvents);
    if (!inputToGroupedSplitMap.isEmpty()) {
        for (Entry<String, Multimap<Integer, InputSplit>> entry : inputToGroupedSplitMap.entrySet()) {
            processAllSideEvents(entry.getKey(), entry.getValue());
        }
        setVertexParallelismAndRootInputSpec(inputNameInputSpecMap);
        inputToGroupedSplitMap.clear();
    }
    // Only done when it is a bucket map join only no SMB.
    if (numInputsAffectingRootInputSpecUpdate == 1) {
        setVertexParallelismAndRootInputSpec(inputNameInputSpecMap);
    }
}
Also used: UserPayload(org.apache.tez.dag.api.UserPayload) ArrayList(java.util.ArrayList) TezGroupedSplit(org.apache.hadoop.mapred.split.TezGroupedSplit) ByteString(com.google.protobuf.ByteString) IOException(java.io.IOException) VertexLocationHint(org.apache.tez.dag.api.VertexLocationHint) ArrayListMultimap(com.google.common.collect.ArrayListMultimap) HashMultimap(com.google.common.collect.HashMultimap) LinkedListMultimap(com.google.common.collect.LinkedListMultimap) Multimap(com.google.common.collect.Multimap) EdgeManagerPluginDescriptor(org.apache.tez.dag.api.EdgeManagerPluginDescriptor) Collection(java.util.Collection) EdgeProperty(org.apache.tez.dag.api.EdgeProperty) InputSplit(org.apache.hadoop.mapred.InputSplit) InputDataInformationEvent(org.apache.tez.runtime.api.events.InputDataInformationEvent) MRSplitProto(org.apache.tez.mapreduce.protos.MRRuntimeProtos.MRSplitProto)
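The method above walks a Multimap<Integer, InputSplit> keyed by bucket number and consumes it through asMap(), which exposes each bucket's splits as one Collection. Below is a minimal, self-contained sketch of that pattern; it uses ArrayListMultimap and String placeholders instead of Hadoop's InputSplit, purely for illustration.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map.Entry;

public class BucketGroupingSketch {
    public static void main(String[] args) {
        // One bucket number maps to many splits; a Multimap models this directly.
        Multimap<Integer, String> bucketToGroupedSplits = ArrayListMultimap.create();
        bucketToGroupedSplits.put(0, "split-a");
        bucketToGroupedSplits.put(0, "split-b");
        bucketToGroupedSplits.put(1, "split-c");

        int taskCount = 0;
        // asMap() views the Multimap as a Map<Integer, Collection<String>>,
        // the same shape processAllEvents iterates over.
        for (Entry<Integer, Collection<String>> entry : bucketToGroupedSplits.asMap().entrySet()) {
            for (String split : entry.getValue()) {
                System.out.println("bucket " + entry.getKey() + " -> task " + taskCount + " (" + split + ")");
                taskCount++;
            }
        }
    }
}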

Example 2 with Multimap

Use of com.google.common.collect.Multimap in project hbase by apache.

From the class TestLoadIncrementalHFilesSplitRecovery, the method testGroupOrSplitWhenRegionHoleExistsInMeta:

@Test(timeout = 120000)
public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
    // Share connection. We were failing to find the table with our new reverse scan because it
    // looks for first region, not any region -- that is how it works now.  The below removes first
    // region in test.  Was reliant on the Connection caching having first region.
    Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
    Table table = connection.getTable(tableName);
    setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
    Path dir = buildBulkFiles(tableName, 2);
    final AtomicInteger countedLqis = new AtomicInteger();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {

        @Override
        protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
            Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
            if (lqis != null && lqis.getFirst() != null) {
                countedLqis.addAndGet(lqis.getFirst().size());
            }
            return lqis;
        }
    };
    // do bulkload when there is no region hole in hbase:meta.
    try (Table t = connection.getTable(tableName);
        RegionLocator locator = connection.getRegionLocator(tableName);
        Admin admin = connection.getAdmin()) {
        loader.doBulkLoad(dir, admin, t, locator);
    } catch (Exception e) {
        LOG.error("exeception=", e);
    }
    // check if all the data are loaded into the table.
    this.assertExpectedTable(tableName, ROWCOUNT, 2);
    dir = buildBulkFiles(tableName, 3);
    // Mess it up by leaving a hole in the hbase:meta
    List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
    for (HRegionInfo regionInfo : regionInfos) {
        if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
            MetaTableAccessor.deleteRegion(connection, regionInfo);
            break;
        }
    }
    try (Table t = connection.getTable(tableName);
        RegionLocator locator = connection.getRegionLocator(tableName);
        Admin admin = connection.getAdmin()) {
        loader.doBulkLoad(dir, admin, t, locator);
    } catch (Exception e) {
        LOG.error("exception=", e);
        assertTrue("IOException expected", e instanceof IOException);
    }
    table.close();
    // Make sure at least the one region that still exists can be found.
    regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
    assertTrue(regionInfos.size() >= 1);
    this.assertExpectedTable(connection, tableName, ROWCOUNT, 2);
    connection.close();
}
Also used: Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableExistsException(org.apache.hadoop.hbase.TableExistsException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
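In the test above, regionGroups is a Multimap keyed by region start key, and the overridden groupOrSplit simply counts the items that were grouped. A hedged, standalone sketch of grouping and counting with a Multimap; the keys and file names are invented for illustration.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class RegionGroupingSketch {
    public static void main(String[] args) {
        // Keyed by region start key; values are the HFiles queued for that region.
        Multimap<String, String> regionGroups = HashMultimap.create();
        regionGroups.put("", "hfile-1");              // first region has the empty start key
        regionGroups.put("row_00000100", "hfile-2");
        regionGroups.put("row_00000100", "hfile-3");

        // size() counts key-value pairs across all keys, which mirrors how the
        // test tallies grouped LoadQueueItems with an AtomicInteger.
        System.out.println("items grouped: " + regionGroups.size());
        System.out.println("regions touched: " + regionGroups.keySet().size());
    }
}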

Example 3 with Multimap

Use of com.google.common.collect.Multimap in project buck by facebook.

From the class AndroidBinary, the method addDexingSteps:

/**
   * Create dex artifacts for all of the individual directories of compiled .class files (or
   * the obfuscated jar files if proguard is used).  If split dex is used, multiple dex artifacts
   * will be produced.
   *  @param classpathEntriesToDex Full set of classpath entries that must make
   *     their way into the final APK structure (but not necessarily into the
   *     primary dex).
   * @param secondaryDexDirectories The contract for updating this builder must match that
   *     of {@link PreDexMerge#getSecondaryDexDirectories()}.
   * @param steps List of steps to add to.
   * @param primaryDexPath Output path for the primary dex file.
   */
@VisibleForTesting
void addDexingSteps(
        Set<Path> classpathEntriesToDex,
        Supplier<ImmutableMap<String, HashCode>> classNamesToHashesSupplier,
        ImmutableSet.Builder<Path> secondaryDexDirectories,
        ImmutableList.Builder<Step> steps,
        Path primaryDexPath,
        Optional<SourcePath> dexReorderToolFile,
        Optional<SourcePath> dexReorderDataDumpFile,
        ImmutableMultimap<APKModule, Path> additionalDexStoreToJarPathMap,
        SourcePathResolver resolver) {
    final Supplier<Set<Path>> primaryInputsToDex;
    final Optional<Path> secondaryDexDir;
    final Optional<Supplier<Multimap<Path, Path>>> secondaryOutputToInputs;
    Path secondaryDexParentDir = getBinPath("__%s_secondary_dex__/");
    Path additionalDexParentDir = getBinPath("__%s_additional_dex__/");
    Path additionalDexAssetsDir = additionalDexParentDir.resolve("assets");
    final Optional<ImmutableSet<Path>> additionalDexDirs;
    if (shouldSplitDex()) {
        Optional<Path> proguardFullConfigFile = Optional.empty();
        Optional<Path> proguardMappingFile = Optional.empty();
        if (packageType.isBuildWithObfuscation()) {
            Path proguardConfigDir = getProguardTextFilesPath();
            proguardFullConfigFile = Optional.of(proguardConfigDir.resolve("configuration.txt"));
            proguardMappingFile = Optional.of(proguardConfigDir.resolve("mapping.txt"));
        }
        // DexLibLoader expects that metadata.txt and secondary jar files are under this dir
        // in assets.
        // Intermediate directory holding the primary split-zip jar.
        Path splitZipDir = getBinPath("__%s_split_zip__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), splitZipDir));
        Path primaryJarPath = splitZipDir.resolve("primary.jar");
        Path secondaryJarMetaDirParent = splitZipDir.resolve("secondary_meta");
        Path secondaryJarMetaDir = secondaryJarMetaDirParent.resolve(SECONDARY_DEX_SUBDIR);
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryJarMetaDir));
        Path secondaryJarMeta = secondaryJarMetaDir.resolve("metadata.txt");
        // Intermediate directory holding _ONLY_ the secondary split-zip jar files.  This is
        // important because SmartDexingCommand will try to dx every entry in this directory.  It
        // does this because it's impossible to know what outputs split-zip will generate until it
        // runs.
        final Path secondaryZipDir = getBinPath("__%s_secondary_zip__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryZipDir));
        // Intermediate directory holding the directories holding _ONLY_ the additional split-zip
        // jar files that are intended for that dex store.
        final Path additionalDexStoresZipDir = getBinPath("__%s_dex_stores_zip__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), additionalDexStoresZipDir));
        for (APKModule dexStore : additionalDexStoreToJarPathMap.keySet()) {
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), additionalDexStoresZipDir.resolve(dexStore.getName())));
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryJarMetaDirParent.resolve("assets").resolve(dexStore.getName())));
        }
        // Run the split-zip command which is responsible for dividing the large set of input
        // classpaths into a more compact set of jar files such that no one jar file when dexed will
        // yield a dex artifact too large for dexopt or the dx method limit to handle.
        Path zipSplitReportDir = getBinPath("__%s_split_zip_report__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), zipSplitReportDir));
        SplitZipStep splitZipCommand = new SplitZipStep(
            getProjectFilesystem(),
            classpathEntriesToDex,
            secondaryJarMeta,
            primaryJarPath,
            secondaryZipDir,
            "secondary-%d.jar",
            secondaryJarMetaDirParent,
            additionalDexStoresZipDir,
            proguardFullConfigFile,
            proguardMappingFile,
            skipProguard,
            dexSplitMode,
            dexSplitMode.getPrimaryDexScenarioFile().map(resolver::getAbsolutePath),
            dexSplitMode.getPrimaryDexClassesFile().map(resolver::getAbsolutePath),
            dexSplitMode.getSecondaryDexHeadClassesFile().map(resolver::getAbsolutePath),
            dexSplitMode.getSecondaryDexTailClassesFile().map(resolver::getAbsolutePath),
            additionalDexStoreToJarPathMap,
            enhancementResult.getAPKModuleGraph(),
            zipSplitReportDir);
        steps.add(splitZipCommand);
        // The secondary dex directory is created by the smart dexing command. Smart dex
        // will handle "cleaning" this directory properly.
        if (reorderClassesIntraDex) {
            secondaryDexDir = Optional.of(secondaryDexParentDir.resolve(SMART_DEX_SECONDARY_DEX_SUBDIR));
            Path intraDexReorderSecondaryDexDir = secondaryDexParentDir.resolve(SECONDARY_DEX_SUBDIR);
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryDexDir.get()));
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), intraDexReorderSecondaryDexDir));
        } else {
            secondaryDexDir = Optional.of(secondaryDexParentDir.resolve(SECONDARY_DEX_SUBDIR));
            steps.add(new MkdirStep(getProjectFilesystem(), secondaryDexDir.get()));
        }
        if (additionalDexStoreToJarPathMap.isEmpty()) {
            additionalDexDirs = Optional.empty();
        } else {
            ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
            for (APKModule dexStore : additionalDexStoreToJarPathMap.keySet()) {
                Path dexStorePath = additionalDexAssetsDir.resolve(dexStore.getName());
                builder.add(dexStorePath);
                steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), dexStorePath));
            }
            additionalDexDirs = Optional.of(builder.build());
        }
        if (dexSplitMode.getDexStore() == DexStore.RAW) {
            secondaryDexDirectories.add(secondaryDexDir.get());
        } else {
            secondaryDexDirectories.add(secondaryJarMetaDirParent);
            secondaryDexDirectories.add(secondaryDexParentDir);
        }
        if (additionalDexDirs.isPresent()) {
            secondaryDexDirectories.add(additionalDexParentDir);
        }
        // Adjust smart-dex inputs for the split-zip case.
        primaryInputsToDex = Suppliers.ofInstance(ImmutableSet.of(primaryJarPath));
        Supplier<Multimap<Path, Path>> secondaryOutputToInputsMap = splitZipCommand.getOutputToInputsMapSupplier(secondaryDexDir.get(), additionalDexAssetsDir);
        secondaryOutputToInputs = Optional.of(secondaryOutputToInputsMap);
    } else {
        // Simple case where our inputs are the natural classpath directories and we don't have
        // to worry about secondary jar/dex files.
        primaryInputsToDex = Suppliers.ofInstance(classpathEntriesToDex);
        secondaryDexDir = Optional.empty();
        secondaryOutputToInputs = Optional.empty();
    }
    HashInputJarsToDexStep hashInputJarsToDexStep = new HashInputJarsToDexStep(getProjectFilesystem(), primaryInputsToDex, secondaryOutputToInputs, classNamesToHashesSupplier);
    steps.add(hashInputJarsToDexStep);
    // Stores checksum information from each invocation to intelligently decide when dx needs
    // to be re-run.
    Path successDir = getBinPath("__%s_smart_dex__/.success");
    steps.add(new MkdirStep(getProjectFilesystem(), successDir));
    // Add the smart dexing tool that is capable of avoiding the external dx invocation(s) if
    // it can be shown that the inputs have not changed.  It also parallelizes dx invocations
    // where applicable.
    //
    // Note that by not specifying the number of threads this command will use it will select an
    // optimal default regardless of the value of --num-threads.  This decision was made with the
    // assumption that --num-threads specifies the threading of build rule execution and does not
    // directly apply to the internal threading/parallelization details of various build commands
    // being executed.  For example, aapt is internally threaded by default when preprocessing
    // images.
    EnumSet<DxStep.Option> dxOptions = PackageType.RELEASE.equals(packageType) ? EnumSet.of(DxStep.Option.NO_LOCALS) : EnumSet.of(DxStep.Option.NO_OPTIMIZE);
    Path selectedPrimaryDexPath = primaryDexPath;
    if (reorderClassesIntraDex) {
        String primaryDexFileName = primaryDexPath.getFileName().toString();
        String smartDexPrimaryDexFileName = "smart-dex-" + primaryDexFileName;
        Path smartDexPrimaryDexPath = Paths.get(primaryDexPath.toString().replace(primaryDexFileName, smartDexPrimaryDexFileName));
        selectedPrimaryDexPath = smartDexPrimaryDexPath;
    }
    SmartDexingStep smartDexingCommand = new SmartDexingStep(getProjectFilesystem(), selectedPrimaryDexPath, primaryInputsToDex, secondaryDexDir, secondaryOutputToInputs, hashInputJarsToDexStep, successDir, dxOptions, dxExecutorService, xzCompressionLevel, dxMaxHeapSize);
    steps.add(smartDexingCommand);
    if (reorderClassesIntraDex) {
        IntraDexReorderStep intraDexReorderStep = new IntraDexReorderStep(getProjectFilesystem(), resolver.getAbsolutePath(dexReorderToolFile.get()), resolver.getAbsolutePath(dexReorderDataDumpFile.get()), getBuildTarget(), selectedPrimaryDexPath, primaryDexPath, secondaryOutputToInputs, SMART_DEX_SECONDARY_DEX_SUBDIR, SECONDARY_DEX_SUBDIR);
        steps.add(intraDexReorderStep);
    }
}
Also used: Path(java.nio.file.Path) SourcePath(com.facebook.buck.rules.SourcePath) ExplicitBuildTargetSourcePath(com.facebook.buck.rules.ExplicitBuildTargetSourcePath) EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) ImmutableSortedSet(com.google.common.collect.ImmutableSortedSet) MkdirStep(com.facebook.buck.step.fs.MkdirStep) Multimap(com.google.common.collect.Multimap) ImmutableMultimap(com.google.common.collect.ImmutableMultimap) MakeCleanDirectoryStep(com.facebook.buck.step.fs.MakeCleanDirectoryStep) Supplier(com.google.common.base.Supplier) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
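addDexingSteps passes the secondary output-to-inputs mapping around as an Optional<Supplier<Multimap<Path, Path>>> because the split-zip outputs are only known after that step runs. Below is a minimal sketch of deferring a Multimap behind a memoized Guava Supplier; the scanOutputs method and its paths are stand-ins, not Buck's actual logic.

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;

import java.nio.file.Path;
import java.nio.file.Paths;

public class LazyOutputToInputsSketch {
    // Computed at most once, and only when somebody actually asks for the mapping.
    static final Supplier<Multimap<Path, Path>> OUTPUT_TO_INPUTS =
            Suppliers.memoize(LazyOutputToInputsSketch::scanOutputs);

    // Stand-in for discovering which input jars feed each secondary dex output.
    private static Multimap<Path, Path> scanOutputs() {
        return ImmutableMultimap.<Path, Path>builder()
                .put(Paths.get("secondary-1.dex.jar"), Paths.get("secondary-1.jar"))
                .put(Paths.get("secondary-2.dex.jar"), Paths.get("secondary-2.jar"))
                .build();
    }

    public static void main(String[] args) {
        // The mapping is materialized here, not at class-load time.
        System.out.println(OUTPUT_TO_INPUTS.get());
    }
}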

Example 4 with Multimap

Use of com.google.common.collect.Multimap in project buck by facebook.

From the class PreDexMerge, the method addStepsForSplitDex:

private void addStepsForSplitDex(ImmutableList.Builder<Step> steps, BuildableContext buildableContext) {
    // Collect all of the DexWithClasses objects to use for merging.
    ImmutableMultimap.Builder<APKModule, DexWithClasses> dexFilesToMergeBuilder = ImmutableMultimap.builder();
    dexFilesToMergeBuilder.putAll(FluentIterable.from(preDexDeps.entries()).transform(input -> new AbstractMap.SimpleEntry<>(input.getKey(), DexWithClasses.TO_DEX_WITH_CLASSES.apply(input.getValue()))).filter(input -> input.getValue() != null).toSet());
    final SplitDexPaths paths = new SplitDexPaths();
    final ImmutableSet.Builder<Path> secondaryDexDirectories = ImmutableSet.builder();
    if (dexSplitMode.getDexStore() == DexStore.RAW) {
        // Raw classes*.dex files go in the top-level of the APK.
        secondaryDexDirectories.add(paths.jarfilesSubdir);
    } else {
        // Otherwise, we want to include the metadata and jars as assets.
        secondaryDexDirectories.add(paths.metadataDir);
        secondaryDexDirectories.add(paths.jarfilesDir);
    }
    // Always add additional dex stores and metadata as assets.
    secondaryDexDirectories.add(paths.additionalJarfilesDir);
    // Do not clear existing directory which might contain secondary dex files that are not
    // re-merged (since their contents did not change).
    steps.add(new MkdirStep(getProjectFilesystem(), paths.jarfilesSubdir));
    steps.add(new MkdirStep(getProjectFilesystem(), paths.additionalJarfilesSubdir));
    steps.add(new MkdirStep(getProjectFilesystem(), paths.successDir));
    steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), paths.metadataSubdir));
    steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), paths.scratchDir));
    buildableContext.addMetadata(SECONDARY_DEX_DIRECTORIES_KEY, secondaryDexDirectories.build().stream().map(Object::toString).collect(MoreCollectors.toImmutableList()));
    buildableContext.recordArtifact(primaryDexPath);
    buildableContext.recordArtifact(paths.jarfilesSubdir);
    buildableContext.recordArtifact(paths.metadataSubdir);
    buildableContext.recordArtifact(paths.successDir);
    buildableContext.recordArtifact(paths.additionalJarfilesSubdir);
    PreDexedFilesSorter preDexedFilesSorter = new PreDexedFilesSorter(
            Optional.ofNullable(DexWithClasses.TO_DEX_WITH_CLASSES.apply(dexForUberRDotJava)),
            dexFilesToMergeBuilder.build(),
            dexSplitMode.getPrimaryDexPatterns(),
            apkModuleGraph,
            paths.scratchDir,
            // The linear alloc hard limit is used to set the dex weight limit during
            // pre-dex merging.
            dexSplitMode.getLinearAllocHardLimit(),
            dexSplitMode.getDexStore(),
            paths.jarfilesSubdir,
            paths.additionalJarfilesSubdir);
    final ImmutableMap<String, PreDexedFilesSorter.Result> sortResults = preDexedFilesSorter.sortIntoPrimaryAndSecondaryDexes(getProjectFilesystem(), steps);
    PreDexedFilesSorter.Result rootApkModuleResult = sortResults.get(APKModuleGraph.ROOT_APKMODULE_NAME);
    if (rootApkModuleResult == null) {
        throw new HumanReadableException("No classes found in primary or secondary dexes");
    }
    Multimap<Path, Path> aggregatedOutputToInputs = HashMultimap.create();
    ImmutableMap.Builder<Path, Sha1HashCode> dexInputHashesBuilder = ImmutableMap.builder();
    for (PreDexedFilesSorter.Result result : sortResults.values()) {
        if (!result.apkModule.equals(apkModuleGraph.getRootAPKModule())) {
            Path dexOutputPath = paths.additionalJarfilesSubdir.resolve(result.apkModule.getName());
            steps.add(new MkdirStep(getProjectFilesystem(), dexOutputPath));
        }
        aggregatedOutputToInputs.putAll(result.secondaryOutputToInputs);
        dexInputHashesBuilder.putAll(result.dexInputHashes);
    }
    final ImmutableMap<Path, Sha1HashCode> dexInputHashes = dexInputHashesBuilder.build();
    steps.add(new SmartDexingStep(getProjectFilesystem(), primaryDexPath, Suppliers.ofInstance(rootApkModuleResult.primaryDexInputs), Optional.of(paths.jarfilesSubdir), Optional.of(Suppliers.ofInstance(aggregatedOutputToInputs)), () -> dexInputHashes, paths.successDir, DX_MERGE_OPTIONS, dxExecutorService, xzCompressionLevel, dxMaxHeapSize));
    // Record the primary dex SHA1 so exopackage apks can use it to compute their ABI keys.
    // Single dex apks cannot be exopackages, so they will never need ABI keys.
    steps.add(new RecordFileSha1Step(getProjectFilesystem(), primaryDexPath, PRIMARY_DEX_HASH_KEY, buildableContext));
    for (PreDexedFilesSorter.Result result : sortResults.values()) {
        if (!result.apkModule.equals(apkModuleGraph.getRootAPKModule())) {
            Path dexMetadataOutputPath = paths.additionalJarfilesSubdir.resolve(result.apkModule.getName()).resolve("metadata.txt");
            addMetadataWriteStep(result, steps, dexMetadataOutputPath);
        }
    }
    addMetadataWriteStep(rootApkModuleResult, steps, paths.metadataFile);
}
Also used: Iterables(com.google.common.collect.Iterables) Step(com.facebook.buck.step.Step) SourcePath(com.facebook.buck.rules.SourcePath) Multimap(com.google.common.collect.Multimap) BuildOutput(com.facebook.buck.android.PreDexMerge.BuildOutput) MkdirStep(com.facebook.buck.step.fs.MkdirStep) AbstractExecutionStep(com.facebook.buck.step.AbstractExecutionStep) ExecutionContext(com.facebook.buck.step.ExecutionContext) HashMultimap(com.google.common.collect.HashMultimap) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) FluentIterable(com.google.common.collect.FluentIterable) Map(java.util.Map) Suppliers(com.google.common.base.Suppliers) BuildOutputInitializer(com.facebook.buck.rules.BuildOutputInitializer) ImmutableMultimap(com.google.common.collect.ImmutableMultimap) BuildRuleParams(com.facebook.buck.rules.BuildRuleParams) Path(java.nio.file.Path) EnumSet(java.util.EnumSet) Nullable(javax.annotation.Nullable) MoreCollectors(com.facebook.buck.util.MoreCollectors) Function(com.google.common.base.Function) ImmutableSet(com.google.common.collect.ImmutableSet) AddToRuleKey(com.facebook.buck.rules.AddToRuleKey) MakeCleanDirectoryStep(com.facebook.buck.step.fs.MakeCleanDirectoryStep) ImmutableMap(com.google.common.collect.ImmutableMap) InitializableFromDisk(com.facebook.buck.rules.InitializableFromDisk) BuildableContext(com.facebook.buck.rules.BuildableContext) IOException(java.io.IOException) HumanReadableException(com.facebook.buck.util.HumanReadableException) OnDiskBuildInfo(com.facebook.buck.rules.OnDiskBuildInfo) AbstractBuildRule(com.facebook.buck.rules.AbstractBuildRule) Objects(java.util.Objects) AbstractMap(java.util.AbstractMap) List(java.util.List) Paths(java.nio.file.Paths) RecordFileSha1Step(com.facebook.buck.rules.RecordFileSha1Step) Sha1HashCode(com.facebook.buck.util.sha1.Sha1HashCode) BuildContext(com.facebook.buck.rules.BuildContext) Optional(java.util.Optional) Preconditions(com.google.common.base.Preconditions) BuildTargets(com.facebook.buck.model.BuildTargets) StepExecutionResult(com.facebook.buck.step.StepExecutionResult) Collections(java.util.Collections) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService)
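addStepsForSplitDex collects one secondaryOutputToInputs mapping per sorted APK module and folds them into a single HashMultimap with putAll. Here is a small sketch of that merge pattern; the per-module maps below are invented placeholders, not Buck data.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;

import java.util.Arrays;
import java.util.List;

public class MultimapMergeSketch {
    public static void main(String[] args) {
        // Stand-ins for each PreDexedFilesSorter.Result.secondaryOutputToInputs.
        List<Multimap<String, String>> perModule = Arrays.asList(
                ImmutableMultimap.of("module-a/secondary-1.dex.jar", "a.jar"),
                ImmutableMultimap.of(
                        "module-b/secondary-1.dex.jar", "b1.jar",
                        "module-b/secondary-1.dex.jar", "b2.jar"));

        // putAll(Multimap) copies every key-value pair, so the aggregate keeps
        // multiple inputs per output without any extra bookkeeping.
        Multimap<String, String> aggregatedOutputToInputs = HashMultimap.create();
        for (Multimap<String, String> m : perModule) {
            aggregatedOutputToInputs.putAll(m);
        }
        System.out.println(aggregatedOutputToInputs);
    }
}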

Example 5 with Multimap

Use of com.google.common.collect.Multimap in project sonarqube by SonarSource.

From the class InheritanceAction, the method handle:

@Override
public void handle(Request request, Response response) throws Exception {
    DbSession dbSession = dbClient.openSession(false);
    try {
        QualityProfileDto profile = profileFactory.find(dbSession, QProfileRef.from(request));
        List<QProfile> ancestors = profileLookup.ancestors(profile, dbSession);
        List<QualityProfileDto> children = dbClient.qualityProfileDao().selectChildren(dbSession, profile.getKey());
        Map<String, Multimap<String, FacetValue>> profileStats = profileLoader.getAllProfileStats();
        writeResponse(response.newJsonWriter(), profile, ancestors, children, profileStats);
    } finally {
        dbSession.close();
    }
}
Also used: DbSession(org.sonar.db.DbSession) Multimap(com.google.common.collect.Multimap) QualityProfileDto(org.sonar.db.qualityprofile.QualityProfileDto) QProfile(org.sonar.server.qualityprofile.QProfile)
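Here getAllProfileStats returns a Map keyed by profile, whose values are Multimaps from a statistic name to its facet values. Below is a hedged sketch of building that nested shape; the profile key, stat names, and the String stand-in for FacetValue are all assumptions made for the example.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.HashMap;
import java.util.Map;

public class ProfileStatsSketch {
    public static void main(String[] args) {
        // Profile key -> (statistic name -> one entry per facet value).
        Map<String, Multimap<String, String>> profileStats = new HashMap<>();

        Multimap<String, String> stats = ArrayListMultimap.create();
        stats.put("severities", "MAJOR=120");
        stats.put("severities", "MINOR=45");
        stats.put("statuses", "READY=160");
        profileStats.put("java-sonar-way-12345", stats);

        // The same lookup shape InheritanceAction hands to writeResponse.
        System.out.println(profileStats.get("java-sonar-way-12345").get("severities"));
    }
}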

Aggregations

Multimap (com.google.common.collect.Multimap): 47
HashMultimap (com.google.common.collect.HashMultimap): 16
Test (org.junit.Test): 15
List (java.util.List): 13
InetAddress (java.net.InetAddress): 11
Map (java.util.Map): 9
IOException (java.io.IOException): 8
ImmutableList (com.google.common.collect.ImmutableList): 7
Collection (java.util.Collection): 7
ArrayListMultimap (com.google.common.collect.ArrayListMultimap): 6
ImmutableMap (com.google.common.collect.ImmutableMap): 6
ImmutableMultimap (com.google.common.collect.ImmutableMultimap): 6
ArrayList (java.util.ArrayList): 6
Set (java.util.Set): 6
ImmutableSet (com.google.common.collect.ImmutableSet): 5
LinkedListMultimap (com.google.common.collect.LinkedListMultimap): 5
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5
Collectors (java.util.stream.Collectors): 5
Token (org.apache.cassandra.dht.Token): 4
HashMap (java.util.HashMap): 3
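Several Multimap implementations appear in the aggregation above; they differ mainly in how values are stored per key. A brief sketch of the behavioral differences between the three mutable variants listed:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

public class MultimapFlavorsSketch {
    public static void main(String[] args) {
        // ArrayListMultimap: per-key values behave like a List, duplicates allowed.
        Multimap<String, String> listBacked = ArrayListMultimap.create();
        listBacked.put("k", "v");
        listBacked.put("k", "v");
        System.out.println(listBacked.get("k"));    // [v, v]

        // HashMultimap: per-key values behave like a Set, duplicate pairs collapse.
        Multimap<String, String> setBacked = HashMultimap.create();
        setBacked.put("k", "v");
        setBacked.put("k", "v");
        System.out.println(setBacked.get("k"));     // [v]

        // LinkedListMultimap: entries() preserves overall insertion order across keys.
        Multimap<String, String> linked = LinkedListMultimap.create();
        linked.put("b", "1");
        linked.put("a", "2");
        System.out.println(linked.entries());       // [b=1, a=2]
    }
}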