Example 1 with EnumSet

Use of java.util.EnumSet in project buck by facebook.

The class AndroidBinary, method addDexingSteps.

/**
   * Create dex artifacts for all of the individual directories of compiled .class files (or
   * the obfuscated jar files if proguard is used).  If split dex is used, multiple dex artifacts
   * will be produced.
   * @param classpathEntriesToDex Full set of classpath entries that must make
   *     their way into the final APK structure (but not necessarily into the
   *     primary dex).
   * @param secondaryDexDirectories The contract for updating this builder must match that
   *     of {@link PreDexMerge#getSecondaryDexDirectories()}.
   * @param steps List of steps to add to.
   * @param primaryDexPath Output path for the primary dex file.
   */
@VisibleForTesting
void addDexingSteps(
    Set<Path> classpathEntriesToDex,
    Supplier<ImmutableMap<String, HashCode>> classNamesToHashesSupplier,
    ImmutableSet.Builder<Path> secondaryDexDirectories,
    ImmutableList.Builder<Step> steps,
    Path primaryDexPath,
    Optional<SourcePath> dexReorderToolFile,
    Optional<SourcePath> dexReorderDataDumpFile,
    ImmutableMultimap<APKModule, Path> additionalDexStoreToJarPathMap,
    SourcePathResolver resolver) {
    final Supplier<Set<Path>> primaryInputsToDex;
    final Optional<Path> secondaryDexDir;
    final Optional<Supplier<Multimap<Path, Path>>> secondaryOutputToInputs;
    Path secondaryDexParentDir = getBinPath("__%s_secondary_dex__/");
    Path additionalDexParentDir = getBinPath("__%s_additional_dex__/");
    Path additionalDexAssetsDir = additionalDexParentDir.resolve("assets");
    final Optional<ImmutableSet<Path>> additionalDexDirs;
    if (shouldSplitDex()) {
        Optional<Path> proguardFullConfigFile = Optional.empty();
        Optional<Path> proguardMappingFile = Optional.empty();
        if (packageType.isBuildWithObfuscation()) {
            Path proguardConfigDir = getProguardTextFilesPath();
            proguardFullConfigFile = Optional.of(proguardConfigDir.resolve("configuration.txt"));
            proguardMappingFile = Optional.of(proguardConfigDir.resolve("mapping.txt"));
        }
        // DexLibLoader expects that metadata.txt and secondary jar files are under this dir
        // in assets.
        // Intermediate directory holding the primary split-zip jar.
        Path splitZipDir = getBinPath("__%s_split_zip__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), splitZipDir));
        Path primaryJarPath = splitZipDir.resolve("primary.jar");
        Path secondaryJarMetaDirParent = splitZipDir.resolve("secondary_meta");
        Path secondaryJarMetaDir = secondaryJarMetaDirParent.resolve(SECONDARY_DEX_SUBDIR);
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryJarMetaDir));
        Path secondaryJarMeta = secondaryJarMetaDir.resolve("metadata.txt");
        // Intermediate directory holding _ONLY_ the secondary split-zip jar files.  This is
        // important because SmartDexingCommand will try to dx every entry in this directory.  It
        // does this because it's impossible to know what outputs split-zip will generate until it
        // runs.
        final Path secondaryZipDir = getBinPath("__%s_secondary_zip__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryZipDir));
        // Intermediate directory holding the directories holding _ONLY_ the additional split-zip
        // jar files that are intended for that dex store.
        final Path additionalDexStoresZipDir = getBinPath("__%s_dex_stores_zip__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), additionalDexStoresZipDir));
        for (APKModule dexStore : additionalDexStoreToJarPathMap.keySet()) {
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), additionalDexStoresZipDir.resolve(dexStore.getName())));
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryJarMetaDirParent.resolve("assets").resolve(dexStore.getName())));
        }
        // Run the split-zip command which is responsible for dividing the large set of input
        // classpaths into a more compact set of jar files such that no one jar file when dexed will
        // yield a dex artifact too large for dexopt or the dx method limit to handle.
        Path zipSplitReportDir = getBinPath("__%s_split_zip_report__");
        steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), zipSplitReportDir));
        SplitZipStep splitZipCommand =
            new SplitZipStep(
                getProjectFilesystem(),
                classpathEntriesToDex,
                secondaryJarMeta,
                primaryJarPath,
                secondaryZipDir,
                "secondary-%d.jar",
                secondaryJarMetaDirParent,
                additionalDexStoresZipDir,
                proguardFullConfigFile,
                proguardMappingFile,
                skipProguard,
                dexSplitMode,
                dexSplitMode.getPrimaryDexScenarioFile().map(resolver::getAbsolutePath),
                dexSplitMode.getPrimaryDexClassesFile().map(resolver::getAbsolutePath),
                dexSplitMode.getSecondaryDexHeadClassesFile().map(resolver::getAbsolutePath),
                dexSplitMode.getSecondaryDexTailClassesFile().map(resolver::getAbsolutePath),
                additionalDexStoreToJarPathMap,
                enhancementResult.getAPKModuleGraph(),
                zipSplitReportDir);
        steps.add(splitZipCommand);
        // Add the secondary dex directory that will be created by the smart dexing
        // command.  Smart dex will handle "cleaning" this directory properly.
        if (reorderClassesIntraDex) {
            secondaryDexDir = Optional.of(secondaryDexParentDir.resolve(SMART_DEX_SECONDARY_DEX_SUBDIR));
            Path intraDexReorderSecondaryDexDir = secondaryDexParentDir.resolve(SECONDARY_DEX_SUBDIR);
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), secondaryDexDir.get()));
            steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), intraDexReorderSecondaryDexDir));
        } else {
            secondaryDexDir = Optional.of(secondaryDexParentDir.resolve(SECONDARY_DEX_SUBDIR));
            steps.add(new MkdirStep(getProjectFilesystem(), secondaryDexDir.get()));
        }
        if (additionalDexStoreToJarPathMap.isEmpty()) {
            additionalDexDirs = Optional.empty();
        } else {
            ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
            for (APKModule dexStore : additionalDexStoreToJarPathMap.keySet()) {
                Path dexStorePath = additionalDexAssetsDir.resolve(dexStore.getName());
                builder.add(dexStorePath);
                steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), dexStorePath));
            }
            additionalDexDirs = Optional.of(builder.build());
        }
        if (dexSplitMode.getDexStore() == DexStore.RAW) {
            secondaryDexDirectories.add(secondaryDexDir.get());
        } else {
            secondaryDexDirectories.add(secondaryJarMetaDirParent);
            secondaryDexDirectories.add(secondaryDexParentDir);
        }
        if (additionalDexDirs.isPresent()) {
            secondaryDexDirectories.add(additionalDexParentDir);
        }
        // Adjust smart-dex inputs for the split-zip case.
        primaryInputsToDex = Suppliers.ofInstance(ImmutableSet.of(primaryJarPath));
        Supplier<Multimap<Path, Path>> secondaryOutputToInputsMap = splitZipCommand.getOutputToInputsMapSupplier(secondaryDexDir.get(), additionalDexAssetsDir);
        secondaryOutputToInputs = Optional.of(secondaryOutputToInputsMap);
    } else {
        // Simple case where our inputs are the natural classpath directories and we don't have
        // to worry about secondary jar/dex files.
        primaryInputsToDex = Suppliers.ofInstance(classpathEntriesToDex);
        secondaryDexDir = Optional.empty();
        secondaryOutputToInputs = Optional.empty();
    }
    HashInputJarsToDexStep hashInputJarsToDexStep = new HashInputJarsToDexStep(getProjectFilesystem(), primaryInputsToDex, secondaryOutputToInputs, classNamesToHashesSupplier);
    steps.add(hashInputJarsToDexStep);
    // Stores checksum information from each invocation to intelligently decide when dx needs
    // to be re-run.
    Path successDir = getBinPath("__%s_smart_dex__/.success");
    steps.add(new MkdirStep(getProjectFilesystem(), successDir));
    // Add the smart dexing tool that is capable of avoiding the external dx invocation(s) if
    // it can be shown that the inputs have not changed.  It also parallelizes dx invocations
    // where applicable.
    //
    // Note that by not specifying the number of threads this command will use, it will select an
    // optimal default regardless of the value of --num-threads.  This decision was made with the
    // assumption that --num-threads specifies the threading of build rule execution and does not
    // directly apply to the internal threading/parallelization details of various build commands
    // being executed.  For example, aapt is internally threaded by default when preprocessing
    // images.
    EnumSet<DxStep.Option> dxOptions = PackageType.RELEASE.equals(packageType) ? EnumSet.of(DxStep.Option.NO_LOCALS) : EnumSet.of(DxStep.Option.NO_OPTIMIZE);
    Path selectedPrimaryDexPath = primaryDexPath;
    if (reorderClassesIntraDex) {
        String primaryDexFileName = primaryDexPath.getFileName().toString();
        String smartDexPrimaryDexFileName = "smart-dex-" + primaryDexFileName;
        Path smartDexPrimaryDexPath = Paths.get(primaryDexPath.toString().replace(primaryDexFileName, smartDexPrimaryDexFileName));
        selectedPrimaryDexPath = smartDexPrimaryDexPath;
    }
    SmartDexingStep smartDexingCommand =
        new SmartDexingStep(
            getProjectFilesystem(),
            selectedPrimaryDexPath,
            primaryInputsToDex,
            secondaryDexDir,
            secondaryOutputToInputs,
            hashInputJarsToDexStep,
            successDir,
            dxOptions,
            dxExecutorService,
            xzCompressionLevel,
            dxMaxHeapSize);
    steps.add(smartDexingCommand);
    if (reorderClassesIntraDex) {
        IntraDexReorderStep intraDexReorderStep =
            new IntraDexReorderStep(
                getProjectFilesystem(),
                resolver.getAbsolutePath(dexReorderToolFile.get()),
                resolver.getAbsolutePath(dexReorderDataDumpFile.get()),
                getBuildTarget(),
                selectedPrimaryDexPath,
                primaryDexPath,
                secondaryOutputToInputs,
                SMART_DEX_SECONDARY_DEX_SUBDIR,
                SECONDARY_DEX_SUBDIR);
        steps.add(intraDexReorderStep);
    }
}
Also used: Path (java.nio.file.Path), SourcePath (com.facebook.buck.rules.SourcePath), ExplicitBuildTargetSourcePath (com.facebook.buck.rules.ExplicitBuildTargetSourcePath), EnumSet (java.util.EnumSet), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), ImmutableSortedSet (com.google.common.collect.ImmutableSortedSet), MkdirStep (com.facebook.buck.step.fs.MkdirStep), Multimap (com.google.common.collect.Multimap), ImmutableMultimap (com.google.common.collect.ImmutableMultimap), MakeCleanDirectoryStep (com.facebook.buck.step.fs.MakeCleanDirectoryStep), Supplier (com.google.common.base.Supplier), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
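
The EnumSet in this example only appears near the end of the method, where dx options are picked per package type (NO_LOCALS for release builds, NO_OPTIMIZE otherwise). Below is a minimal, self-contained sketch of that flag-selection pattern; the enums and methods are hypothetical stand-ins, not Buck's actual API.

import java.util.EnumSet;

// Hypothetical sketch of the "pick an EnumSet of flags per build mode" pattern above.
public class DexOptionsSketch {

    enum Option { NO_OPTIMIZE, NO_LOCALS, FORCE_JUMBO }

    enum PackageType { DEBUG, RELEASE }

    static EnumSet<Option> optionsFor(PackageType type) {
        // Release builds drop local-variable info; other builds skip optimization,
        // mirroring the ternary over PackageType.RELEASE in the snippet above.
        return type == PackageType.RELEASE
                ? EnumSet.of(Option.NO_LOCALS)
                : EnumSet.of(Option.NO_OPTIMIZE);
    }

    public static void main(String[] args) {
        EnumSet<Option> options = optionsFor(PackageType.RELEASE);
        // EnumSet membership tests are backed by a bit vector, so flag checks stay cheap.
        System.out.println(options.contains(Option.NO_LOCALS)); // true
    }
}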

Example 2 with EnumSet

Use of java.util.EnumSet in project elasticsearch by elastic.

The class RestClusterStateAction, method prepareRequest.

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest();
    clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions()));
    clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
    clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
    final String[] indices = Strings.splitStringByCommaToArray(request.param("indices", "_all"));
    boolean isAllIndicesOnly = indices.length == 1 && "_all".equals(indices[0]);
    if (!isAllIndicesOnly) {
        clusterStateRequest.indices(indices);
    }
    if (request.hasParam("metric")) {
        EnumSet<ClusterState.Metric> metrics = ClusterState.Metric.parseString(request.param("metric"), true);
        // do not ask for what we do not need.
        clusterStateRequest.nodes(metrics.contains(ClusterState.Metric.NODES) || metrics.contains(ClusterState.Metric.MASTER_NODE));
        /*
         * There is no distinction in the Java API between routing_table and routing_nodes; it is
         * the same set of information over the wire, with one single flag to ask for it.
         */
        clusterStateRequest.routingTable(metrics.contains(ClusterState.Metric.ROUTING_TABLE) || metrics.contains(ClusterState.Metric.ROUTING_NODES));
        clusterStateRequest.metaData(metrics.contains(ClusterState.Metric.METADATA));
        clusterStateRequest.blocks(metrics.contains(ClusterState.Metric.BLOCKS));
        clusterStateRequest.customs(metrics.contains(ClusterState.Metric.CUSTOMS));
    }
    settingsFilter.addFilterSettingParams(request);
    return channel -> client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) {

        @Override
        public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception {
            builder.startObject();
            builder.field(Fields.CLUSTER_NAME, response.getClusterName().value());
            builder.byteSizeField(Fields.CLUSTER_STATE_SIZE_IN_BYTES, Fields.CLUSTER_STATE_SIZE, response.getTotalCompressedSize());
            response.getState().toXContent(builder, request);
            builder.endObject();
            return new BytesRestResponse(RestStatus.OK, builder);
        }
    });
}
Also used: BaseRestHandler (org.elasticsearch.rest.BaseRestHandler), SettingsFilter (org.elasticsearch.common.settings.SettingsFilter), RestResponse (org.elasticsearch.rest.RestResponse), RestBuilderListener (org.elasticsearch.rest.action.RestBuilderListener), Set (java.util.Set), IOException (java.io.IOException), XContentBuilder (org.elasticsearch.common.xcontent.XContentBuilder), RestController (org.elasticsearch.rest.RestController), Strings (org.elasticsearch.common.Strings), ClusterStateResponse (org.elasticsearch.action.admin.cluster.state.ClusterStateResponse), BytesRestResponse (org.elasticsearch.rest.BytesRestResponse), HashSet (java.util.HashSet), ClusterState (org.elasticsearch.cluster.ClusterState), Settings (org.elasticsearch.common.settings.Settings), RestStatus (org.elasticsearch.rest.RestStatus), IndicesOptions (org.elasticsearch.action.support.IndicesOptions), RestRequest (org.elasticsearch.rest.RestRequest), ClusterStateRequest (org.elasticsearch.action.admin.cluster.state.ClusterStateRequest), NodeClient (org.elasticsearch.client.node.NodeClient), Requests (org.elasticsearch.client.Requests), Collections (java.util.Collections), EnumSet (java.util.EnumSet)
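
Here the EnumSet is built by parsing the comma-separated metric parameter and then queried with contains() to decide which parts of the cluster state to request. The sketch below shows that parse-then-test pattern with a hypothetical enum; it is not Elasticsearch's ClusterState.Metric or its parseString implementation.

import java.util.EnumSet;

// Hypothetical sketch: parse a comma-separated request parameter into an EnumSet,
// then toggle features by membership, as the prepareRequest code above does.
public class MetricFilterSketch {

    enum Metric { NODES, MASTER_NODE, ROUTING_TABLE, ROUTING_NODES, METADATA, BLOCKS, CUSTOMS }

    static EnumSet<Metric> parse(String param) {
        EnumSet<Metric> metrics = EnumSet.noneOf(Metric.class);
        for (String token : param.split(",")) {
            metrics.add(Metric.valueOf(token.trim().toUpperCase()));
        }
        return metrics;
    }

    public static void main(String[] args) {
        EnumSet<Metric> metrics = parse("nodes,metadata");
        // Only ask for what the caller requested, mirroring the contains() checks above.
        boolean wantNodes = metrics.contains(Metric.NODES) || metrics.contains(Metric.MASTER_NODE);
        boolean wantBlocks = metrics.contains(Metric.BLOCKS);
        System.out.println(wantNodes + " " + wantBlocks); // true false
    }
}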

Example 3 with EnumSet

Use of java.util.EnumSet in project hadoop by apache.

The class ApplicationEntityReader, method createFilterListForColsOfInfoFamily.

/**
   * Creates a filter list which indicates that only some of the column
   * qualifiers in the info column family will be returned in the result.
   *
   * @return filter list.
   * @throws IOException if any problem occurs while creating filter list.
   */
private FilterList createFilterListForColsOfInfoFamily() throws IOException {
    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
    // Add filters for each column in entity table.
    updateFixedColumns(infoFamilyColsFilter);
    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
    // If INFO field has to be retrieved, add a filter for fetching columns
    // with INFO column prefix.
    if (hasField(fieldsToRetrieve, Field.INFO)) {
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.INFO));
    }
    TimelineFilterList relatesTo = getFilters().getRelatesTo();
    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
        // If RELATES_TO field has to be retrieved, add a filter for fetching
        // columns with RELATES_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.RELATES_TO));
    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
        // Even if fields to retrieve does not contain RELATES_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // relatesTo filters are specified. relatesTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> relatesToCols = TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(ApplicationColumnPrefix.RELATES_TO, relatesToCols));
    }
    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
        // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
        // columns with IS_RELATED_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.IS_RELATED_TO));
    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
        // Even if fields to retrieve does not contain IS_RELATED_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // isRelatedTo filters are specified. isRelatedTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> isRelatedToCols = TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(ApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
    }
    TimelineFilterList eventFilters = getFilters().getEventFilters();
    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
        // If EVENTS field has to be retrieved, add a filter for fetching columns
        // with EVENT column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.EVENT));
    } else if (eventFilters != null && !eventFilters.getFilterList().isEmpty()) {
        // Even if fields to retrieve does not contain EVENTS, we still need to
        // have a filter to fetch some of the column qualifiers on the basis of
        // event filters specified. Event filters will then be matched after
        // fetching rows from HBase.
        Set<String> eventCols = TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(ApplicationColumnPrefix.EVENT, eventCols));
    }
    return infoFamilyColsFilter;
}
Also used: Field (org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field), EnumSet (java.util.EnumSet), Set (java.util.Set), TimelineFilterList (org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList), FilterList (org.apache.hadoop.hbase.filter.FilterList)
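
In this reader the EnumSet<Field> of requested fields decides which HBase qualifier filters get added. The sketch below isolates that pattern with hypothetical names; in particular, the ALL wildcard in hasField is an assumption and may not match the timeline-service implementation.

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

// Hypothetical sketch: an EnumSet of requested fields drives which filters are built.
public class FieldFilterSketch {

    enum Field { ALL, INFO, RELATES_TO, IS_RELATED_TO, EVENTS }

    static boolean hasField(EnumSet<Field> fields, Field field) {
        // Assumption: ALL acts as a wildcard that implies every field.
        return fields.contains(Field.ALL) || fields.contains(field);
    }

    static List<String> buildFilters(EnumSet<Field> fieldsToRetrieve) {
        List<String> filters = new ArrayList<>();
        if (hasField(fieldsToRetrieve, Field.INFO)) {
            filters.add("qualifier-prefix:INFO");
        }
        if (hasField(fieldsToRetrieve, Field.EVENTS)) {
            filters.add("qualifier-prefix:EVENT");
        }
        return filters;
    }

    public static void main(String[] args) {
        System.out.println(buildFilters(EnumSet.of(Field.INFO))); // [qualifier-prefix:INFO]
        System.out.println(buildFilters(EnumSet.of(Field.ALL)));  // both prefixes
    }
}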

Example 4 with EnumSet

Use of java.util.EnumSet in project hadoop by apache.

The class GenericEntityReader, method createFilterListForColsOfInfoFamily.

/**
   * Creates a filter list which indicates that only some of the column
   * qualifiers in the info column family will be returned in the result.
   *
   * @return filter list.
   * @throws IOException if any problem occurs while creating filter list.
   */
private FilterList createFilterListForColsOfInfoFamily() throws IOException {
    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
    // Add filters for each column in entity table.
    updateFixedColumns(infoFamilyColsFilter);
    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
    // If INFO field has to be retrieved, add a filter for fetching columns
    // with INFO column prefix.
    if (hasField(fieldsToRetrieve, Field.INFO)) {
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.INFO));
    }
    TimelineFilterList relatesTo = getFilters().getRelatesTo();
    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
        // If RELATES_TO field has to be retrieved, add a filter for fetching
        // columns with RELATES_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.RELATES_TO));
    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
        // Even if fields to retrieve does not contain RELATES_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // relatesTo filters are specified. relatesTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> relatesToCols = TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(EntityColumnPrefix.RELATES_TO, relatesToCols));
    }
    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
        // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
        // columns with IS_RELATED_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.IS_RELATED_TO));
    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
        // Even if fields to retrieve does not contain IS_RELATED_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // isRelatedTo filters are specified. isRelatedTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> isRelatedToCols = TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(EntityColumnPrefix.IS_RELATED_TO, isRelatedToCols));
    }
    TimelineFilterList eventFilters = getFilters().getEventFilters();
    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
        // If EVENTS field has to be retrieved, add a filter for fetching columns
        // with EVENT column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.EVENT));
    } else if (eventFilters != null && !eventFilters.getFilterList().isEmpty()) {
        // Even if fields to retrieve does not contain EVENTS, we still need to
        // have a filter to fetch some of the column qualifiers on the basis of
        // event filters specified. Event filters will then be matched after
        // fetching rows from HBase.
        Set<String> eventCols = TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(EntityColumnPrefix.EVENT, eventCols));
    }
    return infoFamilyColsFilter;
}
Also used: Field (org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field), EnumSet (java.util.EnumSet), Set (java.util.Set), TimelineFilterList (org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList), FilterList (org.apache.hadoop.hbase.filter.FilterList)
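
This method mirrors the ApplicationEntityReader variant above almost line for line; the interesting input is the EnumSet<Field> it consumes. The short sketch below shows the EnumSet factory methods such a fields-to-retrieve set is typically assembled from (hypothetical enum, not the timeline-service Field type).

import java.util.EnumSet;

// Hypothetical sketch of the EnumSet factory methods used to build a fields-to-retrieve set.
public class FieldsToRetrieveSketch {

    enum Field { ALL, INFO, RELATES_TO, IS_RELATED_TO, EVENTS }

    public static void main(String[] args) {
        EnumSet<Field> none = EnumSet.noneOf(Field.class);      // start empty, add fields as parsed
        EnumSet<Field> all = EnumSet.allOf(Field.class);        // retrieve everything
        EnumSet<Field> defaults = EnumSet.of(Field.INFO);       // a fixed default set
        EnumSet<Field> rest = EnumSet.complementOf(defaults);   // everything except the defaults
        System.out.println(none.size() + " " + all.size() + " " + rest.size()); // 0 5 4
    }
}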

Example 5 with EnumSet

Use of java.util.EnumSet in project hadoop by apache.

The class RetriableFileCopyCommand, method doExecute.

/**
   * Implementation of RetriableCommand::doExecute().
   * This is the actual copy implementation.
   * @param arguments Argument-list to the command.
   * @return Number of bytes copied.
   * @throws Exception
   */
@SuppressWarnings("unchecked")
@Override
protected Object doExecute(Object... arguments) throws Exception {
    assert arguments.length == 4 : "Unexpected argument list.";
    CopyListingFileStatus source = (CopyListingFileStatus) arguments[0];
    assert !source.isDirectory() : "Unexpected file-status. Expected file.";
    Path target = (Path) arguments[1];
    Mapper.Context context = (Mapper.Context) arguments[2];
    EnumSet<FileAttribute> fileAttributes = (EnumSet<FileAttribute>) arguments[3];
    return doCopy(source, target, context, fileAttributes);
}
Also used: Path (org.apache.hadoop.fs.Path), Mapper (org.apache.hadoop.mapreduce.Mapper), CopyListingFileStatus (org.apache.hadoop.tools.CopyListingFileStatus), EnumSet (java.util.EnumSet), FileAttribute (org.apache.hadoop.tools.DistCpOptions.FileAttribute)
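
The EnumSet here travels through an Object... argument list and is recovered with an unchecked cast, which is why the method carries @SuppressWarnings("unchecked"). Below is a minimal sketch of that pattern with hypothetical names; it is not DistCp's actual doCopy.

import java.util.EnumSet;

// Hypothetical sketch: pass an EnumSet through Object... varargs and cast it back.
public class VarargsEnumSetSketch {

    enum FileAttribute { REPLICATION, BLOCKSIZE, PERMISSION, TIMES }

    @SuppressWarnings("unchecked")
    static long doCopy(Object... arguments) {
        assert arguments.length == 2 : "Unexpected argument list.";
        String source = (String) arguments[0];
        // The element type of the cast cannot be verified at runtime (erasure), hence the
        // @SuppressWarnings, just like the doExecute override above.
        EnumSet<FileAttribute> attributes = (EnumSet<FileAttribute>) arguments[1];
        return source != null && attributes.contains(FileAttribute.PERMISSION) ? 1L : 0L;
    }

    public static void main(String[] args) {
        long copied = doCopy("/tmp/src", EnumSet.of(FileAttribute.PERMISSION, FileAttribute.TIMES));
        System.out.println(copied); // 1
    }
}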

Aggregations

EnumSet (java.util.EnumSet): 143
ArrayList (java.util.ArrayList): 43
Set (java.util.Set): 43
Map (java.util.Map): 30
List (java.util.List): 28
HashMap (java.util.HashMap): 27
HashSet (java.util.HashSet): 27
Collection (java.util.Collection): 22
Collectors (java.util.stream.Collectors): 19
Test (org.junit.Test): 19
IOException (java.io.IOException): 15
Collections (java.util.Collections): 13
TreeSet (java.util.TreeSet): 11
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 11
Arrays (java.util.Arrays): 10
LinkedHashSet (java.util.LinkedHashSet): 10
File (java.io.File): 9
ImmutableSet (com.google.common.collect.ImmutableSet): 8
Entry (java.util.Map.Entry): 8
TimeUnit (java.util.concurrent.TimeUnit): 8