Use of com.google.common.base.Optional in project druid by druid-io.
The class IndexTask, method generateAndPublishSegments.
private boolean generateAndPublishSegments(final TaskToolbox toolbox, final DataSchema dataSchema, final Map<Interval, List<ShardSpec>> shardSpecs, final String version, final FirehoseFactory firehoseFactory) throws IOException, InterruptedException {
  final GranularitySpec granularitySpec = dataSchema.getGranularitySpec();
  final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema, new RealtimeIOConfig(null, null, null), null);
  final FireDepartmentMetrics fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
  final Map<String, ShardSpec> sequenceNameToShardSpecMap = Maps.newHashMap();
  if (toolbox.getMonitorScheduler() != null) {
    toolbox.getMonitorScheduler().addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics), ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));
  }
  final SegmentAllocator segmentAllocator;
  if (ingestionSchema.getIOConfig().isAppendToExisting()) {
    segmentAllocator = new ActionBasedSegmentAllocator(toolbox.getTaskActionClient(), dataSchema);
  } else {
    segmentAllocator = new SegmentAllocator() {
      @Override
      public SegmentIdentifier allocate(DateTime timestamp, String sequenceName, String previousSegmentId) throws IOException {
        Optional<Interval> interval = granularitySpec.bucketInterval(timestamp);
        if (!interval.isPresent()) {
          throw new ISE("Could not find interval for timestamp [%s]", timestamp);
        }
        ShardSpec shardSpec = sequenceNameToShardSpecMap.get(sequenceName);
        if (shardSpec == null) {
          throw new ISE("Could not find ShardSpec for sequenceName [%s]", sequenceName);
        }
        return new SegmentIdentifier(getDataSource(), interval.get(), version, shardSpec);
      }
    };
  }
  try (final Appenderator appenderator = newAppenderator(fireDepartmentMetrics, toolbox, dataSchema);
       final FiniteAppenderatorDriver driver = newDriver(appenderator, toolbox, segmentAllocator, fireDepartmentMetrics);
       final Firehose firehose = firehoseFactory.connect(dataSchema.getParser())) {
    final Supplier<Committer> committerSupplier = Committers.supplierFromFirehose(firehose);
    final Map<Interval, ShardSpecLookup> shardSpecLookups = Maps.newHashMap();
    if (driver.startJob() != null) {
      driver.clear();
    }
    try {
      while (firehose.hasMore()) {
        try {
          final InputRow inputRow = firehose.nextRow();
          final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp());
          if (!optInterval.isPresent()) {
            fireDepartmentMetrics.incrementThrownAway();
            continue;
          }
          final Interval interval = optInterval.get();
          if (!shardSpecLookups.containsKey(interval)) {
            final List<ShardSpec> intervalShardSpecs = shardSpecs.get(interval);
            if (intervalShardSpecs == null || intervalShardSpecs.isEmpty()) {
              throw new ISE("Failed to get shardSpec for interval[%s]", interval);
            }
            shardSpecLookups.put(interval, intervalShardSpecs.get(0).getLookup(intervalShardSpecs));
          }
          final ShardSpec shardSpec = shardSpecLookups.get(interval).getShardSpec(inputRow.getTimestampFromEpoch(), inputRow);
          final String sequenceName = String.format("index_%s_%s_%d", interval, version, shardSpec.getPartitionNum());
          if (!sequenceNameToShardSpecMap.containsKey(sequenceName)) {
            final ShardSpec shardSpecForPublishing = ingestionSchema.getTuningConfig().isForceExtendableShardSpecs() || ingestionSchema.getIOConfig().isAppendToExisting() ? new NumberedShardSpec(shardSpec.getPartitionNum(), shardSpecs.get(interval).size()) : shardSpec;
            sequenceNameToShardSpecMap.put(sequenceName, shardSpecForPublishing);
          }
          final SegmentIdentifier identifier = driver.add(inputRow, sequenceName, committerSupplier);
          if (identifier == null) {
            throw new ISE("Could not allocate segment for row with timestamp[%s]", inputRow.getTimestamp());
          }
          fireDepartmentMetrics.incrementProcessed();
        } catch (ParseException e) {
          if (ingestionSchema.getTuningConfig().isReportParseExceptions()) {
            throw e;
          } else {
            fireDepartmentMetrics.incrementUnparseable();
          }
        }
      }
    } finally {
      driver.persist(committerSupplier.get());
    }
    final TransactionalSegmentPublisher publisher = new TransactionalSegmentPublisher() {
      @Override
      public boolean publishSegments(Set<DataSegment> segments, Object commitMetadata) throws IOException {
        final SegmentTransactionalInsertAction action = new SegmentTransactionalInsertAction(segments, null, null);
        return toolbox.getTaskActionClient().submit(action).isSuccess();
      }
    };
    final SegmentsAndMetadata published = driver.finish(publisher, committerSupplier.get());
    if (published == null) {
      log.error("Failed to publish segments, aborting!");
      return false;
    } else {
      log.info("Published segments[%s]", Joiner.on(", ").join(Iterables.transform(published.getSegments(), new Function<DataSegment, String>() {
        @Override
        public String apply(DataSegment input) {
          return input.getIdentifier();
        }
      })));
      return true;
    }
  }
}
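The core Optional usage above is GranularitySpec.bucketInterval(), which returns Optional.absent() when a timestamp falls outside every configured bucket; both the custom SegmentAllocator and the ingest loop branch on isPresent(). A minimal, self-contained sketch of that contract follows; bucketInterval here is a hypothetical stand-in (not the Druid API) that assumes fixed day-sized buckets:

import com.google.common.base.Optional;

public class BucketIntervalSketch {
  // Hypothetical stand-in for GranularitySpec.bucketInterval(): returns the
  // enclosing day-sized [start, end) bucket for a millisecond timestamp, or
  // absent() when the timestamp lies outside the configured range.
  static Optional<long[]> bucketInterval(long timestampMillis, long rangeStart, long rangeEnd) {
    if (timestampMillis < rangeStart || timestampMillis >= rangeEnd) {
      return Optional.absent();
    }
    long dayMillis = 24L * 60 * 60 * 1000;
    long bucketStart = timestampMillis - (timestampMillis - rangeStart) % dayMillis;
    return Optional.of(new long[] { bucketStart, Math.min(bucketStart + dayMillis, rangeEnd) });
  }

  public static void main(String[] args) {
    Optional<long[]> bucket = bucketInterval(1_000_000L, 0L, 86_400_000L);
    if (!bucket.isPresent()) {
      // the task above throws ISE at this point; a plain exception stands in for it
      throw new IllegalStateException("Could not find interval for timestamp");
    }
    System.out.printf("bucket=[%d, %d)%n", bucket.get()[0], bucket.get()[1]);
  }
}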
Use of com.google.common.base.Optional in project druid by druid-io.
The class IndexTask, method determineShardSpecs.
/**
 * Determines the number of shards for each interval using a hash of queryGranularity timestamp + all dimensions (i.e.
 * hash-based partitioning). In the future we may want to also support single-dimension partitioning.
 */
private Map<Interval, List<ShardSpec>> determineShardSpecs(final TaskToolbox toolbox, final FirehoseFactory firehoseFactory) throws IOException {
  final ObjectMapper jsonMapper = toolbox.getObjectMapper();
  final GranularitySpec granularitySpec = ingestionSchema.getDataSchema().getGranularitySpec();
  final Granularity queryGranularity = granularitySpec.getQueryGranularity();
  final boolean determineNumPartitions = ingestionSchema.getTuningConfig().getNumShards() == null;
  final boolean determineIntervals = !ingestionSchema.getDataSchema().getGranularitySpec().bucketIntervals().isPresent();
  final Map<Interval, List<ShardSpec>> shardSpecs = Maps.newHashMap();
  // if we were given number of shards per interval and the intervals, we don't need to scan the data
  if (!determineNumPartitions && !determineIntervals) {
    log.info("numShards and intervals provided, skipping determine partition scan");
    final SortedSet<Interval> intervals = ingestionSchema.getDataSchema().getGranularitySpec().bucketIntervals().get();
    final int numShards = ingestionSchema.getTuningConfig().getNumShards();
    for (Interval interval : intervals) {
      final List<ShardSpec> intervalShardSpecs = Lists.newArrayListWithCapacity(numShards);
      if (numShards > 1) {
        for (int i = 0; i < numShards; i++) {
          intervalShardSpecs.add(new HashBasedNumberedShardSpec(i, numShards, null, jsonMapper));
        }
      } else {
        intervalShardSpecs.add(NoneShardSpec.instance());
      }
      shardSpecs.put(interval, intervalShardSpecs);
    }
    return shardSpecs;
  }
  // determine intervals containing data and prime HLL collectors
  final Map<Interval, Optional<HyperLogLogCollector>> hllCollectors = Maps.newHashMap();
  int thrownAway = 0;
  log.info("Determining intervals and shardSpecs");
  long determineShardSpecsStartMillis = System.currentTimeMillis();
  try (final Firehose firehose = firehoseFactory.connect(ingestionSchema.getDataSchema().getParser())) {
    while (firehose.hasMore()) {
      final InputRow inputRow = firehose.nextRow();
      final Interval interval;
      if (determineIntervals) {
        interval = granularitySpec.getSegmentGranularity().bucket(inputRow.getTimestamp());
      } else {
        final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp());
        if (!optInterval.isPresent()) {
          thrownAway++;
          continue;
        }
        interval = optInterval.get();
      }
      if (!determineNumPartitions) {
        // we don't need to determine partitions but we still need to determine intervals, so add an Optional.absent()
        // for the interval and don't instantiate a HLL collector
        if (!hllCollectors.containsKey(interval)) {
          hllCollectors.put(interval, Optional.<HyperLogLogCollector>absent());
        }
        continue;
      }
      if (!hllCollectors.containsKey(interval)) {
        hllCollectors.put(interval, Optional.of(HyperLogLogCollector.makeLatestCollector()));
      }
      List<Object> groupKey = Rows.toGroupKey(queryGranularity.bucketStart(inputRow.getTimestamp()).getMillis(), inputRow);
      hllCollectors.get(interval).get().add(hashFunction.hashBytes(jsonMapper.writeValueAsBytes(groupKey)).asBytes());
    }
  }
  if (thrownAway > 0) {
log.warn("Unable to to find a matching interval for [%,d] events", thrownAway);
  }
  final ImmutableSortedMap<Interval, Optional<HyperLogLogCollector>> sortedMap = ImmutableSortedMap.copyOf(hllCollectors, Comparators.intervalsByStartThenEnd());
  for (final Map.Entry<Interval, Optional<HyperLogLogCollector>> entry : sortedMap.entrySet()) {
    final Interval interval = entry.getKey();
    final Optional<HyperLogLogCollector> collector = entry.getValue();
    final int numShards;
    if (determineNumPartitions) {
      final long numRows = new Double(collector.get().estimateCardinality()).longValue();
      numShards = (int) Math.ceil((double) numRows / ingestionSchema.getTuningConfig().getTargetPartitionSize());
      log.info("Estimated [%,d] rows of data for interval [%s], creating [%,d] shards", numRows, interval, numShards);
    } else {
      numShards = ingestionSchema.getTuningConfig().getNumShards();
      log.info("Creating [%,d] shards for interval [%s]", numShards, interval);
    }
    final List<ShardSpec> intervalShardSpecs = Lists.newArrayListWithCapacity(numShards);
    if (numShards > 1) {
      for (int i = 0; i < numShards; i++) {
        intervalShardSpecs.add(new HashBasedNumberedShardSpec(i, numShards, null, jsonMapper));
      }
    } else {
      intervalShardSpecs.add(NoneShardSpec.instance());
    }
    shardSpecs.put(interval, intervalShardSpecs);
  }
  log.info("Found intervals and shardSpecs in %,dms", System.currentTimeMillis() - determineShardSpecsStartMillis);
  return shardSpecs;
}
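The interesting Optional usage here is the hllCollectors map: Optional.absent() records that an interval was seen without paying for a cardinality collector, while a present value accumulates row-key hashes that later drive ceil(numRows / targetPartitionSize). A minimal sketch of that idea, with an exact Set<Integer> standing in for the approximate HyperLogLogCollector and an assumed targetPartitionSize:

import com.google.common.base.Optional;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ShardCountSketch {
  public static void main(String[] args) {
    // Stand-in for Druid's Map<Interval, Optional<HyperLogLogCollector>>: a plain
    // Set<Integer> of row-key hashes plays the HLL collector's role (exact rather
    // than approximate, which is fine for a sketch).
    Map<String, Optional<Set<Integer>>> collectors = new HashMap<>();
    String interval = "2017-01-01/2017-01-02";
    collectors.put(interval, Optional.<Set<Integer>>of(new HashSet<Integer>()));
    // An interval whose partition count is already known would get only a marker:
    collectors.put("2017-01-02/2017-01-03", Optional.<Set<Integer>>absent());
    for (int row = 0; row < 25_000; row++) {
      collectors.get(interval).get().add(row);  // add each distinct row key
    }
    int targetPartitionSize = 10_000;  // assumed tuning value, like getTargetPartitionSize()
    long numRows = collectors.get(interval).get().size();
    int numShards = (int) Math.ceil((double) numRows / targetPartitionSize);
    System.out.printf("estimated [%,d] rows -> [%d] shards%n", numRows, numShards);  // 3 shards
  }
}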
Use of com.google.common.base.Optional in project opennms by OpenNMS.
The class NewtsFetchStrategy, method fetch.
@Override
public FetchResults fetch(long start, long end, long step, int maxrows, Long interval, Long heartbeat, List<Source> sources, boolean relaxed) {
  final LateAggregationParams lag = getLagParams(step, interval, heartbeat);
  final Optional<Timestamp> startTs = Optional.of(Timestamp.fromEpochMillis(start));
  final Optional<Timestamp> endTs = Optional.of(Timestamp.fromEpochMillis(end));
  final Map<String, Object> constants = Maps.newHashMap();
  // Group the sources by resource id to avoid calling the ResourceDao
  // multiple times for the same resource
  Map<ResourceId, List<Source>> sourcesByResourceId = sources.stream().collect(Collectors.groupingBy((source) -> ResourceId.fromString(source.getResourceId())));
  // Lookup the OnmsResources in parallel
  Map<ResourceId, Future<OnmsResource>> resourceFuturesById = Maps.newHashMapWithExpectedSize(sourcesByResourceId.size());
  for (ResourceId resourceId : sourcesByResourceId.keySet()) {
    resourceFuturesById.put(resourceId, threadPool.submit(getResourceByIdCallable(resourceId)));
  }
  // Gather the results, fail if any of the resources were not found
  Map<OnmsResource, List<Source>> sourcesByResource = Maps.newHashMapWithExpectedSize(sourcesByResourceId.size());
  for (Entry<ResourceId, Future<OnmsResource>> entry : resourceFuturesById.entrySet()) {
    try {
      OnmsResource resource = entry.getValue().get();
      if (resource == null) {
        if (relaxed)
          continue;
        LOG.error("No resource with id: {}", entry.getKey());
        return null;
      }
      sourcesByResource.put(resource, sourcesByResourceId.get(entry.getKey()));
    } catch (ExecutionException | InterruptedException e) {
      throw Throwables.propagate(e);
    }
  }
  // Now group the sources by Newts Resource ID, which differs from the OpenNMS Resource ID.
  Map<String, List<Source>> sourcesByNewtsResourceId = Maps.newHashMap();
  for (Entry<OnmsResource, List<Source>> entry : sourcesByResource.entrySet()) {
    final OnmsResource resource = entry.getKey();
    for (Source source : entry.getValue()) {
      // Gather the values from strings.properties
      Utils.convertStringAttributesToConstants(source.getLabel(), resource.getStringPropertyAttributes(), constants);
      // Grab the attribute that matches the source
      RrdGraphAttribute rrdGraphAttribute = resource.getRrdGraphAttributes().get(source.getAttribute());
      if (rrdGraphAttribute == null && !Strings.isNullOrEmpty(source.getFallbackAttribute())) {
        LOG.error("No attribute with name '{}', using fallback-attribute with name '{}'", source.getAttribute(), source.getFallbackAttribute());
        source.setAttribute(source.getFallbackAttribute());
        source.setFallbackAttribute(null);
        rrdGraphAttribute = resource.getRrdGraphAttributes().get(source.getAttribute());
      }
      if (rrdGraphAttribute == null) {
        if (relaxed)
          continue;
        LOG.error("No attribute with name: {}", source.getAttribute());
        return null;
      }
      // The Newts Resource ID is stored in the rrdFile attribute
      String newtsResourceId = rrdGraphAttribute.getRrdRelativePath();
      // Remove the file separator prefix, added by the RrdGraphAttribute class
      if (newtsResourceId.startsWith(File.separator)) {
        newtsResourceId = newtsResourceId.substring(File.separator.length(), newtsResourceId.length());
      }
      List<Source> listOfSources = sourcesByNewtsResourceId.get(newtsResourceId);
      // Create the list if it doesn't exist
      if (listOfSources == null) {
        listOfSources = Lists.newLinkedList();
        sourcesByNewtsResourceId.put(newtsResourceId, listOfSources);
      }
      listOfSources.add(source);
    }
  }
  // The Newts API only allows us to perform a query using a single (Newts) Resource ID,
  // so we perform multiple queries in parallel, and aggregate the results.
  Map<String, Future<Collection<Row<Measurement>>>> measurementsByNewtsResourceId = Maps.newHashMapWithExpectedSize(sourcesByNewtsResourceId.size());
  for (Entry<String, List<Source>> entry : sourcesByNewtsResourceId.entrySet()) {
    measurementsByNewtsResourceId.put(entry.getKey(), threadPool.submit(getMeasurementsForResourceCallable(entry.getKey(), entry.getValue(), startTs, endTs, lag)));
  }
  long[] timestamps = null;
  Map<String, double[]> columns = Maps.newHashMap();
  for (Entry<String, Future<Collection<Row<Measurement>>>> entry : measurementsByNewtsResourceId.entrySet()) {
    Collection<Row<Measurement>> rows;
    try {
      rows = entry.getValue().get();
    } catch (InterruptedException | ExecutionException e) {
      throw Throwables.propagate(e);
    }
    final int N = rows.size();
    if (timestamps == null) {
      timestamps = new long[N];
      int k = 0;
      for (final Row<Measurement> row : rows) {
        timestamps[k] = row.getTimestamp().asMillis();
        k++;
      }
    }
    int k = 0;
    for (Row<Measurement> row : rows) {
      for (Measurement measurement : row.getElements()) {
        double[] column = columns.get(measurement.getName());
        if (column == null) {
          column = new double[N];
          columns.put(measurement.getName(), column);
        }
        column[k] = measurement.getValue();
      }
      k += 1;
    }
  }
  FetchResults fetchResults = new FetchResults(timestamps, columns, lag.getStep(), constants);
  if (relaxed) {
    Utils.fillMissingValues(fetchResults, sources);
  }
  LOG.trace("Fetch results: {}", fetchResults);
  return fetchResults;
}
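Two patterns recur above: wrapping query bounds in Optional.of() for the Newts API, and gathering per-resource Futures while rethrowing checked failures via Throwables.propagate(). A minimal sketch of the parallel-lookup half follows, using a hypothetical lookup that reports a missing resource as Optional.absent() (unlike the null-returning DAO above) so the caller can supply a default with or():

import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelLookupSketch {
  public static void main(String[] args) {
    ExecutorService threadPool = Executors.newFixedThreadPool(4);
    Map<String, Future<Optional<String>>> futuresById = new HashMap<>();
    // Submit one lookup per id, as fetch() does per ResourceId; this hypothetical
    // lookup reports a missing resource as Optional.absent() instead of null.
    for (final String id : new String[] { "node[1].if[eth0]", "node[2].if[eth1]" }) {
      futuresById.put(id, threadPool.submit(new Callable<Optional<String>>() {
        @Override
        public Optional<String> call() {
          return id.contains("eth0") ? Optional.of("resource:" + id) : Optional.<String>absent();
        }
      }));
    }
    // Gather the results; or() supplies a default where the lookup came back absent.
    for (Map.Entry<String, Future<Optional<String>>> entry : futuresById.entrySet()) {
      try {
        System.out.println(entry.getKey() + " -> " + entry.getValue().get().or("<not found>"));
      } catch (InterruptedException | ExecutionException e) {
        throw Throwables.propagate(e);
      }
    }
    threadPool.shutdown();
  }
}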
Use of com.google.common.base.Optional in project core-java by SpineEventEngine.
The class Validator, method validateTargetId.
private static void validateTargetId(Message message, ImmutableList.Builder<ConstraintViolation> result) {
  final Optional targetId = GetTargetIdFromCommand.asOptional(message);
  if (targetId.isPresent()) {
    final String targetIdString = idToString(targetId.get());
    if (targetIdString.equals(EMPTY_ID)) {
      final ConstraintViolation violation = newConstraintViolation(COMMAND_TARGET_ENTITY_ID_CANNOT_BE_EMPTY_OR_BLANK);
      result.add(violation);
    }
  }
}
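The validator relies on isPresent() as a guard before get(): a command without a target id is simply not checked, while a present-but-empty id is a violation. A minimal sketch of the same guard, with a hypothetical extractor in place of GetTargetIdFromCommand.asOptional():

import com.google.common.base.Optional;

public class TargetIdCheckSketch {
  static final String EMPTY_ID = "";  // assumed sentinel mirroring the validator's EMPTY_ID

  // Hypothetical stand-in for GetTargetIdFromCommand.asOptional(): commands that
  // carry no target id yield absent() rather than null.
  static Optional<String> targetIdOf(String command) {
    return command.startsWith("update:") ? Optional.of(command.substring("update:".length())) : Optional.<String>absent();
  }

  public static void main(String[] args) {
    for (String command : new String[] { "update:order-42", "update:", "create" }) {
      Optional<String> targetId = targetIdOf(command);
      // isPresent() guards get(): absent ids are skipped, empty ids are violations.
      if (targetId.isPresent() && targetId.get().equals(EMPTY_ID)) {
        System.out.println(command + " -> violation: target id must not be empty or blank");
      } else {
        System.out.println(command + " -> ok");
      }
    }
  }
}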
Use of com.google.common.base.Optional in project bazel by bazelbuild.
The class AndroidBinary, method init.
private static RuleConfiguredTargetBuilder init(RuleContext ruleContext, NestedSetBuilder<Artifact> filesBuilder, ResourceDependencies resourceDeps, JavaCommon javaCommon, AndroidCommon androidCommon, JavaSemantics javaSemantics, AndroidSemantics androidSemantics) throws InterruptedException, RuleErrorException {
  validateRuleContext(ruleContext);
  // TODO(bazel-team): Find a way to simplify this code.
  // treeKeys() means that the resulting map sorts the entries by key, which is necessary to
  // ensure determinism.
  Multimap<String, TransitiveInfoCollection> depsByArchitecture = MultimapBuilder.treeKeys().arrayListValues().build();
  AndroidConfiguration androidConfig = ruleContext.getFragment(AndroidConfiguration.class);
  for (Map.Entry<Optional<String>, ? extends List<? extends TransitiveInfoCollection>> entry : ruleContext.getSplitPrerequisites("deps").entrySet()) {
    String cpu = entry.getKey().or(androidConfig.getCpu());
    depsByArchitecture.putAll(cpu, entry.getValue());
  }
  Map<String, BuildConfiguration> configurationMap = new LinkedHashMap<>();
  Map<String, CcToolchainProvider> toolchainMap = new LinkedHashMap<>();
  for (Map.Entry<Optional<String>, ? extends List<? extends TransitiveInfoCollection>> entry : ruleContext.getSplitPrerequisites(":cc_toolchain_split").entrySet()) {
    String cpu = entry.getKey().or(androidConfig.getCpu());
    TransitiveInfoCollection dep = Iterables.getOnlyElement(entry.getValue());
    CcToolchainProvider toolchain = CppHelper.getToolchain(ruleContext, dep);
    configurationMap.put(cpu, dep.getConfiguration());
    toolchainMap.put(cpu, toolchain);
  }
  NativeLibs nativeLibs = NativeLibs.fromLinkedNativeDeps(ruleContext, androidSemantics.getNativeDepsFileName(), depsByArchitecture, toolchainMap, configurationMap);
  // TODO(bazel-team): Resolve all the different cases of resource handling so this conditional
  // can go away: recompile from android_resources, and recompile from android_binary attributes.
  ApplicationManifest applicationManifest;
  ResourceApk resourceApk;
  ResourceApk incrementalResourceApk;
  ResourceApk instantRunResourceApk;
  ResourceApk splitResourceApk;
  if (LocalResourceContainer.definesAndroidResources(ruleContext.attributes())) {
    // Retrieve and compile the resources defined on the android_binary rule.
    LocalResourceContainer.validateRuleContext(ruleContext);
    ApplicationManifest ruleManifest = androidSemantics.getManifestForRule(ruleContext);
    applicationManifest = ruleManifest.mergeWith(ruleContext, resourceDeps);
    resourceApk = applicationManifest.packWithDataAndResources(ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_RESOURCES_APK), ruleContext, false, /* isLibrary */
        resourceDeps, ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_R_TXT), null, /* Artifact symbolsTxt */
        ResourceConfigurationFilter.fromRuleContext(ruleContext), ruleContext.getTokenizedStringListAttr("nocompress_extensions"), ruleContext.attributes().get("crunch_png", Type.BOOLEAN), ruleContext.getTokenizedStringListAttr("densities"), false, /* incremental */
        ProguardHelper.getProguardConfigArtifact(ruleContext, ""), createMainDexProguardSpec(ruleContext), ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_PROCESSED_MANIFEST), ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_RESOURCES_ZIP), DataBinding.isEnabled(ruleContext) ? DataBinding.getLayoutInfoFile(ruleContext) : null);
    ruleContext.assertNoErrors();
    incrementalResourceApk = applicationManifest.addMobileInstallStubApplication(ruleContext).packWithDataAndResources(ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_INCREMENTAL_RESOURCES_APK), ruleContext, false, /* isLibrary */
        resourceDeps, null, /* Artifact rTxt */
        null, /* Artifact symbolsTxt */
        ResourceConfigurationFilter.fromRuleContext(ruleContext), ruleContext.getTokenizedStringListAttr("nocompress_extensions"), ruleContext.attributes().get("crunch_png", Type.BOOLEAN), ruleContext.getTokenizedStringListAttr("densities"), true, /* incremental */
        ProguardHelper.getProguardConfigArtifact(ruleContext, "incremental"), null, /* mainDexProguardCfg */
        null, /* manifestOut */
        null, /* mergedResourcesOut */
        null);
    ruleContext.assertNoErrors();
    instantRunResourceApk = applicationManifest.addInstantRunStubApplication(ruleContext).packWithDataAndResources(getDxArtifact(ruleContext, "android_instant_run.ap_"), ruleContext, false, /* isLibrary */
        resourceDeps, null, /* Artifact rTxt */
        null, /* Artifact symbolsTxt */
        ResourceConfigurationFilter.fromRuleContext(ruleContext), ruleContext.getTokenizedStringListAttr("nocompress_extensions"), ruleContext.attributes().get("crunch_png", Type.BOOLEAN), ruleContext.getTokenizedStringListAttr("densities"), true, /* incremental */
        ProguardHelper.getProguardConfigArtifact(ruleContext, "instant_run"), null, /* mainDexProguardCfg */
        null, /* manifestOut */
        null, /* mergedResourcesOut */
        null);
    ruleContext.assertNoErrors();
    splitResourceApk = applicationManifest.createSplitManifest(ruleContext, "android_resources", false).packWithDataAndResources(getDxArtifact(ruleContext, "android_resources.ap_"), ruleContext, false, /* isLibrary */
        resourceDeps, null, /* Artifact rTxt */
        null, /* Artifact symbolsTxt */
        ResourceConfigurationFilter.fromRuleContext(ruleContext), ruleContext.getTokenizedStringListAttr("nocompress_extensions"), ruleContext.attributes().get("crunch_png", Type.BOOLEAN), ruleContext.getTokenizedStringListAttr("densities"), true, /* incremental */
        ProguardHelper.getProguardConfigArtifact(ruleContext, "incremental_split"), null, /* mainDexProguardCfg */
        null, /* manifestOut */
        null, /* mergedResourcesOut */
        null);
    ruleContext.assertNoErrors();
  } else {
    if (!ruleContext.attributes().get("crunch_png", Type.BOOLEAN)) {
      ruleContext.throwWithRuleError("Setting crunch_png = 0 is not supported for android_binary" + " rules which depend on android_resources rules.");
    }
    // Retrieve the resources from the resources attribute on the android_binary rule
    // and recompile them if necessary.
    ApplicationManifest resourcesManifest = ApplicationManifest.fromResourcesRule(ruleContext);
    if (resourcesManifest == null) {
      throw new RuleErrorException();
    }
    applicationManifest = resourcesManifest.mergeWith(ruleContext, resourceDeps);
    // Always recompiling resources causes AndroidTest to fail in certain circumstances.
    if (shouldRegenerate(ruleContext, resourceDeps)) {
      resourceApk = applicationManifest.packWithResources(ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_RESOURCES_APK), ruleContext, resourceDeps, true, /* createSource */
          ProguardHelper.getProguardConfigArtifact(ruleContext, ""), createMainDexProguardSpec(ruleContext));
      ruleContext.assertNoErrors();
    } else {
      resourceApk = applicationManifest.useCurrentResources(ruleContext, ProguardHelper.getProguardConfigArtifact(ruleContext, ""), createMainDexProguardSpec(ruleContext));
      ruleContext.assertNoErrors();
    }
    incrementalResourceApk = applicationManifest.addMobileInstallStubApplication(ruleContext).packWithResources(ruleContext.getImplicitOutputArtifact(AndroidRuleClasses.ANDROID_INCREMENTAL_RESOURCES_APK), ruleContext, resourceDeps, false, /* createSource */
        ProguardHelper.getProguardConfigArtifact(ruleContext, "incremental"), null);
    ruleContext.assertNoErrors();
    instantRunResourceApk = applicationManifest.addInstantRunStubApplication(ruleContext).packWithResources(getDxArtifact(ruleContext, "android_instant_run.ap_"), ruleContext, resourceDeps, false, /* createSource */
        ProguardHelper.getProguardConfigArtifact(ruleContext, "instant_run"), null);
    ruleContext.assertNoErrors();
    splitResourceApk = applicationManifest.createSplitManifest(ruleContext, "android_resources", false).packWithResources(getDxArtifact(ruleContext, "android_resources.ap_"), ruleContext, resourceDeps, false, /* createSource */
        ProguardHelper.getProguardConfigArtifact(ruleContext, "incremental_split"), null);
    ruleContext.assertNoErrors();
  }
  boolean shrinkResources = shouldShrinkResources(ruleContext);
  JavaTargetAttributes resourceClasses = androidCommon.init(javaSemantics, androidSemantics, resourceApk, ruleContext.getConfiguration().isCodeCoverageEnabled(), true, /* collectJavaCompilationArgs */
      true);
  ruleContext.assertNoErrors();
  Function<Artifact, Artifact> derivedJarFunction = collectDesugaredJars(ruleContext, androidCommon, androidSemantics, resourceClasses);
  Artifact deployJar = createDeployJar(ruleContext, javaSemantics, androidCommon, resourceClasses, derivedJarFunction);
  Artifact proguardMapping = ruleContext.getPrerequisiteArtifact("proguard_apply_mapping", Mode.TARGET);
  return createAndroidBinary(ruleContext, filesBuilder, deployJar, derivedJarFunction, /* isBinaryJarFiltered */
      false, javaCommon, androidCommon, javaSemantics, androidSemantics, nativeLibs, applicationManifest, resourceApk, incrementalResourceApk, instantRunResourceApk, splitResourceApk, shrinkResources, resourceClasses, ImmutableList.<Artifact>of(), ImmutableList.<Artifact>of(), proguardMapping);
}
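The Optional usage here is entry.getKey().or(androidConfig.getCpu()): split prerequisites are keyed by Optional<String> CPU, and absent() means "no split configured", falling back to the default. A minimal sketch of that keying pattern, with the map contents and default CPU assumed for illustration:

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class SplitCpuSketch {
  public static void main(String[] args) {
    String defaultCpu = "armeabi-v7a";  // assumed default, standing in for androidConfig.getCpu()
    // Split prerequisites keyed by Optional<String>: absent() means "no split
    // configured for this dep", and or() falls back to the default CPU.
    Map<Optional<String>, String> depsBySplitKey = ImmutableMap.of(
        Optional.of("x86"), "libfoo-x86.so",
        Optional.<String>absent(), "libfoo-default.so");
    for (Map.Entry<Optional<String>, String> entry : depsBySplitKey.entrySet()) {
      String cpu = entry.getKey().or(defaultCpu);
      System.out.println(cpu + " -> " + entry.getValue());
    }
  }
}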