use of java.util.Collection in project hive by apache.
the class SparkUtilities method rddToString.
private static void rddToString(RDD rdd, StringBuilder sb, String offset) {
  sb.append(offset).append(rdd.getClass().getCanonicalName()).append("[").append(rdd.hashCode()).append("]");
  if (rdd.getStorageLevel().useMemory()) {
    sb.append("(cached)");
  }
  sb.append("\n");
  Collection<Dependency> dependencies = JavaConversions.asJavaCollection(rdd.dependencies());
  if (dependencies != null) {
    offset += "\t";
    for (Dependency dependency : dependencies) {
      RDD parentRdd = dependency.rdd();
      rddToString(parentRdd, sb, offset);
    }
  } else if (rdd instanceof UnionRDD) {
    UnionRDD unionRDD = (UnionRDD) rdd;
    offset += "\t";
    Collection<RDD> parentRdds = JavaConversions.asJavaCollection(unionRDD.rdds());
    for (RDD parentRdd : parentRdds) {
      rddToString(parentRdd, sb, offset);
    }
  }
}
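This recursive helper is typically driven by a small public entry point. A minimal sketch of such a wrapper follows; the name and signature are illustrative, not confirmed from Hive's source.

// Illustrative wrapper (hypothetical name): renders an RDD lineage graph
// as an indented, one-node-per-line string, e.g. for debug logging.
public static String rddGraphToString(JavaPairRDD<?, ?> rdd) {
  StringBuilder sb = new StringBuilder();
  rddToString(rdd.rdd(), sb, "");  // start the recursion with no indentation
  return sb.toString();
}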
use of java.util.Collection in project storm by apache.
the class DefaultResourceAwareStrategy method sortRacks.
/**
 * Sort racks.
 *
 * @param topoId topology id
 * @param scheduleAssignmentMap calculated assignments so far
 * @return a sorted list of racks
 * Racks are sorted by two criteria:
 * 1) the number of executors of the topology that are already scheduled on the rack, in descending order.
 * Sorting on this criterion first means we tend to schedule the rest of a topology on the same racks as its existing executors.
 * 2) the subordinate/subservient resource availability percentage of a rack, in descending order.
 * We calculate the resource availability percentage by dividing the resource availability on the rack by the resource availability of the entire cluster.
 * This ranks racks that have exhausted, or have little of, one of the resources after racks with more balanced resource availability,
 * so we are less likely to pick a rack that has a lot of one resource but little of another.
 */
TreeSet<ObjectResources> sortRacks(final String topoId, final Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    AllResources allResources = new AllResources("Cluster");
    List<ObjectResources> racks = allResources.objectResources;
    final Map<String, String> nodeIdToRackId = new HashMap<String, String>();
    for (Map.Entry<String, List<String>> entry : _clusterInfo.entrySet()) {
        String rackId = entry.getKey();
        List<String> nodeIds = entry.getValue();
        ObjectResources rack = new ObjectResources(rackId);
        racks.add(rack);
        for (String nodeId : nodeIds) {
            RAS_Node node = _nodes.getNodeById(this.NodeHostnameToId(nodeId));
            double availMem = node.getAvailableMemoryResources();
            double availCpu = node.getAvailableCpuResources();
            double totalMem = node.getTotalMemoryResources();
            double totalCpu = node.getTotalCpuResources();
            rack.availMem += availMem;
            rack.totalMem += totalMem;
            rack.availCpu += availCpu;
            rack.totalCpu += totalCpu;
            nodeIdToRackId.put(nodeId, rack.id);
            allResources.availMemResourcesOverall += availMem;
            allResources.availCpuResourcesOverall += availCpu;
            allResources.totalMemResourcesOverall += totalMem;
            allResources.totalCpuResourcesOverall += totalCpu;
        }
    }
    LOG.debug("Cluster Overall Avail [ CPU {} MEM {} ] Total [ CPU {} MEM {} ]",
        allResources.availCpuResourcesOverall, allResources.availMemResourcesOverall,
        allResources.totalCpuResourcesOverall, allResources.totalMemResourcesOverall);
    return sortObjectResources(allResources, new ExistingScheduleFunc() {
        @Override
        public int getNumExistingSchedule(String objectId) {
            String rackId = objectId;
            // Get execs already assigned in the rack
            Collection<ExecutorDetails> execs = new LinkedList<ExecutorDetails>();
            if (_cluster.getAssignmentById(topoId) != null) {
                for (Map.Entry<ExecutorDetails, WorkerSlot> entry : _cluster.getAssignmentById(topoId).getExecutorToSlot().entrySet()) {
                    String nodeId = entry.getValue().getNodeId();
                    String hostname = idToNode(nodeId).getHostname();
                    ExecutorDetails exec = entry.getKey();
                    if (nodeIdToRackId.get(hostname) != null && nodeIdToRackId.get(hostname).equals(rackId)) {
                        execs.add(exec);
                    }
                }
            }
            // Get execs already scheduled in the current scheduling
            for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : scheduleAssignmentMap.entrySet()) {
                WorkerSlot workerSlot = entry.getKey();
                String nodeId = workerSlot.getNodeId();
                String hostname = idToNode(nodeId).getHostname();
                if (nodeIdToRackId.get(hostname).equals(rackId)) {
                    execs.addAll(entry.getValue());
                }
            }
            return execs.size();
        }
    });
}
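To see why criterion 2 favors balanced racks, here is a hedged, self-contained example. It assumes, as the javadoc's "subordinate/subservient" wording suggests, that a rack's score is the minimum of its per-resource availability percentages; all numbers are invented.

static void rackScoreExample() {
    double clusterAvailCpu = 1000, clusterAvailMem = 1000;
    // Rack A: plenty of CPU left, but memory nearly exhausted.
    double rackAScore = Math.min(400 / clusterAvailCpu, 10 / clusterAvailMem);   // 0.01
    // Rack B: a balanced share of both resources.
    double rackBScore = Math.min(250 / clusterAvailCpu, 250 / clusterAvailMem);  // 0.25
    // Rack B ranks ahead of rack A even though rack A has more CPU available:
    // the "balanced availability first" behavior described above.
    System.out.println(rackBScore > rackAScore);  // true
}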
use of java.util.Collection in project storm by apache.
the class RAS_Node method intializeResources.
/**
 * Initializes resource usage on the node.
 */
private void intializeResources() {
    for (Entry<String, Map<String, Collection<ExecutorDetails>>> entry : _topIdToUsedSlots.entrySet()) {
        String topoId = entry.getKey();
        Map<String, Collection<ExecutorDetails>> assignment = entry.getValue();
        Map<ExecutorDetails, Double> topoMemoryResourceList = _topologies.getById(topoId).getTotalMemoryResourceList();
        for (Collection<ExecutorDetails> execs : assignment.values()) {
            for (ExecutorDetails exec : execs) {
                if (!_isAlive) {
                    // We do not free the assigned slots (the orphaned slots) on the inactive supervisors.
                    // The inactive node is treated as a 0-resource node and is not available for other unassigned workers.
                    continue;
                }
                if (topoMemoryResourceList.containsKey(exec)) {
                    consumeResourcesforTask(exec, _topologies.getById(topoId));
                } else {
                    throw new IllegalStateException("Executor " + exec + " not found!");
                }
            }
        }
    }
}
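The nested _topIdToUsedSlots structure the outer loop walks is easier to read in isolation. A minimal sketch of its shape with invented values (topology id -> worker slot id -> executors; the slot key format is illustrative):

// Invented values illustrating the structure the loop above iterates:
// topology id -> worker slot id -> executors assigned to that slot.
static Map<String, Map<String, Collection<ExecutorDetails>>> exampleUsedSlots() {
    Map<String, Collection<ExecutorDetails>> slots = new HashMap<>();
    slots.put("node1:6700", Arrays.asList(new ExecutorDetails(1, 2), new ExecutorDetails(3, 4)));
    Map<String, Map<String, Collection<ExecutorDetails>>> topIdToUsedSlots = new HashMap<>();
    topIdToUsedSlots.put("topology-1", slots);
    return topIdToUsedSlots;
}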
use of java.util.Collection in project buck by facebook.
the class DefaultClassUsageFileWriter method relativizeMap.
private static ImmutableSetMultimap<Path, Path> relativizeMap(ImmutableSetMultimap<Path, Path> classUsageMap, ProjectFilesystem filesystem) {
  final ImmutableSetMultimap.Builder<Path, Path> builder = ImmutableSetMultimap.builder();
  // Ensure deterministic ordering.
  builder.orderKeysBy(Comparator.naturalOrder());
  builder.orderValuesBy(Comparator.naturalOrder());
  for (Map.Entry<Path, Collection<Path>> jarClassesEntry : classUsageMap.asMap().entrySet()) {
    Path jarAbsolutePath = jarClassesEntry.getKey();
    Optional<Path> jarRelativePath = filesystem.getPathRelativeToProjectRoot(jarAbsolutePath);
    // Don't include jars that are outside of the filesystem.
    if (!jarRelativePath.isPresent()) {
      // Jars outside the project are coming from a build tool (e.g. JDK or Android SDK).
      continue;
    }
    builder.putAll(jarRelativePath.get(), jarClassesEntry.getValue());
  }
  return builder.build();
}
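The effect of the relativization is easy to see with plain java.nio.file; a minimal sketch with invented paths:

import java.nio.file.Path;
import java.nio.file.Paths;

static void relativizeExample() {
  Path projectRoot = Paths.get("/home/user/project");
  Path insideJar = Paths.get("/home/user/project/libs/guava.jar");
  Path outsideJar = Paths.get("/usr/lib/jvm/java-8/jre/lib/rt.jar");
  // Inside the project root: kept, relativized against the root.
  System.out.println(projectRoot.relativize(insideJar));  // libs/guava.jar
  // Outside the project root (e.g. a JDK jar): skipped, mirroring the
  // !jarRelativePath.isPresent() branch above.
  System.out.println(outsideJar.startsWith(projectRoot)); // false
}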
use of java.util.Collection in project buck by facebook.
the class AbstractJavacOptions method appendOptionsTo.
public void appendOptionsTo(OptionsConsumer optionsConsumer, SourcePathResolver pathResolver, ProjectFilesystem filesystem) {
  // Add some standard options.
  optionsConsumer.addOptionValue("source", getSourceLevel());
  optionsConsumer.addOptionValue("target", getTargetLevel());
  // Set the sourcepath to stop us reading source files out of jars by mistake.
  optionsConsumer.addOptionValue("sourcepath", "");
  if (isDebug()) {
    optionsConsumer.addFlag("g");
  }
  if (isVerbose()) {
    optionsConsumer.addFlag("verbose");
  }
  // Override the bootclasspath if Buck is building Java code for Android.
  if (getBootclasspath().isPresent()) {
    optionsConsumer.addOptionValue("bootclasspath", getBootclasspath().get());
  } else {
    String bcp = getSourceToBootclasspath().get(getSourceLevel());
    if (bcp != null) {
      optionsConsumer.addOptionValue("bootclasspath", bcp);
    }
  }
  // Add annotation processors.
  AnnotationProcessingParams annotationProcessingParams = getAnnotationProcessingParams();
  if (!annotationProcessingParams.isEmpty()) {
    // Specify where to generate sources so IntelliJ can pick them up.
    Path generateTo = annotationProcessingParams.getGeneratedSourceFolderName();
    if (generateTo != null) {
      //noinspection ConstantConditions
      optionsConsumer.addOptionValue("s", filesystem.resolve(generateTo).toString());
    }
    ImmutableList<ResolvedJavacPluginProperties> annotationProcessors =
        annotationProcessingParams.getAnnotationProcessors(filesystem, pathResolver);
    // Specify processorpath to search for processors.
    optionsConsumer.addOptionValue(
        "processorpath",
        annotationProcessors.stream()
            .map(ResolvedJavacPluginProperties::getClasspath)
            .flatMap(Arrays::stream)
            .distinct()
            .map(URL::toString)
            .collect(Collectors.joining(File.pathSeparator)));
    // Specify names of processors.
    optionsConsumer.addOptionValue(
        "processor",
        annotationProcessors.stream()
            .map(ResolvedJavacPluginProperties::getProcessorNames)
            .flatMap(Collection::stream)
            .collect(Collectors.joining(",")));
    // Add processor parameters.
    for (String parameter : annotationProcessingParams.getParameters()) {
      optionsConsumer.addFlag("A" + parameter);
    }
    if (annotationProcessingParams.getProcessOnly()) {
      optionsConsumer.addFlag("proc:only");
    }
  }
  // Add extra arguments.
  optionsConsumer.addExtras(getExtraArguments());
}
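OptionsConsumer's definition is not shown here. As a sketch, a hypothetical consumer implementing the three calls this method makes could render them as javac-style arguments; the class below is invented for illustration and is not Buck's actual interface.

import java.util.ArrayList;
import java.util.List;

// Hypothetical consumer: collects the calls made by appendOptionsTo
// into a javac-style argument list.
class CommandLineOptionsConsumer {
  private final List<String> args = new ArrayList<>();

  void addOptionValue(String option, String value) {
    args.add("-" + option);  // e.g. "-source"
    args.add(value);         // e.g. "8"
  }

  void addFlag(String flag) {
    args.add("-" + flag);    // e.g. "-g", "-verbose", "-Akey=value", "-proc:only"
  }

  void addExtras(Iterable<String> extras) {
    for (String extra : extras) {
      args.add(extra);       // user-supplied arguments, passed through verbatim
    }
  }

  List<String> getArgs() {
    return args;
  }
}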