Use of java.text.DecimalFormat in project camel by apache: class TimeUtils, method printDuration.
/**
 * Prints the duration in a human readable format as X days Y hours Z minutes etc.
 *
 * @param uptime the uptime in millis
 * @return the time used for displaying on screen or in logs
 */
public static String printDuration(double uptime) {
    // Code taken from Karaf
    // https://svn.apache.org/repos/asf/karaf/trunk/shell/commands/src/main/java/org/apache/karaf/shell/commands/impl/InfoAction.java
    NumberFormat fmtI = new DecimalFormat("###,###", new DecimalFormatSymbols(Locale.ENGLISH));
    NumberFormat fmtD = new DecimalFormat("###,##0.000", new DecimalFormatSymbols(Locale.ENGLISH));
    uptime /= 1000;
    if (uptime < 60) {
        return fmtD.format(uptime) + " seconds";
    }
    uptime /= 60;
    if (uptime < 60) {
        long minutes = (long) uptime;
        String s = fmtI.format(minutes) + (minutes > 1 ? " minutes" : " minute");
        return s;
    }
    uptime /= 60;
    if (uptime < 24) {
        long hours = (long) uptime;
        long minutes = (long) ((uptime - hours) * 60);
        String s = fmtI.format(hours) + (hours > 1 ? " hours" : " hour");
        if (minutes != 0) {
            s += " " + fmtI.format(minutes) + (minutes > 1 ? " minutes" : " minute");
        }
        return s;
    }
    uptime /= 24;
    long days = (long) uptime;
    long hours = (long) ((uptime - days) * 24);
    String s = fmtI.format(days) + (days > 1 ? " days" : " day");
    if (hours != 0) {
        s += " " + fmtI.format(hours) + (hours > 1 ? " hours" : " hour");
    }
    return s;
}
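For illustration, a minimal standalone sketch (not part of Camel) of the two DecimalFormat patterns used above: the grouped integer pattern "###,###" and the fixed three-decimal pattern "###,##0.000". The class name PrintDurationDemo and the sample values are hypothetical.

import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.text.NumberFormat;
import java.util.Locale;

public class PrintDurationDemo {
    public static void main(String[] args) {
        // Same patterns as printDuration above: grouped integers vs. exactly three decimals
        NumberFormat fmtI = new DecimalFormat("###,###", new DecimalFormatSymbols(Locale.ENGLISH));
        NumberFormat fmtD = new DecimalFormat("###,##0.000", new DecimalFormatSymbols(Locale.ENGLISH));
        System.out.println(fmtD.format(42500 / 1000.0) + " seconds"); // prints "42.500 seconds"
        System.out.println(fmtI.format(1234567) + " minutes");        // prints "1,234,567 minutes"
    }
}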
Use of java.text.DecimalFormat in project hadoop by apache: class ErasureCodeBenchmarkThroughput, method benchmark.
private void benchmark(OpType type, int dataSizeMB, int numClients, boolean isEc, boolean statefulRead) throws Exception {
    List<Long> sizes = null;
    StopWatch sw = new StopWatch().start();
    switch (type) {
        case READ:
            sizes = doBenchmark(true, dataSizeMB, numClients, isEc, statefulRead, false);
            break;
        case WRITE:
            sizes = doBenchmark(false, dataSizeMB, numClients, isEc, statefulRead, false);
            break;
        case GEN:
            sizes = doBenchmark(false, dataSizeMB, numClients, isEc, statefulRead, true);
    }
    long elapsedSec = sw.now(TimeUnit.SECONDS);
    double totalDataSizeMB = 0;
    for (Long size : sizes) {
        if (size >= 0) {
            totalDataSizeMB += size.doubleValue() / 1024 / 1024;
        }
    }
    double throughput = totalDataSizeMB / elapsedSec;
    DecimalFormat df = getDecimalFormat();
    System.out.println(type + " " + df.format(totalDataSizeMB) + " MB data takes: " + elapsedSec + " s.\nTotal throughput: " + df.format(throughput) + " MB/s.");
}
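A minimal sketch of the formatting done by the println above. The "#.##" pattern standing in for getDecimalFormat(), the class name, and the sample sizes are assumptions for illustration only.

import java.text.DecimalFormat;

public class ThroughputFormatDemo {
    public static void main(String[] args) {
        // Hypothetical stand-in for getDecimalFormat(); the "#.##" pattern is an assumption
        DecimalFormat df = new DecimalFormat("#.##");
        double totalDataSizeMB = 3L * 1024 * 1024 * 1024 / 1024.0 / 1024.0; // 3 GB of data expressed in MB
        long elapsedSec = 7;                                                 // hypothetical elapsed time
        double throughput = totalDataSizeMB / elapsedSec;
        // Mirrors the output line in benchmark(), e.g. "READ 3072 MB data takes: 7 s." / "Total throughput: 438.86 MB/s."
        System.out.println("READ " + df.format(totalDataSizeMB) + " MB data takes: " + elapsedSec
                + " s.\nTotal throughput: " + df.format(throughput) + " MB/s.");
    }
}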
Use of java.text.DecimalFormat in project hadoop by apache: class QueueCLI, method printQueueInfo.
private void printQueueInfo(PrintWriter writer, QueueInfo queueInfo) {
    writer.print("Queue Name : ");
    writer.println(queueInfo.getQueueName());
    writer.print("\tState : ");
    writer.println(queueInfo.getQueueState());
    DecimalFormat df = new DecimalFormat("#.0");
    writer.print("\tCapacity : ");
    writer.println(df.format(queueInfo.getCapacity() * 100) + "%");
    writer.print("\tCurrent Capacity : ");
    writer.println(df.format(queueInfo.getCurrentCapacity() * 100) + "%");
    writer.print("\tMaximum Capacity : ");
    writer.println(df.format(queueInfo.getMaximumCapacity() * 100) + "%");
    writer.print("\tDefault Node Label expression : ");
    String nodeLabelExpression = queueInfo.getDefaultNodeLabelExpression();
    nodeLabelExpression = (nodeLabelExpression == null || nodeLabelExpression.trim().isEmpty()) ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : nodeLabelExpression;
    writer.println(nodeLabelExpression);
    Set<String> nodeLabels = queueInfo.getAccessibleNodeLabels();
    StringBuilder labelList = new StringBuilder();
    writer.print("\tAccessible Node Labels : ");
    for (String nodeLabel : nodeLabels) {
        if (labelList.length() > 0) {
            labelList.append(',');
        }
        labelList.append(nodeLabel);
    }
    writer.println(labelList.toString());
    Boolean preemptStatus = queueInfo.getPreemptionDisabled();
    if (preemptStatus != null) {
        writer.print("\tPreemption : ");
        writer.println(preemptStatus ? "disabled" : "enabled");
    }
}
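As a quick illustration of the "#.0" pattern above, which always keeps exactly one fraction digit, here is a minimal sketch; the class name and the sample capacity fractions are hypothetical.

import java.text.DecimalFormat;

public class QueueCapacityFormatDemo {
    public static void main(String[] args) {
        DecimalFormat df = new DecimalFormat("#.0");
        float capacity = 0.5f;         // fraction of cluster capacity, as the QueueInfo getters return it
        float maximumCapacity = 1.0f;
        System.out.println("\tCapacity : " + df.format(capacity * 100) + "%");                // prints "50.0%"
        System.out.println("\tMaximum Capacity : " + df.format(maximumCapacity * 100) + "%"); // prints "100.0%"
    }
}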
Use of java.text.DecimalFormat in project hadoop by apache: class FSAppAttempt, method reservationExceedsThreshold.
private boolean reservationExceedsThreshold(FSSchedulerNode node, NodeType type) {
    // Only if not node-local
    if (type != NodeType.NODE_LOCAL) {
        int existingReservations = getNumReservations(node.getRackName(), type == NodeType.OFF_SWITCH);
        int totalAvailNodes = (type == NodeType.OFF_SWITCH) ? scheduler.getNumClusterNodes() : scheduler.getNumNodesInRack(node.getRackName());
        int numAllowedReservations = (int) Math.ceil(totalAvailNodes * scheduler.getReservableNodesRatio());
        if (existingReservations >= numAllowedReservations) {
            DecimalFormat df = new DecimalFormat();
            df.setMaximumFractionDigits(2);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Reservation Exceeds Allowed number of nodes:" + " app_id=" + getApplicationId() + " existingReservations=" + existingReservations + " totalAvailableNodes=" + totalAvailNodes + " reservableNodesRatio=" + df.format(scheduler.getReservableNodesRatio()) + " numAllowedReservations=" + numAllowedReservations);
            }
            return true;
        }
    }
    return false;
}
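A minimal sketch of the DecimalFormat usage in the debug log above: a default-pattern formatter capped at two fraction digits. The class name and the sample ratio are hypothetical.

import java.text.DecimalFormat;

public class ReservationRatioFormatDemo {
    public static void main(String[] args) {
        DecimalFormat df = new DecimalFormat();
        df.setMaximumFractionDigits(2);
        double reservableNodesRatio = 0.0515;  // hypothetical scheduler ratio
        // In an English locale this prints "reservableNodesRatio=0.05"
        System.out.println("reservableNodesRatio=" + df.format(reservableNodesRatio));
    }
}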
Use of java.text.DecimalFormat in project hbase by apache: class AssignmentVerificationReport, method print.
public void print(boolean isDetailMode) {
    if (!isFilledUp) {
        System.err.println("[Error] Region assignment verification report" + " hasn't been filled up");
    }
    DecimalFormat df = new java.text.DecimalFormat("#.##");
    // Print some basic information
    System.out.println("Region Assignment Verification for Table: " + tableName + "\n\tTotal regions : " + totalRegions);
    // Print the number of regions on each kind of favored node
    System.out.println("\tTotal regions on favored nodes " + totalFavoredAssignments);
    for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
        System.out.println("\t\tTotal regions on " + p.toString() + " region servers: " + favoredNodes[p.ordinal()]);
    }
    // Print the number of regions in each kind of invalid assignment
    System.out.println("\tTotal unassigned regions: " + unAssignedRegionsList.size());
    if (isDetailMode) {
        for (HRegionInfo region : unAssignedRegionsList) {
            System.out.println("\t\t" + region.getRegionNameAsString());
        }
    }
    System.out.println("\tTotal regions NOT on favored nodes: " + nonFavoredAssignedRegionList.size());
    if (isDetailMode) {
        for (HRegionInfo region : nonFavoredAssignedRegionList) {
            System.out.println("\t\t" + region.getRegionNameAsString());
        }
    }
    System.out.println("\tTotal regions without favored nodes: " + regionsWithoutValidFavoredNodes.size());
    if (isDetailMode) {
        for (HRegionInfo region : regionsWithoutValidFavoredNodes) {
            System.out.println("\t\t" + region.getRegionNameAsString());
        }
    }
    // Print the locality information if enabled
    if (this.enforceLocality && totalRegions != 0) {
        // Print the actual locality for this table
        float actualLocality = 100 * this.actualLocalitySummary / (float) totalRegions;
        System.out.println("\n\tThe actual avg locality is " + df.format(actualLocality) + " %");
        // Print the expected locality if all regions were on their favored nodes
        for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
            float avgLocality = 100 * (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions);
            System.out.println("\t\tThe expected avg locality if all regions" + " on the " + p.toString() + " region servers: " + df.format(avgLocality) + " %");
        }
    }
    // Print the region balancing information
    System.out.println("\n\tTotal hosting region servers: " + totalRegionServers);
    // Print the region balance information
    if (totalRegionServers != 0) {
        System.out.println("\tAvg dispersion num: " + df.format(avgDispersionNum) + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + " hosts;");
        System.out.println("\t\tThe number of the region servers with the max" + " dispersion num: " + this.maxDispersionNumServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(maxDispersionNumServerSet);
        }
        System.out.println("\t\tThe number of the region servers with the min" + " dispersion num: " + this.minDispersionNumServerSet.size());
        if (isDetailMode) {
            // Print the min-dispersion set here (the original snippet reused maxDispersionNumServerSet)
            printHServerAddressSet(minDispersionNumServerSet);
        }
        System.out.println("\tAvg dispersion score: " + df.format(avgDispersionScore) + ";\tMax dispersion score: " + df.format(maxDispersionScore) + ";\tMin dispersion score: " + df.format(minDispersionScore) + ";");
        System.out.println("\t\tThe number of the region servers with the max" + " dispersion score: " + this.maxDispersionScoreServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(maxDispersionScoreServerSet);
        }
        System.out.println("\t\tThe number of the region servers with the min" + " dispersion score: " + this.minDispersionScoreServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(minDispersionScoreServerSet);
        }
        System.out.println("\tAvg regions/region server: " + df.format(avgRegionsOnRS) + ";\tMax regions/region server: " + maxRegionsOnRS + ";\tMin regions/region server: " + minRegionsOnRS + ";");
        // Print the details about the most loaded region servers
        System.out.println("\t\tThe number of the most loaded region servers: " + mostLoadedRSSet.size());
        if (isDetailMode) {
            printHServerAddressSet(mostLoadedRSSet);
        }
        // Print the details about the least loaded region servers
        System.out.println("\t\tThe number of the least loaded region servers: " + leastLoadedRSSet.size());
        if (isDetailMode) {
            printHServerAddressSet(leastLoadedRSSet);
        }
    }
    System.out.println("==============================");
}
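For reference, a minimal sketch of the "#.##" pattern used throughout this report, which keeps at most two fraction digits and drops trailing zeros; the class name and the sample counts are hypothetical.

import java.text.DecimalFormat;

public class LocalityFormatDemo {
    public static void main(String[] args) {
        DecimalFormat df = new DecimalFormat("#.##");
        float actualLocality = 100 * 17 / (float) 24;  // e.g. a locality summary of 17 across 24 regions
        // Prints "The actual avg locality is 70.83 %"
        System.out.println("The actual avg locality is " + df.format(actualLocality) + " %");
    }
}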