Example 81 with Date

Use of java.util.Date in the Apache Hadoop project.

From the class DFSAdmin, method getReconfigurationStatus.

int getReconfigurationStatus(final String nodeType, final String address, final PrintStream out, final PrintStream err) throws IOException {
    String outMsg = null;
    String errMsg = null;
    ReconfigurationTaskStatus status = null;
    try {
        status = getReconfigurationStatusDispatch(nodeType, address, out, err);
        outMsg = String.format("Reconfiguring status for node [%s]: ", address);
    } catch (IOException e) {
        errMsg = String.format("Node [%s] reloading configuration: %s.", address, e.toString());
    }
    if (errMsg != null) {
        err.println(errMsg);
        return 1;
    } else {
        out.print(outMsg);
    }
    if (status != null) {
        if (!status.hasTask()) {
            out.println("no task was found.");
            return 0;
        }
        out.print("started at " + new Date(status.getStartTime()));
        if (!status.stopped()) {
            out.println(" and is still running.");
            return 0;
        }
        out.println(" and finished at " + new Date(status.getEndTime()).toString() + ".");
        if (status.getStatus() == null) {
            // Nothing to report.
            return 0;
        }
        for (Map.Entry<PropertyChange, Optional<String>> result : status.getStatus().entrySet()) {
            if (!result.getValue().isPresent()) {
                out.printf("SUCCESS: Changed property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n", result.getKey().prop, result.getKey().oldVal, result.getKey().newVal);
            } else {
                final String errorMsg = result.getValue().get();
                out.printf("FAILED: Change property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n", result.getKey().prop, result.getKey().oldVal, result.getKey().newVal);
                out.println("\tError: " + errorMsg + ".");
            }
        }
    } else {
        return 1;
    }
    return 0;
}
Also used: ReconfigurationTaskStatus (org.apache.hadoop.conf.ReconfigurationTaskStatus), PropertyChange (org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange), Optional (com.google.common.base.Optional), IOException (java.io.IOException), Map (java.util.Map), HashMap (java.util.HashMap), Date (java.util.Date)
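
For context, a minimal standalone sketch of the Date idiom used above: java.util.Date wraps an epoch-millisecond timestamp, and its toString() yields the human-readable form printed for the task's start and end times. The class and helper names here (TaskWindowDemo, printTaskWindow) are illustrative, not part of the Hadoop API.

import java.util.Date;

public class TaskWindowDemo {
    // Hypothetical helper mirroring the "started at ... finished at ..." output above.
    static void printTaskWindow(long startMillis, long endMillis) {
        System.out.print("started at " + new Date(startMillis));
        if (endMillis <= 0) {
            System.out.println(" and is still running.");
        } else {
            System.out.println(" and finished at " + new Date(endMillis) + ".");
        }
    }

    public static void main(String[] args) {
        // Example: a task that started five seconds ago and just finished.
        printTaskWindow(System.currentTimeMillis() - 5_000L, System.currentTimeMillis());
    }
}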

Example 82 with Date

Use of java.util.Date in the Apache Hadoop project.

From the class DatanodeInfo, method getDatanodeReport.

/** A formatted string for reporting the status of the DataNode. */
public String getDatanodeReport() {
    StringBuilder buffer = new StringBuilder();
    long c = getCapacity();
    long r = getRemaining();
    long u = getDfsUsed();
    long nonDFSUsed = getNonDfsUsed();
    float usedPercent = getDfsUsedPercent();
    float remainingPercent = getRemainingPercent();
    long cc = getCacheCapacity();
    long cr = getCacheRemaining();
    long cu = getCacheUsed();
    float cacheUsedPercent = getCacheUsedPercent();
    float cacheRemainingPercent = getCacheRemainingPercent();
    String lookupName = NetUtils.getHostNameOfIP(getName());
    buffer.append("Name: ").append(getName());
    if (lookupName != null) {
        buffer.append(" (").append(lookupName).append(")");
    }
    buffer.append("\n");
    buffer.append("Hostname: ").append(getHostName()).append("\n");
    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
        buffer.append("Rack: ").append(location).append("\n");
    }
    if (upgradeDomain != null) {
        buffer.append("Upgrade domain: ").append(upgradeDomain).append("\n");
    }
    buffer.append("Decommission Status : ");
    if (isDecommissioned()) {
        buffer.append("Decommissioned\n");
    } else if (isDecommissionInProgress()) {
        buffer.append("Decommission in progress\n");
    } else if (isInMaintenance()) {
        buffer.append("In maintenance\n");
    } else if (isEnteringMaintenance()) {
        buffer.append("Entering maintenance\n");
    } else {
        buffer.append("Normal\n");
    }
    buffer.append("Configured Capacity: ").append(c).append(" (").append(StringUtils.byteDesc(c)).append(")").append("\n");
    buffer.append("DFS Used: ").append(u).append(" (").append(StringUtils.byteDesc(u)).append(")").append("\n");
    buffer.append("Non DFS Used: ").append(nonDFSUsed).append(" (").append(StringUtils.byteDesc(nonDFSUsed)).append(")").append("\n");
    buffer.append("DFS Remaining: ").append(r).append(" (").append(StringUtils.byteDesc(r)).append(")").append("\n");
    buffer.append("DFS Used%: ").append(percent2String(usedPercent)).append("\n");
    buffer.append("DFS Remaining%: ").append(percent2String(remainingPercent)).append("\n");
    buffer.append("Configured Cache Capacity: ").append(cc).append(" (").append(StringUtils.byteDesc(cc)).append(")").append("\n");
    buffer.append("Cache Used: ").append(cu).append(" (").append(StringUtils.byteDesc(cu)).append(")").append("\n");
    buffer.append("Cache Remaining: ").append(cr).append(" (").append(StringUtils.byteDesc(cr)).append(")").append("\n");
    buffer.append("Cache Used%: ").append(percent2String(cacheUsedPercent)).append("\n");
    buffer.append("Cache Remaining%: ").append(percent2String(cacheRemainingPercent)).append("\n");
    buffer.append("Xceivers: ").append(getXceiverCount()).append("\n");
    buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
    buffer.append("Last Block Report: ").append(lastBlockReportTime != 0 ? new Date(lastBlockReportTime) : "Never").append("\n");
    return buffer.toString();
}
Also used: DFSUtilClient.percent2String (org.apache.hadoop.hdfs.DFSUtilClient.percent2String), Date (java.util.Date)
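
A minimal, self-contained sketch of the report pattern above: epoch-millisecond fields are appended as Date objects, with a sentinel value of 0 rendered as "Never". The field values are illustrative; the DatanodeInfo getters and StringUtils.byteDesc are Hadoop-specific and omitted here.

import java.util.Date;

public class ReportDemo {
    public static void main(String[] args) {
        long lastUpdate = System.currentTimeMillis();   // illustrative value
        long lastBlockReportTime = 0L;                  // 0 means "never reported"
        StringBuilder buffer = new StringBuilder();
        buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
        buffer.append("Last Block Report: ")
              .append(lastBlockReportTime != 0 ? new Date(lastBlockReportTime) : "Never")
              .append("\n");
        System.out.print(buffer);
    }
}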

Example 83 with Date

Use of java.util.Date in the Apache Hadoop project.

From the class SnapshottableDirectoryStatus, method print.

/**
   * Print a list of {@link SnapshottableDirectoryStatus} out to a given stream.
   * @param stats The list of {@link SnapshottableDirectoryStatus}
   * @param out The given stream for printing.
   */
public static void print(SnapshottableDirectoryStatus[] stats, PrintStream out) {
    if (stats == null || stats.length == 0) {
        out.println();
        return;
    }
    int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
    int maxSnapshotNum = 0, maxSnapshotQuota = 0;
    for (SnapshottableDirectoryStatus status : stats) {
        maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
        maxLen = maxLength(maxLen, status.dirStatus.getLen());
        maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
        maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
        maxSnapshotNum = maxLength(maxSnapshotNum, status.snapshotNumber);
        maxSnapshotQuota = maxLength(maxSnapshotQuota, status.snapshotQuota);
    }
    String lineFormat = "%s%s "                                  // permission string
        + "%" + maxRepl + "s "
        + (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
        + (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
        + "%" + maxLen + "s "
        + "%s "                                                  // modification time
        + "%" + maxSnapshotNum + "s "
        + "%" + maxSnapshotQuota + "s "
        + "%s";                                                  // path
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
    for (SnapshottableDirectoryStatus status : stats) {
        String line = String.format(lineFormat, "d", status.dirStatus.getPermission(),
            status.dirStatus.getReplication(), status.dirStatus.getOwner(),
            status.dirStatus.getGroup(), String.valueOf(status.dirStatus.getLen()),
            dateFormat.format(new Date(status.dirStatus.getModificationTime())),
            status.snapshotNumber, status.snapshotQuota,
            status.getFullPath().toString());
        out.println(line);
    }
}
Also used: SimpleDateFormat (java.text.SimpleDateFormat), Date (java.util.Date)
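
A short standalone sketch of the two formatting techniques used in print(): SimpleDateFormat for the modification time, and a width-parameterized format string built at runtime so columns line up across rows. The column width and field values here are made up for illustration.

import java.text.SimpleDateFormat;
import java.util.Date;

public class LineFormatDemo {
    public static void main(String[] args) {
        // In the real code this width is computed from the data; 6 is illustrative.
        int maxOwner = 6;
        // Build the format string dynamically: left-align the owner in a fixed column.
        String lineFormat = "%-" + maxOwner + "s %s %s";
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
        String line = String.format(lineFormat, "hdfs",
            dateFormat.format(new Date(System.currentTimeMillis())),
            "/user/hdfs/snapdir");
        System.out.println(line);
    }
}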

Example 84 with Date

Use of java.util.Date in the Apache Hadoop project.

From the class CacheReplicationMonitor, method rescanCacheDirectives.

/**
   * Scan all CacheDirectives.  Use the information to figure out
   * what cache replication factor each block should have.
   */
private void rescanCacheDirectives() {
    FSDirectory fsDir = namesystem.getFSDirectory();
    final long now = new Date().getTime();
    for (CacheDirective directive : cacheManager.getCacheDirectives()) {
        scannedDirectives++;
        // Skip processing this entry if it has expired
        if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
            LOG.debug("Directive {}: the directive expired at {} (now = {})", directive.getId(), directive.getExpiryTime(), now);
            continue;
        }
        String path = directive.getPath();
        INode node;
        try {
            node = fsDir.getINode(path, DirOp.READ);
        } catch (IOException e) {
            // We don't cache through symlinks or invalid paths
            LOG.debug("Directive {}: Failed to resolve path {} ({})", directive.getId(), path, e.getMessage());
            continue;
        }
        if (node == null) {
            LOG.debug("Directive {}: No inode found at {}", directive.getId(), path);
        } else if (node.isDirectory()) {
            INodeDirectory dir = node.asDirectory();
            ReadOnlyList<INode> children = dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
            for (INode child : children) {
                if (child.isFile()) {
                    rescanFile(directive, child.asFile());
                }
            }
        } else if (node.isFile()) {
            rescanFile(directive, node.asFile());
        } else {
            LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ", directive.getId(), node);
        }
    }
}
Also used: INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode (org.apache.hadoop.hdfs.server.namenode.INode), ReadOnlyList (org.apache.hadoop.hdfs.util.ReadOnlyList), CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), IOException (java.io.IOException), Date (java.util.Date)
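
The expiry check above reduces to a plain epoch-millisecond comparison. A minimal sketch, with a hypothetical isExpired helper that is not part of the Hadoop API; note that new Date().getTime() is equivalent to System.currentTimeMillis().

import java.util.Date;

public class ExpiryDemo {
    // Mirrors the directive-expiry test in rescanCacheDirectives():
    // skip the entry if it has a deadline and the deadline has passed.
    static boolean isExpired(long expiryTimeMillis, long nowMillis) {
        return expiryTimeMillis > 0 && expiryTimeMillis <= nowMillis;
    }

    public static void main(String[] args) {
        long now = new Date().getTime();
        System.out.println(isExpired(now - 1_000L, now)); // true: expired a second ago
        System.out.println(isExpired(0L, now));           // false: 0 means "no expiry"
    }
}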

Example 85 with Date

Use of java.util.Date in the Apache Hadoop project.

From the class NNBench, method analyzeResults.

/**
   * Analyze the results
   * @throws IOException on error
   */
private int analyzeResults() throws IOException {
    final FileSystem fs = FileSystem.get(getConf());
    Path reduceDir = new Path(baseDir, OUTPUT_DIR_NAME);
    long totalTimeAL1 = 0L;
    long totalTimeAL2 = 0L;
    long totalTimeTPmS = 0L;
    long lateMaps = 0L;
    long numOfExceptions = 0L;
    long successfulFileOps = 0L;
    long mapStartTimeTPmS = 0L;
    long mapEndTimeTPmS = 0L;
    FileStatus[] fss = fs.listStatus(reduceDir);
    for (FileStatus status : fss) {
        Path reduceFile = status.getPath();
        try (DataInputStream in = new DataInputStream(fs.open(reduceFile));
            BufferedReader lines = new BufferedReader(new InputStreamReader(in))) {
            String line;
            while ((line = lines.readLine()) != null) {
                StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%;");
                String attr = tokens.nextToken();
                if (attr.endsWith(":totalTimeAL1")) {
                    totalTimeAL1 = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":totalTimeAL2")) {
                    totalTimeAL2 = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":totalTimeTPmS")) {
                    totalTimeTPmS = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":latemaps")) {
                    lateMaps = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":numOfExceptions")) {
                    numOfExceptions = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":successfulFileOps")) {
                    successfulFileOps = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":mapStartTimeTPmS")) {
                    mapStartTimeTPmS = Long.parseLong(tokens.nextToken());
                } else if (attr.endsWith(":mapEndTimeTPmS")) {
                    mapEndTimeTPmS = Long.parseLong(tokens.nextToken());
                }
            }
        }
    }
    // Average latency is the average time to perform 'n' number of
    // operations, n being the number of files
    double avgLatency1 = (double) totalTimeAL1 / successfulFileOps;
    double avgLatency2 = (double) totalTimeAL2 / successfulFileOps;
    // The time it takes for the longest running map is measured. Using that,
    // cluster transactions per second is calculated. It includes time to 
    // retry any of the failed operations
    double longestMapTimeTPmS = (double) (mapEndTimeTPmS - mapStartTimeTPmS);
    double totalTimeTPS = (longestMapTimeTPmS == 0) ? (1000 * successfulFileOps) : (double) (1000 * successfulFileOps) / longestMapTimeTPmS;
    // The time it takes to perform 'n' operations is calculated (in ms),
    // n being the number of files. Using that time, the average execution 
    // time is calculated. It includes time to retry any of the
    // failed operations
    double AverageExecutionTime = (totalTimeTPmS == 0) ? (double) successfulFileOps : (double) totalTimeTPmS / successfulFileOps;
    String resultTPSLine1 = null;
    String resultTPSLine2 = null;
    String resultALLine1 = null;
    String resultALLine2 = null;
    if (operation.equals(OP_CREATE_WRITE)) {
        // For create/write/close, it is treated as two transactions,
        // since a file create from a client perspective involves create and close
        resultTPSLine1 = "               TPS: Create/Write/Close: " + (int) (totalTimeTPS * 2);
        resultTPSLine2 = "Avg exec time (ms): Create/Write/Close: " + AverageExecutionTime;
        resultALLine1 = "            Avg Lat (ms): Create/Write: " + avgLatency1;
        resultALLine2 = "                   Avg Lat (ms): Close: " + avgLatency2;
    } else if (operation.equals(OP_OPEN_READ)) {
        resultTPSLine1 = "                        TPS: Open/Read: " + (int) totalTimeTPS;
        resultTPSLine2 = "         Avg Exec time (ms): Open/Read: " + AverageExecutionTime;
        resultALLine1 = "                    Avg Lat (ms): Open: " + avgLatency1;
        if (readFileAfterOpen) {
            resultALLine2 = "                  Avg Lat (ms): Read: " + avgLatency2;
        }
    } else if (operation.equals(OP_RENAME)) {
        resultTPSLine1 = "                           TPS: Rename: " + (int) totalTimeTPS;
        resultTPSLine2 = "            Avg Exec time (ms): Rename: " + AverageExecutionTime;
        resultALLine1 = "                  Avg Lat (ms): Rename: " + avgLatency1;
    } else if (operation.equals(OP_DELETE)) {
        resultTPSLine1 = "                           TPS: Delete: " + (int) totalTimeTPS;
        resultTPSLine2 = "            Avg Exec time (ms): Delete: " + AverageExecutionTime;
        resultALLine1 = "                  Avg Lat (ms): Delete: " + avgLatency1;
    }
    String[] resultLines = {
        "-------------- NNBench -------------- : ",
        "                               Version: " + NNBENCH_VERSION,
        "                           Date & time: " + sdf.format(new Date(System.currentTimeMillis())),
        "",
        "                        Test Operation: " + operation,
        "                            Start time: " + sdf.format(new Date(startTime)),
        "                           Maps to run: " + numberOfMaps,
        "                        Reduces to run: " + numberOfReduces,
        "                    Block Size (bytes): " + blockSize,
        "                        Bytes to write: " + bytesToWrite,
        "                    Bytes per checksum: " + bytesPerChecksum,
        "                       Number of files: " + numberOfFiles,
        "                    Replication factor: " + replicationFactorPerFile,
        "            Successful file operations: " + successfulFileOps,
        "",
        "        # maps that missed the barrier: " + lateMaps,
        "                          # exceptions: " + numOfExceptions,
        "",
        resultTPSLine1, resultTPSLine2, resultALLine1, resultALLine2,
        "",
        "                 RAW DATA: AL Total #1: " + totalTimeAL1,
        "                 RAW DATA: AL Total #2: " + totalTimeAL2,
        "              RAW DATA: TPS Total (ms): " + totalTimeTPmS,
        "       RAW DATA: Longest Map Time (ms): " + longestMapTimeTPmS,
        "                   RAW DATA: Late maps: " + lateMaps,
        "             RAW DATA: # of exceptions: " + numOfExceptions,
        "" };
    try (PrintStream res = new PrintStream(new FileOutputStream(new File(DEFAULT_RES_FILE_NAME), true))) {
        // Write to a file and also dump to log
        for (String resultLine : resultLines) {
            LOG.info(resultLine);
            res.println(resultLine);
        }
    }
    if (numOfExceptions >= MAX_OPERATION_EXCEPTIONS) {
        return -1;
    }
    return 0;
}
Also used: Path (org.apache.hadoop.fs.Path), PrintStream (java.io.PrintStream), FileStatus (org.apache.hadoop.fs.FileStatus), InputStreamReader (java.io.InputStreamReader), DataInputStream (java.io.DataInputStream), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Date (java.util.Date), StringTokenizer (java.util.StringTokenizer), FileSystem (org.apache.hadoop.fs.FileSystem), FileOutputStream (java.io.FileOutputStream), BufferedReader (java.io.BufferedReader), SequenceFile (org.apache.hadoop.io.SequenceFile), File (java.io.File)
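
The metrics derived in analyzeResults() are straightforward arithmetic over the aggregated counters. A minimal sketch with made-up inputs; variable names mirror the method above, but the values and the class name are illustrative only.

public class NNBenchMathDemo {
    public static void main(String[] args) {
        long successfulFileOps = 1_000L;          // illustrative counters
        long totalTimeTPmS = 20_000L;             // total time for all ops, in ms
        long mapStartTimeTPmS = 0L, mapEndTimeTPmS = 10_000L;

        // Cluster TPS: operations per second over the longest-running map's wall time.
        double longestMapTimeTPmS = mapEndTimeTPmS - mapStartTimeTPmS;
        double totalTimeTPS = (longestMapTimeTPmS == 0)
            ? 1000.0 * successfulFileOps
            : (1000.0 * successfulFileOps) / longestMapTimeTPmS;

        // Average execution time per operation, in ms.
        double averageExecutionTime = (totalTimeTPmS == 0)
            ? successfulFileOps
            : (double) totalTimeTPmS / successfulFileOps;

        System.out.println("TPS: " + (int) totalTimeTPS);                  // 100
        System.out.println("Avg exec time (ms): " + averageExecutionTime); // 20.0
    }
}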

Aggregations

Types most frequently used together with Date across the indexed projects, with usage counts:

Date (java.util.Date): 11526
Test (org.junit.Test): 2903
SimpleDateFormat (java.text.SimpleDateFormat): 1601
ArrayList (java.util.ArrayList): 1066
Calendar (java.util.Calendar): 809
HashMap (java.util.HashMap): 615
IOException (java.io.IOException): 606
File (java.io.File): 577
ParseException (java.text.ParseException): 525
GregorianCalendar (java.util.GregorianCalendar): 425
List (java.util.List): 336
DateFormat (java.text.DateFormat): 313
Map (java.util.Map): 296
DateTime (org.joda.time.DateTime): 239
Test (org.testng.annotations.Test): 210
HashSet (java.util.HashSet): 190
SQLException (java.sql.SQLException): 167
LocalDate (org.joda.time.LocalDate): 155
BigDecimal (java.math.BigDecimal): 148
JSONObject (org.json.JSONObject): 148