Example 76 with Date

Use of java.util.Date in project hadoop by apache.

From the class JobBlock, method render():

@Override
protected void render(Block html) {
    String jid = $(JOB_ID);
    if (jid.isEmpty()) {
        html.p()._("Sorry, can't do anything without a JobID.")._();
        return;
    }
    JobId jobID = MRApps.toJobID(jid);
    Job job = appContext.getJob(jobID);
    if (job == null) {
        html.p()._("Sorry, ", jid, " not found.")._();
        return;
    }
    List<AMInfo> amInfos = job.getAMInfos();
    String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
    JobInfo jinfo = new JobInfo(job, true);
    info("Job Overview")._("Job Name:", jinfo.getName())._("User Name:", jinfo.getUserName())._("Queue Name:", jinfo.getQueueName())._("State:", jinfo.getState())._("Uberized:", jinfo.isUberized())._("Started:", new Date(jinfo.getStartTime()))._("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
    DIV<Hamlet> div = html._(InfoBlock.class).div(_INFO_WRAP);
    // MRAppMasters Table
    TABLE<DIV<Hamlet>> table = div.table("#job");
    table.tr().
        th(amString)._().
      tr().
        th(_TH, "Attempt Number").
        th(_TH, "Start Time").
        th(_TH, "Node").
        th(_TH, "Logs")._();
    for (AMInfo amInfo : amInfos) {
        AMAttemptInfo attempt = new AMAttemptInfo(amInfo, jinfo.getId(), jinfo.getUserName());
        table.tr().
            td(String.valueOf(attempt.getAttemptId())).
            td(new Date(attempt.getStartTime()).toString()).
            td().
              a(".nodelink",
                  url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()),
                  attempt.getNodeHttpAddress())._().
            td().
              a(".logslink", url(attempt.getLogsLink()), "logs")._()._();
    }
    table._();
    div._();
    html.div(_INFO_WRAP).table("#job").tr().th(_TH, "Task Type").th(_TH, "Progress").th(_TH, "Total").th(_TH, "Pending").th(_TH, "Running").th(_TH, "Complete")._().tr(_ODD).th("Map").td().div(_PROGRESSBAR).$title(// tooltip
    join(jinfo.getMapProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._().td().a(url("tasks", jid, "m", "ALL"), String.valueOf(jinfo.getMapsTotal()))._().td().a(url("tasks", jid, "m", "PENDING"), String.valueOf(jinfo.getMapsPending()))._().td().a(url("tasks", jid, "m", "RUNNING"), String.valueOf(jinfo.getMapsRunning()))._().td().a(url("tasks", jid, "m", "COMPLETED"), String.valueOf(jinfo.getMapsCompleted()))._()._().tr(_EVEN).th("Reduce").td().div(_PROGRESSBAR).$title(// tooltip
    join(jinfo.getReduceProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._().td().a(url("tasks", jid, "r", "ALL"), String.valueOf(jinfo.getReducesTotal()))._().td().a(url("tasks", jid, "r", "PENDING"), String.valueOf(jinfo.getReducesPending()))._().td().a(url("tasks", jid, "r", "RUNNING"), String.valueOf(jinfo.getReducesRunning()))._().td().a(url("tasks", jid, "r", "COMPLETED"), String.valueOf(jinfo.getReducesCompleted()))._()._()._().table("#job").tr().th(_TH, "Attempt Type").th(_TH, "New").th(_TH, "Running").th(_TH, "Failed").th(_TH, "Killed").th(_TH, "Successful")._().tr(_ODD).th("Maps").td().a(url("attempts", jid, "m", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulMapAttempts()))._()._().tr(_EVEN).th("Reduces").td().a(url("attempts", jid, "r", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulReduceAttempts()))._()._()._()._();
}
Also used: InfoBlock(org.apache.hadoop.yarn.webapp.view.InfoBlock) Hamlet(org.apache.hadoop.yarn.webapp.hamlet.Hamlet) Date(java.util.Date) AMInfo(org.apache.hadoop.mapreduce.v2.api.records.AMInfo) DIV(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV) JobInfo(org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) AMAttemptInfo(org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo)
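
What this example illustrates about java.util.Date: the block keeps timestamps as epoch milliseconds (jinfo.getStartTime(), attempt.getStartTime()) and wraps them in new Date(...) only at render time. A minimal standalone sketch of that conversion (class name and millisecond value are invented for illustration):

import java.util.Date;

public class StartTimeDisplay {
    public static void main(String[] args) {
        // Hypothetical job start time, in milliseconds since the epoch.
        long startTimeMillis = 1700000000000L;
        // Date's long constructor interprets the value as milliseconds
        // since 1970-01-01T00:00:00 UTC.
        Date started = new Date(startTimeMillis);
        // toString() renders the instant in the JVM's default time zone.
        System.out.println("Started: " + started);
    }
}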

Example 77 with Date

Use of java.util.Date in project hadoop by apache.

From the class TimedOutTestsListener, method buildThreadDiagnosticString():

public static String buildThreadDiagnosticString() {
    StringWriter sw = new StringWriter();
    PrintWriter output = new PrintWriter(sw);
    DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
    output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
    output.println();
    output.println(buildThreadDump());
    String deadlocksInfo = buildDeadlockInfo();
    if (deadlocksInfo != null) {
        output.println("====> DEADLOCKS DETECTED <====");
        output.println();
        output.println(deadlocksInfo);
    }
    return sw.toString();
}
Also used: StringWriter(java.io.StringWriter) SimpleDateFormat(java.text.SimpleDateFormat) DateFormat(java.text.DateFormat) Date(java.util.Date) PrintWriter(java.io.PrintWriter)
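
Two properties of the formatting above are easy to miss: lowercase hh in the pattern is the 12-hour clock field (01-12), where HH would be the 24-hour form, and SimpleDateFormat is not thread-safe, which is harmless here only because the formatter is method-local. A small runnable sketch contrasting the two patterns (class name invented for illustration):

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

public class TimestampPatterns {
    public static void main(String[] args) {
        // Same pattern as the listener above: 'hh' is the 12-hour clock.
        DateFormat twelveHour = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
        // 'HH' gives the 24-hour clock instead.
        DateFormat twentyFourHour = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
        Date now = new Date();
        System.out.println("12-hour: " + twelveHour.format(now));
        System.out.println("24-hour: " + twentyFourHour.format(now));
    }
}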

Example 78 with Date

Use of java.util.Date in project hadoop by apache.

From the class TestCacheDirectives, method testCacheManagerRestart():

@Test(timeout = 60000)
public void testCacheManagerRestart() throws Exception {
    SecondaryNameNode secondary = null;
    try {
        // Start a secondary namenode
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        secondary = new SecondaryNameNode(conf);
        // Create and validate a pool
        final String pool = "poolparty";
        String groupName = "partygroup";
        FsPermission mode = new FsPermission((short) 0777);
        long limit = 747;
        dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit));
        RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        CachePoolInfo info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        // Create some cache entries
        int numEntries = 10;
        String entryPrefix = "/party-";
        long prevId = -1;
        final Date expiry = new Date();
        for (int i = 0; i < numEntries; i++) {
            prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build());
        }
        RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        // Checkpoint once to set some cache pools and directives on 2NN side
        secondary.doCheckpoint();
        // Add some more CacheManager state
        final String imagePool = "imagePool";
        dfs.addCachePool(new CachePoolInfo(imagePool));
        prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build());
        // Save a new image to force a fresh fsimage download
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Checkpoint again forcing a reload of FSN state
        boolean fetchImage = secondary.doCheckpoint();
        assertTrue("Secondary should have fetched a new fsimage from NameNode", fetchImage);
        // Remove temp pool and directive
        dfs.removeCachePool(imagePool);
        // Restart namenode
        cluster.restartNameNode();
        // Check that state came back up
        pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
            assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build());
        assertEquals(prevId + 1, nextId);
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Date(java.util.Date) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) CacheDirectiveEntry(org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry) FsPermission(org.apache.hadoop.fs.permission.FsPermission) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo) CachePoolEntry(org.apache.hadoop.hdfs.protocol.CachePoolEntry) Test(org.junit.Test)
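
The Date usage that makes this test work is the exact round trip between a Date and its epoch-millisecond value: expiry.getTime() is handed to CacheDirectiveInfo.Expiration.newAbsolute(...), and after the restart the same millis are expected back from getExpiration().getMillis(). A standalone sketch of that round trip, with no Hadoop dependencies (class name invented):

import java.util.Date;

public class ExpiryRoundTrip {
    public static void main(String[] args) {
        Date expiry = new Date();          // captured once, reused for every entry
        long millis = expiry.getTime();    // the value handed to newAbsolute(...)
        Date restored = new Date(millis);  // the value expected back after restart
        // Date equality compares millisecond values, so the round trip is exact.
        if (!expiry.equals(restored) || expiry.getTime() != restored.getTime()) {
            throw new AssertionError("round trip changed the expiry");
        }
        System.out.println("expiry survives the round trip: " + restored);
    }
}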

Example 79 with Date

Use of java.util.Date in project hadoop by apache.

From the class NamenodeFsck, method fsck():

/**
   * Check files on DFS, starting from the indicated path.
   */
public void fsck() {
    final long startTime = Time.monotonicNow();
    try {
        if (blockIds != null) {
            String[] blocks = blockIds.split(" ");
            StringBuilder sb = new StringBuilder();
            sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " at " + new Date());
            out.println(sb);
            sb.append(" for blockIds: \n");
            for (String blk : blocks) {
                if (blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
                    out.println("Incorrect blockId format: " + blk);
                    continue;
                }
                out.print("\n");
                blockIdCK(blk);
                sb.append(blk + "\n");
            }
            LOG.info(sb);
            namenode.getNamesystem().logFsckEvent("/", remoteAddress);
            out.flush();
            return;
        }
        String msg = "FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " for path " + path + " at " + new Date();
        LOG.info(msg);
        out.println(msg);
        namenode.getNamesystem().logFsckEvent(path, remoteAddress);
        if (snapshottableDirs != null) {
            SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer().getSnapshottableDirListing();
            if (snapshotDirs != null) {
                for (SnapshottableDirectoryStatus dir : snapshotDirs) {
                    snapshottableDirs.add(dir.getFullPath().toString());
                }
            }
        }
        final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
        if (file != null) {
            if (showCorruptFileBlocks) {
                listCorruptFileBlocks();
                return;
            }
            if (this.showStoragePolcies) {
                storageTypeSummary = new StoragePolicySummary(namenode.getNamesystem().getBlockManager().getStoragePolicies());
            }
            Result replRes = new ReplicationResult(conf);
            Result ecRes = new ErasureCodingResult(conf);
            check(path, file, replRes, ecRes);
            out.print("\nStatus: ");
            out.println(replRes.isHealthy() && ecRes.isHealthy() ? "HEALTHY" : "CORRUPT");
            out.println(" Number of data-nodes:\t" + totalDatanodes);
            out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
            out.println(" Total dirs:\t\t\t" + totalDirs);
            out.println(" Total symlinks:\t\t" + totalSymlinks);
            out.println("\nReplicated Blocks:");
            out.println(replRes);
            out.println("\nErasure Coded Block Groups:");
            out.println(ecRes);
            if (this.showStoragePolcies) {
                out.print(storageTypeSummary);
            }
            out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds"));
            // If there were internal errors during the fsck operation, we want
            // to surface them as a failure, even if they were not immediately
            // fatal.  Otherwise many unit tests will pass even when there are bugs.
            if (internalError) {
                throw new IOException("fsck encountered internal errors!");
            }
            // Clients scan for the HEALTHY/CORRUPT string to determine the status
            // of the filesystem, so it must be the last line of the report.
            if (replRes.isHealthy() && ecRes.isHealthy()) {
                out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
            } else {
                out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
            }
        } else {
            out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
        }
    } catch (Exception e) {
        String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
        LOG.warn(errMsg, e);
        out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds"));
        out.println(e.getMessage());
        out.print("\n\n" + errMsg);
    } finally {
        out.close();
    }
}
Also used: IOException(java.io.IOException) Date(java.util.Date) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)
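
Note the division of labor between clocks: new Date() appears only in the human-readable banners, while the elapsed time is measured with Time.monotonicNow(), which is not affected by wall-clock adjustments such as NTP steps. A minimal sketch of the same split using plain JDK calls, with System.nanoTime() standing in for Hadoop's Time.monotonicNow() (class name invented):

import java.util.Date;
import java.util.concurrent.TimeUnit;

public class BannerTiming {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();                       // monotonic: safe for durations
        System.out.println("Work started at " + new Date()); // wall clock: for humans
        Thread.sleep(50);                                     // stand-in for the real work
        long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        System.out.println("Work ended at " + new Date() + " in " + elapsedMs + " milliseconds");
    }
}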

Example 80 with Date

Use of java.util.Date in project hadoop by apache.

From the class RollingWindow, method getSum():

/**
   * Get the value represented by this window at the specified time.
   * <p/>
   *
   * If time lags behind the latest update time, the newer updates are still
   * included in the sum.
   *
   * @param time the timestamp, in epoch milliseconds, at which to read the window
   * @return the number of events that occurred in the past period
   */
public long getSum(long time) {
    long sum = 0;
    for (Bucket bucket : buckets) {
        boolean stale = bucket.isStaleNow(time);
        if (!stale) {
            sum += bucket.value.get();
        }
        if (LOG.isDebugEnabled()) {
            long bucketTime = bucket.updateTime.get();
            String timeStr = new Date(bucketTime).toString();
            LOG.debug("Sum: + " + sum + " Bucket: updateTime: " + timeStr + " (" + bucketTime + ") isStale " + stale + " at " + time);
        }
    }
    return sum;
}
Also used: Date(java.util.Date)
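
The Date here exists only for debug output, and both the Date allocation and its toString() happen inside the isDebugEnabled() guard, so the hot path pays nothing when debug logging is off. A sketch of the same guard pattern using java.util.logging, with invented names (GuardedSum, sumBuckets) and a simplified staleness check:

import java.util.Date;
import java.util.logging.Level;
import java.util.logging.Logger;

public class GuardedSum {
    private static final Logger LOG = Logger.getLogger(GuardedSum.class.getName());

    // Sums the non-stale bucket values; a bucket is stale when its last
    // update is older than windowMs relative to 'now'.
    static long sumBuckets(long[] values, long[] updateTimes, long now, long windowMs) {
        long sum = 0;
        for (int i = 0; i < values.length; i++) {
            boolean stale = now - updateTimes[i] > windowMs;
            if (!stale) {
                sum += values[i];
            }
            if (LOG.isLoggable(Level.FINE)) {
                // The Date is constructed only when FINE logging is enabled.
                LOG.fine("Sum: " + sum + " Bucket updateTime: " + new Date(updateTimes[i])
                    + " isStale " + stale + " at " + now);
            }
        }
        return sum;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long[] values = {3, 5, 7};
        long[] updateTimes = {now - 100, now - 100_000, now - 200};
        // The middle bucket is stale for a 60-second window, so sum = 3 + 7 = 10.
        System.out.println("sum = " + sumBuckets(values, updateTimes, now, 60_000));
    }
}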

Aggregations

Date (java.util.Date): 11526
Test (org.junit.Test): 2903
SimpleDateFormat (java.text.SimpleDateFormat): 1601
ArrayList (java.util.ArrayList): 1066
Calendar (java.util.Calendar): 809
HashMap (java.util.HashMap): 615
IOException (java.io.IOException): 606
File (java.io.File): 577
ParseException (java.text.ParseException): 525
GregorianCalendar (java.util.GregorianCalendar): 425
List (java.util.List): 336
DateFormat (java.text.DateFormat): 313
Map (java.util.Map): 296
DateTime (org.joda.time.DateTime): 239
Test (org.testng.annotations.Test): 210
HashSet (java.util.HashSet): 190
SQLException (java.sql.SQLException): 167
LocalDate (org.joda.time.LocalDate): 155
BigDecimal (java.math.BigDecimal): 148
JSONObject (org.json.JSONObject): 148