Search in sources :

Example 1 with StartupProgress

use of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress in project hadoop by apache.

The class BlockManagerSafeMode, method leaveSafeMode.

/**
   * Leave start up safe mode.
   *
   * <p>If there are bytes in "future" blocks (blocks with a generation stamp
   * from the future, typically left over after a rollback), exit is refused
   * unless {@code force} is set, because leaving would delete those bytes.
   *
   * @param force - true to force exit even if that causes loss of the bytes
   *              in future blocks
   * @return true if it leaves safe mode successfully else false
   */
boolean leaveSafeMode(boolean force) {
    assert namesystem.hasWriteLock() : "Leaving safe mode needs write lock!";
    final long bytesInFuture = numberOfBytesInFutureBlocks.get();
    if (bytesInFuture > 0) {
        if (force) {
            LOG.warn("Leaving safe mode due to forceExit. This will cause a data " + "loss of {} byte(s).", bytesInFuture);
            // The caller accepted the loss; reset the counter so a later
            // (non-forced) exit is not blocked again.
            numberOfBytesInFutureBlocks.set(0);
        } else {
            LOG.error("Refusing to leave safe mode without a force flag. " + "Exiting safe mode will cause a deletion of {} byte(s). Please " + "use -forceExit flag to exit safe mode forcefully if data loss is" + " acceptable.", bytesInFuture);
            return false;
        }
    } else if (force) {
        // Fixed message typo: "normal exist" -> "normal exit".
        LOG.warn("forceExit used when normal exit would suffice. Treating " + "force exit as normal safe mode exit.");
    }
    // In the standby, do not populate repl queues
    if (!blockManager.isPopulatingReplQueues() && blockManager.shouldPopulateReplQueues()) {
        blockManager.initializeReplQueues();
    }
    // Only announce the transition if we were actually in safe mode.
    if (status != BMSafeModeStatus.OFF) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
    }
    status = BMSafeModeStatus.OFF;
    final long timeInSafemode = monotonicNow() - startTime;
    NameNode.stateChangeLog.info("STATE* Leaving safe mode after {} secs", timeInSafemode / 1000);
    NameNode.getNameNodeMetrics().setSafeModeTime(timeInSafemode);
    final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
    NameNode.stateChangeLog.info("STATE* Network topology has {} racks and {}" + " datanodes", nt.getNumOfRacks(), nt.getNumOfLeaves());
    NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has {} blocks", blockManager.numOfUnderReplicatedBlocks());
    namesystem.startSecretManagerIfNecessary();
    // If startup has not yet completed, end safemode phase.
    StartupProgress prog = NameNode.getStartupProgress();
    if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
        prog.endStep(Phase.SAFEMODE, BlockManagerSafeMode.STEP_AWAITING_REPORTED_BLOCKS);
        prog.endPhase(Phase.SAFEMODE);
    }
    return true;
}
Also used : NetworkTopology(org.apache.hadoop.net.NetworkTopology) StartupProgress(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)

Example 2 with StartupProgress

use of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress in project hadoop by apache.

The class BlockManagerSafeMode, method incrementSafeBlockCount.

/**
   * Increment number of safe blocks if current block has reached minimal
   * replication.
   * If safe mode is not currently on, this is a no-op.
   * @param storageNum  current number of replicas or number of internal blocks
   *                    of a striped block group
   * @param storedBlock current storedBlock which is either a
   *                    BlockInfoContiguous or a BlockInfoStriped
   */
synchronized void incrementSafeBlockCount(int storageNum, BlockInfo storedBlock) {
    assert namesystem.hasWriteLock();
    // Nothing to do once safe mode has been left.
    if (status == BMSafeModeStatus.OFF) {
        return;
    }
    // Threshold at which the block counts as "safe": the number of real data
    // blocks for a striped group, or the configured minimal replication.
    final int threshold;
    if (storedBlock.isStriped()) {
        threshold = ((BlockInfoStriped) storedBlock).getRealDataBlockNum();
    } else {
        threshold = safeReplication;
    }
    if (storageNum != threshold) {
        return;
    }
    this.blockSafe++;
    // Report startup progress only if we haven't completed startup yet.
    StartupProgress prog = NameNode.getStartupProgress();
    if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
        // Counter is fetched lazily on first use and cached afterwards.
        if (this.awaitingReportedBlocksCounter == null) {
            this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
        }
        this.awaitingReportedBlocksCounter.increment();
    }
    checkSafeMode();
}
Also used : StartupProgress(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)

Example 3 with StartupProgress

use of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress in project hadoop by apache.

The class TestStartupProgressServlet, method setUp.

@Before
public void setUp() throws Exception {
    // Request/response mocks; the response writes into an in-memory buffer so
    // each test can inspect the JSON the servlet produced.
    req = mock(HttpServletRequest.class);
    respOut = new ByteArrayOutputStream();
    resp = mock(HttpServletResponse.class);
    PrintWriter out = new PrintWriter(respOut);
    when(resp.getWriter()).thenReturn(out);
    // Servlet context carrying a fresh StartupProgress instance under the
    // attribute key the servlet looks up.
    startupProgress = new StartupProgress();
    ServletContext context = mock(ServletContext.class);
    when(context.getAttribute(NameNodeHttpServer.STARTUP_PROGRESS_ATTRIBUTE_KEY)).thenReturn(startupProgress);
    // Partial mock: doGet executes for real, while the context lookup is stubbed.
    servlet = mock(StartupProgressServlet.class);
    when(servlet.getServletContext()).thenReturn(context);
    doCallRealMethod().when(servlet).doGet(any(HttpServletRequest.class), any(HttpServletResponse.class));
}
Also used : HttpServletRequest(javax.servlet.http.HttpServletRequest) ServletContext(javax.servlet.ServletContext) HttpServletResponse(javax.servlet.http.HttpServletResponse) ByteArrayOutputStream(java.io.ByteArrayOutputStream) StartupProgress(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress) PrintWriter(java.io.PrintWriter) Before(org.junit.Before)

Example 4 with StartupProgress

use of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress in project hadoop by apache.

The class StartupProgressServlet, method doGet.

/**
 * Serves the NameNode startup progress as a JSON document: overall elapsed
 * time and percent complete, then one object per phase, each containing its
 * steps. Field order is fixed by the write sequence below.
 */
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    resp.setContentType("application/json; charset=UTF-8");
    StartupProgress prog = NameNodeHttpServer.getStartupProgressFromContext(getServletContext());
    // NOTE(review): createView() presumably yields a point-in-time snapshot so
    // the output is consistent while progress advances — confirm in StartupProgress.
    StartupProgressView view = prog.createView();
    JsonGenerator json = new JsonFactory().createGenerator(resp.getWriter());
    try {
        json.writeStartObject();
        json.writeNumberField(ELAPSED_TIME, view.getElapsedTime());
        json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete());
        json.writeArrayFieldStart(PHASES);
        for (Phase phase : view.getPhases()) {
            json.writeStartObject();
            json.writeStringField(NAME, phase.getName());
            json.writeStringField(DESC, phase.getDescription());
            json.writeStringField(STATUS, view.getStatus(phase).toString());
            json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase));
            json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase));
            // File/size fields are optional; helpers skip them when absent.
            writeStringFieldIfNotNull(json, FILE, view.getFile(phase));
            writeNumberFieldIfDefined(json, SIZE, view.getSize(phase));
            json.writeArrayFieldStart(STEPS);
            for (Step step : view.getSteps(phase)) {
                json.writeStartObject();
                // A step's type (and thus name/description) may be null.
                StepType type = step.getType();
                if (type != null) {
                    json.writeStringField(NAME, type.getName());
                    json.writeStringField(DESC, type.getDescription());
                }
                json.writeNumberField(COUNT, view.getCount(phase, step));
                writeStringFieldIfNotNull(json, FILE, step.getFile());
                writeNumberFieldIfDefined(json, SIZE, step.getSize());
                json.writeNumberField(TOTAL, view.getTotal(phase, step));
                json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase, step));
                json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase, step));
                json.writeEndObject();
            }
            json.writeEndArray();
            json.writeEndObject();
        }
        json.writeEndArray();
        json.writeEndObject();
    } finally {
        // Close (and flush) the generator even if serialization failed midway.
        IOUtils.cleanup(LOG, json);
    }
}
Also used : Phase(org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase) StepType(org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType) StartupProgressView(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressView) JsonFactory(com.fasterxml.jackson.core.JsonFactory) JsonGenerator(com.fasterxml.jackson.core.JsonGenerator) Step(org.apache.hadoop.hdfs.server.namenode.startupprogress.Step) StartupProgress(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)

Example 5 with StartupProgress

use of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress in project hadoop by apache.

The class FSEditLogLoader, method loadFSEdits.

/**
   * Load an edit log and apply the changes to the in-memory namespace.
   * This is where edits that were previously written to disk get replayed.
   * The edit stream is always closed and the namesystem write lock released
   * before the LOADING_EDITS progress step is marked finished.
   */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = createStartupProgressStep(edits);
    prog.beginStep(Phase.LOADING_EDITS, step);
    fsNamesys.writeLock();
    try {
        final long loadStart = monotonicNow();
        FSImage.LOG.info("Start loading edits file " + edits.getName());
        final long editCount = loadEditRecords(edits, false, expectedStartingTxId, startOpt, recovery);
        FSImage.LOG.info("Edits file " + edits.getName() + " of size " + edits.length() + " edits # " + editCount + " loaded in " + (monotonicNow() - loadStart) / 1000 + " seconds");
        return editCount;
    } finally {
        // Cleanup runs in this order: close stream, release write lock,
        // then record the progress step as ended.
        edits.close();
        fsNamesys.writeUnlock("loadFSEdits");
        prog.endStep(Phase.LOADING_EDITS, step);
    }
}
Also used : Step(org.apache.hadoop.hdfs.server.namenode.startupprogress.Step) StartupProgress(org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)

Aggregations

StartupProgress (org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)11 IOException (java.io.IOException)3 Step (org.apache.hadoop.hdfs.server.namenode.startupprogress.Step)3 JsonFactory (com.fasterxml.jackson.core.JsonFactory)1 JsonGenerator (com.fasterxml.jackson.core.JsonGenerator)1 ByteArrayOutputStream (java.io.ByteArrayOutputStream)1 File (java.io.File)1 FileNotFoundException (java.io.FileNotFoundException)1 PrintWriter (java.io.PrintWriter)1 InetSocketAddress (java.net.InetSocketAddress)1 ArrayList (java.util.ArrayList)1 EnumMap (java.util.EnumMap)1 ServletContext (javax.servlet.ServletContext)1 HttpServletRequest (javax.servlet.http.HttpServletRequest)1 HttpServletResponse (javax.servlet.http.HttpServletResponse)1 InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)1 StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)1 RollingUpgradeOp (org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp)1 FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)1 NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile)1