
Example 6 with NameNodeFile

Use of org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile in project hadoop by apache.

From class FSImage, method loadFSImage.

/**
   * Choose latest image from one of the directories,
   * load it and merge with the edits.
   * 
   * Saving and loading fsimage should never trigger symlink resolution. 
   * The paths that are persisted do not have *intermediate* symlinks 
   * because intermediate symlinks are resolved at the time files, 
   * directories, and symlinks are created. All paths accessed while 
   * loading or saving fsimage should therefore only see symlinks as 
   * the final path component, and the functions called below do not
   * resolve symlinks that are the final path component.
   *
   * @return whether the image should be saved
   * @throws IOException
   */
private boolean loadFSImage(FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
    final boolean rollingRollback = RollingUpgradeStartupOption.ROLLBACK.matches(startOpt);
    final EnumSet<NameNodeFile> nnfs;
    if (rollingRollback) {
        // if this is a rollback of a rolling upgrade, load only the rollback image
        nnfs = EnumSet.of(NameNodeFile.IMAGE_ROLLBACK);
    } else {
        // otherwise we can load from both IMAGE and IMAGE_ROLLBACK
        nnfs = EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK);
    }
    final FSImageStorageInspector inspector = storage.readAndInspectDirs(nnfs, startOpt);
    isUpgradeFinalized = inspector.isUpgradeFinalized();
    List<FSImageFile> imageFiles = inspector.getLatestImages();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.LOADING_FSIMAGE);
    File phaseFile = imageFiles.get(0).getFile();
    prog.setFile(Phase.LOADING_FSIMAGE, phaseFile.getAbsolutePath());
    prog.setSize(Phase.LOADING_FSIMAGE, phaseFile.length());
    boolean needToSave = inspector.needToSave();
    Iterable<EditLogInputStream> editStreams = null;
    initEditLog(startOpt);
    if (NameNodeLayoutVersion.supports(LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
        // If we're open for write, we're either non-HA or we're the active NN,
        // so we must be able to load all the edits. If we're the standby NN,
        // it's OK not to be able to read all of the edits right now.
        // Meanwhile, for an HA upgrade we still write to the edit log, so
        // toAtLeastTxId needs to be set to the max-seen txid.
        // For a rollback in a rolling upgrade, toAtLeastTxId needs to be set
        // to the txid right before the upgrade marker.
        long toAtLeastTxId = editLog.isOpenForWrite() ? inspector.getMaxSeenTxId() : 0;
        if (rollingRollback) {
            // note that the first image in imageFiles is the special checkpoint
            // for the rolling upgrade
            toAtLeastTxId = imageFiles.get(0).getCheckpointTxId() + 2;
        }
        editStreams = editLog.selectInputStreams(imageFiles.get(0).getCheckpointTxId() + 1, toAtLeastTxId, recovery, false);
    } else {
        editStreams = FSImagePreTransactionalStorageInspector.getEditLogStreams(storage);
    }
    int maxOpSize = conf.getInt(DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT);
    for (EditLogInputStream elis : editStreams) {
        elis.setMaxOpSize(maxOpSize);
    }
    for (EditLogInputStream l : editStreams) {
        LOG.debug("Planning to load edit log stream: " + l);
    }
    if (!editStreams.iterator().hasNext()) {
        LOG.info("No edit log streams selected.");
    }
    FSImageFile imageFile = null;
    for (int i = 0; i < imageFiles.size(); i++) {
        try {
            imageFile = imageFiles.get(i);
            loadFSImageFile(target, recovery, imageFile, startOpt);
            break;
        } catch (IllegalReservedPathException ie) {
            throw new IOException("Failed to load image from " + imageFile, ie);
        } catch (Exception e) {
            LOG.error("Failed to load image from " + imageFile, e);
            target.clear();
            imageFile = null;
        }
    }
    // Failed to load any images, error out
    if (imageFile == null) {
        FSEditLog.closeAllStreams(editStreams);
        throw new IOException("Failed to load FSImage file, see error(s) " + "above for more info.");
    }
    prog.endPhase(Phase.LOADING_FSIMAGE);
    if (!rollingRollback) {
        long txnsAdvanced = loadEdits(editStreams, target, startOpt, recovery);
        needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(), txnsAdvanced);
    } else {
        // Trigger the rollback for the rolling upgrade. Here lastAppliedTxId
        // equals the last txid in the rollback fsimage.
        rollingRollback(lastAppliedTxId + 1, imageFiles.get(0).getCheckpointTxId());
        needToSave = false;
    }
    editLog.setNextTxId(lastAppliedTxId + 1);
    return needToSave;
}
Also used : FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File), IOException (java.io.IOException), FileNotFoundException (java.io.FileNotFoundException), InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException), StartupProgress (org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)
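The branching on rollingRollback above is the crux of this example: a rolling-upgrade rollback must load only the special rollback image, while any other startup may pick the newest image of either type. A minimal, self-contained sketch of just that selection logic (the chooseImageTypes helper is hypothetical, not part of Hadoop):

import java.util.EnumSet;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;

class ImageTypeSelection {
    // Hypothetical helper restating the EnumSet selection in loadFSImage:
    // a rollback of a rolling upgrade loads only IMAGE_ROLLBACK; any other
    // startup may load the latest of IMAGE or IMAGE_ROLLBACK.
    static EnumSet<NameNodeFile> chooseImageTypes(boolean rollingRollback) {
        return rollingRollback
            ? EnumSet.of(NameNodeFile.IMAGE_ROLLBACK)
            : EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK);
    }
}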

Example 7 with NameNodeFile

Use of org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile in project hadoop by apache.

From class FSImage, method saveNamespace.

/**
   * @param timeWindow a checkpoint is done if the latest checkpoint
   *                   was done more than this number of seconds ago.
   * @param txGap a checkpoint is done also if the gap between the latest tx id
   *              and the latest checkpoint is greater than this number.
   * @return true if a checkpoint has been made
   * @see #saveNamespace(FSNamesystem, NameNodeFile, Canceler)
   */
public synchronized boolean saveNamespace(long timeWindow, long txGap, FSNamesystem source) throws IOException {
    if (timeWindow > 0 || txGap > 0) {
        final FSImageStorageInspector inspector = storage.readAndInspectDirs(EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK), StartupOption.REGULAR);
        FSImageFile image = inspector.getLatestImages().get(0);
        File imageFile = image.getFile();
        final long checkpointTxId = image.getCheckpointTxId();
        final long checkpointAge = Time.now() - imageFile.lastModified();
        if (checkpointAge <= timeWindow * 1000 && checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) {
            return false;
        }
    }
    saveNamespace(source, NameNodeFile.IMAGE, null);
    return true;
}
Also used : FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File)
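The guard in saveNamespace skips a checkpoint only when both thresholds hold: the latest image is recent enough and close enough to the current txid. A standalone sketch of that predicate (needsCheckpoint and its parameters are illustrative, not Hadoop API):

class CheckpointPolicy {
    // Checkpoint unless the latest image is younger than timeWindowSec seconds
    // AND within txGap transactions of the last applied/written txid.
    static boolean needsCheckpoint(long timeWindowSec, long txGap,
            long checkpointAgeMs, long checkpointTxId, long lastTxId) {
        boolean recentEnough = checkpointAgeMs <= timeWindowSec * 1000;
        boolean closeEnough = checkpointTxId >= lastTxId - txGap;
        return !(recentEnough && closeEnough);
    }
}

For example, with timeWindow = 3600 and txGap = 1000000, an image written 30 minutes ago that is only 500000 transactions behind the current txid satisfies both conditions, so saveNamespace returns false without writing a new image.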

Example 8 with NameNodeFile

Use of org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile in project hadoop by apache.

From class TransferFsImage, method uploadImage.

/*
   * Uploads the image file using the HTTP PUT method.
   */
private static void uploadImage(URL url, Configuration conf, NNStorage storage, NameNodeFile nnf, long txId, Canceler canceler) throws IOException {
    File imageFile = storage.findImageFile(nnf, txId);
    if (imageFile == null) {
        throw new IOException("Could not find image with txid " + txId);
    }
    HttpURLConnection connection = null;
    try {
        URIBuilder uriBuilder = new URIBuilder(url.toURI());
        // Write all params for the image upload request into the query string;
        // the request body contains the image to be uploaded.
        Map<String, String> params = ImageServlet.getParamsForPutImage(storage, txId, imageFile.length(), nnf);
        for (Entry<String, String> entry : params.entrySet()) {
            uriBuilder.addParameter(entry.getKey(), entry.getValue());
        }
        URL urlWithParams = uriBuilder.build().toURL();
        connection = (HttpURLConnection) connectionFactory.openConnection(urlWithParams, UserGroupInformation.isSecurityEnabled());
        // Set the request to PUT
        connection.setRequestMethod("PUT");
        connection.setDoOutput(true);
        int chunkSize = conf.getInt(DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY, DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT);
        if (imageFile.length() > chunkSize) {
            // Use chunked streaming mode to support uploads of 2GB+ files and
            // to avoid internal buffering. This mode should be used only when
            // more than chunkSize data is to be uploaded; otherwise the upload
            // may sometimes not happen.
            connection.setChunkedStreamingMode(chunkSize);
        }
        setTimeout(connection);
        // set headers for verification
        ImageServlet.setVerificationHeadersForPut(connection, imageFile);
        // Write the file to the output stream.
        writeFileToPutRequest(conf, connection, imageFile, canceler);
        int responseCode = connection.getResponseCode();
        if (responseCode != HttpURLConnection.HTTP_OK) {
            throw new HttpPutFailedException(String.format("Image uploading failed, status: %d, url: %s, message: %s", responseCode, urlWithParams, connection.getResponseMessage()), responseCode);
        }
    } catch (AuthenticationException | URISyntaxException e) {
        throw new IOException(e);
    } finally {
        if (connection != null) {
            connection.disconnect();
        }
    }
}
Also used : AuthenticationException (org.apache.hadoop.security.authentication.client.AuthenticationException), HttpPutFailedException (org.apache.hadoop.hdfs.server.common.HttpPutFailedException), IOException (java.io.IOException), URISyntaxException (java.net.URISyntaxException), URL (java.net.URL), URIBuilder (org.apache.http.client.utils.URIBuilder), HttpURLConnection (java.net.HttpURLConnection), File (java.io.File), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile)
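The setChunkedStreamingMode call above is what makes 2GB+ uploads possible: without it, HttpURLConnection buffers the entire fixed-length body in memory before sending. A minimal sketch of a chunked PUT using the same java.net API (the URL and file path are placeholders, not anything from Hadoop):

import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;

class ChunkedPut {
    public static void main(String[] args) throws IOException {
        File file = new File("/tmp/example.bin");            // placeholder path
        URL url = new URL("http://localhost:8080/upload");   // placeholder URL
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);
        // Stream the body in 64 KB chunks so the JDK never buffers the
        // whole file; this mirrors connection.setChunkedStreamingMode above.
        conn.setChunkedStreamingMode(64 * 1024);
        try (OutputStream out = conn.getOutputStream();
             InputStream in = new FileInputStream(file)) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                out.write(buf, 0, n);
            }
        }
        int code = conn.getResponseCode();
        if (code != HttpURLConnection.HTTP_OK) {
            throw new IOException("PUT failed with HTTP status " + code);
        }
        conn.disconnect();
    }
}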

Aggregations

NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 8
File (java.io.File): 6
IOException (java.io.IOException): 5
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 5
URL (java.net.URL): 2
FileNotFoundException (java.io.FileNotFoundException): 1
HttpURLConnection (java.net.HttpURLConnection): 1
URISyntaxException (java.net.URISyntaxException): 1
ArrayList (java.util.ArrayList): 1
ServletContext (javax.servlet.ServletContext): 1
ServletException (javax.servlet.ServletException): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
HttpPutFailedException (org.apache.hadoop.hdfs.server.common.HttpPutFailedException): 1
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 1
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 1
FSImage (org.apache.hadoop.hdfs.server.namenode.FSImage): 1
TransferFsImage (org.apache.hadoop.hdfs.server.namenode.TransferFsImage): 1
NameNodeMetrics (org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics): 1
StartupProgress (org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress): 1
MD5Hash (org.apache.hadoop.io.MD5Hash): 1