
Example 1 with BlockConstructionStage

Use of org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage in project hadoop by apache.

From class DataNode, method transferReplicaForPipelineRecovery:

/**
   * Transfer a replica to the datanode targets.
   * @param b the block to transfer.
   *          The corresponding replica must be an RBW or a Finalized.
   *          Its GS and numBytes will be set to
   *          the stored GS and the visible length. 
   * @param targets targets to transfer the block to
   * @param client client name
   */
void transferReplicaForPipelineRecovery(final ExtendedBlock b, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final String client) throws IOException {
    final long storedGS;
    final long visible;
    final BlockConstructionStage stage;
    // Get replica information while holding the dataset lock.
    try (AutoCloseableLock lock = data.acquireDatasetLock()) {
        Block storedBlock = data.getStoredBlock(b.getBlockPoolId(), b.getBlockId());
        if (null == storedBlock) {
            throw new IOException(b + " not found in datanode.");
        }
        storedGS = storedBlock.getGenerationStamp();
        if (storedGS < b.getGenerationStamp()) {
            throw new IOException(storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);
        }
        // Update the genstamp with storedGS
        b.setGenerationStamp(storedGS);
        if (data.isValidRbw(b)) {
            stage = BlockConstructionStage.TRANSFER_RBW;
        } else if (data.isValidBlock(b)) {
            stage = BlockConstructionStage.TRANSFER_FINALIZED;
        } else {
            final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
            throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
        }
        visible = data.getReplicaVisibleLength(b);
    }
    // Set the visible length as the number of bytes to transfer.
    b.setNumBytes(visible);
    if (targets.length > 0) {
        new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
    }
}
Also used: AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), Block (org.apache.hadoop.hdfs.protocol.Block), IOException (java.io.IOException), BlockConstructionStage (org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage)
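
The heart of this method is a normalization done under the dataset lock: the stored generation stamp must be at least the requested one (the block is then rewritten with the stored GS and the replica's visible length), and the construction stage is chosen from the replica's state. The sketch below isolates that stage-selection rule; ReplicaState, Stage, and chooseStage are hypothetical stand-ins for illustration, not Hadoop APIs (the real checks are data.isValidRbw(b) and data.isValidBlock(b) in the listing above).

import java.io.IOException;

// Hypothetical stand-ins for illustration only.
enum ReplicaState { RBW, FINALIZED, OTHER }
enum Stage { TRANSFER_RBW, TRANSFER_FINALIZED }

final class StageChooser {
    // Mirrors the rule above: only RBW and finalized replicas may be
    // transferred for pipeline recovery; anything else is an error.
    static Stage chooseStage(ReplicaState state, String replicaString) throws IOException {
        switch (state) {
            case RBW:
                return Stage.TRANSFER_RBW;
            case FINALIZED:
                return Stage.TRANSFER_FINALIZED;
            default:
                throw new IOException("replica is neither RBW nor finalized, r=" + replicaString);
        }
    }
}

Note that the listing invokes DataTransfer.run() directly rather than starting a new thread, so the transfer to the recovery targets executes synchronously on the calling thread.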

Example 2 with BlockConstructionStage

Use of org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage in project hadoop by apache.

From class DataStreamer, method createBlockOutputStream:

// Connects to the first datanode in the pipeline.
// Returns true on success, false otherwise.
boolean createBlockOutputStream(DatanodeInfo[] nodes, StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
    if (nodes.length == 0) {
        LOG.info("nodes are empty for write pipeline of " + block);
        return false;
    }
    String firstBadLink = "";
    boolean checkRestart = false;
    if (LOG.isDebugEnabled()) {
        LOG.debug("pipeline = " + Arrays.toString(nodes) + ", " + this);
    }
    // persist blocks on namenode on next flush
    persistBlocks.set(true);
    int refetchEncryptionKey = 1;
    while (true) {
        boolean result = false;
        DataOutputStream out = null;
        try {
            assert null == s : "Previous socket unclosed";
            assert null == blockReplyStream : "Previous blockReplyStream unclosed";
            s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
            long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
            long readTimeout = dfsClient.getDatanodeReadTimeout(nodes.length);
            OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
            InputStream unbufIn = NetUtils.getInputStream(s, readTimeout);
            IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s, unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
            unbufOut = saslStreams.out;
            unbufIn = saslStreams.in;
            out = new DataOutputStream(new BufferedOutputStream(unbufOut, DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
            blockReplyStream = new DataInputStream(unbufIn);
            //
            // Xmit header info to datanode
            //
            BlockConstructionStage bcs = recoveryFlag ? stage.getRecoveryStage() : stage;
            // We cannot change the block length in 'block' as it counts the number
            // of bytes ack'ed.
            ExtendedBlock blockCopy = block.getCurrentBlock();
            blockCopy.setNumBytes(stat.getBlockSize());
            boolean[] targetPinnings = getPinnings(nodes);
            // send the request
            new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken, dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, nodes.length, block.getNumBytes(), bytesSent, newGS, checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile, (targetPinnings != null && targetPinnings[0]), targetPinnings);
            // receive ack for connect
            BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(blockReplyStream));
            Status pipelineStatus = resp.getStatus();
            firstBadLink = resp.getFirstBadLink();
            // A restart OOB ack means the datanode is going down for a
            // restart, not failing with a regular node error.
            if (PipelineAck.isRestartOOBStatus(pipelineStatus) && !errorState.isRestartingNode()) {
                checkRestart = true;
                throw new IOException("A datanode is restarting.");
            }
            String logInfo = "ack with firstBadLink as " + firstBadLink;
            DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
            assert null == blockStream : "Previous blockStream unclosed";
            blockStream = out;
            // success
            result = true;
            errorState.resetInternalError();
        } catch (IOException ie) {
            if (!errorState.isRestartingNode()) {
                LOG.info("Exception in createBlockOutputStream " + this, ie);
            }
            if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
                LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + nodes[0] + " : " + ie);
                // The encryption key used is invalid.
                refetchEncryptionKey--;
                dfsClient.clearDataEncryptionKey();
                // Don't give up on this node yet; retry the pipeline setup
                // with a new encryption key.
                continue;
            }
            // find the datanode that matches the reported firstBadLink
            if (firstBadLink.length() != 0) {
                for (int i = 0; i < nodes.length; i++) {
                    // NB: Unconditionally using the xfer addr w/o hostname
                    if (firstBadLink.equals(nodes[i].getXferAddr())) {
                        errorState.setBadNodeIndex(i);
                        break;
                    }
                }
            } else {
                assert !checkRestart;
                errorState.setBadNodeIndex(0);
            }
            final int i = errorState.getBadNodeIndex();
            // Check whether there is a restart worth waiting for.
            if (checkRestart && shouldWaitForRestart(i)) {
                errorState.initRestartingNode(i, "Datanode " + i + " is restarting: " + nodes[i]);
            }
            errorState.setInternalError();
            lastException.set(ie);
            // error
            result = false;
        } finally {
            if (!result) {
                IOUtils.closeSocket(s);
                s = null;
                IOUtils.closeStream(out);
                IOUtils.closeStream(blockReplyStream);
                blockReplyStream = null;
            }
        }
        return result;
    }
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), DataOutputStream (java.io.DataOutputStream), DataInputStream (java.io.DataInputStream), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), BufferedOutputStream (java.io.BufferedOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), InvalidEncryptionKeyException (org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException), BlockConstructionStage (org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage)
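
One pattern worth calling out is the bounded retry on InvalidEncryptionKeyException: refetchEncryptionKey starts at 1, so the client clears its cached data encryption key and reconnects exactly once before giving up. Below is a self-contained sketch of that single-retry shape; Connector, KeyInvalidException, and clearCachedKey are hypothetical stand-ins, not Hadoop APIs.

import java.io.IOException;

final class RetryOnceDemo {

    // Hypothetical stand-in for InvalidEncryptionKeyException.
    static class KeyInvalidException extends IOException {
        KeyInvalidException(String m) { super(m); }
    }

    // Hypothetical connection attempt, e.g. the SASL handshake plus
    // writeBlock header exchange in the listing above.
    interface Connector {
        void connect() throws IOException;
    }

    static boolean connectWithOneKeyRefetch(Connector c, Runnable clearCachedKey) {
        int refetchBudget = 1; // matches refetchEncryptionKey = 1 above
        while (true) {
            try {
                c.connect();
                return true; // success: pipeline is set up
            } catch (KeyInvalidException e) {
                if (refetchBudget-- > 0) {
                    clearCachedKey.run(); // force a fresh key on the next attempt
                    continue;             // retry exactly once
                }
                return false; // key still invalid after a refetch: give up
            } catch (IOException e) {
                return false; // any other error fails immediately
            }
        }
    }
}

The real method layers node-restart detection and bad-node bookkeeping on top of the same loop, which is why it tracks checkRestart and firstBadLink before deciding which node index to blame.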

Aggregations

Used in both examples: IOException (java.io.IOException), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockConstructionStage (org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage)

Used in one example: BufferedOutputStream (java.io.BufferedOutputStream), DataInputStream (java.io.DataInputStream), DataOutputStream (java.io.DataOutputStream), InputStream (java.io.InputStream), InterruptedIOException (java.io.InterruptedIOException), OutputStream (java.io.OutputStream), Block (org.apache.hadoop.hdfs.protocol.Block), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), InvalidEncryptionKeyException (org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), MultipleIOException (org.apache.hadoop.io.MultipleIOException), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)