
Example 1 with Status

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutputHelper, method createPipelineAckStatusGetter27:

private static PipelineAckStatusGetter createPipelineAckStatusGetter27() throws NoSuchMethodException {
    Method getFlagListMethod = PipelineAckProto.class.getMethod("getFlagList");
    @SuppressWarnings("rawtypes") Class<? extends Enum> ecnClass;
    try {
        ecnClass = Class.forName("org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck$ECN").asSubclass(Enum.class);
    } catch (ClassNotFoundException e) {
        String msg = "Couldn't properly initialize the PipelineAck.ECN class. Please " + "update your WAL Provider to not make use of the 'asyncfs' provider. See " + "HBASE-16110 for more information.";
        LOG.error(msg, e);
        throw new Error(msg, e);
    }
    @SuppressWarnings("unchecked") Enum<?> disabledECN = Enum.valueOf(ecnClass, "DISABLED");
    Method getReplyMethod = PipelineAckProto.class.getMethod("getReply", int.class);
    Method combineHeaderMethod = PipelineAck.class.getMethod("combineHeader", ecnClass, Status.class);
    Method getStatusFromHeaderMethod = PipelineAck.class.getMethod("getStatusFromHeader", int.class);
    return new PipelineAckStatusGetter() {

        @Override
        public Status get(PipelineAckProto ack) {
            try {
                @SuppressWarnings("unchecked") List<Integer> flagList = (List<Integer>) getFlagListMethod.invoke(ack);
                Integer headerFlag;
                if (flagList.isEmpty()) {
                    Status reply = (Status) getReplyMethod.invoke(ack, 0);
                    headerFlag = (Integer) combineHeaderMethod.invoke(null, disabledECN, reply);
                } else {
                    headerFlag = flagList.get(0);
                }
                return (Status) getStatusFromHeaderMethod.invoke(null, headerFlag);
            } catch (IllegalAccessException | InvocationTargetException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), PipelineAckProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto), Method (java.lang.reflect.Method), InvocationTargetException (java.lang.reflect.InvocationTargetException), List (java.util.List), ArrayList (java.util.ArrayList)
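
HBase resolves these methods reflectively so the same jar can run against Hadoop versions both with and without the 2.7-era PipelineAck.ECN API; Example 3 below shows the direct, non-reflective equivalent. A minimal usage sketch, assuming the getter is resolved once and cached (the STATUS_GETTER field and the ackedOk helper are hypothetical, not HBase code):

// Hypothetical caller: resolve the reflective getter once, then reuse it per ack.
private static final PipelineAckStatusGetter STATUS_GETTER;
static {
    try {
        STATUS_GETTER = createPipelineAckStatusGetter27();
    } catch (NoSuchMethodException e) {
        throw new Error("Incompatible Hadoop version on the classpath", e);
    }
}

static boolean ackedOk(PipelineAckProto ack) {
    // SUCCESS means the downstream datanodes accepted the packet.
    return STATUS_GETTER.get(ack) == Status.SUCCESS;
}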

Example 2 with Status

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status in project hadoop by apache.

From class DataXceiver, method writeBlock:

@Override
public void writeBlock(final ExtendedBlock block, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String clientname, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final DatanodeInfo srcDataNode, final BlockConstructionStage stage, final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, boolean allowLazyPersist, final boolean pinning, final boolean[] targetPinnings) throws IOException {
    previousOpClientName = clientname;
    updateCurrentThreadName("Receiving block " + block);
    final boolean isDatanode = clientname.length() == 0;
    final boolean isClient = !isDatanode;
    final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;
    allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
    long size = 0;
    // reply to upstream datanode or client 
    final DataOutputStream replyOut = getBufferedOutputStream();
    checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE);
    // check single target for transfer-RBW/Finalized 
    if (isTransfer && targets.length > 0) {
        throw new IOException(stage + " does not support multiple targets " + Arrays.asList(targets));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname + "\n  block  =" + block + ", newGs=" + latestGenerationStamp + ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]" + "\n  targets=" + Arrays.asList(targets) + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode + ", pinning=" + pinning);
        LOG.debug("isDatanode=" + isDatanode + ", isClient=" + isClient + ", isTransfer=" + isTransfer);
        LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() + " tcp no delay " + peer.getTcpNoDelay());
    }
    // We later mutate block's generation stamp and length, but we need to
    // forward the original version of the block to downstream mirrors, so
    // make a copy here.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    if (block.getNumBytes() == 0) {
        block.setNumBytes(dataXceiverServer.estimateBlockSize);
    }
    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: " + localAddress);
    // stream to next target
    DataOutputStream mirrorOut = null;
    // reply from next target
    DataInputStream mirrorIn = null;
    // socket to next target
    Socket mirrorSock = null;
    // the name:port of next target
    String mirrorNode = null;
    // first datanode that failed in connection setup
    String firstBadLink = "";
    Status mirrorInStatus = SUCCESS;
    final String storageUuid;
    final boolean isOnTransientStorage;
    try {
        final Replica replica;
        if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            // open a block receiver
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, in, peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, cachingStrategy, allowLazyPersist, pinning));
            replica = blockReceiver.getReplica();
        } else {
            replica = datanode.data.recoverClose(block, latestGenerationStamp, minBytesRcvd);
        }
        storageUuid = replica.getStorageUuid();
        isOnTransientStorage = replica.isOnTransientStorage();
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Connecting to datanode " + mirrorNode);
            }
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                DataNodeFaultInjector.get().failMirrorConnection();
                int timeoutValue = dnConf.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
                int writeTimeout = dnConf.socketWriteTimeout + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setKeepAlive(true);
                if (dnConf.getTransferSocketSendBufferSize() > 0) {
                    mirrorSock.setSendBufferSize(dnConf.getTransferSocketSendBufferSize());
                }
                OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock, writeTimeout);
                InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
                DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
                IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
                unbufMirrorOut = saslStreams.out;
                unbufMirrorIn = saslStreams.in;
                mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, smallBufferSize));
                mirrorIn = new DataInputStream(unbufMirrorIn);
                if (targetPinnings != null && targetPinnings.length > 0) {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, targetPinnings[0], targetPinnings);
                } else {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, false, targetPinnings);
                }
                mirrorOut.flush();
                DataNodeFaultInjector.get().writeBlockAfterFlush();
                // read connect ack (only for clients, not for replication req)
                if (isClient) {
                    BlockOpResponseProto connectAck = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
                    mirrorInStatus = connectAck.getStatus();
                    firstBadLink = connectAck.getFirstBadLink();
                    if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                        LOG.debug("Datanode " + targets.length + " got response for connect ack " + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (isClient) {
                    BlockOpResponseProto.newBuilder().setStatus(ERROR).setFirstBadLink(targets[0].getXferAddr()).build().writeDelimitedTo(replyOut);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (isClient) {
                    LOG.error(datanode + ":Exception transferring block " + block + " to mirror " + mirrorNode + ": " + e);
                    throw e;
                } else {
                    LOG.info(datanode + ":Exception transferring " + block + " to mirror " + mirrorNode + "- continuing without the mirror", e);
                    incrDatanodeNetworkErrors();
                }
            }
        }
        // send connect-ack to source for clients and not transfer-RBW/Finalized
        if (isClient && !isTransfer) {
            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                LOG.debug("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is " + firstBadLink);
            }
            BlockOpResponseProto.newBuilder().setStatus(mirrorInStatus).setFirstBadLink(firstBadLink).build().writeDelimitedTo(replyOut);
            replyOut.flush();
        }
        // receive the block and mirror to the next target
        if (blockReceiver != null) {
            String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
            blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets, false);
            // send close-ack for transfer-RBW/Finalized 
            if (isTransfer) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("TRANSFER: send close-ack");
                }
                writeResponse(SUCCESS, null, replyOut);
            }
        }
        // update its generation stamp
        if (isClient && stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            block.setGenerationStamp(latestGenerationStamp);
            block.setNumBytes(minBytesRcvd);
        }
        // the block is finalized in the PacketResponder.
        if (isDatanode || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
            LOG.info("Received " + block + " src: " + remoteAddress + " dest: " + localAddress + " of size " + block.getNumBytes());
        }
        if (isClient) {
            size = block.getNumBytes();
        }
    } catch (IOException ioe) {
        LOG.info("opWriteBlock " + block + " received exception " + ioe);
        incrDatanodeNetworkErrors();
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
        setCurrentBlockReceiver(null);
    }
    // update metrics
    datanode.getMetrics().addWriteBlockOp(elapsed());
    datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), DataOutputStream (java.io.DataOutputStream), InetSocketAddress (java.net.InetSocketAddress), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ByteString (com.google.protobuf.ByteString), DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DomainSocket (org.apache.hadoop.net.unix.DomainSocket), Socket (java.net.Socket)
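
The connect-ack that writeBlock reads from its mirror and forwards upstream is a varint-length-delimited BlockOpResponseProto, the same framing used by PBHelperClient.vintPrefixed in the code above. A minimal sketch of the reading side, assuming only a connected DataInputStream (the checkConnectAck helper is hypothetical, not actual HDFS client code):

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

// Hypothetical helper: read a pipeline connect-ack and surface the first bad node.
static void checkConnectAck(DataInputStream in) throws IOException {
    BlockOpResponseProto ack = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    if (ack.getStatus() != Status.SUCCESS) {
        // firstBadLink names the first datanode that failed pipeline setup.
        throw new IOException("pipeline setup failed at: " + ack.getFirstBadLink());
    }
}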

Example 3 with Status

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutputHelper, method getStatus:

static Status getStatus(PipelineAckProto ack) {
    List<Integer> flagList = ack.getFlagList();
    Integer headerFlag;
    if (flagList.isEmpty()) {
        Status reply = ack.getReply(0);
        headerFlag = PipelineAck.combineHeader(ECN.DISABLED, reply);
    } else {
        headerFlag = flagList.get(0);
    }
    return PipelineAck.getStatusFromHeader(headerFlag);
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)
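
This is the direct version of Example 1, compiled against a Hadoop release where PipelineAck.ECN is visible. When the ack carries no ECN flags (older senders), the code synthesizes a combined header from ECN.DISABLED and the legacy reply field. A quick check built with the standard protobuf builder API, treating the exact field set of PipelineAckProto as an assumption:

// An ack with no ECN flags exercises the legacy-reply branch above.
PipelineAckProto ack = PipelineAckProto.newBuilder()
    .setSeqno(0L)
    .addReply(Status.SUCCESS)
    .build();
assert getStatus(ack) == Status.SUCCESS;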

Example 4 with Status

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status in project hadoop by apache.

From class DataXceiver, method releaseShortCircuitFds:

@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
    boolean success = false;
    try {
        String error;
        Status status;
        try {
            datanode.shortCircuitRegistry.unregisterSlot(slotId);
            error = null;
            status = Status.SUCCESS;
        } catch (UnsupportedOperationException e) {
            error = "unsupported operation";
            status = Status.ERROR_UNSUPPORTED;
        } catch (Throwable e) {
            error = e.getMessage();
            status = Status.ERROR_INVALID;
        }
        ReleaseShortCircuitAccessResponseProto.Builder bld = ReleaseShortCircuitAccessResponseProto.newBuilder();
        bld.setStatus(status);
        if (error != null) {
            bld.setError(error);
        }
        bld.build().writeDelimitedTo(socketOut);
        success = true;
    } finally {
        if (ClientTraceLog.isInfoEnabled()) {
            BlockSender.ClientTraceLog.info(String.format("src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," + " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b", slotId.getShmId().getHi(), slotId.getShmId().getLo(), slotId.getSlotIdx(), datanode.getDatanodeUuid(), success));
        }
    }
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), ReleaseShortCircuitAccessResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto), ByteString (com.google.protobuf.ByteString)
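
The method maps each outcome of unregisterSlot onto a Status and writes a delimited ReleaseShortCircuitAccessResponseProto back on the socket. A sketch of the peer that reads it, assuming the same vintPrefixed framing as the other responses on this page (the helper below is hypothetical, not the actual HDFS client code):

// Hypothetical reader for the release-slot response.
static void checkReleaseResponse(DataInputStream in) throws IOException {
    ReleaseShortCircuitAccessResponseProto resp =
        ReleaseShortCircuitAccessResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    if (resp.getStatus() != Status.SUCCESS) {
        String detail = resp.hasError() ? resp.getError() : String.valueOf(resp.getStatus());
        throw new IOException("failed to release short-circuit slot: " + detail);
    }
}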

Example 5 with Status

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status in project hadoop by apache.

From class DataXceiver, method replaceBlock:

@Override
public void replaceBlock(final ExtendedBlock block, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String delHint, final DatanodeInfo proxySource) throws IOException {
    updateCurrentThreadName("Replacing block " + block + " from " + delHint);
    DataOutputStream replyOut = new DataOutputStream(getOutputStream());
    checkAccess(replyOut, true, block, blockToken, Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE);
    if (!dataXceiverServer.balanceThrottler.acquire()) {
        // not able to start
        String msg = "Not able to receive block " + block.getBlockId() + " from " + peer.getRemoteAddressString() + " because threads " + "quota is exceeded.";
        LOG.warn(msg);
        sendResponse(ERROR, msg);
        return;
    }
    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    Status opStatus = SUCCESS;
    String errMsg = null;
    DataInputStream proxyReply = null;
    boolean IoeDuringCopyBlockOperation = false;
    try {
        // Move the block to different storage in the same datanode
        if (proxySource.equals(datanode.getDatanodeId())) {
            ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block, storageType);
            if (oldReplica != null) {
                LOG.info("Moved " + block + " from StorageType " + oldReplica.getVolume().getStorageType() + " to " + storageType);
            }
        } else {
            block.setNumBytes(dataXceiverServer.estimateBlockSize);
            // get the output stream to the proxy
            final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Connecting to datanode " + dnAddr);
            }
            InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
            proxySock = datanode.newSocket();
            NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
            proxySock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
            proxySock.setSoTimeout(dnConf.socketTimeout);
            proxySock.setKeepAlive(true);
            OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock, dnConf.socketWriteTimeout);
            InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
            DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
            IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock, unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
            unbufProxyOut = saslStreams.out;
            unbufProxyIn = saslStreams.in;
            proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut, smallBufferSize));
            proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn, ioFileBufferSize));
            /* send request to the proxy */
            IoeDuringCopyBlockOperation = true;
            new Sender(proxyOut).copyBlock(block, blockToken);
            IoeDuringCopyBlockOperation = false;
            // receive the response from the proxy
            BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(proxyReply));
            String logInfo = "copy block " + block + " from " + proxySock.getRemoteSocketAddress();
            DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo, true);
            // get checksum info about the block we're copying
            ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
            DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
            // open a block receiver and check if the block does not exist
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, proxyReply, proxySock.getRemoteSocketAddress().toString(), proxySock.getLocalSocketAddress().toString(), null, 0, 0, 0, "", null, datanode, remoteChecksum, CachingStrategy.newDropBehind(), false, false));
            // receive a block
            blockReceiver.receiveBlock(null, null, replyOut, null, dataXceiverServer.balanceThrottler, null, true);
            // notify name node
            final Replica r = blockReceiver.getReplica();
            datanode.notifyNamenodeReceivedBlock(block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
            LOG.info("Moved " + block + " from " + peer.getRemoteAddressString() + ", delHint=" + delHint);
        }
    } catch (IOException ioe) {
        opStatus = ERROR;
        if (ioe instanceof BlockPinningException) {
            opStatus = Status.ERROR_BLOCK_PINNED;
        }
        errMsg = "opReplaceBlock " + block + " received exception " + ioe;
        LOG.info(errMsg);
        if (!IoeDuringCopyBlockOperation) {
            // Don't double count IO errors
            incrDatanodeNetworkErrors();
        }
        throw ioe;
    } finally {
        // receive the last byte that indicates the proxy released its thread resource
        if (opStatus == SUCCESS && proxyReply != null) {
            try {
                proxyReply.readChar();
            } catch (IOException ignored) {
            }
        }
        // now release the thread resource
        dataXceiverServer.balanceThrottler.release();
        // send response back
        try {
            sendResponse(opStatus, errMsg);
        } catch (IOException ioe) {
            LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
            incrDatanodeNetworkErrors();
        }
        IOUtils.closeStream(proxyOut);
        IOUtils.closeStream(blockReceiver);
        IOUtils.closeStream(proxyReply);
        IOUtils.closeStream(replyOut);
    }
    // update metrics
    datanode.metrics.addReplaceBlockOp(elapsed());
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), DataOutputStream (java.io.DataOutputStream), InetSocketAddress (java.net.InetSocketAddress), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto), ByteString (com.google.protobuf.ByteString), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory), BlockPinningException (org.apache.hadoop.hdfs.protocol.datatransfer.BlockPinningException), DataChecksum (org.apache.hadoop.util.DataChecksum), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DomainSocket (org.apache.hadoop.net.unix.DomainSocket), Socket (java.net.Socket)
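
replaceBlock accumulates an opStatus (SUCCESS, ERROR, or ERROR_BLOCK_PINNED) and flushes it in the finally block via sendResponse. That method is not shown on this page, so the following is only a sketch of what such a response write plausibly looks like, modeled on the BlockOpResponseProto handling in Example 2 (message is the proto's optional human-readable detail field):

// Assumed shape of sendResponse: a status plus optional message, length-delimited.
static void sendResponse(Status status, String message, OutputStream out) throws IOException {
    BlockOpResponseProto.Builder reply = BlockOpResponseProto.newBuilder().setStatus(status);
    if (message != null) {
        reply.setMessage(message);
    }
    reply.build().writeDelimitedTo(out);
    out.flush();
}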

Aggregations

Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status): 6 usages
ByteString (com.google.protobuf.ByteString): 3 usages
BufferedOutputStream (java.io.BufferedOutputStream): 3 usages
DataInputStream (java.io.DataInputStream): 3 usages
DataOutputStream (java.io.DataOutputStream): 3 usages
IOException (java.io.IOException): 3 usages
InputStream (java.io.InputStream): 3 usages
InterruptedIOException (java.io.InterruptedIOException): 3 usages
OutputStream (java.io.OutputStream): 3 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3 usages
IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair): 3 usages
Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender): 3 usages
BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto): 3 usages
BufferedInputStream (java.io.BufferedInputStream): 2 usages
FileInputStream (java.io.FileInputStream): 2 usages
InetSocketAddress (java.net.InetSocketAddress): 2 usages
Socket (java.net.Socket): 2 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 usages
DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory): 2 usages
DomainSocket (org.apache.hadoop.net.unix.DomainSocket): 2 usages