Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.
The class BlockReaderRemote, method newBlockReader.
/**
* Create a new BlockReader specifically to satisfy a read.
* This method also sends the OP_READ_BLOCK request.
*
* @param file File location
* @param block The block object
* @param blockToken The block token for security
* @param startOffset The read offset, relative to block head
* @param len The number of bytes to read
* @param verifyChecksum Whether to verify checksum
* @param clientName Client name
* @param peer The Peer to use
* @param datanodeID The DatanodeID this peer is connected to
* @return New BlockReader instance (on failure an IOException is thrown rather than returning null).
*/
public static BlockReader newBlockReader(String file, ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken, long startOffset, long len,
    boolean verifyChecksum, String clientName, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache,
    CachingStrategy cachingStrategy, Tracer tracer,
    int networkDistance) throws IOException {
// in and out will be closed when the peer is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len, verifyChecksum, cachingStrategy);
//
// Get bytes in block
//
DataInputStream in = new DataInputStream(peer.getInputStream());
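// The reply is a BlockOpResponseProto prefixed with its length as a protobuf
// varint; vintPrefixed bounds the stream to that length, so the parse below
// is effectively a parseDelimitedFrom.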
BlockOpResponseProto status = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
checkSuccess(status, peer, block, file);
ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
DataChecksum checksum = DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
// Should we warn when we get CHECKSUM_NULL?
// Read the first chunk offset.
long firstChunkOffset = checksumInfo.getChunkOffset();
if (firstChunkOffset < 0 || firstChunkOffset > startOffset || firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
throw new IOException("BlockReader: error in first chunk offset (" + firstChunkOffset + ") startOffset is " + startOffset + " for file " + file);
}
return new BlockReaderRemote(file, block.getBlockId(), checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID, peerCache, tracer, networkDistance);
}
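A hedged usage sketch (not from the Hadoop source): once newBlockReader returns, a caller that already holds the peer, block, and token objects can drain the requested range through the BlockReader interface. All variable names below are hypothetical.

BlockReader reader = BlockReaderRemote.newBlockReader(src, block, token,
    startOffset, length, true /* verifyChecksum */, clientName, peer,
    datanodeId, peerCache, cachingStrategy, tracer, networkDistance);
try {
  byte[] buf = new byte[64 * 1024];
  long remaining = length;
  int n;
  // read until the requested range is exhausted or the stream ends
  while (remaining > 0 &&
      (n = reader.read(buf, 0, (int) Math.min(buf.length, remaining))) > 0) {
    remaining -= n;
    // consume buf[0..n) here
  }
} finally {
  reader.close();
}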
Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.
The class DataStreamer, method createBlockOutputStream.
// connects to the first datanode in the pipeline
// Returns true on success, false otherwise.
//
boolean createBlockOutputStream(DatanodeInfo[] nodes, StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
if (nodes.length == 0) {
LOG.info("nodes are empty for write pipeline of " + block);
return false;
}
String firstBadLink = "";
boolean checkRestart = false;
if (LOG.isDebugEnabled()) {
LOG.debug("pipeline = " + Arrays.toString(nodes) + ", " + this);
}
// persist blocks on namenode on next flush
persistBlocks.set(true);
int refetchEncryptionKey = 1;
while (true) {
boolean result = false;
DataOutputStream out = null;
try {
assert null == s : "Previous socket unclosed";
assert null == blockReplyStream : "Previous blockReplyStream unclosed";
s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
long readTimeout = dfsClient.getDatanodeReadTimeout(nodes.length);
OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(s, readTimeout);
IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s, unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut, DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
blockReplyStream = new DataInputStream(unbufIn);
//
// Xmit header info to datanode
//
BlockConstructionStage bcs = recoveryFlag ? stage.getRecoveryStage() : stage;
// We cannot change the block length in 'block' as it counts the number
// of bytes ack'ed.
ExtendedBlock blockCopy = block.getCurrentBlock();
blockCopy.setNumBytes(stat.getBlockSize());
boolean[] targetPinnings = getPinnings(nodes);
// send the request
new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken,
    dfsClient.clientName, nodes, nodeStorageTypes, null, bcs,
    nodes.length, block.getNumBytes(), bytesSent, newGS,
    checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile,
    (targetPinnings != null && targetPinnings[0]), targetPinnings);
// receive ack for connect
BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(blockReplyStream));
Status pipelineStatus = resp.getStatus();
firstBadLink = resp.getFirstBadLink();
// If a node is already restarting, a further restart OOB ack is treated as a
// regular node error; otherwise flag the restart.
if (PipelineAck.isRestartOOBStatus(pipelineStatus) && !errorState.isRestartingNode()) {
checkRestart = true;
throw new IOException("A datanode is restarting.");
}
String logInfo = "ack with firstBadLink as " + firstBadLink;
DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
assert null == blockStream : "Previous blockStream unclosed";
blockStream = out;
// success
result = true;
errorState.resetInternalError();
} catch (IOException ie) {
if (!errorState.isRestartingNode()) {
LOG.info("Exception in createBlockOutputStream " + this, ie);
}
if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + nodes[0] + " : " + ie);
// The encryption key used is invalid.
refetchEncryptionKey--;
dfsClient.clearDataEncryptionKey();
// Don't close the socket/exclude this node just yet. Try again with
// a new encryption key.
continue;
}
// find the datanode that matches
if (firstBadLink.length() != 0) {
for (int i = 0; i < nodes.length; i++) {
// NB: Unconditionally using the xfer addr w/o hostname
if (firstBadLink.equals(nodes[i].getXferAddr())) {
errorState.setBadNodeIndex(i);
break;
}
}
} else {
assert !checkRestart;
errorState.setBadNodeIndex(0);
}
final int i = errorState.getBadNodeIndex();
// Check whether there is a restart worth waiting for.
if (checkRestart && shouldWaitForRestart(i)) {
errorState.initRestartingNode(i, "Datanode " + i + " is restarting: " + nodes[i]);
}
errorState.setInternalError();
lastException.set(ie);
// error
result = false;
} finally {
if (!result) {
IOUtils.closeSocket(s);
s = null;
IOUtils.closeStream(out);
IOUtils.closeStream(blockReplyStream);
blockReplyStream = null;
}
}
return result;
}
}
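For context, a hedged sketch of the calling pattern: pipeline setup excludes the datanode recorded in errorState and retries with a fresh set of nodes. This simplifies the real DataStreamer logic; locateFollowingBlock and excludedNodes are hypothetical names.

boolean success = false;
int retriesLeft = 3;
while (!success && retriesLeft-- > 0) {
  // ask the namenode for a pipeline that avoids previously failed nodes
  DatanodeInfo[] nodes = locateFollowingBlock(excludedNodes); // hypothetical helper
  success = createBlockOutputStream(nodes, storageTypes, 0L /* newGS */, false);
  if (!success) {
    // exclude the node flagged by createBlockOutputStream and try again
    excludedNodes.add(nodes[errorState.getBadNodeIndex()]);
  }
}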
Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.
The class BlockReaderFactory, method requestFileDescriptors.
/**
* Request file descriptors from a DomainPeer.
*
* @param peer The peer to use for communication.
* @param slot If non-null, the shared memory slot to associate with the
* new ShortCircuitReplica.
*
* @return A ShortCircuitReplica object if we could communicate with the
* datanode; null, otherwise.
* @throws IOException If we encountered an I/O exception while communicating
* with the datanode.
*/
private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer, Slot slot) throws IOException {
ShortCircuitCache cache = clientContext.getShortCircuitCache();
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
SlotId slotId = slot == null ? null : slot.getSlotId();
new Sender(out).requestShortCircuitFds(block, token, slotId, 1, failureInjector.getSupportsReceiptVerification());
DataInputStream in = new DataInputStream(peer.getInputStream());
BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
DomainSocket sock = peer.getDomainSocket();
failureInjector.injectRequestFileDescriptorsFailure();
switch(resp.getStatus()) {
case SUCCESS:
byte[] buf = new byte[1];
FileInputStream[] fis = new FileInputStream[2];
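// Two file descriptors arrive over the domain socket: the block file and its
// checksum (meta) file. buf[0] receives a one-byte code, checked below for
// receipt verification.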
sock.recvFileInputStreams(fis, buf, 0, buf.length);
ShortCircuitReplica replica = null;
try {
ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) {
LOG.trace("Sending receipt verification byte for slot {}", slot);
sock.getOutputStream().write(0);
}
replica = new ShortCircuitReplica(key, fis[0], fis[1], cache, Time.monotonicNow(), slot);
return new ShortCircuitReplicaInfo(replica);
} catch (IOException e) {
// This indicates an error reading from disk, or a format error. Since
// it's not a socket communication problem, we return null rather than
// throwing an exception.
LOG.warn(this + ": error creating ShortCircuitReplica.", e);
return null;
} finally {
if (replica == null) {
IOUtilsClient.cleanup(DFSClient.LOG, fis[0], fis[1]);
}
}
case ERROR_UNSUPPORTED:
if (!resp.hasShortCircuitAccessVersion()) {
LOG.warn("short-circuit read access is disabled for " + "DataNode " + datanode + ". reason: " + resp.getMessage());
clientContext.getDomainSocketFactory().disableShortCircuitForPath(pathInfo.getPath());
} else {
LOG.warn("short-circuit read access for the file " + fileName + " is disabled for DataNode " + datanode + ". reason: " + resp.getMessage());
}
return null;
case ERROR_ACCESS_TOKEN:
String msg = "access control error while " + "attempting to set up short-circuit access to " + fileName + resp.getMessage();
LOG.debug("{}:{}", this, msg);
return new ShortCircuitReplicaInfo(new InvalidToken(msg));
default:
LOG.warn(this + ": unknown response code " + resp.getStatus() + " while attempting to set up short-circuit access. " + resp.getMessage());
clientContext.getDomainSocketFactory().disableShortCircuitForPath(pathInfo.getPath());
return null;
}
}
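A hedged sketch of how the result is consumed (variable names hypothetical): a null return means the caller should fall back to another read path, while a ShortCircuitReplicaInfo carrying an InvalidToken must be surfaced.

ShortCircuitReplicaInfo info = requestFileDescriptors(peer, slot);
if (info == null) {
  // non-fatal failure: fall back, e.g. to a remote TCP read
} else if (info.getInvalidTokenException() != null) {
  throw info.getInvalidTokenException(); // access-control failure
} else {
  ShortCircuitReplica replica = info.getReplica();
  // read the block directly through the replica's file descriptors
}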
Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.
The class DataXceiver, method replaceBlock.
@Override
public void replaceBlock(final ExtendedBlock block, final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken, final String delHint,
    final DatanodeInfo proxySource) throws IOException {
updateCurrentThreadName("Replacing block " + block + " from " + delHint);
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
checkAccess(replyOut, true, block, blockToken, Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE);
if (!dataXceiverServer.balanceThrottler.acquire()) {
// not able to start
String msg = "Not able to receive block " + block.getBlockId() + " from " + peer.getRemoteAddressString() + " because threads " + "quota is exceeded.";
LOG.warn(msg);
sendResponse(ERROR, msg);
return;
}
Socket proxySock = null;
DataOutputStream proxyOut = null;
Status opStatus = SUCCESS;
String errMsg = null;
DataInputStream proxyReply = null;
boolean IoeDuringCopyBlockOperation = false;
try {
// Move the block to a different storage type on the same datanode
if (proxySource.equals(datanode.getDatanodeId())) {
ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block, storageType);
if (oldReplica != null) {
LOG.info("Moved " + block + " from StorageType " + oldReplica.getVolume().getStorageType() + " to " + storageType);
}
} else {
block.setNumBytes(dataXceiverServer.estimateBlockSize);
// get the output stream to the proxy
final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
proxySock = datanode.newSocket();
NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
proxySock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
proxySock.setSoTimeout(dnConf.socketTimeout);
proxySock.setKeepAlive(true);
OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock, dnConf.socketWriteTimeout);
InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock, unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
unbufProxyOut = saslStreams.out;
unbufProxyIn = saslStreams.in;
proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut, smallBufferSize));
proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn, ioFileBufferSize));
/* send request to the proxy */
IoeDuringCopyBlockOperation = true;
new Sender(proxyOut).copyBlock(block, blockToken);
IoeDuringCopyBlockOperation = false;
// receive the response from the proxy
BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(proxyReply));
String logInfo = "copy block " + block + " from " + proxySock.getRemoteSocketAddress();
DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo, true);
// get checksum info about the block we're copying
ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
// open a block receiver, checking that the block does not already exist
setCurrentBlockReceiver(getBlockReceiver(block, storageType, proxyReply,
    proxySock.getRemoteSocketAddress().toString(),
    proxySock.getLocalSocketAddress().toString(),
    null, 0, 0, 0, "", null, datanode, remoteChecksum,
    CachingStrategy.newDropBehind(), false, false));
// receive a block
blockReceiver.receiveBlock(null, null, replyOut, null, dataXceiverServer.balanceThrottler, null, true);
// notify name node
final Replica r = blockReceiver.getReplica();
datanode.notifyNamenodeReceivedBlock(block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
LOG.info("Moved " + block + " from " + peer.getRemoteAddressString() + ", delHint=" + delHint);
}
} catch (IOException ioe) {
opStatus = ERROR;
if (ioe instanceof BlockPinningException) {
opStatus = Status.ERROR_BLOCK_PINNED;
}
errMsg = "opReplaceBlock " + block + " received exception " + ioe;
LOG.info(errMsg);
if (!IoeDuringCopyBlockOperation) {
// Don't double count IO errors
incrDatanodeNetworkErrors();
}
throw ioe;
} finally {
// receive the last byte that indicates the proxy released its thread resource
if (opStatus == SUCCESS && proxyReply != null) {
try {
proxyReply.readChar();
} catch (IOException ignored) {
}
}
// now release the thread resource
dataXceiverServer.balanceThrottler.release();
// send response back
try {
sendResponse(opStatus, errMsg);
} catch (IOException ioe) {
LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
incrDatanodeNetworkErrors();
}
IOUtils.closeStream(proxyOut);
IOUtils.closeStream(blockReceiver);
IOUtils.closeStream(proxyReply);
IOUtils.closeStream(replyOut);
}
// update metrics
datanode.metrics.addReplaceBlockOp(elapsed());
}
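On the requesting side (for example the Balancer), the same Sender class issues the OP_REPLACE_BLOCK request that this method serves. A hedged sketch, with SASL and stream setup omitted and variable names hypothetical:

new Sender(out).replaceBlock(block, targetStorageType, blockToken,
    sourceDatanode.getDatanodeUuid() /* delHint */, sourceDatanode);
out.flush();
// the datanode replies with a vint-prefixed BlockOpResponseProto
BlockOpResponseProto response =
    BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
if (response.getStatus() != Status.SUCCESS) {
  throw new IOException("replaceBlock of " + block + " failed: "
      + response.getMessage());
}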
Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.
The class DFSTestUtil, method transferRbw.
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
assertEquals(2, datanodes.length);
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
try (Socket s = DataStreamer.createSocketForPipeline(datanodes[0], datanodes.length, dfsClient);
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout), DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
DataInputStream in = new DataInputStream(NetUtils.getInputStream(s))) {
// send the request
new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
    dfsClient.clientName, new DatanodeInfo[] { datanodes[1] },
    new StorageType[] { StorageType.DEFAULT });
out.flush();
return BlockOpResponseProto.parseDelimitedFrom(in);
}
}
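A hedged usage sketch mirroring how TestTransferRbw might assert on the result (variable names hypothetical):

BlockOpResponseProto resp =
    DFSTestUtil.transferRbw(block, dfsClient, srcDatanode, destDatanode);
assertEquals(Status.SUCCESS, resp.getStatus());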