use of org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException in project hadoop by apache.
In the class SaslDataTransferServer, the method doSaslHandshake.
/**
* This method actually executes the server-side SASL handshake.
*
* @param peer connection peer
* @param underlyingOut connection output stream
* @param underlyingIn connection input stream
* @param saslProps properties of SASL negotiation
* @param callbackHandler for responding to SASL callbacks
* @return new pair of streams, wrapped after SASL negotiation
* @throws IOException for any error
*/
private IOStreamPair doSaslHandshake(Peer peer, OutputStream underlyingOut, InputStream underlyingIn, Map<String, String> saslProps, CallbackHandler callbackHandler) throws IOException {
DataInputStream in = new DataInputStream(underlyingIn);
DataOutputStream out = new DataOutputStream(underlyingOut);
SaslParticipant sasl = SaslParticipant.createServerSaslParticipant(saslProps, callbackHandler);
int magicNumber = in.readInt();
if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) {
throw new InvalidMagicNumberException(magicNumber, dnConf.getEncryptDataTransfer());
}
try {
// step 1
byte[] remoteResponse = readSaslMessage(in);
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
sendSaslMessage(out, localResponse);
// step 2 (server-side only)
List<CipherOption> cipherOptions = Lists.newArrayList();
remoteResponse = readSaslMessageAndNegotiationCipherOptions(in, cipherOptions);
localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
// SASL handshake is complete
checkSaslComplete(sasl, saslProps);
CipherOption cipherOption = null;
if (sasl.isNegotiatedQopPrivacy()) {
// Negotiate a cipher option
Configuration conf = dnConf.getConf();
cipherOption = negotiateCipherOption(conf, cipherOptions);
if (LOG.isDebugEnabled()) {
if (cipherOption == null) {
// No cipher suite is negotiated
String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
if (cipherSuites != null && !cipherSuites.isEmpty()) {
// the server accepts some cipher suites, but the client does not.
LOG.debug("Server accepts cipher suites {}, " + "but client {} does not accept any of them", cipherSuites, peer.getRemoteAddressString());
}
} else {
LOG.debug("Server using cipher suite {} with client {}", cipherOption.getCipherSuite().getName(), peer.getRemoteAddressString());
}
}
}
// If negotiated cipher option is not null, wrap it before sending.
sendSaslMessageAndNegotiatedCipherOption(out, localResponse, wrap(cipherOption, sasl));
// If the negotiated cipher option is not null, use it to create the stream pair.
return cipherOption != null ? createStreamPair(dnConf.getConf(), cipherOption, underlyingOut, underlyingIn, true) : sasl.createStreamPair(out, in);
} catch (IOException ioe) {
if (ioe instanceof SaslException && ioe.getCause() != null && ioe.getCause() instanceof InvalidEncryptionKeyException) {
// This could just be because the client is long-lived and hasn't gotten
// a new encryption key from the NN in a while. Upon receiving this
// error, the client will get a new encryption key from the NN and retry
// connecting to this DN.
sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
} else {
sendGenericSaslErrorMessage(out, ioe.getMessage());
}
throw ioe;
}
}
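On the server side, an InvalidEncryptionKeyException wrapped in a SaslException is reported back to the client as an invalid-key SASL error rather than a generic one, so the client knows it can recover by discarding its cached data encryption key. The client-side snippets that follow all share the same retry-once pattern; below is a minimal, self-contained sketch of it. The DataNodeConnector interface and connectWithKeyRefetch helper are hypothetical; only InvalidEncryptionKeyException and the key-clearing step (DFSClient#clearDataEncryptionKey) mirror the surrounding Hadoop code.

// Minimal sketch of the retry-once pattern used by the client-side snippets below.
// DataNodeConnector and connectWithKeyRefetch are hypothetical helpers; only
// InvalidEncryptionKeyException and the key-clearing step mirror the Hadoop code.
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;

class EncryptionKeyRetryExample {

  interface DataNodeConnector {
    // Hypothetical: performs the SASL handshake and opens the data streams.
    void connect() throws IOException;
  }

  static void connectWithKeyRefetch(DataNodeConnector connector,
                                    Runnable clearDataEncryptionKey) throws IOException {
    int refetchEncryptionKey = 1; // fetch a new key at most once, as the DFS client does
    while (true) {
      try {
        connector.connect();
        return;
      } catch (InvalidEncryptionKeyException e) {
        if (refetchEncryptionKey > 0) {
          // The cached key may simply have expired; drop it so the next attempt
          // obtains a fresh key from the NameNode, then retry.
          refetchEncryptionKey--;
          clearDataEncryptionKey.run();
          continue;
        }
        throw e;
      }
    }
  }
}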
use of org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException in project hadoop by apache.
In the class DataStreamer, the method createBlockOutputStream.
// connects to the first datanode in the pipeline
// Returns true on success, false on failure.
//
boolean createBlockOutputStream(DatanodeInfo[] nodes, StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
if (nodes.length == 0) {
LOG.info("nodes are empty for write pipeline of " + block);
return false;
}
String firstBadLink = "";
boolean checkRestart = false;
if (LOG.isDebugEnabled()) {
LOG.debug("pipeline = " + Arrays.toString(nodes) + ", " + this);
}
// persist blocks on namenode on next flush
persistBlocks.set(true);
int refetchEncryptionKey = 1;
while (true) {
boolean result = false;
DataOutputStream out = null;
try {
assert null == s : "Previous socket unclosed";
assert null == blockReplyStream : "Previous blockReplyStream unclosed";
s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
long readTimeout = dfsClient.getDatanodeReadTimeout(nodes.length);
OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(s, readTimeout);
IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s, unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut, DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
blockReplyStream = new DataInputStream(unbufIn);
//
// Xmit header info to datanode
//
BlockConstructionStage bcs = recoveryFlag ? stage.getRecoveryStage() : stage;
// We cannot change the block length in 'block' as it counts the number
// of bytes ack'ed.
ExtendedBlock blockCopy = block.getCurrentBlock();
blockCopy.setNumBytes(stat.getBlockSize());
boolean[] targetPinnings = getPinnings(nodes);
// send the request
new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken, dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, nodes.length, block.getNumBytes(), bytesSent, newGS, checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile, (targetPinnings != null && targetPinnings[0]), targetPinnings);
// receive ack for connect
BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(blockReplyStream));
Status pipelineStatus = resp.getStatus();
firstBadLink = resp.getFirstBadLink();
// Got a restart OOB ack. If a node is already marked as restarting, this status is likely from a different node, so fall through and treat it as a regular node error.
if (PipelineAck.isRestartOOBStatus(pipelineStatus) && !errorState.isRestartingNode()) {
checkRestart = true;
throw new IOException("A datanode is restarting.");
}
String logInfo = "ack with firstBadLink as " + firstBadLink;
DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
assert null == blockStream : "Previous blockStream unclosed";
blockStream = out;
// success
result = true;
errorState.resetInternalError();
} catch (IOException ie) {
if (!errorState.isRestartingNode()) {
LOG.info("Exception in createBlockOutputStream " + this, ie);
}
if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + nodes[0] + " : " + ie);
// The encryption key used is invalid.
refetchEncryptionKey--;
dfsClient.clearDataEncryptionKey();
// Don't close the socket or exclude this node just yet; try again with a new encryption key.
continue;
}
// find the datanode that matches
if (firstBadLink.length() != 0) {
for (int i = 0; i < nodes.length; i++) {
// NB: Unconditionally using the xfer addr w/o hostname
if (firstBadLink.equals(nodes[i].getXferAddr())) {
errorState.setBadNodeIndex(i);
break;
}
}
} else {
assert !checkRestart;
errorState.setBadNodeIndex(0);
}
final int i = errorState.getBadNodeIndex();
// Check whether there is a restart worth waiting for.
if (checkRestart && shouldWaitForRestart(i)) {
errorState.initRestartingNode(i, "Datanode " + i + " is restarting: " + nodes[i]);
}
errorState.setInternalError();
lastException.set(ie);
// error
result = false;
} finally {
if (!result) {
IOUtils.closeSocket(s);
s = null;
IOUtils.closeStream(out);
IOUtils.closeStream(blockReplyStream);
blockReplyStream = null;
}
}
return result;
}
}
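The finally block above uses a result flag to decide whether the half-opened pipeline must be torn down before the method returns or retries. Below is a condensed sketch of that cleanup-on-failure idiom, with the actual socket and SASL setup elided; IOUtils.closeSocket and IOUtils.closeStream are the real Hadoop helpers, everything else is illustrative.

// Condensed sketch of the cleanup-on-failure idiom from createBlockOutputStream.
// The connection setup is elided; only the IOUtils helpers are real Hadoop APIs.
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.io.IOUtils;

class PipelineCleanupExample {

  void connectOnce() throws IOException {
    boolean result = false;
    Socket s = null;
    DataOutputStream out = null;
    try {
      // ... create the socket, negotiate SASL, send the write-block request ...
      result = true;
    } finally {
      if (!result) {
        // Tear down everything that was opened so a retry starts from a clean slate.
        IOUtils.closeSocket(s);
        IOUtils.closeStream(out);
      }
    }
  }
}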
use of org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException in project hadoop by apache.
In the class DFSStripedInputStream, the method createBlockReader.
boolean createBlockReader(LocatedBlock block, long offsetInBlock, LocatedBlock[] targetBlocks, BlockReaderInfo[] readerInfos, int chunkIndex) throws IOException {
BlockReader reader = null;
final ReaderRetryPolicy retry = new ReaderRetryPolicy();
DFSInputStream.DNAddrPair dnInfo = new DFSInputStream.DNAddrPair(null, null, null);
while (true) {
try {
// the cached block location might have been re-fetched, so always
// get it from cache.
block = refreshLocatedBlock(block);
targetBlocks[chunkIndex] = block;
// internal block has one location, just rule out the deadNodes
dnInfo = getBestNodeDNAddrPair(block, null);
if (dnInfo == null) {
break;
}
reader = getBlockReader(block, offsetInBlock, block.getBlockSize() - offsetInBlock, dnInfo.addr, dnInfo.storageType, dnInfo.info);
} catch (IOException e) {
if (e instanceof InvalidEncryptionKeyException && retry.shouldRefetchEncryptionKey()) {
DFSClient.LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + dnInfo.addr + " : " + e);
dfsClient.clearDataEncryptionKey();
retry.refetchEncryptionKey();
} else if (retry.shouldRefetchToken() && tokenRefetchNeeded(e, dnInfo.addr)) {
fetchBlockAt(block.getStartOffset());
retry.refetchToken();
} else {
// TODO: handle connection issues
DFSClient.LOG.warn("Failed to connect to " + dnInfo.addr + " for " + "block " + block.getBlock(), e);
// re-fetch the block in case the block has been moved
fetchBlockAt(block.getStartOffset());
addToDeadNodes(dnInfo.info);
}
}
if (reader != null) {
readerInfos[chunkIndex] = new BlockReaderInfo(reader, dnInfo.info, offsetInBlock);
return true;
}
}
return false;
}
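createBlockReader delegates its one-shot retry budgets to a small ReaderRetryPolicy object instead of raw counters. Below is a minimal sketch exposing the same four methods the snippet above calls; the counter-based internals are an assumption, not the actual Hadoop class.

// Minimal sketch of a retry policy exposing the four methods used above.
// The simple counters are an assumption about the internals; the real
// ReaderRetryPolicy is a private helper inside DFSStripedInputStream.
class ReaderRetryPolicySketch {

  private int fetchEncryptionKeyTimes = 1; // allow one encryption-key refetch
  private int fetchTokenTimes = 1;         // allow one block-token refetch

  boolean shouldRefetchEncryptionKey() {
    return fetchEncryptionKeyTimes > 0;
  }

  boolean shouldRefetchToken() {
    return fetchTokenTimes > 0;
  }

  void refetchEncryptionKey() {
    fetchEncryptionKeyTimes--;
  }

  void refetchToken() {
    fetchTokenTimes--;
  }
}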
use of org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException in project hadoop by apache.
In the class DFSInputStream, the method blockSeekTo.
/**
* Open a DataInputStream to a DataNode so that it can be read from.
* We get the block ID and the IDs of the destination DataNodes from the NameNode at startup.
*/
private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
if (target >= getFileLength()) {
throw new IOException("Attempted to read past end of file");
}
// Will be getting a new BlockReader.
closeCurrentBlockReaders();
//
// Connect to best DataNode for desired Block, with potential offset
//
DatanodeInfo chosenNode;
// only need to get a new access token once
int refetchToken = 1;
// only need to get a new encryption key once
int refetchEncryptionKey = 1;
boolean connectFailedOnce = false;
while (true) {
//
// Compute desired block
//
LocatedBlock targetBlock = getBlockAt(target);
// update current position
this.pos = target;
this.blockEnd = targetBlock.getStartOffset() + targetBlock.getBlockSize() - 1;
this.currentLocatedBlock = targetBlock;
long offsetIntoBlock = target - targetBlock.getStartOffset();
DNAddrPair retval = chooseDataNode(targetBlock, null);
chosenNode = retval.info;
InetSocketAddress targetAddr = retval.addr;
StorageType storageType = retval.storageType;
try {
blockReader = getBlockReader(targetBlock, offsetIntoBlock, targetBlock.getBlockSize() - offsetIntoBlock, targetAddr, storageType, chosenNode);
if (connectFailedOnce) {
DFSClient.LOG.info("Successfully connected to " + targetAddr + " for " + targetBlock.getBlock());
}
return chosenNode;
} catch (IOException ex) {
checkInterrupted(ex);
if (ex instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
DFSClient.LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + targetAddr + " : " + ex);
// The encryption key used is invalid.
refetchEncryptionKey--;
dfsClient.clearDataEncryptionKey();
} else if (refetchToken > 0 && tokenRefetchNeeded(ex, targetAddr)) {
refetchToken--;
fetchBlockAt(target);
} else {
connectFailedOnce = true;
DFSClient.LOG.warn("Failed to connect to " + targetAddr + " for block" + ", add to deadNodes and continue. " + ex, ex);
// Put chosen node into dead list, continue
addToDeadNodes(chosenNode);
}
}
}
}
use of org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException in project hadoop by apache.
In the class DFSInputStream, the method actualGetFromOneDataNode.
/**
* Read data from one DataNode.
*
* @param datanode the datanode from which to read data
* @param block the located block containing the requested data
* @param startInBlk the starting offset within the block
* @param endInBlk the ending offset within the block (inclusive)
* @param buf the given byte buffer into which the data is read
* @param corruptedBlocks map recording list of datanodes with corrupted
* block replica
*/
void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block, final long startInBlk, final long endInBlk, ByteBuffer buf, CorruptedBlocks corruptedBlocks) throws IOException {
DFSClientFaultInjector.get().startFetchFromDatanode();
// only need to get a new access token once
int refetchToken = 1;
// only need to get a new encryption key once
int refetchEncryptionKey = 1;
final int len = (int) (endInBlk - startInBlk + 1);
while (true) {
// cached block locations may have been updated by chooseDataNode()
// or fetchBlockAt(). Always get the latest list of locations at the
// start of the loop.
block = refreshLocatedBlock(block);
BlockReader reader = null;
try {
DFSClientFaultInjector.get().fetchFromDatanodeException();
reader = getBlockReader(block, startInBlk, len, datanode.addr, datanode.storageType, datanode.info);
// Behave exactly like the readAll() call
ByteBuffer tmp = buf.duplicate();
tmp.limit(tmp.position() + len);
tmp = tmp.slice();
int nread = 0;
int ret;
while (true) {
ret = reader.read(tmp);
if (ret <= 0) {
break;
}
nread += ret;
}
buf.position(buf.position() + nread);
IOUtilsClient.updateReadStatistics(readStatistics, nread, reader);
dfsClient.updateFileSystemReadStats(reader.getNetworkDistance(), nread);
if (nread != len) {
throw new IOException("truncated return from reader.read(): " + "excpected " + len + ", got " + nread);
}
DFSClientFaultInjector.get().readFromDatanodeDelay();
return;
} catch (ChecksumException e) {
String msg = "fetchBlockByteRange(). Got a checksum exception for " + src + " at " + block.getBlock() + ":" + e.getPos() + " from " + datanode.info;
DFSClient.LOG.warn(msg);
// we want to remember what we have tried
corruptedBlocks.addCorruptedBlock(block.getBlock(), datanode.info);
addToDeadNodes(datanode.info);
throw new IOException(msg);
} catch (IOException e) {
checkInterrupted(e);
if (e instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
DFSClient.LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + datanode.addr + " : " + e);
// The encryption key used is invalid.
refetchEncryptionKey--;
dfsClient.clearDataEncryptionKey();
} else if (refetchToken > 0 && tokenRefetchNeeded(e, datanode.addr)) {
refetchToken--;
try {
fetchBlockAt(block.getStartOffset());
} catch (IOException fbae) {
// ignore IOE, since we can retry it later in a loop
}
} else {
String msg = "Failed to connect to " + datanode.addr + " for file " + src + " for block " + block.getBlock() + ":" + e;
DFSClient.LOG.warn("Connection failure: " + msg, e);
addToDeadNodes(datanode.info);
throw new IOException(msg);
}
} finally {
if (reader != null) {
reader.close();
}
}
}
}
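The read loop in actualGetFromOneDataNode carves a len-byte window out of the caller's ByteBuffer with duplicate()/limit()/slice(), fills it, and only then advances the original buffer's position. Below is a standalone sketch of that idiom; a ReadableByteChannel stands in for the BlockReader here, which is an assumption for illustration.

// Standalone sketch of the bounded-read idiom from actualGetFromOneDataNode:
// carve a len-byte window out of buf, fill it, then advance buf.position().
// ReadableByteChannel is a stand-in for the BlockReader (an assumption).
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

class BoundedReadExample {

  static int readExactly(ReadableByteChannel reader, ByteBuffer buf, int len)
      throws IOException {
    // duplicate() shares the backing data but has an independent position/limit,
    // so a len-byte window can be sliced off without disturbing the caller's buffer.
    ByteBuffer tmp = buf.duplicate();
    tmp.limit(tmp.position() + len);
    tmp = tmp.slice();

    int nread = 0;
    while (nread < len) {
      int ret = reader.read(tmp);
      if (ret <= 0) {
        break; // EOF or no progress
      }
      nread += ret;
    }
    // Reflect the bytes consumed in the caller's buffer.
    buf.position(buf.position() + nread);
    if (nread != len) {
      throw new IOException("truncated read: expected " + len + ", got " + nread);
    }
    return nread;
  }
}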