Use of org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey in project hadoop by apache.
The class TestSaslDataTransfer, method testPeerFromSocketAndKeyReadTimeout.
/**
 * Verifies that peerFromSocketAndKey honors socket read timeouts.
 */
@Test(timeout = 60000)
public void testPeerFromSocketAndKeyReadTimeout() throws Exception {
  HdfsConfiguration conf = createSecureConfig("authentication,integrity,privacy");
  AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
  SaslDataTransferClient saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuth);
  DatanodeID fakeDatanodeId = new DatanodeID("127.0.0.1", "localhost",
      "beefbeef-beef-beef-beef-beefbeefbeef", 1, 2, 3, 4);
  DataEncryptionKeyFactory dataEncKeyFactory = new DataEncryptionKeyFactory() {
    @Override
    public DataEncryptionKey newDataEncryptionKey() {
      return new DataEncryptionKey(123, "456", new byte[8], new byte[8],
          1234567, "fakeAlgorithm");
    }
  };
  ServerSocket serverSocket = null;
  Socket socket = null;
  try {
    serverSocket = new ServerSocket(0, -1);
    socket = new Socket(serverSocket.getInetAddress(), serverSocket.getLocalPort());
    Peer peer = DFSUtilClient.peerFromSocketAndKey(saslClient, socket,
        dataEncKeyFactory, new Token(), fakeDatanodeId, 1);
    peer.close();
    Assert.fail("Expected DFSClient#peerFromSocketAndKey to time out.");
  } catch (SocketTimeoutException e) {
    GenericTestUtils.assertExceptionContains("Read timed out", e);
  } finally {
    IOUtils.cleanup(null, socket, serverSocket);
  }
}
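The DataEncryptionKey constructor above takes its fields positionally, which makes the fake key hard to read at a glance. A minimal sketch mapping the arguments to the key's public fields; the field names come from the other snippets on this page, and the printed line is purely illustrative:

  // Sketch: the positional constructor arguments map to public fields on
  // DataEncryptionKey, which later snippets on this page read directly
  // (e.g. key.encryptionKey, key.blockPoolId, key.keyId).
  DataEncryptionKey key = new DataEncryptionKey(
      123,                // keyId
      "456",              // blockPoolId
      new byte[8],        // nonce
      new byte[8],        // encryptionKey (raw key material)
      1234567,            // expiryDate, in milliseconds since the epoch
      "fakeAlgorithm");   // encryptionAlgorithm
  System.out.println("key " + key.keyId + " for block pool " + key.blockPoolId);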
Use of org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey in project hadoop by apache.
The class ClientNamenodeProtocolServerSideTranslatorPB, method getDataEncryptionKey.
@Override
public GetDataEncryptionKeyResponseProto getDataEncryptionKey(RpcController controller,
    GetDataEncryptionKeyRequestProto request) throws ServiceException {
  try {
    GetDataEncryptionKeyResponseProto.Builder builder =
        GetDataEncryptionKeyResponseProto.newBuilder();
    DataEncryptionKey encryptionKey = server.getDataEncryptionKey();
    if (encryptionKey != null) {
      builder.setDataEncryptionKey(PBHelperClient.convert(encryptionKey));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
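Because the server only sets the field when encryption is enabled, a client-side translator must check for its presence before converting. A sketch of the inverse direction, assuming the usual protobuf translator pattern; rpcProxy and the ServiceException unwrapping are assumptions modeled on other translator methods:

  // Sketch of the client-side counterpart: the optional field in the
  // response is checked with hasDataEncryptionKey() before conversion,
  // mirroring the null check on the server side above.
  @Override
  public DataEncryptionKey getDataEncryptionKey() throws IOException {
    try {
      GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
          null, GetDataEncryptionKeyRequestProto.newBuilder().build());
      return rsp.hasDataEncryptionKey()
          ? PBHelperClient.convert(rsp.getDataEncryptionKey()) : null;
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }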
Use of org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutputSaslHelper, method trySaslNegotiate.
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
    Promise<Void> saslPromise) throws IOException {
  SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
  SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
  TrustedChannelResolver trustedChannelResolver =
      SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
  AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
  InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
  if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
    saslPromise.trySuccess(null);
    return;
  }
  DataEncryptionKey encryptionKey = client.newDataEncryptionKey();
  if (encryptionKey != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client doing encrypted handshake for addr = " + addr
          + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs,
        getUserNameFromEncryptionKey(encryptionKey),
        encryptionKeyToPassword(encryptionKey.encryptionKey),
        createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise);
  } else if (!UserGroupInformation.isSecurityEnabled()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in unsecured configuration for addr = "
          + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (dnInfo.getXferPort() < 1024) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with privileged "
          + "port for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with unsecured "
          + "cluster for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (saslPropsResolver != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client doing general handshake for addr = " + addr
          + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
        buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr),
        saslPromise);
  } else {
    // Rare edge case: a secured cluster using non-privileged ports but with no
    // SASL protection configured.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
          + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  }
}
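The encrypted-handshake branch derives the SASL username and password from the DataEncryptionKey. A sketch of what those two helpers plausibly look like, modeled on Hadoop's SaslDataTransferClient; the delimiter, the commons-codec Base64 variant, and the charset are assumptions:

  import java.nio.charset.StandardCharsets;
  import org.apache.commons.codec.binary.Base64;
  import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

  // Sketch of the encryption-key helpers: the SASL "username" encodes key id,
  // block pool id, and nonce, and the "password" is the Base64-encoded key
  // material, so both sides can derive matching credentials from the key.
  final class EncryptionKeySaslHelpers {
    private static final String NAME_DELIMITER = " "; // assumed separator

    static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) {
      return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId
          + NAME_DELIMITER
          + new String(Base64.encodeBase64(encryptionKey.nonce, false),
              StandardCharsets.UTF_8);
    }

    static char[] encryptionKeyToPassword(byte[] encryptionKey) {
      return new String(Base64.encodeBase64(encryptionKey, false),
          StandardCharsets.UTF_8).toCharArray();
    }
  }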
Use of org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey in project hadoop by apache.
The class SaslDataTransferClient, method newSocketSend.
/**
 * Sends client SASL negotiation for a newly allocated socket if required.
 *
 * @param socket connection socket
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param encryptionKeyFactory for creation of an encryption key
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut,
    InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException {
  // The encryption key factory only returns a key if encryption is enabled.
  DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted()
      ? encryptionKeyFactory.newDataEncryptionKey() : null;
  IOStreamPair ios = send(socket.getInetAddress(), underlyingOut, underlyingIn,
      encryptionKey, accessToken, datanodeId);
  return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
}
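A typical caller hands the socket's own streams to newSocketSend and then uses whichever pair comes back. A minimal usage sketch; dnHost, dnXferPort, keyFactory, accessToken, and datanodeId are placeholders assumed to be set up elsewhere:

  // Usage sketch: if SASL negotiation wraps the streams, the returned
  // IOStreamPair holds the wrapped ones; otherwise it carries the original
  // socket streams back unchanged, so the caller treats both cases alike.
  Socket sock = new Socket(dnHost, dnXferPort);
  IOStreamPair ios = saslClient.newSocketSend(sock, sock.getOutputStream(),
      sock.getInputStream(), keyFactory, accessToken, datanodeId);
  InputStream in = ios.in;
  OutputStream out = ios.out;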
Use of org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey in project hadoop by apache.
The class TestEncryptedTransfer, method testLongLivedClientPipelineRecovery.
@Test
public void testLongLivedClientPipelineRecovery()
    throws IOException, InterruptedException, TimeoutException {
  if (resolverClazz != null) {
    // TestTrustedChannelResolver does not use encryption keys.
    return;
  }
  // Use 4 datanodes to make sure that after 1 datanode is stopped,
  // the client only retries establishing the pipeline with the 4th node.
  int numDataNodes = 4;
  // Do not consider load factor when selecting a datanode.
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
  fs = getFileSystem(conf);
  DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyClient = Mockito.spy(client);
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
  writeTestDataToFile(fs);
  BlockTokenSecretManager btsm =
      cluster.getNamesystem().getBlockManager().getBlockTokenSecretManager();
  // Reduce key update interval and token lifetime for testing.
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
  // Wait until the encryption key becomes invalid.
  LOG.info("Wait until encryption keys become invalid...");
  DataEncryptionKey encryptionKey = spyClient.getEncryptionKey();
  List<DataNode> dataNodes = cluster.getDataNodes();
  for (DataNode dn : dataNodes) {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return !dn.getBlockPoolTokenSecretManager()
            .get(encryptionKey.blockPoolId).hasKey(encryptionKey.keyId);
      }
    }, 100, 30 * 1000);
  }
  LOG.info("The encryption key is invalid on all nodes now.");
  try (FSDataOutputStream out = fs.append(TEST_PATH)) {
    DFSOutputStream dfstream = (DFSOutputStream) out.getWrappedStream();
    // Shut down the first datanode in the pipeline.
    DatanodeInfo[] targets = dfstream.getPipeline();
    cluster.stopDataNode(targets[0].getXferAddr());
    // Write data to induce pipeline recovery.
    out.write(PLAIN_TEXT.getBytes());
    out.hflush();
    assertFalse("The first datanode in the pipeline was not replaced.",
        Arrays.asList(dfstream.getPipeline()).contains(targets[0]));
  }
  // Verify that InvalidEncryptionKeyException is handled properly.
  Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
}
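On Java 8, the anonymous Supplier in the wait loop above collapses to a lambda, since dn and encryptionKey are effectively final. An equivalent form of the loop, shown as a sketch:

  // Equivalent wait loop with a lambda in place of the anonymous
  // Supplier<Boolean>; behavior is identical.
  for (DataNode dn : dataNodes) {
    GenericTestUtils.waitFor(
        () -> !dn.getBlockPoolTokenSecretManager()
            .get(encryptionKey.blockPoolId).hasKey(encryptionKey.keyId),
        100, 30 * 1000);
  }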