Use of org.apache.hadoop.crypto.CryptoProtocolVersion in project hadoop by apache.
The class FSDirEncryptionZoneOp, method getEncryptionKeyInfo.
/**
* If the file is in an encryption zone, we optimistically create an
* EDEK for the file by calling out to the configured KeyProvider.
* Since this typically involves doing an RPC, the fsn lock is yielded.
*
* Since the path can flip between being in an encryption zone and not
* in the meantime, the caller MUST re-resolve the IIP and re-check
* preconditions if this method does not return null.
*
* @param fsn the namesystem.
* @param iip the inodes for the path
* @param supportedVersions client's supported versions
* @return EncryptionKeyInfo if the path is in an EZ, else null
*/
static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
    INodesInPath iip, CryptoProtocolVersion[] supportedVersions)
    throws IOException {
  FSDirectory fsd = fsn.getFSDirectory();
  // Nothing to do if the path is not within an EZ
  final EncryptionZone zone = getEZForPath(fsd, iip);
  if (zone == null) {
    EncryptionFaultInjector.getInstance().startFileNoKey();
    return null;
  }
  CryptoProtocolVersion protocolVersion =
      fsn.chooseProtocolVersion(zone, supportedVersions);
  CipherSuite suite = zone.getSuite();
  String ezKeyName = zone.getKeyName();
  Preconditions.checkNotNull(protocolVersion);
  Preconditions.checkNotNull(suite);
  Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
      "Chose an UNKNOWN CipherSuite!");
  Preconditions.checkNotNull(ezKeyName);
  // Generate EDEK while not holding the fsn lock.
  fsn.writeUnlock();
  try {
    EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey();
    return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName,
        generateEncryptedDataEncryptionKey(fsd, ezKeyName));
  } finally {
    fsn.writeLock();
    EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
  }
}
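The javadoc contract above is worth spelling out at the call site: because the write lock is dropped around the KeyProvider RPC, anything resolved from the path beforehand is stale when the method returns. Below is a minimal caller-side sketch of that pattern; the helpers resolvePathForWrite and recheckPreconditions are hypothetical stand-ins, since the actual FSNamesystem caller is not shown here.

// Sketch only, illustrating the re-resolve contract from the javadoc;
// this is not the actual FSNamesystem code.
EncryptionKeyInfo ezInfo =
    FSDirEncryptionZoneOp.getEncryptionKeyInfo(fsn, iip, supportedVersions);
if (ezInfo != null) {
  // The fsn write lock was released and re-acquired inside the call, so
  // the old IIP may no longer match the namespace: resolve the path again
  // and re-verify the preconditions before using the EDEK.
  iip = resolvePathForWrite(fsd, src);  // hypothetical re-resolution
  recheckPreconditions(fsn, iip);       // hypothetical re-validation
}
// Only now is it safe to consume ezInfo (protocol version, suite, EDEK).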
Use of org.apache.hadoop.crypto.CryptoProtocolVersion in project hadoop by apache.
The class TestDFSClientRetries, method testNotYetReplicatedErrors.
// more tests related to different failure cases can be added here.
/**
* Verify that the client will correctly give up after the specified number
* of attempts to add a block.
*/
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException {
  final String exceptionMsg = "Nope, not replicated yet...";
  // Allow one retry (total of two calls)
  final int maxRetries = 1;
  conf.setInt(
      HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
      maxRetries);
  NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
  Answer<Object> answer = new ThrowsException(new IOException()) {
    int retryCount = 0;

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      retryCount++;
      System.out.println("addBlock has been called " + retryCount + " times");
      // The first call was not a retry, hence maxRetries + 1 calls in total.
      if (retryCount > maxRetries + 1) {
        throw new IOException("Retried too many times: " + retryCount);
      } else {
        throw new RemoteException(NotReplicatedYetException.class.getName(),
            exceptionMsg);
      }
    }
  };
  when(mockNN.addBlock(anyString(), anyString(), any(ExtendedBlock.class),
      any(DatanodeInfo[].class), anyLong(), any(String[].class),
      Matchers.<EnumSet<AddBlockFlag>>any())).thenAnswer(answer);
  Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0],
      new byte[0], 1010, 0, null, (byte) 0, null))
      .when(mockNN).getFileInfo(anyString());
  Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0],
      new byte[0], 1010, 0, null, (byte) 0, null))
      .when(mockNN).create(anyString(), (FsPermission) anyObject(),
          anyString(), (EnumSetWritable<CreateFlag>) anyObject(),
          anyBoolean(), anyShort(), anyLong(),
          (CryptoProtocolVersion[]) anyObject());
  final DFSClient client = new DFSClient(null, mockNN, conf, null);
  OutputStream os = client.create("testfile", true);
  // write one random byte
  os.write(20);
  try {
    os.close();
  } catch (Exception e) {
    assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
        e.getMessage().equals(exceptionMsg));
  }
}
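For context, the loop this test drives lives in the client's block-allocation path. The following is a simplified sketch reconstructed from the behavior the test asserts, not copied from DFSOutputStream; the real loop also sleeps with backoff between attempts.

// Simplified sketch of the client-side retry loop under test (assumed
// shape, not the actual DFSOutputStream code).
int retries = conf.getInt(
    HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
    HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
while (true) {
  try {
    return namenode.addBlock(src, clientName, previous, excludedNodes,
        fileId, favoredNodes, allocFlags);
  } catch (RemoteException e) {
    // unwrapRemoteException returns the wrapped NotReplicatedYetException,
    // or the RemoteException itself for any other remote error.
    IOException ue =
        e.unwrapRemoteException(NotReplicatedYetException.class);
    if (ue != e && --retries > 0) {
      // The previous block is not yet minimally replicated: retry
      // (the sleep/backoff is omitted from this sketch).
      continue;
    }
    throw ue; // retries exhausted, or a non-retriable remote error
  }
}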
Use of org.apache.hadoop.crypto.CryptoProtocolVersion in project hadoop by apache.
The class TestLease, method testFactory.
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
  final String[] groups = new String[] { "supergroup" };
  final UserGroupInformation[] ugi = new UserGroupInformation[3];
  for (int i = 0; i < ugi.length; i++) {
    ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
  }
  Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0],
      new byte[0], 1010, 0, null, (byte) 0, null))
      .when(mcp).getFileInfo(anyString());
  Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0],
      new byte[0], 1010, 0, null, (byte) 0, null))
      .when(mcp).create(anyString(), (FsPermission) anyObject(),
          anyString(), (EnumSetWritable<CreateFlag>) anyObject(),
          anyBoolean(), anyShort(), anyLong(),
          (CryptoProtocolVersion[]) anyObject());
  final Configuration conf = new Configuration();
  // Two clients created as the same user share one LeaseRenewer.
  final DFSClient c1 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out1 = createFsOut(c1, "/out1");
  final DFSClient c2 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out2 = createFsOut(c2, "/out2");
  Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
  // A client created as a different user gets its own LeaseRenewer.
  final DFSClient c3 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out3 = createFsOut(c3, "/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
  final DFSClient c4 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out4 = createFsOut(c4, "/out4");
  Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
  final DFSClient c5 = createDFSClientAs(ugi[2], conf);
  FSDataOutputStream out5 = createFsOut(c5, "/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
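The test leans on a createDFSClientAs helper defined elsewhere in TestLease. A sketch of what such a helper looks like, assuming the mocked ClientProtocol field mcp and the DFSClient constructor seen in the previous snippet:

// Sketch of the assumed helper: each DFSClient is constructed inside the
// given user's security context via UserGroupInformation.doAs, which is
// why same-user clients share a LeaseRenewer and different users do not.
static DFSClient createDFSClientAs(UserGroupInformation ugi,
    final Configuration conf) throws Exception {
  return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
    @Override
    public DFSClient run() throws Exception {
      return new DFSClient(null, mcp, conf, null);
    }
  });
}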