Search in sources :

Example 6 with CryptoProtocolVersion

Use of org.apache.hadoop.crypto.CryptoProtocolVersion in the Apache Hadoop project.

From the class FSDirEncryptionZoneOp, method getEncryptionKeyInfo:

/**
   * Optimistically generates an EDEK for a file that lies inside an
   * encryption zone by calling out to the configured KeyProvider.
   * Because key generation typically involves an RPC, the fsn write lock
   * is released while that call is in flight.
   *
   * Since the path may move into or out of an encryption zone while the
   * lock is released, the caller MUST re-resolve the IIP and re-check all
   * preconditions whenever this method returns a non-null result.
   *
   * @param fsn the namesystem.
   * @param iip the inodes for the path
   * @param supportedVersions client's supported versions
   * @return EncryptionKeyInfo if the path is in an EZ, else null
   */
static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn, INodesInPath iip, CryptoProtocolVersion[] supportedVersions) throws IOException {
    final FSDirectory dir = fsn.getFSDirectory();
    // Paths outside any encryption zone need no EDEK.
    final EncryptionZone ez = getEZForPath(dir, iip);
    if (ez == null) {
        EncryptionFaultInjector.getInstance().startFileNoKey();
        return null;
    }
    final CryptoProtocolVersion version = fsn.chooseProtocolVersion(ez, supportedVersions);
    final CipherSuite cipherSuite = ez.getSuite();
    final String keyName = ez.getKeyName();
    Preconditions.checkNotNull(version);
    Preconditions.checkNotNull(cipherSuite);
    Preconditions.checkArgument(!cipherSuite.equals(CipherSuite.UNKNOWN), "Chose an UNKNOWN CipherSuite!");
    Preconditions.checkNotNull(keyName);
    // Drop the fsn write lock while the (potentially slow) EDEK is generated;
    // the finally block re-acquires it before returning to the caller.
    fsn.writeUnlock();
    try {
        EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey();
        return new EncryptionKeyInfo(version, cipherSuite, keyName, generateEncryptedDataEncryptionKey(dir, keyName));
    } finally {
        fsn.writeLock();
        EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
    }
}
Also used : EncryptionZone(org.apache.hadoop.hdfs.protocol.EncryptionZone) CryptoProtocolVersion(org.apache.hadoop.crypto.CryptoProtocolVersion) CipherSuite(org.apache.hadoop.crypto.CipherSuite)

Example 7 with CryptoProtocolVersion

Use of org.apache.hadoop.crypto.CryptoProtocolVersion in the Apache Hadoop project.

From the class TestDFSClientRetries, method testNotYetReplicatedErrors:

// more tests related to different failure cases can be added here.
/**
   * Verify that client will correctly give up after the specified number
   * of times trying to add a block
   */
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException {
    final String exceptionMsg = "Nope, not replicated yet...";
    // Allow one retry (total of two calls)
    final int maxRetries = 1;
    conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
    NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
    Answer<Object> answer = new ThrowsException(new IOException()) {

        int retryCount = 0;

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            retryCount++;
            System.out.println("addBlock has been called " + retryCount + " times");
            if (// First call was not a retry
            retryCount > maxRetries + 1)
                throw new IOException("Retried too many times: " + retryCount);
            else
                throw new RemoteException(NotReplicatedYetException.class.getName(), exceptionMsg);
        }
    };
    when(mockNN.addBlock(anyString(), anyString(), any(ExtendedBlock.class), any(DatanodeInfo[].class), anyLong(), any(String[].class), Matchers.<EnumSet<AddBlockFlag>>any())).thenAnswer(answer);
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
    final DFSClient client = new DFSClient(null, mockNN, conf, null);
    OutputStream os = client.create("testfile", true);
    // write one random byte
    os.write(20);
    try {
        os.close();
    } catch (Exception e) {
        assertTrue("Retries are not being stopped correctly: " + e.getMessage(), e.getMessage().equals(exceptionMsg));
    }
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) ThrowsException(org.mockito.internal.stubbing.answers.ThrowsException) CryptoProtocolVersion(org.apache.hadoop.crypto.CryptoProtocolVersion) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) ThrowsException(org.mockito.internal.stubbing.answers.ThrowsException) ChecksumException(org.apache.hadoop.fs.ChecksumException) FileNotFoundException(java.io.FileNotFoundException) NotReplicatedYetException(org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException) SocketTimeoutException(java.net.SocketTimeoutException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) InvocationOnMock(org.mockito.invocation.InvocationOnMock) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Matchers.anyObject(org.mockito.Matchers.anyObject) FsPermission(org.apache.hadoop.fs.permission.FsPermission) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)

Example 8 with CryptoProtocolVersion

Use of org.apache.hadoop.crypto.CryptoProtocolVersion in the Apache Hadoop project.

From the class TestLease, method testFactory:

@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
    // LeaseRenewer instances are expected to be shared by clients of the
    // same user and distinct across users.
    final String[] groups = new String[] { "supergroup" };
    final UserGroupInformation[] ugi = new UserGroupInformation[3];
    for (int i = 0; i < ugi.length; i++) {
        ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
    }
    // Stub out metadata and create calls on the mocked ClientProtocol so
    // file creation succeeds without a real namenode.
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mcp).create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
    final Configuration conf = new Configuration();
    // Two clients for user0 must share a single renewer.
    final DFSClient firstUser0Client = createDFSClientAs(ugi[0], conf);
    FSDataOutputStream firstUser0Stream = createFsOut(firstUser0Client, "/out1");
    final DFSClient secondUser0Client = createDFSClientAs(ugi[0], conf);
    FSDataOutputStream secondUser0Stream = createFsOut(secondUser0Client, "/out2");
    Assert.assertEquals(firstUser0Client.getLeaseRenewer(), secondUser0Client.getLeaseRenewer());
    // A client for user1 gets a different renewer than user0's clients...
    final DFSClient firstUser1Client = createDFSClientAs(ugi[1], conf);
    FSDataOutputStream firstUser1Stream = createFsOut(firstUser1Client, "/out3");
    Assert.assertTrue(firstUser0Client.getLeaseRenewer() != firstUser1Client.getLeaseRenewer());
    // ...but shares it with a second client for the same user1.
    final DFSClient secondUser1Client = createDFSClientAs(ugi[1], conf);
    FSDataOutputStream secondUser1Stream = createFsOut(secondUser1Client, "/out4");
    Assert.assertEquals(firstUser1Client.getLeaseRenewer(), secondUser1Client.getLeaseRenewer());
    // user2's renewer is distinct from both user0's and user1's.
    final DFSClient user2Client = createDFSClientAs(ugi[2], conf);
    FSDataOutputStream user2Stream = createFsOut(user2Client, "/out5");
    Assert.assertTrue(firstUser0Client.getLeaseRenewer() != user2Client.getLeaseRenewer());
    Assert.assertTrue(firstUser1Client.getLeaseRenewer() != user2Client.getLeaseRenewer());
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) Configuration(org.apache.hadoop.conf.Configuration) CryptoProtocolVersion(org.apache.hadoop.crypto.CryptoProtocolVersion) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Matchers.anyString(org.mockito.Matchers.anyString) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Aggregations

CryptoProtocolVersion (org.apache.hadoop.crypto.CryptoProtocolVersion)8 CipherSuite (org.apache.hadoop.crypto.CipherSuite)4 IOException (java.io.IOException)2 CreateFlag (org.apache.hadoop.fs.CreateFlag)2 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)2 XAttr (org.apache.hadoop.fs.XAttr)2 FsPermission (org.apache.hadoop.fs.permission.FsPermission)2 EncryptionZone (org.apache.hadoop.hdfs.protocol.EncryptionZone)2 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)2 Test (org.junit.Test)2 Matchers.anyString (org.mockito.Matchers.anyString)2 ByteString (com.google.protobuf.ByteString)1 InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException)1 FileNotFoundException (java.io.FileNotFoundException)1 OutputStream (java.io.OutputStream)1 SocketTimeoutException (java.net.SocketTimeoutException)1 Configuration (org.apache.hadoop.conf.Configuration)1 ChecksumException (org.apache.hadoop.fs.ChecksumException)1 FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)1 UnknownCryptoProtocolVersionException (org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException)1