Example 6 with FileEncryptionInfo

Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.

Class TestEncryptionZones, method testReadWrite.

@Test
public void testReadWrite() throws Exception {
    final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // Create a base file for comparison
    final Path baseFile = new Path("/base");
    final int len = 8192;
    DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
    // Create the first enc file
    final Path zone = new Path("/zone");
    fs.mkdirs(zone);
    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
    final Path encFile1 = new Path(zone, "myfile");
    DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
    // Read them back in and compare byte-by-byte
    verifyFilesEqual(fs, baseFile, encFile1, len);
    // Roll the key of the encryption zone
    assertNumZones(1);
    String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
    cluster.getNamesystem().getProvider().rollNewVersion(keyName);
    cluster.getNamesystem().getProvider().invalidateCache(keyName);
    // Read them back in and compare byte-by-byte
    verifyFilesEqual(fs, baseFile, encFile1, len);
    // Write a new enc file and validate
    final Path encFile2 = new Path(zone, "myfile2");
    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
    // FEInfos should be different
    FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
    FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
    assertFalse("EDEKs should be different", Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(), feInfo2.getEncryptedDataEncryptionKey()));
    assertNotEquals("Key was rolled, versions should be different", feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
    // Contents still equal
    verifyFilesEqual(fs, encFile1, encFile2, len);
}
Also used : Path(org.apache.hadoop.fs.Path) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) Mockito.anyString(org.mockito.Mockito.anyString) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) Test(org.junit.Test)
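
The test calls a getFileEncryptionInfo(Path) helper defined elsewhere in TestEncryptionZones. As a hedged sketch only (the real helper may differ), such a helper could fetch the metadata through the DistributedFileSystem's DFSClient, since the NameNode attaches the per-file encryption info to the LocatedBlocks it returns:

private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
    // One round trip through the DFSClient is enough: the NameNode ships
    // the FileEncryptionInfo alongside the located blocks.
    LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0);
    return blocks.getFileEncryptionInfo();
}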

Example 7 with FileEncryptionInfo

Use of org.apache.hadoop.fs.FileEncryptionInfo in project hbase by apache.

Class FanOutOneBlockAsyncDFSOutputSaslHelper, method createTransparentCryptoHelper.

private static TransparentCryptoHelper createTransparentCryptoHelper() throws NoSuchMethodException {
    // DFSClient.decryptEncryptedDataEncryptionKey is not public, so HBase
    // reaches it via reflection and makes it accessible.
    Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class.getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
    decryptEncryptedDataEncryptionKeyMethod.setAccessible(true);
    return new TransparentCryptoHelper() {

        @Override
        public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) throws IOException {
            try {
                KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo);
                CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite());
                Encryptor encryptor = cryptoCodec.createEncryptor();
                encryptor.init(decryptedKey.getMaterial(), feInfo.getIV());
                return encryptor;
            } catch (InvocationTargetException e) {
                Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
                throw new RuntimeException(e.getTargetException());
            } catch (GeneralSecurityException e) {
                throw new IOException(e);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Configuration(org.apache.hadoop.conf.Configuration) KeyVersion(org.apache.hadoop.crypto.key.KeyProvider.KeyVersion) GeneralSecurityException(java.security.GeneralSecurityException) Encryptor(org.apache.hadoop.crypto.Encryptor) Method(java.lang.reflect.Method) IOException(java.io.IOException) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) InvocationTargetException(java.lang.reflect.InvocationTargetException) CryptoCodec(org.apache.hadoop.crypto.CryptoCodec)
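
As a hedged usage sketch, the returned Encryptor can be driven directly on ByteBuffers. Here helper, conf, feInfo, and client are assumed to be in scope, ByteBuffer and StandardCharsets come from java.nio and java.nio.charset, and direct buffers are used because the OpenSSL-backed codec implementation requires them:

Encryptor encryptor = helper.createEncryptor(conf, feInfo, client);
byte[] plaintext = "hello".getBytes(StandardCharsets.UTF_8);
// Direct buffers: the native (OpenSSL) cipher only accepts direct ByteBuffers.
ByteBuffer inBuf = ByteBuffer.allocateDirect(plaintext.length);
ByteBuffer outBuf = ByteBuffer.allocateDirect(plaintext.length);
inBuf.put(plaintext);
inBuf.flip();
// AES-CTR encrypt in one shot; outBuf now holds the ciphertext.
encryptor.encrypt(inBuf, outBuf);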

Example 8 with FileEncryptionInfo

Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.

Class PBHelperClient, method convert.

public static FileEncryptionInfo convert(HdfsProtos.PerFileEncryptionInfoProto fileProto, CipherSuite suite, CryptoProtocolVersion version, String keyName) {
    if (fileProto == null || suite == null || version == null || keyName == null) {
        return null;
    }
    // The "key" field carries the encrypted data encryption key (EDEK).
    byte[] key = fileProto.getKey().toByteArray();
    byte[] iv = fileProto.getIv().toByteArray();
    String ezKeyVersionName = fileProto.getEzKeyVersionName();
    return new FileEncryptionInfo(suite, version, key, iv, keyName, ezKeyVersionName);
}
Also used : ByteString(com.google.protobuf.ByteString) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo)
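
PBHelperClient also converts in the opposite direction. A minimal sketch of such an inverse, assuming the standard builder methods protobuf generates for the key, iv, and ezKeyVersionName fields read above:

public static HdfsProtos.PerFileEncryptionInfoProto convertPerFileEncInfo(FileEncryptionInfo info) {
    if (info == null) {
        return null;
    }
    // Mirror of convert(): write the three per-file fields back into the proto.
    return HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
        .setKey(ByteString.copyFrom(info.getEncryptedDataEncryptionKey()))
        .setIv(ByteString.copyFrom(info.getIV()))
        .setEzKeyVersionName(info.getEzKeyVersionName())
        .build();
}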

Example 9 with FileEncryptionInfo

Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.

Class FSDirStatAndListingOp, method createFileStatus.

/**
   * Create an HdfsFileStatus from an INodesInPath.
   *
   * @param fsd the FSDirectory
   * @param iip the INodesInPath containing the INodeFile and its ancestors
   * @param child the child INode when building a directory listing entry, else null
   * @param storagePolicy the storage policy for the path or its closest ancestor
   * @param needLocation whether block locations should be included
   * @return a file status
   * @throws java.io.IOException if any error occurs
   */
private static HdfsFileStatus createFileStatus(FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy, boolean needLocation) throws IOException {
    assert fsd.hasReadLock();
    // only directory listing sets the status name.
    byte[] name = HdfsFileStatus.EMPTY_NAME;
    if (child != null) {
        name = child.getLocalNameBytes();
        // have to do this for EC and EZ lookups...
        iip = INodesInPath.append(iip, child, name);
    }
    // length is zero for directories
    long size = 0;
    short replication = 0;
    long blocksize = 0;
    final INode node = iip.getLastINode();
    final int snapshot = iip.getPathSnapshotId();
    LocatedBlocks loc = null;
    final boolean isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
    FileEncryptionInfo feInfo = null;
    final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
    if (node.isFile()) {
        final INodeFile fileNode = node.asFile();
        size = fileNode.computeFileSize(snapshot);
        replication = fileNode.getFileReplication(snapshot);
        blocksize = fileNode.getPreferredBlockSize();
        if (isEncrypted) {
            feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        }
        if (needLocation) {
            final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
            final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
            final long fileSize = !inSnapshot && isUc ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
            loc = fsd.getBlockManager().createLocatedBlocks(fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo, ecPolicy);
            if (loc == null) {
                loc = new LocatedBlocks();
            }
        }
    }
    int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.getAttributes(iip);
    return createFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), node.getAccessTime(snapshot), getPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.getUserName(), nodeAttrs.getGroupName(), node.isSymlink() ? node.asSymlink().getSymlink() : null, name, node.getId(), childrenNum, feInfo, storagePolicy, ecPolicy, loc);
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo)
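
Client code that receives the resulting status can read the same FileEncryptionInfo back. A small illustrative sketch, where status is assumed to be an HdfsFileStatus obtained from the NameNode:

FileEncryptionInfo feInfo = status.getFileEncryptionInfo();
if (feInfo != null) {
    // Only files inside an encryption zone carry encryption info.
    System.out.println("cipher suite:   " + feInfo.getCipherSuite());
    System.out.println("EZ key version: " + feInfo.getEzKeyVersionName());
}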

Example 10 with FileEncryptionInfo

Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.

Class FSDirStatAndListingOp, method getBlockLocations.

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   * @throws IOException if the path cannot be resolved or access is denied
   */
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd, FSPermissionChecker pc, String src, long offset, long length, boolean needBlockToken) throws IOException {
    Preconditions.checkArgument(offset >= 0, "Negative offset is not supported. File: " + src);
    Preconditions.checkArgument(length >= 0, "Negative length is not supported. File: " + src);
    BlockManager bm = fsd.getBlockManager();
    fsd.readLock();
    try {
        final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
        src = iip.getPath();
        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ);
            fsd.checkUnreadableBySuperuser(pc, iip);
        }
        final long fileSize = iip.isSnapshot() ? inode.computeFileSize(iip.getPathSnapshotId()) : inode.computeFileSizeNotIncludingLastUcBlock();
        boolean isUc = inode.isUnderConstruction();
        if (iip.isSnapshot()) {
            // if src indicates a snapshot file, we need to make sure the returned
            // blocks do not exceed the size of the snapshot file.
            length = Math.min(length, fileSize - offset);
            isUc = false;
        }
        final FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
        final LocatedBlocks blocks = bm.createLocatedBlocks(inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
        final long now = now();
        boolean updateAccessTime = fsd.isAccessTimeSupported() && !iip.isSnapshot() && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
        return new GetBlockLocationsResult(updateAccessTime, blocks);
    } finally {
        fsd.readUnlock();
    }
}
Also used : BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo)
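
On the reading path, the feInfo carried by the returned LocatedBlocks is what lets a client wrap the raw block stream for transparent decryption. A hedged sketch, where blocks, rawIn (the underlying block input stream), conf, and decryptedKey (the EDEK after KMS decryption) are all assumptions for illustration:

FileEncryptionInfo feInfo = blocks.getFileEncryptionInfo();
InputStream in = rawIn;
if (feInfo != null) {
    CryptoCodec codec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite());
    // CryptoInputStream decrypts transparently as the caller reads.
    in = new CryptoInputStream(rawIn, codec, decryptedKey.getMaterial(), feInfo.getIV());
}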

Aggregations (usage counts across the indexed examples)

FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo) 11
CryptoCodec (org.apache.hadoop.crypto.CryptoCodec) 3
KeyVersion (org.apache.hadoop.crypto.key.KeyProvider.KeyVersion) 3
ByteString (com.google.protobuf.ByteString) 2
IOException (java.io.IOException) 2
EncryptedKeyVersion (org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion) 2
Path (org.apache.hadoop.fs.Path) 2
HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin) 2
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) 2
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 2
Test (org.junit.Test) 2
Mockito.anyString (org.mockito.Mockito.anyString) 2
InvocationTargetException (java.lang.reflect.InvocationTargetException) 1
Method (java.lang.reflect.Method) 1
URI (java.net.URI) 1
GeneralSecurityException (java.security.GeneralSecurityException) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
CipherSuite (org.apache.hadoop.crypto.CipherSuite) 1
CryptoInputStream (org.apache.hadoop.crypto.CryptoInputStream) 1
CryptoOutputStream (org.apache.hadoop.crypto.CryptoOutputStream) 1