Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class TestEncryptionZones, method testReadWrite.
@Test
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  cluster.getNamesystem().getProvider().invalidateCache(keyName);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different",
      Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
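The test compares FileEncryptionInfo across a key roll via a getFileEncryptionInfo helper that is not shown above. A minimal sketch of how such a helper could look, assuming fs is the test's DistributedFileSystem field; the use of DFSClient.getLocatedBlocks here is an illustration, not necessarily the exact helper in TestEncryptionZones:

// Hypothetical helper: the NameNode attaches the per-file encryption info
// to the LocatedBlocks it returns, so fetching the blocks at offset 0 is
// enough to read it back. Assumes fs is a DistributedFileSystem.
private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
  return fs.getClient().getLocatedBlocks(path.toString(), 0)
      .getFileEncryptionInfo();
}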
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutputSaslHelper, method createTransparentCryptoHelper.
private static TransparentCryptoHelper createTransparentCryptoHelper()
    throws NoSuchMethodException {
  Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class
      .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
  decryptEncryptedDataEncryptionKeyMethod.setAccessible(true);
  return new TransparentCryptoHelper() {

    @Override
    public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
        DFSClient client) throws IOException {
      try {
        KeyVersion decryptedKey = (KeyVersion)
            decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo);
        CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite());
        Encryptor encryptor = cryptoCodec.createEncryptor();
        encryptor.init(decryptedKey.getMaterial(), feInfo.getIV());
        return encryptor;
      } catch (InvocationTargetException e) {
        Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
        throw new RuntimeException(e.getTargetException());
      } catch (GeneralSecurityException e) {
        throw new IOException(e);
      } catch (IllegalAccessException e) {
        throw new RuntimeException(e);
      }
    }
  };
}
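The reflection is needed because decryptEncryptedDataEncryptionKey is not public on DFSClient. For context, a hedged sketch of what the invoked method does, written against the public KeyProviderCryptoExtension API (class and method names in this sketch are illustrative, not the verbatim HDFS source): the EDEK carried in the FileEncryptionInfo is handed to the KeyProvider, which decrypts it against the encryption zone key version that produced it.

import java.io.IOException;
import java.security.GeneralSecurityException;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.FileEncryptionInfo;

public final class EdekDecryption {
  // Sketch: decrypt the encrypted data encryption key (EDEK) so its material
  // can be fed to an Encryptor, as in createEncryptor above.
  static KeyVersion decryptEdek(KeyProvider provider, FileEncryptionInfo feInfo)
      throws IOException {
    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
        feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(),
        feInfo.getEncryptedDataEncryptionKey());
    try {
      return KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider)
          .decryptEncryptedKey(ekv);
    } catch (GeneralSecurityException e) {
      throw new IOException(e);
    }
  }
}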
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class PBHelperClient, method convert.
public static FileEncryptionInfo convert(HdfsProtos.PerFileEncryptionInfoProto fileProto,
    CipherSuite suite, CryptoProtocolVersion version, String keyName) {
  if (fileProto == null || suite == null || version == null || keyName == null) {
    return null;
  }
  byte[] key = fileProto.getKey().toByteArray();
  byte[] iv = fileProto.getIv().toByteArray();
  String ezKeyVersionName = fileProto.getEzKeyVersionName();
  return new FileEncryptionInfo(suite, version, key, iv, keyName, ezKeyVersionName);
}
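The reverse mapping is symmetric; PBHelperClient carries a comparable convertPerFileEncInfo method. A hedged, simplified sketch of that direction (note that recent Hadoop releases shade protobuf under org.apache.hadoop.thirdparty, so the ByteString import varies by version):

import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

// Sketch: serialize only the per-file fields (EDEK, IV, EZ key version name);
// the cipher suite, protocol version, and key name live at the zone level,
// which is why convert() above takes them as separate arguments.
static HdfsProtos.PerFileEncryptionInfoProto convertPerFileEncInfo(FileEncryptionInfo info) {
  if (info == null) {
    return null;
  }
  return HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
      .setKey(ByteString.copyFrom(info.getEncryptedDataEncryptionKey()))
      .setIv(ByteString.copyFrom(info.getIV()))
      .setEzKeyVersionName(info.getEzKeyVersionName())
      .build();
}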
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class FSDirStatAndListingOp, method createFileStatus.
/**
 * Create an HDFS file status from an IIP.
 *
 * @param fsd the FSDirectory
 * @param iip the INodesInPath containing the INodeFile and its ancestors
 * @param child the child node for a directory listing of the iip, else null
 * @param storagePolicy the storage policy for the path or closest ancestor
 * @param needLocation whether block locations need to be included or not
 * @return a file status
 * @throws java.io.IOException if any error occurs
 */
private static HdfsFileStatus createFileStatus(FSDirectory fsd, INodesInPath iip,
    INode child, byte storagePolicy, boolean needLocation) throws IOException {
  assert fsd.hasReadLock();
  // only directory listing sets the status name.
  byte[] name = HdfsFileStatus.EMPTY_NAME;
  if (child != null) {
    name = child.getLocalNameBytes();
    // have to do this for EC and EZ lookups...
    iip = INodesInPath.append(iip, child, name);
  }
  // length is zero for directories
  long size = 0;
  short replication = 0;
  long blocksize = 0;
  final INode node = iip.getLastINode();
  final int snapshot = iip.getPathSnapshotId();
  LocatedBlocks loc = null;
  final boolean isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
  FileEncryptionInfo feInfo = null;
  final ErasureCodingPolicy ecPolicy =
      FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
  if (node.isFile()) {
    final INodeFile fileNode = node.asFile();
    size = fileNode.computeFileSize(snapshot);
    replication = fileNode.getFileReplication(snapshot);
    blocksize = fileNode.getPreferredBlockSize();
    if (isEncrypted) {
      feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
    }
    if (needLocation) {
      final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
      final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
      final long fileSize = !inSnapshot && isUc
          ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
      loc = fsd.getBlockManager().createLocatedBlocks(fileNode.getBlocks(snapshot),
          fileSize, isUc, 0L, size, false, inSnapshot, feInfo, ecPolicy);
      if (loc == null) {
        loc = new LocatedBlocks();
      }
    }
  }
  int childrenNum = node.isDirectory()
      ? node.asDirectory().getChildrenNum(snapshot) : 0;
  INodeAttributes nodeAttrs = fsd.getAttributes(iip);
  return createFileStatus(size, node.isDirectory(), replication, blocksize,
      node.getModificationTime(snapshot), node.getAccessTime(snapshot),
      getPermissionForFileStatus(nodeAttrs, isEncrypted),
      nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
      node.isSymlink() ? node.asSymlink().getSymlink() : null,
      name, node.getId(), childrenNum, feInfo, storagePolicy, ecPolicy, loc);
}
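On the client side, the feInfo packed into the returned status is directly accessible from HdfsFileStatus. A small hedged usage sketch, assuming an already-constructed DFSClient (the method name printEncryptionInfo is illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Sketch: read back the per-file encryption info attached by createFileStatus.
static void printEncryptionInfo(DFSClient client, String path) throws IOException {
  HdfsFileStatus stat = client.getFileInfo(path);
  FileEncryptionInfo feInfo = stat == null ? null : stat.getFileEncryptionInfo();
  if (feInfo != null) {
    System.out.println("cipher=" + feInfo.getCipherSuite()
        + ", ezKeyVersion=" + feInfo.getEzKeyVersionName());
  }
}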
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class FSDirStatAndListingOp, method getBlockLocations.
/**
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
* @throws IOException
*/
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd, FSPermissionChecker pc,
    String src, long offset, long length, boolean needBlockToken) throws IOException {
  Preconditions.checkArgument(offset >= 0,
      "Negative offset is not supported. File: " + src);
  Preconditions.checkArgument(length >= 0,
      "Negative length is not supported. File: " + src);
  BlockManager bm = fsd.getBlockManager();
  fsd.readLock();
  try {
    final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
    src = iip.getPath();
    final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ);
      fsd.checkUnreadableBySuperuser(pc, iip);
    }
    final long fileSize = iip.isSnapshot()
        ? inode.computeFileSize(iip.getPathSnapshotId())
        : inode.computeFileSizeNotIncludingLastUcBlock();
    boolean isUc = inode.isUnderConstruction();
    if (iip.isSnapshot()) {
      // if src indicates a snapshot file, we need to make sure the returned
      // blocks do not exceed the size of the snapshot file.
      length = Math.min(length, fileSize - offset);
      isUc = false;
    }
    final FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
    final ErasureCodingPolicy ecPolicy =
        FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
    final LocatedBlocks blocks = bm.createLocatedBlocks(
        inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, length,
        needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
    final long now = now();
    boolean updateAccessTime = fsd.isAccessTimeSupported() && !iip.isSnapshot()
        && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
    return new GetBlockLocationsResult(updateAccessTime, blocks);
  } finally {
    fsd.readUnlock();
  }
}
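The feInfo attached to the LocatedBlocks here is what lets the HDFS client decrypt reads transparently (in practice DFSClient.createWrappedInputStream performs the wrapping). A hedged sketch of that consumption path, assuming the EDEK has already been decrypted as in the earlier sketch; the direct CryptoInputStream construction below is illustrative, not the exact client code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;

// Sketch: wrap the raw stream with a decrypting stream, driven by the
// FileEncryptionInfo the NameNode attached via getBlockLocations.
static CryptoInputStream openDecrypting(Configuration conf, DFSClient client,
    String src, KeyVersion decryptedKey) throws IOException {
  DFSInputStream raw = client.open(src);
  FileEncryptionInfo feInfo = raw.getFileEncryptionInfo();
  CryptoCodec codec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite());
  return new CryptoInputStream(raw, codec,
      decryptedKey.getMaterial(), feInfo.getIV());
}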