Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class FSNamesystem, method startFileInt.
private HdfsFileStatus startFileInt(String src,
    PermissionStatus permissions, String holder, String clientMachine,
    EnumSet<CreateFlag> flag, boolean createParent, short replication,
    long blockSize, CryptoProtocolVersion[] supportedVersions,
    boolean logRetryCache) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    StringBuilder builder = new StringBuilder();
    builder.append("DIR* NameSystem.startFile: src=").append(src)
        .append(", holder=").append(holder)
        .append(", clientMachine=").append(clientMachine)
        .append(", createParent=").append(createParent)
        .append(", replication=").append(replication)
        .append(", createFlag=").append(flag)
        .append(", blockSize=").append(blockSize)
        .append(", supportedVersions=")
        .append(Arrays.toString(supportedVersions));
    NameNode.stateChangeLog.debug(builder.toString());
  }
  if (!DFSUtil.isValidName(src) ||
      FSDirectory.isExactReservedName(src) ||
      (FSDirectory.isReservedName(src)
          && !FSDirectory.isReservedRawName(src)
          && !FSDirectory.isReservedInodesName(src))) {
    throw new InvalidPathException(src);
  }
  FSPermissionChecker pc = getPermissionChecker();
  INodesInPath iip = null;
  // skip the edit-log sync until we do something that might create edits
  boolean skipSync = true;
  HdfsFileStatus stat = null;
  BlocksMapUpdateInfo toRemoveBlocks = null;

  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create file" + src);

    iip = FSDirWriteFileOp.resolvePathForStartFile(
        dir, pc, src, flag, createParent);

    if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip)) {
      blockManager.verifyReplication(src, replication, clientMachine);
    }

    if (blockSize < minBlockSize) {
      throw new IOException("Specified block size is less than configured" +
          " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
          + "): " + blockSize + " < " + minBlockSize);
    }

    FileEncryptionInfo feInfo = null;
    if (provider != null) {
      EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(
          this, iip, supportedVersions);
      // if the path has an encryption zone, the lock was released while
      // generating the EDEK. Re-resolve the path to ensure the namesystem
      // and/or EZ has not mutated
      if (ezInfo != null) {
        checkOperation(OperationCategory.WRITE);
        iip = FSDirWriteFileOp.resolvePathForStartFile(
            dir, pc, iip.getPath(), flag, createParent);
        feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(dir, iip, ezInfo);
      }
    }

    skipSync = false; // following might generate edits
    toRemoveBlocks = new BlocksMapUpdateInfo();
    dir.writeLock();
    try {
      stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder,
          clientMachine, flag, createParent, replication, blockSize,
          feInfo, toRemoveBlocks, logRetryCache);
    } catch (IOException e) {
      skipSync = e instanceof StandbyException;
      throw e;
    } finally {
      dir.writeUnlock();
    }
  } finally {
    writeUnlock("create");
    // There might be transactions logged while trying to flush out above.
    // They need to be sync'ed even when an exception was thrown.
    if (!skipSync) {
      getEditLog().logSync();
      if (toRemoveBlocks != null) {
        removeBlocks(toRemoveBlocks);
        toRemoveBlocks.clear();
      }
    }
  }
  return stat;
}
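From the client's point of view this entire path is transparent. A minimal sketch of triggering it, assuming a running HDFS cluster with an encryption zone already created at /zone (the zone path and file name are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateInZone {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf);
         // fs.create() reaches FSNamesystem.startFileInt on the NameNode;
         // because /zone is an encryption zone, an EDEK is generated there
         // and attached to the new file as its FileEncryptionInfo.
         FSDataOutputStream out = fs.create(new Path("/zone/file.txt"))) {
      out.writeUTF("hello");
    }
  }
}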
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class PBHelperClient, method convert.
public static FileEncryptionInfo convert(
    HdfsProtos.FileEncryptionInfoProto proto) {
  if (proto == null) {
    return null;
  }
  CipherSuite suite = convert(proto.getSuite());
  CryptoProtocolVersion version = convert(proto.getCryptoProtocolVersion());
  byte[] key = proto.getKey().toByteArray();
  byte[] iv = proto.getIv().toByteArray();
  String ezKeyVersionName = proto.getEzKeyVersionName();
  String keyName = proto.getKeyName();
  return new FileEncryptionInfo(suite, version, key, iv, keyName,
      ezKeyVersionName);
}
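The wire format is symmetric. A hedged sketch of the inverse conversion, assuming the usual FileEncryptionInfo getters and the generated protobuf builder API (written for illustration, not copied from the Hadoop source):

public static HdfsProtos.FileEncryptionInfoProto convertToProto(
    FileEncryptionInfo info) {
  if (info == null) {
    return null;
  }
  // ByteString here is com.google.protobuf.ByteString
  return HdfsProtos.FileEncryptionInfoProto.newBuilder()
      .setSuite(convert(info.getCipherSuite()))
      .setCryptoProtocolVersion(convert(info.getCryptoProtocolVersion()))
      .setKey(ByteString.copyFrom(info.getEncryptedDataEncryptionKey()))
      .setIv(ByteString.copyFrom(info.getIV()))
      .setKeyName(info.getKeyName())
      .setEzKeyVersionName(info.getEzKeyVersionName())
      .build();
}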
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class FSDirEncryptionZoneOp, method getFileEncryptionInfo.
/**
 * If the file and encryption key are valid, return the encryption info;
 * else throw a retry exception. The startFile method generates the EDEK
 * outside of the lock, so the zone must be reverified.
 *
 * @param dir the FSDirectory
 * @param iip inodes in the file path
 * @param ezInfo the pre-generated encryption key info (EDEK)
 * @return FileEncryptionInfo for the file
 * @throws RetryStartFileException if the key is inconsistent with the
 *         current zone
 */
static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
    INodesInPath iip, EncryptionKeyInfo ezInfo)
    throws RetryStartFileException {
  FileEncryptionInfo feInfo = null;
  final EncryptionZone zone = getEZForPath(dir, iip);
  if (zone != null) {
    // The path is now within an EZ, but we're missing encryption parameters
    if (ezInfo == null) {
      throw new RetryStartFileException();
    }
    // Path is within an EZ and we have provided encryption parameters.
    // Make sure that the generated EDEK matches the settings of the EZ.
    final String ezKeyName = zone.getKeyName();
    if (!ezKeyName.equals(ezInfo.edek.getEncryptionKeyName())) {
      throw new RetryStartFileException();
    }
    feInfo = new FileEncryptionInfo(ezInfo.suite, ezInfo.protocolVersion,
        ezInfo.edek.getEncryptedKeyVersion().getMaterial(),
        ezInfo.edek.getEncryptedKeyIv(), ezKeyName,
        ezInfo.edek.getEncryptionKeyVersionName());
  }
  return feInfo;
}
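The RetryStartFileException contract implies a bounded retry loop in the caller: regenerate the EDEK and revalidate the zone until they agree. A hypothetical sketch of that shape (Attempt and MAX_RETRIES are illustrative names, not Hadoop APIs; the real retry handling lives in the NameNode create path):

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException;

final class StartFileRetrySketch {
  private static final int MAX_RETRIES = 10; // illustrative bound

  interface Attempt<T> {
    T run() throws IOException, RetryStartFileException;
  }

  static <T> T withRetry(Attempt<T> attempt) throws IOException {
    for (int i = 0; i < MAX_RETRIES; i++) {
      try {
        // each attempt generates a fresh EDEK, then revalidates the EZ
        return attempt.run();
      } catch (RetryStartFileException e) {
        // zone or key changed while the lock was released; try again
      }
    }
    throw new IOException("too many retries creating file in encryption zone");
  }
}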
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class DFSClient, method createWrappedOutputStream.
/**
* Wraps the stream in a CryptoOutputStream if the underlying file is
* encrypted.
*/
public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
    FileSystem.Statistics statistics, long startPos) throws IOException {
  final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo();
  if (feInfo != null) {
    // File is encrypted, wrap the stream in a crypto stream.
    // Currently only one version, so no special logic based on the version #
    getCryptoProtocolVersion(feInfo);
    final CryptoCodec codec = getCryptoCodec(conf, feInfo);
    KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
    final CryptoOutputStream cryptoOut = new CryptoOutputStream(dfsos,
        codec, decrypted.getMaterial(), feInfo.getIV(), startPos);
    return new HdfsDataOutputStream(cryptoOut, statistics, startPos);
  } else {
    // No FileEncryptionInfo present so no encryption.
    return new HdfsDataOutputStream(dfsos, statistics, startPos);
  }
}
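A hedged sketch of how this wrapper is typically driven, modeled on DistributedFileSystem#create (heavily simplified; the real code resolves symlinks and threads through permissions, flags, and progress callbacks):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;

final class WrappedCreateSketch {
  static FSDataOutputStream create(DFSClient dfs, String src,
      FileSystem.Statistics statistics) throws IOException {
    DFSOutputStream dfsos = dfs.create(src, true); // simple overwrite overload
    // the wrapper inspects the file's FileEncryptionInfo and returns either
    // a crypto-wrapped stream or the raw stream
    return dfs.createWrappedOutputStream(dfsos, statistics, 0);
  }
}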
Use of org.apache.hadoop.fs.FileEncryptionInfo in project hadoop by apache.
The class DFSClient, method createWrappedInputStream.
/**
* Wraps the stream in a CryptoInputStream if the underlying file is
* encrypted.
*/
public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
    throws IOException {
  final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
  if (feInfo != null) {
    // File is encrypted, wrap the stream in a crypto stream.
    // Currently only one version, so no special logic based on the version #
    getCryptoProtocolVersion(feInfo);
    final CryptoCodec codec = getCryptoCodec(conf, feInfo);
    final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
    final CryptoInputStream cryptoIn = new CryptoInputStream(dfsis, codec,
        decrypted.getMaterial(), feInfo.getIV());
    return new HdfsDataInputStream(cryptoIn);
  } else {
    // No FileEncryptionInfo so no encryption.
    return new HdfsDataInputStream(dfsis);
  }
}
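To see concretely what the wrapping buys, here is a self-contained round trip through the same org.apache.hadoop.crypto streams, with a toy all-zero key and IV standing in for the decrypted EDEK material and FileEncryptionInfo#getIV (assuming the default AES/CTR/NoPadding codec):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoOutputStream;

public class CryptoRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    CryptoCodec codec = CryptoCodec.getInstance(conf);
    byte[] key = new byte[16]; // toy key; real key is the decrypted EDEK
    byte[] iv = new byte[16];  // toy IV; real IV comes from FileEncryptionInfo

    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    try (CryptoOutputStream out =
        new CryptoOutputStream(sink, codec, key, iv)) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
    }
    try (CryptoInputStream in = new CryptoInputStream(
        new ByteArrayInputStream(sink.toByteArray()), codec, key, iv)) {
      byte[] buf = new byte[5];
      int n = in.read(buf);
      System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8));
    }
  }
}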