Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class RpcClient, method createInputStream.
private OzoneInputStream createInputStream(OmKeyInfo keyInfo, Function<OmKeyInfo, OmKeyInfo> retryFunction) throws IOException {
  // When Key is not MPU, or when Key is MPU and encryption is not enabled.
  // Need to revisit for GDPR.
  FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo();
  if (feInfo == null) {
    LengthInputStream lengthInputStream = KeyInputStream.getFromOmKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction);
    try {
      Map<String, String> keyInfoMetadata = keyInfo.getMetadata();
      if (Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))) {
        GDPRSymmetricKey gk = new GDPRSymmetricKey(keyInfoMetadata.get(OzoneConsts.GDPR_SECRET), keyInfoMetadata.get(OzoneConsts.GDPR_ALGORITHM));
        gk.getCipher().init(Cipher.DECRYPT_MODE, gk.getSecretKey());
        return new OzoneInputStream(new CipherInputStream(lengthInputStream, gk.getCipher()));
      }
    } catch (Exception ex) {
      throw new IOException(ex);
    }
    return new OzoneInputStream(lengthInputStream.getWrappedStream());
  } else if (!keyInfo.getLatestVersionLocations().isMultipartKey()) {
    // Regular Key with FileEncryptionInfo
    LengthInputStream lengthInputStream = KeyInputStream.getFromOmKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction);
    final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
    final CryptoInputStream cryptoIn = new CryptoInputStream(lengthInputStream.getWrappedStream(), OzoneKMSUtil.getCryptoCodec(conf, feInfo), decrypted.getMaterial(), feInfo.getIV());
    return new OzoneInputStream(cryptoIn);
  } else {
    // Multipart Key with FileEncryptionInfo
    List<LengthInputStream> lengthInputStreams = KeyInputStream.getStreamsFromKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction);
    final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
    List<OzoneCryptoInputStream> cryptoInputStreams = new ArrayList<>();
    for (LengthInputStream lengthInputStream : lengthInputStreams) {
      final OzoneCryptoInputStream ozoneCryptoInputStream = new OzoneCryptoInputStream(lengthInputStream, OzoneKMSUtil.getCryptoCodec(conf, feInfo), decrypted.getMaterial(), feInfo.getIV());
      cryptoInputStreams.add(ozoneCryptoInputStream);
    }
    return new MultipartCryptoKeyInputStream(keyInfo.getKeyName(), cryptoInputStreams);
  }
}
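The GDPR branch above is plain JCE: the secret and algorithm stored in the key metadata become a Cipher, and the block stream is wrapped in a CipherInputStream before being handed to OzoneInputStream. Below is a minimal, self-contained sketch of that wrapping pattern using only standard javax.crypto; the secret bytes, the AES/CTR/NoPadding transformation, the zero IV, and the sample plaintext are illustrative assumptions, not values taken from Ozone.

import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class CipherStreamSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical 16-byte secret and cipher settings, standing in for the
    // GDPR_SECRET / GDPR_ALGORITHM metadata consulted by createInputStream.
    byte[] secret = "0123456789abcdef".getBytes(StandardCharsets.UTF_8);
    SecretKeySpec key = new SecretKeySpec(secret, "AES");
    IvParameterSpec iv = new IvParameterSpec(new byte[16]);

    // Encrypt some sample bytes so the decrypting stream has real input.
    Cipher enc = Cipher.getInstance("AES/CTR/NoPadding");
    enc.init(Cipher.ENCRYPT_MODE, key, iv);
    byte[] cipherText = enc.doFinal("sample value".getBytes(StandardCharsets.UTF_8));

    // Decrypt transparently while reading, mirroring how the GDPR branch wraps
    // the block stream in a CipherInputStream.
    Cipher dec = Cipher.getInstance("AES/CTR/NoPadding");
    dec.init(Cipher.DECRYPT_MODE, key, iv);
    try (InputStream in = new CipherInputStream(new ByteArrayInputStream(cipherText), dec)) {
      byte[] buf = new byte[64];
      int n = in.read(buf);
      System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8)); // prints "sample value"
    }
  }
}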
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class RpcClient, method getKeysEveryReplicas.
@Override
public Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> getKeysEveryReplicas(String volumeName, String bucketName, String keyName) throws IOException {
  Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> result = new LinkedHashMap<>();
  verifyVolumeName(volumeName);
  verifyBucketName(bucketName);
  Preconditions.checkNotNull(keyName);
  OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).setSortDatanodesInPipeline(topologyAwareReadEnabled).build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
    Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
    Pipeline pipelineBefore = keyLocationInfo.getPipeline();
    List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
    for (DatanodeDetails dn : datanodes) {
      List<DatanodeDetails> nodes = new ArrayList<>();
      nodes.add(dn);
      Pipeline pipeline = new Pipeline.Builder(pipelineBefore).setNodes(nodes).setId(PipelineID.randomId()).build();
      keyLocationInfo.setPipeline(pipeline);
      List<OmKeyLocationInfo> keyLocationInfoList = new ArrayList<>();
      keyLocationInfoList.add(keyLocationInfo);
      OmKeyLocationInfoGroup keyLocationInfoGroup = new OmKeyLocationInfoGroup(0, keyLocationInfoList);
      List<OmKeyLocationInfoGroup> keyLocationInfoGroups = new ArrayList<>();
      keyLocationInfoGroups.add(keyLocationInfoGroup);
      keyInfo.setKeyLocationVersions(keyLocationInfoGroups);
      OzoneInputStream is = createInputStream(keyInfo, Function.identity());
      blocks.put(dn, is);
    }
    result.put(keyLocationInfo, blocks);
  }
  return result;
}
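The returned map associates each block (OmKeyLocationInfo) with one OzoneInputStream per datanode, each backed by a single-node pipeline, so the same block can be read independently from every replica. A hedged sketch of how a caller might drain and close those streams follows; the variable client (an already-initialized RpcClient/ClientProtocol handle) and the volume, bucket, and key names are assumptions for illustration.

Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas =
    client.getKeysEveryReplicas("vol1", "bucket1", "key1");   // placeholder names

for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block : replicas.entrySet()) {
  for (Map.Entry<DatanodeDetails, OzoneInputStream> replica : block.getValue().entrySet()) {
    // Read the block content served by this particular datanode.
    try (OzoneInputStream is = replica.getValue();
         ByteArrayOutputStream content = new ByteArrayOutputStream()) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = is.read(buf)) != -1) {
        content.write(buf, 0, n);
      }
      // content.toByteArray() can now be compared across replicas, e.g. by checksum.
    }
  }
}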
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneClient, method testPutKeyRatisOneNode.
@Test
public void testPutKeyRatisOneNode() throws IOException {
  Instant testStartTime = Instant.now();
  String value = "sample value";
  OzoneBucket bucket = getOzoneBucket();
  for (int i = 0; i < 10; i++) {
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    Assert.assertEquals(value.length(), is.read(fileContent));
    is.close();
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  }
}
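The test relies on a single is.read(fileContent) call returning the whole value, which it then asserts. In general the InputStream contract allows a short read, so a small loop that fills the buffer (or stops at EOF) is the robust pattern. A minimal sketch, independent of any Ozone-specific API:

// Fill buf completely, or return the number of bytes read before EOF.
static int readFully(InputStream in, byte[] buf) throws IOException {
  int off = 0;
  while (off < buf.length) {
    int n = in.read(buf, off, buf.length - off);
    if (n < 0) {
      break;                 // end of stream before the buffer was filled
    }
    off += n;
  }
  return off;
}

// Usage in place of the single read above (same assertion afterwards):
// byte[] fileContent = new byte[value.getBytes(UTF_8).length];
// Assert.assertEquals(fileContent.length, readFully(is, fileContent));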
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestStorageContainerManagerHA, method testPutKey.
public void testPutKey() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
  OzoneKey key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  OzoneInputStream is = bucket.readKey(keyName);
  byte[] fileContent = new byte[value.getBytes(UTF_8).length];
  is.read(fileContent);
  Assert.assertEquals(value, new String(fileContent, UTF_8));
  Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
  Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  is.close();
  final OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).setKeyName(keyName).setRefreshPipeline(true).build();
  final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
  long index = -1;
  for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
    if (scm.checkLeader()) {
      index = getLastAppliedIndex(scm);
    }
  }
  Assert.assertFalse(index == -1);
  long finalIndex = index;
  // Ensure all follower SCMs have caught up with the leader.
  GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
  final long containerID = keyLocationInfos.get(0).getContainerID();
  for (int k = 0; k < numOfSCMs; k++) {
    StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
    // Flush to DB on each SCM.
    ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
    Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
    Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
  }
}
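The GenericTestUtils.waitFor call above polls areAllScmInSync every 100 ms, for at most 10 s, so every follower SCM reaches the leader's last applied index before the container checks run. The helper below is a plain-Java sketch of that poll-and-timeout pattern; it is an illustrative stand-in, not the Hadoop GenericTestUtils implementation.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

// Poll a condition every checkEveryMillis until it holds or waitForMillis elapses.
static void waitFor(BooleanSupplier condition, long checkEveryMillis, long waitForMillis)
    throws InterruptedException, TimeoutException {
  long deadline = System.currentTimeMillis() + waitForMillis;
  while (!condition.getAsBoolean()) {
    if (System.currentTimeMillis() > deadline) {
      throw new TimeoutException("Condition not met within " + waitForMillis + " ms");
    }
    Thread.sleep(checkEveryMillis);
  }
}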
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneManagerHA, method createKeyTest.
protected void createKeyTest(boolean checkSuccess) throws Exception {
  String userName = "user" + RandomStringUtils.randomNumeric(5);
  String adminName = "admin" + RandomStringUtils.randomNumeric(5);
  String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
  VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
  try {
    getObjectStore().createVolume(volumeName, createVolumeArgs);
    OzoneVolume retVolumeinfo = getObjectStore().getVolume(volumeName);
    Assert.assertTrue(retVolumeinfo.getName().equals(volumeName));
    Assert.assertTrue(retVolumeinfo.getOwner().equals(userName));
    Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName));
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();
    retVolumeinfo.createBucket(bucketName);
    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
    Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName));
    String value = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, value.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
    ozoneOutputStream.close();
    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    ozoneInputStream.read(fileContent);
    Assert.assertEquals(value, new String(fileContent, UTF_8));
  } catch (ConnectException | RemoteException e) {
    if (!checkSuccess) {
      // Failure is expected here: a RemoteException comes from the last running
      // OM, which fails to get a quorum, so assert on its cause.
      if (e instanceof RemoteException) {
        GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
      }
    } else {
      throw e;
    }
  }
}
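The checkSuccess flag lets the same helper serve both the healthy and the degraded case: with a quorum of OMs the round trip must succeed, and without one the expected RemoteException (wrapping OMNotLeaderException) is tolerated and asserted. A hypothetical driver is sketched below; the test name and the stopTwoOfThreeOzoneManagers() helper are illustrative assumptions, not methods from the Ozone test suite.

@Test
public void testCreateKeyWithAndWithoutQuorum() throws Exception {
  createKeyTest(true);             // all OMs up: the write/read round trip must succeed

  stopTwoOfThreeOzoneManagers();   // hypothetical helper that breaks the OM quorum
  createKeyTest(false);            // no quorum: failure is tolerated and its cause asserted
}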