Use of org.apache.hadoop.ozone.client.OzoneKeyDetails in the Apache Ozone project.
Class RpcClient, method getKeyDetails.
/**
 * Looks up a key in the Ozone Manager and converts its metadata into a
 * client-facing {@link OzoneKeyDetails}, including the per-block locations.
 *
 * @param volumeName volume containing the key; must not be null
 * @param bucketName bucket containing the key; must not be null
 * @param keyName    name of the key to look up; must not be null
 * @return details of the key, with block locations of the latest version
 * @throws IOException if the Ozone Manager lookup fails
 */
@Override
public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName,
    String keyName) throws IOException {
  Preconditions.checkNotNull(volumeName);
  Preconditions.checkNotNull(bucketName);
  Preconditions.checkNotNull(keyName);

  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .setSortDatanodesInPipeline(topologyAwareReadEnabled)
      .setLatestVersionLocation(getLatestVersionLocation)
      .build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);

  // Translate OM block locations into client-facing locations, accumulating
  // each block's starting offset within the whole key as we go.
  List<OzoneKeyLocation> locations = new ArrayList<>();
  long offsetInKey = 0L;
  for (OmKeyLocationInfo block :
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly()) {
    locations.add(new OzoneKeyLocation(block.getContainerID(),
        block.getLocalID(), block.getLength(), block.getOffset(),
        offsetInKey));
    offsetInKey += block.getLength();
  }

  return new OzoneKeyDetails(keyInfo.getVolumeName(), keyInfo.getBucketName(),
      keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(),
      keyInfo.getModificationTime(), locations, keyInfo.getReplicationConfig(),
      keyInfo.getMetadata(), keyInfo.getFileEncryptionInfo());
}
Use of org.apache.hadoop.ozone.client.OzoneKeyDetails in the Apache Ozone project.
Class TestObjectStoreWithFSO, method testLookupKey.
/**
 * Verifies key lookup semantics for FSO buckets: an open (uncommitted) key is
 * not visible via getKey, becomes visible after close, and disappears again
 * after delete. Also checks the corresponding openFileTable/fileTable state
 * transitions in the Ozone Manager metadata.
 */
@Test
public void testLookupKey() throws Exception {
  String parent = "a/b/c/";
  String fileName = "key" + RandomStringUtils.randomNumeric(5);
  String key = parent + fileName;
  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
  // assertEquals(expected, actual) gives a meaningful message on failure,
  // unlike assertTrue(a.equals(b)).
  Assert.assertEquals(volumeName, ozoneVolume.getName());
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  Assert.assertEquals(bucketName, ozoneBucket.getName());
  Table<String, OmKeyInfo> openFileTable = cluster.getOzoneManager().getMetadataManager().getOpenKeyTable(getBucketLayout());
  String data = "random data";
  // Use the UTF-8 byte length consistently for both the declared key size
  // and the write length (String.length() counts chars, not bytes).
  byte[] dataBytes = data.getBytes(StandardCharsets.UTF_8);
  OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, dataBytes.length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  KeyOutputStream keyOutputStream = (KeyOutputStream) ozoneOutputStream.getOutputStream();
  long clientID = keyOutputStream.getClientID();
  OmDirectoryInfo dirPathC = getDirInfo(parent);
  Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
  // After file creation the open key must be present in openFileTable.
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), false);
  ozoneOutputStream.write(dataBytes, 0, dataBytes.length);
  // While the key is still open it must not be visible to getKey.
  try {
    ozoneBucket.getKey(key);
    fail("Should throw exception as fileName is not visible and its still " + "open for writing!");
  } catch (OMException ome) {
    // expected; JUnit convention is assertEquals(expected, actual)
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
  }
  ozoneOutputStream.close();
  OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
  Assert.assertEquals(key, keyDetails.getName());
  Table<String, OmKeyInfo> fileTable = cluster.getOzoneManager().getMetadataManager().getKeyTable(getBucketLayout());
  // When closing the key, entry should be removed from openFileTable
  // and it should be added to fileTable.
  verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
  ozoneBucket.deleteKey(key);
  // A deleted key must no longer be visible to getKey.
  try {
    ozoneBucket.getKey(key);
    fail("Should throw exception as fileName not exists!");
  } catch (OMException ome) {
    // expected
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
  }
  // After delete the key must be gone from both tables.
  verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
}
Use of org.apache.hadoop.ozone.client.OzoneKeyDetails in the Apache Ozone project.
Class InfoKeyHandler, method execute.
/**
 * Fetches the details of the key addressed by {@code address} and prints
 * them as JSON, with GDPR-sensitive metadata stripped from the printed copy.
 *
 * @param client  Ozone client used to resolve the volume/bucket/key
 * @param address parsed Ozone address (volume, bucket, key)
 * @throws IOException if the key lookup fails
 */
@Override
protected void execute(OzoneClient client, OzoneAddress address)
    throws IOException {
  OzoneVolume volume =
      client.getObjectStore().getVolume(address.getVolumeName());
  OzoneBucket bucket = volume.getBucket(address.getBucketName());
  OzoneKeyDetails key = bucket.getKey(address.getKeyName());

  // For compliance/security, GDPR Secret & Algorithm details are removed
  // from local copy of metadata before printing. This doesn't remove these
  // from Ozone Manager's actual metadata.
  Map<String, String> metadata = key.getMetadata();
  metadata.remove(OzoneConsts.GDPR_SECRET);
  metadata.remove(OzoneConsts.GDPR_ALGORITHM);

  printObjectAsJson(key);
}
Use of org.apache.hadoop.ozone.client.OzoneKeyDetails in the Apache Ozone project.
Class TestOzoneRpcClientAbstract, method testDeletedKeyForGDPR.
/**
 * Tests deletedKey for GDPR.
 * 1. Create GDPR Enabled bucket.
 * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
 * 3. Read key and validate the content/metadata is as expected because the
 * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo
 * Metadata.
 * 4. Delete this key in GDPR enabled bucket
 * 5. Confirm the deleted key metadata in deletedTable does not contain the
 * GDPR encryption details (flag, secret, algorithm).
 * @throws Exception
 */
@Test
public void testDeletedKeyForGDPR() throws Exception {
  // Step 1: create a GDPR-enabled bucket.
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  BucketArgs args = BucketArgs.newBuilder().addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
  volume.createBucket(bucketName, args);
  OzoneBucket bucket = volume.getBucket(bucketName);
  Assert.assertEquals(bucketName, bucket.getName());
  Assert.assertNotNull(bucket.getMetadata());
  Assert.assertEquals("true", bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));
  // Step 2: write a key; try-with-resources guarantees the stream is closed
  // (and the key committed) even if write() throws.
  String text = "hello world";
  byte[] textBytes = text.getBytes(UTF_8);
  Map<String, String> keyMetadata = new HashMap<>();
  keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
  try (OzoneOutputStream out = bucket.createKey(keyName, textBytes.length, RATIS, ONE, keyMetadata)) {
    out.write(textBytes);
  }
  // Step 3: read back and validate content and GDPR metadata.
  OzoneKeyDetails key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
  Assert.assertEquals("AES", key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
  Assert.assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET));
  byte[] fileContent = new byte[textBytes.length];
  // Close the input stream (the original leaked it) and loop on read():
  // a single read() call is not guaranteed to fill the buffer.
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    int off = 0;
    while (off < fileContent.length) {
      int n = is.read(fileContent, off, fileContent.length - off);
      Assert.assertTrue("Unexpected EOF while reading key content", n > 0);
      off += n;
    }
  }
  Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, RATIS, ONE));
  Assert.assertEquals(text, new String(fileContent, UTF_8));
  // Step 4: delete the key.
  bucket.deleteKey(keyName);
  // Step 5: if the deleted entry is still in deletedTable (it may already
  // have been purged by the background service), its metadata must not
  // carry any GDPR encryption details.
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
  RepeatedOmKeyInfo deletedKeys = omMetadataManager.getDeletedTable().get(objectKey);
  if (deletedKeys != null) {
    Map<String, String> deletedKeyMetadata = deletedKeys.getOmKeyInfoList().get(0).getMetadata();
    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG));
    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET));
    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM));
  }
}
Use of org.apache.hadoop.ozone.client.OzoneKeyDetails in the Apache Ozone project.
Class TestOzoneRpcClientAbstract, method createAndCorruptKey.
/**
 * Creates a volume/bucket/key with sample content, locates the container
 * holding the key's first block on one of the datanodes, and corrupts the
 * on-disk data for that key.
 *
 * @param volumeName volume to create
 * @param bucketName bucket to create inside the volume
 * @param keyName    key to write and then corrupt
 * @throws IOException if any client or datanode operation fails
 */
private void createAndCorruptKey(String volumeName, String bucketName, String keyName) throws IOException {
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  // Write data into a key. Use the UTF-8 byte length for the declared size,
  // and try-with-resources so the stream is closed (and the key committed)
  // even if write() throws.
  byte[] valueBytes = value.getBytes(UTF_8);
  try (OzoneOutputStream out = bucket.createKey(keyName, valueBytes.length, ReplicationType.RATIS, ONE, new HashMap<>())) {
    out.write(valueBytes);
  }
  // We need to find the location of the chunk file corresponding to the
  // data we just wrote.
  OzoneKey key = bucket.getKey(keyName);
  long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
  // Get the container by traversing the datanodes. At least one of the
  // datanodes must have this container.
  Container container = null;
  for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
    container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
    if (container != null) {
      break;
    }
  }
  Assert.assertNotNull("Container not found", container);
  corruptData(container, key);
}
Aggregations