Usage of org.apache.hadoop.ozone.client.OzoneVolume in the Apache Ozone project:
class TestOzoneRpcClientAbstract, method testListMultipartUploadPartsWithContinuation.
@ParameterizedTest
@MethodSource("replicationConfigs")
void testListMultipartUploadPartsWithContinuation(ReplicationConfig replication) throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  // Upload three parts and remember each server-assigned part name,
  // keyed by part number, for verification against the listing below.
  Map<Integer, String> expectedPartNames = new TreeMap<>();
  String uploadID = initiateMultipartUpload(bucket, keyName, replication);
  for (int partNumber = 1; partNumber <= 3; partNumber++) {
    String partName = uploadPart(bucket, keyName, uploadID, partNumber,
        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97));
    expectedPartNames.put(partNumber, partName);
  }
  // First page: start at marker 0, request at most two parts.
  OzoneMultipartUploadPartListParts partPage = bucket.listParts(keyName, uploadID, 0, 2);
  Assert.assertEquals(replication, partPage.getReplicationConfig());
  Assert.assertEquals(2, partPage.getPartInfoList().size());
  for (int i = 0; i < 2; i++) {
    int partNumber = partPage.getPartInfoList().get(i).getPartNumber();
    Assert.assertEquals(expectedPartNames.get(partNumber),
        partPage.getPartInfoList().get(i).getPartName());
  }
  // Get remaining
  Assert.assertTrue(partPage.isTruncated());
  partPage = bucket.listParts(keyName, uploadID, partPage.getNextPartNumberMarker(), 2);
  Assert.assertEquals(1, partPage.getPartInfoList().size());
  int lastPartNumber = partPage.getPartInfoList().get(0).getPartNumber();
  Assert.assertEquals(expectedPartNames.get(lastPartNumber),
      partPage.getPartInfoList().get(0).getPartName());
  // As we don't have any parts for this, we should get false here
  Assert.assertFalse(partPage.isTruncated());
}
Usage of org.apache.hadoop.ozone.client.OzoneVolume in the Apache Ozone project:
class TestOzoneRpcClientAbstract, method testNativeAclsForKey.
@Test
public void testNativeAclsForKey() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String key1 = "dir1/dir2" + UUID.randomUUID().toString();
  String key2 = "dir1/dir2" + UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  assertNotNull("Bucket creation failed", bucket);
  writeKey(key1, bucket);
  writeKey(key2, bucket);
  OzoneObj keyObj = new OzoneObjInfo.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(key1)
      .setResType(OzoneObj.ResourceType.KEY)
      .setStoreType(OzoneObj.StoreType.OZONE)
      .build();
  // Validates access acls.
  validateOzoneAccessAcl(keyObj);
  // Check default acls inherited from bucket.
  OzoneObj bucketObj = new OzoneObjInfo.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(key1)
      .setResType(OzoneObj.ResourceType.BUCKET)
      .setStoreType(OzoneObj.StoreType.OZONE)
      .build();
  validateDefaultAcls(bucketObj, keyObj, null, bucket);
  // Check default acls inherited from prefix.
  OzoneObj prefixObj = new OzoneObjInfo.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(key1)
      .setPrefixName("dir1/")
      .setResType(OzoneObj.ResourceType.PREFIX)
      .setStoreType(OzoneObj.StoreType.OZONE)
      .build();
  store.setAcl(prefixObj, getAclList(new OzoneConfiguration()));
  // Prefix should inherit DEFAULT acl from bucket.
  List<OzoneAcl> acls = store.getAcl(prefixObj);
  assertTrue("Current acls:" + StringUtils.join(",", acls), acls.contains(inheritedUserAcl));
  assertTrue("Current acls:" + StringUtils.join(",", acls), acls.contains(inheritedGroupAcl));
  // Remove inherited acls from prefix.
  assertTrue(store.removeAcl(prefixObj, inheritedUserAcl));
  assertTrue(store.removeAcl(prefixObj, inheritedGroupAcl));
  validateDefaultAcls(prefixObj, keyObj, null, bucket);
}
Usage of org.apache.hadoop.ozone.client.OzoneVolume in the Apache Ozone project:
class TestOzoneRpcClientAbstract, method testGetKeyDetails.
@Test
public void testGetKeyDetails() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  String keyValue = RandomStringUtils.random(128);
  // Create the initial key with size 0; the write allocates the first block.
  // try-with-resources guarantees the streams are closed even on assertion
  // failure (the original code leaked both streams).
  try (OzoneOutputStream out = bucket.createKey(keyName,
      keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>())) {
    out.write(keyValue.getBytes(UTF_8));
  }
  byte[] fileContent = new byte[32];
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    // Do not ignore the read() result: a return of -1/0 here would mean the
    // key is unreadable and should fail the test immediately.
    int bytesRead = is.read(fileContent);
    Assert.assertTrue("Expected to read some data from the key", bytesRead > 0);
  }
  // First, confirm the key info from the client matches the info in OM.
  OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
  builder.setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true);
  OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build())
      .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
  long containerID = keyInfo.getContainerID();
  long localID = keyInfo.getLocalID();
  OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
  Assert.assertEquals(keyName, keyDetails.getName());
  List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
  Assert.assertEquals(1, keyLocations.size());
  Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
  Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
  // Make sure that the data size matched.
  Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength());
  // Second, sum the data size from chunks in Container via containerID
  // and localID, make sure the size equals to the size from keyDetails.
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  // JUnit convention: expected value first (original had the args reversed).
  Assert.assertEquals(1, datanodes.size());
  DatanodeDetails datanodeDetails = datanodes.get(0);
  Assert.assertNotNull(datanodeDetails);
  HddsDatanodeService datanodeService = null;
  for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
    if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
      datanodeService = datanodeServiceItr;
      break;
    }
  }
  // Fail with a clear message instead of an NPE if the datanode is missing.
  Assert.assertNotNull("No matching datanode service found", datanodeService);
  KeyValueContainerData containerData =
      (KeyValueContainerData) (datanodeService.getDatanodeStateMachine()
          .getContainer().getContainerSet().getContainer(containerID)
          .getContainerData());
  try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf());
      BlockIterator<BlockData> keyValueBlockIterator =
          db.getStore().getBlockIterator(containerID)) {
    while (keyValueBlockIterator.hasNext()) {
      BlockData blockData = keyValueBlockIterator.nextBlock();
      if (blockData.getBlockID().getLocalID() == localID) {
        // Sum all chunk lengths of the matching block; it must equal the
        // byte length of the data originally written.
        long length = 0;
        List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
        for (ContainerProtos.ChunkInfo chunk : chunks) {
          length += chunk.getLen();
        }
        Assert.assertEquals(keyValue.getBytes(UTF_8).length, length);
        break;
      }
    }
  }
}
Usage of org.apache.hadoop.ozone.client.OzoneVolume in the Apache Ozone project:
class TestOzoneRpcClientAbstract, method testCreateVolumeWithMetadata.
@Test
public void testCreateVolumeWithMetadata() throws IOException, OzoneClientException {
  String volumeName = UUID.randomUUID().toString();
  // Attach a single metadata entry at creation time.
  VolumeArgs args = VolumeArgs.newBuilder()
      .addMetadata("key1", "val1")
      .build();
  store.createVolume(volumeName, args);
  // Read the volume back and verify quota defaults, metadata and name.
  OzoneVolume createdVolume = store.getVolume(volumeName);
  Assert.assertEquals(OzoneConsts.QUOTA_RESET, createdVolume.getQuotaInNamespace());
  Assert.assertEquals(OzoneConsts.QUOTA_RESET, createdVolume.getQuotaInBytes());
  Assert.assertEquals("val1", createdVolume.getMetadata().get("key1"));
  Assert.assertEquals(volumeName, createdVolume.getName());
}
Usage of org.apache.hadoop.ozone.client.OzoneVolume in the Apache Ozone project:
class TestOzoneRpcClientAbstract, method bucketUsedBytesTestHelper.
/**
 * Verifies that a bucket's usedBytes accounting is correct for the given
 * layout: it reflects the written key size after create and overwrite
 * (including overwrite with over-allocated blocks) and drops to zero on
 * delete.
 *
 * @param bucketLayout layout of the bucket under test
 * @throws IOException on any client/OM failure
 */
private void bucketUsedBytesTestHelper(BucketLayout bucketLayout) throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
      OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
  String value = "sample value";
  int valueLength = value.getBytes(UTF_8).length;
  store.createVolume(volumeName);
  // Declare at the point of assignment instead of a redundant null init.
  OzoneVolume volume = store.getVolume(volumeName);
  BucketArgs bucketArgs = BucketArgs.newBuilder().setBucketLayout(bucketLayout).build();
  volume.createBucket(bucketName, bucketArgs);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  // Initial write: usedBytes equals the key size.
  writeKey(bucket, keyName, ONE, value, valueLength);
  Assert.assertEquals(valueLength,
      store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
  // Overwrite with the same size: usedBytes must not double-count.
  writeKey(bucket, keyName, ONE, value, valueLength);
  Assert.assertEquals(valueLength,
      store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
  // Pre-allocate more blocks than needed; only the committed size counts.
  int fakeValueLength = valueLength + blockSize;
  writeKey(bucket, keyName, ONE, value, fakeValueLength);
  Assert.assertEquals(valueLength,
      store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
  // Deleting the key must release all accounted bytes.
  bucket.deleteKey(keyName);
  Assert.assertEquals(0L,
      store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
}
Aggregations