Use of org.apache.hadoop.ozone.client.OzoneVolume in project ozone by apache.
The class TestObjectStoreWithFSO, method testListKeysAtDifferentLevels.
/**
 * Verify listKeys at different levels.
 *
 *                        buck-1
 *                          |
 *                          a
 *                          |
 *       ---------------------------------------
 *       |                   |                 |
 *       b1                  b2                b3
 *    -------           -----------       ------------
 *    |     |           |    |    |       |    |     |
 *    c1    c2          d1   d2   d3      e1   e2    e3
 *    |     |           |    |    |       |    |     |
 *  c1.tx  c2.tx     d11.tx  |  d31.tx    | e21.tx e31.tx
 *                     -----------        |
 *                     |         |        |
 *                  d21.tx    d22.tx   e11.tx
 *
 * Above is the FS tree structure.
 */
@Test
public void testListKeysAtDifferentLevels() throws Exception {
OzoneClient client = cluster.getClient();
ObjectStore objectStore = client.getObjectStore();
OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
Assert.assertEquals(volumeName, ozoneVolume.getName());
OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
Assert.assertEquals(bucketName, ozoneBucket.getName());
String keyc1 = "/a/b1/c1/c1.tx";
String keyc2 = "/a/b1/c2/c2.tx";
String keyd11 = "/a/b2/d1/d11.tx";
String keyd21 = "/a/b2/d2/d21.tx";
String keyd22 = "/a/b2/d2/d22.tx";
String keyd31 = "/a/b2/d3/d31.tx";
String keye11 = "/a/b3/e1/e11.tx";
String keye21 = "/a/b3/e2/e21.tx";
String keye31 = "/a/b3/e3/e31.tx";
LinkedList<String> keys = new LinkedList<>();
keys.add(keyc1);
keys.add(keyc2);
keys.add(keyd11);
keys.add(keyd21);
keys.add(keyd22);
keys.add(keyd31);
keys.add(keye11);
keys.add(keye21);
keys.add(keye31);
createKeys(ozoneBucket, keys);
// Root level listing keys
Iterator<? extends OzoneKey> ozoneKeyIterator = ozoneBucket.listKeys(null, null);
verifyFullTreeStructure(ozoneKeyIterator);
ozoneKeyIterator = ozoneBucket.listKeys("a/", null);
verifyFullTreeStructure(ozoneKeyIterator);
LinkedList<String> expectedKeys;
// Intermediate level keyPrefix - 2nd level (a deliberately non-normalized path)
ozoneKeyIterator = ozoneBucket.listKeys("a///b2///", null);
expectedKeys = new LinkedList<>();
expectedKeys.add("a/b2/");
expectedKeys.add("a/b2/d1/");
expectedKeys.add("a/b2/d2/");
expectedKeys.add("a/b2/d3/");
expectedKeys.add("a/b2/d1/d11.tx");
expectedKeys.add("a/b2/d2/d21.tx");
expectedKeys.add("a/b2/d2/d22.tx");
expectedKeys.add("a/b2/d3/d31.tx");
checkKeyList(ozoneKeyIterator, expectedKeys);
// Intermediate level keyPrefix - 3rd level
ozoneKeyIterator = ozoneBucket.listKeys("a/b2/d1", null);
expectedKeys = new LinkedList<>();
expectedKeys.add("a/b2/d1/");
expectedKeys.add("a/b2/d1/d11.tx");
checkKeyList(ozoneKeyIterator, expectedKeys);
// Boundary of a level
ozoneKeyIterator = ozoneBucket.listKeys("a/b2/d2", "a/b2/d2/d21.tx");
expectedKeys = new LinkedList<>();
expectedKeys.add("a/b2/d2/d22.tx");
checkKeyList(ozoneKeyIterator, expectedKeys);
// Boundary case - last node in the depth-first-traversal
ozoneKeyIterator = ozoneBucket.listKeys("a/b3/e3", "a/b3/e3/e31.tx");
expectedKeys = new LinkedList<>();
checkKeyList(ozoneKeyIterator, expectedKeys);
}
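The helper methods createKeys, verifyFullTreeStructure, and checkKeyList are defined elsewhere in TestObjectStoreWithFSO and are not shown on this page. A minimal sketch of two of them, assuming each key is written with a small fixed payload and the listing is compared in order (the bodies below are illustrative, not the project's actual implementation):

private void createKeys(OzoneBucket ozoneBucket, List<String> keys) throws Exception {
  byte[] input = new byte[10];
  Arrays.fill(input, (byte) 96);
  for (String key : keys) {
    // Write each key with the same 10-byte payload.
    OzoneOutputStream out = ozoneBucket.createKey(key, input.length);
    out.write(input);
    out.close();
  }
}

private void checkKeyList(Iterator<? extends OzoneKey> ozoneKeyIterator, List<String> expectedKeys) {
  LinkedList<String> outputKeys = new LinkedList<>();
  while (ozoneKeyIterator.hasNext()) {
    outputKeys.add(ozoneKeyIterator.next().getName());
  }
  // Order-sensitive comparison of the listed key names.
  Assert.assertEquals(expectedKeys, outputKeys);
}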
Use of org.apache.hadoop.ozone.client.OzoneVolume in project ozone by apache.
The class TestObjectStoreWithFSO, method testKeyRenameWithSubDirs.
@Test
public void testKeyRenameWithSubDirs() throws Exception {
String keyName1 = "dir1/dir2/file1";
String keyName2 = "dir1/dir2/file2";
String newKeyName1 = "dir1/key1";
String newKeyName2 = "dir1/key2";
String value = "sample value";
OzoneClient client = cluster.getClient();
ObjectStore objectStore = client.getObjectStore();
OzoneVolume volume = objectStore.getVolume(volumeName);
OzoneBucket bucket = volume.getBucket(bucketName);
createTestKey(bucket, keyName1, value);
createTestKey(bucket, keyName2, value);
bucket.renameKey(keyName1, newKeyName1);
bucket.renameKey(keyName2, newKeyName2);
// The new keys should exist.
Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName());
Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName());
// The old keys should no longer exist.
assertKeyRenamedEx(bucket, keyName1);
assertKeyRenamedEx(bucket, keyName2);
}
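assertKeyRenamedEx is another helper from the same class. A plausible sketch, assuming the Ozone Manager signals a missing key with an OMException carrying the KEY_NOT_FOUND result code:

private void assertKeyRenamedEx(OzoneBucket bucket, String keyName) throws Exception {
  try {
    bucket.getKey(keyName);
    Assert.fail("Expected KEY_NOT_FOUND for old key name: " + keyName);
  } catch (OMException ome) {
    // After a rename, the old key name must no longer resolve.
    Assert.assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
  }
}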
Use of org.apache.hadoop.ozone.client.OzoneVolume in project ozone by apache.
The class TestObjectStoreWithFSO, method testListKeysWithNotNormalizedPath.
@Test
public void testListKeysWithNotNormalizedPath() throws Exception {
OzoneClient client = cluster.getClient();
ObjectStore objectStore = client.getObjectStore();
OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
Assert.assertEquals(volumeName, ozoneVolume.getName());
OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
Assert.assertEquals(bucketName, ozoneBucket.getName());
String key1 = "/dir1///dir2/file1/";
String key2 = "/dir1///dir2/file2/";
String key3 = "/dir1///dir2/file3/";
LinkedList<String> keys = new LinkedList<>();
keys.add("dir1/");
keys.add("dir1/dir2/");
keys.add(OmUtils.normalizeKey(key1, false));
keys.add(OmUtils.normalizeKey(key2, false));
keys.add(OmUtils.normalizeKey(key3, false));
int length = 10;
byte[] input = new byte[length];
Arrays.fill(input, (byte) 96);
createKey(ozoneBucket, key1, 10, input);
createKey(ozoneBucket, key2, 10, input);
createKey(ozoneBucket, key3, 10, input);
// Iterator with a non-normalized key name as the prefix.
Iterator<? extends OzoneKey> ozoneKeyIterator = ozoneBucket.listKeys("/dir1//", null);
checkKeyList(ozoneKeyIterator, keys);
// Iterator with a normalized key prefix.
ozoneKeyIterator = ozoneBucket.listKeys("dir1/");
checkKeyList(ozoneKeyIterator, keys);
// Iterator with a non-normalized key name as the previous key (startKey).
ozoneKeyIterator = ozoneBucket.listKeys(null, "/dir1///dir2/file1/");
// Remove the keys that sort at or before dir1/dir2/file1 in the listing.
keys.remove("dir1/");
keys.remove("dir1/dir2/");
keys.remove("dir1/dir2/file1");
checkKeyList(ozoneKeyIterator, keys);
// Iterator with normalized key as previous key.
ozoneKeyIterator = ozoneBucket.listKeys(null, OmUtils.normalizeKey(key1, false));
checkKeyList(ozoneKeyIterator, keys);
}
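createKey, called three times above, is also defined outside this excerpt. A minimal sketch, assuming it simply writes the given buffer to a new key of the given length:

private void createKey(OzoneBucket ozoneBucket, String key, int length, byte[] input) throws Exception {
  OzoneOutputStream out = ozoneBucket.createKey(key, length);
  out.write(input, 0, length);
  out.close();
}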
Use of org.apache.hadoop.ozone.client.OzoneVolume in project ozone by apache.
The class TestOzoneManagerHA, method setupBucket.
protected OzoneBucket setupBucket() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
objectStore.createVolume(volumeName, createVolumeArgs);
OzoneVolume retVolumeInfo = objectStore.getVolume(volumeName);
Assert.assertEquals(volumeName, retVolumeInfo.getName());
Assert.assertEquals(userName, retVolumeInfo.getOwner());
Assert.assertEquals(adminName, retVolumeInfo.getAdmin());
String bucketName = UUID.randomUUID().toString();
retVolumeInfo.createBucket(bucketName);
OzoneBucket ozoneBucket = retVolumeInfo.getBucket(bucketName);
Assert.assertEquals(bucketName, ozoneBucket.getName());
Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
return ozoneBucket;
}
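A test body using setupBucket might then write and read back a key along these lines (the key name and payload here are illustrative, not taken from TestOzoneManagerHA):

OzoneBucket bucket = setupBucket();
String keyName = UUID.randomUUID().toString();
byte[] data = "sample value".getBytes(StandardCharsets.UTF_8);
OzoneOutputStream out = bucket.createKey(keyName, data.length);
out.write(data);
out.close();
OzoneInputStream in = bucket.readKey(keyName);
byte[] read = new byte[data.length];
// A single read suffices for such a small key.
Assert.assertEquals(data.length, in.read(read));
in.close();
Assert.assertArrayEquals(data, read);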
Use of org.apache.hadoop.ozone.client.OzoneVolume in project ozone by apache.
The class TestContainerMapper, method init.
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
dbPath = GenericTestUtils.getRandomizedTempPath();
conf.set(OZONE_OM_DB_DIRS, dbPath);
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB");
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
// By default, 2 pipelines are created. Setting the value to 6 ensures
// each pipeline can have 3 containers open.
conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 6);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setScmId(SCM_ID).build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
store = ozClient.getObjectStore();
storageContainerLocationClient = cluster.getStorageContainerLocationClient();
ozoneManager = cluster.getOzoneManager();
store.createVolume(volName);
OzoneVolume volume = store.getVolume(volName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
byte[] data = generateData(10 * 1024 * 1024, (byte) 98);
for (int i = 0; i < 20; i++) {
String key = UUID.randomUUID().toString();
keyList.add(key);
OzoneOutputStream out = bucket.createKey(key, data.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>());
out.write(data, 0, data.length);
out.close();
}
// Stop the cluster so the OM DB files can be read offline by the test.
cluster.stop();
}
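generateData and the static fields used above (conf, cluster, store, volName, bucketName, keyList, SCM_ID, and so on) are declared elsewhere in TestContainerMapper. A minimal sketch of the data helper, assuming it just fills a buffer with one byte value:

private static byte[] generateData(int size, byte val) {
  byte[] data = new byte[size];
  // Fill the entire buffer with the same byte value.
  Arrays.fill(data, val);
  return data;
}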