Use of org.apache.hadoop.ozone.client.OzoneClient in project ozone by apache.
The class TestObjectStore, method testCreateDanglingLinkBucket.
@Test
public void testCreateDanglingLinkBucket() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  // Does not exist
  String sourceBucketName = UUID.randomUUID().toString();
  OzoneClient client = cluster.getClient();
  ObjectStore store = client.getObjectStore();

  // Create volume
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);

  // Dangling link bucket
  String danglingLinkBucketName = UUID.randomUUID().toString();

  // danglingLinkBucket is a dangling link over a source bucket that doesn't
  // exist.
  createLinkBucket(volume, sourceBucketName, danglingLinkBucketName);

  // since sourceBucket does not exist, layout depends on
  // OZONE_DEFAULT_BUCKET_LAYOUT config.
  OzoneBucket bucket = volume.getBucket(danglingLinkBucketName);
  Assert.assertEquals(
      BucketLayout.fromString(conf.get(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT)),
      bucket.getBucketLayout());
  Assert.assertEquals(sourceBucketName, bucket.getSourceBucket());
}
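The createLinkBucket helper is defined elsewhere in TestObjectStore and is not part of this snippet. A minimal sketch of such a helper, assuming the link is set up through the standard BucketArgs builder (setSourceVolume/setSourceBucket); the project's actual helper may differ:

// Hypothetical sketch, not the project's exact code: creates linkBucketName in
// the given volume as a link pointing at sourceBucketName, which may not exist.
// Requires org.apache.hadoop.ozone.client.BucketArgs.
private void createLinkBucket(OzoneVolume volume, String sourceBucketName,
    String linkBucketName) throws IOException {
  BucketArgs args = BucketArgs.newBuilder()
      .setSourceVolume(volume.getName())
      .setSourceBucket(sourceBucketName)
      .build();
  volume.createBucket(linkBucketName, args);
}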
Use of org.apache.hadoop.ozone.client.OzoneClient in project ozone by apache.
The class TestObjectStoreWithFSO, method testKeyRenameWithSubDirs.
@Test
public void testKeyRenameWithSubDirs() throws Exception {
  String keyName1 = "dir1/dir2/file1";
  String keyName2 = "dir1/dir2/file2";
  String newKeyName1 = "dir1/key1";
  String newKeyName2 = "dir1/key2";
  String value = "sample value";

  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume volume = objectStore.getVolume(volumeName);
  OzoneBucket bucket = volume.getBucket(bucketName);

  createTestKey(bucket, keyName1, value);
  createTestKey(bucket, keyName2, value);

  bucket.renameKey(keyName1, newKeyName1);
  bucket.renameKey(keyName2, newKeyName2);

  // new key should exist
  Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName());
  Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName());

  // old key should not exist
  assertKeyRenamedEx(bucket, keyName1);
  assertKeyRenamedEx(bucket, keyName2);
}
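The createTestKey and assertKeyRenamedEx helpers are also defined outside this snippet. Plausible sketches, built only on the OzoneBucket createKey/getKey calls already used in these tests (assumptions, not the project's exact implementations):

// Hypothetical sketch: writes 'value' into 'keyName' and verifies it is readable.
private void createTestKey(OzoneBucket bucket, String keyName, String value)
    throws IOException {
  byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
  try (OzoneOutputStream out = bucket.createKey(keyName, bytes.length,
      ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
    out.write(bytes);
  }
  Assert.assertEquals(keyName, bucket.getKey(keyName).getName());
}

// Hypothetical sketch: asserts that looking up the old name of a renamed key
// fails with KEY_NOT_FOUND.
private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
    throws IOException {
  try {
    bucket.getKey(keyName);
    fail("Lookup of renamed key " + keyName + " should fail");
  } catch (OMException ome) {
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
  }
}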
Use of org.apache.hadoop.ozone.client.OzoneClient in project ozone by apache.
The class TestObjectStoreWithFSO, method testListKeysAtDifferentLevels.
/**
 * Verify listKeys at different levels.
 *
 * FS tree structure under bucket buck-1:
 *
 *   a/
 *   +-- b1/
 *   |   +-- c1/  c1.tx
 *   |   +-- c2/  c2.tx
 *   +-- b2/
 *   |   +-- d1/  d11.tx
 *   |   +-- d2/  d21.tx, d22.tx
 *   |   +-- d3/  d31.tx
 *   +-- b3/
 *       +-- e1/  e11.tx
 *       +-- e2/  e21.tx
 *       +-- e3/  e31.tx
 */
@Test
public void testListKeysAtDifferentLevels() throws Exception {
  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
  Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  Assert.assertTrue(ozoneBucket.getName().equals(bucketName));

  String keyc1 = "/a/b1/c1/c1.tx";
  String keyc2 = "/a/b1/c2/c2.tx";
  String keyd13 = "/a/b2/d1/d11.tx";
  String keyd21 = "/a/b2/d2/d21.tx";
  String keyd22 = "/a/b2/d2/d22.tx";
  String keyd31 = "/a/b2/d3/d31.tx";
  String keye11 = "/a/b3/e1/e11.tx";
  String keye21 = "/a/b3/e2/e21.tx";
  String keye31 = "/a/b3/e3/e31.tx";

  LinkedList<String> keys = new LinkedList<>();
  keys.add(keyc1);
  keys.add(keyc2);
  keys.add(keyd13);
  keys.add(keyd21);
  keys.add(keyd22);
  keys.add(keyd31);
  keys.add(keye11);
  keys.add(keye21);
  keys.add(keye31);

  int length = 10;
  byte[] input = new byte[length];
  Arrays.fill(input, (byte) 96);
  createKeys(ozoneBucket, keys);

  // Root level listing keys
  Iterator<? extends OzoneKey> ozoneKeyIterator =
      ozoneBucket.listKeys(null, null);
  verifyFullTreeStructure(ozoneKeyIterator);

  ozoneKeyIterator = ozoneBucket.listKeys("a/", null);
  verifyFullTreeStructure(ozoneKeyIterator);

  LinkedList<String> expectedKeys;

  // Intermediate level keyPrefix - 2nd level
  ozoneKeyIterator = ozoneBucket.listKeys("a///b2///", null);
  expectedKeys = new LinkedList<>();
  expectedKeys.add("a/b2/");
  expectedKeys.add("a/b2/d1/");
  expectedKeys.add("a/b2/d2/");
  expectedKeys.add("a/b2/d3/");
  expectedKeys.add("a/b2/d1/d11.tx");
  expectedKeys.add("a/b2/d2/d21.tx");
  expectedKeys.add("a/b2/d2/d22.tx");
  expectedKeys.add("a/b2/d3/d31.tx");
  checkKeyList(ozoneKeyIterator, expectedKeys);

  // Intermediate level keyPrefix - 3rd level
  ozoneKeyIterator = ozoneBucket.listKeys("a/b2/d1", null);
  expectedKeys = new LinkedList<>();
  expectedKeys.add("a/b2/d1/");
  expectedKeys.add("a/b2/d1/d11.tx");
  checkKeyList(ozoneKeyIterator, expectedKeys);

  // Boundary of a level
  ozoneKeyIterator = ozoneBucket.listKeys("a/b2/d2", "a/b2/d2/d21.tx");
  expectedKeys = new LinkedList<>();
  expectedKeys.add("a/b2/d2/d22.tx");
  checkKeyList(ozoneKeyIterator, expectedKeys);

  // Boundary case - last node in the depth-first-traversal
  ozoneKeyIterator = ozoneBucket.listKeys("a/b3/e3", "a/b3/e3/e31.tx");
  expectedKeys = new LinkedList<>();
  checkKeyList(ozoneKeyIterator, expectedKeys);
}
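createKeys, createKey, and checkKeyList are helpers of TestObjectStoreWithFSO that this snippet relies on. A rough sketch of what they could look like (assumptions, not the project's exact code):

// Hypothetical sketch: writes every key in the list with a small fixed payload.
private void createKeys(OzoneBucket bucket, List<String> keys) throws Exception {
  int length = 10;
  byte[] input = new byte[length];
  Arrays.fill(input, (byte) 96);
  for (String key : keys) {
    createKey(bucket, key, length, input);
  }
}

// Hypothetical sketch: creates one key of the given length and writes 'input'.
private void createKey(OzoneBucket bucket, String key, int length, byte[] input)
    throws Exception {
  try (OzoneOutputStream out = bucket.createKey(key, length)) {
    out.write(input);
  }
}

// Hypothetical sketch: drains the iterator and compares the returned key names,
// in order, against the expected list.
private void checkKeyList(Iterator<? extends OzoneKey> iterator,
    List<String> expectedKeys) {
  List<String> actual = new LinkedList<>();
  while (iterator.hasNext()) {
    actual.add(iterator.next().getName());
  }
  Assert.assertEquals(expectedKeys, actual);
}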
Use of org.apache.hadoop.ozone.client.OzoneClient in project ozone by apache.
The class TestObjectStoreWithFSO, method testLookupKey.
@Test
public void testLookupKey() throws Exception {
  String parent = "a/b/c/";
  String fileName = "key" + RandomStringUtils.randomNumeric(5);
  String key = parent + fileName;

  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
  Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  Assert.assertTrue(ozoneBucket.getName().equals(bucketName));

  Table<String, OmKeyInfo> openFileTable =
      cluster.getOzoneManager().getMetadataManager()
          .getOpenKeyTable(getBucketLayout());

  String data = "random data";
  OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
      data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
      new HashMap<>());
  KeyOutputStream keyOutputStream =
      (KeyOutputStream) ozoneOutputStream.getOutputStream();
  long clientID = keyOutputStream.getClientID();

  OmDirectoryInfo dirPathC = getDirInfo(parent);
  Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);

  // After file creation, the key should be tracked in the open file table.
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
      dirPathC.getObjectID(), false);

  ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
      data.length());

  // The key is still open for writing, so a lookup must fail.
  try {
    ozoneBucket.getKey(key);
    fail("Should throw an exception, as the key is not visible while it is "
        + "still open for writing!");
  } catch (OMException ome) {
    // expected
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
  }

  ozoneOutputStream.close();

  OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
  Assert.assertEquals(key, keyDetails.getName());

  Table<String, OmKeyInfo> fileTable =
      cluster.getOzoneManager().getMetadataManager()
          .getKeyTable(getBucketLayout());

  // When the key is closed, its entry should be removed from openFileTable
  // and added to fileTable.
  verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
      dirPathC.getObjectID(), true);

  ozoneBucket.deleteKey(key);

  // Looking up the deleted key should fail.
  try {
    ozoneBucket.getKey(key);
    fail("Should throw an exception, as the key no longer exists!");
  } catch (OMException ome) {
    // expected
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
  }

  // After the delete, the key should be absent from both tables.
  verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
      dirPathC.getObjectID(), true);
}
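getDirInfo, verifyKeyInOpenFileTable, and verifyKeyInFileTable are test helpers that inspect OM metadata directly and are not shown here. As an illustration only, here is a heavily simplified sketch of what a verifyKeyInOpenFileTable-style check could do, assuming the hdds Table/TableIterator API and ignoring the asynchronous flush the real test may have to wait for; the project's actual helpers (including how the clientID is encoded into the DB key) may differ:

// Hypothetical sketch: scans the open-file table for an entry with the given
// file name under the given parent object ID. 'expectAbsent' selects whether
// the entry must be missing (true) or present (false). The real helper likely
// also matches the clientID embedded in the DB key.
private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
    long clientID, String fileName, long parentID, boolean expectAbsent)
    throws IOException {
  boolean found = false;
  try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> it =
      openFileTable.iterator()) {
    while (it.hasNext()) {
      OmKeyInfo keyInfo = it.next().getValue();
      if (fileName.equals(keyInfo.getKeyName())
          && parentID == keyInfo.getParentObjectID()) {
        found = true;
        break;
      }
    }
  }
  if (expectAbsent) {
    Assert.assertFalse("Unexpected open-file entry for " + fileName, found);
  } else {
    Assert.assertTrue("Missing open-file entry for " + fileName, found);
  }
}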
Use of org.apache.hadoop.ozone.client.OzoneClient in project ozone by apache.
The class TestObjectStoreWithFSO, method testListKeysWithNotNormalizedPath.
@Test
public void testListKeysWithNotNormalizedPath() throws Exception {
  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
  Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  Assert.assertTrue(ozoneBucket.getName().equals(bucketName));

  String key1 = "/dir1///dir2/file1/";
  String key2 = "/dir1///dir2/file2/";
  String key3 = "/dir1///dir2/file3/";

  LinkedList<String> keys = new LinkedList<>();
  keys.add("dir1/");
  keys.add("dir1/dir2/");
  keys.add(OmUtils.normalizeKey(key1, false));
  keys.add(OmUtils.normalizeKey(key2, false));
  keys.add(OmUtils.normalizeKey(key3, false));

  int length = 10;
  byte[] input = new byte[length];
  Arrays.fill(input, (byte) 96);
  createKey(ozoneBucket, key1, 10, input);
  createKey(ozoneBucket, key2, 10, input);
  createKey(ozoneBucket, key3, 10, input);

  // Iterator with a non-normalized key name as the prefix.
  Iterator<? extends OzoneKey> ozoneKeyIterator =
      ozoneBucket.listKeys("/dir1//", null);
  checkKeyList(ozoneKeyIterator, keys);

  // Iterator with the normalized key prefix.
  ozoneKeyIterator = ozoneBucket.listKeys("dir1/");
  checkKeyList(ozoneKeyIterator, keys);

  // Iterator with a non-normalized key name as the previous key.
  ozoneKeyIterator = ozoneBucket.listKeys(null, "/dir1///dir2/file1/");

  // Remove the entries that sort at or before dir1/dir2/file1.
  keys.remove("dir1/");
  keys.remove("dir1/dir2/");
  keys.remove("dir1/dir2/file1");
  checkKeyList(ozoneKeyIterator, keys);

  // Iterator with the normalized key as the previous key.
  ozoneKeyIterator = ozoneBucket.listKeys(null,
      OmUtils.normalizeKey(key1, false));
  checkKeyList(ozoneKeyIterator, keys);
}
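For reference, the expected-key list above already shows what OmUtils.normalizeKey produces for these inputs: the leading slash and repeated slashes are collapsed, and with the second argument set to false the trailing slash is dropped as well. A small illustration:

// Normalization this test relies on; the resulting value follows from the
// keys.remove("dir1/dir2/file1") call above.
String rawKey = "/dir1///dir2/file1/";
String normalized = OmUtils.normalizeKey(rawKey, false);
// normalized is "dir1/dir2/file1"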