use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class OMFailoverProxyProvider method shouldFailover.
private synchronized boolean shouldFailover(Exception ex) {
  Throwable unwrappedException = HddsUtils.getUnwrappedException(ex);
  if (unwrappedException instanceof AccessControlException ||
      unwrappedException instanceof SecretManager.InvalidToken) {
    // Try each OM once before giving up on an
    // AccessControlException.
    if (accessControlExceptionOMs.contains(currentProxyOMNodeId)) {
      accessControlExceptionOMs.clear();
      return false;
    } else {
      accessControlExceptionOMs.add(currentProxyOMNodeId);
      if (accessControlExceptionOMs.containsAll(omNodeIDList)) {
        return false;
      }
    }
  } else if (HddsUtils.shouldNotFailoverOnRpcException(unwrappedException)) {
    return false;
  } else if (ex instanceof StateMachineException) {
    StateMachineException smEx = (StateMachineException) ex;
    Throwable cause = smEx.getCause();
    if (cause instanceof OMException) {
      OMException omEx = (OMException) cause;
      // Do not failover if the OM rejected the request because it is
      // prepared.
      return omEx.getResult() !=
          OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED;
    }
  }
  return true;
}
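For a client that ends up catching the OMException itself, the same kind of decision can be phrased in terms of result codes rather than exception types. A minimal, hypothetical sketch; the helper name and the codes chosen here are illustrative and are not the proxy provider's actual policy:

  // Hypothetical helper, not part of OMFailoverProxyProvider: classify an
  // OMException by its result code. The listed codes are examples only.
  static boolean isRetriable(OMException e) {
    switch (e.getResult()) {
      case PERMISSION_DENIED:
      case VOLUME_NOT_FOUND:
      case BUCKET_NOT_FOUND:
      case KEY_NOT_FOUND:
        return false; // failing over to another OM will not change the outcome
      default:
        return true;  // e.g. a transient availability or leadership issue
    }
  }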
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class TestOzoneManagerHAWithData method testFileOperationsWithRecursive.
private void testFileOperationsWithRecursive() throws Exception {
  OzoneBucket ozoneBucket = setupBucket();
  String data = "random data";

  // One level key name.
  String keyName = UUID.randomUUID().toString();
  testCreateFile(ozoneBucket, keyName, data, true, false);

  // Multi level key name.
  keyName = "dir1/dir2/dir3/file1";
  testCreateFile(ozoneBucket, keyName, data, true, false);

  data = "random data random data";
  // Multi level key name with overwrite set.
  testCreateFile(ozoneBucket, keyName, data, true, true);

  try {
    testCreateFile(ozoneBucket, keyName, data, true, false);
    fail("testFileOperationsWithRecursive");
  } catch (OMException ex) {
    Assert.assertEquals(FILE_ALREADY_EXISTS, ex.getResult());
  }

  // Try now with a file name which is the same as a directory.
  try {
    keyName = "folder/folder2";
    ozoneBucket.createDirectory(keyName);
    testCreateFile(ozoneBucket, keyName, data, true, false);
    fail("testFileOperationsWithNonRecursive");
  } catch (OMException ex) {
    Assert.assertEquals(NOT_A_FILE, ex.getResult());
  }
}
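The testCreateFile helper is referenced but not shown here. A plausible sketch of what it does, assuming OzoneBucket#createFile(keyName, size, type, factor, overwrite, recursive) from the Ozone client API; the replication settings are placeholders and the exact helper in the test may differ:

  // Assumed shape of the helper used above: create the file with the given
  // overwrite/recursive flags, write the data, then read it back and verify.
  private void testCreateFile(OzoneBucket bucket, String keyName, String data,
      boolean recursive, boolean overwrite) throws Exception {
    OzoneOutputStream out = bucket.createFile(keyName, data.length(),
        ReplicationType.RATIS, ReplicationFactor.THREE, overwrite, recursive);
    out.write(data.getBytes(StandardCharsets.UTF_8));
    out.close();

    byte[] readBack = new byte[data.length()];
    try (OzoneInputStream in = bucket.readKey(keyName)) {
      int off = 0;
      while (off < readBack.length) {
        int n = in.read(readBack, off, readBack.length - off);
        if (n < 0) {
          break;
        }
        off += n;
      }
    }
    Assert.assertEquals(data, new String(readBack, StandardCharsets.UTF_8));
  }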
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class TestOzoneManagerHAWithData method testKeysDelete.
private void testKeysDelete() throws Exception {
  OzoneBucket ozoneBucket = setupBucket();
  String data = "random data";
  String keyName1 = "dir/file1";
  String keyName2 = "dir/file2";
  String keyName3 = "dir/file3";
  String keyName4 = "dir/file4";

  List<String> keyList1 = new ArrayList<>();
  keyList1.add(keyName2);
  keyList1.add(keyName3);

  testCreateFile(ozoneBucket, keyName1, data, true, false);
  testCreateFile(ozoneBucket, keyName2, data, true, false);
  testCreateFile(ozoneBucket, keyName3, data, true, false);
  testCreateFile(ozoneBucket, keyName4, data, true, false);

  // Delete keyName1 using the deleteKey api.
  ozoneBucket.deleteKey(keyName1);

  // Delete keyName2 and keyName3 in keyList1 using the deleteKeys api.
  ozoneBucket.deleteKeys(keyList1);

  // In keyList2, keyName3 was previously deleted and keyName4 exists.
  List<String> keyList2 = new ArrayList<>();
  keyList2.add(keyName3);
  keyList2.add(keyName4);

  // Because keyList2 contains an already-deleted key, deleteKeys should throw
  // an exception. In this case, we test for deletion failure.
  try {
    ozoneBucket.deleteKeys(keyList2);
    fail("testFilesDelete");
  } catch (OMException ex) {
    // The expected result is PARTIAL_DELETE: when not all keys can be
    // deleted, the error code PARTIAL_DELETE is returned.
    Assert.assertEquals(PARTIAL_DELETE, ex.getResult());
  }
}
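PARTIAL_DELETE is returned when only some of the requested keys could be removed. A minimal sketch of how an application might tolerate the case where the missing keys were simply already gone; the helper name and fallback strategy are this example's own, not part of the test above:

  // Hypothetical helper: tolerate PARTIAL_DELETE by falling back to per-key
  // deletes and ignoring keys that no longer exist.
  static void deleteKeysIgnoringMissing(OzoneBucket bucket, List<String> keys)
      throws IOException {
    try {
      bucket.deleteKeys(keys);
    } catch (OMException e) {
      if (e.getResult() != OMException.ResultCodes.PARTIAL_DELETE) {
        throw e;
      }
      for (String key : keys) {
        try {
          bucket.deleteKey(key);
        } catch (OMException inner) {
          if (inner.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
            throw inner;
          }
        }
      }
    }
  }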
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class TestRecursiveAclWithFSO method testKeyDeleteAndRenameWithoutPermission.
@Test
public void testKeyDeleteAndRenameWithoutPermission() throws Exception {
  List<String> keys = new ArrayList<>();
  // Create volumes with user1
  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  /* r = READ, w = WRITE, c = CREATE, d = DELETE
     l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */
  String aclWorldAll = "world::a";
  createVolumeWithOwnerAndAcl(objectStore, "volume1", "user1", aclWorldAll);
  // Login as user1, create directories and keys
  UserGroupInformation.setLoginUser(user1);
  client = cluster.getClient();
  objectStore = client.getObjectStore();
  OzoneVolume volume = objectStore.getVolume("volume1");
  BucketArgs omBucketArgs =
      BucketArgs.newBuilder().setStorageType(StorageType.DISK).build();
  // create bucket with user1
  volume.createBucket("bucket1", omBucketArgs);
  setBucketAcl(objectStore, volume.getName(), "bucket1", aclWorldAll);
  OzoneBucket ozoneBucket = volume.getBucket("bucket1");
  /**
   *                     buck-1
   *                        |
   *                        a
   *                        |
   *      ------------------------------------
   *      |         |          |             |
   *      b1        b2         b3          file1
   *    ------    ------     ------
   *    |    |    |    |     |    |
   *    c1   c2   d1   d2    e1   e2
   *    |    |    |    |     |    |
   *    f1   f2   f3  -----  f5   f6
   *                  |   |
   *                 d21  file2
   *                  |
   *                  f4
   *
   * Test case 1:
   *   Remove delete acl from file file2
   *   Try deleting b2
   *
   * Test case 2:
   *   Remove delete acl from dir c2
   *   Try deleting b1
   *
   * Test case 3:
   *   Try deleting b3
   */
  String keyf1 = "a/b1/c1/f1";
  String keyf2 = "a/b1/c2/f2";
  String keyf3 = "a/b2/d1/f3";
  String keyf4 = "a/b2/d2/d21/f4";
  String keyf5 = "/a/b3/e1/f5";
  String keyf6 = "/a/b3/e2/f6";
  String file1 = "a/" + "file" + RandomStringUtils.randomNumeric(5);
  String file2 = "a/b2/d2/" + "file" + RandomStringUtils.randomNumeric(5);

  keys.add(keyf1);
  keys.add(keyf2);
  keys.add(keyf3);
  keys.add(keyf4);
  keys.add(keyf5);
  keys.add(keyf6);
  keys.add(file1);
  keys.add(file2);
  createKeys(objectStore, ozoneBucket, keys);

  // Test case 1
  // Remove acls from file2.
  // Delete/Rename on directory a/b2 should throw permission denied
  // (since file2 is a child).
  removeAclsFromKey(objectStore, ozoneBucket, file2);

  OzoneObj ozoneObj;
  List<OzoneAcl> aclList1;

  UserGroupInformation.setLoginUser(user2);
  client = cluster.getClient();
  objectStore = client.getObjectStore();
  volume = objectStore.getVolume("volume1");
  ozoneBucket = volume.getBucket("bucket1");

  // perform delete
  try {
    ozoneBucket.deleteDirectory("a/b2", true);
    Assert.fail("Should throw permission denied !");
  } catch (OMException ome) {
    // expect permission error
    Assert.assertEquals("Permission check failed",
        OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
  }

  // perform rename
  try {
    ozoneBucket.renameKey("a/b2", "a/b2_renamed");
    Assert.fail("Should throw permission denied !");
  } catch (OMException ome) {
    // expect permission error
    Assert.assertEquals("Permission check failed",
        OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
  }

  // Test case 2
  // Remove acl from directory c2; delete/rename a/b1 should throw
  // permission denied since c2 is a subdirectory.
  UserGroupInformation.setLoginUser(user1);
  removeAclsFromKey(objectStore, ozoneBucket, "a/b1/c2");
  UserGroupInformation.setLoginUser(user2);

  // perform delete
  try {
    ozoneBucket.deleteDirectory("a/b1", true);
    Assert.fail("Should throw permission denied !");
  } catch (OMException ome) {
    // expect permission error
    Assert.assertEquals("Permission check failed",
        OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
  }

  // perform rename
  try {
    ozoneBucket.renameKey("a/b1", "a/b1_renamed");
    Assert.fail("Should throw permission denied !");
  } catch (OMException ome) {
    // expect permission error
    Assert.assertEquals("Permission check failed",
        OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
  }

  // Test case 3
  // Delete b3; this shouldn't throw an exception because acls have not
  // been removed from its subpaths.
  ozoneBucket.deleteDirectory("a/b3", true);
}
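The removeAclsFromKey helper is not shown in this snippet. A plausible sketch, assuming the ObjectStore#getAcl/#removeAcl calls and the OzoneObjInfo builder from the Ozone client API; it strips every ACL from the key so that the recursive permission checks above are triggered:

  // Assumed shape of the helper: build an OzoneObj for the key, then remove
  // each ACL currently set on it.
  private void removeAclsFromKey(ObjectStore objectStore,
      OzoneBucket ozoneBucket, String key) throws IOException {
    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder()
        .setKeyName(key)
        .setBucketName(ozoneBucket.getName())
        .setVolumeName(ozoneBucket.getVolumeName())
        .setStoreType(OzoneObj.StoreType.OZONE)
        .setResType(OzoneObj.ResourceType.KEY)
        .build();
    List<OzoneAcl> acls = objectStore.getAcl(ozoneObj);
    for (OzoneAcl acl : acls) {
      objectStore.removeAcl(ozoneObj, acl);
    }
  }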
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class BaseFreonGenerator method ensureVolumeAndBucketExist.
/**
 * Create missing target volume/bucket.
 */
public void ensureVolumeAndBucketExist(OzoneClient rpcClient,
    String volumeName, String bucketName) throws IOException {
  OzoneVolume volume;
  ensureVolumeExists(rpcClient, volumeName);
  volume = rpcClient.getObjectStore().getVolume(volumeName);
  try {
    volume.getBucket(bucketName);
  } catch (OMException ex) {
    if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
      volume.createBucket(bucketName);
    } else {
      throw ex;
    }
  }
}
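ensureVolumeExists is the companion call; it presumably follows the same pattern of probing first and creating on a not-found result. A minimal sketch under that assumption (the real Freon helper may differ in its details):

  // Sketch: treat VOLUME_NOT_FOUND as "create it", rethrow anything else.
  public void ensureVolumeExists(OzoneClient rpcClient, String volumeName)
      throws IOException {
    try {
      rpcClient.getObjectStore().getVolume(volumeName);
    } catch (OMException ex) {
      if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
        rpcClient.getObjectStore().createVolume(volumeName);
      } else {
        throw ex;
      }
    }
  }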