use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.
the class CuratorService method operationFailure.
/**
 * Create an IOE when an operation fails
 * @param path path of operation
 * @param operation operation attempted
 * @param exception the exception caught
 * @param acls the ACLs on the path, included in diagnostics messages
 * @return an IOE to throw that contains the path and operation details.
 */
protected IOException operationFailure(String path, String operation, Exception exception, List<ACL> acls) {
  IOException ioe;
  String aclList = "[" + RegistrySecurity.aclsToString(acls) + "]";
  if (exception instanceof KeeperException.NoNodeException) {
    ioe = new PathNotFoundException(path);
  } else if (exception instanceof KeeperException.NodeExistsException) {
    ioe = new FileAlreadyExistsException(path);
  } else if (exception instanceof KeeperException.NoAuthException) {
    ioe = new NoPathPermissionsException(path, "Not authorized to access path; ACLs: " + aclList);
  } else if (exception instanceof KeeperException.NotEmptyException) {
    ioe = new PathIsNotEmptyDirectoryException(path);
  } else if (exception instanceof KeeperException.AuthFailedException) {
    ioe = new AuthenticationFailedException(path, "Authentication Failed: " + exception + "; " + securityConnectionDiagnostics, exception);
  } else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) {
    ioe = new NoChildrenForEphemeralsException(path, "Cannot create a path under an ephemeral node: " + exception, exception);
  } else if (exception instanceof KeeperException.InvalidACLException) {
    // this is a security exception of a kind
    // include the ACLs to help the diagnostics
    StringBuilder builder = new StringBuilder();
    builder.append("Path access failure ").append(aclList);
    builder.append(" ");
    builder.append(securityConnectionDiagnostics);
    ioe = new NoPathPermissionsException(path, builder.toString());
  } else {
    ioe = new RegistryIOException(path, "Failure of " + operation + " on " + path + ": " + exception.toString(), exception);
  }
  if (ioe.getCause() == null) {
    ioe.initCause(exception);
  }
  return ioe;
}
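This translator is what turns low-level ZooKeeper failure codes into the filesystem-style exceptions (PathNotFoundException, PathIsNotEmptyDirectoryException, and so on) that callers of the registry can catch. A minimal sketch of the call pattern it supports; the zkRead helper and the curator field are illustrative, not the exact CuratorService API:

// Hypothetical caller: every ZooKeeper operation is wrapped so that any
// KeeperException surfaces as a meaningful Hadoop filesystem IOException.
public byte[] zkRead(String path, List<ACL> acls) throws IOException {
  try {
    return curator.getData().forPath(path);  // any KeeperException escapes here
  } catch (Exception e) {
    // translate ZK failure codes (NoNode, NoAuth, NotEmpty, ...) into IOE subclasses
    throw operationFailure(path, "read()", e, acls);
  }
}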
use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.
the class TestRegistryRMOperations method testChildDeletion.
@Test
public void testChildDeletion() throws Throwable {
  ServiceRecord app = createRecord("app1", PersistencePolicies.APPLICATION, "app", null);
  ServiceRecord container = createRecord("container1", PersistencePolicies.CONTAINER, "container", null);
  operations.bind("/app", app, BindFlags.OVERWRITE);
  operations.bind("/app/container", container, BindFlags.OVERWRITE);
  try {
    int p = purge("/", "app1", PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
    fail("expected a failure, got a purge count of " + p);
  } catch (PathIsNotEmptyDirectoryException expected) {
    // expected
  }
}
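The try/fail/catch block above is the classic JUnit idiom for asserting that a call must throw. In Hadoop branches that ship org.apache.hadoop.test.LambdaTestUtils, the same assertion can be written more compactly; a sketch, assuming that test utility is on the classpath:

import org.apache.hadoop.test.LambdaTestUtils;

// Same assertion as the try/fail/catch block: the purge must throw
// because "app1" still has a child and the policy is FailOnChildren.
LambdaTestUtils.intercept(PathIsNotEmptyDirectoryException.class,
    () -> purge("/", "app1", PersistencePolicies.APPLICATION,
        RegistryAdminService.PurgePolicy.FailOnChildren));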
use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.
the class TestRegistryRMOperations method testCreateComplexApplication.
/**
 * Create a complex example app
 * @throws Throwable on any failure
 */
@Test
public void testCreateComplexApplication() throws Throwable {
  String appId = "application_1408631738011_0001";
  String cid = "container_1408631738011_0001_01_";
  String cid1 = cid + "000001";
  String cid2 = cid + "000002";
  String appPath = USERPATH + "tomcat";
  ServiceRecord webapp = createRecord(appId, PersistencePolicies.APPLICATION, "tomcat-based web application", null);
  webapp.addExternalEndpoint(restEndpoint("www", new URI("http", "//loadbalancer/", null)));
  ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER, null, null);
  comp1.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack4server3:43572", null)));
  comp1.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
  // Component 2 has a container lifespan
  ServiceRecord comp2 = createRecord(cid2, PersistencePolicies.CONTAINER, null, null);
  comp2.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack1server28:35881", null)));
  comp2.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
  operations.mknode(USERPATH, false);
  operations.bind(appPath, webapp, BindFlags.OVERWRITE);
  String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS;
  operations.mknode(componentsPath, false);
  String dns1 = RegistryPathUtils.encodeYarnID(cid1);
  String dns1path = componentsPath + dns1;
  operations.bind(dns1path, comp1, BindFlags.CREATE);
  String dns2 = RegistryPathUtils.encodeYarnID(cid2);
  String dns2path = componentsPath + dns2;
  operations.bind(dns2path, comp2, BindFlags.CREATE);
  ZKPathDumper pathDumper = registry.dumpPath(false);
  LOG.info(pathDumper.toString());
  logRecord("tomcat", webapp);
  logRecord(dns1, comp1);
  logRecord(dns2, comp2);
  ServiceRecord dns1resolved = operations.resolve(dns1path);
  assertEquals("Persistence policies on resolved entry", PersistencePolicies.CONTAINER, dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
  Map<String, RegistryPathStatus> children = RegistryUtils.statChildren(operations, componentsPath);
  assertEquals(2, children.size());
  Collection<RegistryPathStatus> componentStats = children.values();
  Map<String, ServiceRecord> records = RegistryUtils.extractServiceRecords(operations, componentsPath, componentStats);
  assertEquals(2, records.size());
  ServiceRecord retrieved1 = records.get(dns1path);
  logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1);
  assertMatches(dns1resolved, retrieved1);
  assertEquals(PersistencePolicies.CONTAINER, retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
  // create a listing under components/
  operations.mknode(componentsPath + "subdir", false);
  // the new node shows up in the listing of child entries
  Map<String, RegistryPathStatus> childrenUpdated = RegistryUtils.statChildren(operations, componentsPath);
  assertEquals(3, childrenUpdated.size());
  // ...but the non-record child is not picked up in the record listing
  Map<String, ServiceRecord> recordsUpdated = RegistryUtils.extractServiceRecords(operations, componentsPath, childrenUpdated);
  assertEquals(2, recordsUpdated.size());
  // now do some deletions.
  // synchronous delete of container ID 2:
  // no deletion when the app policy is chosen, as the record has container persistence
  assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren));
  // succeed for the container policy
  assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER, RegistryAdminService.PurgePolicy.FailOnChildren));
  assertPathNotFound(dns2path);
  assertPathExists(dns1path);
  // expect SkipOnChildren to skip the deletion of the parent app entry
  assertEquals(0, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.SkipOnChildren));
  assertPathExists(appPath);
  assertPathExists(dns1path);
  // attempt to delete the app with a policy of fail on children
  try {
    int p = purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
    fail("expected a failure, got a purge count of " + p);
  } catch (PathIsNotEmptyDirectoryException expected) {
    // expected
  }
  assertPathExists(appPath);
  assertPathExists(dns1path);
  // now trigger recursive delete
  assertEquals(1, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.PurgeAll));
  assertPathNotFound(appPath);
  assertPathNotFound(dns1path);
}
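The test relies on the distinction between statChildren, which lists every znode under a path (including the plain "subdir" node), and extractServiceRecords, which resolves each child and keeps only those holding valid service records. A minimal sketch of that stat-then-extract pattern, assuming operations is a bound RegistryOperations instance:

// List all children of a path, then filter down to those with service records.
Map<String, RegistryPathStatus> stats =
    RegistryUtils.statChildren(operations, componentsPath);
Map<String, ServiceRecord> liveRecords =
    RegistryUtils.extractServiceRecords(operations, componentsPath, stats.values());
for (Map.Entry<String, ServiceRecord> entry : liveRecords.entrySet()) {
  // keys are full registry paths; values carry the YARN attributes
  String persistence =
      entry.getValue().get(YarnRegistryAttributes.YARN_PERSISTENCE, "");
  LOG.info("{} -> {}", entry.getKey(), persistence);
}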
use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.
the class RegistryAdminService method purge.
/**
* Recursive operation to purge all matching records under a base path.
* <ol>
* <li>Uses a depth first search</li>
* <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
* <li>If a record matches then it is deleted without any child searches</li>
* <li>Deletions will be asynchronous if a callback is provided</li>
* </ol>
*
* The code is designed to be robust against parallel deletions taking place;
* in such a case it will stop attempting that part of the tree. This
* avoids the situation of more than one purge happening in parallel and
* one of the purge operations deleting the node tree above the other.
* @param path base path
* @param selector selector for the purge policy
* @param purgePolicy what to do if there is a matching record with children
* @param callback optional curator callback
* @return the number of delete operations performed. As deletes may be for
* everything under a path, this may be less than the number of records
* actually deleted
* @throws IOException problems
* @throws PathIsNotEmptyDirectoryException if an entry cannot be deleted
* as it has children and the purge policy is FailOnChildren
*/
@VisibleForTesting
public int purge(String path, NodeSelector selector, PurgePolicy purgePolicy, BackgroundCallback callback) throws IOException {
  boolean toDelete = false;
  // look at self to see if it has a service record
  Map<String, RegistryPathStatus> childEntries;
  Collection<RegistryPathStatus> entries;
  try {
    // list this path's children
    childEntries = RegistryUtils.statChildren(this, path);
    entries = childEntries.values();
  } catch (PathNotFoundException e) {
    // exit
    return 0;
  }
  try {
    RegistryPathStatus registryPathStatus = stat(path);
    ServiceRecord serviceRecord = resolve(path);
    // there is now an entry here.
    toDelete = selector.shouldSelect(path, registryPathStatus, serviceRecord);
  } catch (EOFException ignored) {
    // ignore
  } catch (InvalidRecordException ignored) {
    // ignore
  } catch (NoRecordException ignored) {
    // ignore
  } catch (PathNotFoundException e) {
    // exit
    return 0;
  }
  if (toDelete && !entries.isEmpty()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Match on record @ {} with children", path);
    }
    // there are children
    switch (purgePolicy) {
    case SkipOnChildren:
      // don't do the deletion... continue to next record
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skipping deletion");
      }
      toDelete = false;
      break;
    case PurgeAll:
      // mark for deletion; the recursive zkDelete below takes the children
      // too, so clear the entry list to stop the child loop re-walking them
      if (LOG.isDebugEnabled()) {
        LOG.debug("Scheduling for deletion with children");
      }
      toDelete = true;
      entries = new ArrayList<RegistryPathStatus>(0);
      break;
    case FailOnChildren:
      if (LOG.isDebugEnabled()) {
        LOG.debug("Failing deletion operation");
      }
      throw new PathIsNotEmptyDirectoryException(path);
    }
  }
  int deleteOps = 0;
  if (toDelete) {
    try {
      zkDelete(path, true, callback);
    } catch (PathNotFoundException e) {
      // this is a no-op, and all children can be skipped
      return deleteOps;
    }
    deleteOps++;
  }
  // now go through the children
  for (RegistryPathStatus status : entries) {
    String childname = status.path;
    String childpath = RegistryPathUtils.join(path, childname);
    deleteOps += purge(childpath, selector, purgePolicy, callback);
  }
  return deleteOps;
}
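The NodeSelector passed in decides which records match during the depth-first walk. A minimal sketch of one, matching on the YARN ID and persistence-policy attributes exercised by the tests above; the class name is illustrative, not the selector Hadoop itself ships:

// Illustrative selector: matches records whose yarn:id equals the target
// ID and whose yarn:persistence equals the requested policy.
public static class SelectById implements NodeSelector {
  private final String id;
  private final String persistence;

  public SelectById(String id, String persistence) {
    this.id = id;
    this.persistence = persistence;
  }

  @Override
  public boolean shouldSelect(String path,
      RegistryPathStatus registryPathStatus,
      ServiceRecord serviceRecord) {
    return id.equals(serviceRecord.get(YarnRegistryAttributes.YARN_ID, ""))
        && persistence.equals(
            serviceRecord.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
  }
}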
use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.
the class S3AFileSystem method innerDelete.
/**
* Delete an object. See {@link #delete(Path, boolean)}.
*
* @param status fileStatus object
* @param recursive if the path is a directory and recursive is set to
* true, the directory is deleted; otherwise an exception is thrown. For
* a file, recursive may be either true or false.
* @return true if the delete was successful, else false.
* @throws IOException due to inability to delete a directory or file.
* @throws AmazonClientException on failures inside the AWS SDK
*/
private boolean innerDelete(S3AFileStatus status, boolean recursive) throws IOException, AmazonClientException {
  Path f = status.getPath();
  LOG.debug("Delete path {} - recursive {}", f, recursive);
  String key = pathToKey(f);
  if (status.isDirectory()) {
    LOG.debug("delete: Path is a directory: {}", f);
    if (!key.endsWith("/")) {
      key = key + "/";
    }
    if (key.equals("/")) {
      return rejectRootDirectoryDelete(status, recursive);
    }
    if (!recursive && !status.isEmptyDirectory()) {
      throw new PathIsNotEmptyDirectoryException(f.toString());
    }
    if (status.isEmptyDirectory()) {
      LOG.debug("Deleting fake empty directory {}", key);
      deleteObject(key);
      instrumentation.directoryDeleted();
    } else {
      LOG.debug("Getting objects for directory prefix {} to delete", key);
      ListObjectsRequest request = createListObjectsRequest(key, null);
      ObjectListing objects = listObjects(request);
      List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>(objects.getObjectSummaries().size());
      while (true) {
        for (S3ObjectSummary summary : objects.getObjectSummaries()) {
          keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
          LOG.debug("Got object to delete {}", summary.getKey());
          if (keys.size() == MAX_ENTRIES_TO_DELETE) {
            removeKeys(keys, true, false);
          }
        }
        if (objects.isTruncated()) {
          objects = continueListObjects(objects);
        } else {
          if (!keys.isEmpty()) {
            removeKeys(keys, false, false);
          }
          break;
        }
      }
    }
  } else {
    LOG.debug("delete: Path is a file");
    instrumentation.fileDeleted(1);
    deleteObject(key);
  }
  Path parent = f.getParent();
  if (parent != null) {
    createFakeDirectoryIfNecessary(parent);
  }
  return true;
}
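The listing loop above accumulates keys and flushes them in batches so that each bulk deletion stays within the S3 DeleteObjects limit of 1,000 keys per request. A standalone sketch of that accumulate-and-flush idiom; bulkDelete here is a placeholder standing in for removeKeys:

import java.util.ArrayList;
import java.util.List;

// Generic batching as used in the deletion loop above.
public class BatchedDeleteSketch {
  // mirrors the S3 bulk-delete cap of 1000 keys per request
  static final int MAX_ENTRIES_TO_DELETE = 1000;

  static void deleteAll(Iterable<String> allKeys) {
    List<String> batch = new ArrayList<>(MAX_ENTRIES_TO_DELETE);
    for (String key : allKeys) {
      batch.add(key);
      if (batch.size() == MAX_ENTRIES_TO_DELETE) {
        bulkDelete(batch);  // flush a full batch mid-listing
        batch.clear();
      }
    }
    if (!batch.isEmpty()) {
      bulkDelete(batch);    // flush the final partial batch
    }
  }

  static void bulkDelete(List<String> keys) {
    // placeholder: issue a single bulk-delete request for these keys
  }
}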