Search in sources :

Example 1 with PathIsNotEmptyDirectoryException

use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.

The class CuratorService, method operationFailure:

/**
 * Map a caught exception to an IOException for the caller to throw.
 * The concrete IOException subclass is chosen from the specific
 * {@link KeeperException} subclass; anything unrecognized becomes a
 * generic {@link RegistryIOException}.
 * @param path path of the operation
 * @param operation operation attempted
 * @param exception the exception caught
 * @param acls the ACLs in force, included in permission diagnostics
 * @return an IOException to throw that contains the path and operation details.
 */
protected IOException operationFailure(String path, String operation, Exception exception, List<ACL> acls) {
    // ACL text reused by the permission-related failure messages
    String aclList = "[" + RegistrySecurity.aclsToString(acls) + "]";
    final IOException result;
    if (exception instanceof KeeperException.NoNodeException) {
        result = new PathNotFoundException(path);
    } else if (exception instanceof KeeperException.NodeExistsException) {
        result = new FileAlreadyExistsException(path);
    } else if (exception instanceof KeeperException.NoAuthException) {
        result = new NoPathPermissionsException(path, "Not authorized to access path; ACLs: " + aclList);
    } else if (exception instanceof KeeperException.NotEmptyException) {
        result = new PathIsNotEmptyDirectoryException(path);
    } else if (exception instanceof KeeperException.AuthFailedException) {
        result = new AuthenticationFailedException(path, "Authentication Failed: " + exception + "; " + securityConnectionDiagnostics, exception);
    } else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) {
        result = new NoChildrenForEphemeralsException(path, "Cannot create a path under an ephemeral node: " + exception, exception);
    } else if (exception instanceof KeeperException.InvalidACLException) {
        // a security exception of a kind: include the ACLs and the
        // connection diagnostics to help debugging
        result = new NoPathPermissionsException(path, "Path access failure " + aclList + " " + securityConnectionDiagnostics);
    } else {
        result = new RegistryIOException(path, "Failure of " + operation + " on " + path + ": " + exception.toString(), exception);
    }
    // attach the underlying exception unless a cause was already supplied
    // by one of the constructors above
    if (result.getCause() == null) {
        result.initCause(exception);
    }
    return result;
}
Also used : NoPathPermissionsException(org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) AuthenticationFailedException(org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException) PathIsNotEmptyDirectoryException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException) IOException(java.io.IOException) RegistryIOException(org.apache.hadoop.registry.client.exceptions.RegistryIOException) NoChildrenForEphemeralsException(org.apache.hadoop.registry.client.exceptions.NoChildrenForEphemeralsException) PathNotFoundException(org.apache.hadoop.fs.PathNotFoundException) RegistryIOException(org.apache.hadoop.registry.client.exceptions.RegistryIOException) KeeperException(org.apache.zookeeper.KeeperException)

Example 2 with PathIsNotEmptyDirectoryException

use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.

The class TestRegistryRMOperations, method testChildDeletion:

@Test
public void testChildDeletion() throws Throwable {
    // bind an application record with one container record beneath it
    ServiceRecord appRecord = createRecord("app1", PersistencePolicies.APPLICATION, "app", null);
    ServiceRecord containerRecord = createRecord("container1", PersistencePolicies.CONTAINER, "container", null);
    operations.bind("/app", appRecord, BindFlags.OVERWRITE);
    operations.bind("/app/container", containerRecord, BindFlags.OVERWRITE);
    // a purge with FailOnChildren must refuse to delete the app node
    // while its container child still exists
    try {
        int purgeCount = purge("/", "app1", PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
        fail("expected a failure, got a purge count of " + purgeCount);
    } catch (PathIsNotEmptyDirectoryException expected) {
        // expected
    }
}
Also used : PathIsNotEmptyDirectoryException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException) RegistryTypeUtils.restEndpoint(org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint) RegistryTypeUtils.inetAddrEndpoint(org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint) ServiceRecord(org.apache.hadoop.registry.client.types.ServiceRecord) Test(org.junit.Test) AbstractRegistryTest(org.apache.hadoop.registry.AbstractRegistryTest)

Example 3 with PathIsNotEmptyDirectoryException

use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.

The class TestRegistryRMOperations, method testCreateComplexApplication:

/**
   * Create a complex example app: an application-level "tomcat" record
   * with two container-level component records under its components path,
   * then exercise resolution, child listing, record extraction, and the
   * three purge policies (FailOnChildren, SkipOnChildren, PurgeAll).
   * @throws Throwable on any test failure
   */
@Test
public void testCreateComplexApplication() throws Throwable {
    String appId = "application_1408631738011_0001";
    String cid = "container_1408631738011_0001_01_";
    String cid1 = cid + "000001";
    String cid2 = cid + "000002";
    String appPath = USERPATH + "tomcat";
    // application record with an external REST endpoint
    ServiceRecord webapp = createRecord(appId, PersistencePolicies.APPLICATION, "tomcat-based web application", null);
    webapp.addExternalEndpoint(restEndpoint("www", new URI("http", "//loadbalancer/", null)));
    // Component 1: container lifespan, one external and one internal endpoint
    ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER, null, null);
    comp1.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack4server3:43572", null)));
    comp1.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
    // Component 2 has a container lifespan
    ServiceRecord comp2 = createRecord(cid2, PersistencePolicies.CONTAINER, null, null);
    comp2.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack1server28:35881", null)));
    comp2.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
    // build the tree: user path, app record, components path
    operations.mknode(USERPATH, false);
    operations.bind(appPath, webapp, BindFlags.OVERWRITE);
    String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS;
    operations.mknode(componentsPath, false);
    // NOTE(review): these concatenations assume SUBPATH_COMPONENTS ends
    // with a path separator — confirm against RegistryConstants
    String dns1 = RegistryPathUtils.encodeYarnID(cid1);
    String dns1path = componentsPath + dns1;
    operations.bind(dns1path, comp1, BindFlags.CREATE);
    String dns2 = RegistryPathUtils.encodeYarnID(cid2);
    String dns2path = componentsPath + dns2;
    operations.bind(dns2path, comp2, BindFlags.CREATE);
    // dump the ZK tree for diagnostics
    ZKPathDumper pathDumper = registry.dumpPath(false);
    LOG.info(pathDumper.toString());
    logRecord("tomcat", webapp);
    logRecord(dns1, comp1);
    logRecord(dns2, comp2);
    // resolving a component must round-trip its persistence policy
    ServiceRecord dns1resolved = operations.resolve(dns1path);
    assertEquals("Persistence policies on resolved entry", PersistencePolicies.CONTAINER, dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
    // both components show up as children with extractable records
    Map<String, RegistryPathStatus> children = RegistryUtils.statChildren(operations, componentsPath);
    assertEquals(2, children.size());
    Collection<RegistryPathStatus> componentStats = children.values();
    Map<String, ServiceRecord> records = RegistryUtils.extractServiceRecords(operations, componentsPath, componentStats);
    assertEquals(2, records.size());
    // the extracted record must match the directly-resolved one
    ServiceRecord retrieved1 = records.get(dns1path);
    logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1);
    assertMatches(dns1resolved, retrieved1);
    assertEquals(PersistencePolicies.CONTAINER, retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
    // create a listing under components/
    operations.mknode(componentsPath + "subdir", false);
    // this shows up in the listing of child entries
    Map<String, RegistryPathStatus> childrenUpdated = RegistryUtils.statChildren(operations, componentsPath);
    assertEquals(3, childrenUpdated.size());
    // the non-record child this is not picked up in the record listing
    Map<String, ServiceRecord> recordsUpdated = RegistryUtils.extractServiceRecords(operations, componentsPath, childrenUpdated);
    assertEquals(2, recordsUpdated.size());
    // now do some deletions.
    // synchronous delete container ID 2
    // fail if the app policy is chosen
    assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren));
    // succeed for container
    assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER, RegistryAdminService.PurgePolicy.FailOnChildren));
    assertPathNotFound(dns2path);
    assertPathExists(dns1path);
    // expect a skip on children to skip
    assertEquals(0, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.SkipOnChildren));
    assertPathExists(appPath);
    assertPathExists(dns1path);
    // attempt to delete app with policy of fail on children
    try {
        int p = purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
        fail("expected a failure, got a purge count of " + p);
    } catch (PathIsNotEmptyDirectoryException expected) {
    // expected
    }
    assertPathExists(appPath);
    assertPathExists(dns1path);
    // now trigger recursive delete
    assertEquals(1, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.PurgeAll));
    assertPathNotFound(appPath);
    assertPathNotFound(dns1path);
}
Also used : RegistryPathStatus(org.apache.hadoop.registry.client.types.RegistryPathStatus) ZKPathDumper(org.apache.hadoop.registry.client.impl.zk.ZKPathDumper) PathIsNotEmptyDirectoryException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException) URI(java.net.URI) RegistryTypeUtils.restEndpoint(org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint) RegistryTypeUtils.inetAddrEndpoint(org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint) ServiceRecord(org.apache.hadoop.registry.client.types.ServiceRecord) Test(org.junit.Test) AbstractRegistryTest(org.apache.hadoop.registry.AbstractRegistryTest)

Example 4 with PathIsNotEmptyDirectoryException

use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.

The class RegistryAdminService, method purge:

/**
   * Recursive operation to purge all matching records under a base path.
   * <ol>
   *   <li>Uses a depth first search</li>
   *   <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
   *   <li>If a record matches then it is deleted without any child searches</li>
   *   <li>Deletions will be asynchronous if a callback is provided</li>
   * </ol>
   *
   * The code is designed to be robust against parallel deletions taking place;
   * in such a case it will stop attempting that part of the tree. This
   * avoids the situation of more than one purge happening in parallel and
   * one of the purge operations deleting the node tree above the other.
   * @param path base path
   * @param selector selector for the purge policy
   * @param purgePolicy what to do if there is a matching record with children
   * @param callback optional curator callback
   * @return the number of delete operations performed. As deletes may be for
   * everything under a path, this may be less than the number of records
   * actually deleted
   * @throws IOException problems
   * @throws PathIsNotEmptyDirectoryException if an entry cannot be deleted
   * as it has children and the purge policy is FailOnChildren
   */
@VisibleForTesting
public int purge(String path, NodeSelector selector, PurgePolicy purgePolicy, BackgroundCallback callback) throws IOException {
    // list this path's children; a missing path means another purge
    // (or client) deleted it already, so there is nothing to do here
    Collection<RegistryPathStatus> children;
    try {
        Map<String, RegistryPathStatus> childEntries = RegistryUtils.statChildren(this, path);
        children = childEntries.values();
    } catch (PathNotFoundException e) {
        return 0;
    }
    // look at this node itself: does the selector want it deleted?
    boolean deleteSelf = false;
    try {
        RegistryPathStatus status = stat(path);
        ServiceRecord record = resolve(path);
        // there is a record here; ask the selector
        deleteSelf = selector.shouldSelect(path, status, record);
    } catch (EOFException ignored) {
        // no data at this node: nothing to select
    } catch (InvalidRecordException ignored) {
        // unparseable record: treat as no match
    } catch (NoRecordException ignored) {
        // no service record present: treat as no match
    } catch (PathNotFoundException e) {
        // deleted underneath us: abandon this subtree
        return 0;
    }
    if (deleteSelf && !children.isEmpty()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Match on record @ {} with children ", path);
        }
        // a matching record with children: the policy decides the outcome
        if (purgePolicy == PurgePolicy.SkipOnChildren) {
            // don't do the deletion... continue to the child records
            if (LOG.isDebugEnabled()) {
                LOG.debug("Skipping deletion");
            }
            deleteSelf = false;
        } else if (purgePolicy == PurgePolicy.PurgeAll) {
            // delete this node and everything beneath it; the recursive
            // zkDelete below covers the children, so skip per-child recursion
            if (LOG.isDebugEnabled()) {
                LOG.debug("Scheduling for deletion with children");
            }
            children = new ArrayList<RegistryPathStatus>(0);
        } else if (purgePolicy == PurgePolicy.FailOnChildren) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failing deletion operation");
            }
            throw new PathIsNotEmptyDirectoryException(path);
        }
    }
    int deleteOps = 0;
    if (deleteSelf) {
        try {
            // recursive delete; asynchronous when a callback was supplied
            zkDelete(path, true, callback);
        } catch (PathNotFoundException e) {
            // a parallel deletion got here first; children are gone too
            return deleteOps;
        }
        deleteOps++;
    }
    // depth-first recursion into the remaining children
    for (RegistryPathStatus child : children) {
        String childPath = RegistryPathUtils.join(path, child.path);
        deleteOps += purge(childPath, selector, purgePolicy, callback);
    }
    return deleteOps;
}
Also used : PathIsNotEmptyDirectoryException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException) ServiceRecord(org.apache.hadoop.registry.client.types.ServiceRecord) RegistryPathStatus(org.apache.hadoop.registry.client.types.RegistryPathStatus) EOFException(java.io.EOFException) NoRecordException(org.apache.hadoop.registry.client.exceptions.NoRecordException) PathNotFoundException(org.apache.hadoop.fs.PathNotFoundException) InvalidRecordException(org.apache.hadoop.registry.client.exceptions.InvalidRecordException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)

Example 5 with PathIsNotEmptyDirectoryException

use of org.apache.hadoop.fs.PathIsNotEmptyDirectoryException in project hadoop by apache.

The class S3AFileSystem, method innerDelete:

/**
   * Delete an object. See {@link #delete(Path, boolean)}.
   * Files are deleted directly; directories are either a fake empty-dir
   * marker object (deleted directly) or a prefix whose objects are listed
   * and bulk-deleted in batches.
   *
   * @param status fileStatus object
   * @param recursive if path is a directory and set to
   * true, the directory is deleted else throws an exception. In
   * case of a file the recursive can be set to either true or false.
   * @return  true if delete is successful else false.
   * @throws IOException due to inability to delete a directory or file.
   * @throws AmazonClientException on failures inside the AWS SDK
   */
private boolean innerDelete(S3AFileStatus status, boolean recursive) throws IOException, AmazonClientException {
    Path f = status.getPath();
    LOG.debug("Delete path {} - recursive {}", f, recursive);
    String key = pathToKey(f);
    if (status.isDirectory()) {
        LOG.debug("delete: Path is a directory: {}", f);
        // directory keys carry a trailing slash in the object store
        if (!key.endsWith("/")) {
            key = key + "/";
        }
        // root directory deletion is handled by its own policy check
        if (key.equals("/")) {
            return rejectRootDirectoryDelete(status, recursive);
        }
        // a non-recursive delete of a non-empty directory is an error
        if (!recursive && !status.isEmptyDirectory()) {
            throw new PathIsNotEmptyDirectoryException(f.toString());
        }
        if (status.isEmptyDirectory()) {
            // an empty directory exists only as a fake marker object
            LOG.debug("Deleting fake empty directory {}", key);
            deleteObject(key);
            instrumentation.directoryDeleted();
        } else {
            // non-empty directory: page through the listing of the prefix,
            // accumulating keys and bulk-deleting them in batches
            LOG.debug("Getting objects for directory prefix {} to delete", key);
            ListObjectsRequest request = createListObjectsRequest(key, null);
            ObjectListing objects = listObjects(request);
            List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>(objects.getObjectSummaries().size());
            while (true) {
                for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                    keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                    LOG.debug("Got object to delete {}", summary.getKey());
                    // flush a full batch; presumably removeKeys clears the
                    // list when its second argument is true — TODO confirm
                    if (keys.size() == MAX_ENTRIES_TO_DELETE) {
                        removeKeys(keys, true, false);
                    }
                }
                if (objects.isTruncated()) {
                    // more results remain: fetch the next page
                    objects = continueListObjects(objects);
                } else {
                    // final page: delete whatever keys remain, then stop
                    if (!keys.isEmpty()) {
                        removeKeys(keys, false, false);
                    }
                    break;
                }
            }
        }
    } else {
        LOG.debug("delete: Path is a file");
        instrumentation.fileDeleted(1);
        deleteObject(key);
    }
    // NOTE(review): presumably re-creates a fake directory marker for the
    // parent if it is now empty — confirm against createFakeDirectoryIfNecessary
    Path parent = f.getParent();
    if (parent != null) {
        createFakeDirectoryIfNecessary(parent);
    }
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) ListObjectsRequest(com.amazonaws.services.s3.model.ListObjectsRequest) ArrayList(java.util.ArrayList) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) PathIsNotEmptyDirectoryException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) DeleteObjectsRequest(com.amazonaws.services.s3.model.DeleteObjectsRequest)

Aggregations

PathIsNotEmptyDirectoryException (org.apache.hadoop.fs.PathIsNotEmptyDirectoryException)5 ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord)3 PathNotFoundException (org.apache.hadoop.fs.PathNotFoundException)2 AbstractRegistryTest (org.apache.hadoop.registry.AbstractRegistryTest)2 RegistryTypeUtils.inetAddrEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint)2 RegistryTypeUtils.restEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint)2 RegistryPathStatus (org.apache.hadoop.registry.client.types.RegistryPathStatus)2 Test (org.junit.Test)2 DeleteObjectsRequest (com.amazonaws.services.s3.model.DeleteObjectsRequest)1 ListObjectsRequest (com.amazonaws.services.s3.model.ListObjectsRequest)1 ObjectListing (com.amazonaws.services.s3.model.ObjectListing)1 S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary)1 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 EOFException (java.io.EOFException)1 IOException (java.io.IOException)1 URI (java.net.URI)1 ArrayList (java.util.ArrayList)1 FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException)1 Path (org.apache.hadoop.fs.Path)1 AuthenticationFailedException (org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException)1