Example 21 with ServiceRecord

Use of org.apache.hadoop.registry.client.types.ServiceRecord in project hadoop by apache.

From the class TestRegistryRMOperations, method testChildDeletion.

@Test
public void testChildDeletion() throws Throwable {
    ServiceRecord app = createRecord("app1", PersistencePolicies.APPLICATION, "app", null);
    ServiceRecord container = createRecord("container1", PersistencePolicies.CONTAINER, "container", null);
    operations.bind("/app", app, BindFlags.OVERWRITE);
    operations.bind("/app/container", container, BindFlags.OVERWRITE);
    try {
        int p = purge("/", "app1", PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
        fail("expected a failure, got a purge count of " + p);
    } catch (PathIsNotEmptyDirectoryException expected) {
    // expected
    }
}
Also used: PathIsNotEmptyDirectoryException (org.apache.hadoop.fs.PathIsNotEmptyDirectoryException), RegistryTypeUtils.restEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint), RegistryTypeUtils.inetAddrEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint), ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord), Test (org.junit.Test), AbstractRegistryTest (org.apache.hadoop.registry.AbstractRegistryTest)
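
The createRecord and purge calls above are helpers from the test's base classes (AbstractRegistryTest and friends). As a rough, self-contained sketch of what the bind step amounts to against the public client API, the record can be built and bound as below; the RegistryOperationsFactory setup, the /app path, and the yarn attribute package names are assumptions for illustration, not part of the test.

// Hedged sketch: build a ServiceRecord carrying an application persistence
// policy and bind it at /app, overwriting any record already there.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;

public class BindAppRecordSketch {
    public static void main(String[] args) throws Exception {
        // The factory builds a registry client from the configuration; as a
        // Hadoop service it has to be started before use and stopped afterwards.
        RegistryOperations operations =
                RegistryOperationsFactory.createInstance(new Configuration());
        operations.start();
        try {
            ServiceRecord app = new ServiceRecord();
            app.set(YarnRegistryAttributes.YARN_ID, "app1");
            app.set(YarnRegistryAttributes.YARN_PERSISTENCE, PersistencePolicies.APPLICATION);
            app.description = "app";
            // OVERWRITE replaces an existing record instead of failing.
            operations.bind("/app", app, BindFlags.OVERWRITE);
        } finally {
            operations.stop();
        }
    }
}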

Example 22 with ServiceRecord

Use of org.apache.hadoop.registry.client.types.ServiceRecord in project hadoop by apache.

From the class TestRegistryRMOperations, method testCreateComplexApplication.

/**
   * Create a complex example application with two container-scoped component
   * records, then exercise listing, record extraction and the purge policies.
   * @throws Throwable on any test failure
   */
@Test
public void testCreateComplexApplication() throws Throwable {
    String appId = "application_1408631738011_0001";
    String cid = "container_1408631738011_0001_01_";
    String cid1 = cid + "000001";
    String cid2 = cid + "000002";
    String appPath = USERPATH + "tomcat";
    ServiceRecord webapp = createRecord(appId, PersistencePolicies.APPLICATION, "tomcat-based web application", null);
    webapp.addExternalEndpoint(restEndpoint("www", new URI("http", "//loadbalancer/", null)));
    ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER, null, null);
    comp1.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack4server3:43572", null)));
    comp1.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
    // Component 2 has a container lifespan
    ServiceRecord comp2 = createRecord(cid2, PersistencePolicies.CONTAINER, null, null);
    comp2.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack1server28:35881", null)));
    comp2.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
    operations.mknode(USERPATH, false);
    operations.bind(appPath, webapp, BindFlags.OVERWRITE);
    String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS;
    operations.mknode(componentsPath, false);
    String dns1 = RegistryPathUtils.encodeYarnID(cid1);
    String dns1path = componentsPath + dns1;
    operations.bind(dns1path, comp1, BindFlags.CREATE);
    String dns2 = RegistryPathUtils.encodeYarnID(cid2);
    String dns2path = componentsPath + dns2;
    operations.bind(dns2path, comp2, BindFlags.CREATE);
    ZKPathDumper pathDumper = registry.dumpPath(false);
    LOG.info(pathDumper.toString());
    logRecord("tomcat", webapp);
    logRecord(dns1, comp1);
    logRecord(dns2, comp2);
    ServiceRecord dns1resolved = operations.resolve(dns1path);
    assertEquals("Persistence policies on resolved entry", PersistencePolicies.CONTAINER, dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
    Map<String, RegistryPathStatus> children = RegistryUtils.statChildren(operations, componentsPath);
    assertEquals(2, children.size());
    Collection<RegistryPathStatus> componentStats = children.values();
    Map<String, ServiceRecord> records = RegistryUtils.extractServiceRecords(operations, componentsPath, componentStats);
    assertEquals(2, records.size());
    ServiceRecord retrieved1 = records.get(dns1path);
    logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1);
    assertMatches(dns1resolved, retrieved1);
    assertEquals(PersistencePolicies.CONTAINER, retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
    // create a listing under components/
    operations.mknode(componentsPath + "subdir", false);
    // this shows up in the listing of child entries
    Map<String, RegistryPathStatus> childrenUpdated = RegistryUtils.statChildren(operations, componentsPath);
    assertEquals(3, childrenUpdated.size());
    // the non-record child is not picked up in the record listing
    Map<String, ServiceRecord> recordsUpdated = RegistryUtils.extractServiceRecords(operations, componentsPath, childrenUpdated);
    assertEquals(2, recordsUpdated.size());
    // now do some deletions.
    // synchronous delete container ID 2
    // a purge with the application policy does not match the container record, so nothing is deleted
    assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren));
    // succeed for container
    assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER, RegistryAdminService.PurgePolicy.FailOnChildren));
    assertPathNotFound(dns2path);
    assertPathExists(dns1path);
    // SkipOnChildren: the app entry is skipped because it still has children
    assertEquals(0, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.SkipOnChildren));
    assertPathExists(appPath);
    assertPathExists(dns1path);
    // attempt to delete app with policy of fail on children
    try {
        int p = purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
        fail("expected a failure, got a purge count of " + p);
    } catch (PathIsNotEmptyDirectoryException expected) {
    // expected
    }
    assertPathExists(appPath);
    assertPathExists(dns1path);
    // now trigger recursive delete
    assertEquals(1, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.PurgeAll));
    assertPathNotFound(appPath);
    assertPathNotFound(dns1path);
}
Also used: RegistryPathStatus (org.apache.hadoop.registry.client.types.RegistryPathStatus), ZKPathDumper (org.apache.hadoop.registry.client.impl.zk.ZKPathDumper), PathIsNotEmptyDirectoryException (org.apache.hadoop.fs.PathIsNotEmptyDirectoryException), URI (java.net.URI), RegistryTypeUtils.restEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint), RegistryTypeUtils.inetAddrEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint), ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord), Test (org.junit.Test), AbstractRegistryTest (org.apache.hadoop.registry.AbstractRegistryTest)
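
For a standalone view of the component-record pattern used above, the sketch below builds one container-scoped record with an external REST endpoint and an internal JMX endpoint, binds it under a components path, and resolves it back. The path handling, the getExternalEndpoint accessor, and the host/port values are illustrative assumptions, not taken from the test.

// Hedged sketch: bind one component record under componentsPath and read it back.
import java.net.URI;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.apache.hadoop.registry.client.types.Endpoint;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;

public final class ComponentRecordSketch {

    static ServiceRecord bindAndResolveComponent(RegistryOperations operations,
                                                 String componentsPath,
                                                 String containerId) throws Exception {
        ServiceRecord comp = new ServiceRecord();
        comp.set(YarnRegistryAttributes.YARN_ID, containerId);
        comp.set(YarnRegistryAttributes.YARN_PERSISTENCE, PersistencePolicies.CONTAINER);
        comp.addExternalEndpoint(
                RegistryTypeUtils.restEndpoint("www", new URI("http", "//rack4server3:43572", null)));
        comp.addInternalEndpoint(
                RegistryTypeUtils.inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));

        // encodeYarnID maps the container ID onto a registry-safe node name.
        String path = componentsPath + RegistryPathUtils.encodeYarnID(containerId);
        operations.mknode(componentsPath, true);
        operations.bind(path, comp, BindFlags.CREATE);

        ServiceRecord resolved = operations.resolve(path);
        Endpoint www = resolved.getExternalEndpoint("www");  // accessor assumed to exist
        if (www == null) {
            throw new IllegalStateException("www endpoint missing from " + path);
        }
        return resolved;
    }
}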

Example 23 with ServiceRecord

Use of org.apache.hadoop.registry.client.types.ServiceRecord in project hadoop by apache.

From the class TestRegistryRMOperations, method testPurgeEntryCuratorCallback.

@Test
public void testPurgeEntryCuratorCallback() throws Throwable {
    String path = "/users/example/hbase/hbase1/";
    ServiceRecord written = buildExampleServiceEntry(PersistencePolicies.APPLICATION_ATTEMPT);
    written.set(YarnRegistryAttributes.YARN_ID, "testAsyncPurgeEntry_attempt_001");
    operations.mknode(RegistryPathUtils.parentOf(path), true);
    operations.bind(path, written, 0);
    ZKPathDumper dump = registry.dumpPath(false);
    CuratorEventCatcher events = new CuratorEventCatcher();
    LOG.info("Initial state {}", dump);
    // a purge with the CONTAINER policy should not match this APPLICATION_ATTEMPT record
    String id = written.get(YarnRegistryAttributes.YARN_ID, "");
    int opcount = purge("/", id, PersistencePolicies.CONTAINER, RegistryAdminService.PurgePolicy.PurgeAll, events);
    assertPathExists(path);
    assertEquals(0, opcount);
    assertEquals("Event counter", 0, events.getCount());
    // now the application attempt
    opcount = purge("/", id, PersistencePolicies.APPLICATION_ATTEMPT, RegistryAdminService.PurgePolicy.PurgeAll, events);
    LOG.info("Final state {}", dump);
    assertPathNotFound(path);
    assertEquals("wrong no of delete operations in " + dump, 1, opcount);
    // and validate the callback event
    assertEquals("Event counter", 1, events.getCount());
}
Also used: ZKPathDumper (org.apache.hadoop.registry.client.impl.zk.ZKPathDumper), RegistryTypeUtils.restEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint), RegistryTypeUtils.inetAddrEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint), ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord), CuratorEventCatcher (org.apache.hadoop.registry.client.impl.CuratorEventCatcher), Test (org.junit.Test), AbstractRegistryTest (org.apache.hadoop.registry.AbstractRegistryTest)
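
The purge helper drives RegistryAdminService's asynchronous purge and counts the Curator callbacks through CuratorEventCatcher. Its core decision, deleting an entry only when its yarn:id and yarn:persistence attributes match the request, can be approximated against the plain client API as below; this is a simplified, single-path sketch, not the admin service's actual code.

// Hedged sketch: a synchronous, single-path version of the policy check that
// the purge performs. Returns the number of records deleted (0 or 1).
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;

public final class SelectivePurgeSketch {

    static int purgeIfPolicyMatches(RegistryOperations operations,
                                    String path,
                                    String id,
                                    String persistencePolicy) throws Exception {
        ServiceRecord record = operations.resolve(path);
        String recordId = record.get(YarnRegistryAttributes.YARN_ID, "");
        String recordPolicy = record.get(YarnRegistryAttributes.YARN_PERSISTENCE, "");
        // Delete only when both the ID and the persistence policy match; this is
        // why the CONTAINER-policy purge in the test above deletes nothing.
        if (recordId.equals(id) && recordPolicy.equals(persistencePolicy)) {
            operations.delete(path, true);
            return 1;
        }
        return 0;
    }
}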

Example 24 with ServiceRecord

Use of org.apache.hadoop.registry.client.types.ServiceRecord in project hive by apache.

From the class HS2ActivePassiveHARegistry, method getNewServiceRecord.

private ServiceRecord getNewServiceRecord() {
    ServiceRecord srv = new ServiceRecord();
    final Map<String, String> confsToPublish = getConfsToPublish();
    for (Map.Entry<String, String> entry : confsToPublish.entrySet()) {
        srv.set(entry.getKey(), entry.getValue());
    }
    return srv;
}
Also used: HashMap (java.util.HashMap), Map (java.util.Map), ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord)
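
getConfsToPublish returns an ordinary Map of configuration entries that this HA instance wants to advertise; the ServiceRecord simply carries them as string attributes. A hedged sketch of the consuming side, reading those attributes back from a resolved record, is shown below. The attributes() accessor and the example key are assumptions, not part of the Hive code.

// Hedged sketch: read published configuration values out of a resolved record.
import java.util.Map;
import org.apache.hadoop.registry.client.types.ServiceRecord;

public final class ReadPublishedConfSketch {

    static void dumpAttributes(ServiceRecord record) {
        // attributes() exposes the record's key/value map (accessor assumed to exist).
        for (Map.Entry<String, String> entry : record.attributes().entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
        // get(key, default) reads a single attribute with a fallback value;
        // the key name here is purely illustrative.
        String port = record.get("hive.server2.thrift.port", "unknown");
        System.out.println("published thrift port: " + port);
    }
}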

Example 25 with ServiceRecord

Use of org.apache.hadoop.registry.client.types.ServiceRecord in project hive by apache.

From the class LlapZookeeperRegistryImpl, method register.

@Override
public String register() throws IOException {
    ServiceRecord srv = new ServiceRecord();
    Endpoint rpcEndpoint = getRpcEndpoint();
    srv.addInternalEndpoint(rpcEndpoint);
    srv.addInternalEndpoint(getMngEndpoint());
    srv.addInternalEndpoint(getShuffleEndpoint());
    srv.addExternalEndpoint(getServicesEndpoint());
    srv.addInternalEndpoint(getOutputFormatEndpoint());
    for (Map.Entry<String, String> kv : this.conf) {
        if (kv.getKey().startsWith(HiveConf.PREFIX_LLAP) || kv.getKey().startsWith(HiveConf.PREFIX_HIVE_LLAP)) {
            // TODO: read this somewhere useful, like the task scheduler
            srv.set(kv.getKey(), kv.getValue());
        }
    }
    String uniqueId = registerServiceRecord(srv);
    long znodeCreationTimeout = 120;
    // Create a znode under the rootNamespace parent for this instance of the server
    try {
        slotZnode = new SlotZnode(zooKeeperClient, workersPath, SLOT_PREFIX, WORKER_PREFIX, uniqueId);
        if (!slotZnode.start(znodeCreationTimeout, TimeUnit.SECONDS)) {
            throw new Exception("Max znode creation wait time: " + znodeCreationTimeout + "s exhausted");
        }
    } catch (Exception e) {
        LOG.error("Unable to create a znode for this server instance", e);
        CloseableUtils.closeQuietly(slotZnode);
        super.stop();
        throw (e instanceof IOException) ? (IOException) e : new IOException(e);
    }
    LOG.info("Registered node. Created a znode on ZooKeeper for LLAP instance: rpc: {}, " + "shuffle: {}, webui: {}, mgmt: {}, znodePath: {}", rpcEndpoint, getShuffleEndpoint(), getServicesEndpoint(), getMngEndpoint(), getRegistrationZnodePath());
    return uniqueId;
}
Also used: Endpoint (org.apache.hadoop.registry.client.types.Endpoint), IOException (java.io.IOException), HashMap (java.util.HashMap), Map (java.util.Map), TreeMap (java.util.TreeMap), URISyntaxException (java.net.URISyntaxException), MalformedURLException (java.net.MalformedURLException), ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord)
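
The getRpcEndpoint/getMngEndpoint/getShuffleEndpoint/getServicesEndpoint/getOutputFormatEndpoint helpers are private to LlapZookeeperRegistryImpl and wrap the daemon's configured ports. As a rough approximation of the record they produce, the stock RegistryTypeUtils helpers can build equivalent endpoints; the API names, host, and port numbers below are placeholders, not LLAP's real values.

// Hedged sketch: an LLAP-style ServiceRecord with internal RPC and shuffle
// endpoints plus one external web endpoint, built with RegistryTypeUtils.
import java.net.URI;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.apache.hadoop.registry.client.types.ServiceRecord;

public final class LlapLikeRecordSketch {

    static ServiceRecord buildRecord(String host) throws Exception {
        ServiceRecord srv = new ServiceRecord();
        // Internal endpoints: intended for other cluster services only.
        srv.addInternalEndpoint(
                RegistryTypeUtils.inetAddrEndpoint("llap", "rpc", host, 15001));    // placeholder port
        srv.addInternalEndpoint(
                RegistryTypeUtils.inetAddrEndpoint("shuffle", "tcp", host, 15551)); // placeholder port
        // External endpoint: the web services address advertised to clients.
        srv.addExternalEndpoint(
                RegistryTypeUtils.restEndpoint("services",
                        new URI("http", "//" + host + ":15002", null)));            // placeholder URI
        return srv;
    }
}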

Aggregations

ServiceRecord (org.apache.hadoop.registry.client.types.ServiceRecord): 42 usages
Test (org.junit.Test): 21 usages
AbstractRegistryTest (org.apache.hadoop.registry.AbstractRegistryTest): 14 usages
IOException (java.io.IOException): 10 usages
RegistryPathStatus (org.apache.hadoop.registry.client.types.RegistryPathStatus): 6 usages
NoRecordException (org.apache.hadoop.registry.client.exceptions.NoRecordException): 5 usages
Endpoint (org.apache.hadoop.registry.client.types.Endpoint): 5 usages
HashMap (java.util.HashMap): 4 usages
ParseException (org.apache.commons.cli.ParseException): 4 usages
PathNotFoundException (org.apache.hadoop.fs.PathNotFoundException): 4 usages
RegistryTypeUtils.inetAddrEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint): 4 usages
RegistryTypeUtils.restEndpoint (org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint): 4 usages
InvalidRecordException (org.apache.hadoop.registry.client.exceptions.InvalidRecordException): 4 usages
URISyntaxException (java.net.URISyntaxException): 3 usages
Map (java.util.Map): 3 usages
PathIsNotEmptyDirectoryException (org.apache.hadoop.fs.PathIsNotEmptyDirectoryException): 3 usages
ZKPathDumper (org.apache.hadoop.registry.client.impl.zk.ZKPathDumper): 3 usages
EOFException (java.io.EOFException): 2 usages
UndeclaredThrowableException (java.lang.reflect.UndeclaredThrowableException): 2 usages
URI (java.net.URI): 2 usages