Example usage of org.apache.hadoop.registry.client.types.ServiceRecord in the Apache Hadoop project, taken from the class TestRegistryRMOperations, method testChildDeletion.
@Test
public void testChildDeletion() throws Throwable {
  // Register an application record with a container record beneath it.
  ServiceRecord parentRecord = createRecord("app1", PersistencePolicies.APPLICATION, "app", null);
  ServiceRecord childRecord = createRecord("container1", PersistencePolicies.CONTAINER, "container", null);
  operations.bind("/app", parentRecord, BindFlags.OVERWRITE);
  operations.bind("/app/container", childRecord, BindFlags.OVERWRITE);
  // A FailOnChildren purge of the parent must refuse to delete it while
  // the child entry still exists.
  try {
    int purged = purge("/", "app1", PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
    fail("expected a failure, got a purge count of " + purged);
  } catch (PathIsNotEmptyDirectoryException expected) {
    // expected: the entry has children, so the purge is rejected
  }
}
Example usage of org.apache.hadoop.registry.client.types.ServiceRecord in the Apache Hadoop project, taken from the class TestRegistryRMOperations, method testCreateComplexApplication.
/**
 * Create a complex example app: one application-persisted "tomcat" record
 * with two container-persisted component records under its components
 * subpath, then verify record resolution, child listing, and the three
 * purge policies (FailOnChildren, SkipOnChildren, PurgeAll) against
 * that tree.
 * @throws Throwable on any failure
 */
@Test
public void testCreateComplexApplication() throws Throwable {
String appId = "application_1408631738011_0001";
// common container-ID prefix; the two components differ only in suffix
String cid = "container_1408631738011_0001_01_";
String cid1 = cid + "000001";
String cid2 = cid + "000002";
String appPath = USERPATH + "tomcat";
// application record persists for the lifetime of the application
ServiceRecord webapp = createRecord(appId, PersistencePolicies.APPLICATION, "tomcat-based web application", null);
webapp.addExternalEndpoint(restEndpoint("www", new URI("http", "//loadbalancer/", null)));
// Component 1 has a container lifespan
ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER, null, null);
comp1.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack4server3:43572", null)));
comp1.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
// Component 2 has a container lifespan
ServiceRecord comp2 = createRecord(cid2, PersistencePolicies.CONTAINER, null, null);
comp2.addExternalEndpoint(restEndpoint("www", new URI("http", "//rack1server28:35881", null)));
comp2.addInternalEndpoint(inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
// bind the application record, then the two components under
// the components subpath, keyed by their encoded YARN container IDs
operations.mknode(USERPATH, false);
operations.bind(appPath, webapp, BindFlags.OVERWRITE);
String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS;
operations.mknode(componentsPath, false);
String dns1 = RegistryPathUtils.encodeYarnID(cid1);
String dns1path = componentsPath + dns1;
operations.bind(dns1path, comp1, BindFlags.CREATE);
String dns2 = RegistryPathUtils.encodeYarnID(cid2);
String dns2path = componentsPath + dns2;
operations.bind(dns2path, comp2, BindFlags.CREATE);
// dump the registry tree for diagnostics
ZKPathDumper pathDumper = registry.dumpPath(false);
LOG.info(pathDumper.toString());
logRecord("tomcat", webapp);
logRecord(dns1, comp1);
logRecord(dns2, comp2);
// resolving a component path must return the container persistence policy
ServiceRecord dns1resolved = operations.resolve(dns1path);
assertEquals("Persistence policies on resolved entry", PersistencePolicies.CONTAINER, dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
// both components must appear in the child listing and record extraction
Map<String, RegistryPathStatus> children = RegistryUtils.statChildren(operations, componentsPath);
assertEquals(2, children.size());
Collection<RegistryPathStatus> componentStats = children.values();
Map<String, ServiceRecord> records = RegistryUtils.extractServiceRecords(operations, componentsPath, componentStats);
assertEquals(2, records.size());
// the extracted record must match the directly resolved one
ServiceRecord retrieved1 = records.get(dns1path);
logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1);
assertMatches(dns1resolved, retrieved1);
assertEquals(PersistencePolicies.CONTAINER, retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
// create a listing under components/
operations.mknode(componentsPath + "subdir", false);
// this shows up in the listing of child entries
Map<String, RegistryPathStatus> childrenUpdated = RegistryUtils.statChildren(operations, componentsPath);
assertEquals(3, childrenUpdated.size());
// the non-record child this is not picked up in the record listing
Map<String, ServiceRecord> recordsUpdated = RegistryUtils.extractServiceRecords(operations, componentsPath, childrenUpdated);
assertEquals(2, recordsUpdated.size());
// now do some deletions.
// synchronous delete container ID 2
// fail if the app policy is chosen
assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren));
// succeed for container
assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER, RegistryAdminService.PurgePolicy.FailOnChildren));
assertPathNotFound(dns2path);
assertPathExists(dns1path);
// expect a skip on children to skip
assertEquals(0, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.SkipOnChildren));
assertPathExists(appPath);
assertPathExists(dns1path);
// attempt to delete app with policy of fail on children
try {
int p = purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.FailOnChildren);
fail("expected a failure, got a purge count of " + p);
} catch (PathIsNotEmptyDirectoryException expected) {
// expected
}
// the failed purge must leave the tree untouched
assertPathExists(appPath);
assertPathExists(dns1path);
// now trigger recursive delete
assertEquals(1, purge("/", appId, PersistencePolicies.APPLICATION, RegistryAdminService.PurgePolicy.PurgeAll));
assertPathNotFound(appPath);
assertPathNotFound(dns1path);
}
Example usage of org.apache.hadoop.registry.client.types.ServiceRecord in the Apache Hadoop project, taken from the class TestRegistryRMOperations, method testPurgeEntryCuratorCallback.
@Test
public void testPurgeEntryCuratorCallback() throws Throwable {
  String path = "/users/example/hbase/hbase1/";
  // Bind an application-attempt record under the target path.
  ServiceRecord record = buildExampleServiceEntry(PersistencePolicies.APPLICATION_ATTEMPT);
  record.set(YarnRegistryAttributes.YARN_ID, "testAsyncPurgeEntry_attempt_001");
  operations.mknode(RegistryPathUtils.parentOf(path), true);
  operations.bind(path, record, 0);
  ZKPathDumper dump = registry.dumpPath(false);
  CuratorEventCatcher events = new CuratorEventCatcher();
  LOG.info("Initial state {}", dump);
  String recordId = record.get(YarnRegistryAttributes.YARN_ID, "");
  // A container-scoped purge must leave the APPLICATION_ATTEMPT entry
  // alone and fire no callback events.
  int opcount = purge("/", recordId, PersistencePolicies.CONTAINER, RegistryAdminService.PurgePolicy.PurgeAll, events);
  assertPathExists(path);
  assertEquals(0, opcount);
  assertEquals("Event counter", 0, events.getCount());
  // An application-attempt purge removes the entry with exactly one
  // delete operation and exactly one callback event.
  opcount = purge("/", recordId, PersistencePolicies.APPLICATION_ATTEMPT, RegistryAdminService.PurgePolicy.PurgeAll, events);
  LOG.info("Final state {}", dump);
  assertPathNotFound(path);
  assertEquals("wrong no of delete operations in " + dump, 1, opcount);
  assertEquals("Event counter", 1, events.getCount());
}
Example usage of org.apache.hadoop.registry.client.types.ServiceRecord in the Apache Hive project, taken from the class HS2ActivePassiveHARegistry, method getNewServiceRecord.
/**
 * Builds a fresh {@link ServiceRecord} populated with every
 * configuration entry this registry is configured to publish.
 *
 * @return a new record carrying the publishable configuration
 */
private ServiceRecord getNewServiceRecord() {
  final ServiceRecord record = new ServiceRecord();
  // copy each publishable configuration entry into the record
  for (Map.Entry<String, String> conf : getConfsToPublish().entrySet()) {
    record.set(conf.getKey(), conf.getValue());
  }
  return record;
}
Example usage of org.apache.hadoop.registry.client.types.ServiceRecord in the Apache Hive project, taken from the class LlapZookeeperRegistryImpl, method register.
/**
 * Registers this LLAP daemon: publishes a service record advertising all
 * endpoints plus the LLAP-related configuration, then claims a slot znode
 * under the workers path. On slot-creation failure, cleans up and rethrows
 * as an IOException.
 *
 * @return the unique ID assigned to the registered service record
 * @throws IOException if the slot znode cannot be created in time
 */
@Override
public String register() throws IOException {
  // Assemble the service record advertising every LLAP endpoint.
  ServiceRecord record = new ServiceRecord();
  Endpoint rpc = getRpcEndpoint();
  record.addInternalEndpoint(rpc);
  record.addInternalEndpoint(getMngEndpoint());
  record.addInternalEndpoint(getShuffleEndpoint());
  record.addExternalEndpoint(getServicesEndpoint());
  record.addInternalEndpoint(getOutputFormatEndpoint());
  // Publish all LLAP-prefixed configuration entries alongside the endpoints.
  for (Map.Entry<String, String> entry : this.conf) {
    String key = entry.getKey();
    if (key.startsWith(HiveConf.PREFIX_LLAP) || key.startsWith(HiveConf.PREFIX_HIVE_LLAP)) {
      // TODO: read this somewhere useful, like the task scheduler
      record.set(key, entry.getValue());
    }
  }
  String uniqueId = registerServiceRecord(record);
  long znodeCreationTimeout = 120;
  // Create a znode under the rootNamespace parent for this instance of the server
  try {
    slotZnode = new SlotZnode(zooKeeperClient, workersPath, SLOT_PREFIX, WORKER_PREFIX, uniqueId);
    if (!slotZnode.start(znodeCreationTimeout, TimeUnit.SECONDS)) {
      throw new Exception("Max znode creation wait time: " + znodeCreationTimeout + "s exhausted");
    }
  } catch (Exception e) {
    // Roll back the partial registration before propagating the failure.
    LOG.error("Unable to create a znode for this server instance", e);
    CloseableUtils.closeQuietly(slotZnode);
    super.stop();
    if (e instanceof IOException) {
      throw (IOException) e;
    }
    throw new IOException(e);
  }
  LOG.info("Registered node. Created a znode on ZooKeeper for LLAP instance: rpc: {}, " + "shuffle: {}, webui: {}, mgmt: {}, znodePath: {}", rpc, getShuffleEndpoint(), getServicesEndpoint(), getMngEndpoint(), getRegistrationZnodePath());
  return uniqueId;
}
Aggregations