Use of org.apache.hadoop.registry.client.types.Endpoint in project hadoop by apache.
The class RegistryTestHelper, method validateEntry.
/**
 * General code to validate bits of a component/service entry built with
 * {@link #addSampleEndpoints(ServiceRecord, String)}.
 * @param record instance to check
 */
public static void validateEntry(ServiceRecord record) {
  assertNotNull("null service record", record);
  List<Endpoint> endpoints = record.external;
  assertEquals(2, endpoints.size());

  // The external WebHDFS endpoint: a single URI address over REST
  Endpoint webhdfs = findEndpoint(record, API_WEBHDFS, true, 1, 1);
  assertEquals(API_WEBHDFS, webhdfs.api);
  assertEquals(AddressTypes.ADDRESS_URI, webhdfs.addressType);
  assertEquals(ProtocolTypes.PROTOCOL_REST, webhdfs.protocolType);
  List<Map<String, String>> addressList = webhdfs.addresses;
  Map<String, String> url = addressList.get(0);
  String addr = url.get("uri");
  assertTrue(addr.contains("http"));
  assertTrue(addr.contains(":9820"));

  // Internal endpoints: NNIPC (thrift) and IPC2
  Endpoint nnipc = findEndpoint(record, NNIPC, false, 1, 2);
  assertEquals("wrong protocol in " + nnipc,
      ProtocolTypes.PROTOCOL_THRIFT, nnipc.protocolType);
  Endpoint ipc2 = findEndpoint(record, IPC2, false, 1, 2);
  assertNotNull(ipc2);

  // External web UI endpoint: one address holding one element
  Endpoint web = findEndpoint(record, HTTP_API, true, 1, 1);
  assertEquals(1, web.addresses.size());
  assertEquals(1, web.addresses.get(0).size());
}
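Paired with addSampleEndpoints below, the helper is typically used as a round-trip check. A minimal sketch, assuming the static helpers are imported from RegistryTestHelper and the hostname is purely illustrative:

// Sketch: build a record with the sample endpoints, then validate it.
ServiceRecord record = new ServiceRecord();
addSampleEndpoints(record, "host.example.org"); // hostname is illustrative
validateEntry(record); // passes: 2 external and 3 internal endpoints were added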
Use of org.apache.hadoop.registry.client.types.Endpoint in project hadoop by apache.
The class RegistryTestHelper, method addSampleEndpoints.
/**
 * Add some sample endpoints to a service record.
 * @param entry record to add the endpoints to
 * @param hostname hostname to use in the endpoint addresses
 * @throws URISyntaxException if an endpoint URI is malformed
 */
public static void addSampleEndpoints(ServiceRecord entry, String hostname)
    throws URISyntaxException {
  assertNotNull(hostname);
  // Two external endpoints: a web UI and a WebHDFS REST API
  entry.addExternalEndpoint(webEndpoint(HTTP_API,
      new URI("http", hostname + ":80", "/")));
  entry.addExternalEndpoint(restEndpoint(API_WEBHDFS,
      new URI("http", hostname + ":9820", "/")));
  // Internal IPC endpoint with an explicitly added host/port address
  Endpoint endpoint = ipcEndpoint(API_HDFS, null);
  endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));
  entry.addInternalEndpoint(endpoint);
  // Two more internal endpoints bound to localhost:8050
  InetSocketAddress localhost = new InetSocketAddress("localhost", 8050);
  entry.addInternalEndpoint(
      inetAddrEndpoint(NNIPC, ProtocolTypes.PROTOCOL_THRIFT, "localhost", 8050));
  entry.addInternalEndpoint(RegistryTypeUtils.ipcEndpoint(IPC2, localhost));
}
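Each address added above is just a small string map. As a rough sketch of what RegistryTypeUtils.hostnamePortPair(hostname, 8030) boils down to (the "host"/"port" key names follow the registry's address-field conventions and are stated here as an assumption):

// Illustrative equivalent of RegistryTypeUtils.hostnamePortPair(hostname, 8030);
// the literal key names are an assumption, not copied from the source above.
Map<String, String> pair = new HashMap<String, String>();
pair.put("host", hostname); // hostname-and-port address, keyed by "host"...
pair.put("port", "8030");   // ...and "port", both stored as strings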
Use of org.apache.hadoop.registry.client.types.Endpoint in project hadoop by apache.
The class RegistryCli, method resolve.
@SuppressWarnings("unchecked")
public int resolve(String[] args) {
  Options resolveOption = new Options();
  CommandLineParser parser = new GnuParser();
  try {
    CommandLine line = parser.parse(resolveOption, args);
    List<String> argsList = line.getArgList();
    // argsList.get(0) is the command name itself; exactly one path must follow
    if (argsList.size() != 2) {
      return usageError("resolve requires exactly one path argument", RESOLVE_USAGE);
    }
    if (!validatePath(argsList.get(1))) {
      return -1;
    }
    try {
      ServiceRecord record = registry.resolve(argsList.get(1));
      // Print every external endpoint along with each of its address maps
      for (Endpoint endpoint : record.external) {
        sysout.println(" Endpoint(ProtocolType=" + endpoint.protocolType
            + ", Api=" + endpoint.api + ");"
            + " Addresses(AddressType=" + endpoint.addressType + ") are: ");
        for (Map<String, String> address : endpoint.addresses) {
          sysout.println("[ ");
          for (Map.Entry<String, String> entry : address.entrySet()) {
            sysout.print("\t" + entry.getKey() + ":" + entry.getValue());
          }
          sysout.println("\n]");
        }
        sysout.println();
      }
      return 0;
    } catch (Exception e) {
      syserr.println(analyzeException("resolve", e, argsList));
    }
    return -1;
  } catch (ParseException exp) {
    return usageError("Invalid syntax " + exp, RESOLVE_USAGE);
  }
}
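The same lookup can be done programmatically without the CLI. A minimal sketch using the registry client API (RegistryOperationsFactory and RegistryOperations live in org.apache.hadoop.registry.client.api; the record path is hypothetical and error handling is elided):

// Sketch: resolve a record and list its external endpoints directly.
Configuration conf = new Configuration(); // org.apache.hadoop.conf.Configuration
RegistryOperations registry = RegistryOperationsFactory.createInstance(conf);
registry.start();
// Hypothetical path; use the path your service record was bound under.
ServiceRecord record = registry.resolve("/users/example/services/test/1");
for (Endpoint endpoint : record.external) {
  System.out.println(endpoint.api + " -> " + endpoint.addresses);
}
registry.stop();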
Use of org.apache.hadoop.registry.client.types.Endpoint in project hive by apache.
The class LlapZookeeperRegistryImpl, method register.
@Override
public String register() throws IOException {
  ServiceRecord srv = new ServiceRecord();
  Endpoint rpcEndpoint = getRpcEndpoint();
  srv.addInternalEndpoint(rpcEndpoint);
  srv.addInternalEndpoint(getMngEndpoint());
  srv.addInternalEndpoint(getShuffleEndpoint());
  srv.addExternalEndpoint(getServicesEndpoint());
  srv.addInternalEndpoint(getOutputFormatEndpoint());
  // Copy all LLAP-related configuration entries into the record
  for (Map.Entry<String, String> kv : this.conf) {
    if (kv.getKey().startsWith(HiveConf.PREFIX_LLAP)
        || kv.getKey().startsWith(HiveConf.PREFIX_HIVE_LLAP)) {
      // TODO: read this somewhere useful, like the task scheduler
      srv.set(kv.getKey(), kv.getValue());
    }
  }
  // Restart-sensitive instance id: changes each time the daemon restarts
  srv.set(UNIQUE_IDENTIFIER, uniq.toString());
  // Create a znode under the rootNamespace parent for this instance of the server
  try {
    // PersistentEphemeralNode keeps the ephemeral node present even across
    // connection or session interruptions (it handles retries automatically)
    znode = new PersistentEphemeralNode(zooKeeperClient, Mode.EPHEMERAL_SEQUENTIAL,
        workersPath + "/" + WORKER_PREFIX, encoder.toBytes(srv));
    // Start the creation of znodes
    znode.start();
    // Wait up to 120s for node creation
    long znodeCreationTimeout = 120;
    if (!znode.waitForInitialCreate(znodeCreationTimeout, TimeUnit.SECONDS)) {
      throw new Exception("Max znode creation wait time: " + znodeCreationTimeout + "s exhausted");
    }
    znodePath = znode.getActualPath();
    slotZnode = new SlotZnode(zooKeeperClient, workersPath, SLOT_PREFIX, WORKER_PREFIX, uniq.toString());
    if (!slotZnode.start(znodeCreationTimeout, TimeUnit.SECONDS)) {
      throw new Exception("Max znode creation wait time: " + znodeCreationTimeout + "s exhausted");
    }
    if (HiveConf.getBoolVar(conf, ConfVars.LLAP_VALIDATE_ACLS)) {
      try {
        checkAndSetAcls();
      } catch (Exception ex) {
        throw new IOException("Error validating or setting ACLs. " + DISABLE_MESSAGE, ex);
      }
    }
    if (zooKeeperClient.checkExists().forPath(znodePath) == null) {
      // No node exists, throw exception
      throw new Exception("Unable to create znode for this LLAP instance on ZooKeeper.");
    }
    LOG.info("Registered node. Created a znode on ZooKeeper for LLAP instance: rpc: {}, shuffle: {},"
        + " webui: {}, mgmt: {}, znodePath: {} ", rpcEndpoint, getShuffleEndpoint(),
        getServicesEndpoint(), getMngEndpoint(), znodePath);
  } catch (Exception e) {
    LOG.error("Unable to create a znode for this server instance", e);
    CloseableUtils.closeQuietly(znode);
    CloseableUtils.closeQuietly(slotZnode);
    throw (e instanceof IOException) ? (IOException) e : new IOException(e);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created znode with path: {} service record: {}", znodePath, srv);
  }
  return uniq.toString();
}
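The encoder here serializes the ServiceRecord before it is stored in the znode. A client reading the worker znode back can decode it along these lines (a sketch, assuming the payload was written with the registry's ServiceRecordMarshal from org.apache.hadoop.registry.client.binding.RegistryUtils):

// Sketch: read a worker znode and decode its ServiceRecord payload.
byte[] bytes = zooKeeperClient.getData().forPath(znodePath);
ServiceRecord worker =
    new RegistryUtils.ServiceRecordMarshal().fromBytes(znodePath, bytes);
String instanceId = worker.get(UNIQUE_IDENTIFIER); // the restart-sensitive id set above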
Use of org.apache.hadoop.registry.client.types.Endpoint in project jstorm by alibaba.
The class JstormMaster, method setupServiceRecord.
private ServiceRecord setupServiceRecord() {
  ServiceRecord application = new ServiceRecord();
  application.set(YarnRegistryAttributes.YARN_ID,
      jstormMasterContext.appAttemptID.getApplicationId().toString());
  application.description = JOYConstants.AM;
  application.set(YarnRegistryAttributes.YARN_PERSISTENCE, PersistencePolicies.PERMANENT);
  // Publish the AM's thrift host/port as a single external endpoint
  Map<String, String> addresses = new HashMap<String, String>();
  addresses.put(JOYConstants.HOST, jstormMasterContext.appMasterHostname);
  addresses.put(JOYConstants.PORT, String.valueOf(jstormMasterContext.appMasterThriftPort));
  Endpoint endpoint = new Endpoint(JOYConstants.HTTP, JOYConstants.HOST_PORT,
      JOYConstants.RPC, addresses);
  application.addExternalEndpoint(endpoint);
  return application;
}
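On the consuming side, the address can be read back out of the record's single external endpoint; both external and addresses are public fields on ServiceRecord and Endpoint. A rough sketch, reusing the JOYConstants keys written above:

// Sketch: recover the AM's host and thrift port from a resolved record.
Endpoint ep = application.external.get(0);
Map<String, String> addr = ep.addresses.get(0);
String host = addr.get(JOYConstants.HOST);
int port = Integer.parseInt(addr.get(JOYConstants.PORT));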