Use of com.sun.jersey.api.client.Client in project hadoop by apache: class LogsCLI, method getAMContainerInfoForRMWebService. The method queries the ResourceManager REST endpoint ws/v1/cluster/apps/{appId}/appattempts and returns one JSONObject per application attempt.
protected List<JSONObject> getAMContainerInfoForRMWebService(
    Configuration conf, String appId) throws ClientHandlerException,
    UniformInterfaceException, JSONException {
  Client webServiceClient = Client.create();
  String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf);
  WebResource webResource = webServiceClient.resource(webAppAddress);
  // Ask the RM REST API for all attempts of the given application.
  ClientResponse response = webResource.path("ws").path("v1").path("cluster")
      .path("apps").path(appId).path("appattempts")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  JSONObject json =
      response.getEntity(JSONObject.class).getJSONObject("appAttempts");
  JSONArray requests = json.getJSONArray("appAttempt");
  List<JSONObject> amContainersList = new ArrayList<JSONObject>();
  for (int i = 0; i < requests.length(); i++) {
    amContainersList.add(requests.getJSONObject(i));
  }
  return amContainersList;
}
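A caller can then pull the AM container details out of each attempt object. The following is a minimal sketch, not LogsCLI code; it assumes each appAttempt object exposes containerId and nodeHttpAddress fields, as the RM REST API returns for application attempts.

// Hypothetical caller: resolve the AM container of the latest attempt.
// Assumes each appAttempt JSON object carries "containerId" and
// "nodeHttpAddress" fields, per the RM REST API.
List<JSONObject> attempts = getAMContainerInfoForRMWebService(conf, appId);
if (!attempts.isEmpty()) {
  JSONObject latest = attempts.get(attempts.size() - 1);
  String amContainerId = latest.getString("containerId");
  String nodeHttpAddress = latest.getString("nodeHttpAddress");
  System.out.println("AM container " + amContainerId + " ran on "
      + nodeHttpAddress);
}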
Use of com.sun.jersey.api.client.Client in project hadoop by apache: class LogsCLI, method getContainerLogFiles. The method asks a NodeManager's ws/v1/node/containers/{containerId}/logs endpoint for the container's log files and pairs each file's metadata with its log-aggregation type.
private List<Pair<PerContainerLogFileInfo, String>> getContainerLogFiles(
    Configuration conf, String containerIdStr, String nodeHttpAddress)
    throws IOException {
  List<Pair<PerContainerLogFileInfo, String>> logFileInfos = new ArrayList<>();
  Client webServiceClient = Client.create();
  try {
    WebResource webResource = webServiceClient.resource(
        WebAppUtils.getHttpSchemePrefix(conf) + nodeHttpAddress);
    ClientResponse response = webResource.path("ws").path("v1").path("node")
        .path("containers").path(containerIdStr).path("logs")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    if (response.getStatusInfo().getStatusCode()
        == ClientResponse.Status.OK.getStatusCode()) {
      try {
        // "containerLogsInfo" may be a single object or an array; normalize
        // it into a JSONArray before processing.
        JSONArray array = new JSONArray();
        JSONObject json = response.getEntity(JSONObject.class);
        Object logsInfoObj = json.get("containerLogsInfo");
        if (logsInfoObj instanceof JSONObject) {
          array.put((JSONObject) logsInfoObj);
        } else if (logsInfoObj instanceof JSONArray) {
          JSONArray logsArray = (JSONArray) logsInfoObj;
          for (int i = 0; i < logsArray.length(); i++) {
            array.put(logsArray.getJSONObject(i));
          }
        }
        for (int i = 0; i < array.length(); i++) {
          JSONObject log = array.getJSONObject(i);
          String aggregateType = log.has("logAggregationType")
              ? log.getString("logAggregationType") : "N/A";
          // "containerLogInfo" is likewise either one object or an array.
          Object ob = log.get("containerLogInfo");
          if (ob instanceof JSONArray) {
            JSONArray obArray = (JSONArray) ob;
            for (int j = 0; j < obArray.length(); j++) {
              logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
                  generatePerContainerLogFileInfoFromJSON(
                      obArray.getJSONObject(j)), aggregateType));
            }
          } else if (ob instanceof JSONObject) {
            logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
                generatePerContainerLogFileInfoFromJSON((JSONObject) ob),
                aggregateType));
          }
        }
      } catch (Exception e) {
        System.err.println("Unable to parse json from webservice. Error:");
        System.err.println(e.getMessage());
        throw new IOException(e);
      }
    }
  } catch (ClientHandlerException | UniformInterfaceException ex) {
    System.err.println("Unable to fetch log files list");
    throw new IOException(ex);
  }
  return logFileInfos;
}
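The generatePerContainerLogFileInfoFromJSON helper is not shown in this snippet. Below is a minimal sketch of what it plausibly does, assuming each containerLogInfo entry carries fileName, fileSize, and lastModifiedTime fields (as the NM REST API documents) and that PerContainerLogFileInfo accepts those three values; it is not verbatim LogsCLI code.

// Hedged sketch: the field names and the PerContainerLogFileInfo
// constructor are assumptions based on the NM containerLogsInfo payload.
private PerContainerLogFileInfo generatePerContainerLogFileInfoFromJSON(
    JSONObject meta) throws JSONException {
  String fileName = meta.has("fileName") ? meta.getString("fileName") : "N/A";
  String fileSize = meta.has("fileSize") ? meta.getString("fileSize") : "N/A";
  String lastModifiedTime = meta.has("lastModifiedTime")
      ? meta.getString("lastModifiedTime") : "N/A";
  return new PerContainerLogFileInfo(fileName, fileSize, lastModifiedTime);
}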
Use of com.sun.jersey.api.client.Client in project hadoop by apache: class TestTimelineClientForATS1_5, method createTimelineClient. The test builds a TimelineClientImpl whose writer is overridden so that HTTP posts are mocked, and wraps the writer in a Mockito spy for later verification.
private TimelineClientImpl createTimelineClient(YarnConfiguration conf) {
  TimelineClientImpl client = new TimelineClientImpl() {
    @Override
    protected TimelineWriter createTimelineWriter(Configuration conf,
        UserGroupInformation authUgi, Client client, URI resURI)
        throws IOException {
      // Stub out the HTTP layer: every POST is answered with a mocked
      // 200 OK response, so the test never talks to a real server.
      TimelineWriter timelineWriter =
          new FileSystemTimelineWriter(conf, authUgi, client, resURI) {
            public ClientResponse doPostingObject(Object object, String path) {
              ClientResponse response = mock(ClientResponse.class);
              when(response.getStatusInfo())
                  .thenReturn(ClientResponse.Status.OK);
              return response;
            }
          };
      spyTimelineWriter = spy(timelineWriter);
      return spyTimelineWriter;
    }
  };
  client.init(conf);
  client.start();
  return client;
}
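For context, a test using this client would publish an entity and then assert against spyTimelineWriter. This is an illustrative sketch, not verbatim test code; the entity IDs are hypothetical, and it assumes the ATS v1 TimelineEntity setters and varargs putEntities call, with Mockito statically imported as in the snippet above.

// Illustrative usage, inside a test method that declares throws Exception.
TimelineClientImpl client = createTimelineClient(new YarnConfiguration());
TimelineEntity entity = new TimelineEntity();
entity.setEntityType("test_entity_type");  // hypothetical type
entity.setEntityId("test_entity_id");      // hypothetical ID
// The post is absorbed by the mocked doPostingObject; no server is needed.
client.putEntities(entity);
// The spy lets the test confirm the writer was actually invoked.
verify(spyTimelineWriter, times(1)).putEntities(entity);
client.stop();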
Use of com.sun.jersey.api.client.Client in project hadoop by apache: class TestRMHA, method checkActiveRMWebServices. After a fail-over, the test queries the active ResourceManager's ws/v1/cluster/apps/{appId} endpoint and checks that the application is reported as ACCEPTED.
// Do some sanity testing of the web-services after fail-over.
private void checkActiveRMWebServices() throws JSONException {
  // Validate web-service
  Client webServiceClient = Client.create(new DefaultClientConfig());
  InetSocketAddress rmWebappAddr =
      NetUtils.getConnectAddress(rm.getWebapp().getListenerAddress());
  String webappURL = "http://" + rmWebappAddr.getHostName() + ":"
      + rmWebappAddr.getPort();
  WebResource webResource = webServiceClient.resource(webappURL);
  String path = app.getApplicationId().toString();
  ClientResponse response = webResource.path("ws").path("v1").path("cluster")
      .path("apps").path(path).accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
      response.getType().toString());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject appJson = json.getJSONObject("app");
  assertEquals("ACCEPTED", appJson.getString("state"));
  // Other stuff is verified in the regular web-services related tests
}
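The assertions above imply a response body of roughly this shape (abridged and illustrative; a real response carries many more fields under app):

{
  "app": {
    "state": "ACCEPTED",
    ...
  }
}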
Use of com.sun.jersey.api.client.Client in project hadoop by apache: class TestTimelineReaderWebServicesHBaseStorage, method testGetFlows. The test exercises the timeline reader's /ws/v2/timeline .../flows endpoints with and without a cluster ID, with a limit, and with valid and malformed daterange parameters.
@Test
public void testGetFlows() throws Exception {
  Client client = createClient();
  try {
    // Flows for an explicit cluster ID.
    URI uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows");
    ClientResponse resp = getResponse(client, uri);
    Set<FlowActivityEntity> entities =
        resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(2, entities.size());
    for (FlowActivityEntity entity : entities) {
      assertTrue((entity.getId().endsWith("@flow_name")
          && entity.getFlowRuns().size() == 2)
          || (entity.getId().endsWith("@flow_name2")
              && entity.getFlowRuns().size() == 1));
    }
    // Query without specifying cluster ID.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/flows/");
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(2, entities.size());
    // limit=1 caps the number of returned flows.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?limit=1");
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(1, entities.size());
    long firstFlowActivity =
        HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
    DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
    // Closed date range covering all recorded activity.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange="
        + fmt.format(firstFlowActivity) + "-" + fmt.format(dayTs));
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(2, entities.size());
    for (FlowActivityEntity entity : entities) {
      assertTrue((entity.getId().endsWith("@flow_name")
          && entity.getFlowRuns().size() == 2)
          || (entity.getId().endsWith("@flow_name2")
              && entity.getFlowRuns().size() == 1));
    }
    // Single date well after all activity: expect no flows.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange="
        + fmt.format(dayTs + (4 * 86400000L)));
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(0, entities.size());
    // Open-ended range: everything up to dayTs.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange=-"
        + fmt.format(dayTs));
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(2, entities.size());
    // Open-ended range: everything from the first flow activity onwards.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange="
        + fmt.format(firstFlowActivity) + "-");
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {});
    assertNotNull(entities);
    assertEquals(2, entities.size());
    // Malformed daterange values are rejected with 400 Bad Request:
    // wrong separator, end date before start date, over-long date strings.
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange=20150711:20150714");
    verifyHttpResponse(client, uri, Status.BAD_REQUEST);
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange=20150714-20150711");
    verifyHttpResponse(client, uri, Status.BAD_REQUEST);
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange=2015071129-20150712");
    verifyHttpResponse(client, uri, Status.BAD_REQUEST);
    uri = URI.create("http://localhost:" + serverPort
        + "/ws/v2/timeline/clusters/cluster1/flows?daterange=20150711-2015071243");
    verifyHttpResponse(client, uri, Status.BAD_REQUEST);
  } finally {
    client.destroy();
  }
}
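createClient, getResponse, and verifyHttpResponse are helpers defined elsewhere in the test class. Below is a minimal sketch consistent with how they are called above, assuming Jersey 1.x APIs and that getResponse treats any non-OK status as a failure; the bodies are assumptions, not verbatim test code.

// Hedged sketch of the test helpers.
private static Client createClient() {
  return Client.create(new DefaultClientConfig());
}

private static ClientResponse getResponse(Client client, URI uri)
    throws Exception {
  ClientResponse resp = client.resource(uri)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  if (resp == null
      || resp.getStatusInfo().getStatusCode() != Status.OK.getStatusCode()) {
    throw new IOException("Incorrect response from timeline reader.");
  }
  return resp;
}

private static void verifyHttpResponse(Client client, URI uri,
    Status expectedStatus) {
  ClientResponse resp = client.resource(uri)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertNotNull(resp);
  assertEquals(expectedStatus.getStatusCode(),
      resp.getStatusInfo().getStatusCode());
}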