Use of org.apache.druid.java.util.http.client.response.StringFullResponseHolder in project hive by apache.
From the class DruidStorageHandlerUtils, the method getResponseFromCurrentLeader:
static StringFullResponseHolder getResponseFromCurrentLeader(HttpClient client, Request request,
    StringFullResponseHandler fullResponseHandler) throws ExecutionException, InterruptedException {
  StringFullResponseHolder responseHolder = client.go(request, fullResponseHandler).get();
  if (HttpResponseStatus.TEMPORARY_REDIRECT.equals(responseHolder.getStatus())) {
    String redirectUrlStr = responseHolder.getResponse().headers().get("Location");
    LOG.debug("Request[%s] received redirect response to location [%s].", request.getUrl(), redirectUrlStr);
    final URL redirectUrl;
    try {
      redirectUrl = new URL(redirectUrlStr);
    } catch (MalformedURLException ex) {
      throw new ExecutionException(
          String.format("Malformed redirect location is found in response from url[%s], new location[%s].",
              request.getUrl(), redirectUrlStr),
          ex);
    }
    responseHolder = client.go(withUrl(request, redirectUrl), fullResponseHandler).get();
  }
  return responseHolder;
}
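A minimal caller sketch, assuming the same HttpClient exposed by DruidStorageHandler.getHttpClient() that appears in the Kafka example below; the GET endpoint, the overlordAddress variable, and the error handling here are illustrative, not taken from the Hive source:

StringFullResponseHolder holder = DruidStorageHandlerUtils.getResponseFromCurrentLeader(
    DruidStorageHandler.getHttpClient(),
    // List supervisors on the Overlord; overlordAddress is a placeholder host:port.
    new Request(HttpMethod.GET,
        new URL(String.format("http://%s/druid/indexer/v1/supervisor", overlordAddress))),
    new StringFullResponseHandler(StandardCharsets.UTF_8));
if (!HttpResponseStatus.OK.equals(holder.getStatus())) {
  throw new IOException(String.format("Unexpected status [%d], response [%s]",
      holder.getStatus().getCode(), holder.getContent()));
}

Because the helper follows a single TEMPORARY_REDIRECT, callers receive the leader's answer even when the request first lands on a non-leader Overlord.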
Use of org.apache.druid.java.util.http.client.response.StringFullResponseHolder in project hive by apache.
From the class DruidKafkaUtils, the method updateKafkaIngestionSpec:
static void updateKafkaIngestionSpec(String overlordAddress, KafkaSupervisorSpec spec) {
  try {
    String task = JSON_MAPPER.writeValueAsString(spec);
    CONSOLE.printInfo("submitting kafka Spec {}", task);
    LOG.info("submitting kafka Supervisor Spec {}", task);
    StringFullResponseHolder response = DruidStorageHandlerUtils.getResponseFromCurrentLeader(
        DruidStorageHandler.getHttpClient(),
        new Request(HttpMethod.POST,
            new URL(String.format("http://%s/druid/indexer/v1/supervisor", overlordAddress)))
            .setContent("application/json", JSON_MAPPER.writeValueAsBytes(spec)),
        new StringFullResponseHandler(Charset.forName("UTF-8")));
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      String msg = String.format("Kafka Supervisor for [%s] Submitted Successfully to druid.",
          spec.getDataSchema().getDataSource());
      LOG.info(msg);
      CONSOLE.printInfo(msg);
    } else {
      throw new IOException(String.format(
          "Unable to update Kafka Ingestion for Druid status [%d] full response [%s]",
          response.getStatus().getCode(), response.getContent()));
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
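The OK-versus-error branch above recurs in every snippet on this page; a hypothetical helper (not part of the Hive source) that captures the pattern might look like this:

static void validateOkResponse(StringFullResponseHolder response, String action) throws IOException {
  // Surface both the status code and the body so the failure is actionable in the logs.
  if (!HttpResponseStatus.OK.equals(response.getStatus())) {
    throw new IOException(String.format("%s failed: status [%d], response [%s]",
        action, response.getStatus().getCode(), response.getContent()));
  }
}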
Use of org.apache.druid.java.util.http.client.response.StringFullResponseHolder in project druid by apache.
From the class CoordinatorRuleManagerTest, the method mockClient:
private DruidLeaderClient mockClient() {
  final Map<String, List<Rule>> rules = ImmutableMap.of(
      DATASOURCE1, ImmutableList.of(new ForeverLoadRule(null)),
      DATASOURCE2, ImmutableList.of(
          new ForeverLoadRule(null),
          new IntervalDropRule(Intervals.of("2020-01-01/2020-01-02"))),
      "datasource3", ImmutableList.of(
          new PeriodLoadRule(new Period("P1M"), true, null),
          new ForeverDropRule()),
      TieredBrokerConfig.DEFAULT_RULE_NAME, ImmutableList.of(
          new ForeverLoadRule(ImmutableMap.of("__default", 2))));
  final StringFullResponseHolder holder = EasyMock.niceMock(StringFullResponseHolder.class);
  EasyMock.expect(holder.getStatus()).andReturn(HttpResponseStatus.OK);
  try {
    EasyMock.expect(holder.getContent()).andReturn(objectMapper.writeValueAsString(rules));
    final DruidLeaderClient client = EasyMock.niceMock(DruidLeaderClient.class);
    EasyMock.expect(client.go(EasyMock.anyObject())).andReturn(holder);
    EasyMock.replay(holder, client);
    return client;
  } catch (IOException | InterruptedException e) {
    throw new RuntimeException(e);
  }
}
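By the same pattern, the failure path can be stubbed as well; the companion mock below is hypothetical (it is not part of CoordinatorRuleManagerTest) and returns a 500 so the caller's error handling can be exercised:

private DruidLeaderClient mockFailingClient() {
  final StringFullResponseHolder holder = EasyMock.niceMock(StringFullResponseHolder.class);
  EasyMock.expect(holder.getStatus()).andReturn(HttpResponseStatus.INTERNAL_SERVER_ERROR);
  EasyMock.expect(holder.getContent()).andReturn("rule endpoint unavailable");
  try {
    // DruidLeaderClient.go(Request) declares IOException and InterruptedException, hence the try block.
    final DruidLeaderClient client = EasyMock.niceMock(DruidLeaderClient.class);
    EasyMock.expect(client.go(EasyMock.anyObject())).andReturn(holder);
    EasyMock.replay(holder, client);
    return client;
  } catch (IOException | InterruptedException e) {
    throw new RuntimeException(e);
  }
}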
Use of org.apache.druid.java.util.http.client.response.StringFullResponseHolder in project druid by apache.
From the class HttpIndexingServiceClient, the method getTotalWorkerCapacity:
@Override
public int getTotalWorkerCapacity() {
  try {
    final StringFullResponseHolder response = druidLeaderClient.go(
        druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/indexer/v1/workers")
            .setHeader("Content-Type", MediaType.APPLICATION_JSON));
    if (!response.getStatus().equals(HttpResponseStatus.OK)) {
      throw new ISE("Error while getting available cluster capacity. status[%s] content[%s]",
          response.getStatus(), response.getContent());
    }
    final Collection<IndexingWorkerInfo> workers = jsonMapper.readValue(
        response.getContent(),
        new TypeReference<Collection<IndexingWorkerInfo>>() {});
    return workers.stream().mapToInt(workerInfo -> workerInfo.getWorker().getCapacity()).sum();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
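A caller might use this total to cap parallelism before submitting work; the guard below is a hypothetical sketch built on getTotalWorkerCapacity, not a method of HttpIndexingServiceClient:

int computeMaxSubTasks(int requestedSubTasks) {
  // Never request more simultaneous subtasks than the cluster advertises,
  // but always allow at least one so the job can still make progress.
  final int totalCapacity = getTotalWorkerCapacity();
  return Math.min(requestedSubTasks, Math.max(1, totalCapacity));
}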
Use of org.apache.druid.java.util.http.client.response.StringFullResponseHolder in project druid by apache.
From the class HttpIndexingServiceClient, the method runTask:
@Override
public String runTask(String taskId, Object taskObject) {
  try {
    // Warning, magic: here we may serialize ClientTaskQuery objects, but OverlordResource.taskPost()
    // deserializes Task objects from the same data. See the comment for ClientTaskQuery for details.
    final StringFullResponseHolder response = druidLeaderClient.go(
        druidLeaderClient.makeRequest(HttpMethod.POST, "/druid/indexer/v1/task")
            .setContent(MediaType.APPLICATION_JSON, jsonMapper.writeValueAsBytes(taskObject)));
    if (!response.getStatus().equals(HttpResponseStatus.OK)) {
      if (!Strings.isNullOrEmpty(response.getContent())) {
        throw new ISE("Failed to post task[%s] with error[%s].", taskId, response.getContent());
      } else {
        throw new ISE("Failed to post task[%s]. Please check overlord log", taskId);
      }
    }
    final Map<String, Object> resultMap = jsonMapper.readValue(
        response.getContent(), JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT);
    final String returnedTaskId = (String) resultMap.get("task");
    Preconditions.checkState(taskId.equals(returnedTaskId),
        "Got a different taskId[%s]. Expected taskId[%s]", returnedTaskId, taskId);
    return taskId;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
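On success the Overlord answers the POST with a small JSON object such as {"task":"<taskId>"}, which is why the method reads the "task" key before confirming the id. A hedged call-site sketch, with placeholder names:

// Hypothetical submission; taskObject would normally be a ClientTaskQuery implementation
// serialized by the injected ObjectMapper (see the "Warning, magic" comment above).
final String taskId = "index_parallel_example_task";
final String acceptedId = indexingServiceClient.runTask(taskId, taskObject);
// runTask returns the id it was given once the Overlord acknowledges the POST.
assert taskId.equals(acceptedId);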