Usage example of org.apache.druid.java.util.http.client.response.StringFullResponseHandler in the Apache Hive project.
From class DruidStorageHandler, method fetchKafkaIngestionSpec:
/**
 * Fetches the Kafka supervisor (ingestion) spec for the table's Druid datasource from the
 * Druid overlord, retrying on transient {@link IOException}s.
 *
 * @param table Hive table backed by a Druid datasource.
 * @return the supervisor spec, or {@code null} when no supervisor exists for the datasource.
 * @throws RuntimeException if the overlord is unreachable or returns an unexpected status.
 */
private KafkaSupervisorSpec fetchKafkaIngestionSpec(Table table) {
  final String overlordAddress = Preconditions.checkNotNull(
      HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS),
      "Druid Overlord Address is null");
  final String dataSourceName = Preconditions.checkNotNull(
      DruidStorageHandlerUtils.getTableProperty(table, Constants.DRUID_DATA_SOURCE),
      "Druid Datasource name is null");
  try {
    // Retry only on IOException; other failures are not expected to be transient.
    StringFullResponseHolder response = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(),
            new Request(HttpMethod.GET,
                new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s",
                    overlordAddress, dataSourceName))),
            new StringFullResponseHandler(java.nio.charset.StandardCharsets.UTF_8)),
        input -> input instanceof IOException, getMaxRetryCount());
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      return JSON_MAPPER.readValue(response.getContent(), KafkaSupervisorSpec.class);
      // Druid returns 400 Bad Request (not only 404) when the supervisor is not found.
    } else if (response.getStatus().equals(HttpResponseStatus.NOT_FOUND)
        || response.getStatus().equals(HttpResponseStatus.BAD_REQUEST)) {
      // SLF4J parameterized logging uses {} placeholders, not %s.
      LOG.debug("No Kafka Supervisor found for datasource[{}]", dataSourceName);
      return null;
    } else {
      throw new IOException(String.format(
          "Unable to fetch Kafka Ingestion Spec from Druid status [%d] full response [%s]",
          response.getStatus().getCode(), response.getContent()));
    }
  } catch (Exception e) {
    throw new RuntimeException("Exception while fetching kafka ingestion spec from druid", e);
  }
}
Usage example of org.apache.druid.java.util.http.client.response.StringFullResponseHandler in the Apache Hive project.
From class DruidKafkaUtils, method updateKafkaIngestionSpec:
/**
 * Submits (creates or updates) a Kafka supervisor spec to the Druid overlord.
 *
 * @param overlordAddress host:port of the Druid overlord.
 * @param spec the Kafka supervisor spec to submit.
 * @throws RuntimeException if serialization fails or the overlord rejects the spec.
 */
static void updateKafkaIngestionSpec(String overlordAddress, KafkaSupervisorSpec spec) {
  try {
    String task = JSON_MAPPER.writeValueAsString(spec);
    // LogHelper.printInfo does not substitute {} placeholders; format the message explicitly.
    CONSOLE.printInfo(String.format("submitting kafka Spec %s", task));
    LOG.info("submitting kafka Supervisor Spec {}", task);
    // Reuse the already-serialized spec instead of serializing it a second time.
    StringFullResponseHolder response = DruidStorageHandlerUtils.getResponseFromCurrentLeader(
        DruidStorageHandler.getHttpClient(),
        new Request(HttpMethod.POST,
            new URL(String.format("http://%s/druid/indexer/v1/supervisor", overlordAddress)))
            .setContent("application/json",
                task.getBytes(java.nio.charset.StandardCharsets.UTF_8)),
        new StringFullResponseHandler(java.nio.charset.StandardCharsets.UTF_8));
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      String msg = String.format("Kafka Supervisor for [%s] Submitted Successfully to druid.",
          spec.getDataSchema().getDataSource());
      LOG.info(msg);
      CONSOLE.printInfo(msg);
    } else {
      throw new IOException(String.format(
          "Unable to update Kafka Ingestion for Druid status [%d] full response [%s]",
          response.getStatus().getCode(), response.getContent()));
    }
  } catch (Exception e) {
    // Preserve the cause and add context instead of a bare RuntimeException(e).
    throw new RuntimeException("Exception while submitting kafka ingestion spec to druid", e);
  }
}
Usage example of org.apache.druid.java.util.http.client.response.StringFullResponseHandler in the Apache Hive project.
From class DruidStorageHandler, method checkLoadStatus:
/**
 * Checks the load status of Druid segments by polling the Druid coordinator, waiting (with a
 * passive sleep between rounds) until all segments are reported loaded or the retry budget is
 * exhausted. Best-effort: an unreachable coordinator or remaining unloaded segments only log a
 * message; no exception is thrown for those cases.
 *
 * @param segments List of druid segments to check for
 */
private void checkLoadStatus(List<DataSegment> segments) {
  final String coordinatorAddress =
      HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS);
  int maxTries = getMaxRetryCount();
  LOG.debug("checking load status from coordinator {}", coordinatorAddress);
  String coordinatorResponse;
  try {
    coordinatorResponse = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(),
            new Request(HttpMethod.GET,
                new URL(String.format("http://%s/status", coordinatorAddress))),
            new StringFullResponseHandler(java.nio.charset.StandardCharsets.UTF_8)).getContent(),
        input -> input instanceof IOException, maxTries);
  } catch (Exception e) {
    CONSOLE.printInfo("Will skip waiting for data loading, coordinator unavailable");
    return;
  }
  if (Strings.isNullOrEmpty(coordinatorResponse)) {
    // NOTE(review): message says "will skip" but there is no return here, so polling proceeds
    // anyway — confirm whether a return was intended before changing behavior.
    CONSOLE.printInfo("Will skip waiting for data loading empty response from coordinator");
  }
  CONSOLE.printInfo(String.format("Waiting for the loading of [%s] segments", segments.size()));
  long passiveWaitTimeMs =
      HiveConf.getLongVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_PASSIVE_WAIT_TIME);
  Set<URL> urlsOfUnloadedSegments = segments.stream().map(dataSegment -> {
    try {
      // Need to make sure that we are using segment identifier
      return new URL(String.format("http://%s/druid/coordinator/v1/datasources/%s/segments/%s",
          coordinatorAddress, dataSegment.getDataSource(), dataSegment.getId().toString()));
    } catch (MalformedURLException e) {
      // Throwables.propagate is deprecated; rethrow directly with the cause preserved.
      throw new RuntimeException(e);
    }
  }).collect(Collectors.toSet());
  int numRetries = 0;
  while (numRetries++ < maxTries && !urlsOfUnloadedSegments.isEmpty()) {
    // Keep only the URLs whose segment is still not reported loaded (empty/failed response).
    urlsOfUnloadedSegments = ImmutableSet.copyOf(Sets.filter(urlsOfUnloadedSegments, input -> {
      try {
        String result = DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(),
            new Request(HttpMethod.GET, input),
            new StringFullResponseHandler(java.nio.charset.StandardCharsets.UTF_8)).getContent();
        LOG.debug("Checking segment [{}] response is [{}]", input, result);
        return Strings.isNullOrEmpty(result);
      } catch (InterruptedException | ExecutionException e) {
        if (e instanceof InterruptedException) {
          // Restore the interrupt flag so callers up the stack can observe it.
          Thread.currentThread().interrupt();
        }
        LOG.error(String.format("Error while checking URL [%s]", input), e);
        // Treat the check failure as "still unloaded" so it is retried next round.
        return true;
      }
    }));
    try {
      if (!urlsOfUnloadedSegments.isEmpty()) {
        Thread.sleep(passiveWaitTimeMs);
      }
    } catch (InterruptedException e) {
      // Restore the interrupt flag before propagating.
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    }
  }
  if (!urlsOfUnloadedSegments.isEmpty()) {
    // We are not Throwing an exception since it might be a transient issue that is blocking loading
    CONSOLE.printError(String.format(
        "Wait time exhausted and we have [%s] out of [%s] segments not loaded yet",
        urlsOfUnloadedSegments.size(), segments.size()));
  }
}
Usage example of org.apache.druid.java.util.http.client.response.StringFullResponseHandler in the Apache Hive project.
From class DruidStorageHandler, method fetchKafkaSupervisorReport:
/**
 * Fetches the Kafka supervisor status report from the Druid overlord, retrying on transient
 * {@link IOException}s. This method will return {@code null} if it cannot fetch the report.
 *
 * @param table Hive table backed by a Druid datasource.
 * @return kafka supervisor report or null when druid overlord is unreachable.
 */
@Nullable
private KafkaSupervisorReport fetchKafkaSupervisorReport(Table table) {
  final String overlordAddress = Preconditions.checkNotNull(
      HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS),
      "Druid Overlord Address is null");
  final String dataSourceName = Preconditions.checkNotNull(
      DruidStorageHandlerUtils.getTableProperty(table, Constants.DRUID_DATA_SOURCE),
      "Druid Datasource name is null");
  try {
    StringFullResponseHolder response = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(),
            new Request(HttpMethod.GET,
                new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/status",
                    overlordAddress, dataSourceName))),
            new StringFullResponseHandler(java.nio.charset.StandardCharsets.UTF_8)),
        input -> input instanceof IOException, getMaxRetryCount());
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      return DruidStorageHandlerUtils.JSON_MAPPER
          .readValue(response.getContent(), KafkaSupervisorReport.class);
      // Druid returns 400 Bad Request (not only 404) when the supervisor is not found.
    } else if (response.getStatus().equals(HttpResponseStatus.NOT_FOUND)
        || response.getStatus().equals(HttpResponseStatus.BAD_REQUEST)) {
      // SLF4J parameterized logging uses {} placeholders, not %s/%d.
      LOG.info("No Kafka Supervisor found for datasource[{}]", dataSourceName);
      return null;
    } else {
      LOG.error("Unable to fetch Kafka Supervisor status [{}] full response [{}]",
          response.getStatus().getCode(), response.getContent());
      return null;
    }
  } catch (Exception e) {
    // Best-effort: log and return null rather than failing the caller.
    LOG.error("Exception while fetching kafka ingestion spec from druid", e);
    return null;
  }
}
Aggregations