Use of org.apache.hadoop.hive.druid.io.HiveDruidSplit in project hive by apache.
The class TestHiveDruidQueryBasedInputFormat, method testTimeZone.
@Test
public void testTimeZone() throws Exception {
  DruidQueryBasedInputFormat input = new DruidQueryBasedInputFormat();
  Method method1 = DruidQueryBasedInputFormat.class.getDeclaredMethod("getInputSplits", Configuration.class);
  method1.setAccessible(true);
  // Create, initialize, and test
  Configuration conf = createPropertiesQuery("sample_datasource", Query.TIMESERIES, TIMESERIES_QUERY);
  HiveDruidSplit[] resultSplits = (HiveDruidSplit[]) method1.invoke(input, conf);
  assertEquals(TIMESERIES_QUERY_SPLIT, Arrays.toString(resultSplits));
  conf = createPropertiesQuery("sample_datasource", Query.TOPN, TOPN_QUERY);
  resultSplits = (HiveDruidSplit[]) method1.invoke(input, conf);
  assertEquals(TOPN_QUERY_SPLIT, Arrays.toString(resultSplits));
  conf = createPropertiesQuery("sample_datasource", Query.GROUP_BY, GROUP_BY_QUERY);
  resultSplits = (HiveDruidSplit[]) method1.invoke(input, conf);
  assertEquals(GROUP_BY_QUERY_SPLIT, Arrays.toString(resultSplits));
  conf = createPropertiesQuery("sample_datasource", Query.SELECT, SELECT_QUERY);
  resultSplits = (HiveDruidSplit[]) method1.invoke(input, conf);
  assertEquals(SELECT_QUERY_SPLIT, Arrays.toString(resultSplits));
}
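The helper createPropertiesQuery is not shown in this snippet. A minimal sketch of what it plausibly does, assuming the input format derives splits from a datasource name, a query type, and the query JSON stored in the job Configuration; the property keys below are illustrative assumptions, not verified against the Hive source:

private static Configuration createPropertiesQuery(String dataSource, String queryType, String jsonQuery) {
  Configuration conf = new Configuration();
  // Assumed property keys for illustration only; the real test likely
  // uses named constants from the Hive codebase rather than literals.
  conf.set("druid.datasource", dataSource);
  conf.set("druid.query.type", queryType);
  conf.set("druid.query.json", jsonQuery);
  return conf;
}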
Use of org.apache.hadoop.hive.druid.io.HiveDruidSplit in project hive by apache.
The class DruidQueryRecordReader, method initialize.
public void initialize(InputSplit split, Configuration conf) throws IOException {
  HiveDruidSplit hiveDruidSplit = (HiveDruidSplit) split;
  // Create query
  query = createQuery(hiveDruidSplit.getDruidQuery());
  // Execute query
  if (LOG.isInfoEnabled()) {
    LOG.info("Retrieving from druid using query:\n " + query);
  }
  final Lifecycle lifecycle = new Lifecycle();
  final int numConnection = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
  final Period readTimeout = new Period(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
  HttpClient client = HttpClientInit.createClient(
      HttpClientConfig.builder()
          .withReadTimeout(readTimeout.toStandardDuration())
          .withNumConnections(numConnection)
          .build(),
      lifecycle);
  try {
    lifecycle.start();
  } catch (Exception e) {
    LOG.error("Issues with lifecycle start", e);
  }
  InputStream response;
  try {
    response = DruidStorageHandlerUtils.submitRequest(client,
        DruidStorageHandlerUtils.createRequest(hiveDruidSplit.getLocations()[0], query));
  } catch (Exception e) {
    lifecycle.stop();
    throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
  }
  // Retrieve results
  List<R> resultsList;
  try {
    resultsList = createResultsList(response);
  } catch (IOException e) {
    response.close();
    throw e;
  } finally {
    lifecycle.stop();
  }
  if (resultsList == null || resultsList.isEmpty()) {
    return;
  }
  results = resultsList.iterator();
}
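Once initialize has run, the reader exposes the fetched Druid rows through the usual Hadoop RecordReader iteration. A minimal usage sketch, assuming a concrete query-type subclass and a DruidWritable value type (both assumptions based on this snippet, not verified signatures):

DruidQueryRecordReader<?, ?> reader = new DruidTimeseriesQueryRecordReader(); // assumed concrete subclass
reader.initialize(hiveDruidSplit, conf);
try {
  // Assumed mapreduce-style iteration over the buffered results
  while (reader.nextKeyValue()) {
    DruidWritable row = reader.getCurrentValue();
    // consume one Druid result row
  }
} finally {
  reader.close();
}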