Use of io.druid.jackson.DefaultObjectMapper in project druid by druid-io, from the class DatasourcePathSpecTest, method testAddInputPaths:
@Test
public void testAddInputPaths() throws Exception {
    HadoopDruidIndexerConfig hadoopIndexerConfig = makeHadoopDruidIndexerConfig();
    ObjectMapper mapper = new DefaultObjectMapper();
    DatasourcePathSpec pathSpec = new DatasourcePathSpec(mapper, segments, ingestionSpec, null);
    Configuration config = new Configuration();
    Job job = EasyMock.createNiceMock(Job.class);
    EasyMock.expect(job.getConfiguration()).andReturn(config).anyTimes();
    EasyMock.replay(job);
    pathSpec.addInputPaths(hadoopIndexerConfig, job);
    List<WindowedDataSegment> actualSegments = mapper.readValue(
        config.get(DatasourceInputFormat.CONF_INPUT_SEGMENTS),
        new TypeReference<List<WindowedDataSegment>>() {}
    );
    Assert.assertEquals(segments, actualSegments);
    DatasourceIngestionSpec actualIngestionSpec = mapper.readValue(
        config.get(DatasourceInputFormat.CONF_DRUID_SCHEMA),
        DatasourceIngestionSpec.class
    );
    Assert.assertEquals(
        ingestionSpec.withDimensions(ImmutableList.of("product")).withMetrics(ImmutableList.of("visited_sum")),
        actualIngestionSpec
    );
}
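The test above leans on DefaultObjectMapper being a drop-in Jackson ObjectMapper: generic collections are written into the Hadoop Configuration and read back through a TypeReference. A minimal, self-contained sketch of that round trip, using plain strings in place of WindowedDataSegment (the class and variable names below are illustrative, not part of the Druid API):

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.jackson.DefaultObjectMapper;
import java.util.Arrays;
import java.util.List;

public class DefaultObjectMapperRoundTrip {
    public static void main(String[] args) throws Exception {
        // DefaultObjectMapper extends Jackson's ObjectMapper, so the standard API applies.
        ObjectMapper mapper = new DefaultObjectMapper();
        // List<String> stands in for List<WindowedDataSegment> purely for illustration.
        List<String> segments = Arrays.asList("segment-1", "segment-2");
        // Serialize the list, as the path spec does when it fills CONF_INPUT_SEGMENTS.
        String json = mapper.writeValueAsString(segments);
        // Deserialize through a TypeReference so the generic element type is preserved.
        List<String> roundTripped = mapper.readValue(json, new TypeReference<List<String>>() {});
        System.out.println(segments.equals(roundTripped)); // true
    }
}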
Use of io.druid.jackson.DefaultObjectMapper in project hive by apache, from the class TestDruidSerDe, method deserializeQueryResults:
private static void deserializeQueryResults(DruidSerDe serDe, String queryType, String jsonQuery,
    String resultString, Object[][] records) throws SerDeException, JsonParseException, JsonMappingException,
    IOException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException,
    InterruptedException, NoSuchMethodException, InvocationTargetException {
    // Initialize the query and the record reader that matches the query type
    Query<?> query = null;
    DruidQueryRecordReader<?, ?> reader = null;
    List<?> resultsList = null;
    ObjectMapper mapper = new DefaultObjectMapper();
    switch (queryType) {
        case Query.TIMESERIES:
            query = mapper.readValue(jsonQuery, TimeseriesQuery.class);
            reader = new DruidTimeseriesQueryRecordReader();
            resultsList = mapper.readValue(resultString, new TypeReference<List<Result<TimeseriesResultValue>>>() {});
            break;
        case Query.TOPN:
            query = mapper.readValue(jsonQuery, TopNQuery.class);
            reader = new DruidTopNQueryRecordReader();
            resultsList = mapper.readValue(resultString, new TypeReference<List<Result<TopNResultValue>>>() {});
            break;
        case Query.GROUP_BY:
            query = mapper.readValue(jsonQuery, GroupByQuery.class);
            reader = new DruidGroupByQueryRecordReader();
            resultsList = mapper.readValue(resultString, new TypeReference<List<Row>>() {});
            break;
        case Query.SELECT:
            query = mapper.readValue(jsonQuery, SelectQuery.class);
            reader = new DruidSelectQueryRecordReader();
            resultsList = mapper.readValue(resultString, new TypeReference<List<Result<SelectResultValue>>>() {});
            break;
    }
    // Inject the query and the deserialized results into the reader via reflection
    Field field1 = DruidQueryRecordReader.class.getDeclaredField("query");
    field1.setAccessible(true);
    field1.set(reader, query);
    if (reader instanceof DruidGroupByQueryRecordReader) {
        Method method1 = DruidGroupByQueryRecordReader.class.getDeclaredMethod("initExtractors");
        method1.setAccessible(true);
        method1.invoke(reader);
    }
    Field field2 = DruidQueryRecordReader.class.getDeclaredField("results");
    field2.setAccessible(true);
    // Get the row structure
    StructObjectInspector oi = (StructObjectInspector) serDe.getObjectInspector();
    List<? extends StructField> fieldRefs = oi.getAllStructFieldRefs();
    // Check the mapred API: next(key, value)
    Iterator<?> results = resultsList.iterator();
    field2.set(reader, results);
    DruidWritable writable = new DruidWritable();
    int pos = 0;
    while (reader.next(NullWritable.get(), writable)) {
        Object row = serDe.deserialize(writable);
        Object[] expectedFieldsData = records[pos];
        assertEquals(expectedFieldsData.length, fieldRefs.size());
        for (int i = 0; i < fieldRefs.size(); i++) {
            Object fieldData = oi.getStructFieldData(row, fieldRefs.get(i));
            assertEquals("Field " + i, expectedFieldsData[i], fieldData);
        }
        pos++;
    }
    assertEquals(pos, records.length);
    // Check the mapreduce API: nextKeyValue() / getCurrentValue()
    results = resultsList.iterator();
    field2.set(reader, results);
    pos = 0;
    while (reader.nextKeyValue()) {
        Object row = serDe.deserialize(reader.getCurrentValue());
        Object[] expectedFieldsData = records[pos];
        assertEquals(expectedFieldsData.length, fieldRefs.size());
        for (int i = 0; i < fieldRefs.size(); i++) {
            Object fieldData = oi.getStructFieldData(row, fieldRefs.get(i));
            assertEquals("Field " + i, expectedFieldsData[i], fieldData);
        }
        pos++;
    }
    assertEquals(pos, records.length);
}
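The query and the pre-built result iterator are pushed into the record reader through plain java.lang.reflect, since the reader exposes no setters for them. A short sketch of the same technique against a hypothetical class (Holder is invented here purely for illustration):

import java.lang.reflect.Field;

// Hypothetical stand-in for DruidQueryRecordReader: a class with a private field and no setter.
class Holder {
    private String query = "initial";
    String describe() { return query; }
}

public class ReflectionInjectionSketch {
    public static void main(String[] args) throws Exception {
        Holder holder = new Holder();
        Field field = Holder.class.getDeclaredField("query");
        field.setAccessible(true);            // bypass the private modifier, as the test does
        field.set(holder, "injected query");  // overwrite the field on the target instance
        System.out.println(holder.describe()); // prints "injected query"
    }
}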
Use of io.druid.jackson.DefaultObjectMapper in project druid by druid-io, from the class SpnegoFilterConfigTest, method testserde:
@Test
public void testserde() {
    Injector injector = Guice.createInjector(new Module() {
        @Override
        public void configure(Binder binder) {
            binder.install(new PropertiesModule(Arrays.asList("test.runtime.properties")));
            binder.install(new ConfigModule());
            binder.install(new DruidGuiceExtensions());
            JsonConfigProvider.bind(binder, "druid.hadoop.security.spnego", SpnegoFilterConfig.class);
        }

        @Provides
        @LazySingleton
        public ObjectMapper jsonMapper() {
            return new DefaultObjectMapper();
        }
    });
    Properties props = injector.getInstance(Properties.class);
    SpnegoFilterConfig config = injector.getInstance(SpnegoFilterConfig.class);
    Assert.assertEquals(props.getProperty("druid.hadoop.security.spnego.principal"), config.getPrincipal());
    Assert.assertEquals(props.getProperty("druid.hadoop.security.spnego.keytab"), config.getKeytab());
    Assert.assertEquals(props.getProperty("druid.hadoop.security.spnego.authToLocal"), config.getAuthToLocal());
}
Use of io.druid.jackson.DefaultObjectMapper in project druid by druid-io, from the class HdfsDataSegmentPusherTest, method testUsingSchemeForMultipleSegments:
private void testUsingSchemeForMultipleSegments(final String scheme, final int numberOfSegments) throws Exception {
    Configuration conf = new Configuration(true);
    DataSegment[] segments = new DataSegment[numberOfSegments];
    // Create a mock segment on disk
    File segmentDir = tempFolder.newFolder();
    File tmp = new File(segmentDir, "version.bin");
    final byte[] data = new byte[] { 0x0, 0x0, 0x0, 0x1 };
    Files.write(data, tmp);
    final long size = data.length;
    HdfsDataSegmentPusherConfig config = new HdfsDataSegmentPusherConfig();
    final File storageDirectory = tempFolder.newFolder();
    config.setStorageDirectory(scheme != null
        ? String.format("%s://%s", scheme, storageDirectory.getAbsolutePath())
        : storageDirectory.getAbsolutePath());
    HdfsDataSegmentPusher pusher = new HdfsDataSegmentPusher(config, conf, new DefaultObjectMapper());
    for (int i = 0; i < numberOfSegments; i++) {
        segments[i] = new DataSegment("foo", new Interval("2015/2016"), "0", Maps.<String, Object>newHashMap(),
            Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NumberedShardSpec(i, i), 0, size);
    }
    for (int i = 0; i < numberOfSegments; i++) {
        final DataSegment pushedSegment = pusher.push(segmentDir, segments[i]);
        String indexUri = String.format("%s/%s/%d_index.zip",
            FileSystem.newInstance(conf).makeQualified(new Path(config.getStorageDirectory())).toUri().toString(),
            DataSegmentPusherUtil.getHdfsStorageDir(segments[i]),
            segments[i].getShardSpec().getPartitionNum());
        Assert.assertEquals(segments[i].getSize(), pushedSegment.getSize());
        Assert.assertEquals(segments[i], pushedSegment);
        Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", indexUri), pushedSegment.getLoadSpec());
        // rename directory after push
        String segmentPath = DataSegmentPusherUtil.getHdfsStorageDir(pushedSegment);
        File indexFile = new File(String.format("%s/%s/%d_index.zip", storageDirectory, segmentPath,
            pushedSegment.getShardSpec().getPartitionNum()));
        Assert.assertTrue(indexFile.exists());
        File descriptorFile = new File(String.format("%s/%s/%d_descriptor.json", storageDirectory, segmentPath,
            pushedSegment.getShardSpec().getPartitionNum()));
        Assert.assertTrue(descriptorFile.exists());
        // read actual data from the descriptor file
        DataSegment fromDescriptorFileDataSegment = objectMapper.readValue(descriptorFile, DataSegment.class);
        Assert.assertEquals(segments[i].getSize(), pushedSegment.getSize());
        Assert.assertEquals(segments[i], pushedSegment);
        Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", indexUri), fromDescriptorFileDataSegment.getLoadSpec());
        // rename directory after push
        segmentPath = DataSegmentPusherUtil.getHdfsStorageDir(fromDescriptorFileDataSegment);
        indexFile = new File(String.format("%s/%s/%d_index.zip", storageDirectory, segmentPath,
            fromDescriptorFileDataSegment.getShardSpec().getPartitionNum()));
        Assert.assertTrue(indexFile.exists());
        // pushing the same segment a second time must not throw, and the temp dir is cleaned up
        File outDir = new File(String.format("%s/%s", config.getStorageDirectory(), segmentPath));
        outDir.setReadOnly();
        try {
            pusher.push(segmentDir, segments[i]);
        } catch (IOException e) {
            Assert.fail("should not throw exception");
        }
    }
}
Use of io.druid.jackson.DefaultObjectMapper in project druid by druid-io, from the class CachingQueryRunnerTest, method testCloseAndPopulate:
private void testCloseAndPopulate(List<Result> expectedRes, List<Result> expectedCacheRes, Query query,
    QueryToolChest toolchest) throws Exception {
    final AssertingClosable closable = new AssertingClosable();
    final Sequence resultSeq = Sequences.wrap(Sequences.simple(expectedRes), new SequenceWrapper() {
        @Override
        public void before() {
            Assert.assertFalse(closable.isClosed());
        }

        @Override
        public void after(boolean isDone, Throwable thrown) throws Exception {
            closable.close();
        }
    });
    final CountDownLatch cacheMustBePutOnce = new CountDownLatch(1);
    Cache cache = new Cache() {
        private final Map<NamedKey, byte[]> baseMap = new ConcurrentHashMap<>();

        @Override
        public byte[] get(NamedKey key) {
            return baseMap.get(key);
        }

        @Override
        public void put(NamedKey key, byte[] value) {
            baseMap.put(key, value);
            cacheMustBePutOnce.countDown();
        }

        @Override
        public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
            return null;
        }

        @Override
        public void close(String namespace) {
        }

        @Override
        public CacheStats getStats() {
            return null;
        }

        @Override
        public boolean isLocal() {
            return true;
        }

        @Override
        public void doMonitor(ServiceEmitter emitter) {
        }
    };
    String segmentIdentifier = "segment";
    SegmentDescriptor segmentDescriptor = new SegmentDescriptor(new Interval("2011/2012"), "version", 0);
    DefaultObjectMapper objectMapper = new DefaultObjectMapper();
    CachingQueryRunner runner = new CachingQueryRunner(segmentIdentifier, segmentDescriptor, objectMapper, cache,
        toolchest, new QueryRunner() {
            @Override
            public Sequence run(Query query, Map responseContext) {
                return resultSeq;
            }
        }, backgroundExecutorService, new CacheConfig() {
            @Override
            public boolean isPopulateCache() {
                return true;
            }

            @Override
            public boolean isUseCache() {
                return true;
            }
        });
    CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
    Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor,
        cacheStrategy.computeCacheKey(query));
    HashMap<String, Object> context = new HashMap<String, Object>();
    Sequence res = runner.run(query, context);
    // base sequence is not closed yet
    Assert.assertFalse("sequence must not be closed", closable.isClosed());
    Assert.assertNull("cache must be empty", cache.get(cacheKey));
    ArrayList results = Sequences.toList(res, new ArrayList());
    Assert.assertTrue(closable.isClosed());
    Assert.assertEquals(expectedRes.toString(), results.toString());
    // wait for background caching to finish; fail after at most 10 seconds so the overall suite is not blocked
    Assert.assertTrue("cache must be populated", cacheMustBePutOnce.await(10, TimeUnit.SECONDS));
    byte[] cacheValue = cache.get(cacheKey);
    Assert.assertNotNull(cacheValue);
    Function<Object, Result> fn = cacheStrategy.pullFromCache();
    List<Result> cacheResults = Lists.newArrayList(Iterators.transform(
        objectMapper.readValues(objectMapper.getFactory().createParser(cacheValue), cacheStrategy.getCacheObjectClazz()),
        fn));
    Assert.assertEquals(expectedCacheRes.toString(), cacheResults.toString());
}
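The cache read-back at the end streams the cached byte array through ObjectMapper.readValues, which yields a MappingIterator of typed objects. A minimal sketch of that pattern, assuming the cached bytes are simply a sequence of concatenated JSON values (which the test's use of readValues implies) and using plain Maps instead of Druid result classes:

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.jackson.DefaultObjectMapper;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class CacheValueReadBackSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new DefaultObjectMapper();
        // Assumed cache value format for this sketch: concatenated JSON objects.
        byte[] cacheValue = "{\"v\":1}{\"v\":2}".getBytes(StandardCharsets.UTF_8);
        // readValues over a parser iterates the root-level JSON values one by one.
        MappingIterator<Map> it = mapper.readValues(mapper.getFactory().createParser(cacheValue), Map.class);
        List<Map> results = new ArrayList<>();
        while (it.hasNext()) {
            results.add(it.next());
        }
        System.out.println(results); // [{v=1}, {v=2}]
    }
}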