Use of io.druid.metadata.MetadataSegmentManagerConfig in project druid by druid-io.
From class HadoopConverterJobTest, method testSimpleJob:
@Test
public void testSimpleJob() throws IOException, InterruptedException {
  // The SQL-backed segment manager takes a Supplier so the config can be re-read; here it
  // simply supplies a default MetadataSegmentManagerConfig.
  final SQLMetadataSegmentManager manager = new SQLMetadataSegmentManager(
      HadoopDruidConverterConfig.jsonMapper,
      new Supplier<MetadataSegmentManagerConfig>() {
        @Override
        public MetadataSegmentManagerConfig get() {
          return new MetadataSegmentManagerConfig();
        }
      },
      metadataStorageTablesConfigSupplier,
      connector
  );
  final List<DataSegment> oldSegments = getDataSegments(manager);
  final File tmpDir = temporaryFolder.newFolder();
  // Convert the existing segments to an uncompressed, Roaring-bitmap IndexSpec.
  final HadoopConverterJob converterJob = new HadoopConverterJob(
      new HadoopDruidConverterConfig(
          DATASOURCE, interval,
          new IndexSpec(
              new RoaringBitmapSerdeFactory(null),
              CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
              CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
              CompressionFactory.LongEncodingStrategy.LONGS
          ),
          oldSegments, true, tmpDir.toURI(),
          ImmutableMap.<String, String>of(), null,
          tmpSegmentDir.toURI().toString()
      )
  );
  final List<DataSegment> segments = Lists.newArrayList(converterJob.run());
  Assert.assertNotNull("bad result", segments);
  Assert.assertEquals("wrong segment count", 4, segments.size());
  Assert.assertTrue(converterJob.getLoadedBytes() > 0);
  Assert.assertTrue(converterJob.getWrittenBytes() > 0);
  // The uncompressed output should be larger than the compressed input.
  Assert.assertTrue(converterJob.getWrittenBytes() > converterJob.getLoadedBytes());
  Assert.assertEquals(oldSegments.size(), segments.size());
  final DataSegment segment = segments.get(0);
  Assert.assertTrue(interval.contains(segment.getInterval()));
  Assert.assertTrue(segment.getVersion().endsWith("_converted"));
  Assert.assertTrue(segment.getLoadSpec().get("path").toString().contains("_converted"));
  // The working directory should contain only files, no leftover directories.
  for (File file : tmpDir.listFiles()) {
    Assert.assertFalse(file.isDirectory());
    Assert.assertTrue(file.isFile());
  }
  final Comparator<DataSegment> segmentComparator = new Comparator<DataSegment>() {
    @Override
    public int compare(DataSegment o1, DataSegment o2) {
      return o1.getIdentifier().compareTo(o2.getIdentifier());
    }
  };
  Collections.sort(oldSegments, segmentComparator);
  Collections.sort(segments, segmentComparator);
  // Converted segments keep their datasource, interval, columns and binary version, but carry a
  // "_converted" version suffix and a larger (uncompressed) size.
  for (int i = 0; i < oldSegments.size(); ++i) {
    final DataSegment oldSegment = oldSegments.get(i);
    final DataSegment newSegment = segments.get(i);
    Assert.assertEquals(oldSegment.getDataSource(), newSegment.getDataSource());
    Assert.assertEquals(oldSegment.getInterval(), newSegment.getInterval());
    Assert.assertEquals(Sets.<String>newHashSet(oldSegment.getMetrics()), Sets.<String>newHashSet(newSegment.getMetrics()));
    Assert.assertEquals(Sets.<String>newHashSet(oldSegment.getDimensions()), Sets.<String>newHashSet(newSegment.getDimensions()));
    Assert.assertEquals(oldSegment.getVersion() + "_converted", newSegment.getVersion());
    Assert.assertTrue(oldSegment.getSize() < newSegment.getSize());
    Assert.assertEquals(oldSegment.getBinaryVersion(), newSegment.getBinaryVersion());
  }
}
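The getDataSegments(manager) helper is referenced above but not included in this snippet. A rough sketch of what it might look like is below; it assumes the manager exposes start/isStarted/poll/getInventoryValue/stop as in the io.druid 0.9.x MetadataSegmentManager interface, and that DATASOURCE names the test datasource. Treat the method names and polling logic as assumptions, not the project's actual helper.

// Rough sketch only; the real helper lives in HadoopConverterJobTest and may differ.
private List<DataSegment> getDataSegments(SQLMetadataSegmentManager manager) throws InterruptedException {
  manager.start();
  while (!manager.isStarted()) {
    Thread.sleep(10);
  }
  // Load the segment table from the metadata store, then read back the datasource's segments.
  manager.poll();
  final DruidDataSource druidDataSource = manager.getInventoryValue(DATASOURCE);
  manager.stop();
  return Lists.newArrayList(druidDataSource.getSegments());
}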
Use of io.druid.metadata.MetadataSegmentManagerConfig in project druid by druid-io.
From class HadoopConverterJobTest, method testHadoopFailure:
@Test
// This takes a long time due to retries
@Ignore
public void testHadoopFailure() throws IOException, InterruptedException {
  final SQLMetadataSegmentManager manager = new SQLMetadataSegmentManager(
      HadoopDruidConverterConfig.jsonMapper,
      new Supplier<MetadataSegmentManagerConfig>() {
        @Override
        public MetadataSegmentManagerConfig get() {
          return new MetadataSegmentManagerConfig();
        }
      },
      metadataStorageTablesConfigSupplier,
      connector
  );
  final List<DataSegment> oldSegments = getDataSegments(manager);
  final File tmpDir = temporaryFolder.newFolder();
  final HadoopConverterJob converterJob = new HadoopConverterJob(
      new HadoopDruidConverterConfig(
          DATASOURCE, interval,
          new IndexSpec(
              new RoaringBitmapSerdeFactory(null),
              CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
              CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
              CompressionFactory.LongEncodingStrategy.LONGS
          ),
          oldSegments, true, tmpDir.toURI(),
          ImmutableMap.<String, String>of(), null,
          tmpSegmentDir.toURI().toString()
      )
  );
  // Corrupt one input segment so the job fails and the metadata store is left untouched.
  corrupt(oldSegments.get(0));
  final List<DataSegment> result = converterJob.run();
  Assert.assertNull("result should be null", result);
  final List<DataSegment> segments = getDataSegments(manager);
  Assert.assertEquals(oldSegments.size(), segments.size());
  Assert.assertEquals(oldSegments, segments);
}
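The corrupt(...) helper is also not shown in this snippet. A plausible sketch, assuming each test segment's loadSpec carries a local filesystem "path" entry (as the loadSpec assertion in testSimpleJob suggests), is to overwrite the start of that file so the conversion job can no longer read the segment. This sketch uses only java.io.FileOutputStream and java.util.Arrays; it is an assumption, not the project's actual implementation.

// Hypothetical helper; assumes a local loadSpec such as {"type": "local", "path": ".../index.zip"}.
private static void corrupt(DataSegment segment) throws IOException {
  final File segmentFile = new File(segment.getLoadSpec().get("path").toString());
  final byte[] garbage = new byte[(int) Math.min(segmentFile.length(), 1024L)];
  Arrays.fill(garbage, (byte) 0xFF);
  // Clobber the beginning of the segment archive so it is no longer a valid index file.
  try (OutputStream out = new FileOutputStream(segmentFile)) {
    out.write(garbage);
  }
}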