
Example 1 with MetadataSegmentManagerConfig

Use of io.druid.metadata.MetadataSegmentManagerConfig in project druid by druid-io.

The class HadoopConverterJobTest, method testSimpleJob:

@Test
public void testSimpleJob() throws IOException, InterruptedException {
    final SQLMetadataSegmentManager manager = new SQLMetadataSegmentManager(HadoopDruidConverterConfig.jsonMapper, new Supplier<MetadataSegmentManagerConfig>() {

        @Override
        public MetadataSegmentManagerConfig get() {
            return new MetadataSegmentManagerConfig();
        }
    }, metadataStorageTablesConfigSupplier, connector);
    final List<DataSegment> oldSegments = getDataSegments(manager);
    final File tmpDir = temporaryFolder.newFolder();
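    // Build a converter job that re-indexes the existing segments with roaring bitmaps and uncompressed columns.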
    final HadoopConverterJob converterJob = new HadoopConverterJob(new HadoopDruidConverterConfig(
        DATASOURCE, interval,
        new IndexSpec(new RoaringBitmapSerdeFactory(null),
            CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
            CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
            CompressionFactory.LongEncodingStrategy.LONGS),
        oldSegments, true, tmpDir.toURI(), ImmutableMap.<String, String>of(), null,
        tmpSegmentDir.toURI().toString()));
    final List<DataSegment> segments = Lists.newArrayList(converterJob.run());
    Assert.assertNotNull("bad result", segments);
    Assert.assertEquals("wrong segment count", 4, segments.size());
    Assert.assertTrue(converterJob.getLoadedBytes() > 0);
    Assert.assertTrue(converterJob.getWrittenBytes() > 0);
    Assert.assertTrue(converterJob.getWrittenBytes() > converterJob.getLoadedBytes());
    Assert.assertEquals(oldSegments.size(), segments.size());
    final DataSegment segment = segments.get(0);
    Assert.assertTrue(interval.contains(segment.getInterval()));
    Assert.assertTrue(segment.getVersion().endsWith("_converted"));
    Assert.assertTrue(segment.getLoadSpec().get("path").toString().contains("_converted"));
    for (File file : tmpDir.listFiles()) {
        Assert.assertFalse(file.isDirectory());
        Assert.assertTrue(file.isFile());
    }
    final Comparator<DataSegment> segmentComparator = new Comparator<DataSegment>() {

        @Override
        public int compare(DataSegment o1, DataSegment o2) {
            return o1.getIdentifier().compareTo(o2.getIdentifier());
        }
    };
    Collections.sort(oldSegments, segmentComparator);
    Collections.sort(segments, segmentComparator);
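    // With both lists sorted by segment identifier, compare each converted segment to its source.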
    for (int i = 0; i < oldSegments.size(); ++i) {
        final DataSegment oldSegment = oldSegments.get(i);
        final DataSegment newSegment = segments.get(i);
        Assert.assertEquals(oldSegment.getDataSource(), newSegment.getDataSource());
        Assert.assertEquals(oldSegment.getInterval(), newSegment.getInterval());
        Assert.assertEquals(Sets.<String>newHashSet(oldSegment.getMetrics()), Sets.<String>newHashSet(newSegment.getMetrics()));
        Assert.assertEquals(Sets.<String>newHashSet(oldSegment.getDimensions()), Sets.<String>newHashSet(newSegment.getDimensions()));
        Assert.assertEquals(oldSegment.getVersion() + "_converted", newSegment.getVersion());
        Assert.assertTrue(oldSegment.getSize() < newSegment.getSize());
        Assert.assertEquals(oldSegment.getBinaryVersion(), newSegment.getBinaryVersion());
    }
}
Also used: IndexSpec (io.druid.segment.IndexSpec), DataSegment (io.druid.timeline.DataSegment), Comparator (java.util.Comparator), MetadataSegmentManagerConfig (io.druid.metadata.MetadataSegmentManagerConfig), RoaringBitmapSerdeFactory (io.druid.segment.data.RoaringBitmapSerdeFactory), SQLMetadataSegmentManager (io.druid.metadata.SQLMetadataSegmentManager), File (java.io.File), Test (org.junit.Test)
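As an aside (not part of the project source), the anonymous Supplier and Comparator above could be written more concisely on Java 8; a minimal sketch, assuming Guava's Supplier and java.util.Comparator as imported above:

    // Hedged Java 8 equivalents of the anonymous classes in testSimpleJob (sketch only).
    final Supplier<MetadataSegmentManagerConfig> configSupplier = MetadataSegmentManagerConfig::new;
    final Comparator<DataSegment> segmentComparator = Comparator.comparing(DataSegment::getIdentifier);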

Example 2 with MetadataSegmentManagerConfig

Use of io.druid.metadata.MetadataSegmentManagerConfig in project druid by druid-io.

The class HadoopConverterJobTest, method testHadoopFailure:

@Test
// This takes a long time due to retries
@Ignore
public void testHadoopFailure() throws IOException, InterruptedException {
    final SQLMetadataSegmentManager manager = new SQLMetadataSegmentManager(HadoopDruidConverterConfig.jsonMapper, new Supplier<MetadataSegmentManagerConfig>() {

        @Override
        public MetadataSegmentManagerConfig get() {
            return new MetadataSegmentManagerConfig();
        }
    }, metadataStorageTablesConfigSupplier, connector);
    final List<DataSegment> oldSegments = getDataSegments(manager);
    final File tmpDir = temporaryFolder.newFolder();
    final HadoopConverterJob converterJob = new HadoopConverterJob(new HadoopDruidConverterConfig(
        DATASOURCE, interval,
        new IndexSpec(new RoaringBitmapSerdeFactory(null),
            CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
            CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
            CompressionFactory.LongEncodingStrategy.LONGS),
        oldSegments, true, tmpDir.toURI(), ImmutableMap.<String, String>of(), null,
        tmpSegmentDir.toURI().toString()));
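    // Corrupt one of the input segments so the conversion job fails.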
    corrupt(oldSegments.get(0));
    final List<DataSegment> result = converterJob.run();
    Assert.assertNull("result should be null", result);
    final List<DataSegment> segments = getDataSegments(manager);
    Assert.assertEquals(oldSegments.size(), segments.size());
    Assert.assertEquals(oldSegments, segments);
}
Also used: MetadataSegmentManagerConfig (io.druid.metadata.MetadataSegmentManagerConfig), IndexSpec (io.druid.segment.IndexSpec), RoaringBitmapSerdeFactory (io.druid.segment.data.RoaringBitmapSerdeFactory), SQLMetadataSegmentManager (io.druid.metadata.SQLMetadataSegmentManager), DataSegment (io.druid.timeline.DataSegment), File (java.io.File), Ignore (org.junit.Ignore), Test (org.junit.Test)
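Both tests build the SQLMetadataSegmentManager the same way, so the setup could be pulled into a shared test helper; a minimal sketch, assuming the metadataStorageTablesConfigSupplier and connector fields of the test class and Guava's Suppliers.ofInstance (the helper name newSegmentManager is hypothetical):

    // Hypothetical helper, not in the project source; reuses the test-class fields shown above.
    private SQLMetadataSegmentManager newSegmentManager() {
        return new SQLMetadataSegmentManager(
            HadoopDruidConverterConfig.jsonMapper,
            Suppliers.ofInstance(new MetadataSegmentManagerConfig()),
            metadataStorageTablesConfigSupplier,
            connector);
    }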

Aggregations

MetadataSegmentManagerConfig (io.druid.metadata.MetadataSegmentManagerConfig): 2 uses
SQLMetadataSegmentManager (io.druid.metadata.SQLMetadataSegmentManager): 2 uses
IndexSpec (io.druid.segment.IndexSpec): 2 uses
RoaringBitmapSerdeFactory (io.druid.segment.data.RoaringBitmapSerdeFactory): 2 uses
DataSegment (io.druid.timeline.DataSegment): 2 uses
File (java.io.File): 2 uses
Test (org.junit.Test): 2 uses
Comparator (java.util.Comparator): 1 use
Ignore (org.junit.Ignore): 1 use