use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class HttpIndexingServiceClientTest method testCompact.
@Test
public void testCompact() throws Exception {
DataSegment segment = new DataSegment(
    "test",
    Intervals.of("2015-04-12/2015-04-13"),
    "1",
    ImmutableMap.of("bucket", "bucket", "path", "test/2015-04-12T00:00:00.000Z_2015-04-13T00:00:00.000Z/1/0/index.zip"),
    null,
    null,
    NoneShardSpec.instance(),
    0,
    1
);
Capture captureTask = EasyMock.newCapture();
HttpResponse response = EasyMock.createMock(HttpResponse.class);
EasyMock.expect(response.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
EasyMock.expect(response.getContent()).andReturn(new BigEndianHeapChannelBuffer(0));
EasyMock.replay(response);
StringFullResponseHolder responseHolder = new StringFullResponseHolder(response, StandardCharsets.UTF_8)
    .addChunk(jsonMapper.writeValueAsString(ImmutableMap.of("task", "aaa")));
EasyMock.expect(druidLeaderClient.makeRequest(HttpMethod.POST, "/druid/indexer/v1/task"))
    .andReturn(new Request(HttpMethod.POST, new URL("http://localhost:8090/druid/indexer/v1/task")))
    .anyTimes();
EasyMock.expect(druidLeaderClient.go(EasyMock.anyObject(Request.class))).andReturn(responseHolder).anyTimes();
EasyMock.expect(mockMapper.writeValueAsBytes(EasyMock.capture(captureTask)))
    .andReturn(new byte[] { 1, 2, 3 })
    .anyTimes();
EasyMock.expect(mockMapper.readValue(EasyMock.anyString(), EasyMock.eq(JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT)))
    .andReturn(ImmutableMap.of())
    .anyTimes();
EasyMock.replay(druidLeaderClient, mockMapper);
HttpIndexingServiceClient httpIndexingServiceClient = new HttpIndexingServiceClient(mockMapper, druidLeaderClient);
try {
httpIndexingServiceClient.compactSegments("test-compact", ImmutableList.of(segment), 50, null, null, null, null, null, null, null);
} catch (Exception e) {
// Ignore the IllegalStateException: the taskId is generated internally, so the returned task id will fail the check
Assert.assertEquals(IllegalStateException.class.getName(), e.getCause().getClass().getName());
}
ClientCompactionTaskQuery taskQuery = (ClientCompactionTaskQuery) captureTask.getValue();
Assert.assertNull(taskQuery.getIoConfig().getInputSpec().getSha256OfSortedSegmentIds());
}
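For reference, the nine-argument DataSegment constructor used above (and again in the BroadcastSegmentIndexedTableTest setup further down) takes, in order: data source, interval, version, load spec, dimensions, metrics, shard spec, binary version, and size. The annotated restatement below is a sketch; the parameter names are inferred from the call sites on this page, not quoted from Druid's source.
// Sketch: the same call as in testCompact, with each argument labelled.
DataSegment segment = new DataSegment(
    "test",                                    // dataSource
    Intervals.of("2015-04-12/2015-04-13"),     // interval covered by the segment
    "1",                                       // version
    ImmutableMap.of(                           // loadSpec: where deep storage holds the segment
        "bucket", "bucket",
        "path", "test/2015-04-12T00:00:00.000Z_2015-04-13T00:00:00.000Z/1/0/index.zip"),
    null,                                      // dimensions (none declared in this test)
    null,                                      // metrics (none declared in this test)
    NoneShardSpec.instance(),                  // shardSpec: a single unpartitioned chunk
    0,                                         // binaryVersion
    1                                          // size in bytes
);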
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class MetadataSegmentView method poll.
private void poll() {
log.info("polling published segments from coordinator");
final JsonParserIterator<SegmentWithOvershadowedStatus> metadataSegments = getMetadataSegments(
    coordinatorDruidLeaderClient,
    jsonMapper,
    segmentWatcherConfig.getWatchedDataSources()
);
final ImmutableSortedSet.Builder<SegmentWithOvershadowedStatus> builder = ImmutableSortedSet.naturalOrder();
while (metadataSegments.hasNext()) {
final SegmentWithOvershadowedStatus segment = metadataSegments.next();
// Intern the DataSegment so repeated polls reuse one canonical instance per segment.
final DataSegment interned = DataSegmentInterner.intern(segment.getDataSegment());
final SegmentWithOvershadowedStatus segmentWithOvershadowedStatus =
    new SegmentWithOvershadowedStatus(interned, segment.isOvershadowed());
builder.add(segmentWithOvershadowedStatus);
}
publishedSegments = builder.build();
cachePopulated.countDown();
}
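The DataSegmentInterner.intern call above deduplicates DataSegment instances across polls so that equal segments share one canonical object. Below is a minimal sketch of that pattern, assuming a Guava weak interner; the real DataSegmentInterner implementation is not shown here and may differ.
import com.google.common.collect.Interner;
import com.google.common.collect.Interners;
import org.apache.druid.timeline.DataSegment;

// Sketch only: a weak interner hands back one canonical instance per equal DataSegment,
// so repeated coordinator polls that deserialize the same segment do not keep duplicates alive.
public class DataSegmentInternerSketch {
    private static final Interner<DataSegment> INTERNER = Interners.newWeakInterner();

    private DataSegmentInternerSketch() {
    }

    public static DataSegment intern(DataSegment segment) {
        return INTERNER.intern(segment);
    }
}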
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class BroadcastSegmentIndexedTableTest method setup.
@Before
public void setup() throws IOException, SegmentLoadingException {
final ObjectMapper mapper = new DefaultObjectMapper();
// Register SegmentizerModule so the SegmentizerFactory stored in factory.json can be deserialized below.
mapper.registerModule(new SegmentizerModule());
final IndexIO indexIO = new IndexIO(mapper, () -> 0);
mapper.setInjectableValues(
    new InjectableValues.Std()
        .addValue(ExprMacroTable.class.getName(), TestExprMacroTable.INSTANCE)
        .addValue(ObjectMapper.class.getName(), mapper)
        .addValue(IndexIO.class, indexIO)
        .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT)
);
final IndexMerger indexMerger = new IndexMergerV9(mapper, indexIO, OffHeapMemorySegmentWriteOutMediumFactory.instance());
Interval testInterval = Intervals.of("2011-01-12T00:00:00.000Z/2011-05-01T00:00:00.000Z");
IncrementalIndex data = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv");
File segment = new File(temporaryFolder.newFolder(), "segment");
File persisted = indexMerger.persist(data, testInterval, segment, new IndexSpec(), null);
File factoryJson = new File(persisted, "factory.json");
Assert.assertTrue(factoryJson.exists());
SegmentizerFactory factory = mapper.readValue(factoryJson, SegmentizerFactory.class);
Assert.assertTrue(factory instanceof MMappedQueryableSegmentizerFactory);
// columnNames is a test field (declared outside this excerpt); it is populated below from the loaded queryable index.
DataSegment dataSegment = new DataSegment(
    DATASOURCE,
    testInterval,
    DateTimes.nowUtc().toString(),
    ImmutableMap.of(),
    columnNames,
    ImmutableList.of(),
    null,
    null,
    segment.getTotalSpace()
);
backingSegment = (QueryableIndexSegment) factory.factorize(dataSegment, segment, false, SegmentLazyLoadFailCallback.NOOP);
columnNames = ImmutableList.<String>builder()
    .add(ColumnHolder.TIME_COLUMN_NAME)
    .addAll(backingSegment.asQueryableIndex().getColumnNames())
    .build();
broadcastTable = new BroadcastSegmentIndexedTable(backingSegment, keyColumns, dataSegment.getVersion());
}
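The factory.json round trip above relies on Jackson polymorphic deserialization: registering SegmentizerModule lets readValue against the SegmentizerFactory interface produce the concrete MMappedQueryableSegmentizerFactory. The self-contained sketch below shows the general pattern with hypothetical types (LoaderFactory, MMapLoaderFactory); it is not Druid's actual registration code.
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;

public class PolymorphicFactorySketch {
    // Hypothetical stand-in for SegmentizerFactory: the "type" property selects the subtype.
    @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
    @JsonSubTypes({@JsonSubTypes.Type(value = MMapLoaderFactory.class, name = "mMap")})
    interface LoaderFactory {
        String describe();
    }

    // Hypothetical stand-in for MMappedQueryableSegmentizerFactory.
    static class MMapLoaderFactory implements LoaderFactory {
        @Override
        public String describe() {
            return "memory-mapped loader";
        }
    }

    public static void main(String[] args) throws Exception {
        final ObjectMapper mapper = new ObjectMapper();
        // Deserializing against the interface picks the registered subtype from the "type" field,
        // just as reading factory.json yields a concrete SegmentizerFactory implementation.
        final LoaderFactory factory = mapper.readValue("{\"type\":\"mMap\"}", LoaderFactory.class);
        System.out.println(factory.describe());
    }
}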
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class SegmentPublisherHelperTest method testAnnotateCorePartitionSetSizeForSingleDimensionShardSpec.
@Test
public void testAnnotateCorePartitionSetSizeForSingleDimensionShardSpec() {
final Set<DataSegment> segments = ImmutableSet.of(
    newSegment(new BuildingSingleDimensionShardSpec(0, "dim", null, "ccc", 0)),
    newSegment(new BuildingSingleDimensionShardSpec(1, "dim", null, "ccc", 1)),
    newSegment(new BuildingSingleDimensionShardSpec(2, "dim", null, "ccc", 2))
);
final Set<DataSegment> annotated = SegmentPublisherHelper.annotateShardSpec(segments);
for (DataSegment segment : annotated) {
Assert.assertSame(SingleDimensionShardSpec.class, segment.getShardSpec().getClass());
final SingleDimensionShardSpec shardSpec = (SingleDimensionShardSpec) segment.getShardSpec();
Assert.assertEquals(3, shardSpec.getNumCorePartitions());
}
}
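This test, like the hash-based variant that follows, builds its inputs through a newSegment helper that is not shown in this excerpt. Below is a plausible sketch of such a helper, assuming the nine-argument DataSegment constructor used earlier on this page; the real helper in SegmentPublisherHelperTest may differ.
// Hypothetical helper; its name and values are assumptions, and only the constructor
// signature is taken from the DataSegment call sites earlier on this page.
private static DataSegment newSegment(ShardSpec shardSpec) {
    return new DataSegment(
        "datasource",                            // dataSource
        Intervals.of("2020-01-01/2020-01-02"),   // interval
        "version",                               // version
        ImmutableMap.of(),                       // loadSpec
        ImmutableList.of(),                      // dimensions
        ImmutableList.of(),                      // metrics
        shardSpec,                               // the building shard spec under test
        9,                                       // binaryVersion
        10L                                      // size
    );
}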
use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
the class SegmentPublisherHelperTest method testAnnotateCorePartitionSetSizeForHashNumberedShardSpec.
@Test
public void testAnnotateCorePartitionSetSizeForHashNumberedShardSpec() {
final Set<DataSegment> segments = ImmutableSet.of(
    newSegment(new BuildingHashBasedNumberedShardSpec(0, 0, 3, null, HashPartitionFunction.MURMUR3_32_ABS, new ObjectMapper())),
    newSegment(new BuildingHashBasedNumberedShardSpec(1, 1, 3, null, HashPartitionFunction.MURMUR3_32_ABS, new ObjectMapper())),
    newSegment(new BuildingHashBasedNumberedShardSpec(2, 2, 3, null, HashPartitionFunction.MURMUR3_32_ABS, new ObjectMapper()))
);
final Set<DataSegment> annotated = SegmentPublisherHelper.annotateShardSpec(segments);
for (DataSegment segment : annotated) {
Assert.assertSame(HashBasedNumberedShardSpec.class, segment.getShardSpec().getClass());
final HashBasedNumberedShardSpec shardSpec = (HashBasedNumberedShardSpec) segment.getShardSpec();
Assert.assertEquals(3, shardSpec.getNumCorePartitions());
}
}
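Taken together, the two tests check a single invariant of SegmentPublisherHelper.annotateShardSpec: each building shard spec in a time chunk is replaced by its completed counterpart, with numCorePartitions equal to the number of segments in that chunk. The sketch below states that invariant over hypothetical types; it is not the real SegmentPublisherHelper implementation.
import java.util.ArrayList;
import java.util.List;

// Conceptual sketch only, not Druid's code: a "building" spec completes itself once the
// final partition count for its time chunk is known, which here is simply the group size.
public class CorePartitionAnnotationSketch {
    interface BuildingSpec<T> {
        T complete(int numCorePartitions);
    }

    static <T> List<T> annotate(List<? extends BuildingSpec<T>> buildingSpecs) {
        final int numCorePartitions = buildingSpecs.size();
        final List<T> completed = new ArrayList<>(buildingSpecs.size());
        for (BuildingSpec<T> spec : buildingSpecs) {
            completed.add(spec.complete(numCorePartitions));
        }
        return completed;
    }
}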