Usage example of org.apache.druid.timeline.partition.BuildingNumberedShardSpec in the project druid by druid-io.
From the class SegmentPublisherHelperTest, method testAnnotateCorePartitionSetSizeForNumberedShardSpec:
@Test
public void testAnnotateCorePartitionSetSizeForNumberedShardSpec() {
  // Build three segments whose shard specs are still in the "building" state,
  // each with a distinct partition number.
  final Set<DataSegment> segments = ImmutableSet.of(
      newSegment(new BuildingNumberedShardSpec(0)),
      newSegment(new BuildingNumberedShardSpec(1)),
      newSegment(new BuildingNumberedShardSpec(2))
  );
  // Annotation should convert every shard spec to a NumberedShardSpec whose
  // core partition set size equals the total number of segments (3).
  final Set<DataSegment> annotated = SegmentPublisherHelper.annotateShardSpec(segments);
  annotated.forEach(segment -> {
    Assert.assertSame(NumberedShardSpec.class, segment.getShardSpec().getClass());
    final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) segment.getShardSpec();
    Assert.assertEquals(3, numberedShardSpec.getNumCorePartitions());
  });
}
Usage example of org.apache.druid.timeline.partition.BuildingNumberedShardSpec in the project druid by druid-io.
From the class SinglePhaseParallelIndexTaskRunner, method allocateNewSegment:
/**
 * Allocate a new segment for the given timestamp locally. This method is called when dynamic partitioning is used
 * and {@link org.apache.druid.indexing.common.LockGranularity} is {@code TIME_CHUNK}.
 *
 * The allocation algorithm is similar to the Overlord-based segment allocation. It keeps the segment allocation
 * history per sequenceName. If the prevSegmentId is found in the segment allocation history, this method
 * returns the next segmentId right after the prevSegmentId in the history. Since the sequenceName is unique
 * per {@link SubTaskSpec} (it is the ID of subtaskSpec), this algorithm guarantees that the same set of segmentIds
 * are created in the same order for the same subtaskSpec.
 *
 * @param dataSource    datasource the new segment belongs to
 * @param timestamp     event timestamp used to resolve the target interval and version
 * @param sequenceName  key into the per-sequence allocation history
 * @param prevSegmentId previously allocated segmentId in this sequence, or null for the first allocation
 * @return the allocated segment id with a {@link BuildingNumberedShardSpec}
 * @throws IOException if resolving the interval and version fails
 * @see org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator#allocatePendingSegmentWithSegmentLineageCheck
 */
public SegmentIdWithShardSpec allocateNewSegment(String dataSource, DateTime timestamp, String sequenceName, @Nullable String prevSegmentId) throws IOException {
  final NonnullPair<Interval, String> intervalAndVersion = findIntervalAndVersion(timestamp);
  // Holder to carry the allocated id out of the compute() lambda.
  final MutableObject<SegmentIdWithShardSpec> allocatedId = new MutableObject<>();
  sequenceToSegmentIds.compute(sequenceName, (key, history) -> {
    final List<String> idHistory;
    final int prevIndex;
    if (prevSegmentId == null) {
      // First allocation for this sequence: start before the beginning of the history.
      idHistory = history == null ? new ArrayList<>() : history;
      prevIndex = -1;
    } else {
      if (history == null) {
        throw new ISE("Can't find previous segmentIds for sequence[%s]", sequenceName);
      }
      idHistory = history;
      prevIndex = idHistory.indexOf(prevSegmentId);
      if (prevIndex < 0) {
        throw new ISE("Can't find previously allocated segmentId[%s] for sequence[%s]", prevSegmentId, sequenceName);
      }
    }
    final int nextIndex = prevIndex + 1;
    final SegmentIdWithShardSpec nextId;
    if (nextIndex < idHistory.size()) {
      // Replay: the next id already exists in the history, so re-create it deterministically.
      final String recordedId = idHistory.get(nextIndex);
      final SegmentId parsedId = SegmentId.tryParse(dataSource, recordedId);
      if (parsedId == null) {
        throw new ISE("Illegal segmentId format [%s]", recordedId);
      }
      nextId = new SegmentIdWithShardSpec(parsedId.getDataSource(), parsedId.getInterval(), parsedId.getVersion(), new BuildingNumberedShardSpec(parsedId.getPartitionNum()));
    } else {
      // Fresh allocation: take the next partition number for this interval and append to the history.
      final int partitionNum = Counters.getAndIncrementInt(partitionNumCountersPerInterval, intervalAndVersion.lhs);
      nextId = new SegmentIdWithShardSpec(dataSource, intervalAndVersion.lhs, intervalAndVersion.rhs, new BuildingNumberedShardSpec(partitionNum));
      idHistory.add(nextId.toString());
    }
    allocatedId.setValue(nextId);
    // Keep (or install) the history list as the mapping for this sequence.
    return idHistory;
  });
  return allocatedId.getValue();
}
Aggregations