
Example 31 with SegmentIdWithShardSpec

use of org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec in project druid by druid-io.

the class SegmentAllocateActionTest method testAddToExistingLinearShardSpecsSameGranularity.

@Test
public void testAddToExistingLinearShardSpecsSameGranularity() throws Exception {
    final Task task = NoopTask.create();
    taskActionTestKit.getMetadataStorageCoordinator().announceHistoricalSegments(ImmutableSet.of(
        DataSegment.builder().dataSource(DATA_SOURCE).interval(Granularities.HOUR.bucket(PARTY_TIME))
                   .version(PARTY_TIME.toString()).shardSpec(new LinearShardSpec(0)).size(0).build(),
        DataSegment.builder().dataSource(DATA_SOURCE).interval(Granularities.HOUR.bucket(PARTY_TIME))
                   .version(PARTY_TIME.toString()).shardSpec(new LinearShardSpec(1)).size(0).build()
    ));
    taskActionTestKit.getTaskLockbox().add(task);
    final SegmentIdWithShardSpec id1 = allocate(task, PARTY_TIME, Granularities.NONE, Granularities.HOUR, "s1", null, LinearPartialShardSpec.instance());
    final SegmentIdWithShardSpec id2 = allocate(task, PARTY_TIME, Granularities.NONE, Granularities.HOUR, "s1", id1.toString(), LinearPartialShardSpec.instance());
    assertSameIdentifier(new SegmentIdWithShardSpec(DATA_SOURCE, Granularities.HOUR.bucket(PARTY_TIME), PARTY_TIME.toString(), new LinearShardSpec(2)), id1);
    assertSameIdentifier(new SegmentIdWithShardSpec(DATA_SOURCE, Granularities.HOUR.bucket(PARTY_TIME), PARTY_TIME.toString(), new LinearShardSpec(3)), id2);
}
Also used : Task(org.apache.druid.indexing.common.task.Task) NoopTask(org.apache.druid.indexing.common.task.NoopTask) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) Test(org.junit.Test)
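The `allocate(...)` calls above go through a private helper of SegmentAllocateActionTest that is not part of this snippet. Below is a minimal sketch of what such a helper might look like, assuming it builds a SegmentAllocateAction and performs it with the test kit's task action toolbox; the action's constructor argument list varies between Druid versions, so treat the exact parameters as illustrative only.

// Illustrative sketch only: the real helper lives in SegmentAllocateActionTest, and the
// SegmentAllocateAction constructor differs across Druid versions.
private SegmentIdWithShardSpec allocate(
    final Task task,
    final DateTime timestamp,
    final Granularity queryGranularity,
    final Granularity preferredSegmentGranularity,
    final String sequenceName,
    final String previousSegmentId,
    final PartialShardSpec partialShardSpec
)
{
    final SegmentAllocateAction action = new SegmentAllocateAction(
        DATA_SOURCE,
        timestamp,
        queryGranularity,
        preferredSegmentGranularity,
        sequenceName,
        previousSegmentId,
        false,                        // skipSegmentLineageCheck (assumed)
        partialShardSpec,
        LockGranularity.TIME_CHUNK    // assumed lock granularity for this test
    );
    return action.perform(task, taskActionTestKit.getTaskActionToolbox());
}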

Example 32 with SegmentIdWithShardSpec

use of org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method createNewSegment.

/**
 * This function creates a new segment for the given datasource/interval/etc. A critical
 * aspect of the creation is to make sure that the new version and new partition number make
 * sense given the existing segments and pending segments. It is equally important to avoid
 * clashes with existing pending and used/unused segments.
 * @param handle Database handle
 * @param dataSource datasource for the new segment
 * @param interval interval for the new segment
 * @param partialShardSpec Shard spec info minus segment id stuff
 * @param existingVersion Version of segments in interval, used to compute the version of the very first segment in
 *                        interval
 * @return the new segment id, or null if a new segment could not be allocated
 * @throws IOException if segment metadata cannot be read
 */
@Nullable
private SegmentIdWithShardSpec createNewSegment(
    final Handle handle,
    final String dataSource,
    final Interval interval,
    final PartialShardSpec partialShardSpec,
    final String existingVersion
) throws IOException {
    // Get the time chunk and associated data segments for the given interval, if any
    final List<TimelineObjectHolder<String, DataSegment>> existingChunks =
        getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
    if (existingChunks.size() > 1) {
        // Not possible to expand more than one chunk with a single segment.
        log.warn("Cannot allocate new segment for dataSource[%s], interval[%s]: already have [%,d] chunks.", dataSource, interval, existingChunks.size());
        return null;
    } else {
        // max partitionId of the shardSpecs which share the same partition space.
        SegmentIdWithShardSpec maxId = null;
        if (!existingChunks.isEmpty()) {
            TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
            // noinspection ConstantConditions
            for (DataSegment segment : FluentIterable
                .from(existingHolder.getObject())
                .transform(PartitionChunk::getObject)
                .filter(segment -> segment.getShardSpec().sharePartitionSpace(partialShardSpec))) {
                // Note that this will compute the max id of existing, visible, data segments in the time chunk:
                if (maxId == null || maxId.getShardSpec().getPartitionNum() < segment.getShardSpec().getPartitionNum()) {
                    maxId = SegmentIdWithShardSpec.fromDataSegment(segment);
                }
            }
        }
        // Get the version of the existing chunk, we might need it in some of the cases below
        // to compute the new identifier's version
        @Nullable final String versionOfExistingChunk;
        if (!existingChunks.isEmpty()) {
            // only one chunk is possible for the given interval, so take the first and only element
            versionOfExistingChunk = existingChunks.get(0).getVersion();
        } else {
            versionOfExistingChunk = null;
        }
        // Next, enrich the maxId computed above with information from the pending segments.
        // A pending segment may have a higher id; if so we should use it, since that avoids
        // clashes when the caller later inserts the new pending segment.
        final Set<SegmentIdWithShardSpec> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
        // Make sure we add the maxId we obtained from the segments table:
        if (maxId != null) {
            pendings.add(maxId);
        }
        // Now compute the maxId with all the information: pendings + segments:
        // The versionOfExistingChunk filter ensures that we pick the max id carrying the version of the
        // existing chunk. Otherwise a pending segment with a higher version but no corresponding used
        // segments could produce an id that clashes with an existing segment once the new id is generated.
        maxId = pendings.stream()
                        .filter(id -> id.getShardSpec().sharePartitionSpace(partialShardSpec))
                        .filter(id -> versionOfExistingChunk == null || id.getVersion().equals(versionOfExistingChunk))
                        .max((id1, id2) -> {
            final int versionCompare = id1.getVersion().compareTo(id2.getVersion());
            if (versionCompare != 0) {
                return versionCompare;
            } else {
                return Integer.compare(id1.getShardSpec().getPartitionNum(), id2.getShardSpec().getPartitionNum());
            }
        }).orElse(null);
        // The following block computes the new version. If it is non-null at the end of the block,
        // it is used as the version of the initial or appended segment.
        final String newSegmentVersion;
        if (versionOfExistingChunk != null) {
            // the existing chunk's version overrides, so pick it now that we know it exists
            newSegmentVersion = versionOfExistingChunk;
        } else if (!pendings.isEmpty() && maxId != null) {
            // there are no visible segments in the time chunk, so pick the maxId of pendings, as computed above
            newSegmentVersion = maxId.getVersion();
        } else {
            // no segments, no pendings, so this must be the very first segment created for this interval
            newSegmentVersion = null;
        }
        if (maxId == null) {
            // When appending segments, a null maxId means that we are allocating the very first
            // segment for this time chunk.
            // This code runs when the Overlord coordinates segment allocation, i.e., when segments are
            // appended or segment locks are used. Since the core partition set is not determined for
            // appended segments, we set it to 0. The core partition set is likewise not meaningful with
            // segment locks, so we set it to 0 there as well and let the OvershadowableManager handle
            // the atomic segment update.
            final int newPartitionId = partialShardSpec.useNonRootGenerationPartitionSpace()
                                       ? PartitionIds.NON_ROOT_GEN_START_PARTITION_ID
                                       : PartitionIds.ROOT_GEN_START_PARTITION_ID;
            String version = newSegmentVersion == null ? existingVersion : newSegmentVersion;
            return new SegmentIdWithShardSpec(
                dataSource,
                interval,
                version,
                partialShardSpec.complete(jsonMapper, newPartitionId, 0)
            );
        } else if (!maxId.getInterval().equals(interval) || maxId.getVersion().compareTo(existingVersion) > 0) {
            log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], existingVersion[%s]: conflicting segment[%s].", dataSource, interval, existingVersion, maxId);
            return null;
        } else if (maxId.getShardSpec().getNumCorePartitions() == SingleDimensionShardSpec.UNKNOWN_NUM_CORE_PARTITIONS) {
            log.warn("Cannot allocate new segment because of unknown core partition size of segment[%s], shardSpec[%s]", maxId, maxId.getShardSpec());
            return null;
        } else {
            return new SegmentIdWithShardSpec(
                dataSource,
                maxId.getInterval(),
                Preconditions.checkNotNull(newSegmentVersion, "newSegmentVersion"),
                partialShardSpec.complete(
                    jsonMapper,
                    maxId.getShardSpec().getPartitionNum() + 1,
                    maxId.getShardSpec().getNumCorePartitions()
                )
            );
        }
    }
}
Also used : Arrays(java.util.Arrays) Partitions(org.apache.druid.timeline.Partitions) Inject(com.google.inject.Inject) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart) StatementContext(org.skife.jdbi.v2.StatementContext) Pair(org.apache.druid.java.util.common.Pair) FluentIterable(com.google.common.collect.FluentIterable) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) ResultSet(java.sql.ResultSet) Map(java.util.Map) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) IAE(org.apache.druid.java.util.common.IAE) CloseableIterator(org.apache.druid.java.util.common.parsers.CloseableIterator) ByteArrayMapper(org.skife.jdbi.v2.util.ByteArrayMapper) DateTimes(org.apache.druid.java.util.common.DateTimes) ImmutableSet(com.google.common.collect.ImmutableSet) JacksonUtils(org.apache.druid.java.util.common.jackson.JacksonUtils) SegmentPublishResult(org.apache.druid.indexing.overlord.SegmentPublishResult) Collection(java.util.Collection) Segments(org.apache.druid.indexing.overlord.Segments) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) NotNull(javax.validation.constraints.NotNull) Collectors(java.util.stream.Collectors) List(java.util.List) PartitionIds(org.apache.druid.timeline.partition.PartitionIds) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) DataSegment(org.apache.druid.timeline.DataSegment) ISOChronology(org.joda.time.chrono.ISOChronology) PartialShardSpec(org.apache.druid.timeline.partition.PartialShardSpec) Logger(org.apache.druid.java.util.common.logger.Logger) IntStream(java.util.stream.IntStream) Iterables(com.google.common.collect.Iterables) Intervals(org.apache.druid.java.util.common.Intervals) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Hashing(com.google.common.hash.Hashing) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) ResultIterator(org.skife.jdbi.v2.ResultIterator) Nullable(javax.annotation.Nullable) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) BaseEncoding(com.google.common.io.BaseEncoding) HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Query(org.skife.jdbi.v2.Query) IOException(java.io.IOException) NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) Handle(org.skife.jdbi.v2.Handle) SingleDimensionShardSpec(org.apache.druid.timeline.partition.SingleDimensionShardSpec) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) SegmentUtils(org.apache.druid.segment.SegmentUtils) TransactionCallback(org.skife.jdbi.v2.TransactionCallback) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) StringEscapeUtils(org.apache.commons.lang.StringEscapeUtils) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) 
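The `.max(...)` comparator above orders candidate identifiers by version first and partition number second (after the candidates have already been filtered to the existing chunk's version, when there is one). The following standalone sketch shows that ordering with a plain `Candidate` class standing in for SegmentIdWithShardSpec; the class and values are illustrative, not part of Druid.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

// Sketch: the version-then-partition ordering used when picking maxId from pendings + segments.
final class MaxIdOrderingSketch
{
  static final class Candidate
  {
    final String version;
    final int partitionNum;

    Candidate(String version, int partitionNum)
    {
      this.version = version;
      this.partitionNum = partitionNum;
    }

    @Override
    public String toString()
    {
      return version + "_" + partitionNum;
    }
  }

  public static void main(String[] args)
  {
    final List<Candidate> candidates = Arrays.asList(
        new Candidate("A", 7),   // used segment: older version, high partition number
        new Candidate("B", 0),   // used segment from a newer overwrite
        new Candidate("B", 1)    // pending segment
    );

    final Optional<Candidate> maxId = candidates.stream().max(
        Comparator.comparing((Candidate c) -> c.version)
                  .thenComparingInt(c -> c.partitionNum)
    );

    // Prints "B_1": the version comparison dominates, so the next allocation under
    // version B would receive partition number 2.
    System.out.println(maxId.orElse(null));
  }
}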

Example 33 with SegmentIdWithShardSpec

use of org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method allocatePendingSegment.

@Nullable
private SegmentIdWithShardSpec allocatePendingSegment(
    final Handle handle,
    final String dataSource,
    final String sequenceName,
    final Interval interval,
    final PartialShardSpec partialShardSpec,
    final String maxVersion
) throws IOException {
    final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId(
        handle.createQuery(StringUtils.format(
            "SELECT payload FROM %s WHERE "
            + "dataSource = :dataSource AND "
            + "sequence_name = :sequence_name AND "
            + "start = :start AND "
            + "%2$send%2$s = :end",
            dbTables.getPendingSegmentsTable(),
            connector.getQuoteString()
        )),
        interval,
        sequenceName,
        null,
        Pair.of("dataSource", dataSource),
        Pair.of("sequence_name", sequenceName),
        Pair.of("start", interval.getStart().toString()),
        Pair.of("end", interval.getEnd().toString())
    );
    if (result.found) {
        // The found existing segment identifier can be null if its interval doesn't match the given interval
        return result.segmentIdentifier;
    }
    final SegmentIdWithShardSpec newIdentifier = createNewSegment(handle, dataSource, interval, partialShardSpec, maxVersion);
    if (newIdentifier == null) {
        return null;
    }
    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.
    // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval.
    // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
    // have difficulty with large unique keys (see https://github.com/apache/druid/issues/2319)
    final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
        Hashing.sha1()
               .newHasher()
               .putBytes(StringUtils.toUtf8(sequenceName))
               .putByte((byte) 0xff)
               .putLong(interval.getStartMillis())
               .putLong(interval.getEndMillis())
               .hash()
               .asBytes()
    );
    // always insert empty previous sequence id
    insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1);
    log.info("Allocated pending segment [%s] for sequence[%s] in DB", newIdentifier, sequenceName);
    return newIdentifier;
}
Also used : SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) Nullable(javax.annotation.Nullable)
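The comments above note that the SELECT-then-INSERT pair can lose a race under concurrent allocation and that callers must be prepared to retry. The sketch below illustrates that caller-side retry shape, assuming the allocation attempt is wrapped in a `Supplier` that returns null when it loses the race or cannot allocate; the class and method names are hypothetical, not the coordinator's actual API, which keeps this concern inside its own transactional wrappers.

import java.util.function.Supplier;
import javax.annotation.Nullable;

// Illustrative retry wrapper: re-run the SELECT -> INSERT attempt a bounded number of times so a
// later SELECT can observe the row a concurrent writer inserted for the same sequence/interval.
final class PendingSegmentAllocationRetrier
{
    private static final int MAX_ATTEMPTS = 3;

    @Nullable
    static <T> T withRetries(final Supplier<T> attempt)
    {
        for (int i = 0; i < MAX_ATTEMPTS; i++) {
            final T result = attempt.get();
            if (result != null) {
                return result;
            }
            // null means the attempt lost the race or found nothing allocatable; try again.
        }
        return null;
    }
}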

Example 34 with SegmentIdWithShardSpec

use of org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinatorTest method testAnotherAllocatePendingSegmentAfterRevertingCompaction.

/**
 * Slightly different from the test above in that it involves reverted compaction:
 *   1) used segments of version = A, id = 0, 1, 2
 *   2) overwrote segments of version = B, id = 0 <= compaction
 *   3) marked segments unused for version = A, id = 0, 1, 2 <= overshadowing
 *   4) pending segment of version = B, id = 1 <= appending new data, aborted
 *   5) reverted compaction, mark segments used for version = A, id = 0, 1, 2, and mark compacted segments unused
 *   6) used segments of version = A, id = 0, 1, 2
 *   7) pending segment of version = B, id = 1
 */
@Test
public void testAnotherAllocatePendingSegmentAfterRevertingCompaction() {
    String maxVersion = "Z";
    // 1.0) simulate one append load
    final PartialShardSpec partialShardSpec = NumberedPartialShardSpec.instance();
    final String dataSource = "ds";
    final Interval interval = Intervals.of("2017-01-01/2017-02-01");
    final SegmentIdWithShardSpec identifier = coordinator.allocatePendingSegment(dataSource, "seq", null, interval, partialShardSpec, "A", true);
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A", identifier.toString());
    // Assume it publishes; create its corresponding segment
    DataSegment segment = new DataSegment("ds", Intervals.of("2017-01-01T00Z/2017-02-01T00Z"), "A", ImmutableMap.of(), ImmutableList.of("dim1"), ImmutableList.of("m1"), new LinearShardSpec(0), 9, 100);
    Assert.assertTrue(insertUsedSegments(ImmutableSet.of(segment)));
    List<String> ids = retrieveUsedSegmentIds();
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A", ids.get(0));
    // 1.1) simulate one more append load  (as if previous segment was published, note different sequence name)
    final SegmentIdWithShardSpec identifier1 = coordinator.allocatePendingSegment(dataSource, "seq2", identifier.toString(), interval, partialShardSpec, maxVersion, true);
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_1", identifier1.toString());
    // Assume it publishes; create its corresponding segment
    segment = new DataSegment("ds", Intervals.of("2017-01-01T00Z/2017-02-01T00Z"), "A", ImmutableMap.of(), ImmutableList.of("dim1"), ImmutableList.of("m1"), new LinearShardSpec(1), 9, 100);
    Assert.assertTrue(insertUsedSegments(ImmutableSet.of(segment)));
    ids = retrieveUsedSegmentIds();
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_1", ids.get(1));
    // 1.2) simulate one more append load  (as if previous segment was published, note different sequence name)
    final SegmentIdWithShardSpec identifier2 = coordinator.allocatePendingSegment(dataSource, "seq3", identifier1.toString(), interval, partialShardSpec, maxVersion, true);
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_2", identifier2.toString());
    // Assume it publishes; create its corresponding segment
    segment = new DataSegment("ds", Intervals.of("2017-01-01T00Z/2017-02-01T00Z"), "A", ImmutableMap.of(), ImmutableList.of("dim1"), ImmutableList.of("m1"), new LinearShardSpec(2), 9, 100);
    // state so far:
    // pendings: A: 0,1,2
    // used segments A: 0,1,2
    // unused segments:
    Assert.assertTrue(insertUsedSegments(ImmutableSet.of(segment)));
    ids = retrieveUsedSegmentIds();
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_2", ids.get(2));
    // 2)
    // now simulate that one compaction (batch ingestion) was done for the same interval (like a reindex of the previous three segments):
    DataSegment compactedSegment = new DataSegment("ds", Intervals.of("2017-01-01T00Z/2017-02-01T00Z"), "B", ImmutableMap.of(), ImmutableList.of("dim1"), ImmutableList.of("m1"), new LinearShardSpec(0), 9, 100);
    Assert.assertTrue(insertUsedSegments(ImmutableSet.of(compactedSegment)));
    ids = retrieveUsedSegmentIds();
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_B", ids.get(3));
    // 3) When overshadowing, segments are still marked as "used" in the segments table
    // state so far:
    // pendings: A: 0,1,2
    // used segments: A: 0,1,2; B: 0 <- new compacted segment, overshadows previous version A
    // unused segment:
    // 4) pending segment of version = B, id = 1 <= appending new data, aborted
    final SegmentIdWithShardSpec identifier3 = coordinator.allocatePendingSegment(dataSource, "seq4", identifier2.toString(), interval, partialShardSpec, maxVersion, true);
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_B_1", identifier3.toString());
    // no corresponding segment, pending aborted
    // state so far:
    // pendings: A: 0,1,2; B:1 (note that B_1 does not make it into segments since its task aborted)
    // used segments: A: 0,1,2; B: 0 <-  compacted segment, overshadows previous version A
    // unused segment:
    // 5) reverted compaction (by marking B_0 as unused)
    // Reverting compaction is a manual metadata update, which here amounts to dropping the
    // compacted segment by marking it unused:
    markAllSegmentsUnused(ImmutableSet.of(compactedSegment));
    // pending: version = A, id = 0,1,2
    // version = B, id = 1
    // 
    // used segment: version = A, id = 0,1,2
    // unused segment: version = B, id = 0
    List<String> pendings = retrievePendingSegmentIds();
    Assert.assertTrue(pendings.size() == 4);
    List<String> used = retrieveUsedSegmentIds();
    Assert.assertTrue(used.size() == 3);
    List<String> unused = retrieveUnusedSegmentIds();
    Assert.assertTrue(unused.size() == 1);
    // Simulate one more append load
    final SegmentIdWithShardSpec identifier4 = coordinator.allocatePendingSegment(dataSource, "seq5", identifier1.toString(), interval, partialShardSpec, maxVersion, true);
    // versionOfExistingChunk = A, so only pendings with version A are considered:
    // maxId = A_2 -> new partition number = 3
    // -> ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_3
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_3", identifier4.toString());
    // Assume it publishes; create its corresponding segment
    segment = new DataSegment("ds", Intervals.of("2017-01-01T00Z/2017-02-01T00Z"), "A", ImmutableMap.of(), ImmutableList.of("dim1"), ImmutableList.of("m1"), new LinearShardSpec(3), 9, 100);
    // pending: version = A, id = 0,1,2,3
    // version = B, id = 1
    // 
    // used segment: version = A, id = 0,1,2,3
    // unused segment: version = B, id = 0
    Assert.assertTrue(insertUsedSegments(ImmutableSet.of(segment)));
    ids = retrieveUsedSegmentIds();
    Assert.assertEquals("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_A_3", ids.get(3));
}
Also used : LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) HashBasedNumberedPartialShardSpec(org.apache.druid.timeline.partition.HashBasedNumberedPartialShardSpec) PartialShardSpec(org.apache.druid.timeline.partition.PartialShardSpec) NumberedPartialShardSpec(org.apache.druid.timeline.partition.NumberedPartialShardSpec) NumberedOverwritePartialShardSpec(org.apache.druid.timeline.partition.NumberedOverwritePartialShardSpec) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval) Test(org.junit.Test)

Example 35 with SegmentIdWithShardSpec

use of org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinatorTest method testAddNumberedShardSpecAfterMultiDimensionsShardSpecWithUnknownCorePartitionSize.

@Test
public void testAddNumberedShardSpecAfterMultiDimensionsShardSpecWithUnknownCorePartitionSize() throws IOException {
    final String datasource = "datasource";
    final Interval interval = Intervals.of("2020-01-01/P1D");
    final String version = "version";
    final List<String> dimensions = ImmutableList.of("dim");
    final List<String> metrics = ImmutableList.of("met");
    final Set<DataSegment> originalSegments = new HashSet<>();
    for (int i = 0; i < 6; i++) {
        originalSegments.add(new DataSegment(
            datasource, interval, version, ImmutableMap.of(), dimensions, metrics,
            new DimensionRangeShardSpec(
                Collections.singletonList("dim"),
                i == 0 ? null : StringTuple.create(String.valueOf(i - 1)),
                i == 5 ? null : StringTuple.create(String.valueOf(i)),
                i,
                // null numCorePartitions emulates shardSpecs created in older versions of Druid
                null
            ),
            9, 10L
        ));
    }
    coordinator.announceHistoricalSegments(originalSegments);
    final SegmentIdWithShardSpec id = coordinator.allocatePendingSegment(datasource, "seq", null, interval, NumberedPartialShardSpec.instance(), version, false);
    Assert.assertNull(id);
}
Also used : DataSegment(org.apache.druid.timeline.DataSegment) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) DimensionRangeShardSpec(org.apache.druid.timeline.partition.DimensionRangeShardSpec) Interval(org.joda.time.Interval) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

SegmentIdWithShardSpec (org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) 36
Test (org.junit.Test) 23
DataSegment (org.apache.druid.timeline.DataSegment) 14
Interval (org.joda.time.Interval) 14
NoopTask (org.apache.druid.indexing.common.task.NoopTask) 12
Task (org.apache.druid.indexing.common.task.Task) 12
PartialShardSpec (org.apache.druid.timeline.partition.PartialShardSpec) 11
HashBasedNumberedPartialShardSpec (org.apache.druid.timeline.partition.HashBasedNumberedPartialShardSpec) 10
NumberedPartialShardSpec (org.apache.druid.timeline.partition.NumberedPartialShardSpec) 10
HashBasedNumberedShardSpec (org.apache.druid.timeline.partition.HashBasedNumberedShardSpec) 9
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec) 9
NumberedShardSpec (org.apache.druid.timeline.partition.NumberedShardSpec) 8
NumberedOverwritePartialShardSpec (org.apache.druid.timeline.partition.NumberedOverwritePartialShardSpec) 7
IOException (java.io.IOException) 6
HashSet (java.util.HashSet) 6
Map (java.util.Map) 6
DateTime (org.joda.time.DateTime) 6
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 5
Iterables (com.google.common.collect.Iterables) 5
List (java.util.List) 5