
Example 91 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class SQLAuditManager method doAudit.

@Override
public <T> void doAudit(String key, String type, AuditInfo auditInfo, T payload, ConfigSerde<T> configSerde) {
    AuditEntry auditEntry = AuditEntry.builder().key(key).type(type).auditInfo(auditInfo).payload(configSerde.serializeToString(payload, config.isSkipNullField())).build();
    dbi.withHandle(new HandleCallback<Void>() {

        @Override
        public Void withHandle(Handle handle) throws Exception {
            doAudit(auditEntry, handle);
            return null;
        }
    });
}
Also used : AuditEntry(org.apache.druid.audit.AuditEntry) IOException(java.io.IOException) Handle(org.skife.jdbi.v2.Handle)
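
For reference, here is a minimal, self-contained sketch of the same DBI.withHandle callback pattern. It is not taken from the Druid sources: it assumes an in-memory H2 database on the classpath, and the audit table and column names are hypothetical.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class AuditInsertSketch {
    public static void main(String[] args) {
        // Assumption: the H2 driver is available; any JDBC URL would do here.
        DBI dbi = new DBI("jdbc:h2:mem:audit");
        dbi.withHandle(new HandleCallback<Void>() {
            @Override
            public Void withHandle(Handle handle) {
                // Hypothetical table and columns, used only to illustrate the callback shape.
                handle.execute("CREATE TABLE audit (audit_key VARCHAR(255), payload VARCHAR(1024))");
                handle.execute("INSERT INTO audit (audit_key, payload) VALUES (?, ?)",
                        "coordinator.config", "{\"skipNull\":true}");
                return null;
            }
        });
    }
}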

Example 92 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method createNewSegment.

/**
 * This function creates a new segment for the given datasource/interval/etc. A critical
 * aspect of the creation is to make sure that the new version and new partition number make
 * sense given the existing segments and pending segments; it is also very important to avoid
 * clashes with existing pending and used/unused segments.
 * @param handle Database handle
 * @param dataSource datasource for the new segment
 * @param interval interval for the new segment
 * @param partialShardSpec Shard spec info minus segment id stuff
 * @param existingVersion Version of segments in interval, used to compute the version of the very first segment in
 *                        interval
 * @return the identifier of the new segment, or null if a new segment cannot be allocated
 * @throws IOException if reading the pending segments from the metadata store fails
 */
@Nullable
private SegmentIdWithShardSpec createNewSegment(final Handle handle, final String dataSource, final Interval interval, final PartialShardSpec partialShardSpec, final String existingVersion) throws IOException {
    // Get the time chunk and associated data segments for the given interval, if any
    final List<TimelineObjectHolder<String, DataSegment>> existingChunks = getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
    if (existingChunks.size() > 1) {
        // Not possible to expand more than one chunk with a single segment.
        log.warn("Cannot allocate new segment for dataSource[%s], interval[%s]: already have [%,d] chunks.", dataSource, interval, existingChunks.size());
        return null;
    } else {
        // max partitionId of the shardSpecs which share the same partition space.
        SegmentIdWithShardSpec maxId = null;
        if (!existingChunks.isEmpty()) {
            TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
            // noinspection ConstantConditions
            for (DataSegment segment : FluentIterable.from(existingHolder.getObject()).transform(PartitionChunk::getObject).filter(segment -> segment.getShardSpec().sharePartitionSpace(partialShardSpec))) {
                // Note that this will compute the max id of existing, visible data segments in the time chunk:
                if (maxId == null || maxId.getShardSpec().getPartitionNum() < segment.getShardSpec().getPartitionNum()) {
                    maxId = SegmentIdWithShardSpec.fromDataSegment(segment);
                }
            }
        }
        // Get the version of the existing chunk, we might need it in some of the cases below
        // to compute the new identifier's version
        @Nullable final String versionOfExistingChunk;
        if (!existingChunks.isEmpty()) {
            // remember, only one chunk is possible for a given interval, so get the first and only one
            versionOfExistingChunk = existingChunks.get(0).getVersion();
        } else {
            versionOfExistingChunk = null;
        }
        // Next, we need to enrich the maxId computed above with information from the pending segments.
        // It is possible that a pending segment has a higher id; in that case we need it, since using it
        // avoids clashes when the caller of this method later inserts the new pending segment.
        final Set<SegmentIdWithShardSpec> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
        // Make sure we add the maxId we obtained from the segments table:
        if (maxId != null) {
            pendings.add(maxId);
        }
        // Now compute the maxId with all the information: pendings + segments:
        // The versionOfExistingChunk filter ensures that we pick the max id with the version of the
        // existing chunk, in case there is a pending segment with a higher version but no corresponding
        // used segments, which could clash with an existing segment once the new id is generated.
        maxId = pendings.stream().filter(id -> id.getShardSpec().sharePartitionSpace(partialShardSpec)).filter(id -> versionOfExistingChunk == null ? true : id.getVersion().equals(versionOfExistingChunk)).max((id1, id2) -> {
            final int versionCompare = id1.getVersion().compareTo(id2.getVersion());
            if (versionCompare != 0) {
                return versionCompare;
            } else {
                return Integer.compare(id1.getShardSpec().getPartitionNum(), id2.getShardSpec().getPartitionNum());
            }
        }).orElse(null);
        // The following code attempts to compute the new version; if this new version is not null
        // at the end of the next block, it will be used as the version for the initial or appended
        // segment.
        final String newSegmentVersion;
        if (versionOfExistingChunk != null) {
            // the existing chunk's version overrides, so pick it now that we know it exists
            newSegmentVersion = versionOfExistingChunk;
        } else if (!pendings.isEmpty() && maxId != null) {
            // there are no visible segments in the time chunk, so pick the maxId of pendings, as computed above
            newSegmentVersion = maxId.getVersion();
        } else {
            // no segments, no pendings, so this must be the very first segment created for this interval
            newSegmentVersion = null;
        }
        if (maxId == null) {
            // When appending segments, null maxId means that we are allocating the very initial
            // segment for this time chunk.
            // This code is executed when the Overlord coordinates segment allocation, which happens either
            // when you append segments or when you use segment lock. Since the core partitions set is not
            // determined for appended segments, we set it to 0. When you use segment lock, the core
            // partitions set does not apply, so we simply set it to 0 and let the OvershadowableManager
            // handle the atomic segment update.
            final int newPartitionId = partialShardSpec.useNonRootGenerationPartitionSpace() ? PartitionIds.NON_ROOT_GEN_START_PARTITION_ID : PartitionIds.ROOT_GEN_START_PARTITION_ID;
            String version = newSegmentVersion == null ? existingVersion : newSegmentVersion;
            return new SegmentIdWithShardSpec(dataSource, interval, version, partialShardSpec.complete(jsonMapper, newPartitionId, 0));
        } else if (!maxId.getInterval().equals(interval) || maxId.getVersion().compareTo(existingVersion) > 0) {
            log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], existingVersion[%s]: conflicting segment[%s].", dataSource, interval, existingVersion, maxId);
            return null;
        } else if (maxId.getShardSpec().getNumCorePartitions() == SingleDimensionShardSpec.UNKNOWN_NUM_CORE_PARTITIONS) {
            log.warn("Cannot allocate new segment because of unknown core partition size of segment[%s], shardSpec[%s]", maxId, maxId.getShardSpec());
            return null;
        } else {
            return new SegmentIdWithShardSpec(dataSource, maxId.getInterval(), Preconditions.checkNotNull(newSegmentVersion, "newSegmentVersion"), partialShardSpec.complete(jsonMapper, maxId.getShardSpec().getPartitionNum() + 1, maxId.getShardSpec().getNumCorePartitions()));
        }
    }
}
Also used : Arrays(java.util.Arrays) Partitions(org.apache.druid.timeline.Partitions) Inject(com.google.inject.Inject) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart) StatementContext(org.skife.jdbi.v2.StatementContext) Pair(org.apache.druid.java.util.common.Pair) FluentIterable(com.google.common.collect.FluentIterable) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) ResultSet(java.sql.ResultSet) Map(java.util.Map) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) IAE(org.apache.druid.java.util.common.IAE) CloseableIterator(org.apache.druid.java.util.common.parsers.CloseableIterator) ByteArrayMapper(org.skife.jdbi.v2.util.ByteArrayMapper) DateTimes(org.apache.druid.java.util.common.DateTimes) ImmutableSet(com.google.common.collect.ImmutableSet) JacksonUtils(org.apache.druid.java.util.common.jackson.JacksonUtils) SegmentPublishResult(org.apache.druid.indexing.overlord.SegmentPublishResult) Collection(java.util.Collection) Segments(org.apache.druid.indexing.overlord.Segments) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) NotNull(javax.validation.constraints.NotNull) Collectors(java.util.stream.Collectors) List(java.util.List) PartitionIds(org.apache.druid.timeline.partition.PartitionIds) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) DataSegment(org.apache.druid.timeline.DataSegment) ISOChronology(org.joda.time.chrono.ISOChronology) PartialShardSpec(org.apache.druid.timeline.partition.PartialShardSpec) Logger(org.apache.druid.java.util.common.logger.Logger) IntStream(java.util.stream.IntStream) Iterables(com.google.common.collect.Iterables) Intervals(org.apache.druid.java.util.common.Intervals) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Hashing(com.google.common.hash.Hashing) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) ResultIterator(org.skife.jdbi.v2.ResultIterator) Nullable(javax.annotation.Nullable) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) BaseEncoding(com.google.common.io.BaseEncoding) HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Query(org.skife.jdbi.v2.Query) IOException(java.io.IOException) NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) Handle(org.skife.jdbi.v2.Handle) SingleDimensionShardSpec(org.apache.druid.timeline.partition.SingleDimensionShardSpec) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) SegmentUtils(org.apache.druid.segment.SegmentUtils) TransactionCallback(org.skife.jdbi.v2.TransactionCallback) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) StringEscapeUtils(org.apache.commons.lang.StringEscapeUtils) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) 
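
The max-id selection above orders candidate ids by version first and uses the partition number only as a tie-breaker. Here is a minimal stand-alone sketch of that ordering; it is not from the Druid sources, and the Id class is a hypothetical stand-in for SegmentIdWithShardSpec.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class MaxIdSketch {
    // Hypothetical stand-in for SegmentIdWithShardSpec: only the fields the comparison needs.
    static final class Id {
        final String version;
        final int partitionNum;
        Id(String version, int partitionNum) { this.version = version; this.partitionNum = partitionNum; }
        @Override public String toString() { return version + "_" + partitionNum; }
    }

    public static void main(String[] args) {
        List<Id> pendings = List.of(new Id("2021-01-01", 3), new Id("2021-01-02", 0), new Id("2021-01-01", 7));
        // Compare by version first, then by partition number, as in createNewSegment.
        Optional<Id> maxId = pendings.stream()
                .max(Comparator.comparing((Id id) -> id.version)
                        .thenComparingInt(id -> id.partitionNum));
        // A higher version wins regardless of partition number: prints 2021-01-02_0.
        System.out.println(maxId.orElse(null));
    }
}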

Example 93 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method announceHistoricalSegments.

@Override
public SegmentPublishResult announceHistoricalSegments(final Set<DataSegment> segments, final Set<DataSegment> segmentsToDrop, @Nullable final DataSourceMetadata startMetadata, @Nullable final DataSourceMetadata endMetadata) throws IOException {
    if (segments.isEmpty()) {
        throw new IllegalArgumentException("segment set must not be empty");
    }
    final String dataSource = segments.iterator().next().getDataSource();
    for (DataSegment segment : segments) {
        if (!dataSource.equals(segment.getDataSource())) {
            throw new IllegalArgumentException("segments must all be from the same dataSource");
        }
    }
    if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
        throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
    }
    // Find which segments are used (i.e. not overshadowed).
    final Set<DataSegment> usedSegments = new HashSet<>();
    List<TimelineObjectHolder<String, DataSegment>> segmentHolders = VersionedIntervalTimeline.forSegments(segments).lookupWithIncompletePartitions(Intervals.ETERNITY);
    for (TimelineObjectHolder<String, DataSegment> holder : segmentHolders) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            usedSegments.add(chunk.getObject());
        }
    }
    final AtomicBoolean definitelyNotUpdated = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {

            @Override
            public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
                // Set definitelyNotUpdated back to false upon retrying.
                definitelyNotUpdated.set(false);
                if (startMetadata != null) {
                    final DataStoreMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
                    if (result != DataStoreMetadataUpdateResult.SUCCESS) {
                        // Metadata was definitely not updated.
                        transactionStatus.setRollbackOnly();
                        definitelyNotUpdated.set(true);
                        if (result == DataStoreMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataStoreMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                if (segmentsToDrop != null && !segmentsToDrop.isEmpty()) {
                    final DataStoreMetadataUpdateResult result = dropSegmentsWithHandle(handle, segmentsToDrop, dataSource);
                    if (result != DataStoreMetadataUpdateResult.SUCCESS) {
                        // Metadata store was definitely not updated.
                        transactionStatus.setRollbackOnly();
                        definitelyNotUpdated.set(true);
                        if (result == DataStoreMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataStoreMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                final Set<DataSegment> inserted = announceHistoricalSegmentBatch(handle, segments, usedSegments);
                return SegmentPublishResult.ok(ImmutableSet.copyOf(inserted));
            }
        }, 3, getSqlMetadataMaxRetry());
    } catch (CallbackFailedException e) {
        if (definitelyNotUpdated.get()) {
            return SegmentPublishResult.fail(e.getMessage());
        } else {
            // Must throw exception if we are not sure if we updated or not.
            throw e;
        }
    }
}
Also used : ResultSet(java.sql.ResultSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) HashSet(java.util.HashSet) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) DataSegment(org.apache.druid.timeline.DataSegment) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) Handle(org.skife.jdbi.v2.Handle) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) SegmentPublishResult(org.apache.druid.indexing.overlord.SegmentPublishResult) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) HashSet(java.util.HashSet)
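
The transaction body above marks the transaction rollback-only and then throws to abort it. Here is a minimal sketch of that pattern with plain JDBI v2; it is not the Druid code path, the table and the failure condition are hypothetical, and an in-memory H2 database is assumed.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;

public class RollbackSketch {
    public static void main(String[] args) {
        DBI dbi = new DBI("jdbc:h2:mem:segments");
        dbi.inTransaction(new TransactionCallback<Void>() {
            @Override
            public Void inTransaction(Handle handle, TransactionStatus status) {
                // Hypothetical table, created only so the insert below has somewhere to go.
                handle.execute("CREATE TABLE segments (id VARCHAR(255) PRIMARY KEY)");
                int inserted = handle.insert("INSERT INTO segments (id) VALUES (?)", "seg-1");
                if (inserted != 1) {
                    // Nothing was written: mark the transaction rollback-only and abort,
                    // mirroring the failed-metadata-update branch above.
                    status.setRollbackOnly();
                    throw new RuntimeException("Aborting transaction!");
                }
                return null;
            }
        });
    }
}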

Example 94 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinatorTest method testDropSegmentsWithHandleForSegmentThatDoesNotExist.

@Test
public void testDropSegmentsWithHandleForSegmentThatDoesNotExist() {
    try (Handle handle = derbyConnector.getDBI().open()) {
        // Try to drop a segment that does not exist in the metadata store
        IndexerSQLMetadataStorageCoordinator.DataStoreMetadataUpdateResult result = coordinator.dropSegmentsWithHandle(handle, ImmutableSet.of(defaultSegment), defaultSegment.getDataSource());
        Assert.assertEquals(IndexerSQLMetadataStorageCoordinator.DataStoreMetadataUpdateResult.TRY_AGAIN, result);
    }
}
Also used : Handle(org.skife.jdbi.v2.Handle) Test(org.junit.Test)
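
Because a JDBI v2 Handle is Closeable, the try-with-resources form used in this test also works for ad-hoc queries. A minimal sketch, not from the Druid tests; the JDBC URL, table, and query are hypothetical, and an in-memory H2 database is assumed.

import java.util.List;
import java.util.Map;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class OpenHandleSketch {
    public static void main(String[] args) {
        DBI dbi = new DBI("jdbc:h2:mem:test");
        // The handle is closed automatically when the block exits.
        try (Handle handle = dbi.open()) {
            handle.execute("CREATE TABLE t (x INT)");
            handle.execute("INSERT INTO t (x) VALUES (?)", 42);
            List<Map<String, Object>> rows = handle.createQuery("SELECT x FROM t").list();
            System.out.println(rows);  // one row containing the inserted value
        }
    }
}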

Example 95 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinatorTest method testTransactionalAnnounceRetryAndSuccess.

@Test
public void testTransactionalAnnounceRetryAndSuccess() throws IOException {
    final AtomicLong attemptCounter = new AtomicLong();
    final IndexerSQLMetadataStorageCoordinator failOnceCoordinator = new IndexerSQLMetadataStorageCoordinator(mapper, derbyConnectorRule.metadataTablesConfigSupplier().get(), derbyConnector) {

        @Override
        protected DataStoreMetadataUpdateResult updateDataSourceMetadataWithHandle(Handle handle, String dataSource, DataSourceMetadata startMetadata, DataSourceMetadata endMetadata) throws IOException {
            metadataUpdateCounter.getAndIncrement();
            if (attemptCounter.getAndIncrement() == 0) {
                return DataStoreMetadataUpdateResult.TRY_AGAIN;
            } else {
                return super.updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
            }
        }
    };
    // Insert first segment.
    final SegmentPublishResult result1 = failOnceCoordinator.announceHistoricalSegments(ImmutableSet.of(defaultSegment), ImmutableSet.of(), new ObjectMetadata(null), new ObjectMetadata(ImmutableMap.of("foo", "bar")));
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment)), result1);
    Assert.assertArrayEquals(mapper.writeValueAsString(defaultSegment).getBytes(StandardCharsets.UTF_8), derbyConnector.lookup(derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(), "id", "payload", defaultSegment.getId().toString()));
    // Reset attempt counter to induce another failure.
    attemptCounter.set(0);
    // Insert second segment.
    final SegmentPublishResult result2 = failOnceCoordinator.announceHistoricalSegments(ImmutableSet.of(defaultSegment2), ImmutableSet.of(), new ObjectMetadata(ImmutableMap.of("foo", "bar")), new ObjectMetadata(ImmutableMap.of("foo", "baz")));
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment2)), result2);
    Assert.assertArrayEquals(mapper.writeValueAsString(defaultSegment2).getBytes(StandardCharsets.UTF_8), derbyConnector.lookup(derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(), "id", "payload", defaultSegment2.getId().toString()));
    // Examine metadata.
    Assert.assertEquals(new ObjectMetadata(ImmutableMap.of("foo", "baz")), failOnceCoordinator.retrieveDataSourceMetadata("fooDataSource"));
    // Should be tried twice per call.
    Assert.assertEquals(4, metadataUpdateCounter.get());
}
Also used : SegmentPublishResult(org.apache.druid.indexing.overlord.SegmentPublishResult) AtomicLong(java.util.concurrent.atomic.AtomicLong) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) ObjectMetadata(org.apache.druid.indexing.overlord.ObjectMetadata) Handle(org.skife.jdbi.v2.Handle) Test(org.junit.Test)
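
The fail-once behaviour above comes from an AtomicLong attempt counter inside the overridden method. Here is a minimal, generic sketch of that fault-injection pattern; it is not taken from the Druid tests, and the Result enum and the retry limit are hypothetical.

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class FailOnceSketch {
    enum Result { SUCCESS, TRY_AGAIN }

    public static void main(String[] args) {
        AtomicLong attemptCounter = new AtomicLong();
        // Fails on the first attempt only, like the overridden updateDataSourceMetadataWithHandle above.
        Supplier<Result> operation = () ->
                attemptCounter.getAndIncrement() == 0 ? Result.TRY_AGAIN : Result.SUCCESS;

        Result result = Result.TRY_AGAIN;
        for (int attempt = 0; attempt < 3 && result == Result.TRY_AGAIN; attempt++) {
            result = operation.get();
        }
        // Prints: SUCCESS after 2 attempts
        System.out.println(result + " after " + attemptCounter.get() + " attempts");
    }
}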

Aggregations

Handle (org.skife.jdbi.v2.Handle)103 DBI (org.skife.jdbi.v2.DBI)28 Before (org.junit.Before)21 IOException (java.io.IOException)18 List (java.util.List)17 DataSourceFactory (io.dropwizard.db.DataSourceFactory)15 DBIFactory (io.dropwizard.jdbi.DBIFactory)15 SQLException (java.sql.SQLException)15 Map (java.util.Map)14 Test (org.junit.Test)14 Test (org.testng.annotations.Test)14 DateTime (org.joda.time.DateTime)13 ArrayList (java.util.ArrayList)11 TransactionStatus (org.skife.jdbi.v2.TransactionStatus)11 ResultSet (java.sql.ResultSet)10 ImmutableList (com.google.common.collect.ImmutableList)8 UUID (java.util.UUID)8 CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException)7 ImmutableSet (com.google.common.collect.ImmutableSet)6 Set (java.util.Set)6