Search in sources :

Example 1 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project dropwizard by dropwizard.

the class LoggingDBIExceptionMapperTest method testPlainDBIException.

@Test
public void testPlainDBIException() throws Exception {
    DBIException dbiException = new TransactionFailedException("Transaction failed for unknown reason");
    dbiExceptionMapper.logException(9812, dbiException);
    verify(logger).error("Error handling a request: 0000000000002654", dbiException);
}
Also used : DBIException(org.skife.jdbi.v2.exceptions.DBIException) TransactionFailedException(org.skife.jdbi.v2.exceptions.TransactionFailedException) Test(org.junit.Test)
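
The assertion above implies that the mapper logs each exception under a 16-digit, zero-padded hexadecimal ID: 9812 in decimal is 0x2654, hence "0000000000002654". A minimal sketch of a mapper with that behavior, using hypothetical names rather than Dropwizard's actual LoggingDBIExceptionMapper internals:

import org.skife.jdbi.v2.exceptions.DBIException;
import org.slf4j.Logger;

// Hypothetical sketch: logs a DBIException under a zero-padded hex ID, matching the
// format the test verifies (String.format("%016X", 9812) yields "0000000000002654").
class HexIdDbiExceptionLogger {

    private final Logger logger;

    HexIdDbiExceptionLogger(Logger logger) {
        this.logger = logger;
    }

    void logException(long id, DBIException exception) {
        logger.error(String.format("Error handling a request: %016X", id), exception);
    }
}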

Example 2 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method announceHistoricalSegment.

/**
   * Attempts to insert a single segment into the database. If the segment already exists, this method does nothing;
   * however, the existence check is imperfect, so callers must be prepared to retry their entire transaction on
   * exceptions.
   *
   * @return true if the segment was added, false if it already existed
   */
private boolean announceHistoricalSegment(final Handle handle, final DataSegment segment, final boolean used) throws IOException {
    try {
        if (segmentExists(handle, segment)) {
            log.info("Found [%s] in DB, not updating DB", segment.getIdentifier());
            return false;
        }
        // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
        // Avoiding ON DUPLICATE KEY since it's not portable.
        // Avoiding try/catch since it may cause inadvertent transaction-splitting.
        handle.createStatement(
                String.format(
                        "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, partitioned, version, used, payload) "
                                + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                        dbTables.getSegmentsTable(),
                        connector.getQuoteString()
                ))
              .bind("id", segment.getIdentifier())
              .bind("dataSource", segment.getDataSource())
              .bind("created_date", new DateTime().toString())
              .bind("start", segment.getInterval().getStart().toString())
              .bind("end", segment.getInterval().getEnd().toString())
              .bind("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? false : true)
              .bind("version", segment.getVersion())
              .bind("used", used)
              .bind("payload", jsonMapper.writeValueAsBytes(segment))
              .execute();
        log.info("Published segment [%s] to DB", segment.getIdentifier());
    } catch (Exception e) {
        log.error(e, "Exception inserting segment [%s] into DB", segment.getIdentifier());
        throw e;
    }
    return true;
}
Also used : NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) DateTime(org.joda.time.DateTime) SQLException(java.sql.SQLException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException)
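
Because the SELECT (segmentExists) and the INSERT above are not atomic, two concurrent writers can both pass the existence check and one of them will then fail on the insert; the javadoc therefore pushes the retry obligation onto callers. A minimal, hypothetical caller-side retry helper illustrating that contract (not Druid's actual retry logic):

import java.util.concurrent.Callable;

// Hypothetical retry helper: re-runs the whole transaction when the non-atomic
// SELECT -> INSERT above loses a race and the insert throws.
final class TransactionRetrier {

    static <T> T withRetry(Callable<T> transaction, int maxTries) throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                return transaction.call();
            } catch (Exception e) {
                if (attempt >= maxTries) {
                    throw e;                      // out of attempts: propagate the last failure
                }
                Thread.sleep(100L * attempt);     // simple linear backoff before retrying
            }
        }
    }
}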

Example 3 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method announceHistoricalSegments.

/**
   * {@inheritDoc}
   */
@Override
public SegmentPublishResult announceHistoricalSegments(final Set<DataSegment> segments, final DataSourceMetadata startMetadata, final DataSourceMetadata endMetadata) throws IOException {
    if (segments.isEmpty()) {
        throw new IllegalArgumentException("segment set must not be empty");
    }
    final String dataSource = segments.iterator().next().getDataSource();
    for (DataSegment segment : segments) {
        if (!dataSource.equals(segment.getDataSource())) {
            throw new IllegalArgumentException("segments must all be from the same dataSource");
        }
    }
    if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
        throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
    }
    // Find which segments are used (i.e. not overshadowed).
    final Set<DataSegment> usedSegments = Sets.newHashSet();
    for (TimelineObjectHolder<String, DataSegment> holder : VersionedIntervalTimeline.forSegments(segments).lookup(JodaUtils.ETERNITY)) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            usedSegments.add(chunk.getObject());
        }
    }
    final AtomicBoolean txnFailure = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {

            @Override
            public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
                final Set<DataSegment> inserted = Sets.newHashSet();
                if (startMetadata != null) {
                    final DataSourceMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
                    if (result != DataSourceMetadataUpdateResult.SUCCESS) {
                        transactionStatus.setRollbackOnly();
                        txnFailure.set(true);
                        if (result == DataSourceMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataSourceMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                for (final DataSegment segment : segments) {
                    if (announceHistoricalSegment(handle, segment, usedSegments.contains(segment))) {
                        inserted.add(segment);
                    }
                }
                return new SegmentPublishResult(ImmutableSet.copyOf(inserted), true);
            }
        }, 3, SQLMetadataConnector.DEFAULT_MAX_TRIES);
    } catch (CallbackFailedException e) {
        if (txnFailure.get()) {
            return new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false);
        } else {
            throw e;
        }
    }
}
Also used : ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) Handle(org.skife.jdbi.v2.Handle) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) SegmentPublishResult(io.druid.indexing.overlord.SegmentPublishResult)
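
The AtomicBoolean here separates two very different meanings of CallbackFailedException: a deliberate abort after the metadata compare-and-swap failed (return a SegmentPublishResult with success = false) versus an unexpected failure (rethrow). A stripped-down, hypothetical sketch of that pattern against the plain JDBI v2 API, not Druid's retryTransaction wrapper:

import java.util.concurrent.atomic.AtomicBoolean;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

// Hypothetical sketch of the "definite failure" pattern: a flag set inside the callback
// tells the caller whether the CallbackFailedException means the transaction was aborted
// on purpose (report failure) or something else broke (rethrow).
final class DefiniteFailureExample {

    static boolean publish(DBI dbi) {
        final AtomicBoolean definiteFailure = new AtomicBoolean(false);
        try {
            return dbi.inTransaction(new TransactionCallback<Boolean>() {

                @Override
                public Boolean inTransaction(Handle handle, TransactionStatus status) throws Exception {
                    boolean preconditionHolds = false;   // stand-in for the metadata compare-and-swap check
                    if (!preconditionHolds) {
                        status.setRollbackOnly();        // make sure nothing gets committed
                        definiteFailure.set(true);       // record that the abort was deliberate
                        throw new RuntimeException("Aborting transaction!");
                    }
                    return true;                         // the real code inserts segments here
                }
            });
        } catch (CallbackFailedException e) {
            if (definiteFailure.get()) {
                return false;                            // deliberate abort: report failure, do not retry
            }
            throw e;                                     // unexpected failure: let the caller decide
        }
    }
}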

Example 4 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project druid by druid-io.

the class SQLMetadataSegmentManager method poll.

@Override
public void poll() {
    try {
        if (!started) {
            return;
        }
        ConcurrentHashMap<String, DruidDataSource> newDataSources = new ConcurrentHashMap<String, DruidDataSource>();
        log.debug("Starting polling of segment table");
        // some databases such as PostgreSQL require auto-commit turned off
        // to stream results back, enabling transactions disables auto-commit
        //
        // setting the connection to read-only will allow some databases such as MySQL
        // to automatically use read-only transaction mode, further optimizing the query
        final List<DataSegment> segments = connector.inReadOnlyTransaction(new TransactionCallback<List<DataSegment>>() {

            @Override
            public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
                return handle.createQuery(String.format("SELECT payload FROM %s WHERE used=true", getSegmentsTable()))
                             .setFetchSize(connector.getStreamingFetchSize())
                             .map(new ResultSetMapper<DataSegment>() {

                    @Override
                    public DataSegment map(int index, ResultSet r, StatementContext ctx) throws SQLException {
                        try {
                            return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class));
                        } catch (IOException e) {
                            log.makeAlert(e, "Failed to read segment from db.");
                            return null;
                        }
                    }
                }).list();
            }
        });
        if (segments == null || segments.isEmpty()) {
            log.warn("No segments found in the database!");
            return;
        }
        final Collection<DataSegment> segmentsFinal = Collections2.filter(segments, Predicates.notNull());
        log.info("Polled and found %,d segments in the database", segments.size());
        for (final DataSegment segment : segmentsFinal) {
            String datasourceName = segment.getDataSource();
            DruidDataSource dataSource = newDataSources.get(datasourceName);
            if (dataSource == null) {
                dataSource = new DruidDataSource(datasourceName, ImmutableMap.of("created", new DateTime().toString()));
                Object shouldBeNull = newDataSources.put(datasourceName, dataSource);
                if (shouldBeNull != null) {
                    log.warn("Just put key[%s] into dataSources and what was there wasn't null!?  It was[%s]", datasourceName, shouldBeNull);
                }
            }
            if (!dataSource.getSegments().contains(segment)) {
                dataSource.addSegment(segment.getIdentifier(), segment);
            }
        }
        synchronized (lock) {
            if (started) {
                dataSources.set(newDataSources);
            }
        }
    } catch (Exception e) {
        log.makeAlert(e, "Problem polling DB.").emit();
    }
}
Also used : SQLException(java.sql.SQLException) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) IOException(java.io.IOException) DruidDataSource(io.druid.client.DruidDataSource) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) DateTime(org.joda.time.DateTime) Handle(org.skife.jdbi.v2.Handle) StatementContext(org.skife.jdbi.v2.StatementContext) ResultSet(java.sql.ResultSet) List(java.util.List) ArrayList(java.util.ArrayList) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
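
The comment explains why the query runs through inReadOnlyTransaction: PostgreSQL only streams results when auto-commit is off, and MySQL can switch to a cheaper read-only transaction mode. A rough sketch of what such a helper would have to set on the underlying JDBC connection before running the callback (an assumption for illustration, not Druid's actual SQLMetadataConnector implementation):

import java.sql.Connection;
import java.sql.SQLException;

// Hypothetical helper: runs work with auto-commit off (so PostgreSQL streams results)
// and the connection marked read-only (so MySQL can use a read-only transaction),
// restoring the previous settings afterwards.
final class ReadOnlyTransactions {

    interface Work<T> {
        T run(Connection connection) throws SQLException;
    }

    static <T> T inReadOnlyTransaction(Connection connection, Work<T> work) throws SQLException {
        final boolean oldAutoCommit = connection.getAutoCommit();
        final boolean oldReadOnly = connection.isReadOnly();
        try {
            connection.setAutoCommit(false);
            connection.setReadOnly(true);
            T result = work.run(connection);
            connection.commit();                 // read-only, but end the transaction cleanly
            return result;
        } catch (SQLException e) {
            connection.rollback();
            throw e;
        } finally {
            connection.setReadOnly(oldReadOnly);
            connection.setAutoCommit(oldAutoCommit);
        }
    }
}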

Example 5 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project hive by apache.

the class DruidStorageHandlerUtils method publishSegmentsAndCommit.

/**
 * First computes the segment timeline to accommodate the new segments (for the INSERT INTO case),
 * then moves the segments to Druid deep storage with updated metadata/version.
 * All of this is done in a single transaction.
 *
 * @param connector DBI connector to commit
 * @param metadataStorageTablesConfig Druid metadata tables definitions
 * @param dataSource Druid datasource name
 * @param segments List of segments to move and commit to metadata
 * @param overwrite if it is an insert overwrite
 * @param conf Configuration
 * @param dataSegmentPusher segment pusher
 *
 * @return List of successfully published Druid segments.
 * This list has the updated versions and metadata about segments after move and timeline sorting
 *
 * @throws CallbackFailedException
 */
public static List<DataSegment> publishSegmentsAndCommit(final SQLMetadataConnector connector, final MetadataStorageTablesConfig metadataStorageTablesConfig, final String dataSource, final List<DataSegment> segments, boolean overwrite, Configuration conf, DataSegmentPusher dataSegmentPusher) throws CallbackFailedException {
    return connector.getDBI().inTransaction((handle, transactionStatus) -> {
        // We create the timeline for the existing and new segments
        VersionedIntervalTimeline<String, DataSegment> timeline;
        if (overwrite) {
            // If we are overwriting, we disable existing sources
            disableDataSourceWithHandle(handle, metadataStorageTablesConfig, dataSource);
            // When overwriting, we just start with empty timeline,
            // as we are overwriting segments with new versions
            timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        } else {
            // Append Mode
            if (segments.isEmpty()) {
                // If there are no new segments, we can just bail out
                return Collections.EMPTY_LIST;
            }
            // Otherwise, build a timeline of existing segments in metadata storage
            Interval indexedInterval = JodaUtils.umbrellaInterval(Iterables.transform(segments, input -> input.getInterval()));
            LOG.info("Building timeline for umbrella Interval [{}]", indexedInterval);
            timeline = getTimelineForIntervalWithHandle(handle, dataSource, indexedInterval, metadataStorageTablesConfig);
        }
        final List<DataSegment> finalSegmentsToPublish = Lists.newArrayList();
        for (DataSegment segment : segments) {
            List<TimelineObjectHolder<String, DataSegment>> existingChunks = timeline.lookup(segment.getInterval());
            if (existingChunks.size() > 1) {
                // Druid shard specs do not support multiple partitions for the same interval with different granularity.
                throw new IllegalStateException(String.format("Cannot allocate new segment for dataSource[%s], interval[%s], already have [%,d] chunks. Not possible to append new segment.", dataSource, segment.getInterval(), existingChunks.size()));
            }
            // Find out the segment with latest version and maximum partition number
            SegmentIdentifier max = null;
            final ShardSpec newShardSpec;
            final String newVersion;
            if (!existingChunks.isEmpty()) {
                // Some existing chunk, Find max
                TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                    if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                        max = SegmentIdentifier.fromDataSegment(existing.getObject());
                    }
                }
            }
            if (max == null) {
                // No existing shard present in the database, use the current version.
                newShardSpec = segment.getShardSpec();
                newVersion = segment.getVersion();
            } else {
                // use version of existing max segment to generate new shard spec
                newShardSpec = getNextPartitionShardSpec(max.getShardSpec());
                newVersion = max.getVersion();
            }
            DataSegment publishedSegment = publishSegmentWithShardSpec(segment, newShardSpec, newVersion, getPath(segment).getFileSystem(conf), dataSegmentPusher);
            finalSegmentsToPublish.add(publishedSegment);
            timeline.add(publishedSegment.getInterval(), publishedSegment.getVersion(), publishedSegment.getShardSpec().createChunk(publishedSegment));
        }
        // Publish new segments to metadata storage
        final PreparedBatch batch = handle.prepareBatch(
                String.format(
                        "INSERT INTO %1$s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) "
                                + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                        metadataStorageTablesConfig.getSegmentsTable()
                )
        );
        for (final DataSegment segment : finalSegmentsToPublish) {
            batch.add(new ImmutableMap.Builder<String, Object>()
                    .put("id", segment.getIdentifier())
                    .put("dataSource", segment.getDataSource())
                    .put("created_date", new DateTime().toString())
                    .put("start", segment.getInterval().getStart().toString())
                    .put("end", segment.getInterval().getEnd().toString())
                    .put("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? false : true)
                    .put("version", segment.getVersion())
                    .put("used", true)
                    .put("payload", JSON_MAPPER.writeValueAsBytes(segment))
                    .build());
            LOG.info("Published {}", segment.getIdentifier());
        }
        batch.execute();
        return finalSegmentsToPublish;
    });
}
Also used : SQLMetadataConnector(io.druid.metadata.SQLMetadataConnector) FoldController(org.skife.jdbi.v2.FoldController) Request(com.metamx.http.client.Request) FileSystem(org.apache.hadoop.fs.FileSystem) URL(java.net.URL) HttpMethod(org.jboss.netty.handler.codec.http.HttpMethod) LoggerFactory(org.slf4j.LoggerFactory) RetryPolicies(org.apache.hadoop.io.retry.RetryPolicies) FileStatus(org.apache.hadoop.fs.FileStatus) StatementContext(org.skife.jdbi.v2.StatementContext) InetAddress(java.net.InetAddress) SelectQueryConfig(io.druid.query.select.SelectQueryConfig) InputStreamResponseHandler(com.metamx.http.client.response.InputStreamResponseHandler) IndexIO(io.druid.segment.IndexIO) CharStreams(com.google.common.io.CharStreams) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) NamedType(com.fasterxml.jackson.databind.jsontype.NamedType) Path(org.apache.hadoop.fs.Path) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) TimestampFloorExprMacro(io.druid.query.expression.TimestampFloorExprMacro) VersionedIntervalTimeline(io.druid.timeline.VersionedIntervalTimeline) ByteArrayMapper(org.skife.jdbi.v2.util.ByteArrayMapper) DataSegment(io.druid.timeline.DataSegment) ImmutableMap(com.google.common.collect.ImmutableMap) TimeZone(java.util.TimeZone) MapUtils(com.metamx.common.MapUtils) Collection(java.util.Collection) Set(java.util.Set) Interner(com.google.common.collect.Interner) Reader(java.io.Reader) MetadataStorageTablesConfig(io.druid.metadata.MetadataStorageTablesConfig) FileNotFoundException(java.io.FileNotFoundException) TimestampParseExprMacro(io.druid.query.expression.TimestampParseExprMacro) List(java.util.List) PartitionChunk(io.druid.timeline.partition.PartitionChunk) ISOChronology(org.joda.time.chrono.ISOChronology) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) TrimExprMacro(io.druid.query.expression.TrimExprMacro) HttpClient(com.metamx.http.client.HttpClient) Iterables(com.google.common.collect.Iterables) InjectableValues(com.fasterxml.jackson.databind.InjectableValues) TimestampFormatExprMacro(io.druid.query.expression.TimestampFormatExprMacro) SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) TimestampExtractExprMacro(io.druid.query.expression.TimestampExtractExprMacro) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) RegexpExtractExprMacro(io.druid.query.expression.RegexpExtractExprMacro) LikeExprMacro(io.druid.query.expression.LikeExprMacro) TimestampCeilExprMacro(io.druid.query.expression.TimestampCeilExprMacro) ShardSpec(io.druid.timeline.partition.ShardSpec) ArrayList(java.util.ArrayList) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) HashSet(java.util.HashSet) IndexMergerV9(io.druid.segment.IndexMergerV9) Interval(org.joda.time.Interval) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) JodaUtils(com.metamx.common.JodaUtils) ImmutableList(com.google.common.collect.ImmutableList) StringUtils(org.apache.hadoop.util.StringUtils) ResultIterator(org.skife.jdbi.v2.ResultIterator) TimestampShiftExprMacro(io.druid.query.expression.TimestampShiftExprMacro) OutputStream(java.io.OutputStream) HttpHeaders(org.jboss.netty.handler.codec.http.HttpHeaders) NumberedShardSpec(io.druid.timeline.partition.NumberedShardSpec) Logger(org.slf4j.Logger) Folder3(org.skife.jdbi.v2.Folder3) 
HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) EmittingLogger(com.metamx.emitter.EmittingLogger) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) Throwables(com.google.common.base.Throwables) Interners(com.google.common.collect.Interners) Query(org.skife.jdbi.v2.Query) IOException(java.io.IOException) InputStreamReader(java.io.InputStreamReader) UnknownHostException(java.net.UnknownHostException) SmileFactory(com.fasterxml.jackson.dataformat.smile.SmileFactory) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) HdfsDataSegmentPusherConfig(io.druid.storage.hdfs.HdfsDataSegmentPusherConfig) Handle(org.skife.jdbi.v2.Handle) Ordering(com.google.common.collect.Ordering) ExprMacroTable(io.druid.math.expr.ExprMacroTable) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) HiveDruidSerializationModule(org.apache.hadoop.hive.druid.serde.HiveDruidSerializationModule) RetryProxy(org.apache.hadoop.io.retry.RetryProxy) NoopEmitter(com.metamx.emitter.core.NoopEmitter) ServiceEmitter(com.metamx.emitter.service.ServiceEmitter) Collections(java.util.Collections) MySQLConnector(io.druid.metadata.storage.mysql.MySQLConnector) InputStream(java.io.InputStream) SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) DataSegment(io.druid.timeline.DataSegment) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) ShardSpec(io.druid.timeline.partition.ShardSpec) NumberedShardSpec(io.druid.timeline.partition.NumberedShardSpec) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) ImmutableMap(com.google.common.collect.ImmutableMap) DateTime(org.joda.time.DateTime) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) Interval(org.joda.time.Interval)
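
getNextPartitionShardSpec and publishSegmentWithShardSpec are referenced above but not shown. In append mode the new segment must occupy the next partition slot under the latest existing version; a plausible sketch of the shard-spec bump for numbered/linear shard specs (an assumption about the helper, not the actual Hive implementation):

import io.druid.timeline.partition.LinearShardSpec;
import io.druid.timeline.partition.NumberedShardSpec;
import io.druid.timeline.partition.ShardSpec;

// Plausible sketch of the partition bump: take the shard spec of the current "max"
// segment and produce one that addresses the next partition number in the same interval.
final class NextPartitionShardSpec {

    static ShardSpec next(ShardSpec maxShardSpec) {
        final int nextPartitionNum = maxShardSpec.getPartitionNum() + 1;
        if (maxShardSpec instanceof NumberedShardSpec) {
            // reuse the declared partition count from the existing spec
            return new NumberedShardSpec(nextPartitionNum, ((NumberedShardSpec) maxShardSpec).getPartitions());
        }
        // otherwise fall back to a linear shard spec, which only needs the partition number
        return new LinearShardSpec(nextPartitionNum);
    }
}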

Aggregations

IOException (java.io.IOException)6 DataSegment (io.druid.timeline.DataSegment)5 SQLException (java.sql.SQLException)5 Handle (org.skife.jdbi.v2.Handle)5 CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException)5 ArrayList (java.util.ArrayList)4 DateTime (org.joda.time.DateTime)4 List (java.util.List)3 TransactionStatus (org.skife.jdbi.v2.TransactionStatus)3 ImmutableList (com.google.common.collect.ImmutableList)2 NoneShardSpec (io.druid.timeline.partition.NoneShardSpec)2 Set (java.util.Set)2 StatementContext (org.skife.jdbi.v2.StatementContext)2 RaptorColumnHandle (com.facebook.presto.raptor.RaptorColumnHandle)1 ShardOrganizerDao (com.facebook.presto.raptor.storage.organization.ShardOrganizerDao)1 InjectableValues (com.fasterxml.jackson.databind.InjectableValues)1 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)1 NamedType (com.fasterxml.jackson.databind.jsontype.NamedType)1 SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory)1 Throwables (com.google.common.base.Throwables)1