Example 1 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegments:

/**
 * {@inheritDoc}
 */
@Override
public SegmentPublishResult announceHistoricalSegments(final Set<DataSegment> segments, final DataSourceMetadata startMetadata, final DataSourceMetadata endMetadata) throws IOException {
    if (segments.isEmpty()) {
        throw new IllegalArgumentException("segment set must not be empty");
    }
    final String dataSource = segments.iterator().next().getDataSource();
    for (DataSegment segment : segments) {
        if (!dataSource.equals(segment.getDataSource())) {
            throw new IllegalArgumentException("segments must all be from the same dataSource");
        }
    }
    if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
        throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
    }
    // Find which segments are used (i.e. not overshadowed).
    final Set<DataSegment> usedSegments = Sets.newHashSet();
    for (TimelineObjectHolder<String, DataSegment> holder : VersionedIntervalTimeline.forSegments(segments).lookup(JodaUtils.ETERNITY)) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            usedSegments.add(chunk.getObject());
        }
    }
    final AtomicBoolean txnFailure = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {

            @Override
            public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
                final Set<DataSegment> inserted = Sets.newHashSet();
                if (startMetadata != null) {
                    final DataSourceMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
                    if (result != DataSourceMetadataUpdateResult.SUCCESS) {
                        transactionStatus.setRollbackOnly();
                        txnFailure.set(true);
                        if (result == DataSourceMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataSourceMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                for (final DataSegment segment : segments) {
                    if (announceHistoricalSegment(handle, segment, usedSegments.contains(segment))) {
                        inserted.add(segment);
                    }
                }
                return new SegmentPublishResult(ImmutableSet.copyOf(inserted), true);
            }
        }, 3, SQLMetadataConnector.DEFAULT_MAX_TRIES);
    } catch (CallbackFailedException e) {
        if (txnFailure.get()) {
            return new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false);
        } else {
            throw e;
        }
    }
}
Also used: ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DataSegment (io.druid.timeline.DataSegment), SQLException (java.sql.SQLException), IOException (java.io.IOException), CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException), Handle (org.skife.jdbi.v2.Handle), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), SegmentPublishResult (io.druid.indexing.overlord.SegmentPublishResult)
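
Stripped of the Druid-specific logic, the pattern above is: hand a TransactionCallback to JDBI, which opens a transaction, invokes inTransaction, and commits on normal return or rolls back on exception. The following is a minimal, self-contained sketch of that core shape; the DBI construction, table, and values are hypothetical and assume an H2 driver on the classpath, while the callback shape matches the Druid code above.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;

public class TransactionCallbackSketch {
    public static void main(String[] args) {
        // Hypothetical in-memory database, for illustration only.
        final DBI dbi = new DBI("jdbc:h2:mem:sketch", "sa", "");
        final Integer inserted = dbi.inTransaction(new TransactionCallback<Integer>() {
            @Override
            public Integer inTransaction(Handle handle, TransactionStatus status) throws Exception {
                handle.execute("CREATE TABLE t (id INT, name VARCHAR(64))");
                // Both inserts commit together; throwing here would roll both back.
                int count = handle.insert("INSERT INTO t (id, name) VALUES (?, ?)", 1, "a");
                count += handle.insert("INSERT INTO t (id, name) VALUES (?, ?)", 2, "b");
                return count;
            }
        });
        System.out.println("Inserted " + inserted + " rows");
    }
}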

Example 2 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project druid by druid-io.

From the class SQLMetadataSegmentManager, method enableDatasource:

@Override
public boolean enableDatasource(final String ds) {
    try {
        final IDBI dbi = connector.getDBI();
        VersionedIntervalTimeline<String, DataSegment> segmentTimeline = connector.inReadOnlyTransaction(new TransactionCallback<VersionedIntervalTimeline<String, DataSegment>>() {

            @Override
            public VersionedIntervalTimeline<String, DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
                return handle
                        .createQuery(String.format("SELECT payload FROM %s WHERE dataSource = :dataSource", getSegmentsTable()))
                        .setFetchSize(connector.getStreamingFetchSize())
                        .bind("dataSource", ds)
                        .map(ByteArrayMapper.FIRST)
                        .fold(
                                new VersionedIntervalTimeline<String, DataSegment>(Ordering.natural()),
                                new Folder3<VersionedIntervalTimeline<String, DataSegment>, byte[]>() {
                                    @Override
                                    public VersionedIntervalTimeline<String, DataSegment> fold(
                                            VersionedIntervalTimeline<String, DataSegment> timeline,
                                            byte[] payload,
                                            FoldController foldController,
                                            StatementContext statementContext
                                    ) throws SQLException {
                                        try {
                                            final DataSegment segment = DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(payload, DataSegment.class));
                                            timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
                                            return timeline;
                                        } catch (Exception e) {
                                            throw new SQLException(e.toString());
                                        }
                                    }
                                }
                        );
            }
        });
        final List<DataSegment> segments = Lists.newArrayList();
        for (TimelineObjectHolder<String, DataSegment> objectHolder : segmentTimeline.lookup(new Interval("0000-01-01/3000-01-01"))) {
            for (PartitionChunk<DataSegment> partitionChunk : objectHolder.getObject()) {
                segments.add(partitionChunk.getObject());
            }
        }
        if (segments.isEmpty()) {
            log.warn("No segments found in the database!");
            return false;
        }
        dbi.withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                Batch batch = handle.createBatch();
                for (DataSegment segment : segments) {
                    batch.add(String.format("UPDATE %s SET used=true WHERE id = '%s'", getSegmentsTable(), segment.getIdentifier()));
                }
                batch.execute();
                return null;
            }
        });
    } catch (Exception e) {
        log.error(e, "Exception enabling datasource %s", ds);
        return false;
    }
    return true;
}
Also used: IDBI (org.skife.jdbi.v2.IDBI), SQLException (java.sql.SQLException), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DataSegment (io.druid.timeline.DataSegment), IOException (java.io.IOException), Handle (org.skife.jdbi.v2.Handle), StatementContext (org.skife.jdbi.v2.StatementContext), FoldController (org.skife.jdbi.v2.FoldController), Batch (org.skife.jdbi.v2.Batch), VersionedIntervalTimeline (io.druid.timeline.VersionedIntervalTimeline), Folder3 (org.skife.jdbi.v2.Folder3), Interval (org.joda.time.Interval)
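
Example 2 also exercises JDBI v2's fold API, which streams rows into an accumulator instead of materializing a full result list. Below is a hedged sketch of the same Folder3 shape, reduced to summing a numeric column; the payments table and amount column are made up for illustration.

import java.sql.SQLException;
import org.skife.jdbi.v2.FoldController;
import org.skife.jdbi.v2.Folder3;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.util.LongMapper;

public class FoldSketch {
    // Accumulates the (hypothetical) amount column row by row, never holding all rows in memory.
    static long totalAmount(Handle handle) {
        return handle.createQuery("SELECT amount FROM payments")
                .map(LongMapper.FIRST)
                .fold(0L, new Folder3<Long, Long>() {
                    @Override
                    public Long fold(Long total, Long amount, FoldController control, StatementContext ctx) throws SQLException {
                        return total + amount;
                    }
                });
    }
}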

Example 3 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project druid by druid-io.

From the class SQLMetadataSegmentManager, method poll:

@Override
public void poll() {
    try {
        if (!started) {
            return;
        }
        ConcurrentHashMap<String, DruidDataSource> newDataSources = new ConcurrentHashMap<String, DruidDataSource>();
        log.debug("Starting polling of segment table");
        // some databases such as PostgreSQL require auto-commit turned off
        // to stream results back; enabling transactions disables auto-commit
        //
        // setting the connection to read-only allows some databases such as MySQL
        // to automatically use read-only transaction mode, further optimizing the query
        final List<DataSegment> segments = connector.inReadOnlyTransaction(new TransactionCallback<List<DataSegment>>() {

            @Override
            public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
                return handle
                        .createQuery(String.format("SELECT payload FROM %s WHERE used=true", getSegmentsTable()))
                        .setFetchSize(connector.getStreamingFetchSize())
                        .map(new ResultSetMapper<DataSegment>() {

                    @Override
                    public DataSegment map(int index, ResultSet r, StatementContext ctx) throws SQLException {
                        try {
                            return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class));
                        } catch (IOException e) {
                            log.makeAlert(e, "Failed to read segment from db.");
                            return null;
                        }
                    }
                }).list();
            }
        });
        if (segments == null || segments.isEmpty()) {
            log.warn("No segments found in the database!");
            return;
        }
        final Collection<DataSegment> segmentsFinal = Collections2.filter(segments, Predicates.notNull());
        log.info("Polled and found %,d segments in the database", segments.size());
        for (final DataSegment segment : segmentsFinal) {
            String datasourceName = segment.getDataSource();
            DruidDataSource dataSource = newDataSources.get(datasourceName);
            if (dataSource == null) {
                dataSource = new DruidDataSource(datasourceName, ImmutableMap.of("created", new DateTime().toString()));
                Object shouldBeNull = newDataSources.put(datasourceName, dataSource);
                if (shouldBeNull != null) {
                    log.warn("Just put key[%s] into dataSources and what was there wasn't null!?  It was[%s]", datasourceName, shouldBeNull);
                }
            }
            if (!dataSource.getSegments().contains(segment)) {
                dataSource.addSegment(segment.getIdentifier(), segment);
            }
        }
        synchronized (lock) {
            if (started) {
                dataSources.set(newDataSources);
            }
        }
    } catch (Exception e) {
        log.makeAlert(e, "Problem polling DB.").emit();
    }
}
Also used: SQLException (java.sql.SQLException), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), IOException (java.io.IOException), DruidDataSource (io.druid.client.DruidDataSource), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), Handle (org.skife.jdbi.v2.Handle), StatementContext (org.skife.jdbi.v2.StatementContext), ResultSet (java.sql.ResultSet), List (java.util.List), ArrayList (java.util.ArrayList), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
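
The poll() example maps each row through an anonymous ResultSetMapper. When a mapper is reusable, it can be pulled out into a named class; the sketch below shows that shape with a hypothetical row type (the id and used columns echo the segments table above but are assumptions here, not Druid's actual mapper).

import java.sql.ResultSet;
import java.sql.SQLException;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

// Hypothetical row type, for illustration only.
class SegmentRow {
    final String id;
    final boolean used;

    SegmentRow(String id, boolean used) {
        this.id = id;
        this.used = used;
    }
}

class SegmentRowMapper implements ResultSetMapper<SegmentRow> {
    @Override
    public SegmentRow map(int index, ResultSet r, StatementContext ctx) throws SQLException {
        return new SegmentRow(r.getString("id"), r.getBoolean("used"));
    }
}

// Usage: handle.createQuery("SELECT id, used FROM segments").map(new SegmentRowMapper()).list();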

Example 4 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project killbill by killbill.

From the class DefaultUserDao, method addRoleDefinition:

@Override
public void addRoleDefinition(final String role, final List<String> permissions, final String createdBy) throws SecurityApiException {
    final DateTime createdDate = clock.getUTCNow();
    inTransactionWithExceptionHandling(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
            final RolesPermissionsSqlDao rolesPermissionsSqlDao = handle.attach(RolesPermissionsSqlDao.class);
            final List<RolesPermissionsModelDao> existingRole = rolesPermissionsSqlDao.getByRoleName(role);
            if (!existingRole.isEmpty()) {
                throw new SecurityApiException(ErrorCode.SECURITY_ROLE_ALREADY_EXISTS, role);
            }
            for (final String permission : permissions) {
                rolesPermissionsSqlDao.create(new RolesPermissionsModelDao(role, permission, createdDate, createdBy));
            }
            return null;
        }
    });
}
Also used: TransactionStatus (org.skife.jdbi.v2.TransactionStatus), List (java.util.List), SecurityApiException (org.killbill.billing.security.SecurityApiException), DateTime (org.joda.time.DateTime), Handle (org.skife.jdbi.v2.Handle)
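
RolesPermissionsSqlDao is obtained via handle.attach(...), JDBI v2's SQL Object API: an annotated interface that JDBI implements at runtime and binds to the transaction's handle, so DAO calls share the surrounding transaction. A rough sketch of what such a DAO interface can look like follows; the table, columns, and method names are hypothetical, not Kill Bill's actual schema.

import java.util.List;
import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

// Hypothetical DAO; inside a TransactionCallback it would be obtained with
// handle.attach(RoleDao.class) and run on the transaction's connection.
public interface RoleDao {
    @SqlQuery("SELECT role_name FROM roles_permissions WHERE role_name = :roleName")
    List<String> getByRoleName(@Bind("roleName") String roleName);

    @SqlUpdate("INSERT INTO roles_permissions (role_name, permission) VALUES (:roleName, :permission)")
    void create(@Bind("roleName") String roleName, @Bind("permission") String permission);
}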

Example 5 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project killbill by killbill.

From the class DefaultUserDao, method insertUser:

@Override
public void insertUser(final String username, final String password, final List<String> roles, final String createdBy) throws SecurityApiException {
    final ByteSource salt = rng.nextBytes();
    final String hashedPasswordBase64 = new SimpleHash(KillbillCredentialsMatcher.HASH_ALGORITHM_NAME, password, salt.toBase64(), securityConfig.getShiroNbHashIterations()).toBase64();
    final DateTime createdDate = clock.getUTCNow();
    inTransactionWithExceptionHandling(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
            final UserRolesSqlDao userRolesSqlDao = handle.attach(UserRolesSqlDao.class);
            for (final String role : roles) {
                userRolesSqlDao.create(new UserRolesModelDao(username, role, createdDate, createdBy));
            }
            final UsersSqlDao usersSqlDao = handle.attach(UsersSqlDao.class);
            final UserModelDao userModelDao = usersSqlDao.getByUsername(username);
            if (userModelDao != null) {
                throw new SecurityApiException(ErrorCode.SECURITY_USER_ALREADY_EXISTS, username);
            }
            usersSqlDao.create(new UserModelDao(username, hashedPasswordBase64, salt.toBase64(), createdDate, createdBy));
            return null;
        }
    });
}
Also used: TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DateTime (org.joda.time.DateTime), SecurityApiException (org.killbill.billing.security.SecurityApiException), Handle (org.skife.jdbi.v2.Handle), SimpleHash (org.apache.shiro.crypto.hash.SimpleHash), ByteSource (org.apache.shiro.util.ByteSource)
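
insertUser relies on rollback-on-throw semantics: if the duplicate-user check fails, the SecurityApiException aborts the transaction, so the role rows created earlier in the callback are never committed. A minimal sketch of that behavior (with a made-up table and exception) is below; note that JDBI rethrows the callback's exception wrapped in a CallbackFailedException.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

public class RollbackSketch {
    static void demo(DBI dbi) {
        try {
            dbi.inTransaction(new TransactionCallback<Void>() {
                @Override
                public Void inTransaction(Handle handle, TransactionStatus status) throws Exception {
                    handle.insert("INSERT INTO users (name) VALUES (?)", "alice");
                    // Throwing aborts the transaction: the insert above is rolled back.
                    throw new IllegalStateException("user already exists");
                }
            });
        } catch (CallbackFailedException e) {
            // e.getCause() is the IllegalStateException; nothing was committed.
        }
    }
}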

Aggregations

Handle (org.skife.jdbi.v2.Handle)10 TransactionStatus (org.skife.jdbi.v2.TransactionStatus)10 IOException (java.io.IOException)6 DateTime (org.joda.time.DateTime)6 DataSegment (io.druid.timeline.DataSegment)4 SQLException (java.sql.SQLException)4 List (java.util.List)4 SecurityApiException (org.killbill.billing.security.SecurityApiException)4 CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException)3 ImmutableSet (com.google.common.collect.ImmutableSet)2 ResultSet (java.sql.ResultSet)2 ArrayList (java.util.ArrayList)2 Set (java.util.Set)2 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)2 SimpleHash (org.apache.shiro.crypto.hash.SimpleHash)2 ByteSource (org.apache.shiro.util.ByteSource)2 StatementContext (org.skife.jdbi.v2.StatementContext)2 JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException)1 Predicate (com.google.common.base.Predicate)1 ImmutableList (com.google.common.collect.ImmutableList)1