Example 6 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project killbill by killbill.

Class DefaultUserDao, method updateUserPassword:

@Override
public void updateUserPassword(final String username, final String password, final String updatedBy) throws SecurityApiException {
    final ByteSource salt = rng.nextBytes();
    final String hashedPasswordBase64 = new SimpleHash(KillbillCredentialsMatcher.HASH_ALGORITHM_NAME,
                                                       password,
                                                       salt.toBase64(),
                                                       securityConfig.getShiroNbHashIterations()).toBase64();
    inTransactionWithExceptionHandling(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
            final DateTime updatedDate = clock.getUTCNow();
            final UsersSqlDao usersSqlDao = handle.attach(UsersSqlDao.class);
            final UserModelDao userModelDao = usersSqlDao.getByUsername(username);
            if (userModelDao == null) {
                throw new SecurityApiException(ErrorCode.SECURITY_INVALID_USER, username);
            }
            usersSqlDao.updatePassword(username, hashedPasswordBase64, salt.toBase64(), updatedDate.toDate(), updatedBy);
            return null;
        }
    });
}
Also used: TransactionStatus (org.skife.jdbi.v2.TransactionStatus), SecurityApiException (org.killbill.billing.security.SecurityApiException), DateTime (org.joda.time.DateTime), Handle (org.skife.jdbi.v2.Handle), SimpleHash (org.apache.shiro.crypto.hash.SimpleHash), ByteSource (org.apache.shiro.util.ByteSource)
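
For context, inTransactionWithExceptionHandling is a helper on the Kill Bill DAO, not part of the JDBI API. A minimal sketch of what such a wrapper might look like, assuming the DAO holds an IDBI instance; the body is an illustration of the unwrap-and-rethrow idea, not Kill Bill's actual implementation:

    // Hypothetical sketch: run a JDBI transaction and surface the domain exception.
    private <T> T inTransactionWithExceptionHandling(final TransactionCallback<T> callback) throws SecurityApiException {
        try {
            return dbi.inTransaction(callback);
        } catch (final CallbackFailedException e) {
            // JDBI v2 wraps exceptions thrown inside the callback; unwrap ours.
            if (e.getCause() instanceof SecurityApiException) {
                throw (SecurityApiException) e.getCause();
            }
            throw e;
        }
    }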

Example 7 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project killbill by killbill.

Class TestMysqlGlobalLocker, method testSimpleLocking:

// Used as a manual test to validate the simple DAO by stepping through, verifying that the lock is taken and released correctly
@Test(groups = "slow")
public void testSimpleLocking() throws IOException, LockFailedException {
    final String lockName = UUID.randomUUID().toString();
    final GlobalLock lock = locker.lockWithNumberOfTries(LockerType.ACCNT_INV_PAY.toString(), lockName, 3);
    dbi.inTransaction(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle conn, final TransactionStatus status) throws Exception {
            conn.execute("insert into dummy2 (dummy_id) values ('" + UUID.randomUUID().toString() + "')");
            return null;
        }
    });
    Assert.assertFalse(locker.isFree(LockerType.ACCNT_INV_PAY.toString(), lockName));
    boolean gotException = false;
    try {
        locker.lockWithNumberOfTries(LockerType.ACCNT_INV_PAY.toString(), lockName, 1);
    } catch (LockFailedException e) {
        gotException = true;
    }
    Assert.assertTrue(gotException);
    lock.release();
    Assert.assertTrue(locker.isFree(LockerType.ACCNT_INV_PAY.toString(), lockName));
}
Also used: GlobalLock (org.killbill.commons.locker.GlobalLock), LockFailedException (org.killbill.commons.locker.LockFailedException), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), IOException (java.io.IOException), Handle (org.skife.jdbi.v2.Handle), Test (org.testng.annotations.Test)
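
The test releases the lock explicitly so it can assert on isFree in between; non-test code would normally pair the acquire with a finally block. A minimal sketch using the same GlobalLocker API exercised above (doWork is a placeholder for the critical section):

    final GlobalLock lock = locker.lockWithNumberOfTries(LockerType.ACCNT_INV_PAY.toString(), lockName, 3);
    try {
        // Critical section: work that must not run concurrently for this lockName.
        doWork();
    } finally {
        // Always release, even if the critical section throws.
        lock.release();
    }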

Example 8 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project druid by druid-io.

Class IndexerSQLMetadataStorageCoordinator, method allocatePendingSegment:

@Override
public SegmentIdentifier allocatePendingSegment(final String dataSource, final String sequenceName, final String previousSegmentId, final Interval interval, final String maxVersion) throws IOException {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(sequenceName, "sequenceName");
    Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkNotNull(maxVersion, "maxVersion");
    final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId;
    return connector.retryTransaction(new TransactionCallback<SegmentIdentifier>() {

        @Override
        public SegmentIdentifier inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception {
            final List<byte[]> existingBytes = handle
                    .createQuery(
                        String.format(
                            "SELECT payload FROM %s WHERE "
                            + "dataSource = :dataSource AND "
                            + "sequence_name = :sequence_name AND "
                            + "sequence_prev_id = :sequence_prev_id",
                            dbTables.getPendingSegmentsTable()
                        )
                    )
                    .bind("dataSource", dataSource)
                    .bind("sequence_name", sequenceName)
                    .bind("sequence_prev_id", previousSegmentIdNotNull)
                    .map(ByteArrayMapper.FIRST)
                    .list();
            if (!existingBytes.isEmpty()) {
                final SegmentIdentifier existingIdentifier = jsonMapper.readValue(Iterables.getOnlyElement(existingBytes), SegmentIdentifier.class);
                if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis() && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) {
                    log.info("Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
                    return existingIdentifier;
                } else {
                    log.warn("Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, " + "does not match requested interval[%s]", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull, interval);
                    return null;
                }
            }
            // Make up a pending segment based on existing segments and pending segments in the DB. This works
            // assuming that all tasks inserting segments at a particular point in time are going through the
            // allocatePendingSegment flow. This should be assured through some other mechanism (like task locks).
            final SegmentIdentifier newIdentifier;
            final List<TimelineObjectHolder<String, DataSegment>> existingChunks = getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
            if (existingChunks.size() > 1) {
                // Not possible to expand more than one chunk with a single segment.
                log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.", dataSource, interval, maxVersion, existingChunks.size());
                return null;
            } else {
                SegmentIdentifier max = null;
                if (!existingChunks.isEmpty()) {
                    TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                    for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                        if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                            max = SegmentIdentifier.fromDataSegment(existing.getObject());
                        }
                    }
                }
                final List<SegmentIdentifier> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
                for (SegmentIdentifier pending : pendings) {
                    if (max == null || pending.getVersion().compareTo(max.getVersion()) > 0 || (pending.getVersion().equals(max.getVersion()) && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) {
                        max = pending;
                    }
                }
                if (max == null) {
                    newIdentifier = new SegmentIdentifier(dataSource, interval, maxVersion, new NumberedShardSpec(0, 0));
                } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].", dataSource, interval, maxVersion, max.getIdentifierAsString());
                    return null;
                } else if (max.getShardSpec() instanceof LinearShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(), new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1));
                } else if (max.getShardSpec() instanceof NumberedShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(), new NumberedShardSpec(max.getShardSpec().getPartitionNum() + 1, ((NumberedShardSpec) max.getShardSpec()).getPartitions()));
                } else {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].", dataSource, interval, maxVersion, max.getShardSpec().getClass(), max.getIdentifierAsString());
                    return null;
                }
            }
            // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
            // Avoiding ON DUPLICATE KEY since it's not portable.
            // Avoiding try/catch since it may cause inadvertent transaction-splitting.
            // UNIQUE key for the row, ensuring sequences do not fork in two directions.
            // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
            // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319)
            final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
                Hashing.sha1().newHasher()
                       .putBytes(StringUtils.toUtf8(sequenceName))
                       .putByte((byte) 0xff)
                       .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull))
                       .hash()
                       .asBytes()
            );
            handle.createStatement(
                      String.format(
                          "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, "
                          + "sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                          + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, "
                          + ":sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
                          dbTables.getPendingSegmentsTable(),
                          connector.getQuoteString()
                      )
                  )
                  .bind("id", newIdentifier.getIdentifierAsString())
                  .bind("dataSource", dataSource)
                  .bind("created_date", new DateTime().toString())
                  .bind("start", interval.getStart().toString())
                  .bind("end", interval.getEnd().toString())
                  .bind("sequence_name", sequenceName)
                  .bind("sequence_prev_id", previousSegmentIdNotNull)
                  .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
                  .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier))
                  .execute();
            log.info("Allocated pending segment [%s] for sequence[%s] (previous = [%s]) in DB", newIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
            return newIdentifier;
        }
    }, ALLOCATE_SEGMENT_QUIET_TRIES, SQLMetadataConnector.DEFAULT_MAX_TRIES);
}
Also used: SegmentIdentifier (io.druid.segment.realtime.appenderator.SegmentIdentifier), LinearShardSpec (io.druid.timeline.partition.LinearShardSpec), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DataSegment (io.druid.timeline.DataSegment), SQLException (java.sql.SQLException), IOException (java.io.IOException), CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException), DateTime (org.joda.time.DateTime), Handle (org.skife.jdbi.v2.Handle), TimelineObjectHolder (io.druid.timeline.TimelineObjectHolder), List (java.util.List), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), PartitionChunk (io.druid.timeline.partition.PartitionChunk), NumberedShardSpec (io.druid.timeline.partition.NumberedShardSpec)
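
The unique-key comment in the method above is worth unpacking: sequenceName and previousSegmentId are joined with a 0xff separator byte before hashing, so distinct pairs such as ("ab", "c") and ("a", "bc") cannot collapse to the same key (0xff never occurs in valid UTF-8). A self-contained version of that computation, using the same Guava Hashing and BaseEncoding calls as the method; the helper name is illustrative and StandardCharsets stands in for Druid's StringUtils.toUtf8:

    import com.google.common.hash.Hashing;
    import com.google.common.io.BaseEncoding;
    import java.nio.charset.StandardCharsets;

    // Illustrative helper: hex SHA-1 of (sequenceName, previousSegmentId) with a
    // 0xff separator byte, matching the key built inside allocatePendingSegment.
    static String sequenceKeySha1(final String sequenceName, final String previousSegmentId) {
        return BaseEncoding.base16().encode(
            Hashing.sha1().newHasher()
                   .putBytes(sequenceName.getBytes(StandardCharsets.UTF_8))
                   .putByte((byte) 0xff)
                   .putBytes(previousSegmentId.getBytes(StandardCharsets.UTF_8))
                   .hash()
                   .asBytes()
        );
    }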

Example 9 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project druid by druid-io.

Class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegments:

@Override
public SegmentPublishResult announceHistoricalSegments(final Set<DataSegment> segments, final Set<DataSegment> segmentsToDrop, @Nullable final DataSourceMetadata startMetadata, @Nullable final DataSourceMetadata endMetadata) throws IOException {
    if (segments.isEmpty()) {
        throw new IllegalArgumentException("segment set must not be empty");
    }
    final String dataSource = segments.iterator().next().getDataSource();
    for (DataSegment segment : segments) {
        if (!dataSource.equals(segment.getDataSource())) {
            throw new IllegalArgumentException("segments must all be from the same dataSource");
        }
    }
    if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
        throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
    }
    // Find which segments are used (i.e. not overshadowed).
    final Set<DataSegment> usedSegments = new HashSet<>();
    List<TimelineObjectHolder<String, DataSegment>> segmentHolders = VersionedIntervalTimeline.forSegments(segments).lookupWithIncompletePartitions(Intervals.ETERNITY);
    for (TimelineObjectHolder<String, DataSegment> holder : segmentHolders) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            usedSegments.add(chunk.getObject());
        }
    }
    final AtomicBoolean definitelyNotUpdated = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {

            @Override
            public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
                // Set definitelyNotUpdated back to false upon retrying.
                definitelyNotUpdated.set(false);
                if (startMetadata != null) {
                    final DataStoreMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
                    if (result != DataStoreMetadataUpdateResult.SUCCESS) {
                        // Metadata was definitely not updated.
                        transactionStatus.setRollbackOnly();
                        definitelyNotUpdated.set(true);
                        if (result == DataStoreMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataStoreMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                if (segmentsToDrop != null && !segmentsToDrop.isEmpty()) {
                    final DataStoreMetadataUpdateResult result = dropSegmentsWithHandle(handle, segmentsToDrop, dataSource);
                    if (result != DataStoreMetadataUpdateResult.SUCCESS) {
                        // Metadata store was definitely not updated.
                        transactionStatus.setRollbackOnly();
                        definitelyNotUpdated.set(true);
                        if (result == DataStoreMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataStoreMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                final Set<DataSegment> inserted = announceHistoricalSegmentBatch(handle, segments, usedSegments);
                return SegmentPublishResult.ok(ImmutableSet.copyOf(inserted));
            }
        }, 3, getSqlMetadataMaxRetry());
    } catch (CallbackFailedException e) {
        if (definitelyNotUpdated.get()) {
            return SegmentPublishResult.fail(e.getMessage());
        } else {
            // Must throw exception if we are not sure if we updated or not.
            throw e;
        }
    }
}
Also used: ResultSet (java.sql.ResultSet), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), HashSet (java.util.HashSet), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DataSegment (org.apache.druid.timeline.DataSegment), JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException), IOException (java.io.IOException), CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException), Handle (org.skife.jdbi.v2.Handle), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult), TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder)
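
The AtomicBoolean in this example separates "the transaction definitely did not commit" from "the outcome is unknown" (for instance, a failure during commit itself): only in the first case is it safe to report a clean failure, otherwise the exception must propagate. The same pattern reduced to its skeleton; precondition, doWrites, Result, quietTries, and maxTries are placeholders, not Druid API:

    final AtomicBoolean definitelyNotUpdated = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<Result>() {
            @Override
            public Result inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
                definitelyNotUpdated.set(false); // reset on every retry attempt
                if (!precondition(handle)) {
                    status.setRollbackOnly();
                    definitelyNotUpdated.set(true); // we know nothing was written
                    throw new RuntimeException("Aborting transaction!");
                }
                return doWrites(handle);
            }
        }, quietTries, maxTries);
    } catch (CallbackFailedException e) {
        if (definitelyNotUpdated.get()) {
            return Result.fail(e.getMessage()); // safe: nothing committed
        }
        throw e; // unknown outcome: let the caller decide
    }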

Example 10 with TransactionCallback

Use of org.skife.jdbi.v2.TransactionCallback in project killbill by killbill.

Class DefaultUserDao, method updateRoleDefinition:

@Override
public void updateRoleDefinition(final String role, final List<String> permissions, final String createdBy) throws SecurityApiException {
    final DateTime createdDate = clock.getUTCNow();
    inTransactionWithExceptionHandling(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
            final RolesPermissionsSqlDao rolesPermissionsSqlDao = handle.attach(RolesPermissionsSqlDao.class);
            final List<RolesPermissionsModelDao> existingPermissions = rolesPermissionsSqlDao.getByRoleName(role);
            // An empty list of permissions means we should remove all current permissions
            final Iterable<RolesPermissionsModelDao> toBeDeleted = existingPermissions.isEmpty() ? existingPermissions : Iterables.filter(existingPermissions, new Predicate<RolesPermissionsModelDao>() {

                @Override
                public boolean apply(final RolesPermissionsModelDao input) {
                    return !permissions.contains(input.getPermission());
                }
            });
            final Iterable<String> toBeAdded = Iterables.filter(permissions, new Predicate<String>() {

                @Override
                public boolean apply(final String input) {
                    for (RolesPermissionsModelDao e : existingPermissions) {
                        if (e.getPermission().equals(input)) {
                            return false;
                        }
                    }
                    return true;
                }
            });
            for (RolesPermissionsModelDao d : toBeDeleted) {
                rolesPermissionsSqlDao.unactiveEvent(d.getRecordId(), createdDate, createdBy);
            }
            for (final String permission : toBeAdded) {
                rolesPermissionsSqlDao.create(new RolesPermissionsModelDao(role, permission, createdDate, createdBy));
            }
            return null;
        }
    });
}
Also used: TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DateTime (org.joda.time.DateTime), SecurityApiException (org.killbill.billing.security.SecurityApiException), Handle (org.skife.jdbi.v2.Handle), Predicate (com.google.common.base.Predicate), List (java.util.List)
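
The two anonymous Guava Predicate filters above compute a set difference in each direction: rows to revoke (stored but no longer requested) and rows to grant (requested but not yet stored). On Java 8+ the same diff could be written with streams; a sketch, assuming java.util.stream.Collectors is imported (Kill Bill's code here uses the Guava form shown above):

    final Set<String> existing = existingPermissions.stream()
            .map(RolesPermissionsModelDao::getPermission)
            .collect(Collectors.toSet());

    // Stored permissions absent from the new list are revoked; an empty
    // `permissions` list therefore revokes everything, as in the original.
    final List<RolesPermissionsModelDao> toBeDeleted = existingPermissions.stream()
            .filter(e -> !permissions.contains(e.getPermission()))
            .collect(Collectors.toList());

    // Requested permissions not yet stored are granted.
    final List<String> toBeAdded = permissions.stream()
            .filter(p -> !existing.contains(p))
            .collect(Collectors.toList());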

Aggregations

Handle (org.skife.jdbi.v2.Handle): 10
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 10
IOException (java.io.IOException): 6
DateTime (org.joda.time.DateTime): 6
DataSegment (io.druid.timeline.DataSegment): 4
SQLException (java.sql.SQLException): 4
List (java.util.List): 4
SecurityApiException (org.killbill.billing.security.SecurityApiException): 4
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 3
ImmutableSet (com.google.common.collect.ImmutableSet): 2
ResultSet (java.sql.ResultSet): 2
ArrayList (java.util.ArrayList): 2
Set (java.util.Set): 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2
SimpleHash (org.apache.shiro.crypto.hash.SimpleHash): 2
ByteSource (org.apache.shiro.util.ByteSource): 2
StatementContext (org.skife.jdbi.v2.StatementContext): 2
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 1
Predicate (com.google.common.base.Predicate): 1
ImmutableList (com.google.common.collect.ImmutableList): 1