Example 6 with TransactionStatus

Use of org.skife.jdbi.v2.TransactionStatus in project killbill by killbill.

The class DefaultUserDao, method insertUser.

@Override
public void insertUser(final String username, final String password, final List<String> roles, final String createdBy) throws SecurityApiException {
    final ByteSource salt = rng.nextBytes();
    final String hashedPasswordBase64 = new SimpleHash(KillbillCredentialsMatcher.HASH_ALGORITHM_NAME, password, salt.toBase64(), securityConfig.getShiroNbHashIterations()).toBase64();
    final DateTime createdDate = clock.getUTCNow();
    inTransactionWithExceptionHandling(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
            final UserRolesSqlDao userRolesSqlDao = handle.attach(UserRolesSqlDao.class);
            for (final String role : roles) {
                userRolesSqlDao.create(new UserRolesModelDao(username, role, createdDate, createdBy));
            }
            final UsersSqlDao usersSqlDao = handle.attach(UsersSqlDao.class);
            final UserModelDao userModelDao = usersSqlDao.getByUsername(username);
            if (userModelDao != null) {
                throw new SecurityApiException(ErrorCode.SECURITY_USER_ALREADY_EXISTS, username);
            }
            usersSqlDao.create(new UserModelDao(username, hashedPasswordBase64, salt.toBase64(), createdDate, createdBy));
            return null;
        }
    });
}
Also used: TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DateTime (org.joda.time.DateTime), SecurityApiException (org.killbill.billing.security.SecurityApiException), Handle (org.skife.jdbi.v2.Handle), SimpleHash (org.apache.shiro.crypto.hash.SimpleHash), ByteSource (org.apache.shiro.util.ByteSource)
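
Note the ordering inside the callback: the roles are inserted before the duplicate-user check, which is safe only because the thrown SecurityApiException rolls the whole transaction back and discards the role rows. On the read side, verification recomputes the hash with the stored salt and iteration count and compares the results. A minimal sketch of that check (the helper name and the "SHA-512" literal are illustrative assumptions; Kill Bill takes the algorithm from KillbillCredentialsMatcher.HASH_ALGORITHM_NAME):

import java.security.MessageDigest;
import org.apache.shiro.codec.Base64;
import org.apache.shiro.crypto.hash.SimpleHash;

// Hypothetical helper, not part of Kill Bill: verifies a submitted password
// against the base64 hash and salt that insertUser stored.
public static boolean passwordMatches(final String submittedPassword,
                                      final String storedHashBase64,
                                      final String storedSaltBase64,
                                      final int nbHashIterations) {
    // "SHA-512" is an assumption; the real name comes from KillbillCredentialsMatcher.HASH_ALGORITHM_NAME
    final SimpleHash recomputed = new SimpleHash("SHA-512", submittedPassword, storedSaltBase64, nbHashIterations);
    // Constant-time comparison, so prefix matches leak no timing signal
    return MessageDigest.isEqual(recomputed.getBytes(), Base64.decode(storedHashBase64));
}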

Example 7 with TransactionStatus

Use of org.skife.jdbi.v2.TransactionStatus in project killbill by killbill.

The class DefaultUserDao, method updateUserPassword.

@Override
public void updateUserPassword(final String username, final String password, final String updatedBy) throws SecurityApiException {
    final ByteSource salt = rng.nextBytes();
    final String hashedPasswordBase64 = new SimpleHash(KillbillCredentialsMatcher.HASH_ALGORITHM_NAME, password, salt.toBase64(), securityConfig.getShiroNbHashIterations()).toBase64();
    inTransactionWithExceptionHandling(new TransactionCallback<Void>() {

        @Override
        public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
            final DateTime updatedDate = clock.getUTCNow();
            final UsersSqlDao usersSqlDao = handle.attach(UsersSqlDao.class);
            final UserModelDao userModelDao = usersSqlDao.getByUsername(username);
            if (userModelDao == null) {
                throw new SecurityApiException(ErrorCode.SECURITY_INVALID_USER, username);
            }
            usersSqlDao.updatePassword(username, hashedPasswordBase64, salt.toBase64(), updatedDate.toDate(), updatedBy);
            return null;
        }
    });
}
Also used: TransactionStatus (org.skife.jdbi.v2.TransactionStatus), SecurityApiException (org.killbill.billing.security.SecurityApiException), DateTime (org.joda.time.DateTime), Handle (org.skife.jdbi.v2.Handle), SimpleHash (org.apache.shiro.crypto.hash.SimpleHash), ByteSource (org.apache.shiro.util.ByteSource)
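
inTransactionWithExceptionHandling is a Kill Bill wrapper; the underlying JDBI v2 primitive is DBI.inTransaction, which opens a Handle, begins a transaction, invokes the callback, then commits on normal return or rolls back if the callback throws. A minimal self-contained sketch (the JDBC URL, table, and values are placeholders):

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;

public final class TransactionSketch {
    public static void main(final String[] args) {
        final DBI dbi = new DBI("jdbc:h2:mem:demo"); // placeholder JDBC URL
        final Integer updatedRows = dbi.inTransaction(new TransactionCallback<Integer>() {
            @Override
            public Integer inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
                // Everything executed on this handle commits atomically;
                // throwing any exception here triggers a rollback instead.
                return handle.createStatement("UPDATE users SET password_hash = :hash WHERE username = :username")
                             .bind("hash", "<new-hash>")
                             .bind("username", "admin")
                             .execute();
            }
        });
        System.out.println("rows updated: " + updatedRows);
    }
}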

Example 8 with TransactionStatus

Use of org.skife.jdbi.v2.TransactionStatus in project hive by apache.

The class DruidStorageHandlerUtils, method getDataSegmentList.

/**
   * @param connector                   SQL connector to metadata
   * @param metadataStorageTablesConfig Tables configuration
   * @param dataSource                  Name of data source
   *
   * @return List of all data segments part of the given data source
   */
public static List<DataSegment> getDataSegmentList(final SQLMetadataConnector connector, final MetadataStorageTablesConfig metadataStorageTablesConfig, final String dataSource) {
    List<DataSegment> segmentList = connector.retryTransaction(new TransactionCallback<List<DataSegment>>() {

        @Override
        public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
            return handle.createQuery(String.format("SELECT payload FROM %s WHERE dataSource = :dataSource",
                                                    metadataStorageTablesConfig.getSegmentsTable()))
                         .setFetchSize(getStreamingFetchSize(connector))
                         .bind("dataSource", dataSource)
                         .map(ByteArrayMapper.FIRST)
                         .fold(new ArrayList<DataSegment>(), new Folder3<List<DataSegment>, byte[]>() {

                @Override
                public List<DataSegment> fold(List<DataSegment> accumulator, byte[] payload, FoldController control, StatementContext ctx) throws SQLException {
                    try {
                        final DataSegment segment = DATA_SEGMENT_INTERNER.intern(JSON_MAPPER.readValue(payload, DataSegment.class));
                        accumulator.add(segment);
                        return accumulator;
                    } catch (Exception e) {
                        throw new SQLException(e.toString());
                    }
                }
            });
        }
    }, 3, SQLMetadataConnector.DEFAULT_MAX_TRIES);
    return segmentList;
}
Also used: SQLException (java.sql.SQLException), ArrayList (java.util.ArrayList), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DataSegment (io.druid.timeline.DataSegment), InvocationTargetException (java.lang.reflect.InvocationTargetException), IOException (java.io.IOException), UnknownHostException (java.net.UnknownHostException), ExecutionException (java.util.concurrent.ExecutionException), Handle (org.skife.jdbi.v2.Handle), StatementContext (org.skife.jdbi.v2.StatementContext), FoldController (org.skife.jdbi.v2.FoldController), List (java.util.List), ImmutableList (com.google.common.collect.ImmutableList), Folder3 (org.skife.jdbi.v2.Folder3)
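
The notable parts here are setFetchSize combined with fold: rows stream through the Folder3 callback one at a time, so only the accumulator list is materialized in memory, while retryTransaction (a Druid SQLMetadataConnector helper) re-runs the whole callback on transient failures. The fold pattern in isolation looks like the sketch below; the segments table and the even-id filter are made up for illustration:

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.FoldController;
import org.skife.jdbi.v2.Folder3;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.util.IntegerMapper;

public final class FoldSketch {
    // Accumulates matching ids row by row instead of loading the full result set.
    public static List<Integer> evenIds(final DBI dbi) {
        final Handle handle = dbi.open();
        try {
            return handle.createQuery("SELECT id FROM segments")
                         .map(IntegerMapper.FIRST)
                         .fold(new ArrayList<Integer>(), new Folder3<List<Integer>, Integer>() {
                             @Override
                             public List<Integer> fold(final List<Integer> accumulator, final Integer id,
                                                       final FoldController control, final StatementContext ctx) throws SQLException {
                                 if (id % 2 == 0) {
                                     accumulator.add(id);
                                 }
                                 return accumulator;
                             }
                         });
        } finally {
            handle.close();
        }
    }
}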

Example 9 with TransactionStatus

Use of org.skife.jdbi.v2.TransactionStatus in project druid by druid-io.

The class IndexerSQLMetadataStorageCoordinator, method allocatePendingSegment.

@Override
public SegmentIdentifier allocatePendingSegment(final String dataSource, final String sequenceName, final String previousSegmentId, final Interval interval, final String maxVersion) throws IOException {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(sequenceName, "sequenceName");
    Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkNotNull(maxVersion, "maxVersion");
    final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId;
    return connector.retryTransaction(new TransactionCallback<SegmentIdentifier>() {

        @Override
        public SegmentIdentifier inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception {
            final List<byte[]> existingBytes = handle.createQuery(
                    String.format("SELECT payload FROM %s WHERE "
                                  + "dataSource = :dataSource AND "
                                  + "sequence_name = :sequence_name AND "
                                  + "sequence_prev_id = :sequence_prev_id",
                                  dbTables.getPendingSegmentsTable()))
                    .bind("dataSource", dataSource)
                    .bind("sequence_name", sequenceName)
                    .bind("sequence_prev_id", previousSegmentIdNotNull)
                    .map(ByteArrayMapper.FIRST)
                    .list();
            if (!existingBytes.isEmpty()) {
                final SegmentIdentifier existingIdentifier = jsonMapper.readValue(Iterables.getOnlyElement(existingBytes), SegmentIdentifier.class);
                if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis() && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) {
                    log.info("Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
                    return existingIdentifier;
                } else {
                    log.warn("Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, " + "does not match requested interval[%s]", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull, interval);
                    return null;
                }
            }
            // Make up a pending segment based on existing segments and pending segments in the DB. This works
            // assuming that all tasks inserting segments at a particular point in time are going through the
            // allocatePendingSegment flow. This should be assured through some other mechanism (like task locks).
            final SegmentIdentifier newIdentifier;
            final List<TimelineObjectHolder<String, DataSegment>> existingChunks = getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
            if (existingChunks.size() > 1) {
                // Not possible to expand more than one chunk with a single segment.
                log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.", dataSource, interval, maxVersion, existingChunks.size());
                return null;
            } else {
                SegmentIdentifier max = null;
                if (!existingChunks.isEmpty()) {
                    TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                    for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                        if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                            max = SegmentIdentifier.fromDataSegment(existing.getObject());
                        }
                    }
                }
                final List<SegmentIdentifier> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
                for (SegmentIdentifier pending : pendings) {
                    if (max == null || pending.getVersion().compareTo(max.getVersion()) > 0 || (pending.getVersion().equals(max.getVersion()) && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) {
                        max = pending;
                    }
                }
                if (max == null) {
                    newIdentifier = new SegmentIdentifier(dataSource, interval, maxVersion, new NumberedShardSpec(0, 0));
                } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].", dataSource, interval, maxVersion, max.getIdentifierAsString());
                    return null;
                } else if (max.getShardSpec() instanceof LinearShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(), new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1));
                } else if (max.getShardSpec() instanceof NumberedShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(), new NumberedShardSpec(max.getShardSpec().getPartitionNum() + 1, ((NumberedShardSpec) max.getShardSpec()).getPartitions()));
                } else {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].", dataSource, interval, maxVersion, max.getShardSpec().getClass(), max.getIdentifierAsString());
                    return null;
                }
            }
            // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
            // Avoiding ON DUPLICATE KEY since it's not portable.
            // Avoiding try/catch since it may cause inadvertent transaction-splitting.
            // UNIQUE key for the row, ensuring sequences do not fork in two directions.
            // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
            // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319)
            final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
                    Hashing.sha1()
                           .newHasher()
                           .putBytes(StringUtils.toUtf8(sequenceName))
                           .putByte((byte) 0xff)
                           .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull))
                           .hash()
                           .asBytes());
            handle.createStatement(
                    String.format("INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                                  + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
                                  dbTables.getPendingSegmentsTable(), connector.getQuoteString()))
                  .bind("id", newIdentifier.getIdentifierAsString())
                  .bind("dataSource", dataSource)
                  .bind("created_date", new DateTime().toString())
                  .bind("start", interval.getStart().toString())
                  .bind("end", interval.getEnd().toString())
                  .bind("sequence_name", sequenceName)
                  .bind("sequence_prev_id", previousSegmentIdNotNull)
                  .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
                  .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier))
                  .execute();
            log.info("Allocated pending segment [%s] for sequence[%s] (previous = [%s]) in DB", newIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
            return newIdentifier;
        }
    }, ALLOCATE_SEGMENT_QUIET_TRIES, SQLMetadataConnector.DEFAULT_MAX_TRIES);
}
Also used: SegmentIdentifier (io.druid.segment.realtime.appenderator.SegmentIdentifier), LinearShardSpec (io.druid.timeline.partition.LinearShardSpec), TransactionStatus (org.skife.jdbi.v2.TransactionStatus), DataSegment (io.druid.timeline.DataSegment), SQLException (java.sql.SQLException), IOException (java.io.IOException), CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException), DateTime (org.joda.time.DateTime), Handle (org.skife.jdbi.v2.Handle), TimelineObjectHolder (io.druid.timeline.TimelineObjectHolder), List (java.util.List), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), PartitionChunk (io.druid.timeline.partition.PartitionChunk), NumberedShardSpec (io.druid.timeline.partition.NumberedShardSpec)
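
The sequence_name_prev_id_sha1 column gives the row a short, fixed-width UNIQUE key, and the 0xff separator byte keeps pairs such as ("ab", "c") and ("a", "bc") from hashing identically. Extracted into a standalone method for clarity (same Guava calls as in the example; StandardCharsets stands in for Druid's StringUtils.toUtf8 helper):

import java.nio.charset.StandardCharsets;
import com.google.common.hash.Hashing;
import com.google.common.io.BaseEncoding;

// Standalone version of the unique-key computation used above.
public static String sequenceNamePrevIdSha1(final String sequenceName, final String previousSegmentId) {
    return BaseEncoding.base16().encode(
            Hashing.sha1()
                   .newHasher()
                   .putBytes(sequenceName.getBytes(StandardCharsets.UTF_8))
                   .putByte((byte) 0xff) // separator so the two strings cannot shift into each other
                   .putBytes(previousSegmentId.getBytes(StandardCharsets.UTF_8))
                   .hash()
                   .asBytes());
}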

Aggregations

Handle (org.skife.jdbi.v2.Handle): 9
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 9
IOException (java.io.IOException): 6
DataSegment (io.druid.timeline.DataSegment): 5
SQLException (java.sql.SQLException): 5
DateTime (org.joda.time.DateTime): 5
List (java.util.List): 4
ArrayList (java.util.ArrayList): 3
SecurityApiException (org.killbill.billing.security.SecurityApiException): 3
StatementContext (org.skife.jdbi.v2.StatementContext): 3
ImmutableList (com.google.common.collect.ImmutableList): 2
SimpleHash (org.apache.shiro.crypto.hash.SimpleHash): 2
ByteSource (org.apache.shiro.util.ByteSource): 2
FoldController (org.skife.jdbi.v2.FoldController): 2
Folder3 (org.skife.jdbi.v2.Folder3): 2
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 2
ImmutableSet (com.google.common.collect.ImmutableSet): 1
DruidDataSource (io.druid.client.DruidDataSource): 1
SegmentPublishResult (io.druid.indexing.overlord.SegmentPublishResult): 1
SegmentIdentifier (io.druid.segment.realtime.appenderator.SegmentIdentifier): 1