Use of org.skife.jdbi.v2.TransactionStatus in project druid by druid-io.
The class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegments.
/**
 * {@inheritDoc}
 */
@Override
public SegmentPublishResult announceHistoricalSegments(
    final Set<DataSegment> segments,
    final DataSourceMetadata startMetadata,
    final DataSourceMetadata endMetadata
) throws IOException
{
  if (segments.isEmpty()) {
    throw new IllegalArgumentException("segment set must not be empty");
  }

  final String dataSource = segments.iterator().next().getDataSource();
  for (DataSegment segment : segments) {
    if (!dataSource.equals(segment.getDataSource())) {
      throw new IllegalArgumentException("segments must all be from the same dataSource");
    }
  }

  if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
    throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
  }

  // Find which segments are used (i.e. not overshadowed).
  final Set<DataSegment> usedSegments = Sets.newHashSet();
  for (TimelineObjectHolder<String, DataSegment> holder : VersionedIntervalTimeline.forSegments(segments).lookup(JodaUtils.ETERNITY)) {
    for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
      usedSegments.add(chunk.getObject());
    }
  }

  final AtomicBoolean txnFailure = new AtomicBoolean(false);

  try {
    return connector.retryTransaction(
        new TransactionCallback<SegmentPublishResult>()
        {
          @Override
          public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception
          {
            final Set<DataSegment> inserted = Sets.newHashSet();

            if (startMetadata != null) {
              final DataSourceMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
              if (result != DataSourceMetadataUpdateResult.SUCCESS) {
                // Mark the transaction rollback-only and record the failure, so the
                // CallbackFailedException handler below can return a clean failure result.
                transactionStatus.setRollbackOnly();
                txnFailure.set(true);
                if (result == DataSourceMetadataUpdateResult.FAILURE) {
                  throw new RuntimeException("Aborting transaction!");
                } else if (result == DataSourceMetadataUpdateResult.TRY_AGAIN) {
                  throw new RetryTransactionException("Aborting transaction!");
                }
              }
            }

            for (final DataSegment segment : segments) {
              if (announceHistoricalSegment(handle, segment, usedSegments.contains(segment))) {
                inserted.add(segment);
              }
            }

            return new SegmentPublishResult(ImmutableSet.copyOf(inserted), true);
          }
        },
        3,
        SQLMetadataConnector.DEFAULT_MAX_TRIES
    );
  }
  catch (CallbackFailedException e) {
    if (txnFailure.get()) {
      return new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false);
    } else {
      throw e;
    }
  }
}
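connector.retryTransaction and RetryTransactionException are Druid's own plumbing; the JDBI v2 part of the pattern is a TransactionCallback that marks its TransactionStatus rollback-only before throwing, and a caller that unwraps CallbackFailedException. A minimal sketch of that idea with plain JDBI v2 follows; the segments table, the RetryableException marker, and publishWithRetry are hypothetical names, not Druid APIs:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

public class RetryTransactionSketch
{
  // Hypothetical marker exception: thrown inside the callback to signal that
  // the transaction was rolled back on purpose and may be retried.
  static class RetryableException extends RuntimeException {}

  public static boolean publishWithRetry(DBI dbi, final String segmentId, int maxTries)
  {
    for (int attempt = 1; attempt <= maxTries; attempt++) {
      try {
        return dbi.inTransaction(new TransactionCallback<Boolean>()
        {
          @Override
          public Boolean inTransaction(Handle handle, TransactionStatus status) throws Exception
          {
            final int updated = handle
                .createStatement("UPDATE segments SET used = true WHERE id = :id") // hypothetical table
                .bind("id", segmentId)
                .execute();
            if (updated == 0) {
              // Nothing matched: mark the transaction rollback-only and abort.
              status.setRollbackOnly();
              throw new RetryableException();
            }
            return true;
          }
        });
      }
      catch (CallbackFailedException e) {
        // JDBI wraps exceptions thrown in the callback; retry only on our marker.
        if (e.getCause() instanceof RetryableException && attempt < maxTries) {
          continue;
        }
        throw e;
      }
    }
    return false;
  }
}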
Use of org.skife.jdbi.v2.TransactionStatus in project druid by druid-io.
The class SQLMetadataSegmentManager, method enableDatasource.
@Override
public boolean enableDatasource(final String ds)
{
  try {
    final IDBI dbi = connector.getDBI();

    // Stream every segment payload for the datasource and fold the rows into a
    // timeline, so only non-overshadowed segments are re-enabled below.
    VersionedIntervalTimeline<String, DataSegment> segmentTimeline = connector.inReadOnlyTransaction(
        new TransactionCallback<VersionedIntervalTimeline<String, DataSegment>>()
        {
          @Override
          public VersionedIntervalTimeline<String, DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception
          {
            return handle
                .createQuery(String.format("SELECT payload FROM %s WHERE dataSource = :dataSource", getSegmentsTable()))
                .setFetchSize(connector.getStreamingFetchSize())
                .bind("dataSource", ds)
                .map(ByteArrayMapper.FIRST)
                .fold(
                    new VersionedIntervalTimeline<String, DataSegment>(Ordering.natural()),
                    new Folder3<VersionedIntervalTimeline<String, DataSegment>, byte[]>()
                    {
                      @Override
                      public VersionedIntervalTimeline<String, DataSegment> fold(
                          VersionedIntervalTimeline<String, DataSegment> timeline,
                          byte[] payload,
                          FoldController foldController,
                          StatementContext statementContext
                      ) throws SQLException
                      {
                        try {
                          final DataSegment segment = DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(payload, DataSegment.class));
                          timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
                          return timeline;
                        }
                        catch (Exception e) {
                          throw new SQLException(e.toString());
                        }
                      }
                    }
                );
          }
        }
    );

    final List<DataSegment> segments = Lists.newArrayList();
    for (TimelineObjectHolder<String, DataSegment> objectHolder : segmentTimeline.lookup(new Interval("0000-01-01/3000-01-01"))) {
      for (PartitionChunk<DataSegment> partitionChunk : objectHolder.getObject()) {
        segments.add(partitionChunk.getObject());
      }
    }

    if (segments.isEmpty()) {
      log.warn("No segments found in the database!");
      return false;
    }

    // Flip the used flag for all surviving segments in a single SQL batch.
    dbi.withHandle(
        new HandleCallback<Void>()
        {
          @Override
          public Void withHandle(Handle handle) throws Exception
          {
            Batch batch = handle.createBatch();
            for (DataSegment segment : segments) {
              batch.add(String.format("UPDATE %s SET used=true WHERE id = '%s'", getSegmentsTable(), segment.getIdentifier()));
            }
            batch.execute();
            return null;
          }
        }
    );
  }
  catch (Exception e) {
    log.error(e, "Exception enabling datasource %s", ds);
    return false;
  }
  return true;
}
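The read side above pairs a streaming query (setFetchSize) with JDBI v2's Query.fold, where a Folder3 accumulates mapped rows into the result object. A stripped-down sketch of the fold pattern, assuming a hypothetical segments table with an id column:

import java.util.ArrayList;
import java.util.List;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.FoldController;
import org.skife.jdbi.v2.Folder3;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.HandleCallback;
import org.skife.jdbi.v2.util.StringMapper;

public class FoldSketch
{
  // Collect all segment ids for a datasource by folding over the result set.
  public static List<String> segmentIds(DBI dbi, final String dataSource)
  {
    return dbi.withHandle(new HandleCallback<List<String>>()
    {
      @Override
      public List<String> withHandle(Handle handle) throws Exception
      {
        return handle
            .createQuery("SELECT id FROM segments WHERE dataSource = :ds") // hypothetical table
            .bind("ds", dataSource)
            .map(StringMapper.FIRST)
            .fold(
                new ArrayList<String>(),
                new Folder3<ArrayList<String>, String>()
                {
                  @Override
                  public ArrayList<String> fold(ArrayList<String> acc, String id, FoldController control, StatementContext ctx)
                  {
                    acc.add(id); // called once per mapped row
                    return acc;
                  }
                }
            );
      }
    });
  }
}

On the write side, handle.createBatch() queues plain SQL strings and executes them in one round trip; handle.prepareBatch(...) with bound parameters would achieve the same without interpolating the segment id into the statement text.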
Use of org.skife.jdbi.v2.TransactionStatus in project druid by druid-io.
The class SQLMetadataSegmentManager, method poll.
@Override
public void poll()
{
  try {
    if (!started) {
      return;
    }

    ConcurrentHashMap<String, DruidDataSource> newDataSources = new ConcurrentHashMap<String, DruidDataSource>();

    log.debug("Starting polling of segment table");

    // Some databases such as PostgreSQL require auto-commit turned off
    // to stream results back; enabling transactions disables auto-commit.
    //
    // Setting the connection to read-only allows some databases such as MySQL
    // to automatically use read-only transaction mode, further optimizing the query.
    final List<DataSegment> segments = connector.inReadOnlyTransaction(
        new TransactionCallback<List<DataSegment>>()
        {
          @Override
          public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception
          {
            return handle
                .createQuery(String.format("SELECT payload FROM %s WHERE used=true", getSegmentsTable()))
                .setFetchSize(connector.getStreamingFetchSize())
                .map(
                    new ResultSetMapper<DataSegment>()
                    {
                      @Override
                      public DataSegment map(int index, ResultSet r, StatementContext ctx) throws SQLException
                      {
                        try {
                          return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class));
                        }
                        catch (IOException e) {
                          // A corrupt payload maps to null and is filtered out below.
                          log.makeAlert(e, "Failed to read segment from db.");
                          return null;
                        }
                      }
                    }
                )
                .list();
          }
        }
    );

    if (segments == null || segments.isEmpty()) {
      log.warn("No segments found in the database!");
      return;
    }

    final Collection<DataSegment> segmentsFinal = Collections2.filter(segments, Predicates.notNull());

    log.info("Polled and found %,d segments in the database", segments.size());

    for (final DataSegment segment : segmentsFinal) {
      String datasourceName = segment.getDataSource();
      DruidDataSource dataSource = newDataSources.get(datasourceName);
      if (dataSource == null) {
        dataSource = new DruidDataSource(datasourceName, ImmutableMap.of("created", new DateTime().toString()));
        Object shouldBeNull = newDataSources.put(datasourceName, dataSource);
        if (shouldBeNull != null) {
          log.warn("Just put key[%s] into dataSources and what was there wasn't null!? It was[%s]", datasourceName, shouldBeNull);
        }
      }
      if (!dataSource.getSegments().contains(segment)) {
        dataSource.addSegment(segment.getIdentifier(), segment);
      }
    }

    // Swap in the freshly built map only if the manager is still running.
    synchronized (lock) {
      if (started) {
        dataSources.set(newDataSources);
      }
    }
  }
  catch (Exception e) {
    log.makeAlert(e, "Problem polling DB.").emit();
  }
}
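connector.inReadOnlyTransaction is again a Druid helper; the generic JDBI v2 ingredients are a TransactionCallback (keeping auto-commit off so drivers can stream), a small fetch size, and a ResultSetMapper turning each row into a value object. A self-contained sketch under those assumptions, with a hypothetical segments table and Row holder:

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

public class StreamingPollSketch
{
  // Simple holder for one polled row (hypothetical schema: id, payload).
  static class Row
  {
    final String id;
    final byte[] payload;

    Row(String id, byte[] payload)
    {
      this.id = id;
      this.payload = payload;
    }
  }

  public static List<Row> pollUsed(DBI dbi)
  {
    return dbi.inTransaction(new TransactionCallback<List<Row>>()
    {
      @Override
      public List<Row> inTransaction(Handle handle, TransactionStatus status) throws Exception
      {
        // The transaction keeps auto-commit off and the fetch size keeps the
        // driver streaming instead of buffering the entire result set.
        return handle
            .createQuery("SELECT id, payload FROM segments WHERE used = true") // hypothetical table
            .setFetchSize(100)
            .map(new ResultSetMapper<Row>()
            {
              @Override
              public Row map(int index, ResultSet r, StatementContext ctx) throws SQLException
              {
                return new Row(r.getString("id"), r.getBytes("payload"));
              }
            })
            .list();
      }
    });
  }
}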
Use of org.skife.jdbi.v2.TransactionStatus in project killbill by killbill.
The class DefaultUserDao, method addRoleDefinition.
@Override
public void addRoleDefinition(final String role, final List<String> permissions, final String createdBy) throws SecurityApiException
{
  final DateTime createdDate = clock.getUTCNow();
  inTransactionWithExceptionHandling(new TransactionCallback<Void>()
  {
    @Override
    public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception
    {
      final RolesPermissionsSqlDao rolesPermissionsSqlDao = handle.attach(RolesPermissionsSqlDao.class);

      // The attached DAO shares this handle's connection, so the existence
      // check and the inserts below run in the same transaction; throwing
      // rolls everything back.
      final List<RolesPermissionsModelDao> existingRole = rolesPermissionsSqlDao.getByRoleName(role);
      if (!existingRole.isEmpty()) {
        throw new SecurityApiException(ErrorCode.SECURITY_ROLE_ALREADY_EXISTS, role);
      }

      for (final String permission : permissions) {
        rolesPermissionsSqlDao.create(new RolesPermissionsModelDao(role, permission, createdDate, createdBy));
      }
      return null;
    }
  });
}
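RolesPermissionsSqlDao is one of Kill Bill's SQL object interfaces; handle.attach binds such an interface to the current handle, so DAO calls join the surrounding transaction. A minimal sketch of the pattern with a hypothetical roles_permissions table and RoleSqlDao interface:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

public class SqlObjectSketch
{
  // Hypothetical SQL object interface; handle.attach() generates the implementation.
  public interface RoleSqlDao
  {
    @SqlQuery("SELECT COUNT(*) FROM roles_permissions WHERE role_name = :role")
    int countByRoleName(@Bind("role") String role);

    @SqlUpdate("INSERT INTO roles_permissions (role_name, permission) VALUES (:role, :permission)")
    void create(@Bind("role") String role, @Bind("permission") String permission);
  }

  public static void addRole(DBI dbi, final String role, final Iterable<String> permissions)
  {
    dbi.inTransaction(new TransactionCallback<Void>()
    {
      @Override
      public Void inTransaction(Handle handle, TransactionStatus status) throws Exception
      {
        // The attached DAO shares the handle's connection, so the check and
        // the inserts below all belong to this one transaction.
        RoleSqlDao dao = handle.attach(RoleSqlDao.class);
        if (dao.countByRoleName(role) > 0) {
          throw new IllegalStateException("role already exists: " + role);
        }
        for (String permission : permissions) {
          dao.create(role, permission);
        }
        return null;
      }
    });
  }
}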
Use of org.skife.jdbi.v2.TransactionStatus in project killbill by killbill.
The class DefaultUserDao, method insertUser.
@Override
public void insertUser(final String username, final String password, final List<String> roles, final String createdBy) throws SecurityApiException
{
  // Derive a random salt and a salted, iterated password hash before entering the transaction.
  final ByteSource salt = rng.nextBytes();
  final String hashedPasswordBase64 = new SimpleHash(
      KillbillCredentialsMatcher.HASH_ALGORITHM_NAME,
      password,
      salt.toBase64(),
      securityConfig.getShiroNbHashIterations()
  ).toBase64();
  final DateTime createdDate = clock.getUTCNow();

  inTransactionWithExceptionHandling(new TransactionCallback<Void>()
  {
    @Override
    public Void inTransaction(final Handle handle, final TransactionStatus status) throws Exception
    {
      final UserRolesSqlDao userRolesSqlDao = handle.attach(UserRolesSqlDao.class);
      for (final String role : roles) {
        userRolesSqlDao.create(new UserRolesModelDao(username, role, createdDate, createdBy));
      }

      final UsersSqlDao usersSqlDao = handle.attach(UsersSqlDao.class);
      final UserModelDao userModelDao = usersSqlDao.getByUsername(username);
      if (userModelDao != null) {
        // Throwing here also rolls back the role inserts above, since
        // everything runs in the same transaction.
        throw new SecurityApiException(ErrorCode.SECURITY_USER_ALREADY_EXISTS, username);
      }

      usersSqlDao.create(new UserModelDao(username, hashedPasswordBase64, salt.toBase64(), createdDate, createdBy));
      return null;
    }
  });
}
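The only non-JDBI piece here is the Shiro password hashing. In isolation it might look like the sketch below, where Sha512Hash.ALGORITHM_NAME and the iteration count stand in for Kill Bill's KillbillCredentialsMatcher.HASH_ALGORITHM_NAME and its SecurityConfig setting:

import org.apache.shiro.crypto.SecureRandomNumberGenerator;
import org.apache.shiro.crypto.hash.Sha512Hash;
import org.apache.shiro.crypto.hash.SimpleHash;
import org.apache.shiro.util.ByteSource;

public class PasswordHashSketch
{
  public static void main(String[] args)
  {
    final String password = "secret";
    final int iterations = 200000; // assumed iteration count; Kill Bill reads its own from SecurityConfig

    // Generate a random salt and derive a salted, iterated hash, both Base64-encoded
    // for storage, mirroring what insertUser persists alongside the username.
    final ByteSource salt = new SecureRandomNumberGenerator().nextBytes();
    final String hashedPasswordBase64 =
        new SimpleHash(Sha512Hash.ALGORITHM_NAME, password, salt.toBase64(), iterations).toBase64();

    System.out.println("salt=" + salt.toBase64());
    System.out.println("hash=" + hashedPasswordBase64);
  }
}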