Use of org.skife.jdbi.v2.sqlobject.Bind in project killbill by killbill.
The class EntitySqlDaoWrapperInvocationHandler, method invokeWithAuditAndHistory.
private Object invokeWithAuditAndHistory(final Audited auditedAnnotation, final Method method, final Object[] args) throws Throwable {
    final InternalCallContext contextMaybeWithoutAccountRecordId = retrieveContextFromArguments(args);
    final List<String> entityIds = retrieveEntityIdsFromArguments(method, args);
    Preconditions.checkState(!entityIds.isEmpty(), "@Audited Sql method must have entities (@Bind(\"id\")) as arguments");
    // We cannot always infer the TableName from the signature
    TableName tableName = retrieveTableNameFromArgumentsIfPossible(Arrays.asList(args));
    final ChangeType changeType = auditedAnnotation.value();
    final boolean isBatchQuery = method.getAnnotation(SqlBatch.class) != null;
    // Get the current state before deletion for the history tables
    final Map<Long, M> deletedAndUpdatedEntities = new HashMap<Long, M>();
    // Real jdbc call
    final Object obj = prof.executeWithProfiling(ProfilingFeatureType.DAO_DETAILS, getProfilingId("raw", method), new WithProfilingCallback<Object, Throwable>() {
        @Override
        public Object execute() throws Throwable {
            return executeJDBCCall(method, args);
        }
    });
    if (entityIds.isEmpty()) {
        return obj;
    }
    InternalCallContext context = null;
    // Retrieve record_id(s) for audit and history tables
    final List<Long> entityRecordIds = new LinkedList<Long>();
    if (changeType == ChangeType.INSERT) {
        Preconditions.checkNotNull(tableName, "Insert query should have an EntityModelDao as argument: %s", args);
        if (isBatchQuery) {
            entityRecordIds.addAll((Collection<? extends Long>) obj);
        } else {
            entityRecordIds.add((Long) obj);
        }
        // Snowflake
        if (TableName.ACCOUNT.equals(tableName)) {
            Preconditions.checkState(entityIds.size() == 1, "Bulk insert of accounts isn't supported");
            // AccountModelDao in practice
            final TimeZoneAwareEntity accountModelDao = retrieveTimeZoneAwareEntityFromArguments(args);
            context = internalCallContextFactory.createInternalCallContext(accountModelDao, entityRecordIds.get(0), contextMaybeWithoutAccountRecordId);
        }
    } else {
        // Rehydrate entry with latest state
        final List<M> retrievedEntities = sqlDao.getByIdsIncludedDeleted(entityIds, contextMaybeWithoutAccountRecordId);
        printSQLWarnings();
        for (final M entity : retrievedEntities) {
            deletedAndUpdatedEntities.put(entity.getRecordId(), entity);
            entityRecordIds.add(entity.getRecordId());
            if (tableName == null) {
                tableName = entity.getTableName();
            } else {
                Preconditions.checkState(tableName == entity.getTableName(), "Entities with different TableName");
            }
        }
    }
    Preconditions.checkState(entityIds.size() == entityRecordIds.size(), "SqlDao method has %s as ids but found %s as recordIds", entityIds, entityRecordIds);
    // Context validations
    if (context != null) {
        // Context was already updated, see above (createAccount code path). Just make sure we don't attempt to bulk create
        Preconditions.checkState(entityIds.size() == 1, "Bulk insert of accounts isn't supported");
    } else {
        context = contextMaybeWithoutAccountRecordId;
        final boolean tableWithoutAccountRecordId = tableName == TableName.TENANT || tableName == TableName.TENANT_BROADCASTS || tableName == TableName.TENANT_KVS || tableName == TableName.TAG_DEFINITIONS || tableName == TableName.SERVICE_BRODCASTS || tableName == TableName.NODE_INFOS;
        Preconditions.checkState(context.getAccountRecordId() != null || tableWithoutAccountRecordId, "accountRecordId should be set for tableName=%s and changeType=%s", tableName, changeType);
    }
    final Collection<M> reHydratedEntities = updateHistoryAndAudit(entityRecordIds, deletedAndUpdatedEntities, tableName, changeType, context);
    if (method.getReturnType().equals(Void.TYPE)) {
        // Return early
        return null;
    } else if (isBatchQuery) {
        // Return the raw jdbc response (generated keys)
        return obj;
    } else {
        // PERF: override the return value with the reHydrated entity to avoid an extra 'get' in the transaction
        // (see EntityDaoBase#createAndRefresh for an example, but it works for updates as well).
        Preconditions.checkState(entityRecordIds.size() == 1, "Invalid number of entityRecordIds: %s", entityRecordIds);
        if (!reHydratedEntities.isEmpty()) {
            Preconditions.checkState(reHydratedEntities.size() == 1, "Invalid number of entities: %s", reHydratedEntities);
            return Iterables.<M>getFirst(reHydratedEntities, null);
        } else {
            // Updated entity not retrieved yet, we have to go back to the database
            final M entity = sqlDao.getByRecordId(entityRecordIds.get(0), context);
            printSQLWarnings();
            return entity;
        }
    }
}
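For reference, the precondition near the top of this handler requires the intercepted SQL object method to expose the entity id through @Bind("id"). A minimal sketch of such a DAO method is shown below; the interface name, SQL string, and the @BindBean parameter are illustrative assumptions, while @SqlUpdate/@Bind/@BindBean come from JDBI v2 and @Audited/ChangeType are the Kill Bill types used by the handler above.

import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.BindBean;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

// Illustrative DAO interface: name, SQL and columns are assumptions; @Audited and ChangeType are
// the Kill Bill types referenced in the handler above (their imports are omitted here).
public interface InvoiceSqlDao {

    @SqlUpdate("UPDATE invoices SET status = :status WHERE id = :id")
    @Audited(ChangeType.UPDATE)
    void updateStatus(@Bind("id") String id,
                      @Bind("status") String status,
                      @BindBean InternalCallContext context);
}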
Use of org.skife.jdbi.v2.sqlobject.Bind in project killbill by killbill.
The class TestPermissionAnnotationMethodInterceptor, method testAOPForInterface.
@Test(groups = "fast")
public void testAOPForInterface() throws Exception {
    // Make sure it works as expected without any AOP magic
    final IAopTester simpleTester = new AopTesterImpl();
    try {
        simpleTester.createRefund();
    } catch (Exception e) {
        Assert.fail(e.getLocalizedMessage());
    }
    // Now, verify the interception works
    configureShiro();
    final Injector injector = Guice.createInjector(
            Stage.PRODUCTION,
            new ShiroModuleNoDB(configSource),
            new KillBillShiroAopModule(configSource),
            new TestSecurityModuleNoDB(configSource),
            new CacheModule(configSource),
            new AbstractModule() {
                @Override
                public void configure() {
                    bind(IDBI.class).toInstance(Mockito.mock(IDBI.class));
                    bind(IDBI.class).annotatedWith(Names.named(MAIN_RO_DATA_SOURCE_ID)).toInstance(Mockito.mock(IDBI.class));
                    bind(IAopTester.class).to(AopTesterImpl.class).asEagerSingleton();
                    bind(TenantInternalApi.class).toInstance(Mockito.mock(TenantInternalApi.class));
                    bind(NonEntityDao.class).toInstance(Mockito.mock(NonEntityDao.class));
                }
            });
    final IAopTester aopedTester = injector.getInstance(IAopTester.class);
    verifyAopedTester(aopedTester);
}
Use of org.skife.jdbi.v2.sqlobject.Bind in project killbill by killbill.
The class TestPermissionAnnotationMethodInterceptor, method testAOPForClass.
@Test(groups = "fast")
public void testAOPForClass() throws Exception {
    // Make sure it works as expected without any AOP magic
    final IAopTester simpleTester = new AopTester();
    try {
        simpleTester.createRefund();
    } catch (Exception e) {
        Assert.fail(e.getLocalizedMessage());
    }
    // Now, verify the interception works
    configureShiro();
    final Injector injector = Guice.createInjector(
            Stage.PRODUCTION,
            new ShiroModuleNoDB(configSource),
            new KillBillShiroAopModule(configSource),
            new TestSecurityModuleNoDB(configSource),
            new CacheModule(configSource),
            new AbstractModule() {
                @Override
                protected void configure() {
                    bind(IDBI.class).toInstance(Mockito.mock(IDBI.class));
                    bind(IDBI.class).annotatedWith(Names.named(MAIN_RO_DATA_SOURCE_ID)).toInstance(Mockito.mock(IDBI.class));
                    bind(TenantInternalApi.class).toInstance(Mockito.mock(TenantInternalApi.class));
                    bind(NonEntityDao.class).toInstance(Mockito.mock(NonEntityDao.class));
                }
            });
    final AopTester aopedTester = injector.getInstance(AopTester.class);
    verifyAopedTester(aopedTester);
}
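Both tests above exercise permission interception around createRefund(). The IAopTester interface itself is not part of this excerpt; a plausible sketch is shown below, where the annotation choice (Apache Shiro's standard @RequiresPermissions) and the permission string are assumptions rather than the actual Kill Bill definitions.

import org.apache.shiro.authz.annotation.RequiresPermissions;

// Hypothetical shape of the tested interface; the real annotation and permission value in Kill Bill
// may differ.
public interface IAopTester {

    @RequiresPermissions("payment:refund")
    void createRefund();
}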
Use of org.skife.jdbi.v2.sqlobject.Bind in project druid by druid-io.
The class SqlSegmentsMetadataQuery, method retrieveSegments.
private CloseableIterator<DataSegment> retrieveSegments(final String dataSource, final Collection<Interval> intervals, final IntervalMode matchMode, final boolean used) {
    // Check if the intervals all support comparing as strings. If so, bake them into the SQL.
    final boolean compareAsString = intervals.stream().allMatch(Intervals::canCompareEndpointsAsStrings);
    final StringBuilder sb = new StringBuilder();
    sb.append("SELECT payload FROM %s WHERE used = :used AND dataSource = :dataSource");
    if (compareAsString && !intervals.isEmpty()) {
        sb.append(" AND (");
        for (int i = 0; i < intervals.size(); i++) {
            sb.append(matchMode.makeSqlCondition(connector.getQuoteString(), StringUtils.format(":start%d", i), StringUtils.format(":end%d", i)));
            if (i == intervals.size() - 1) {
                sb.append(")");
            } else {
                sb.append(" OR ");
            }
        }
    }
    final Query<Map<String, Object>> sql = handle
            .createQuery(StringUtils.format(sb.toString(), dbTables.getSegmentsTable()))
            .setFetchSize(connector.getStreamingFetchSize())
            .bind("used", used)
            .bind("dataSource", dataSource);
    if (compareAsString) {
        final Iterator<Interval> iterator = intervals.iterator();
        for (int i = 0; iterator.hasNext(); i++) {
            Interval interval = iterator.next();
            sql.bind(StringUtils.format("start%d", i), interval.getStart().toString())
               .bind(StringUtils.format("end%d", i), interval.getEnd().toString());
        }
    }
    final ResultIterator<DataSegment> resultIterator = sql.map((index, r, ctx) -> JacksonUtils.readValue(jsonMapper, r.getBytes(1), DataSegment.class)).iterator();
    return CloseableIterators.wrap(Iterators.filter(resultIterator, dataSegment -> {
        if (intervals.isEmpty()) {
            return true;
        } else {
            // Must re-check that the interval matches, even if comparing as strings in SQL, because the
            // stored segment interval itself might not be string-comparable (consider a
            // segment interval like "20010/20011").
            for (Interval interval : intervals) {
                if (matchMode.apply(interval, dataSegment.getInterval())) {
                    return true;
                }
            }
            return false;
        }
    }), resultIterator);
}
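The fluent .bind("start0", ...) / .bind("end0", ...) calls above pair with the :start%d/:end%d placeholders baked into the SQL string. A self-contained sketch of the same JDBI v2 named-parameter binding is shown below; the JDBC URL, table name, and bound values are assumptions for illustration only.

import java.util.List;
import java.util.Map;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class BindExample {
    public static void main(String[] args) {
        // Assumed in-memory H2 database and druid_segments-like table; an H2 driver (and the table)
        // would be needed for this to actually run.
        final DBI dbi = new DBI("jdbc:h2:mem:segments");
        final Handle handle = dbi.open();
        try {
            final List<Map<String, Object>> rows = handle
                    .createQuery("SELECT payload FROM druid_segments "
                                 + "WHERE used = :used AND dataSource = :dataSource "
                                 + "AND start < :end0 AND \"end\" > :start0")
                    .bind("used", true)
                    .bind("dataSource", "wikipedia")
                    .bind("start0", "2000-01-01T00:00:00.000Z")
                    .bind("end0", "2001-01-01T00:00:00.000Z")
                    .list();
            System.out.println(rows.size());
        } finally {
            handle.close();
        }
    }
}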
Use of org.skife.jdbi.v2.sqlobject.Bind in project druid by druid-io.
The class SQLMetadataSegmentPublisher, method publishSegment.
@VisibleForTesting
void publishSegment(final String segmentId, final String dataSource, final String createdDate, final String start, final String end, final boolean partitioned, final String version, final boolean used, final byte[] payload) {
    try {
        final DBI dbi = connector.getDBI();
        List<Map<String, Object>> exists = dbi.withHandle(new HandleCallback<List<Map<String, Object>>>() {
            @Override
            public List<Map<String, Object>> withHandle(Handle handle) {
                return handle.createQuery(StringUtils.format("SELECT id FROM %s WHERE id=:id", config.getSegmentsTable()))
                             .bind("id", segmentId)
                             .list();
            }
        });
        if (!exists.isEmpty()) {
            log.info("Found [%s] in DB, not updating DB", segmentId);
            return;
        }
        dbi.withHandle(new HandleCallback<Void>() {
            @Override
            public Void withHandle(Handle handle) {
                handle.createStatement(statement)
                      .bind("id", segmentId)
                      .bind("dataSource", dataSource)
                      .bind("created_date", createdDate)
                      .bind("start", start)
                      .bind("end", end)
                      .bind("partitioned", partitioned)
                      .bind("version", version)
                      .bind("used", used)
                      .bind("payload", payload)
                      .execute();
                return null;
            }
        });
    } catch (Exception e) {
        log.error(e, "Exception inserting into DB");
        throw new RuntimeException(e);
    }
}
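The statement field bound above is not part of this excerpt. Judging from the bind(...) names, it is an INSERT with matching named parameters, roughly as reconstructed below; exact column names and quoting are assumptions, and Druid builds the real string in the constructor from the configured segments table.

// Rough reconstruction of the 'statement' field, inferred from the bind(...) calls above; the real
// Druid code quotes the reserved "end" column via the connector's quote string.
this.statement = StringUtils.format(
        "INSERT INTO %s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) "
        + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
        config.getSegmentsTable());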