Use of Query in project packages-jpl by SWI-Prolog (note: this snippet actually uses JPL's org.jpl7.Query, a Prolog query class, not org.skife.jdbi.v2.Query).
The class AddWithThreads, method run().
public void run() {
    for (int i = 0; i < REPS; i++) {
        // System.out.println("Asserting test('" + i + "')");
        Query queryA = new Query("assert(" + namespace + "(test('" + i + "')))");
        Thread.yield();
        // System.out.println("adding query: " + queryA);
        // boolean retA = queryA.hasMoreElements();
        queryA.close();
    }
    latch.countDown();
}
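For context, run() is meant to be executed by several threads at once, each asserting facts under its own namespace, with the shared latch letting the test wait for all workers. A minimal driver sketch under that assumption; the AddWithThreads constructor arguments, the thread count, and the namespace naming are guesses, not the project's actual test setup:

import java.util.concurrent.CountDownLatch;

public class AddWithThreadsDriver {
    public static void main(String[] args) throws InterruptedException {
        final int nThreads = 4; // assumption: any small count exercises the concurrency
        CountDownLatch latch = new CountDownLatch(nThreads);
        for (int t = 0; t < nThreads; t++) {
            // assumed constructor: a per-thread namespace plus the shared latch
            new Thread(new AddWithThreads("ns" + t, latch)).start();
        }
        latch.await(); // unblocks once every run() has called latch.countDown()
        System.out.println("all asserting threads finished");
    }
}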
Use of Query in project packages-jpl by SWI-Prolog (again JPL's org.jpl7.Query rather than the JDBI class).
The class FetchBigTree, method main().
public static void main(String[] args) {
    // Prolog.set_default_init_args(new String[] { "libpl.dll", "-f",
    // "D:/pcm/bin/pcm.ini", "-g", "pcm_2000" });
    (new Query("consult('jpl/test/test.pl')")).oneSolution();
    Term t = (Term) ((new Query("p(18,T)")).oneSolution().get("T"));
    int i = 1;
    while (t.hasFunctor("a", 2)) {
        t = t.arg(2);
        i = i + 1;
    }
    System.err.println("got a tree of " + i + " generations");
}
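The loop above measures the depth of the tree returned by p(18, T) by repeatedly descending into the second argument of each a/2 compound. The same walk on a small hand-built term, using JPL's public Term constructors; the a(leaf, a(leaf, leaf)) shape is only an assumption about what test.pl's p/2 produces:

import org.jpl7.Atom;
import org.jpl7.Compound;
import org.jpl7.Term;

public class WalkTerm {
    public static void main(String[] args) {
        // a(leaf, a(leaf, leaf)): two nested a/2 compounds
        Term t = new Compound("a", new Term[] {
                new Atom("leaf"),
                new Compound("a", new Term[] { new Atom("leaf"), new Atom("leaf") })
        });
        int i = 1;
        while (t.hasFunctor("a", 2)) {
            t = t.arg(2); // arg() is 1-indexed, so this is the second (right) child
            i = i + 1;
        }
        System.err.println("got a tree of " + i + " generations"); // prints 3
    }
}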
Use of org.skife.jdbi.v2.Query in project syndesis by syndesisio.
The class SqlJsonDB, method getAsStreamingOutput().
@Override
@SuppressWarnings({ "PMD.ExcessiveMethodLength", "PMD.NPathComplexity" })
public Consumer<OutputStream> getAsStreamingOutput(String path, GetOptions options) {
    GetOptions o;
    if (options != null) {
        o = options;
    } else {
        o = new GetOptions();
    }
    // Let's normalize the path a bit
    String baseDBPath = JsonRecordSupport.convertToDBPath(path);
    String like = baseDBPath + "%";
    GetOptions.Order order = o.order();
    if (order == null) {
        order = GetOptions.Order.ASC;
    }
    Consumer<OutputStream> result = null;
    final Handle h = dbi.open();
    try {
        StringBuilder sql = new StringBuilder(250);
        // Creating the iterator could fail with a runtime exception;
        // if it does, the finally blocks below release the handle.
        ArrayList<Consumer<Query<Map<String, Object>>>> binds = new ArrayList<>();
        if (o.filter() == null) {
            sql.append("select path,value,ovalue from jsondb where path LIKE :like");
        } else {
            sql.append("SELECT path,value,ovalue FROM jsondb A INNER JOIN (");
            SqlExpressionBuilder.create(this, o.filter(), baseDBPath).build(sql, binds);
            sql.append(") B ON A.path LIKE B.match_path||'%'");
        }
        if (o.startAfter() != null) {
            String startAfter = validateKey(o.startAfter());
            if (o.order() == GetOptions.Order.DESC) {
                sql.append(" and path <= :startAfter");
                binds.add(query -> {
                    String bindPath = baseDBPath + startAfter;
                    query.bind("startAfter", bindPath);
                });
            } else {
                sql.append(" and path >= :startAfter");
                binds.add(query -> {
                    String bindPath = baseDBPath + incrementKey(startAfter);
                    query.bind("startAfter", bindPath);
                });
            }
        }
        if (o.startAt() != null) {
            String startAt = validateKey(o.startAt());
            if (o.order() == GetOptions.Order.DESC) {
                sql.append(" and path < :startAt");
                binds.add(query -> {
                    String bindPath = baseDBPath + incrementKey(startAt);
                    query.bind("startAt", bindPath);
                });
            } else {
                sql.append(" and path >= :startAt");
                binds.add(query -> {
                    String bindPath = baseDBPath + startAt;
                    query.bind("startAt", bindPath);
                });
            }
        }
        if (o.endAt() != null) {
            String endAt = validateKey(o.endAt());
            if (o.order() == GetOptions.Order.DESC) {
                sql.append(" and path > :endAt");
                binds.add(query -> {
                    String value = baseDBPath + endAt;
                    query.bind("endAt", value);
                });
            } else {
                sql.append(" and path < :endAt");
                binds.add(query -> {
                    String bindPath = baseDBPath + incrementKey(endAt);
                    query.bind("endAt", bindPath);
                });
            }
        }
        if (o.endBefore() != null) {
            String endBefore = validateKey(o.endBefore());
            if (o.order() == GetOptions.Order.DESC) {
                sql.append(" and path >= :endBefore");
                binds.add(query -> {
                    String value = baseDBPath + incrementKey(endBefore);
                    query.bind("endBefore", value);
                });
            } else {
                sql.append(" and path < :endBefore");
                binds.add(query -> {
                    String value = baseDBPath + endBefore;
                    query.bind("endBefore", value);
                });
            }
        }
        sql.append(" order by path ").append(order);
        Query<Map<String, Object>> query = h.createQuery(sql.toString()).bind("like", like);
        for (Consumer<Query<Map<String, Object>>> bind : binds) {
            bind.accept(query);
        }
        ResultIterator<JsonRecord> iterator = query.map(JsonRecordMapper.INSTANCE).iterator();
        try {
            // At this point we know whether we can produce results.
            if (iterator.hasNext()) {
                result = output -> {
                    try (JsonRecordConsumer toJson = new JsonRecordConsumer(baseDBPath, output, o)) {
                        while (!toJson.isClosed() && iterator.hasNext()) {
                            toJson.accept(iterator.next());
                        }
                    } catch (IOException e) {
                        throw new JsonDBException(e);
                    } finally {
                        iterator.close();
                        h.close();
                    }
                };
            }
        } finally {
            // if we are producing results, then defer closing the iterator
            if (result == null) {
                iterator.close();
            }
        }
    } finally {
        // if we are producing results, then defer closing the handle
        if (result == null) {
            h.close();
        }
    }
    return result;
}
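The null return is significant: callers only get a Consumer when at least one record matched, and the deferred iterator/handle cleanup runs inside that consumer. A hedged usage sketch; the jsondb instance and the /pets path are illustrative, not from the project:

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.util.function.Consumer;

// Hypothetical caller of getAsStreamingOutput()
static void dump(SqlJsonDB jsondb) throws Exception {
    Consumer<OutputStream> stream = jsondb.getAsStreamingOutput("/pets", new GetOptions());
    if (stream == null) {
        System.out.println("no records under /pets"); // null means nothing matched
        return;
    }
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    stream.accept(buffer); // the consumer closes the iterator and handle itself
    System.out.println(buffer.toString("UTF-8"));
}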
Use of org.skife.jdbi.v2.Query in project providence by morimekta.
The class MessageInserter, method execute().
public int execute(Handle handle, Collection<M> items) {
    if (items.isEmpty()) {
        throw new IllegalArgumentException("Nothing to insert");
    }
    // One placeholder group per item (valueMarkers, e.g. "(?, ?, ?)"), joined into
    // a single multi-row INSERT statement.
    String query = queryPrefix +
                   items.stream().map(item -> valueMarkers).collect(Collectors.joining(", ")) +
                   querySuffix;
    Update update = handle.createStatement(query);
    int offset = 0;
    for (M item : items) {
        for (String column : columnOrder) {
            F field = columnToFieldMap.get(column);
            int type = columnTypeMap.get(column);
            update.bind(offset++, new MessageFieldArgument<>(item, field, type));
        }
    }
    return update.execute();
}
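Joining one group of value markers per item produces a single multi-row INSERT, so an arbitrary batch costs one statement and one database round trip. A sketch of a call site, assuming an already-configured MessageInserter named inserter and a JDBI v2 DBI named dbi (both names are illustrative):

// Hypothetical call site; 'inserter' was built elsewhere with the table name,
// column order, and field mappings, and 'messages' is a Collection of
// providence messages matching the inserter's type parameters.
try (Handle handle = dbi.open()) { // JDBI v2 Handle is Closeable
    int rows = inserter.execute(handle, messages);
    System.out.println("inserted " + rows + " rows with one multi-row INSERT");
}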
Use of org.skife.jdbi.v2.Query in project killbill by killbill.
The class EntitySqlDaoWrapperInvocationHandler, method invokeWithAuditAndHistory().
private Object invokeWithAuditAndHistory(final Audited auditedAnnotation, final Method method, final Object[] args) throws Throwable {
    final InternalCallContext contextMaybeWithoutAccountRecordId = retrieveContextFromArguments(args);
    final List<String> entityIds = retrieveEntityIdsFromArguments(method, args);
    Preconditions.checkState(!entityIds.isEmpty(), "@Audited Sql method must have entities (@Bind(\"id\")) as arguments");
    // We cannot always infer the TableName from the signature
    TableName tableName = retrieveTableNameFromArgumentsIfPossible(Arrays.asList(args));
    final ChangeType changeType = auditedAnnotation.value();
    final boolean isBatchQuery = method.getAnnotation(SqlBatch.class) != null;
    // Get the current state before deletion for the history tables
    final Map<Long, M> deletedAndUpdatedEntities = new HashMap<Long, M>();
    // Real jdbc call
    final Object obj = prof.executeWithProfiling(ProfilingFeatureType.DAO_DETAILS, getProfilingId("raw", method), new WithProfilingCallback<Object, Throwable>() {
        @Override
        public Object execute() throws Throwable {
            return executeJDBCCall(method, args);
        }
    });
    if (entityIds.isEmpty()) {
        return obj;
    }
    InternalCallContext context = null;
    // Retrieve record_id(s) for audit and history tables
    final List<Long> entityRecordIds = new LinkedList<Long>();
    if (changeType == ChangeType.INSERT) {
        Preconditions.checkNotNull(tableName, "Insert query should have an EntityModelDao as argument: %s", args);
        if (isBatchQuery) {
            entityRecordIds.addAll((Collection<? extends Long>) obj);
        } else {
            entityRecordIds.add((Long) obj);
        }
        // Snowflake
        if (TableName.ACCOUNT.equals(tableName)) {
            Preconditions.checkState(entityIds.size() == 1, "Bulk insert of accounts isn't supported");
            // AccountModelDao in practice
            final TimeZoneAwareEntity accountModelDao = retrieveTimeZoneAwareEntityFromArguments(args);
            context = internalCallContextFactory.createInternalCallContext(accountModelDao, entityRecordIds.get(0), contextMaybeWithoutAccountRecordId);
        }
    } else {
        // Rehydrate entry with latest state
        final List<M> retrievedEntities = sqlDao.getByIdsIncludedDeleted(entityIds, contextMaybeWithoutAccountRecordId);
        printSQLWarnings();
        for (final M entity : retrievedEntities) {
            deletedAndUpdatedEntities.put(entity.getRecordId(), entity);
            entityRecordIds.add(entity.getRecordId());
            if (tableName == null) {
                tableName = entity.getTableName();
            } else {
                Preconditions.checkState(tableName == entity.getTableName(), "Entities with different TableName");
            }
        }
    }
    Preconditions.checkState(entityIds.size() == entityRecordIds.size(), "SqlDao method has %s as ids but found %s as recordIds", entityIds, entityRecordIds);
    // Context validations
    if (context != null) {
        // context was already updated, see above (createAccount code path). Just make sure we don't attempt to bulk create
        Preconditions.checkState(entityIds.size() == 1, "Bulk insert of accounts isn't supported");
    } else {
        context = contextMaybeWithoutAccountRecordId;
        final boolean tableWithoutAccountRecordId = tableName == TableName.TENANT || tableName == TableName.TENANT_BROADCASTS || tableName == TableName.TENANT_KVS || tableName == TableName.TAG_DEFINITIONS || tableName == TableName.SERVICE_BRODCASTS || tableName == TableName.NODE_INFOS;
        Preconditions.checkState(context.getAccountRecordId() != null || tableWithoutAccountRecordId, "accountRecordId should be set for tableName=%s and changeType=%s", tableName, changeType);
    }
    final Collection<M> reHydratedEntities = updateHistoryAndAudit(entityRecordIds, deletedAndUpdatedEntities, tableName, changeType, context);
    if (method.getReturnType().equals(Void.TYPE)) {
        // Return early
        return null;
    } else if (isBatchQuery) {
        // Return the raw jdbc response (generated keys)
        return obj;
    } else {
        // PERF: override the return value with the reHydrated entity to avoid an extra 'get' in the transaction
        // (see EntityDaoBase#createAndRefresh for an example, but it works for updates as well).
        Preconditions.checkState(entityRecordIds.size() == 1, "Invalid number of entityRecordIds: %s", entityRecordIds);
        if (!reHydratedEntities.isEmpty()) {
            Preconditions.checkState(reHydratedEntities.size() == 1, "Invalid number of entities: %s", reHydratedEntities);
            return Iterables.<M>getFirst(reHydratedEntities, null);
        } else {
            // Updated entity not retrieved yet, we have to go back to the database
            final M entity = sqlDao.getByRecordId(entityRecordIds.get(0), context);
            printSQLWarnings();
            return entity;
        }
    }
}
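For reference, this handler intercepts JDBI SQL-object methods carrying killbill's @Audited annotation; the precondition message above spells out the contract that such methods bind entity ids via @Bind("id"). A hedged sketch of the shape of an intercepted method; the DAO name, the SQL statement's method, and the context-binding annotation are assumptions, not killbill's actual declarations:

import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.BindBean;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

// Hypothetical DAO fragment illustrating the intercepted shape
public interface TagSqlDao {
    @SqlUpdate
    @Audited(ChangeType.DELETE)                      // routes through invokeWithAuditAndHistory
    void markTagAsDeleted(@Bind("id") String tagId,  // entity id, per the contract above
                          @BindBean InternalCallContext context); // binding style assumed
}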