Usage of org.apache.cayenne.query.Query in the Apache Cayenne project: class ClientServerChannelQueryAction, method interceptSinglePageQuery.
/**
 * Serves a single page of a previously cached paginated result. Returns
 * {@code DONE} when this action fully handled the query, {@code !DONE}
 * when the query must be processed further by the caller.
 */
private boolean interceptSinglePageQuery() {
    // only applicable when the query requests a concrete page of a cached list
    boolean isPageRequest = serverMetadata.getFetchOffset() >= 0
            && serverMetadata.getFetchLimit() > 0
            && serverMetadata.getCacheKey() != null;
    if (!isPageRequest) {
        return !DONE;
    }

    List page = channel.getQueryCache().get(serverMetadata);
    if (page == null) {
        // cache miss: re-run the originating query to repopulate the cache
        Query originatingQuery = serverMetadata.getOriginatingQuery();
        if (originatingQuery == null) {
            return !DONE;
        }

        new ClientServerChannelQueryAction(channel, originatingQuery).execute();
        page = channel.getQueryCache().get(serverMetadata);
        if (page == null) {
            throw new CayenneRuntimeException("No cached list for %s", serverMetadata.getCacheKey());
        }
    }

    int from = serverMetadata.getFetchOffset();
    int to = from + serverMetadata.getFetchLimit();

    // ship back just the requested range; the sender knows where it fits
    // inside its incremental list
    this.response = new ListResponse(new ArrayList<>(page.subList(from, to)));
    return DONE;
}
Usage of org.apache.cayenne.query.Query in the Apache Cayenne project: class DataDomainQueryAction, method runQuery.
/**
 * Routes the current query across the domain's nodes and executes the
 * resulting per-node query collections, accumulating results in
 * {@code fullResponse}.
 */
private void runQuery() {
    // start this run from a clean slate
    this.fullResponse = new GenericResponse();
    this.response = this.fullResponse;
    this.queriesByNode = null;
    this.queriesByExecutedQueries = null;

    // a non-null map signals downstream code to collect prefetched rows by path
    this.prefetchResultsByPath = (metadata.getPrefetchTree() == null || metadata.isFetchingDataRows())
            ? null
            : new HashMap<String, List>();

    // let the query distribute itself by node and by "executable" query
    query.route(this, domain.getEntityResolver(), null);

    // execute whatever got routed, one node at a time
    if (queriesByNode != null) {
        for (Map.Entry<QueryEngine, Collection<Query>> routed : queriesByNode.entrySet()) {
            routed.getKey().performQueries(routed.getValue(), this);
        }
    }
}
Usage of org.apache.cayenne.query.Query in the Apache Cayenne project: class DataDomainQueryAction, method interceptRefreshQuery.
/**
 * Intercepts a {@link RefreshQuery}, invalidating the appropriate snapshot and
 * query caches depending on the refresh scope (everything, specific objects, a
 * wrapped query, or cache groups).
 *
 * @return {@code DONE} if the query was a RefreshQuery and was fully handled
 *         here; {@code !DONE} otherwise so the caller continues processing.
 * @since 3.0
 */
private boolean interceptRefreshQuery() {
    if (!(query instanceof RefreshQuery)) {
        return !DONE;
    }
    RefreshQuery refreshQuery = (RefreshQuery) query;

    // 1. full refresh: clear snapshot cache (shared if present, else local)
    // and the whole query cache
    if (refreshQuery.isRefreshAll()) {
        // not sending any events - peer contexts will not get refreshed
        if (domain.getSharedSnapshotCache() != null) {
            domain.getSharedSnapshotCache().clear();
        } else {
            // remove snapshots from local ObjectStore only
            context.getObjectStore().getDataRowCache().clear();
        }
        context.getQueryCache().clear();

        GenericResponse response = new GenericResponse();
        response.addUpdateCount(1);
        this.response = response;
        return DONE;
    }

    // 2. refresh specific objects: invalidate their snapshots by ObjectId
    Collection<Persistent> objects = (Collection<Persistent>) refreshQuery.getObjects();
    if (objects != null && !objects.isEmpty()) {
        Collection<ObjectId> ids = new ArrayList<>(objects.size());
        for (final Persistent object : objects) {
            ids.add(object.getObjectId());
        }

        // typed empty collections instead of raw EMPTY_MAP/EMPTY_LIST constants
        // (same singleton instances, but no unchecked warnings)
        if (domain.getSharedSnapshotCache() != null) {
            // send an event for removed snapshots
            domain.getSharedSnapshotCache().processSnapshotChanges(context.getObjectStore(),
                    Collections.emptyMap(), Collections.emptyList(), ids, Collections.emptyList());
        } else {
            // remove snapshots from local ObjectStore only
            context.getObjectStore().getDataRowCache().processSnapshotChanges(context.getObjectStore(),
                    Collections.emptyMap(), Collections.emptyList(), ids, Collections.emptyList());
        }

        GenericResponse response = new GenericResponse();
        response.addUpdateCount(1);
        this.response = response;
        return DONE;
    }

    // 3. refresh a wrapped query: evict its cache entry and re-run it
    // (usually does a cascading refresh)
    if (refreshQuery.getQuery() != null) {
        Query cachedQuery = refreshQuery.getQuery();

        String cacheKey = cachedQuery.getMetaData(context.getEntityResolver()).getCacheKey();
        context.getQueryCache().remove(cacheKey);

        this.response = domain.onQuery(context, cachedQuery);
        return DONE;
    }

    // 4. refresh groups: drop each named cache group
    if (refreshQuery.getGroupKeys() != null && refreshQuery.getGroupKeys().length > 0) {
        String[] groups = refreshQuery.getGroupKeys();
        for (String group : groups) {
            domain.getQueryCache().removeGroup(group);
        }

        GenericResponse response = new GenericResponse();
        response.addUpdateCount(1);
        this.response = response;
        return DONE;
    }

    return !DONE;
}
Usage of org.apache.cayenne.query.Query in the Apache Cayenne project: class DataDomainUpdateBucket, method appendQueriesInternal.
@Override
// Builds UPDATE batch queries for all modified objects in this bucket and
// appends them to 'queries'. Objects are grouped into batches per DbEntity,
// keyed by (set of updated columns, set of null qualifier columns) so that
// rows sharing the same SQL shape end up in the same UpdateBatchQuery.
void appendQueriesInternal(Collection<Query> queries) {
    DataDomainDBDiffBuilder diffBuilder = new DataDomainDBDiffBuilder();
    DataNodeSyncQualifierDescriptor qualifierBuilder = new DataNodeSyncQualifierDescriptor();
    for (DbEntity dbEntity : dbEntities) {
        Collection<DbEntityClassDescriptor> descriptors = descriptorsByDbEntity.get(dbEntity);
        // batches are keyed by [updated-column names, null qualifier names];
        // LinkedHashMap preserves creation order of the batches
        Map<Object, Query> batches = new LinkedHashMap<>();
        for (DbEntityClassDescriptor descriptor : descriptors) {
            ObjEntity entity = descriptor.getEntity();
            diffBuilder.reset(descriptor);
            qualifierBuilder.reset(descriptor);
            // NOTE(review): presumably true only for the entity's own table,
            // false for flattened/inherited tables — confirm against callers
            boolean isRootDbEntity = entity.getDbEntity() == dbEntity;
            for (Persistent o : objectsByDescriptor.get(descriptor.getClassDescriptor())) {
                ObjectDiff diff = parent.objectDiff(o.getObjectId());
                Map<String, Object> snapshot = diffBuilder.buildDBDiff(diff);
                // check whether MODIFIED object has real db-level modifications
                if (snapshot == null) {
                    continue;
                }
                // after we filtered out "fake" modifications, check if an
                // attempt is made to modify a read only entity
                checkReadOnly(entity);
                Map<String, Object> qualifierSnapshot = qualifierBuilder.createQualifierSnapshot(diff);
                // organize batches by the updated columns + nulls in qualifier
                Set<String> snapshotSet = snapshot.keySet();
                // columns whose qualifier value is null need IS NULL SQL, so
                // they change the batch's statement shape
                Set<String> nullQualifierNames = new HashSet<>();
                for (Map.Entry<String, Object> entry : qualifierSnapshot.entrySet()) {
                    if (entry.getValue() == null) {
                        nullQualifierNames.add(entry.getKey());
                    }
                }
                List<Set<String>> batchKey = Arrays.asList(snapshotSet, nullQualifierNames);
                UpdateBatchQuery batch = (UpdateBatchQuery) batches.get(batchKey);
                if (batch == null) {
                    batch = new UpdateBatchQuery(dbEntity, qualifierBuilder.getAttributes(), updatedAttributes(dbEntity, snapshot), nullQualifierNames, 10);
                    batch.setUsingOptimisticLocking(qualifierBuilder.isUsingOptimisticLocking());
                    batches.put(batchKey, batch);
                }
                batch.add(qualifierSnapshot, snapshot, o.getObjectId());
                // update replacement id with meaningful PK changes
                if (isRootDbEntity) {
                    Map<String, Object> replacementId = o.getObjectId().getReplacementIdMap();
                    for (DbAttribute pk : dbEntity.getPrimaryKeys()) {
                        String name = pk.getName();
                        // record a modified PK value unless a replacement was
                        // already registered for that column
                        if (snapshot.containsKey(name) && !replacementId.containsKey(name)) {
                            replacementId.put(name, snapshot.get(name));
                        }
                    }
                }
            }
        }
        queries.addAll(batches.values());
    }
}
Usage of org.apache.cayenne.query.Query in the Apache Cayenne project: class DataNode, method performQueries.
/**
 * Runs queries using Connection obtained from internal DataSource.
 *
 * @since 1.1
 */
@Override
public void performQueries(Collection<? extends Query> queries, OperationObserver callback) {
    int count = queries.size();
    if (count == 0) {
        return;
    }
    if (callback.isIteratedResult() && count > 1) {
        throw new CayenneRuntimeException("Iterated queries are not allowed in a batch. Batch size: %d", count);
    }

    // Touch the adapter first: AutoAdapter initializes lazily and may open a
    // connection of its own. Triggering that now, before we grab our own
    // connection, avoids holding two connections simultaneously and possibly
    // hitting the connection pool's upper limit.
    getAdapter().getExtendedTypes();

    final Connection connection;
    try {
        connection = getDataSource().getConnection();
    } catch (Exception globalEx) {
        // could not even get a connection: log, poison any active
        // transaction, report globally and bail out
        getJdbcEventLogger().logQueryError(globalEx);

        Transaction tx = BaseTransaction.getThreadTransaction();
        if (tx != null) {
            tx.setRollbackOnly();
        }
        callback.nextGlobalException(globalEx);
        return;
    }

    try {
        DataNodeQueryAction runner = new DataNodeQueryAction(this, callback);
        for (Query nextQuery : queries) {
            // each query gets its own exception handling
            try {
                runner.runQuery(connection, nextQuery);
            } catch (Exception queryEx) {
                getJdbcEventLogger().logQueryError(queryEx);

                // tell the consumer about the failure and abort the rest of
                // the batch
                callback.nextQueryException(nextQuery, queryEx);

                Transaction tx = BaseTransaction.getThreadTransaction();
                if (tx != null) {
                    tx.setRollbackOnly();
                }
                break;
            }
        }
    } finally {
        try {
            connection.close();
        } catch (SQLException ignored) {
            // close failures are deliberately swallowed
        }
    }
}
Aggregations