Use of com.hazelcast.query.impl.Indexes in project hazelcast by hazelcast.
The class MapReplicationStateHolder, method applyState.
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:methodlength",
        "checkstyle:cyclomaticcomplexity", "checkstyle:nestedifdepth"})
void applyState() {
    ThreadUtil.assertRunningOnPartitionThread();

    applyIndexesState();

    if (!isNullOrEmpty(data)) {
        for (Map.Entry<String, List> dataEntry : data.entrySet()) {
            String mapName = dataEntry.getKey();
            List keyRecordExpiry = dataEntry.getValue();
            RecordStore recordStore = operation.getRecordStore(mapName);
            recordStore.beforeOperation();
            try {
                initializeRecordStore(mapName, recordStore);
                recordStore.setPreMigrationLoadedStatus(loaded.get(mapName));

                MapContainer mapContainer = recordStore.getMapContainer();
                PartitionContainer partitionContainer = mapContainer.getMapServiceContext()
                        .getPartitionContainer(operation.getPartitionId());
                for (Map.Entry<String, IndexConfig> indexDefinition
                        : mapContainer.getIndexDefinitions().entrySet()) {
                    Indexes indexes = mapContainer.getIndexes(partitionContainer.getPartitionId());
                    indexes.addOrGetIndex(indexDefinition.getValue());
                }

                final Indexes indexes = mapContainer.getIndexes(partitionContainer.getPartitionId());
                final boolean populateIndexes = indexesMustBePopulated(indexes, operation);

                InternalIndex[] indexesSnapshot = null;
                if (populateIndexes) {
                    // defensively clear possible stale leftovers in non-global indexes
                    // from the previous failed promotion attempt
                    indexesSnapshot = indexes.getIndexes();
                    Indexes.beginPartitionUpdate(indexesSnapshot);
                    indexes.clearAll();
                }

                long nowInMillis = Clock.currentTimeMillis();
                forEachReplicatedRecord(keyRecordExpiry, mapContainer, recordStore,
                        populateIndexes, nowInMillis);

                if (populateIndexes) {
                    Indexes.markPartitionAsIndexed(partitionContainer.getPartitionId(), indexesSnapshot);
                }
            } finally {
                recordStore.afterOperation();
            }
        }
    }

    for (Map.Entry<String, LocalRecordStoreStats> statsEntry : recordStoreStatsPerMapName.entrySet()) {
        String mapName = statsEntry.getKey();
        LocalRecordStoreStats stats = statsEntry.getValue();
        RecordStore recordStore = operation.getRecordStore(mapName);
        recordStore.setStats(stats);
    }
}
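When indexes must be populated, applyState follows a fixed sequence: snapshot the partition's indexes, flag them as under update, clear stale leftovers, re-insert the replicated records, then mark the partition as indexed. A condensed sketch of just that sequence, assuming indexes and partitionId come from the surrounding migration operation as above:

    InternalIndex[] snapshot = indexes.getIndexes();
    Indexes.beginPartitionUpdate(snapshot);   // flag these indexes as mid-update
    indexes.clearAll();                       // drop leftovers from a previously failed promotion
    // ... re-insert every replicated record, as forEachReplicatedRecord does above ...
    Indexes.markPartitionAsIndexed(partitionId, snapshot);

If the sequence is interrupted before markPartitionAsIndexed, the next attempt simply clears and repopulates again, which is exactly what the "defensively clear" comment guards against.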
Use of com.hazelcast.query.impl.Indexes in project hazelcast by hazelcast.
The class PartitionWideEntryOperation, method runWithIndex.
/**
 * @return {@code true} if the index has been used and the EP
 * has been executed on its keys, {@code false} otherwise
 */
private boolean runWithIndex() {
    // here we try to query the partitioned index
    Predicate predicate = getPredicate();
    if (predicate == null) {
        return false;
    }

    // we use the partitioned index to operate on the selected keys only
    Indexes indexes = mapContainer.getIndexes(getPartitionId());
    Iterable<QueryableEntry> entries = indexes.query(queryOptimizer.optimize(predicate, indexes), 1);
    if (entries == null) {
        return false;
    }

    responses = new MapEntries();

    // when NATIVE we can pass null as the predicate, since it's all
    // happening on the partition thread so no data changes may occur
    operator = operator(this, entryProcessor, null);
    keysFromIndex = new HashSet<>();
    for (QueryableEntry entry : entries) {
        keysFromIndex.add(entry.getKeyData());
        Data response = operator.operateOnKey(entry.getKeyData())
                .doPostOperateOps()
                .getResult();
        if (response != null) {
            responses.add(entry.getKeyData(), response);
        }
    }

    return true;
}
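A null result from indexes.query(...) means no partitioned index could serve the optimized predicate, so runWithIndex() returns false and the caller must fall back to a full partition scan. A caller-side sketch of that contract (runWithPartitionScan is an assumed name for the fallback, not taken from the source):

    if (!runWithIndex()) {
        // no usable index: assumed fallback that applies the entry
        // processor to every record in the partition
        runWithPartitionScan();
    }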
Use of com.hazelcast.query.impl.Indexes in project hazelcast by hazelcast.
The class MapChunk, method applyIndexStateBefore.
private void applyIndexStateBefore(RecordStore recordStore) {
    MapContainer mapContainer = recordStore.getMapContainer();
    PartitionContainer partitionContainer = mapContainer.getMapServiceContext()
            .getPartitionContainer(getPartitionId());

    for (Map.Entry<String, IndexConfig> indexDefinition
            : mapContainer.getIndexDefinitions().entrySet()) {
        Indexes indexes = mapContainer.getIndexes(partitionContainer.getPartitionId());
        indexes.addOrGetIndex(indexDefinition.getValue());
    }

    Indexes indexes = mapContainer.getIndexes(partitionContainer.getPartitionId());
    boolean populateIndexes = indexesMustBePopulated(indexes);
    if (populateIndexes) {
        // defensively clear possible stale leftovers in non-global indexes
        // from the previous failed promotion attempt
        Indexes.beginPartitionUpdate(indexes.getIndexes());
        indexes.clearAll();
    }
}
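Replaying index definitions per chunk is safe because addOrGetIndex, as the name suggests, returns the existing index when one has already been created for the same definition. A minimal sketch of registering a definition against a partition's Indexes, assuming mapContainer and partitionId come from the surrounding operation (the "age" attribute is illustrative):

    IndexConfig definition = new IndexConfig(IndexType.SORTED, "age");
    Indexes indexes = mapContainer.getIndexes(partitionId);
    indexes.addOrGetIndex(definition); // returns the existing index if already registered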
Use of com.hazelcast.query.impl.Indexes in project hazelcast by hazelcast.
The class MapChunk, method putOrUpdateReplicatedDataWithPerNodeEviction.
// owned or backup
private void putOrUpdateReplicatedDataWithPerNodeEviction(RecordStore recordStore) {
    MapContainer mapContainer = recordStore.getMapContainer();
    EvictionConfig evictionConfig = mapContainer.getMapConfig().getEvictionConfig();
    long ownedEntryCountOnThisNode = entryCountOnThisNode(mapContainer);

    int count = 0;
    long nowInMillis = Clock.currentTimeMillis();
    do {
        Data dataKey = (Data) keyRecordExpiry.poll();
        Record record = (Record) keyRecordExpiry.poll();
        ExpiryMetadata expiryMetadata = (ExpiryMetadata) keyRecordExpiry.poll();

        if (ownedEntryCountOnThisNode >= evictionConfig.getSize()) {
            if (getReplicaIndex() == 0) {
                recordStore.doPostEvictionOperations(dataKey, record.getValue(), ExpiryReason.NOT_EXPIRED);
            }
        } else {
            Indexes indexes = mapContainer.getIndexes(recordStore.getPartitionId());
            recordStore.putOrUpdateReplicatedRecord(dataKey, record, expiryMetadata,
                    indexesMustBePopulated(indexes), nowInMillis);
            ownedEntryCountOnThisNode++;
        }

        if (++count % DISPOSE_AT_COUNT == 0) {
            recordStore.disposeDeferredBlocks();
        }
    } while (!keyRecordExpiry.isEmpty());

    recordStore.disposeDeferredBlocks();
}
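The capacity check compares the member's owned entry count against evictionConfig.getSize(), which is meaningful here because the size is a per-node cap: entries beyond it are dropped (with post-eviction callbacks on the owner replica) rather than stored. A hedged example of the kind of map configuration this path serves, using the Hazelcast 4/5 config API (map name and size are illustrative):

    MapConfig mapConfig = new MapConfig("orders");
    mapConfig.getEvictionConfig()
            .setEvictionPolicy(EvictionPolicy.LRU)
            .setMaxSizePolicy(MaxSizePolicy.PER_NODE) // cap applies per member, hence entryCountOnThisNode above
            .setSize(10_000);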
Use of com.hazelcast.query.impl.Indexes in project hazelcast by hazelcast.
The class QueryRunner, method runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition.
// Migration-unsafe querying: migration stamps are not validated, so this assumes it runs on
// the partition thread for a single partition. If the index is global, it won't be consulted.
public Result runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition(Query query, int partitionId) {
    MapContainer mapContainer = mapServiceContext.getMapContainer(query.getMapName());
    PartitionIdSet partitions = singletonPartitionIdSet(partitionCount, partitionId);

    // first we optimize the query
    Predicate predicate = queryOptimizer.optimize(query.getPredicate(), mapContainer.getIndexes(partitionId));

    Iterable<QueryableEntry> entries = null;
    Indexes indexes = mapContainer.getIndexes(partitionId);
    if (indexes != null && !indexes.isGlobal()) {
        entries = indexes.query(predicate, partitions.size());
    }

    Result result;
    if (entries == null) {
        result = createResult(query, partitions);
        partitionScanExecutor.execute(query.getMapName(), predicate, partitions, result);
        result.completeConstruction(partitions);
    } else {
        result = populateNonEmptyResult(query, entries, partitions);
    }

    return result;
}
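Only non-global (per-partition) indexes are consulted here: a global index spans all partitions owned by the member, so using it for a single partition's result would presumably be unsafe without the migration-stamp validation this path skips. A condensed sketch of the decision, with the reasoning as comments (indexes and predicate as above):

    Iterable<QueryableEntry> entries = null;
    if (indexes != null && !indexes.isGlobal()) {
        // partitioned index: holds only this partition's entries, safe on the partition thread
        entries = indexes.query(predicate, partitions.size());
    }
    if (entries == null) {
        // no usable partitioned index: scan just this partition's records instead
    }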