Use of io.spine.server.entity.storage.EntityRecordWithColumns in project core-java by SpineEventEngine.
From the class ProjectionRepository, method store().
/**
 * Stores a number of projections at a time.
 *
 * @param projections the {@link Projection}s to store in bulk
 */
@VisibleForTesting
void store(Collection<P> projections) {
    final RecordStorage<I> storage = recordStorage();
    final Map<I, EntityRecordWithColumns> records =
            Maps.newHashMapWithExpectedSize(projections.size());
    for (P projection : projections) {
        final I id = projection.getId();
        final EntityRecordWithColumns record = toRecord(projection);
        records.put(id, record);
    }
    storage.write(records);
}
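The toRecord conversion is not shown on this page. A plausible sketch of it, assuming the EntityRecordWithColumns.create(record, entity, storage) factory that appears in the filtering test below; the actual repository may route through additional converters:

private EntityRecordWithColumns toRecord(P projection) {
    // Pack the projection state into an Any, as fillStorage does below.
    final Any state = AnyPacker.pack(projection.getState());
    final EntityRecord record = EntityRecord.newBuilder()
                                            .setState(state)
                                            .build();
    // Capture the projection's column values together with the record;
    // the `create` factory used here is an assumption based on the tests below.
    return EntityRecordWithColumns.create(record, projection, recordStorage());
}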
Use of io.spine.server.entity.storage.EntityRecordWithColumns in project core-java by SpineEventEngine.
From the class ProjectionStorageShould, method fillStorage().
// Converter nullability issues
@SuppressWarnings("ConstantConditions")
private List<ProjectId> fillStorage(int count) {
    final List<ProjectId> ids = new LinkedList<>();
    for (int i = 0; i < count; i++) {
        final ProjectId id = newId();
        final Project state = Given.project(id, format("project-%d", i));
        final Any packedState = AnyPacker.pack(state);
        final EntityRecord rawRecord = EntityRecord.newBuilder()
                                                   .setState(packedState)
                                                   .setVersion(GivenVersion.withNumber(1))
                                                   .build();
        final EntityRecordWithColumns record = withLifecycleColumns(rawRecord);
        storage.write(id, record);
        ids.add(id);
    }
    return ids;
}
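A caller could verify what fillStorage wrote by reading the records back and unpacking their states. A minimal sketch, assuming a read(id) accessor that returns the stored EntityRecord (the exact read signature is an assumption) and that Given.project(...) writes the passed ID into the state:

final List<ProjectId> ids = fillStorage(42);
for (ProjectId id : ids) {
    final EntityRecord record = storage.read(id)
                                       .get(); // read(id) returning Optional is assumed
    final Project state = AnyPacker.unpack(record.getState());
    assertEquals(id, state.getId()); // assumes Given.project(...) sets the id
}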
Use of io.spine.server.entity.storage.EntityRecordWithColumns in project core-java by SpineEventEngine.
From the class RecordStorage, method write().
/**
 * {@inheritDoc}
 */
@Override
public void write(I id, EntityRecord record) {
    final EntityRecordWithColumns recordWithStorageFields =
            EntityRecordWithColumns.of(record);
    write(id, recordWithStorageFields);
}
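This overload covers callers that hold only a plain EntityRecord: EntityRecordWithColumns.of(record) wraps it without any column values. A minimal sketch of the contrast, assuming the create(record, entity, storage) factory used in the tests below:

// No entity at hand: the record is wrapped without column values,
// so column-based filtering cannot match it by entity columns.
storage.write(id, EntityRecordWithColumns.of(record));

// Entity available: its column values are extracted and stored alongside
// the record, enabling queries such as filter_records_by_columns below.
storage.write(id, EntityRecordWithColumns.create(record, entity, storage));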
Use of io.spine.server.entity.storage.EntityRecordWithColumns in project core-java by SpineEventEngine.
From the class RecordStorageShould, method filter_records_by_columns().
// Complex test case (still tests a single operation)
@SuppressWarnings("OverlyLongMethod")
@Test
public void filter_records_by_columns() {
    final Project.Status requiredValue = DONE;
    final Int32Value wrappedValue = Int32Value.newBuilder()
                                              .setValue(requiredValue.getNumber())
                                              .build();
    final Version versionValue = Version.newBuilder()
                                        // Value of the counter after one columns extraction
                                        .setNumber(2)
                                        .build();
    final ColumnFilter status = eq("projectStatusValue", wrappedValue);
    final ColumnFilter version = eq("counterVersion", versionValue);
    final CompositeColumnFilter aggregatingFilter = CompositeColumnFilter.newBuilder()
                                                                         .setOperator(ALL)
                                                                         .addFilter(status)
                                                                         .addFilter(version)
                                                                         .build();
    final EntityFilters filters = EntityFilters.newBuilder()
                                               .addFilter(aggregatingFilter)
                                               .build();
    final RecordStorage<I> storage = getStorage();
    final EntityQuery<I> query = EntityQueries.from(filters, storage);
    final I idMatching = newId();
    final I idWrong1 = newId();
    final I idWrong2 = newId();
    final TestCounterEntity<I> matchingEntity = new TestCounterEntity<>(idMatching);
    final TestCounterEntity<I> wrongEntity1 = new TestCounterEntity<>(idWrong1);
    final TestCounterEntity<I> wrongEntity2 = new TestCounterEntity<>(idWrong2);
    // 2 of 3 have required values
    matchingEntity.setStatus(requiredValue);
    wrongEntity1.setStatus(requiredValue);
    wrongEntity2.setStatus(CANCELLED);
    // Change internal Entity state
    wrongEntity1.getCounter();
    // After the mutation above, the single matching record is the one under the `idMatching` ID
    final EntityRecord fineRecord = newStorageRecord(idMatching, newState(idMatching));
    final EntityRecord notFineRecord1 = newStorageRecord(idWrong1, newState(idWrong1));
    final EntityRecord notFineRecord2 = newStorageRecord(idWrong2, newState(idWrong2));
    final EntityRecordWithColumns recordRight = create(fineRecord, matchingEntity, storage);
    final EntityRecordWithColumns recordWrong1 = create(notFineRecord1, wrongEntity1, storage);
    final EntityRecordWithColumns recordWrong2 = create(notFineRecord2, wrongEntity2, storage);
    storage.write(idMatching, recordRight);
    storage.write(idWrong1, recordWrong1);
    storage.write(idWrong2, recordWrong2);
    final Iterator<EntityRecord> readRecords = storage.readAll(query, FieldMask.getDefaultInstance());
    assertSingleRecord(fineRecord, readRecords);
}
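The assertSingleRecord helper is not defined on this page. A plausible sketch of its contract, namely that the iterator yields exactly one record equal to the expected one:

private static void assertSingleRecord(EntityRecord expected, Iterator<EntityRecord> actual) {
    assertTrue(actual.hasNext());
    final EntityRecord actualRecord = actual.next();
    assertFalse(actual.hasNext()); // no records besides the matching one
    assertEquals(expected, actualRecord);
}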
Use of io.spine.server.entity.storage.EntityRecordWithColumns in project core-java by SpineEventEngine.
From the class RecordStorageShould, method write_record_bulk().
@Test
public void write_record_bulk() {
    final RecordStorage<I> storage = getStorage();
    final int bulkSize = 5;
    final Map<I, EntityRecordWithColumns> initial = new HashMap<>(bulkSize);
    for (int i = 0; i < bulkSize; i++) {
        final I id = newId();
        final EntityRecord record = newStorageRecord(id);
        initial.put(id, EntityRecordWithColumns.of(record));
    }
    storage.write(initial);
    final Collection<EntityRecord> actual = newArrayList(storage.readMultiple(initial.keySet()));
    final Collection<EntityRecord> expected = Collections2.transform(initial.values(),
                                                                     RECORD_EXTRACTOR_FUNCTION);
    assertEquals(expected.size(), actual.size());
    assertTrue(actual.containsAll(expected));
    close(storage);
}
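RECORD_EXTRACTOR_FUNCTION is referenced above but not defined on this page. A plausible shape for it, assuming EntityRecordWithColumns exposes the wrapped record via a getRecord() accessor; the Guava Function type matches the Collections2.transform call above:

private static final Function<EntityRecordWithColumns, EntityRecord> RECORD_EXTRACTOR_FUNCTION =
        new Function<EntityRecordWithColumns, EntityRecord>() {
            @Override
            public EntityRecord apply(@Nullable EntityRecordWithColumns input) {
                checkNotNull(input); // Guava Preconditions; transform does not pass null here
                return input.getRecord(); // getRecord() accessor is an assumption
            }
        };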