Use of com.torodb.core.transaction.metainf.ImmutableMetaScalar in project torodb (by torodb):
class BatchMetaDocPart, method addMetaScalar.
/**
 * Registers a new scalar on the delegate doc part and tracks it as a change
 * introduced by the current batch, notifying the change listener.
 *
 * @param identifier the database identifier of the new scalar
 * @param type       the field type of the new scalar
 * @return the immutable scalar created by the delegate
 * @throws IllegalArgumentException if the delegate rejects the scalar
 */
@Override
public ImmutableMetaScalar addMetaScalar(String identifier, FieldType type) throws IllegalArgumentException {
    // Delegate the actual registration first; only record it if that succeeds.
    final ImmutableMetaScalar added = delegate.addMetaScalar(identifier, type);
    scalarChangesOnBatch.add(added);
    // Let the consumer know this doc part was modified on the current batch.
    changeConsumer.accept(this);
    return added;
}
Use of com.torodb.core.transaction.metainf.ImmutableMetaScalar in project torodb (by torodb):
class DefaultToBackendFunction, method apply.
/**
 * Translates the given collection data into an ordered list of backend jobs:
 * for each doc part, first the DDL jobs it requires (create doc part and/or
 * add scalar/field), then the job that inserts its data.
 *
 * @param collectionData the data to translate; every contained meta doc part
 *                       must be a {@link BatchMetaDocPart}
 * @return the jobs to execute, in execution order
 */
public Iterable<BackendTransactionJob> apply(CollectionData collectionData) {
    List<BackendTransactionJob> jobs = new ArrayList<>();
    for (DocPartData data : collectionData.orderedDocPartData()) {
        assert data.getMetaDocPart() instanceof BatchMetaDocPart : "This function can only use inputs whose meta doc part information is an instance of " + BatchMetaDocPart.class;
        BatchMetaDocPart docPart = (BatchMetaDocPart) data.getMetaDocPart();
        if (docPart.isCreatedOnCurrentBatch()) {
            // Brand-new doc part: create it, then declare every scalar and field it has.
            jobs.add(factory.createAddDocPartDdlJob(database, collection, docPart));
            docPart.streamScalars()
                .map(scalar -> factory.createAddScalarDdlJob(database, collection, docPart, scalar))
                .forEachOrdered(jobs::add);
            docPart.streamFields()
                .map(field -> factory.createAddFieldDdlJob(database, collection, docPart, field))
                .forEachOrdered(jobs::add);
        } else {
            // The doc part already exists: only scalars and fields added on this batch are new.
            for (ImmutableMetaScalar scalar : docPart.getOnBatchModifiedMetaScalars()) {
                jobs.add(factory.createAddScalarDdlJob(database, collection, docPart, scalar));
            }
            for (ImmutableMetaField field : docPart.getOnBatchModifiedMetaFields()) {
                jobs.add(factory.createAddFieldDdlJob(database, collection, docPart, field));
            }
        }
        // Data insertion always comes after the DDL jobs of its own doc part.
        jobs.add(factory.insert(database, collection, data));
    }
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Executing the following jobs: {}", jobs);
    }
    return jobs;
}
Use of com.torodb.core.transaction.metainf.ImmutableMetaScalar in project torodb (by torodb):
class DefaultToBackendFunctionTest, method testApply_newDocPart.
/**
 * Checks that a doc part created on the current batch produces exactly four
 * jobs (create doc part, add field, add scalar, insert) and that they respect
 * the required ordering: create doc part before add field/scalar, and all DDL
 * jobs before the insert job.
 */
@Test
public void testApply_newDocPart() {
    // Fail fast on any mock interaction that is not explicitly stubbed below.
    MockSettings settings = new MockSettingsImpl().defaultAnswer((t) -> {
        throw new AssertionError("Method " + t.getMethod() + " was not expected to be called");
    });
    BatchMetaDocPart allNewDocPart = mock(BatchMetaDocPart.class, settings);
    doReturn(true).when(allNewDocPart).isCreatedOnCurrentBatch();
    // Fixed: dropped a redundant nested Lists.newArrayList(...) wrapper — Guava's
    // Iterable overload flattens the inner list, so the nesting had no effect.
    doReturn(Lists.newArrayList(new ImmutableMetaField("newFieldName", "newFieldId", FieldType.BOOLEAN)).stream()).when(allNewDocPart).streamFields();
    doReturn(Lists.newArrayList(new ImmutableMetaScalar("newScalarId", FieldType.BOOLEAN)).stream()).when(allNewDocPart).streamScalars();
    DocPartData allNewData = mock(DocPartData.class);
    given(allNewData.getMetaDocPart()).willReturn(allNewDocPart);
    CollectionData collectionData = mock(CollectionData.class);
    given(collectionData.orderedDocPartData()).willReturn(Lists.<DocPartData>newArrayList(allNewData));
    //when
    Iterable<BackendTransactionJob> result = fun.apply(collectionData);
    ArrayList<BackendTransactionJob> resultList = Lists.newArrayList(result);
    //then
    // Fixed typo in the failure message: "recived" -> "received".
    assertEquals("Expected 4 jobs to do, but " + resultList.size() + " were received", 4, resultList.size());
    {
        Optional<BackendTransactionJob> insertJob = resultList.stream().filter((job) -> job instanceof InsertBackendJob && ((InsertBackendJob) job).getDataToInsert().equals(allNewData)).findAny();
        assertTrue(insertJob.isPresent());
        Optional<BackendTransactionJob> addFieldJob = resultList.stream().filter((job) -> {
            if (!(job instanceof AddFieldDdlJob)) {
                return false;
            }
            AddFieldDdlJob castedJob = (AddFieldDdlJob) job;
            return castedJob.getDocPart().equals(allNewDocPart) && castedJob.getField().getName().equals("newFieldName") && castedJob.getField().getIdentifier().equals("newFieldId");
        }).findAny();
        assertTrue(addFieldJob.isPresent());
        Optional<BackendTransactionJob> addScalarJob = resultList.stream().filter((job) -> {
            if (!(job instanceof AddScalarDddlJob)) {
                return false;
            }
            AddScalarDddlJob castedJob = (AddScalarDddlJob) job;
            return castedJob.getDocPart().equals(allNewDocPart) && castedJob.getScalar().getIdentifier().equals("newScalarId") && castedJob.getScalar().getType().equals(FieldType.BOOLEAN);
        }).findAny();
        assertTrue(addScalarJob.isPresent());
        Optional<BackendTransactionJob> createDocPartJob = resultList.stream().filter((job) -> {
            if (!(job instanceof AddDocPartDdlJob)) {
                return false;
            }
            AddDocPartDdlJob castedJob = (AddDocPartDdlJob) job;
            return castedJob.getDocPart().equals(allNewDocPart);
        }).findAny();
        assertTrue(createDocPartJob.isPresent());
        int createDocPartIndex = resultList.indexOf(createDocPartJob.get());
        int addFieldIndex = resultList.indexOf(addFieldJob.get());
        int addScalarIndex = resultList.indexOf(addScalarJob.get());
        int insertIndex = resultList.indexOf(insertJob.get());
        assert createDocPartIndex >= 0;
        assert addFieldIndex >= 0;
        assert addScalarIndex >= 0;
        assert insertIndex >= 0;
        assertTrue("For a given doc part, all related add fields jobs must be executed before insert " + "jobs, but in this case the add field job has index " + addFieldIndex + " and the insert job has index " + insertIndex, addFieldIndex < insertIndex);
        // Fixed typo in the failure message: "scalr" -> "scalar".
        assertTrue("For a given doc part, all related add scalar jobs must be executed before insert " + "jobs, but in this case the add scalar job has index " + addScalarIndex + " and the insert job has index " + insertIndex, addScalarIndex < insertIndex);
        assertTrue("For a given doc part, all related create doc part jobs must be executed " + "before add field jobs, but in this case the create doc part job has index " + createDocPartIndex + " and " + "the add field job has index " + addFieldIndex, createDocPartIndex < addFieldIndex);
    }
}
Use of com.torodb.core.transaction.metainf.ImmutableMetaScalar in project torodb (by torodb):
class DefaultToBackendFunctionTest, method testApply_newScalar.
/**
 * Checks that a pre-existing doc part with one scalar added on the current
 * batch produces exactly two jobs (add scalar, insert) and that the add
 * scalar job is scheduled before the insert job.
 */
@Test
public void testApply_newScalar() {
    // Fail fast on any mock interaction that is not explicitly stubbed below.
    MockSettings settings = new MockSettingsImpl().defaultAnswer((t) -> {
        throw new AssertionError("Method " + t.getMethod() + " was not expected to be called");
    });
    BatchMetaDocPart withNewScalarDocPart = mock(BatchMetaDocPart.class, settings);
    doReturn(false).when(withNewScalarDocPart).isCreatedOnCurrentBatch();
    doReturn(Collections.emptyList()).when(withNewScalarDocPart).getOnBatchModifiedMetaFields();
    doReturn(Lists.newArrayList(new ImmutableMetaScalar("newScalarId", FieldType.INTEGER))).when(withNewScalarDocPart).getOnBatchModifiedMetaScalars();
    DocPartData withNewScalar = mock(DocPartData.class);
    given(withNewScalar.getMetaDocPart()).willReturn(withNewScalarDocPart);
    CollectionData collectionData = mock(CollectionData.class);
    given(collectionData.orderedDocPartData()).willReturn(Lists.<DocPartData>newArrayList(withNewScalar));
    //when
    Iterable<BackendTransactionJob> result = fun.apply(collectionData);
    ArrayList<BackendTransactionJob> resultList = Lists.newArrayList(result);
    //then
    // Fixed typo in the failure message: "recived" -> "received".
    assertEquals("Expected 2 jobs to do, but " + resultList.size() + " were received", 2, resultList.size());
    {
        Optional<BackendTransactionJob> insertJob = resultList.stream().filter((job) -> job instanceof InsertBackendJob && ((InsertBackendJob) job).getDataToInsert().equals(withNewScalar)).findAny();
        assertTrue(insertJob.isPresent());
        Optional<BackendTransactionJob> addScalarJob = resultList.stream().filter((job) -> {
            if (!(job instanceof AddScalarDddlJob)) {
                return false;
            }
            AddScalarDddlJob castedJob = (AddScalarDddlJob) job;
            return castedJob.getDocPart().equals(withNewScalarDocPart) && castedJob.getScalar().getIdentifier().equals("newScalarId") && castedJob.getScalar().getType().equals(FieldType.INTEGER);
        }).findAny();
        assertTrue(addScalarJob.isPresent());
        int addScalarIndex = resultList.indexOf(addScalarJob.get());
        int insertIndex = resultList.indexOf(insertJob.get());
        assert addScalarIndex >= 0;
        assert insertIndex >= 0;
        // Fixed typo in the failure message: "scalr" -> "scalar".
        assertTrue("For a given doc part, all related add scalar jobs must be executed before insert " + "jobs, but in this case the add scalar job has index " + addScalarIndex + " and the insert job has index " + insertIndex, addScalarIndex < insertIndex);
    }
}
Use of com.torodb.core.transaction.metainf.ImmutableMetaScalar in project torodb (by torodb):
class SnapshotMerger, method merge.
/**
 * Merges a scalar changed on the new snapshot into the old doc part structure.
 * The scalar is looked up on the old structure by identifier and by type; if
 * the two lookups disagree the structures cannot be merged, and if both find
 * nothing the scalar is new and is added to the builder.
 *
 * @param db            the database the doc part belongs to (used to report conflicts)
 * @param col           the collection the doc part belongs to (used to report conflicts)
 * @param oldStructure  the doc part structure being merged into
 * @param parentBuilder the builder where new scalars are registered
 * @param changed       the scalar that changed on the new snapshot
 */
private void merge(MetaDatabase db, MetaCollection col, ImmutableMetaDocPart oldStructure, ImmutableMetaDocPart.Builder parentBuilder, ImmutableMetaScalar changed) {
    final MetaScalar sameId = oldStructure.getScalar(changed.getIdentifier());
    final MetaScalar sameType = oldStructure.getScalar(changed.getType());
    // Both lookups must resolve to the very same entry (or both to nothing);
    // anything else means the old and new structures conflict.
    if (sameType != sameId) {
        throw createUnmergeableException(db, col, oldStructure, changed, sameType, sameId);
    }
    // Unknown by both identifier and type: genuinely new, so register it.
    if (sameType == null && sameId == null) {
        parentBuilder.put(changed);
    }
}
Aggregations