Example usage of com.torodb.core.transaction.metainf.FieldType in the torodb project.
From the class BatchMetaDocPartTest, method testAddMetaField:
@Test
public void testAddMetaField() {
  // Fixture data for the field to be added.
  final String name = "aFieldName";
  final String identifier = "aFieldID";
  final FieldType type = FieldType.INTEGER;
  // Before the insertion neither the wrapper nor its delegate knows the field.
  assertNull(delegate.getMetaFieldByIdentifier(identifier));
  assertNull(delegate.getMetaFieldByNameAndType(name, type));
  assertNull(docPart.getMetaFieldByIdentifier(identifier));
  assertNull(docPart.getMetaFieldByNameAndType(name, type));
  // Adding through the batch wrapper must propagate to the delegate.
  docPart.addMetaField(name, identifier, type);
  assertNotNull(delegate.getMetaFieldByIdentifier(identifier));
  assertNotNull(delegate.getMetaFieldByNameAndType(name, type));
  assertNotNull(docPart.getMetaFieldByIdentifier(identifier));
  assertNotNull(docPart.getMetaFieldByNameAndType(name, type));
  // The field shows up both as "added" and as modified in the current batch.
  assertFalse(Iterables.isEmpty(docPart.getAddedMetaFields()));
  assertFalse(Iterables.isEmpty(delegate.getAddedMetaFields()));
  assertFalse(Iterables.isEmpty(docPart.getOnBatchModifiedMetaFields()));
  // Exactly one change notification is expected for the doc part.
  verify(testChangeConsumer).accept(docPart);
  verifyNoMoreInteractions(testChangeConsumer);
}
Example usage of com.torodb.core.transaction.metainf.FieldType in the torodb project.
From the class PostgreSqlWriteInterface, method getInsertDocPartDataStatement:
@Override
protected String getInsertDocPartDataStatement(String schemaName, MetaDocPart metaDocPart, Iterator<MetaField> metaFieldIterator, Iterator<MetaScalar> metaScalarIterator, Collection<InternalField<?>> internalFields, List<FieldType> fieldTypeList) {
  // Column list and VALUES clause are built in lockstep so that the i-th
  // column always matches the i-th placeholder.
  final StringBuilder columns = new StringBuilder(2048);
  final StringBuilder values = new StringBuilder(1024);
  columns.append("INSERT INTO \"").append(schemaName).append("\".\"").append(metaDocPart.getIdentifier()).append("\" (");
  values.append(" VALUES (");
  // Internal (bookkeeping) columns always use a plain '?' placeholder.
  for (InternalField<?> internalField : internalFields) {
    columns.append("\"").append(internalField.getName()).append("\",");
    values.append("?,");
  }
  // Scalar columns first; each contributes its FieldType to fieldTypeList
  // (out-parameter) in the same order as its placeholder.
  while (metaScalarIterator.hasNext()) {
    MetaScalar scalar = metaScalarIterator.next();
    FieldType scalarType = scalar.getType();
    columns.append("\"").append(scalar.getIdentifier()).append("\",");
    values.append(sqlHelper.getPlaceholder(scalarType)).append(',');
    fieldTypeList.add(scalarType);
  }
  // Then document-field columns, same pattern.
  while (metaFieldIterator.hasNext()) {
    MetaField field = metaFieldIterator.next();
    FieldType fieldType = field.getType();
    columns.append("\"").append(field.getIdentifier()).append("\",");
    values.append(sqlHelper.getPlaceholder(fieldType)).append(',');
    fieldTypeList.add(fieldType);
  }
  // Overwrite the trailing comma of each clause with its closing parenthesis.
  columns.setCharAt(columns.length() - 1, ')');
  values.setCharAt(values.length() - 1, ')');
  return columns.append(values).toString();
}
Example usage of com.torodb.core.transaction.metainf.FieldType in the torodb project.
From the class AbstractWriteInterface, method standardInsertDocPartData:
/**
 * Inserts every row of {@code docPartData} into the doc part's backing table using a single
 * prepared statement executed in batches of at most {@code getMaxBatchSize()} rows.
 *
 * <p>The statement text (and, via the {@code fieldTypeList} out-parameter, the ordered list of
 * column {@link FieldType}s) is produced by {@code getInsertDocPartDataStatement}; parameters
 * are then bound per row in the same order: internal fields, scalar values, field values.
 *
 * @param dsl                jOOQ context used to acquire and release the JDBC connection
 * @param schemaName         database schema holding the doc part table
 * @param docPartData        the rows to insert
 * @param metaDocPart        metadata of the target doc part
 * @param metaScalarIterator ordered scalar columns (must match docPartData ordering)
 * @param metaFieldIterator  ordered field columns (must match docPartData ordering)
 * @param docPartRowIterator the rows to bind and insert
 * @throws UserException if the backend reports a user-level error during insertion
 */
protected void standardInsertDocPartData(DSLContext dsl, String schemaName, DocPartData docPartData, MetaDocPart metaDocPart, Iterator<MetaScalar> metaScalarIterator, Iterator<MetaField> metaFieldIterator, Iterator<DocPartRow> docPartRowIterator) throws UserException {
  final int maxBatchSize = getMaxBatchSize();
  Collection<InternalField<?>> internalFields = metaDataReadInterface.getInternalFields(metaDocPart);
  // fieldTypeList is filled (as an out-parameter) with the FieldType of each
  // scalar/field column, in placeholder order.
  List<FieldType> fieldTypeList = new ArrayList<>();
  String statement = getInsertDocPartDataStatement(schemaName, metaDocPart, metaFieldIterator, metaScalarIterator, internalFields, fieldTypeList);
  // Fixed duplicated word ("the the") in the assertion message.
  assert assertFieldTypeListIsConsistent(docPartData, fieldTypeList) : "fieldTypeList should be an ordered list of FieldType" + " from MetaScalar and MetaField following the ordering of " + "DocPartData.orderedMetaScalarIterator and DocPartData.orderedMetaFieldIterator";
  Connection connection = dsl.configuration().connectionProvider().acquire();
  try {
    try (PreparedStatement preparedStatement = connection.prepareStatement(statement)) {
      int docCounter = 0;
      while (docPartRowIterator.hasNext()) {
        DocPartRow docPartRow = docPartRowIterator.next();
        docCounter++;
        // JDBC parameters are 1-based; bind in the same order the statement
        // builder emitted the columns: internal fields, scalars, fields.
        int parameterIndex = 1;
        for (InternalField<?> internalField : internalFields) {
          internalField.set(preparedStatement, parameterIndex, docPartRow);
          parameterIndex++;
        }
        Iterator<FieldType> fieldTypeIterator = fieldTypeList.iterator();
        for (KvValue<?> value : docPartRow.getScalarValues()) {
          sqlHelper.setPreparedStatementNullableValue(preparedStatement, parameterIndex++, fieldTypeIterator.next(), value);
        }
        for (KvValue<?> value : docPartRow.getFieldValues()) {
          sqlHelper.setPreparedStatementNullableValue(preparedStatement, parameterIndex++, fieldTypeIterator.next(), value);
        }
        preparedStatement.addBatch();
        if (LOGGER.isTraceEnabled()) {
          LOGGER.trace("Added to insert {}", preparedStatement.toString());
        }
        // Flush a full batch, or the final (possibly partial) one.
        if (docCounter % maxBatchSize == 0 || !docPartRowIterator.hasNext()) {
          preparedStatement.executeBatch();
          LOGGER.trace("Insertion batch executed");
        }
      }
    }
  } catch (SQLException ex) {
    // Translate backend errors into the engine's user-facing exception type.
    throw errorHandler.handleUserException(Context.INSERT, ex);
  } finally {
    // Always return the connection to jOOQ's provider, even on failure.
    dsl.configuration().connectionProvider().release(connection);
  }
}
Aggregations