Use of com.torodb.core.d2r.DocPartRow in project torodb by torodb.
From the class AbstractWriteInterface, the method insertDocPartData:
@Override
public void insertDocPartData(DSLContext dsl, String schemaName, DocPartData docPartData)
    throws UserException {
  Iterator<DocPartRow> docPartRowIterator = docPartData.iterator();
  if (!docPartRowIterator.hasNext()) {
    return;
  }
  try {
    MetaDocPart metaDocPart = docPartData.getMetaDocPart();
    Iterator<MetaScalar> metaScalarIterator = docPartData.orderedMetaScalarIterator();
    Iterator<MetaField> metaFieldIterator = docPartData.orderedMetaFieldIterator();
    standardInsertDocPartData(dsl, schemaName, docPartData, metaDocPart, metaScalarIterator,
        metaFieldIterator, docPartRowIterator);
  } catch (DataAccessException ex) {
    throw errorHandler.handleUserException(Context.INSERT, ex);
  }
}
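For context, a caller obtains a jOOQ DSLContext (for example via org.jooq.impl.DSL.using) and passes it in together with the target schema name and the DocPartData produced by torodb's D2R translation. The sketch below is only illustrative: dataSource, writeInterface, the schema name, and the way docPartData is obtained are hypothetical placeholders, not torodb code.

// Minimal caller sketch; all names here are hypothetical.
void insertExample(DataSource dataSource, AbstractWriteInterface writeInterface,
    DocPartData docPartData) throws Exception {
  try (Connection connection = dataSource.getConnection()) {
    // Build a jOOQ DSLContext over the JDBC connection (PostgreSQL dialect assumed).
    DSLContext dsl = DSL.using(connection, SQLDialect.POSTGRES);
    writeInterface.insertDocPartData(dsl, "torodb_schema", docPartData);
  }
}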
Use of com.torodb.core.d2r.DocPartRow in project torodb by torodb.
From the class AbstractWriteInterface, the method standardInsertDocPartData:
protected void standardInsertDocPartData(DSLContext dsl, String schemaName,
    DocPartData docPartData, MetaDocPart metaDocPart, Iterator<MetaScalar> metaScalarIterator,
    Iterator<MetaField> metaFieldIterator, Iterator<DocPartRow> docPartRowIterator)
    throws UserException {
  final int maxBatchSize = getMaxBatchSize();
  Collection<InternalField<?>> internalFields =
      metaDataReadInterface.getInternalFields(metaDocPart);
  List<FieldType> fieldTypeList = new ArrayList<>();
  String statement = getInsertDocPartDataStatement(schemaName, metaDocPart, metaFieldIterator,
      metaScalarIterator, internalFields, fieldTypeList);
  assert assertFieldTypeListIsConsistent(docPartData, fieldTypeList)
      : "fieldTypeList should be an ordered list of FieldType from MetaScalar and MetaField "
      + "following the ordering of DocPartData.orderedMetaScalarIterator and "
      + "DocPartData.orderedMetaFieldIterator";
  Connection connection = dsl.configuration().connectionProvider().acquire();
  try {
    try (PreparedStatement preparedStatement = connection.prepareStatement(statement)) {
      int docCounter = 0;
      while (docPartRowIterator.hasNext()) {
        DocPartRow docPartRow = docPartRowIterator.next();
        docCounter++;
        // Bind internal (structural) fields first, then scalar values, then field values,
        // in the same order used to build the insert statement.
        int parameterIndex = 1;
        for (InternalField<?> internalField : internalFields) {
          internalField.set(preparedStatement, parameterIndex, docPartRow);
          parameterIndex++;
        }
        Iterator<FieldType> fieldTypeIterator = fieldTypeList.iterator();
        for (KvValue<?> value : docPartRow.getScalarValues()) {
          sqlHelper.setPreparedStatementNullableValue(preparedStatement, parameterIndex++,
              fieldTypeIterator.next(), value);
        }
        for (KvValue<?> value : docPartRow.getFieldValues()) {
          sqlHelper.setPreparedStatementNullableValue(preparedStatement, parameterIndex++,
              fieldTypeIterator.next(), value);
        }
        preparedStatement.addBatch();
        if (LOGGER.isTraceEnabled()) {
          LOGGER.trace("Added to insert {}", preparedStatement.toString());
        }
        // Execute every full batch, and also the final partial batch once the iterator is exhausted.
        if (docCounter % maxBatchSize == 0 || !docPartRowIterator.hasNext()) {
          preparedStatement.executeBatch();
          LOGGER.trace("Insertion batch executed");
        }
      }
    }
  } catch (SQLException ex) {
    throw errorHandler.handleUserException(Context.INSERT, ex);
  } finally {
    dsl.configuration().connectionProvider().release(connection);
  }
}
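The flushing discipline above (execute a batch every maxBatchSize rows, plus once more for the trailing partial batch) is plain JDBC and can be distilled as the following sketch. The table, column names, and row shape are hypothetical; in torodb the statement text comes from getInsertDocPartDataStatement and the values from DocPartRow.

// Distilled batching sketch, assuming a hypothetical two-column table.
static void batchInsert(Connection connection, List<String[]> rows, int maxBatchSize)
    throws SQLException {
  try (PreparedStatement ps =
      connection.prepareStatement("INSERT INTO example_table (a, b) VALUES (?, ?)")) {
    int counter = 0;
    Iterator<String[]> it = rows.iterator();
    while (it.hasNext()) {
      String[] row = it.next();
      counter++;
      ps.setString(1, row[0]);
      ps.setString(2, row[1]);
      ps.addBatch();
      // Flush every full batch, and also the trailing partial batch.
      if (counter % maxBatchSize == 0 || !it.hasNext()) {
        ps.executeBatch();
      }
    }
  }
}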
Use of com.torodb.core.d2r.DocPartRow in project torodb by torodb.
From the class PostgreSqlWriteInterface, the method copyInsertDocPartData:
private void copyInsertDocPartData(PGConnection connection, String schemaName,
    DocPartData docPartData) throws SQLException, IOException {
  final int maxBatchSize = 1024;
  final CopyManager copyManager = connection.getCopyAPI();
  final MetaDocPart metaDocPart = docPartData.getMetaDocPart();
  Collection<InternalField<?>> internalFields =
      postgreSqlMetaDataReadInterface.getInternalFields(metaDocPart);
  final StringBuilder sb = new StringBuilder(65536);
  final String copyStatement =
      getCopyInsertDocPartDataStatement(schemaName, docPartData, metaDocPart, internalFields);
  Iterator<DocPartRow> docPartRowIterator = docPartData.iterator();
  int docCounter = 0;
  while (docPartRowIterator.hasNext()) {
    DocPartRow tableRow = docPartRowIterator.next();
    docCounter++;
    addValuesToCopy(sb, tableRow, internalFields);
    assert sb.length() != 0;
    // Send the buffered rows once a full batch is reached, or after the last row,
    // then reset the buffer for the next batch.
    if (docCounter % maxBatchSize == 0 || !docPartRowIterator.hasNext()) {
      executeCopy(copyManager, copyStatement, sb);
      sb.setLength(0);
    }
  }
}
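The helper executeCopy is not shown in this snippet. A common way to feed a buffered text batch to PostgreSQL's COPY protocol, which connection.getCopyAPI() exposes, is CopyManager.copyIn(String, Reader) from the PostgreSQL JDBC driver (org.postgresql.copy.CopyManager, java.io.StringReader). The sketch below only assumes that executeCopy works roughly this way; the method name and shape here are illustrative, not the torodb implementation.

// Illustrative sketch: push the buffered rows through the COPY protocol.
static void executeCopySketch(CopyManager copyManager, String copyStatement, StringBuilder sb)
    throws SQLException, IOException {
  try (Reader reader = new StringReader(sb.toString())) {
    copyManager.copyIn(copyStatement, reader);
  }
}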