Use of org.jooq.BatchBindStep in project jOOQ by jOOQ.
The class LoaderImpl, method executeSQL:
private void executeSQL(Iterator<? extends Object[]> iterator) throws SQLException {
    Object[] row = null;
    BatchBindStep bind = null;
    InsertQuery<R> insert = null;

    execution: {
        rows: while (iterator.hasNext() && ((row = iterator.next()) != null)) {
            try {

                // [#5858] Work with non String[] types from here on (e.g. after CSV import)
                if (row.getClass() != Object[].class)
                    row = Arrays.copyOf(row, row.length, Object[].class);

                // Lazy initialisation of fields from the first row,
                // in case LoaderFieldMapper was used.
                if (fields == null)
                    fields0(row);

                // [#2741] TODO: This logic will be externalised in new SPI
                for (int i = 0; i < row.length; i++)
                    if (StringUtils.equals(nullString, row[i]))
                        row[i] = null;
                    else if (i < fields.length && fields[i] != null)
                        if (fields[i].getType() == byte[].class && row[i] instanceof String)
                            row[i] = DatatypeConverter.parseBase64Binary((String) row[i]);

                // TODO: In batch mode, we can probably optimise this by not creating
                // new statements every time, just to convert bind values to their
                // appropriate target types. But beware of SQL dialects that tend to
                // need very explicit casting of bind values (e.g. Firebird)
                processed++;

                // TODO: This can be implemented faster using a MERGE statement
                // in some dialects
                if (onDuplicate == ON_DUPLICATE_KEY_IGNORE) {
                    SelectQuery<R> select = create.selectQuery(table);

                    for (int i = 0; i < row.length; i++)
                        if (i < fields.length && primaryKey[i])
                            select.addConditions(getCondition(fields[i], row[i]));

                    try {
                        if (create.fetchExists(select)) {
                            ignored++;
                            continue rows;
                        }
                    }
                    catch (DataAccessException e) {
                        errors.add(new LoaderErrorImpl(e, row, processed - 1, select));
                    }
                }

                buffered++;

                if (insert == null)
                    insert = create.insertQuery(table);

                for (int i = 0; i < row.length; i++)
                    if (i < fields.length && fields[i] != null)
                        addValue0(insert, fields[i], row[i]);

                // TODO: This is only supported by some dialects. Let other
                // dialects execute a SELECT and then either an INSERT or UPDATE
                if (onDuplicate == ON_DUPLICATE_KEY_UPDATE) {
                    insert.onDuplicateKeyUpdate(true);

                    for (int i = 0; i < row.length; i++)
                        if (i < fields.length && fields[i] != null && !primaryKey[i])
                            addValueForUpdate0(insert, fields[i], row[i]);
                }

                // Don't do anything. Let the execution fail
                else if (onDuplicate == ON_DUPLICATE_KEY_ERROR) {}

                try {
                    if (bulk != BULK_NONE) {
                        if (bulk == BULK_ALL || processed % bulkAfter != 0) {
                            insert.newRecord();
                            continue rows;
                        }
                    }

                    if (batch != BATCH_NONE) {
                        if (bind == null)
                            bind = create.batch(insert);

                        bind.bind(insert.getBindValues().toArray());
                        insert = null;

                        if (batch == BATCH_ALL || processed % (bulkAfter * batchAfter) != 0)
                            continue rows;
                    }

                    if (bind != null)
                        bind.execute();
                    else if (insert != null)
                        insert.execute();

                    stored += buffered;
                    executed++;
                    buffered = 0;
                    bind = null;
                    insert = null;

                    if (commit == COMMIT_AFTER)
                        if ((processed % batchAfter == 0) && ((processed / batchAfter) % commitAfter == 0))
                            commit();
                }
                catch (DataAccessException e) {
                    errors.add(new LoaderErrorImpl(e, row, processed - 1, insert));
                    ignored += buffered;
                    buffered = 0;

                    if (onError == ON_ERROR_ABORT)
                        break execution;
                }
            }
            finally {
                if (listener != null)
                    listener.row(result);
            }
        } // rows:

        // Execute remaining batch
        if (buffered != 0) {
            try {
                if (bind != null)
                    bind.execute();
                if (insert != null)
                    insert.execute();

                stored += buffered;
                executed++;
                buffered = 0;
            }
            catch (DataAccessException e) {
                errors.add(new LoaderErrorImpl(e, row, processed - 1, insert));
                ignored += buffered;
                buffered = 0;
            }

            if (onError == ON_ERROR_ABORT)
                break execution;
        }
    } // execution:

    // Rollback on errors in COMMIT_ALL mode
    try {
        if (commit == COMMIT_ALL) {
            if (!errors.isEmpty()) {
                stored = 0;
                rollback();
            }
            else {
                commit();
            }
        }

        // Commit remaining elements in COMMIT_AFTER mode
        else if (commit == COMMIT_AFTER) {
            commit();
        }
    }
    catch (DataAccessException e) {
        errors.add(new LoaderErrorImpl(e, null, processed - 1, null));
    }
}
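For context, the batching pattern used above can be sketched on its own: a single templated INSERT is handed to DSLContext.batch(), each row's values are re-bound with bind(), and execute() runs one JDBC batch. This is a minimal, hypothetical example, not part of the loader API; the table and field names (AUTHOR, ID, NAME) and the insertAll helper are made up for illustration.

// Hypothetical sketch of the BatchBindStep pattern used by executeSQL()
import static org.jooq.impl.DSL.field;
import static org.jooq.impl.DSL.table;

import java.util.List;
import org.jooq.BatchBindStep;
import org.jooq.DSLContext;

class BatchBindSketch {

    static int[] insertAll(DSLContext create, List<Object[]> rows) {

        // One templated INSERT with dummy bind values; its SQL is rendered once
        BatchBindStep batch = create.batch(
            create.insertInto(table("AUTHOR"), field("ID"), field("NAME"))
                  .values((Integer) null, null));

        // Re-bind the same statement for every row, as executeSQL() does per buffered record
        for (Object[] row : rows)
            batch.bind(row);

        // Executes a single JDBC batch and returns per-statement update counts
        return batch.execute();
    }
}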
Use of org.jooq.BatchBindStep in project jOOQ by jOOQ.
The class BatchCRUD, method executePrepared:
private final int[] executePrepared() {
    Map<String, List<Query>> queries = new LinkedHashMap<String, List<Query>>();
    QueryCollector collector = new QueryCollector();

    // Add the QueryCollector to intercept query execution after rendering
    Configuration local = configuration.derive(Tools.combine(
        configuration.executeListenerProviders(),
        new DefaultExecuteListenerProvider(collector)));

    // [#1537] Communicate with UpdatableRecordImpl
    local.data(DATA_OMIT_RETURNING_CLAUSE, true);

    // [#1529] Avoid DEBUG logging of single INSERT / UPDATE statements
    local.settings().setExecuteLogging(false);

    for (int i = 0; i < records.length; i++) {
        Configuration previous = ((AttachableInternal) records[i]).configuration();

        try {
            records[i].attach(local);
            executeAction(i);
        }
        catch (QueryCollectorSignal e) {
            Query query = e.getQuery();
            String sql = e.getSQL();

            // Aggregate executable queries by identical SQL
            if (query.isExecutable()) {
                List<Query> list = queries.get(sql);

                if (list == null) {
                    list = new ArrayList<Query>();
                    queries.put(sql, list);
                }

                list.add(query);
            }
        }
        finally {
            records[i].attach(previous);
        }
    }

    // Execute one batch statement for each identical SQL statement. Every
    // SQL statement may have several queries with different bind values.
    // The order is preserved as much as possible
    List<Integer> result = new ArrayList<Integer>();

    for (Entry<String, List<Query>> entry : queries.entrySet()) {
        BatchBindStep batch = create.batch(entry.getValue().get(0));

        for (Query query : entry.getValue()) {
            batch.bind(query.getBindValues().toArray());
        }

        int[] array = batch.execute();

        for (int i : array) {
            result.add(i);
        }
    }

    int[] array = new int[result.size()];
    for (int i = 0; i < result.size(); i++) {
        array[i] = result.get(i);
    }

    updateChangedFlag();
    return array;
}
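The grouping idea above can also be sketched independently of BatchCRUD's internals: group already-constructed queries by their rendered SQL, then run one BatchBindStep per group, binding each query's values as one batch row. The class and method names below (BatchBySqlSketch, executeGrouped) are hypothetical, and the sketch assumes a DSLContext and pre-built Query objects are available; it uses create.render() for grouping where BatchCRUD relies on its internal QueryCollectorSignal.

// Hypothetical sketch of "group by identical SQL, then batch", assuming a DSLContext `create`
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.jooq.BatchBindStep;
import org.jooq.DSLContext;
import org.jooq.Query;

class BatchBySqlSketch {

    static List<Integer> executeGrouped(DSLContext create, List<Query> all) {

        // Group queries whose rendered SQL is identical, preserving first-seen order
        Map<String, List<Query>> bySql = new LinkedHashMap<>();
        for (Query q : all)
            bySql.computeIfAbsent(create.render(q), k -> new ArrayList<>()).add(q);

        // One BatchBindStep per distinct SQL string; each query contributes one set of bind values
        List<Integer> counts = new ArrayList<>();
        for (List<Query> group : bySql.values()) {
            BatchBindStep batch = create.batch(group.get(0));

            for (Query q : group)
                batch.bind(q.getBindValues().toArray());

            for (int c : batch.execute())
                counts.add(c);
        }
        return counts;
    }
}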