Use of org.knime.core.node.port.database.DatabaseConnectionSettings in project knime-core by KNIME.
The class DBWriterImpl, method updateTable.
/**
 * Opens a connection and updates rows of an existing database table with the
 * values from the given data table, using a prepared {@code UPDATE} statement
 * ({@code SET} columns first, then {@code WHERE} columns). Rows are written
 * either one-by-one or in JDBC batches of {@code batchSize}.
 * @param schema name of the database schema
 *        (NOTE(review): this argument is never referenced in the method body —
 *        {@code createUpdateStatement} is called with the bare table name only;
 *        confirm whether schema qualification happens elsewhere)
 * @param table name of table to write
 * @param data The data to write.
 * @param setColumns columns part of the SET clause
 * @param whereColumns columns part of the WHERE clause
 * @param updateStatus int array of length data#getRowCount; will be filled with
 * update info from the database
 * @param exec Used the cancel writing.
 * @param cp {@link CredentialsProvider} providing user/password
 * @param batchSize number of rows updated in one batch
 * @return error string or null, if non
 * @throws Exception if connection could not be established
 */
@Override
public final String updateTable(final String schema, final String table, final BufferedDataTable data, final String[] setColumns, final String[] whereColumns, final int[] updateStatus, final ExecutionMonitor exec, final CredentialsProvider cp, final int batchSize) throws Exception {
    final DatabaseConnectionSettings conSettings = getDatabaseConnectionSettings();
    // synchronized (conSettings.syncConnection(conn)) {
    // Execute the whole write as a callback on a managed connection.
    return conSettings.execute(cp, conn -> {
        exec.setMessage("Start updating rows in database...");
        final DataTableSpec spec = data.getDataTableSpec();
        // UPDATE <table> SET <setColumns...> WHERE <whereColumns...> with '?' placeholders.
        final String updateStmt = createUpdateStatement(table, setColumns, whereColumns);
        // problems writing more than 13 columns. the prepare statement
        // ensures that we can set the columns directly row-by-row, the
        // database will handle the commit
        long rowCount = data.size();
        // 1-based row counter, used for progress reporting and updateStatus indexing
        int cnt = 1;
        // number of warnings logged so far; set to -1 once logging is suppressed
        int errorCnt = 0;
        // total number of failed executions (single rows or whole batches)
        int allErrors = 0;
        // count number of rows added to current batch
        int curBatchSize = 0;
        // selected timezone used when binding date/time cells
        final TimeZone timezone = conSettings.getTimeZone();
        LOGGER.debug("Executing SQL statement as prepareStatement: " + updateStmt);
        final PreparedStatement stmt = conn.prepareStatement(updateStmt);
        // remember auto-commit flag so it can be restored in the finally block
        final boolean autoCommit = conn.getAutoCommit();
        DatabaseConnectionSettings.setAutoCommit(conn, false);
        try {
            for (RowIterator it = data.iterator(); it.hasNext(); cnt++) {
                exec.checkCanceled();
                exec.setProgress(1.0 * cnt / rowCount, "Row " + "#" + cnt);
                final DataRow row = it.next();
                // SET columns occupy JDBC parameters 1..setColumns.length
                for (int i = 0; i < setColumns.length; i++) {
                    final int dbIdx = i + 1;
                    final int columnIndex = spec.findColumnIndex(setColumns[i]);
                    final DataColumnSpec cspec = spec.getColumnSpec(columnIndex);
                    final DataCell cell = row.getCell(columnIndex);
                    fillStatement(stmt, dbIdx, cspec, cell, timezone, null);
                }
                // WHERE columns follow, offset by the number of SET parameters
                for (int i = 0; i < whereColumns.length; i++) {
                    final int dbIdx = i + 1 + setColumns.length;
                    final int columnIndex = spec.findColumnIndex(whereColumns[i]);
                    final DataColumnSpec cspec = spec.getColumnSpec(columnIndex);
                    final DataCell cell = row.getCell(columnIndex);
                    fillStatement(stmt, dbIdx, cspec, cell, timezone, null);
                }
                // if batch mode
                if (batchSize > 1) {
                    // a new row will be added
                    stmt.addBatch();
                }
                curBatchSize++;
                // flush when the batch is full or the input table is exhausted
                if ((curBatchSize == batchSize) || !it.hasNext()) {
                    curBatchSize = 0;
                    try {
                        // write batch
                        if (batchSize > 1) {
                            int[] status = stmt.executeBatch();
                            // map batch results back to the 0-based rows
                            // cnt-status.length .. cnt-1 (cnt is 1-based here)
                            for (int i = 0; i < status.length; i++) {
                                updateStatus[cnt - status.length + i] = status[i];
                            }
                        } else {
                            // or write single row
                            int status = stmt.executeUpdate();
                            updateStatus[cnt - 1] = status;
                        }
                    } catch (Throwable t) {
                        // Best-effort: commit rows written so far so earlier
                        // successful updates are not lost, then keep going.
                        // NOTE(review): committing inside the error handler (rather
                        // than rolling back) looks deliberate — confirm this
                        // partial-commit semantic is intended.
                        if (!conn.getAutoCommit()) {
                            conn.commit();
                        }
                        allErrors++;
                        // errorCnt == -1 means warning output has been suppressed
                        if (errorCnt > -1) {
                            final String errorMsg;
                            if (batchSize > 1) {
                                // NOTE(review): the last batch may hold fewer than
                                // batchSize rows, so this range can overstate the
                                // affected rows — verify against actual batch size.
                                errorMsg = "Error while updating rows #" + (cnt - batchSize) + " - #" + cnt + ", reason: " + t.getMessage();
                            } else {
                                errorMsg = "Error while updating row #" + cnt + " (" + row.getKey() + "), reason: " + t.getMessage();
                            }
                            exec.setMessage(errorMsg);
                            // log the first 10 errors individually; after that, log
                            // once more with the stack trace and go silent
                            if (errorCnt++ < 10) {
                                LOGGER.warn(errorMsg);
                            } else {
                                errorCnt = -1;
                                LOGGER.warn(errorMsg + " - more errors...", t);
                            }
                        }
                    } finally {
                        // clear batch if in batch mode
                        if (batchSize > 1) {
                            stmt.clearBatch();
                        }
                    }
                }
            }
            // final commit for any rows since the last error/flush
            if (!conn.getAutoCommit()) {
                conn.commit();
            }
            // null signals success; otherwise summarize the failure count
            if (allErrors == 0) {
                return null;
            } else {
                return "Errors \"" + allErrors + "\" updating " + rowCount + " rows.";
            }
        } finally {
            // restore the caller's auto-commit setting and release the statement
            DatabaseConnectionSettings.setAutoCommit(conn, autoCommit);
            stmt.close();
        }
    });
}
Aggregations