Use of org.apache.hop.core.exception.HopDatabaseBatchException in project hop by apache.
The class Database, method createHopDatabaseBatchException.
public static HopDatabaseBatchException createHopDatabaseBatchException(String message, SQLException ex) {
HopDatabaseBatchException kdbe = new HopDatabaseBatchException(message, ex);
if (ex instanceof BatchUpdateException) {
kdbe.setUpdateCounts(((BatchUpdateException) ex).getUpdateCounts());
} else {
// Null update count forces rollback of batch
kdbe.setUpdateCounts(null);
}
List<Exception> exceptions = new ArrayList<>();
SQLException nextException = ex.getNextException();
SQLException oldException = null;
// Guard against drivers that chain an exception to itself: compare by reference (!=), not equals() (comment from Sven Boden).
while ((nextException != null) && (oldException != nextException)) {
exceptions.add(nextException);
oldException = nextException;
nextException = nextException.getNextException();
}
kdbe.setExceptionsList(exceptions);
return kdbe;
}
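For context, here is a minimal plain-JDBC sketch of where that state originates: executeBatch() throws a BatchUpdateException carrying both the per-statement update counts and a driver-specific chain of SQLExceptions, which is exactly what the factory method above repackages. The connection handling, demo_table, and its primary key on id are assumptions for illustration only, not part of Hop.

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class BatchFailureDemo {
  // Runs a small batch that fails on a duplicate key, then inspects the
  // same two pieces of state createHopDatabaseBatchException() repackages.
  static void runBatch(Connection conn) throws SQLException {
    try (PreparedStatement ps =
        conn.prepareStatement("INSERT INTO demo_table (id) VALUES (?)")) {
      for (int id : new int[] {1, 2, 2}) { // the duplicate forces a batch failure
        ps.setInt(1, id);
        ps.addBatch();
      }
      ps.executeBatch();
    } catch (BatchUpdateException bue) {
      // Per-statement results; Statement.EXECUTE_FAILED marks the bad entries.
      for (int count : bue.getUpdateCounts()) {
        System.out.println(count == Statement.EXECUTE_FAILED ? "failed" : "ok: " + count);
      }
      // Drivers chain further errors exactly as the while-loop above walks them.
      SQLException next = bue.getNextException();
      while (next != null) {
        System.out.println("chained: " + next.getMessage());
        next = next.getNextException();
      }
    }
  }
}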
Use of org.apache.hop.core.exception.HopDatabaseBatchException in project hop by apache.
The class SynchronizeAfterMerge, method finishTransform.
private void finishTransform() {
if (data.db != null && data.db.getConnection() != null) {
try {
if (!data.db.getConnection().isClosed()) {
for (String schemaTable : data.preparedStatements.keySet()) {
// Get a commit counter per prepared statement to keep track of separate tables, etc.
//
Integer batchCounter = data.commitCounterMap.get(schemaTable);
if (batchCounter == null) {
batchCounter = 0;
}
PreparedStatement insertStatement = data.preparedStatements.get(schemaTable);
data.db.emptyAndCommit(insertStatement, data.batchMode, batchCounter);
}
for (int i = 0; i < data.batchBuffer.size(); i++) {
Object[] row = data.batchBuffer.get(i);
putRow(data.outputRowMeta, row);
if (data.inputRowMeta.getString(row, data.indexOfOperationOrderField).equals(data.insertValue)) {
incrementLinesOutput();
}
}
// Clear the buffer
data.batchBuffer.clear();
}
} catch (HopDatabaseBatchException be) {
if (getTransformMeta().isDoingErrorHandling()) {
// OK, we have the numbers...
try {
processBatchException(be.toString(), be.getUpdateCounts(), be.getExceptionsList());
} catch (HopException e) {
logError("Unexpected error processing batch error", e);
setErrors(1);
stopAll();
}
} else {
logError("Unexpected batch update error committing the database connection.", be);
setErrors(1);
stopAll();
}
} catch (Exception dbe) {
logError("Unexpected error committing the database connection.", dbe);
logError(Const.getStackTracker(dbe));
setErrors(1);
stopAll();
} finally {
setOutputDone();
if (getErrors() > 0) {
try {
data.db.rollback();
} catch (HopDatabaseException e) {
logError("Unexpected error rolling back the database connection.", e);
}
}
data.db.disconnect();
}
}
}
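The flush above goes through Database.emptyAndCommit(). As a rough, hedged sketch, this is what such a flush boils down to in plain JDBC; the real Hop method does more, including translating failures into the HopDatabaseBatchException caught above.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class EmptyAndCommitSketch {
  // Hedged approximation of emptyAndCommit(statement, batchMode, counter):
  // flush any queued batch entries, then commit the transaction.
  static void emptyAndCommit(Connection conn, PreparedStatement ps,
                             boolean batchMode, int pending) throws SQLException {
    if (batchMode && pending > 0) {
      ps.executeBatch(); // flush the statements still queued on the driver
      ps.clearBatch();
    }
    conn.commit(); // make the remaining work durable
  }
}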
Use of org.apache.hop.core.exception.HopDatabaseBatchException in project hop by apache.
The class SynchronizeAfterMerge, method lookupValues.
private synchronized void lookupValues(Object[] row) throws HopException {
// Get the operation for the current row: do we insert, update or delete?
String operation = data.inputRowMeta.getString(row, data.indexOfOperationOrderField);
boolean rowIsSafe = false;
boolean sendToErrorRow = false;
String errorMessage = null;
int[] updateCounts = null;
List<Exception> exceptionsList = null;
boolean batchProblem = false;
data.lookupFailure = false;
boolean performInsert = false;
boolean performUpdate = false;
boolean performDelete = false;
boolean lineSkipped = false;
try {
if (operation == null) {
throw new HopException(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.OperationFieldEmpty", meta.getOperationOrderField()));
}
if (meta.istablenameInField()) {
// get dynamic table name
data.realTableName = data.inputRowMeta.getString(row, data.indexOfTableNameField);
if (Utils.isEmpty(data.realTableName)) {
throw new HopTransformException("The name of the table is not specified!");
}
data.realSchemaTable = data.db.getDatabaseMeta().getQuotedSchemaTableCombination(this, data.realSchemaName, data.realTableName);
}
if (operation.equals(data.insertValue)) {
if (log.isRowLevel()) {
logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.InsertRow", Arrays.toString(row)));
}
// The values to insert are those in the update section
//
Object[] insertRowData = new Object[data.valuenrs.length];
for (int i = 0; i < data.valuenrs.length; i++) {
insertRowData[i] = row[data.valuenrs[i]];
}
if (meta.istablenameInField()) {
data.insertStatement = data.preparedStatements.get(data.realSchemaTable + "insert");
if (data.insertStatement == null) {
String sql = data.db.getInsertStatement(data.realSchemaName, data.realTableName, data.insertRowMeta);
if (log.isDebug()) {
logDebug("Preparation of the insert SQL statement: " + sql);
}
data.insertStatement = data.db.prepareSql(sql);
data.preparedStatements.put(data.realSchemaTable + "insert", data.insertStatement);
}
}
// Set a savepoint before the insert when savepoint-based error handling is enabled
if (data.specialErrorHandling && data.supportsSavepoints) {
data.savepoint = data.db.setSavepoint();
}
// Set the values on the prepared statement...
data.db.setValues(data.insertRowMeta, insertRowData, data.insertStatement);
data.db.insertRow(data.insertStatement, data.batchMode);
performInsert = true;
if (!data.batchMode) {
incrementLinesOutput();
}
if (log.isRowLevel()) {
logRowlevel("Written row: " + data.insertRowMeta.getString(insertRowData));
}
} else {
Object[] lookupRow = new Object[data.keynrs.length];
int lookupIndex = 0;
for (int i = 0; i < meta.getKeyStream().length; i++) {
if (data.keynrs[i] >= 0) {
lookupRow[lookupIndex] = row[data.keynrs[i]];
lookupIndex++;
}
if (data.keynrs2[i] >= 0) {
lookupRow[lookupIndex] = row[data.keynrs2[i]];
lookupIndex++;
}
}
boolean updateorDelete = false;
if (meta.isPerformLookup()) {
if (meta.istablenameInField()) {
// Prepare Lookup statement
data.lookupStatement = data.preparedStatements.get(data.realSchemaTable + "lookup");
if (data.lookupStatement == null) {
String sql = getLookupStatement(data.inputRowMeta);
if (log.isDebug()) {
logDebug("Preparating SQL for insert: " + sql);
}
data.lookupStatement = data.db.prepareSql(sql);
data.preparedStatements.put(data.realSchemaTable + "lookup", data.lookupStatement);
}
}
data.db.setValues(data.lookupParameterRowMeta, lookupRow, data.lookupStatement);
if (log.isRowLevel()) {
logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.ValuesSetForLookup", data.lookupParameterRowMeta.getString(lookupRow)));
}
Object[] add = data.db.getLookup(data.lookupStatement);
incrementLinesInput();
if (add == null) {
if (data.stringErrorKeyNotFound == null) {
data.stringErrorKeyNotFound = BaseMessages.getString(PKG, "SynchronizeAfterMerge.Exception.KeyCouldNotFound") + data.lookupParameterRowMeta.getString(lookupRow);
data.stringFieldnames = "";
for (int i = 0; i < data.lookupParameterRowMeta.size(); i++) {
if (i > 0) {
data.stringFieldnames += ", ";
}
data.stringFieldnames += data.lookupParameterRowMeta.getValueMeta(i).getName();
}
}
data.lookupFailure = true;
throw new HopDatabaseException(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Exception.KeyCouldNotFound", data.lookupParameterRowMeta.getString(lookupRow)));
} else {
if (log.isRowLevel()) {
logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.FoundRowForUpdate", data.insertRowMeta.getString(row)));
}
for (int i = 0; i < data.valuenrs.length; i++) {
if (meta.getUpdate()[i].booleanValue()) {
IValueMeta valueMeta = data.inputRowMeta.getValueMeta(data.valuenrs[i]);
IValueMeta retMeta = data.db.getReturnRowMeta().getValueMeta(i);
Object rowvalue = row[data.valuenrs[i]];
Object retvalue = add[i];
if (valueMeta.compare(rowvalue, retMeta, retvalue) != 0) {
updateorDelete = true;
}
}
}
}
}
if (operation.equals(data.updateValue)) {
if (!meta.isPerformLookup() || updateorDelete) {
if (meta.istablenameInField()) {
data.updateStatement = data.preparedStatements.get(data.realSchemaTable + "update");
if (data.updateStatement == null) {
String sql = getUpdateStatement(data.inputRowMeta);
data.updateStatement = data.db.prepareSql(sql);
data.preparedStatements.put(data.realSchemaTable + "update", data.updateStatement);
if (log.isDebug()) {
logDebug("Preparation of the Update SQL statement : " + sql);
}
}
}
// Create the update row...
Object[] updateRow = new Object[data.updateParameterRowMeta.size()];
int j = 0;
for (int i = 0; i < data.valuenrs.length; i++) {
if (meta.getUpdate()[i].booleanValue()) {
// the setters
updateRow[j] = row[data.valuenrs[i]];
j++;
}
}
// add the where clause parameters, they are exactly the same for lookup and update
for (int i = 0; i < lookupRow.length; i++) {
updateRow[j + i] = lookupRow[i];
}
// Set a savepoint before the update when savepoint-based error handling is enabled
if (data.specialErrorHandling && data.supportsSavepoints) {
data.savepoint = data.db.setSavepoint();
}
data.db.setValues(data.updateParameterRowMeta, updateRow, data.updateStatement);
if (log.isRowLevel()) {
logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.SetValuesForUpdate", data.updateParameterRowMeta.getString(updateRow), data.inputRowMeta.getString(row)));
}
data.db.insertRow(data.updateStatement, data.batchMode);
performUpdate = true;
incrementLinesUpdated();
} else {
// The lookup found no differences: nothing to update, skip this row
incrementLinesSkipped();
lineSkipped = true;
}
} else if (operation.equals(data.deleteValue)) {
if (meta.istablenameInField()) {
data.deleteStatement = data.preparedStatements.get(data.realSchemaTable + "delete");
if (data.deleteStatement == null) {
String sql = getDeleteStatement(data.inputRowMeta);
data.deleteStatement = data.db.prepareSql(sql);
data.preparedStatements.put(data.realSchemaTable + "delete", data.deleteStatement);
if (log.isDebug()) {
logDebug("Preparation of the Delete SQL statement : " + sql);
}
}
}
Object[] deleteRow = new Object[data.deleteParameterRowMeta.size()];
int deleteIndex = 0;
for (int i = 0; i < meta.getKeyStream().length; i++) {
if (data.keynrs[i] >= 0) {
deleteRow[deleteIndex] = row[data.keynrs[i]];
deleteIndex++;
}
if (data.keynrs2[i] >= 0) {
deleteRow[deleteIndex] = row[data.keynrs2[i]];
deleteIndex++;
}
}
// Set a savepoint before the delete when savepoint-based error handling is enabled
if (data.specialErrorHandling && data.supportsSavepoints) {
data.savepoint = data.db.setSavepoint();
}
data.db.setValues(data.deleteParameterRowMeta, deleteRow, data.deleteStatement);
if (log.isRowLevel()) {
logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.SetValuesForDelete", data.deleteParameterRowMeta.getString(deleteRow), data.inputRowMeta.getString(row)));
}
data.db.insertRow(data.deleteStatement, data.batchMode);
performDelete = true;
incrementLinesUpdated();
} else {
// The operation is neither insert, update nor delete: skip this row
incrementLinesSkipped();
lineSkipped = true;
}
}
// Update the commit bookkeeping when the row touched the database, or when a skipped row follows buffered rows
if (performInsert || performUpdate || performDelete || (data.batchBuffer.size() > 0 && lineSkipped)) {
// Get a commit counter per prepared statement to keep track of separate tables, etc.
//
String tableName = data.realSchemaTable;
if (performInsert) {
tableName += "insert";
} else if (performUpdate) {
tableName += "update";
}
if (performDelete) {
tableName += "delete";
}
Integer commitCounter = data.commitCounterMap.get(tableName);
if (commitCounter == null) {
commitCounter = Integer.valueOf(0);
}
data.commitCounterMap.put(tableName, Integer.valueOf(commitCounter.intValue() + 1));
// The statement succeeded: release the savepoint if the database requires it
if (data.specialErrorHandling && data.supportsSavepoints) {
if (data.releaseSavepoint) {
data.db.releaseSavepoint(data.savepoint);
}
}
// Commit once the counter reaches the configured commit size
if (commitCounter > 0 && (commitCounter % data.commitSize) == 0) {
if (data.batchMode) {
try {
if (performInsert) {
data.insertStatement.executeBatch();
data.db.commit();
data.insertStatement.clearBatch();
} else if (performUpdate) {
data.updateStatement.executeBatch();
data.db.commit();
data.updateStatement.clearBatch();
} else if (performDelete) {
data.deleteStatement.executeBatch();
data.db.commit();
data.deleteStatement.clearBatch();
}
} catch (SQLException ex) {
throw Database.createHopDatabaseBatchException(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Error.UpdatingBatch"), ex);
} catch (Exception ex) {
throw new HopDatabaseException("Unexpected error inserting row", ex);
}
} else {
// insertRow normal commit
data.db.commit();
}
// Clear the batch/commit counter...
//
data.commitCounterMap.put(tableName, Integer.valueOf(0));
rowIsSafe = true;
} else {
rowIsSafe = false;
}
}
} catch (HopDatabaseBatchException be) {
errorMessage = be.toString();
batchProblem = true;
sendToErrorRow = true;
updateCounts = be.getUpdateCounts();
exceptionsList = be.getExceptionsList();
if (data.insertStatement != null) {
data.db.clearBatch(data.insertStatement);
}
if (data.updateStatement != null) {
data.db.clearBatch(data.updateStatement);
}
if (data.deleteStatement != null) {
data.db.clearBatch(data.deleteStatement);
}
if (getTransformMeta().isDoingErrorHandling()) {
data.db.commit(true);
} else {
data.db.rollback();
StringBuilder msg = new StringBuilder("Error batch inserting rows into table [" + data.realTableName + "].");
msg.append(Const.CR);
msg.append("Errors encountered (first 10):").append(Const.CR);
for (int x = 0; x < be.getExceptionsList().size() && x < 10; x++) {
Exception exception = be.getExceptionsList().get(x);
if (exception.getMessage() != null) {
msg.append(exception.getMessage()).append(Const.CR);
}
}
throw new HopException(msg.toString(), be);
}
} catch (HopDatabaseException dbe) {
if (getTransformMeta().isDoingErrorHandling()) {
if (log.isRowLevel()) {
logRowlevel("Written row to error handling : " + getInputRowMeta().getString(row));
}
if (data.specialErrorHandling && data.supportsSavepoints) {
if (data.savepoint != null && !data.lookupFailure) {
// Only roll back when a savepoint was actually set and this is not a lookup failure
data.db.rollback(data.savepoint);
if (data.releaseSavepoint) {
data.db.releaseSavepoint(data.savepoint);
}
}
}
sendToErrorRow = true;
errorMessage = dbe.toString();
} else {
setErrors(getErrors() + 1);
data.db.rollback();
throw new HopException("Error inserting row into table [" + data.realTableName + "] with values: " + data.inputRowMeta.getString(row), dbe);
}
}
if (data.batchMode) {
if (sendToErrorRow) {
if (batchProblem) {
data.batchBuffer.add(row);
processBatchException(errorMessage, updateCounts, exceptionsList);
} else {
// Simply add this row to the error row
putError(data.inputRowMeta, row, 1L, errorMessage, null, "SUYNC002");
}
} else {
if (!lineSkipped) {
data.batchBuffer.add(row);
}
if (rowIsSafe) {
// A commit was done and the rows are all safe (no error)
for (int i = 0; i < data.batchBuffer.size(); i++) {
Object[] rowb = data.batchBuffer.get(i);
putRow(data.outputRowMeta, rowb);
if (data.inputRowMeta.getString(rowb, data.indexOfOperationOrderField).equals(data.insertValue)) {
incrementLinesOutput();
}
}
// Clear the buffer
data.batchBuffer.clear();
}
// Skipped rows bypass the buffer and go straight to the output
if (lineSkipped) {
putRow(data.outputRowMeta, row);
}
}
} else {
if (sendToErrorRow) {
if (data.lookupFailure) {
putError(data.inputRowMeta, row, 1, data.stringErrorKeyNotFound, data.stringFieldnames, "SUYNC001");
} else {
putError(data.inputRowMeta, row, 1, errorMessage, null, "SUYNC001");
}
}
}
}
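The data.specialErrorHandling && data.supportsSavepoints branches above implement per-row recovery. A minimal plain-JDBC sketch of the same savepoint pattern follows; the connection and statement are assumptions for illustration, and Hop wraps the equivalent calls behind Database.setSavepoint(), rollback(savepoint), and releaseSavepoint().

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Savepoint;

public class SavepointSketch {
  // One row, one savepoint: a failure rolls back only this row while the
  // rest of the open transaction stays intact, so the row can be routed
  // to error handling instead of failing the whole pipeline.
  static boolean insertRow(Connection conn, PreparedStatement ps) throws SQLException {
    Savepoint sp = conn.setSavepoint();
    try {
      ps.executeUpdate();
      conn.releaseSavepoint(sp); // the row is safe, discard the savepoint
      return true;
    } catch (SQLException e) {
      conn.rollback(sp); // undo just this row
      return false; // the caller sends the row to the error stream
    }
  }
}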
Use of org.apache.hop.core.exception.HopDatabaseBatchException in project hop by apache.
The class TableOutput, method writeToTable.
protected Object[] writeToTable(IRowMeta rowMeta, Object[] r) throws HopException {
if (r == null) {
// Stop: last line or error encountered
if (log.isDetailed()) {
logDetailed("Last line inserted: stop");
}
return null;
}
PreparedStatement insertStatement = null;
Object[] insertRowData;
Object[] outputRowData = r;
String tableName = null;
boolean sendToErrorRow = false;
String errorMessage = null;
boolean rowIsSafe = false;
int[] updateCounts = null;
List<Exception> exceptionsList = null;
boolean batchProblem = false;
Object generatedKey = null;
if (meta.isTableNameInField()) {
// Cache the position of the table name field
if (data.indexOfTableNameField < 0) {
String realTablename = resolve(meta.getTableNameField());
data.indexOfTableNameField = rowMeta.indexOfValue(realTablename);
if (data.indexOfTableNameField < 0) {
String message = "Unable to find table name field [" + realTablename + "] in input row";
logError(message);
throw new HopTransformException(message);
}
if (!meta.isTableNameInTable() && !meta.isSpecifyFields()) {
data.insertRowMeta.removeValueMeta(data.indexOfTableNameField);
}
}
tableName = rowMeta.getString(r, data.indexOfTableNameField);
if (!meta.isTableNameInTable() && !meta.isSpecifyFields()) {
// If the name of the table should not be inserted itself, remove the table name
// from the input row data as well. This forcibly creates a copy of r
//
insertRowData = RowDataUtil.removeItem(rowMeta.cloneRow(r), data.indexOfTableNameField);
} else {
insertRowData = r;
}
} else if (meta.isPartitioningEnabled() && (meta.isPartitioningDaily() || meta.isPartitioningMonthly()) && (meta.getPartitioningField() != null && meta.getPartitioningField().length() > 0)) {
// Initialize some stuff!
if (data.indexOfPartitioningField < 0) {
data.indexOfPartitioningField = rowMeta.indexOfValue(resolve(meta.getPartitioningField()));
if (data.indexOfPartitioningField < 0) {
throw new HopTransformException("Unable to find field [" + meta.getPartitioningField() + "] in the input row!");
}
if (Boolean.TRUE.equals(meta.isPartitioningDaily())) {
data.dateFormater = new SimpleDateFormat("yyyyMMdd");
} else {
data.dateFormater = new SimpleDateFormat("yyyyMM");
}
}
IValueMeta partitioningValue = rowMeta.getValueMeta(data.indexOfPartitioningField);
if (!partitioningValue.isDate() || r[data.indexOfPartitioningField] == null) {
throw new HopTransformException("Sorry, the partitioning field needs to contain a data value and can't be empty!");
}
Object partitioningValueData = rowMeta.getDate(r, data.indexOfPartitioningField);
tableName = resolve(meta.getTableName()) + "_" + data.dateFormater.format((Date) partitioningValueData);
insertRowData = r;
} else {
tableName = data.tableName;
insertRowData = r;
}
if (meta.isSpecifyFields()) {
//
// The values to insert are those in the fields sections
//
insertRowData = new Object[data.valuenrs.length];
for (int idx = 0; idx < data.valuenrs.length; idx++) {
insertRowData[idx] = r[data.valuenrs[idx]];
}
}
if (Utils.isEmpty(tableName)) {
throw new HopTransformException("The tablename is not defined (empty)");
}
insertStatement = data.preparedStatements.get(tableName);
if (insertStatement == null) {
String sql = data.db.getInsertStatement(resolve(meta.getSchemaName()), tableName, data.insertRowMeta);
if (log.isDetailed()) {
logDetailed("Prepared statement : " + sql);
}
insertStatement = data.db.prepareSql(sql, meta.isReturningGeneratedKeys());
data.preparedStatements.put(tableName, insertStatement);
}
try {
// Set a savepoint when safe-point error handling is active
if (data.useSafePoints) {
data.savepoint = data.db.setSavepoint();
}
data.db.setValues(data.insertRowMeta, insertRowData, insertStatement);
// false: no commit here, the commit is handled differently in this transform
data.db.insertRow(insertStatement, data.batchMode, false);
if (isRowLevel()) {
logRowlevel("Written row: " + data.insertRowMeta.getString(insertRowData));
}
// Get a commit counter per prepared statement to keep track of separate tables, etc.
//
Integer commitCounter = data.commitCounterMap.get(tableName);
if (commitCounter == null) {
commitCounter = Integer.valueOf(1);
} else {
commitCounter++;
}
data.commitCounterMap.put(tableName, commitCounter);
// The row is in: release the savepoint if the database requires it
if (data.useSafePoints && data.releaseSavepoint) {
data.db.releaseSavepoint(data.savepoint);
}
if ((data.commitSize > 0) && ((commitCounter % data.commitSize) == 0)) {
if (data.db.getUseBatchInsert(data.batchMode)) {
try {
insertStatement.executeBatch();
data.db.commit();
insertStatement.clearBatch();
} catch (SQLException ex) {
throw Database.createHopDatabaseBatchException("Error updating batch", ex);
} catch (Exception ex) {
throw new HopDatabaseException("Unexpected error inserting row", ex);
}
} else {
// insertRow normal commit
data.db.commit();
}
// Clear the batch/commit counter...
//
data.commitCounterMap.put(tableName, Integer.valueOf(0));
rowIsSafe = true;
} else {
rowIsSafe = false;
}
// See if we need to get back the keys as well...
if (meta.isReturningGeneratedKeys()) {
RowMetaAndData extraKeys = data.db.getGeneratedKeys(insertStatement);
if (extraKeys.getRowMeta().size() > 0) {
// Send out the good word!
// Only 1 key at the moment. (should be enough for now :-)
generatedKey = extraKeys.getRowMeta().getInteger(extraKeys.getData(), 0);
} else {
// No generated key came back; remember that a hop should always contain rows of the same type.
throw new HopTransformException("No generated keys while \"return generated keys\" is active!");
}
}
} catch (HopDatabaseBatchException be) {
errorMessage = be.toString();
batchProblem = true;
sendToErrorRow = true;
updateCounts = be.getUpdateCounts();
exceptionsList = be.getExceptionsList();
if (getTransformMeta().isDoingErrorHandling()) {
data.db.clearBatch(insertStatement);
data.db.commit(true);
} else {
data.db.clearBatch(insertStatement);
data.db.rollback();
StringBuilder msg = new StringBuilder("Error batch inserting rows into table [" + tableName + "].");
msg.append(Const.CR);
msg.append("Errors encountered (first 10):").append(Const.CR);
for (int x = 0; x < be.getExceptionsList().size() && x < 10; x++) {
Exception exception = be.getExceptionsList().get(x);
if (exception.getMessage() != null) {
msg.append(exception.getMessage()).append(Const.CR);
}
}
throw new HopException(msg.toString(), be);
}
} catch (HopDatabaseException dbe) {
if (getTransformMeta().isDoingErrorHandling()) {
if (isRowLevel()) {
logRowlevel("Written row to error handling : " + getInputRowMeta().getString(r));
}
if (data.useSafePoints) {
data.db.rollback(data.savepoint);
if (data.releaseSavepoint) {
data.db.releaseSavepoint(data.savepoint);
}
// data.db.commit(true); // force a commit on the connection too.
}
sendToErrorRow = true;
errorMessage = dbe.toString();
} else {
if (meta.isIgnoreErrors()) {
if (data.warnings < 20) {
if (log.isBasic()) {
logBasic("WARNING: Couldn't insert row into table: " + rowMeta.getString(r) + Const.CR + dbe.getMessage());
}
} else if (data.warnings == 20 && log.isBasic()) {
logBasic("FINAL WARNING (no more then 20 displayed): Couldn't insert row into table: " + rowMeta.getString(r) + Const.CR + dbe.getMessage());
}
data.warnings++;
} else {
setErrors(getErrors() + 1);
data.db.rollback();
throw new HopException("Error inserting row into table [" + tableName + "] with values: " + rowMeta.getString(r), dbe);
}
}
}
// We need to add a key
if (generatedKey != null) {
outputRowData = RowDataUtil.addValueData(outputRowData, rowMeta.size(), generatedKey);
}
if (data.batchMode) {
if (sendToErrorRow) {
if (batchProblem) {
data.batchBuffer.add(outputRowData);
outputRowData = null;
processBatchException(errorMessage, updateCounts, exceptionsList);
} else {
// Simply add this row to the error row
putError(rowMeta, r, 1L, errorMessage, null, "TOP001");
outputRowData = null;
}
} else {
data.batchBuffer.add(outputRowData);
outputRowData = null;
if (rowIsSafe) {
// A commit was done and the rows are all safe (no error)
for (int i = 0; i < data.batchBuffer.size(); i++) {
Object[] row = data.batchBuffer.get(i);
putRow(data.outputRowMeta, row);
incrementLinesOutput();
}
// Clear the buffer
data.batchBuffer.clear();
}
}
} else {
if (sendToErrorRow) {
putError(rowMeta, r, 1, errorMessage, null, "TOP001");
outputRowData = null;
}
}
return outputRowData;
}
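The generated-keys path above has a direct plain-JDBC equivalent: prepare the statement with Statement.RETURN_GENERATED_KEYS (what prepareSql(sql, true) is expected to do under the hood) and read the first key back after the insert. The table and column names here are made up for illustration.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class GeneratedKeySketch {
  // Mirrors TableOutput: insert one row and return only the first
  // generated key, failing loudly when the driver returns none.
  static long insertAndReturnKey(Connection conn, String name) throws SQLException {
    try (PreparedStatement ps = conn.prepareStatement(
        "INSERT INTO demo_table (name) VALUES (?)",
        Statement.RETURN_GENERATED_KEYS)) {
      ps.setString(1, name);
      ps.executeUpdate();
      try (ResultSet keys = ps.getGeneratedKeys()) {
        if (!keys.next()) {
          throw new SQLException("No generated keys while \"return generated keys\" is active!");
        }
        return keys.getLong(1); // only the first key, as in TableOutput
      }
    }
  }
}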
Use of org.apache.hop.core.exception.HopDatabaseBatchException in project hop by apache.
The class TableOutput, method emptyAndCommitBatchBuffers.
private void emptyAndCommitBatchBuffers(boolean dispose) {
try {
for (String schemaTable : data.preparedStatements.keySet()) {
// Get a commit counter per prepared statement to keep track of separate tables, etc.
//
Integer batchCounter = data.commitCounterMap.get(schemaTable);
if (batchCounter == null || batchCounter == 0) {
// Skip this one, no work required
continue;
}
PreparedStatement insertStatement = data.preparedStatements.get(schemaTable);
data.db.emptyAndCommit(insertStatement, data.batchMode, batchCounter, dispose);
data.commitCounterMap.put(schemaTable, 0);
}
for (int i = 0; i < data.batchBuffer.size(); i++) {
Object[] row = data.batchBuffer.get(i);
putRow(data.outputRowMeta, row);
incrementLinesOutput();
}
// Clear the buffer
data.batchBuffer.clear();
} catch (HopDatabaseBatchException be) {
if (getTransformMeta().isDoingErrorHandling()) {
// OK, we have the numbers...
try {
processBatchException(be.toString(), be.getUpdateCounts(), be.getExceptionsList());
} catch (HopException e) {
logError("Unexpected error processing batch error", e);
setErrors(1);
stopAll();
}
} else {
logError("Unexpected batch update error committing the database connection.", be);
setErrors(1);
stopAll();
}
} catch (Exception dbe) {
logError("Unexpected error committing the database connection.", dbe);
logError(Const.getStackTracker(dbe));
setErrors(1);
stopAll();
} finally {
setOutputDone();
if (getErrors() > 0) {
try {
data.db.rollback();
} catch (HopDatabaseException e) {
logError("Unexpected error rolling back the database connection.", e);
}
}
}
}
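As a final illustration, a hedged sketch of the bookkeeping a processBatchException() implementation needs: matching the per-row update counts carried by HopDatabaseBatchException back to the buffer of pending rows. The names here are assumptions, not the Hop API; a null update-count array follows the convention set in createHopDatabaseBatchException above, where null forces a rollback of the whole batch.

import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

public class BatchErrorRouting {
  // Returns the buffered rows whose batch entry failed. With null counts
  // (see createHopDatabaseBatchException) the whole batch was rolled back,
  // so every buffered row is treated as suspect.
  static List<Object[]> failedRows(List<Object[]> buffer, int[] updateCounts) {
    if (updateCounts == null) {
      return new ArrayList<>(buffer);
    }
    List<Object[]> failed = new ArrayList<>();
    for (int i = 0; i < updateCounts.length && i < buffer.size(); i++) {
      if (updateCounts[i] == Statement.EXECUTE_FAILED) {
        failed.add(buffer.get(i));
      }
    }
    return failed;
  }
}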