Example 1 with KettleDatabaseBatchException

Use of org.pentaho.di.core.exception.KettleDatabaseBatchException in project pentaho-kettle by pentaho.

Class TableOutput, method dispose():

public void dispose(StepMetaInterface smi, StepDataInterface sdi) {
    meta = (TableOutputMeta) smi;
    data = (TableOutputData) sdi;
    if (data.db != null) {
        try {
            for (String schemaTable : data.preparedStatements.keySet()) {
                // Get a commit counter per prepared statement to keep track of separate tables, etc.
                // 
                Integer batchCounter = data.commitCounterMap.get(schemaTable);
                if (batchCounter == null) {
                    batchCounter = 0;
                }
                PreparedStatement insertStatement = data.preparedStatements.get(schemaTable);
                data.db.emptyAndCommit(insertStatement, data.batchMode, batchCounter);
            }
            for (int i = 0; i < data.batchBuffer.size(); i++) {
                Object[] row = data.batchBuffer.get(i);
                putRow(data.outputRowMeta, row);
                incrementLinesOutput();
            }
            // Clear the buffer
            data.batchBuffer.clear();
        } catch (KettleDatabaseBatchException be) {
            if (getStepMeta().isDoingErrorHandling()) {
                // OK, we have the numbers...
                try {
                    processBatchException(be.toString(), be.getUpdateCounts(), be.getExceptionsList());
                } catch (KettleException e) {
                    logError("Unexpected error processing batch error", e);
                    setErrors(1);
                    stopAll();
                }
            } else {
                logError("Unexpected batch update error committing the database connection.", be);
                setErrors(1);
                stopAll();
            }
        } catch (Exception dbe) {
            logError("Unexpected error committing the database connection.", dbe);
            logError(Const.getStackTracker(dbe));
            setErrors(1);
            stopAll();
        } finally {
            setOutputDone();
            if (getErrors() > 0) {
                try {
                    data.db.rollback();
                } catch (KettleDatabaseException e) {
                    logError("Unexpected error rolling back the database connection.", e);
                }
            }
            data.db.disconnect();
        }
        super.dispose(smi, sdi);
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) KettleDatabaseBatchException(org.pentaho.di.core.exception.KettleDatabaseBatchException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) PreparedStatement(java.sql.PreparedStatement) SQLException(java.sql.SQLException)
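
Both this dispose() and finishStep() in Example 2 below rely on the same buffer-and-replay idea: rows written in batch mode are parked in data.batchBuffer and only handed downstream with putRow once emptyAndCommit has confirmed the outstanding batch, so a failed commit never releases unconfirmed rows. The following minimal, Kettle-free sketch illustrates the pattern; every name in it is hypothetical, not part of the Kettle API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BatchBufferSketch {
    private final List<Object[]> batchBuffer = new ArrayList<>();

    // Called for every row written in batch mode: hold it until a commit succeeds.
    void bufferRow(Object[] row) {
        batchBuffer.add(row);
    }

    // Called after executeBatch()/commit() succeeded: only now are the rows "safe"
    // and may be replayed downstream (this stands in for putRow above).
    void onCommitSucceeded() {
        for (Object[] row : batchBuffer) {
            System.out.println("row is safe: " + Arrays.toString(row));
        }
        batchBuffer.clear();
    }
}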

Example 2 with KettleDatabaseBatchException

Use of org.pentaho.di.core.exception.KettleDatabaseBatchException in project pentaho-kettle by pentaho.

Class SynchronizeAfterMerge, method finishStep():

private void finishStep() {
    if (data.db != null && data.db.getConnection() != null) {
        try {
            if (!data.db.getConnection().isClosed()) {
                for (String schemaTable : data.preparedStatements.keySet()) {
                    // Get a commit counter per prepared statement to keep track of separate tables, etc.
                    // 
                    Integer batchCounter = data.commitCounterMap.get(schemaTable);
                    if (batchCounter == null) {
                        batchCounter = 0;
                    }
                    PreparedStatement insertStatement = data.preparedStatements.get(schemaTable);
                    data.db.emptyAndCommit(insertStatement, data.batchMode, batchCounter);
                }
                for (int i = 0; i < data.batchBuffer.size(); i++) {
                    Object[] row = data.batchBuffer.get(i);
                    putRow(data.outputRowMeta, row);
                    if (data.inputRowMeta.getString(row, data.indexOfOperationOrderField).equals(data.insertValue)) {
                        incrementLinesOutput();
                    }
                }
                // Clear the buffer
                data.batchBuffer.clear();
            }
        } catch (KettleDatabaseBatchException be) {
            if (getStepMeta().isDoingErrorHandling()) {
                // OK, we have the numbers...
                try {
                    processBatchException(be.toString(), be.getUpdateCounts(), be.getExceptionsList());
                } catch (KettleException e) {
                    logError("Unexpected error processing batch error", e);
                    setErrors(1);
                    stopAll();
                }
            } else {
                logError("Unexpected batch update error committing the database connection.", be);
                setErrors(1);
                stopAll();
            }
        } catch (Exception dbe) {
            logError("Unexpected error committing the database connection.", dbe);
            logError(Const.getStackTracker(dbe));
            setErrors(1);
            stopAll();
        } finally {
            setOutputDone();
            if (getErrors() > 0) {
                try {
                    data.db.rollback();
                } catch (KettleDatabaseException e) {
                    logError("Unexpected error rolling back the database connection.", e);
                }
            }
            data.db.disconnect();
        }
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) KettleDatabaseBatchException(org.pentaho.di.core.exception.KettleDatabaseBatchException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) PreparedStatement(java.sql.PreparedStatement) SQLException(java.sql.SQLException)

Example 3 with KettleDatabaseBatchException

Use of org.pentaho.di.core.exception.KettleDatabaseBatchException in project pentaho-kettle by pentaho.

Class TableOutput, method writeToTable():

protected Object[] writeToTable(RowMetaInterface rowMeta, Object[] r) throws KettleException {
    if (r == null) {
        // Stop: last line or error encountered
        if (log.isDetailed()) {
            logDetailed("Last line inserted: stop");
        }
        return null;
    }
    PreparedStatement insertStatement = null;
    Object[] insertRowData;
    Object[] outputRowData = r;
    String tableName = null;
    boolean sendToErrorRow = false;
    String errorMessage = null;
    boolean rowIsSafe = false;
    int[] updateCounts = null;
    List<Exception> exceptionsList = null;
    boolean batchProblem = false;
    Object generatedKey = null;
    if (meta.isTableNameInField()) {
        // Cache the position of the table name field
        if (data.indexOfTableNameField < 0) {
            String realTablename = environmentSubstitute(meta.getTableNameField());
            data.indexOfTableNameField = rowMeta.indexOfValue(realTablename);
            if (data.indexOfTableNameField < 0) {
                String message = "Unable to find table name field [" + realTablename + "] in input row";
                logError(message);
                throw new KettleStepException(message);
            }
            if (!meta.isTableNameInTable() && !meta.specifyFields()) {
                data.insertRowMeta.removeValueMeta(data.indexOfTableNameField);
            }
        }
        tableName = rowMeta.getString(r, data.indexOfTableNameField);
        if (!meta.isTableNameInTable() && !meta.specifyFields()) {
            // If the name of the table should not be inserted itself, remove the table name
            // from the input row data as well. This forcibly creates a copy of r
            // 
            insertRowData = RowDataUtil.removeItem(rowMeta.cloneRow(r), data.indexOfTableNameField);
        } else {
            insertRowData = r;
        }
    } else if (meta.isPartitioningEnabled() && (meta.isPartitioningDaily() || meta.isPartitioningMonthly()) && (meta.getPartitioningField() != null && meta.getPartitioningField().length() > 0)) {
        // Initialize some stuff!
        if (data.indexOfPartitioningField < 0) {
            data.indexOfPartitioningField = rowMeta.indexOfValue(environmentSubstitute(meta.getPartitioningField()));
            if (data.indexOfPartitioningField < 0) {
                throw new KettleStepException("Unable to find field [" + meta.getPartitioningField() + "] in the input row!");
            }
            if (meta.isPartitioningDaily()) {
                data.dateFormater = new SimpleDateFormat("yyyyMMdd");
            } else {
                data.dateFormater = new SimpleDateFormat("yyyyMM");
            }
        }
        ValueMetaInterface partitioningValue = rowMeta.getValueMeta(data.indexOfPartitioningField);
        if (!partitioningValue.isDate() || r[data.indexOfPartitioningField] == null) {
            throw new KettleStepException("Sorry, the partitioning field needs to contain a data value and can't be empty!");
        }
        Object partitioningValueData = rowMeta.getDate(r, data.indexOfPartitioningField);
        tableName = environmentSubstitute(meta.getTableName()) + "_" + data.dateFormater.format((Date) partitioningValueData);
        insertRowData = r;
    } else {
        tableName = data.tableName;
        insertRowData = r;
    }
    if (meta.specifyFields()) {
        // 
        // The values to insert are those in the fields sections
        // 
        insertRowData = new Object[data.valuenrs.length];
        for (int idx = 0; idx < data.valuenrs.length; idx++) {
            insertRowData[idx] = r[data.valuenrs[idx]];
        }
    }
    if (Utils.isEmpty(tableName)) {
        throw new KettleStepException("The tablename is not defined (empty)");
    }
    insertStatement = data.preparedStatements.get(tableName);
    if (insertStatement == null) {
        String sql = data.db.getInsertStatement(environmentSubstitute(meta.getSchemaName()), tableName, data.insertRowMeta);
        if (log.isDetailed()) {
            logDetailed("Prepared statement : " + sql);
        }
        insertStatement = data.db.prepareSQL(sql, meta.isReturningGeneratedKeys());
        data.preparedStatements.put(tableName, insertStatement);
    }
    try {
        // 
        if (data.useSafePoints) {
            data.savepoint = data.db.setSavepoint();
        }
        data.db.setValues(data.insertRowMeta, insertRowData, insertStatement);
        // false: no commit yet, commits are handled by this step itself
        data.db.insertRow(insertStatement, data.batchMode, false);
        if (isRowLevel()) {
            logRowlevel("Written row: " + data.insertRowMeta.getString(insertRowData));
        }
        // Get a commit counter per prepared statement to keep track of separate tables, etc.
        // 
        Integer commitCounter = data.commitCounterMap.get(tableName);
        if (commitCounter == null) {
            commitCounter = Integer.valueOf(1);
        } else {
            commitCounter++;
        }
        data.commitCounterMap.put(tableName, Integer.valueOf(commitCounter.intValue()));
        // 
        if (data.useSafePoints) {
            if (data.releaseSavepoint) {
                data.db.releaseSavepoint(data.savepoint);
            }
        }
        if ((data.commitSize > 0) && ((commitCounter % data.commitSize) == 0)) {
            if (data.db.getUseBatchInsert(data.batchMode)) {
                try {
                    insertStatement.executeBatch();
                    data.db.commit();
                    insertStatement.clearBatch();
                } catch (SQLException ex) {
                    throw Database.createKettleDatabaseBatchException("Error updating batch", ex);
                } catch (Exception ex) {
                    throw new KettleDatabaseException("Unexpected error inserting row", ex);
                }
            } else {
                // insertRow normal commit
                data.db.commit();
            }
            // Clear the batch/commit counter...
            // 
            data.commitCounterMap.put(tableName, Integer.valueOf(0));
            rowIsSafe = true;
        } else {
            rowIsSafe = false;
        }
        // See if we need to get back the keys as well...
        if (meta.isReturningGeneratedKeys()) {
            RowMetaAndData extraKeys = data.db.getGeneratedKeys(insertStatement);
            if (extraKeys.getRowMeta().size() > 0) {
                // Send out the good word!
                // Only 1 key at the moment. (should be enough for now :-)
                generatedKey = extraKeys.getRowMeta().getInteger(extraKeys.getData(), 0);
            } else {
                // We have to throw something here, otherwise we don't know the type
                // of the returned key(s) and would violate the rule
                // that a hop should always contain rows of the same type.
                throw new KettleStepException("No generated keys while \"return generated keys\" is active!");
            }
        }
    } catch (KettleDatabaseBatchException be) {
        errorMessage = be.toString();
        batchProblem = true;
        sendToErrorRow = true;
        updateCounts = be.getUpdateCounts();
        exceptionsList = be.getExceptionsList();
        if (getStepMeta().isDoingErrorHandling()) {
            data.db.clearBatch(insertStatement);
            data.db.commit(true);
        } else {
            data.db.clearBatch(insertStatement);
            data.db.rollback();
            StringBuilder msg = new StringBuilder("Error batch inserting rows into table [" + tableName + "].");
            msg.append(Const.CR);
            msg.append("Errors encountered (first 10):").append(Const.CR);
            for (int x = 0; x < be.getExceptionsList().size() && x < 10; x++) {
                Exception exception = be.getExceptionsList().get(x);
                if (exception.getMessage() != null) {
                    msg.append(exception.getMessage()).append(Const.CR);
                }
            }
            throw new KettleException(msg.toString(), be);
        }
    } catch (KettleDatabaseException dbe) {
        if (getStepMeta().isDoingErrorHandling()) {
            if (isRowLevel()) {
                logRowlevel("Written row to error handling : " + getInputRowMeta().getString(r));
            }
            if (data.useSafePoints) {
                data.db.rollback(data.savepoint);
                if (data.releaseSavepoint) {
                    data.db.releaseSavepoint(data.savepoint);
                }
            // data.db.commit(true); // force a commit on the connection too.
            }
            sendToErrorRow = true;
            errorMessage = dbe.toString();
        } else {
            if (meta.ignoreErrors()) {
                if (data.warnings < 20) {
                    if (log.isBasic()) {
                        logBasic("WARNING: Couldn't insert row into table: " + rowMeta.getString(r) + Const.CR + dbe.getMessage());
                    }
                } else if (data.warnings == 20) {
                    if (log.isBasic()) {
                        logBasic("FINAL WARNING (no more then 20 displayed): Couldn't insert row into table: " + rowMeta.getString(r) + Const.CR + dbe.getMessage());
                    }
                }
                data.warnings++;
            } else {
                setErrors(getErrors() + 1);
                data.db.rollback();
                throw new KettleException("Error inserting row into table [" + tableName + "] with values: " + rowMeta.getString(r), dbe);
            }
        }
    }
    // We need to add a key
    if (generatedKey != null) {
        outputRowData = RowDataUtil.addValueData(outputRowData, rowMeta.size(), generatedKey);
    }
    if (data.batchMode) {
        if (sendToErrorRow) {
            if (batchProblem) {
                data.batchBuffer.add(outputRowData);
                outputRowData = null;
                processBatchException(errorMessage, updateCounts, exceptionsList);
            } else {
                // Simply add this row to the error row
                putError(rowMeta, r, 1L, errorMessage, null, "TOP001");
                outputRowData = null;
            }
        } else {
            data.batchBuffer.add(outputRowData);
            outputRowData = null;
            if (rowIsSafe) {
                // A commit was done and the rows are all safe (no error)
                for (int i = 0; i < data.batchBuffer.size(); i++) {
                    Object[] row = data.batchBuffer.get(i);
                    putRow(data.outputRowMeta, row);
                    incrementLinesOutput();
                }
                // Clear the buffer
                data.batchBuffer.clear();
            }
        }
    } else {
        if (sendToErrorRow) {
            putError(rowMeta, r, 1, errorMessage, null, "TOP001");
            outputRowData = null;
        }
    }
    return outputRowData;
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) KettleDatabaseBatchException(org.pentaho.di.core.exception.KettleDatabaseBatchException) SQLException(java.sql.SQLException) PreparedStatement(java.sql.PreparedStatement) SimpleDateFormat(java.text.SimpleDateFormat) Date(java.util.Date) ValueMetaInterface(org.pentaho.di.core.row.ValueMetaInterface) RowMetaAndData(org.pentaho.di.core.RowMetaAndData)
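
The commit logic in writeToTable() hinges on a per-table counter held in data.commitCounterMap: every prepared statement keeps its own count, and the batch is executed and committed once the counter reaches data.commitSize. The sketch below isolates that bookkeeping with a plain HashMap; the class name and the commit interval are assumptions for illustration, not the Kettle implementation.

import java.util.HashMap;
import java.util.Map;

public class CommitCounterSketch {
    private final Map<String, Integer> commitCounterMap = new HashMap<>();
    private final int commitSize = 100; // assumed commit interval

    // Count one row for the given table; returns true when a commit is due.
    boolean countRow(String tableName) {
        int counter = commitCounterMap.getOrDefault(tableName, 0) + 1;
        if (commitSize > 0 && counter % commitSize == 0) {
            // Mirrors the "Clear the batch/commit counter" step above.
            commitCounterMap.put(tableName, 0);
            return true;
        }
        commitCounterMap.put(tableName, counter);
        return false;
    }
}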

Example 4 with KettleDatabaseBatchException

Use of org.pentaho.di.core.exception.KettleDatabaseBatchException in project pentaho-kettle by pentaho.

Class SynchronizeAfterMerge, method lookupValues():

private synchronized void lookupValues(Object[] row) throws KettleException {
    // Get the operation for the current row: do we insert, update or delete?
    String operation = data.inputRowMeta.getString(row, data.indexOfOperationOrderField);
    boolean rowIsSafe = false;
    boolean sendToErrorRow = false;
    String errorMessage = null;
    int[] updateCounts = null;
    List<Exception> exceptionsList = null;
    boolean batchProblem = false;
    data.lookupFailure = false;
    boolean performInsert = false;
    boolean performUpdate = false;
    boolean performDelete = false;
    boolean lineSkipped = false;
    try {
        if (operation == null) {
            throw new KettleException(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.OperationFieldEmpty", meta.getOperationOrderField()));
        }
        if (meta.istablenameInField()) {
            // get dynamic table name
            data.realTableName = data.inputRowMeta.getString(row, data.indexOfTableNameField);
            if (Utils.isEmpty(data.realTableName)) {
                throw new KettleStepException("The name of the table is not specified!");
            }
            data.realSchemaTable = data.db.getDatabaseMeta().getQuotedSchemaTableCombination(data.realSchemaName, data.realTableName);
        }
        if (operation.equals(data.insertValue)) {
            if (log.isRowLevel()) {
                logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.InsertRow", row.toString()));
            }
            // The values to insert are those in the update section
            // 
            Object[] insertRowData = new Object[data.valuenrs.length];
            for (int i = 0; i < data.valuenrs.length; i++) {
                insertRowData[i] = row[data.valuenrs[i]];
            }
            if (meta.istablenameInField()) {
                data.insertStatement = data.preparedStatements.get(data.realSchemaTable + "insert");
                if (data.insertStatement == null) {
                    String sql = data.db.getInsertStatement(data.realSchemaName, data.realTableName, data.insertRowMeta);
                    if (log.isDebug()) {
                        logDebug("Preparation of the insert SQL statement: " + sql);
                    }
                    data.insertStatement = data.db.prepareSQL(sql);
                    data.preparedStatements.put(data.realSchemaTable + "insert", data.insertStatement);
                }
            }
            // 
            if (data.specialErrorHandling && data.supportsSavepoints) {
                data.savepoint = data.db.setSavepoint();
            }
            // Set the values on the prepared statement...
            data.db.setValues(data.insertRowMeta, insertRowData, data.insertStatement);
            data.db.insertRow(data.insertStatement, data.batchMode);
            performInsert = true;
            if (!data.batchMode) {
                incrementLinesOutput();
            }
            if (log.isRowLevel()) {
                logRowlevel("Written row: " + data.insertRowMeta.getString(insertRowData));
            }
        } else {
            Object[] lookupRow = new Object[data.keynrs.length];
            int lookupIndex = 0;
            for (int i = 0; i < meta.getKeyStream().length; i++) {
                if (data.keynrs[i] >= 0) {
                    lookupRow[lookupIndex] = row[data.keynrs[i]];
                    lookupIndex++;
                }
                if (data.keynrs2[i] >= 0) {
                    lookupRow[lookupIndex] = row[data.keynrs2[i]];
                    lookupIndex++;
                }
            }
            boolean updateorDelete = false;
            if (meta.isPerformLookup()) {
                if (meta.istablenameInField()) {
                    // Prepare Lookup statement
                    data.lookupStatement = data.preparedStatements.get(data.realSchemaTable + "lookup");
                    if (data.lookupStatement == null) {
                        String sql = getLookupStatement(data.inputRowMeta);
                        if (log.isDebug()) {
                            logDebug("Preparating SQL for insert: " + sql);
                        }
                        data.lookupStatement = data.db.prepareSQL(sql);
                        data.preparedStatements.put(data.realSchemaTable + "lookup", data.lookupStatement);
                    }
                }
                data.db.setValues(data.lookupParameterRowMeta, lookupRow, data.lookupStatement);
                if (log.isRowLevel()) {
                    logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.ValuesSetForLookup", data.lookupParameterRowMeta.getString(lookupRow)));
                }
                Object[] add = data.db.getLookup(data.lookupStatement);
                incrementLinesInput();
                if (add == null) {
                    if (data.stringErrorKeyNotFound == null) {
                        data.stringErrorKeyNotFound = BaseMessages.getString(PKG, "SynchronizeAfterMerge.Exception.KeyCouldNotFound") + data.lookupParameterRowMeta.getString(lookupRow);
                        data.stringFieldnames = "";
                        for (int i = 0; i < data.lookupParameterRowMeta.size(); i++) {
                            if (i > 0) {
                                data.stringFieldnames += ", ";
                            }
                            data.stringFieldnames += data.lookupParameterRowMeta.getValueMeta(i).getName();
                        }
                    }
                    data.lookupFailure = true;
                    throw new KettleDatabaseException(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Exception.KeyCouldNotFound", data.lookupParameterRowMeta.getString(lookupRow)));
                } else {
                    if (log.isRowLevel()) {
                        logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.FoundRowForUpdate", data.insertRowMeta.getString(row)));
                    }
                    for (int i = 0; i < data.valuenrs.length; i++) {
                        if (meta.getUpdate()[i].booleanValue()) {
                            ValueMetaInterface valueMeta = data.inputRowMeta.getValueMeta(data.valuenrs[i]);
                            ValueMetaInterface retMeta = data.db.getReturnRowMeta().getValueMeta(i);
                            Object rowvalue = row[data.valuenrs[i]];
                            Object retvalue = add[i];
                            if (valueMeta.compare(rowvalue, retMeta, retvalue) != 0) {
                                updateorDelete = true;
                            }
                        }
                    }
                }
            }
            if (operation.equals(data.updateValue)) {
                if (!meta.isPerformLookup() || updateorDelete) {
                    if (meta.istablenameInField()) {
                        data.updateStatement = data.preparedStatements.get(data.realSchemaTable + "update");
                        if (data.updateStatement == null) {
                            String sql = getUpdateStatement(data.inputRowMeta);
                            data.updateStatement = data.db.prepareSQL(sql);
                            data.preparedStatements.put(data.realSchemaTable + "update", data.updateStatement);
                            if (log.isDebug()) {
                                logDebug("Preparation of the Update SQL statement : " + sql);
                            }
                        }
                    }
                    // Create the update row...
                    Object[] updateRow = new Object[data.updateParameterRowMeta.size()];
                    int j = 0;
                    for (int i = 0; i < data.valuenrs.length; i++) {
                        if (meta.getUpdate()[i].booleanValue()) {
                            updateRow[j] = row[data.valuenrs[i]]; // the setters
                            j++;
                        }
                    }
                    // add the where clause parameters, they are exactly the same for lookup and update
                    for (int i = 0; i < lookupRow.length; i++) {
                        updateRow[j + i] = lookupRow[i];
                    }
                    // 
                    if (data.specialErrorHandling && data.supportsSavepoints) {
                        data.savepoint = data.db.setSavepoint();
                    }
                    data.db.setValues(data.updateParameterRowMeta, updateRow, data.updateStatement);
                    if (log.isRowLevel()) {
                        logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.SetValuesForUpdate", data.updateParameterRowMeta.getString(updateRow), data.inputRowMeta.getString(row)));
                    }
                    data.db.insertRow(data.updateStatement, data.batchMode);
                    performUpdate = true;
                    incrementLinesUpdated();
                } else {
                    // end if operation update
                    incrementLinesSkipped();
                    lineSkipped = true;
                }
            } else if (operation.equals(data.deleteValue)) {
                if (meta.istablenameInField()) {
                    data.deleteStatement = data.preparedStatements.get(data.realSchemaTable + "delete");
                    if (data.deleteStatement == null) {
                        String sql = getDeleteStatement(data.inputRowMeta);
                        data.deleteStatement = data.db.prepareSQL(sql);
                        data.preparedStatements.put(data.realSchemaTable + "delete", data.deleteStatement);
                        if (log.isDebug()) {
                            logDebug("Preparation of the Delete SQL statement : " + sql);
                        }
                    }
                }
                Object[] deleteRow = new Object[data.deleteParameterRowMeta.size()];
                int deleteIndex = 0;
                for (int i = 0; i < meta.getKeyStream().length; i++) {
                    if (data.keynrs[i] >= 0) {
                        deleteRow[deleteIndex] = row[data.keynrs[i]];
                        deleteIndex++;
                    }
                    if (data.keynrs2[i] >= 0) {
                        deleteRow[deleteIndex] = row[data.keynrs2[i]];
                        deleteIndex++;
                    }
                }
                // 
                if (data.specialErrorHandling && data.supportsSavepoints) {
                    data.savepoint = data.db.setSavepoint();
                }
                data.db.setValues(data.deleteParameterRowMeta, deleteRow, data.deleteStatement);
                if (log.isRowLevel()) {
                    logRowlevel(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Log.SetValuesForDelete", data.deleteParameterRowMeta.getString(deleteRow), data.inputRowMeta.getString(row)));
                }
                data.db.insertRow(data.deleteStatement, data.batchMode);
                performDelete = true;
                incrementLinesUpdated();
            } else {
                // endif operation delete
                incrementLinesSkipped();
                lineSkipped = true;
            }
        }
        // 
        if (performInsert || performUpdate || performDelete || (data.batchBuffer.size() > 0 && lineSkipped)) {
            // Get a commit counter per prepared statement to keep track of separate tables, etc.
            // 
            String tableName = data.realSchemaTable;
            if (performInsert) {
                tableName += "insert";
            } else if (performUpdate) {
                tableName += "update";
            }
            if (performDelete) {
                tableName += "delete";
            }
            Integer commitCounter = data.commitCounterMap.get(tableName);
            if (commitCounter == null) {
                commitCounter = Integer.valueOf(0);
            }
            data.commitCounterMap.put(tableName, Integer.valueOf(commitCounter.intValue() + 1));
            // 
            if (data.specialErrorHandling && data.supportsSavepoints) {
                if (data.releaseSavepoint) {
                    data.db.releaseSavepoint(data.savepoint);
                }
            }
            // 
            if (commitCounter > 0 && (commitCounter % data.commitSize) == 0) {
                if (data.batchMode) {
                    try {
                        if (performInsert) {
                            data.insertStatement.executeBatch();
                            data.db.commit();
                            data.insertStatement.clearBatch();
                        } else if (performUpdate) {
                            data.updateStatement.executeBatch();
                            data.db.commit();
                            data.updateStatement.clearBatch();
                        } else if (performDelete) {
                            data.deleteStatement.executeBatch();
                            data.db.commit();
                            data.deleteStatement.clearBatch();
                        }
                    } catch (SQLException ex) {
                        throw Database.createKettleDatabaseBatchException(BaseMessages.getString(PKG, "SynchronizeAfterMerge.Error.UpdatingBatch"), ex);
                    } catch (Exception ex) {
                        throw new KettleDatabaseException("Unexpected error inserting row", ex);
                    }
                } else {
                    // insertRow normal commit
                    data.db.commit();
                }
                // Clear the batch/commit counter...
                // 
                data.commitCounterMap.put(tableName, Integer.valueOf(0));
                rowIsSafe = true;
            } else {
                rowIsSafe = false;
            }
        }
    } catch (KettleDatabaseBatchException be) {
        errorMessage = be.toString();
        batchProblem = true;
        sendToErrorRow = true;
        updateCounts = be.getUpdateCounts();
        exceptionsList = be.getExceptionsList();
        if (data.insertStatement != null) {
            data.db.clearBatch(data.insertStatement);
        }
        if (data.updateStatement != null) {
            data.db.clearBatch(data.updateStatement);
        }
        if (data.deleteStatement != null) {
            data.db.clearBatch(data.deleteStatement);
        }
        if (getStepMeta().isDoingErrorHandling()) {
            data.db.commit(true);
        } else {
            data.db.rollback();
            StringBuilder msg = new StringBuilder("Error batch inserting rows into table [" + data.realTableName + "].");
            msg.append(Const.CR);
            msg.append("Errors encountered (first 10):").append(Const.CR);
            for (int x = 0; x < be.getExceptionsList().size() && x < 10; x++) {
                Exception exception = be.getExceptionsList().get(x);
                if (exception.getMessage() != null) {
                    msg.append(exception.getMessage()).append(Const.CR);
                }
            }
            throw new KettleException(msg.toString(), be);
        }
    } catch (KettleDatabaseException dbe) {
        if (getStepMeta().isDoingErrorHandling()) {
            if (log.isRowLevel()) {
                logRowlevel("Written row to error handling : " + getInputRowMeta().getString(row));
            }
            if (data.specialErrorHandling && data.supportsSavepoints) {
                if (data.savepoint != null || !data.lookupFailure) {
                    // do this when savepoint was set, and this is not lookup failure PDI-10878
                    data.db.rollback(data.savepoint);
                    if (data.releaseSavepoint) {
                        data.db.releaseSavepoint(data.savepoint);
                    }
                }
            }
            sendToErrorRow = true;
            errorMessage = dbe.toString();
        } else {
            setErrors(getErrors() + 1);
            data.db.rollback();
            throw new KettleException("Error inserting row into table [" + data.realTableName + "] with values: " + data.inputRowMeta.getString(row), dbe);
        }
    }
    if (data.batchMode) {
        if (sendToErrorRow) {
            if (batchProblem) {
                data.batchBuffer.add(row);
                processBatchException(errorMessage, updateCounts, exceptionsList);
            } else {
                // Simply add this row to the error row
                putError(data.inputRowMeta, row, 1L, errorMessage, null, "SUYNC002");
            }
        } else {
            if (!lineSkipped) {
                data.batchBuffer.add(row);
            }
            if (rowIsSafe) {
                // A commit was done and the rows are all safe (no error)
                for (int i = 0; i < data.batchBuffer.size(); i++) {
                    Object[] rowb = data.batchBuffer.get(i);
                    putRow(data.outputRowMeta, rowb);
                    if (data.inputRowMeta.getString(rowb, data.indexOfOperationOrderField).equals(data.insertValue)) {
                        incrementLinesOutput();
                    }
                }
                // Clear the buffer
                data.batchBuffer.clear();
            }
            // 
            if (lineSkipped) {
                putRow(data.outputRowMeta, row);
            }
        }
    } else {
        if (sendToErrorRow) {
            if (data.lookupFailure) {
                putError(data.inputRowMeta, row, 1, data.stringErrorKeyNotFound, data.stringFieldnames, "SUYNC001");
            } else {
                putError(data.inputRowMeta, row, 1, errorMessage, null, "SUYNC001");
            }
        }
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) KettleDatabaseBatchException(org.pentaho.di.core.exception.KettleDatabaseBatchException) SQLException(java.sql.SQLException) ValueMetaInterface(org.pentaho.di.core.row.ValueMetaInterface)
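
Examples 1 through 4 all hand batch failures to processBatchException(errorMessage, updateCounts, exceptionsList), which is not shown on this page. JDBC's per-statement update counts indicate which entries of a batch succeeded, so one plausible way to route the buffered rows is sketched below. This is a hypothetical reconstruction with illustrative names, not the actual Kettle method.

import java.sql.Statement;
import java.util.Arrays;
import java.util.List;

public class BatchErrorRouting {

    void routeBatchResults(List<Object[]> batchBuffer, int[] updateCounts, String errorMessage) {
        for (int i = 0; i < batchBuffer.size(); i++) {
            Object[] row = batchBuffer.get(i);
            // A null updateCounts array means the driver reported nothing usable,
            // so every buffered row is treated as failed (compare Example 5, where
            // a null count is set to force a rollback of the whole batch).
            boolean failed = updateCounts == null
                || i >= updateCounts.length
                || updateCounts[i] == Statement.EXECUTE_FAILED;
            if (failed) {
                toErrorHop(row, errorMessage); // stands in for putError(...)
            } else {
                toOutputHop(row); // stands in for putRow(...)
            }
        }
        batchBuffer.clear();
    }

    private void toErrorHop(Object[] row, String why) {
        System.out.println("error hop (" + why + "): " + Arrays.toString(row));
    }

    private void toOutputHop(Object[] row) {
        System.out.println("output hop: " + Arrays.toString(row));
    }
}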

Example 5 with KettleDatabaseBatchException

Use of org.pentaho.di.core.exception.KettleDatabaseBatchException in project pentaho-kettle by pentaho.

Class Database, method createKettleDatabaseBatchException():

public static KettleDatabaseBatchException createKettleDatabaseBatchException(String message, SQLException ex) {
    KettleDatabaseBatchException kdbe = new KettleDatabaseBatchException(message, ex);
    if (ex instanceof BatchUpdateException) {
        kdbe.setUpdateCounts(((BatchUpdateException) ex).getUpdateCounts());
    } else {
        // Null update count forces rollback of batch
        kdbe.setUpdateCounts(null);
    }
    List<Exception> exceptions = new ArrayList<Exception>();
    SQLException nextException = ex.getNextException();
    SQLException oldException = null;
    // Some JDBC drivers link an exception back to itself as the next exception.
    // So it's not "equals" but != (comments from Sven Boden).
    while ((nextException != null) && (oldException != nextException)) {
        exceptions.add(nextException);
        oldException = nextException;
        nextException = nextException.getNextException();
    }
    kdbe.setExceptionsList(exceptions);
    return kdbe;
}
Also used : KettleDatabaseBatchException(org.pentaho.di.core.exception.KettleDatabaseBatchException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) KettleException(org.pentaho.di.core.exception.KettleException) KettleValueException(org.pentaho.di.core.exception.KettleValueException) BatchUpdateException(java.sql.BatchUpdateException) SQLException(java.sql.SQLException) ArrayList(java.util.ArrayList)
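
createKettleDatabaseBatchException() can be exercised without a database connection: java.sql.BatchUpdateException has a constructor that takes per-statement update counts, and chained causes can be attached with setNextException. The standalone demo below reproduces the defensive chain walk from the method above, using only JDK types.

import java.sql.BatchUpdateException;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;

public class ChainWalkDemo {
    public static void main(String[] args) {
        // The shape a driver reports after a partially failed batch:
        // first statement applied one row, second statement failed.
        int[] counts = { 1, Statement.EXECUTE_FAILED };
        BatchUpdateException root = new BatchUpdateException("batch failed", counts);
        root.setNextException(new SQLException("duplicate key on row 2"));

        // Walk the chain with a reference comparison, not equals(): some
        // drivers link an exception back to itself as the next exception.
        SQLException next = root.getNextException();
        SQLException previous = null;
        while (next != null && previous != next) {
            System.out.println("chained: " + next.getMessage());
            previous = next;
            next = next.getNextException();
        }
        System.out.println("update counts: " + Arrays.toString(root.getUpdateCounts()));
    }
}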

Aggregations

SQLException (java.sql.SQLException): 5
KettleDatabaseBatchException (org.pentaho.di.core.exception.KettleDatabaseBatchException): 5
KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 5
KettleException (org.pentaho.di.core.exception.KettleException): 5
KettleStepException (org.pentaho.di.core.exception.KettleStepException): 4
PreparedStatement (java.sql.PreparedStatement): 3
ValueMetaInterface (org.pentaho.di.core.row.ValueMetaInterface): 2
BatchUpdateException (java.sql.BatchUpdateException): 1
SimpleDateFormat (java.text.SimpleDateFormat): 1
ArrayList (java.util.ArrayList): 1
Date (java.util.Date): 1
RowMetaAndData (org.pentaho.di.core.RowMetaAndData): 1
KettleValueException (org.pentaho.di.core.exception.KettleValueException): 1