use of org.apache.hop.core.exception.HopDatabaseException in project hop by apache.
the class Database method getRows.
/**
 * Reads the result of a ResultSet into an ArrayList
 *
 * @param rset the ResultSet to read out
 * @param limit <=0 means unlimited, otherwise this specifies the maximum number of rows read.
 * @param monitor The progress monitor to update while getting the rows.
 * @return An ArrayList of rows.
 * @throws HopDatabaseException if something goes wrong.
 */
public List<Object[]> getRows(ResultSet rset, int limit, IProgressMonitor monitor) throws HopDatabaseException {
  try {
    List<Object[]> result = new ArrayList<>();
    boolean stop = false;
    int i = 0;
    if (rset != null) {
      if (monitor != null && limit > 0) {
        monitor.beginTask("Reading rows...", limit);
      }
      while ((limit <= 0 || i < limit) && !stop) {
        Object[] row = getRow(rset);
        if (row != null) {
          result.add(row);
          i++;
        } else {
          stop = true;
        }
        if (monitor != null && limit > 0) {
          monitor.worked(1);
        }
        if (monitor != null && monitor.isCanceled()) {
          break;
        }
      }
      closeQuery(rset);
      if (monitor != null) {
        monitor.done();
      }
    }
    return result;
  } catch (Exception e) {
    throw new HopDatabaseException("Unable to get list of rows from ResultSet : ", e);
  }
}
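For context, a minimal calling sketch (not from the Hop sources): the Database constructor arguments and the connect()/openQuery()/disconnect() calls are assumed to be available as in other Hop database code, and loggingObject, variables and databaseMeta are assumed to be in scope.

// Hypothetical usage: read at most 1000 rows from a query, without a progress monitor.
Database db = new Database(loggingObject, variables, databaseMeta); // constructor arguments assumed
try {
  db.connect();
  ResultSet resultSet = db.openQuery("SELECT id, name FROM customers");
  // A null monitor is allowed; getRows() also closes the ResultSet via closeQuery().
  List<Object[]> rows = db.getRows(resultSet, 1000, null);
  for (Object[] row : rows) {
    // each row is an Object[] holding the values in select-list order
  }
} finally {
  db.disconnect();
}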
use of org.apache.hop.core.exception.HopDatabaseException in project hop by apache.
the class BaseDatabaseMeta method hasIndex.
/**
 * Verifies on the specified database connection whether an index exists on the fields with the
 * specified names.
 *
 * @param database a connected database
 * @param schemaName the schema to look in
 * @param tableName the table to check
 * @param idxFields the names of the fields that need to be covered by an index
 * @return true if the index exists, false if it doesn't.
 * @throws HopDatabaseException if the index metadata cannot be read
 */
@Override
public boolean hasIndex(Database database, String schemaName, String tableName, String[] idxFields) throws HopDatabaseException {
  String schemaTable = database.getDatabaseMeta().getQuotedSchemaTableCombination(database, schemaName, tableName);
  boolean[] exists = new boolean[idxFields.length];
  for (int i = 0; i < exists.length; i++) {
    exists[i] = false;
  }
  try {
    // Get a list of all the indexes for this table
    ResultSet indexList = null;
    try {
      indexList = database.getDatabaseMetaData().getIndexInfo(null, null, schemaTable, false, true);
      while (indexList.next()) {
        String column = indexList.getString("COLUMN_NAME");
        int idx = Const.indexOfString(column, idxFields);
        if (idx >= 0) {
          exists[idx] = true;
        }
      }
    } finally {
      if (indexList != null) {
        indexList.close();
      }
    }
    // See if all the fields are indexed...
    boolean all = true;
    for (int i = 0; i < exists.length && all; i++) {
      if (!exists[i]) {
        all = false;
      }
    }
    return all;
  } catch (Exception e) {
    throw new HopDatabaseException("Unable to determine if indexes exist on table [" + schemaTable + "]", e);
  }
}
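A short calling sketch (not from the Hop sources): the schema, table and column names are made up, db is assumed to be a connected Database, and getIDatabase() is assumed to return this IDatabase implementation.

// Hypothetical check: only create an index when the key columns are not yet covered.
String[] keyColumns = new String[] {"customer_id", "order_date"};
boolean covered = databaseMeta.getIDatabase().hasIndex(db, "public", "orders", keyColumns);
if (!covered) {
  // generate and execute a CREATE INDEX statement for the missing columns here
}

Note that this base implementation only checks that every listed column appears in some index on the table; it does not require a single composite index covering all of them.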
use of org.apache.hop.core.exception.HopDatabaseException in project hop by apache.
the class LocalPipelineEngine method prepareExecution.
@Override
public void prepareExecution() throws HopException {
  if (!(pipelineRunConfiguration.getEngineRunConfiguration() instanceof LocalPipelineRunConfiguration)) {
    throw new HopException("A local pipeline execution expects a local pipeline configuration, not an instance of class " + pipelineRunConfiguration.getEngineRunConfiguration().getClass().getName());
  }
  LocalPipelineRunConfiguration config = (LocalPipelineRunConfiguration) pipelineRunConfiguration.getEngineRunConfiguration();
  int sizeRowsSet = Const.toInt(resolve(config.getRowSetSize()), Const.ROWS_IN_ROWSET);
  setRowSetSize(sizeRowsSet);
  setSafeModeEnabled(config.isSafeModeEnabled());
  setSortingTransformsTopologically(config.isSortingTransformsTopologically());
  setGatheringMetrics(config.isGatheringMetrics());
  setFeedbackShown(config.isFeedbackShown());
  setFeedbackSize(Const.toInt(resolve(config.getFeedbackSize()), Const.ROWS_UPDATE));

  // See if we need to enable transactions...
  //
  IExtensionData parentExtensionData = getParentPipeline();
  if (parentExtensionData == null) {
    parentExtensionData = getParentWorkflow();
  }
  String connectionGroup = null;
  if (parentExtensionData != null) {
    connectionGroup = (String) parentExtensionData.getExtensionDataMap().get(Const.CONNECTION_GROUP);
  }
  //
  if (config.isTransactional() && connectionGroup == null) {
    // Store a value in the parent...
    //
    connectionGroup = getPipelineMeta().getName() + " - " + UUID.randomUUID();

    // We also need to commit/rollback at the end of this pipeline...
    // We only do this when we created a new group. Never in a child
    //
    addExecutionFinishedListener((IExecutionFinishedListener<IPipelineEngine>) pipeline -> {
      String group = (String) pipeline.getExtensionDataMap().get(Const.CONNECTION_GROUP);
      List<Database> databases = DatabaseConnectionMap.getInstance().getDatabases(group);
      Result result = pipeline.getResult();
      for (Database database : databases) {
        try {
          if (result.getResult() && !result.isStopped() && result.getNrErrors() == 0) {
            try {
              database.commit(true);
              pipeline.getLogChannel().logBasic("All transactions of database connection '" + database.getDatabaseMeta().getName() + "' were committed at the end of the pipeline!");
            } catch (HopDatabaseException e) {
              throw new HopException("Error committing database connection " + database.getDatabaseMeta().getName(), e);
            }
          } else {
            try {
              database.rollback(true);
              pipeline.getLogChannel().logBasic("All transactions of database connection '" + database.getDatabaseMeta().getName() + "' were rolled back at the end of the pipeline!");
            } catch (HopDatabaseException e) {
              throw new HopException("Error rolling back database connection " + database.getDatabaseMeta().getName(), e);
            }
          }
        } finally {
          try {
            database.closeConnectionOnly();
            pipeline.getLogChannel().logDebug("Database connection '" + database.getDatabaseMeta().getName() + "' closed successfully!");
          } catch (HopDatabaseException hde) {
            pipeline.getLogChannel().logError("Error disconnecting from database - closeConnectionOnly failed:" + Const.CR + hde.getMessage());
            pipeline.getLogChannel().logError(Const.getStackTracker(hde));
          }
        }
        DatabaseConnectionMap.getInstance().removeConnection(group, null, database);
      }
    });
  }
  //
  if (connectionGroup != null && getExtensionDataMap() != null) {
    // Set the connection group for this pipeline
    //
    getExtensionDataMap().put(Const.CONNECTION_GROUP, connectionGroup);
  }

  super.prepareExecution();
}
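For context, a hedged driver sketch (not from the Hop sources; the constructor and setter names are assumptions inferred from the getters used above) showing how the transactional behaviour is switched on:

// Hypothetical driver, assumed constructor/setter names: run a pipeline locally with
// isTransactional() = true so the listener registered above commits or rolls back every
// database in the connection group when the pipeline finishes.
LocalPipelineRunConfiguration localConfig = new LocalPipelineRunConfiguration();
localConfig.setTransactional(true); // setter name assumed from isTransactional()

PipelineRunConfiguration runConfig = new PipelineRunConfiguration();
runConfig.setEngineRunConfiguration(localConfig); // setter name assumed from the getter used above

LocalPipelineEngine engine = new LocalPipelineEngine(pipelineMeta); // constructor assumed
engine.setPipelineRunConfiguration(runConfig);
engine.prepareExecution(); // installs the execution-finished listener shown above
engine.startThreads();
engine.waitUntilFinished();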
use of org.apache.hop.core.exception.HopDatabaseException in project hop by apache.
the class ValueMetaTimestamp method getMetadataPreview.
@Override
public IValueMeta getMetadataPreview(IVariables variables, DatabaseMeta databaseMeta, ResultSet rs) throws HopDatabaseException {
  try {
    if (java.sql.Types.TIMESTAMP == rs.getInt("DATA_TYPE")) {
      IValueMeta vmi = super.getMetadataPreview(variables, databaseMeta, rs);
      IValueMeta valueMeta;
      if (databaseMeta.supportsTimestampDataType()) {
        valueMeta = new ValueMetaTimestamp(name);
      } else {
        valueMeta = new ValueMetaDate(name);
      }
      valueMeta.setLength(vmi.getLength());
      valueMeta.setOriginalColumnType(vmi.getOriginalColumnType());
      valueMeta.setOriginalColumnTypeName(vmi.getOriginalColumnTypeName());
      valueMeta.setOriginalNullable(vmi.getOriginalNullable());
      valueMeta.setOriginalPrecision(vmi.getOriginalPrecision());
      valueMeta.setOriginalScale(vmi.getOriginalScale());
      valueMeta.setOriginalSigned(vmi.getOriginalSigned());
      return valueMeta;
    }
  } catch (SQLException e) {
    throw new HopDatabaseException(e);
  }
  return null;
}
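A short preview sketch (not from the Hop sources): it assumes the ResultSet comes from JDBC DatabaseMetaData.getColumns(), and that db, variables and databaseMeta are in scope; the schema and table names are made up.

// Hypothetical preview loop: build value metadata for each TIMESTAMP column of a table
// from its JDBC column metadata.
ResultSet columns = db.getDatabaseMetaData().getColumns(null, "public", "orders", null);
try {
  ValueMetaTimestamp template = new ValueMetaTimestamp("preview");
  while (columns.next()) {
    IValueMeta preview = template.getMetadataPreview(variables, databaseMeta, columns);
    if (preview != null) {
      // a TIMESTAMP column: 'preview' is a ValueMetaTimestamp, or a ValueMetaDate when the
      // database does not support a native timestamp type
    }
    // non-TIMESTAMP columns return null from this override
  }
} finally {
  columns.close();
}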
use of org.apache.hop.core.exception.HopDatabaseException in project hop by apache.
the class ValueMetaTimestamp method getValueFromSqlType.
@Override
public IValueMeta getValueFromSqlType(IVariables variables, DatabaseMeta databaseMeta, String name,
    ResultSetMetaData rm, int index, boolean ignoreLength, boolean lazyConversion) throws HopDatabaseException {
  try {
    int type = rm.getColumnType(index);
    if (type == java.sql.Types.TIMESTAMP) {
      int length = rm.getScale(index);
      IValueMeta valueMeta;
      if (databaseMeta.supportsTimestampDataType()) {
        valueMeta = new ValueMetaTimestamp(name);
      } else {
        valueMeta = new ValueMetaDate(name);
      }
      valueMeta.setLength(length);
      // Also get original column details, comment, etc.
      //
      getOriginalColumnMetadata(valueMeta, rm, index, ignoreLength);
      return valueMeta;
    }
    return null;
  } catch (Exception e) {
    throw new HopDatabaseException("Error evaluating timestamp value metadata", e);
  }
}
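A similar sketch (not from the Hop sources; the query is made up and db, variables and databaseMeta are assumed to be in scope) that maps the columns of a query result to value metadata with this method:

// Hypothetical mapping loop: derive an IValueMeta for every TIMESTAMP column of a query result.
ResultSet resultSet = db.openQuery("SELECT created_at, updated_at FROM orders");
ResultSetMetaData rsMeta = resultSet.getMetaData();
ValueMetaTimestamp template = new ValueMetaTimestamp("template");
for (int i = 1; i <= rsMeta.getColumnCount(); i++) {
  IValueMeta valueMeta =
      template.getValueFromSqlType(variables, databaseMeta, rsMeta.getColumnLabel(i), rsMeta, i, false, false);
  if (valueMeta != null) {
    // TIMESTAMP metadata (or a DATE fallback) for column i; other SQL types return null here
  }
}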