Use of org.pentaho.di.trans.step.StepErrorMeta in the project data-access by pentaho: the class TableInputTransformGenerator, method createInputStep.
protected StepMeta createInputStep(TransMeta transMeta) {
  TableInputMeta inputMeta = new TableInputMeta();
  inputMeta.setDatabaseMeta(sourceDatabaseMeta);
  inputMeta.setExecuteEachInputRow(false);
  inputMeta.setRowLimit(Integer.toString(rowLimit));
  inputMeta.setSQL(sql);
  inputMeta.setVariableReplacementActive(false);
  inputMeta.setLazyConversionActive(false);
  // inputMeta.setTargetSteps(null);
  StepMeta inputStepMeta = new StepMeta(TABLE_INPUT, TABLE_INPUT, inputMeta);
  // Placeholder error handling; replaced below with the collecting variant.
  inputStepMeta.setStepErrorMeta(new StepErrorMeta(transMeta, inputStepMeta));
  transMeta.addStep(inputStepMeta);
  final FileTransformStats stats = getTransformStats();
  StepErrorMeta inputErrorMeta = new StepErrorMeta(transMeta, inputStepMeta) {
    public void addErrorRowData(Object[] row, int startIndex, long nrErrors, String errorDescriptions, String fieldNames, String errorCodes) {
      // Collect a readable description of each rejected row for the transform statistics.
      StringBuffer sb = new StringBuffer();
      sb.append("Rejected Row: ");
      for (Object rowData : row) {
        sb.append(rowData);
        sb.append(", ");
      }
      sb.append("\r\n");
      stats.getErrors().add(sb.toString() + errorDescriptions);
      super.addErrorRowData(row, startIndex, nrErrors, errorDescriptions, fieldNames, errorCodes);
    }
  };
  StepMeta outputDummyStepMeta = addDummyStep(transMeta, "InputErrorDummy");
  inputErrorMeta.setTargetStep(outputDummyStepMeta);
  inputErrorMeta.setEnabled(true);
  inputStepMeta.setStepErrorMeta(inputErrorMeta);
  return inputStepMeta;
}
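The pattern above reduces to four moves: create a StepErrorMeta bound to the failing step, point it at a target step, enable it, and attach it back to the step. A minimal standalone sketch of just that wiring, assuming an initialized Kettle environment and using the stock Dummy step ("Dummy" is its standard plugin id; the step names are arbitrary):

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepErrorMeta;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.steps.dummytrans.DummyTransMeta;

public class ErrorHandlingSketch {
  public static void main(String[] args) throws Exception {
    KettleEnvironment.init(); // load the step plugin registry, etc.
    TransMeta transMeta = new TransMeta();

    // The step whose failing rows we want to divert.
    StepMeta source = new StepMeta("Dummy", "Source", new DummyTransMeta());
    transMeta.addStep(source);

    // The step that receives the rejected rows.
    StepMeta errorTarget = new StepMeta("Dummy", "Errors", new DummyTransMeta());
    transMeta.addStep(errorTarget);

    // Bind the error handling metadata to the source step, as in the generator above.
    StepErrorMeta errorMeta = new StepErrorMeta(transMeta, source);
    errorMeta.setTargetStep(errorTarget);
    errorMeta.setEnabled(true);
    source.setStepErrorMeta(errorMeta);
  }
}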
Use of org.pentaho.di.trans.step.StepErrorMeta in the project data-access by pentaho: the class CsvTransformGenerator, method createInputStep.
protected StepMeta createInputStep(TransMeta transMeta) {
  CsvInputMeta csvInputMeta = new CsvInputMeta();
  CsvFileInfo fileInfo = getModelInfo().getFileInfo();
  String fileName = fileInfo.getTmpFilename();
  String path;
  if (fileName.endsWith(".tmp")) { // $NON-NLS-1$
    path = PentahoSystem.getApplicationContext().getSolutionPath(TMP_FILE_PATH);
  } else {
    String relativePath = PentahoSystem.getSystemSetting("file-upload-defaults/relative-path", // $NON-NLS-1$
        String.valueOf(DEFAULT_RELATIVE_UPLOAD_FILE_PATH));
    path = PentahoSystem.getApplicationContext().getSolutionPath(relativePath);
  }
  File file = new File(path + fileInfo.getTmpFilename());
  String filename = file.getAbsolutePath();
  ColumnInfo[] columns = getModelInfo().getColumns();
  TextFileInputField[] inputFields = new TextFileInputField[columns.length];
  int idx = 0;
  for (ColumnInfo column : columns) {
    TextFileInputField field = new TextFileInputField();
    field.setCurrencySymbol(fileInfo.getCurrencySymbol());
    field.setDecimalSymbol(fileInfo.getCurrencySymbol()); // note: the original source passes the currency symbol here as well
    field.setFormat(column.getFormat());
    field.setGroupSymbol(fileInfo.getGroupSymbol());
    field.setIfNullValue(fileInfo.getIfNull());
    field.setIgnored(column.isIgnore());
    field.setLength(column.getLength());
    field.setName(column.getId());
    field.setNullString(fileInfo.getNullStr());
    // field.setPosition(position);
    field.setPrecision(column.getPrecision());
    field.setRepeated(false);
    field.setSamples(null);
    field.setTrimType(ValueMeta.TRIM_TYPE_BOTH);
    field.setType(convertDataType(column));
    inputFields[idx] = field;
    idx++;
  }
  csvInputMeta.setAddResultFile(false);
  csvInputMeta.setBufferSize("5000"); // $NON-NLS-1$
  csvInputMeta.setDelimiter(fileInfo.getDelimiter());
  csvInputMeta.setEnclosure(fileInfo.getEnclosure());
  csvInputMeta.setEncoding(fileInfo.getEncoding());
  csvInputMeta.setFilename(filename);
  csvInputMeta.setFilenameField(null);
  // TODO strip off more than one row if present...
  csvInputMeta.setHeaderPresent(fileInfo.getHeaderRows() > 0);
  // inputMeta.get.setID(1);
  csvInputMeta.setIncludingFilename(false);
  csvInputMeta.setInputFields(inputFields);
  csvInputMeta.setLazyConversionActive(true);
  csvInputMeta.setRowNumField(""); // $NON-NLS-1$
  csvInputMeta.setRunningInParallel(false);
  // inputMeta.setTargetSteps(null);
  StepMeta csvInputStepMeta = new StepMeta(CSV_INPUT, CSV_INPUT, csvInputMeta);
  csvInputStepMeta.setStepErrorMeta(new StepErrorMeta(transMeta, csvInputStepMeta));
  transMeta.addStep(csvInputStepMeta);
  csvErrorRowCount = 0;
  final FileTransformStats stats = getTransformStats();
  StepErrorMeta csvInputErrorMeta = new StepErrorMeta(transMeta, csvInputStepMeta) {
    public void addErrorRowData(Object[] row, int startIndex, long nrErrors, String errorDescriptions, String fieldNames, String errorCodes) {
      // Only keep the first maxErrorRows rejected rows in the statistics to bound memory use.
      if (csvErrorRowCount < maxErrorRows) {
        StringBuffer sb = new StringBuffer();
        sb.append("Rejected Row: ");
        for (Object rowData : row) {
          sb.append(rowData);
          sb.append(", ");
        }
        sb.append("\r\n");
        stats.getErrors().add(sb.toString() + errorDescriptions);
      }
      csvErrorRowCount++;
      stats.setErrorCount(csvErrorRowCount);
      super.addErrorRowData(row, startIndex, nrErrors, errorDescriptions, fieldNames, errorCodes);
    }
  };
  StepMeta outputDummyStepMeta = addDummyStep(transMeta, "CSVInputErrorDummy");
  csvInputErrorMeta.setTargetStep(outputDummyStepMeta);
  csvInputErrorMeta.setEnabled(true);
  csvInputStepMeta.setStepErrorMeta(csvInputErrorMeta);
  return csvInputStepMeta;
}
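Both generators leave the optional error fields unset. When they are configured, each diverted row is appended with fields describing the failure, matching the errorDescriptions, fieldNames and errorCodes arguments the anonymous addErrorRowData override receives. A hedged sketch of that configuration as a self-contained helper (the value names are arbitrary):

import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepErrorMeta;
import org.pentaho.di.trans.step.StepMeta;

/** Attach error handling that also annotates diverted rows with error metadata. */
static void attachAnnotatedErrorHandling(TransMeta transMeta, StepMeta source, StepMeta errorTarget) {
  StepErrorMeta errorMeta = new StepErrorMeta(transMeta, source);
  errorMeta.setTargetStep(errorTarget);
  errorMeta.setEnabled(true);
  // Optional: names of the extra fields appended to each diverted row (names are arbitrary).
  errorMeta.setNrErrorsValuename("nr_errors");
  errorMeta.setErrorDescriptionsValuename("error_descriptions");
  errorMeta.setErrorFieldsValuename("error_fields");
  errorMeta.setErrorCodesValuename("error_codes");
  source.setStepErrorMeta(errorMeta);
}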
Use of org.pentaho.di.trans.step.StepErrorMeta in the project pentaho-kettle by pentaho: the class TransMeta, method loadXML.
/**
 * Parses an XML DOM (starting at the specified Node) that describes the transformation.
 *
 * @param transnode
 *          The XML node to load from
 * @param fname
 *          The filename
 * @param metaStore
 *          the metadata store to use as the primary meta store (or null if there is none)
 * @param rep
 *          The repository to load the default list of database connections from (null if no repository is available)
 * @param setInternalVariables
 *          true if you want to set the internal variables based on this transformation information
 * @param parentVariableSpace
 *          the parent variable space to use during TransMeta construction
 * @param prompter
 *          the changed/replace listener, or null if there is none
 * @throws KettleXMLException
 *           if any errors occur during parsing of the specified file
 * @throws KettleMissingPluginsException
 *           in case missing plugins were found (details are in the exception in that case)
 */
public void loadXML(Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, OverwritePrompter prompter) throws KettleXMLException, KettleMissingPluginsException {
  KettleMissingPluginsException missingPluginsException = new KettleMissingPluginsException(BaseMessages.getString(PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception"));
  // Remember this as the primary meta store.
  this.metaStore = metaStore;
  try {
    Props props = null;
    if (Props.isInitialized()) {
      props = Props.getInstance();
    }
    initializeVariablesFrom(parentVariableSpace);
    try {
      // Clear the transformation
      clear();
      // Set the filename here so it can be used in variables for ALL aspects of the transformation. FIX: PDI-8890
      if (null == rep) {
        setFilename(fname);
      } else {
        // Set the repository here so it can be used in variables for ALL aspects of the job. FIX: PDI-16441
        setRepository(rep);
      }
      // Read the shared objects (from the repository if one is available, otherwise from the shared objects file).
      //
      try {
        sharedObjectsFile = XMLHandler.getTagValue(transnode, "info", "shared_objects_file");
        sharedObjects = rep != null ? rep.readTransSharedObjects(this) : readSharedObjects();
      } catch (Exception e) {
        log.logError(BaseMessages.getString(PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString()));
        log.logError(Const.getStackTracker(e));
      }
      // Load the database connections, slave servers, cluster schemas & partition schemas into this object.
      //
      importFromMetaStore();
      // Handle connections
      int n = XMLHandler.countNodes(transnode, DatabaseMeta.XML_TAG);
      Set<String> privateTransformationDatabases = new HashSet<>(n);
      if (log.isDebug()) {
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveConnections", String.valueOf(n)));
      }
      for (int i = 0; i < n; i++) {
        if (log.isDebug()) {
          log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtConnection") + i);
        }
        Node nodecon = XMLHandler.getSubNodeByNr(transnode, DatabaseMeta.XML_TAG, i);
        DatabaseMeta dbcon = new DatabaseMeta(nodecon);
        dbcon.shareVariablesWith(this);
        if (!dbcon.isShared()) {
          privateTransformationDatabases.add(dbcon.getName());
        }
        DatabaseMeta exist = findDatabase(dbcon.getName());
        if (exist == null) {
          addDatabase(dbcon);
        } else {
          if (!exist.isShared()) { // otherwise, we just keep the shared connection.
            if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnectionYN", dbcon.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
              int idx = indexOfDatabase(exist);
              removeDatabase(idx);
              addDatabase(idx, dbcon);
            }
          }
        }
      }
      setPrivateDatabases(privateTransformationDatabases);
      // Read the notes...
      Node notepadsnode = XMLHandler.getSubNode(transnode, XML_TAG_NOTEPADS);
      int nrnotes = XMLHandler.countNodes(notepadsnode, NotePadMeta.XML_TAG);
      for (int i = 0; i < nrnotes; i++) {
        Node notepadnode = XMLHandler.getSubNodeByNr(notepadsnode, NotePadMeta.XML_TAG, i);
        NotePadMeta ni = new NotePadMeta(notepadnode);
        notes.add(ni);
      }
      // Handle steps
      int s = XMLHandler.countNodes(transnode, StepMeta.XML_TAG);
      if (log.isDebug()) {
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.ReadingSteps") + s + " steps...");
      }
      for (int i = 0; i < s; i++) {
        Node stepnode = XMLHandler.getSubNodeByNr(transnode, StepMeta.XML_TAG, i);
        if (log.isDebug()) {
          log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtStep") + i);
        }
        StepMeta stepMeta = new StepMeta(stepnode, databases, metaStore);
        stepMeta.setParentTransMeta(this); // for tracing, retain hierarchy
        if (stepMeta.isMissing()) {
          addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
        }
        // Check if the step exists and if it's a shared step.
        // If so, then we will keep the shared version, not this one.
        // The stored XML is only for backup purposes.
        //
        StepMeta check = findStep(stepMeta.getName());
        if (check != null) {
          if (!check.isShared()) {
            // Don't overwrite shared objects
            addOrReplaceStep(stepMeta);
          } else {
            // Just keep the drawn flag and location
            check.setDraw(stepMeta.isDrawn());
            check.setLocation(stepMeta.getLocation());
          }
        } else {
          // simply add it.
          addStep(stepMeta);
        }
      }
      // Read the error handling code of the steps...
      //
      Node errorHandlingNode = XMLHandler.getSubNode(transnode, XML_TAG_STEP_ERROR_HANDLING);
      int nrErrorHandlers = XMLHandler.countNodes(errorHandlingNode, StepErrorMeta.XML_ERROR_TAG);
      for (int i = 0; i < nrErrorHandlers; i++) {
        Node stepErrorMetaNode = XMLHandler.getSubNodeByNr(errorHandlingNode, StepErrorMeta.XML_ERROR_TAG, i);
        StepErrorMeta stepErrorMeta = new StepErrorMeta(this, stepErrorMetaNode, steps);
        if (stepErrorMeta.getSourceStep() != null) {
          stepErrorMeta.getSourceStep().setStepErrorMeta(stepErrorMeta); // a bit of a trick, I know.
        }
      }
      // Let each step resolve references to its info- and target-steps.
      //
      for (int i = 0; i < nrSteps(); i++) {
        StepMeta stepMeta = getStep(i);
        StepMetaInterface sii = stepMeta.getStepMetaInterface();
        if (sii != null) {
          sii.searchInfoAndTargetSteps(steps);
        }
      }
      // Handle hops
      //
      Node ordernode = XMLHandler.getSubNode(transnode, XML_TAG_ORDER);
      n = XMLHandler.countNodes(ordernode, TransHopMeta.XML_HOP_TAG);
      if (log.isDebug()) {
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveHops") + n + " hops...");
      }
      for (int i = 0; i < n; i++) {
        if (log.isDebug()) {
          log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtHop") + i);
        }
        Node hopnode = XMLHandler.getSubNodeByNr(ordernode, TransHopMeta.XML_HOP_TAG, i);
        TransHopMeta hopinf = new TransHopMeta(hopnode, steps);
        hopinf.setErrorHop(isErrorNode(errorHandlingNode, hopnode));
        addTransHop(hopinf);
      }
      //
      // Get the transformation info:
      //
      Node infonode = XMLHandler.getSubNode(transnode, XML_TAG_INFO);
      // Name
      setName(XMLHandler.getTagValue(infonode, "name"));
      // Description
      description = XMLHandler.getTagValue(infonode, "description");
      // Extended description
      extendedDescription = XMLHandler.getTagValue(infonode, "extended_description");
      // Transformation version
      trans_version = XMLHandler.getTagValue(infonode, "trans_version");
      // Transformation status
      trans_status = Const.toInt(XMLHandler.getTagValue(infonode, "trans_status"), -1);
      String transTypeCode = XMLHandler.getTagValue(infonode, "trans_type");
      transformationType = TransformationType.getTransformationTypeByCode(transTypeCode);
      // Resolve the repository directory, if a repository is available.
      //
      if (rep != null) {
        String directoryPath = XMLHandler.getTagValue(infonode, "directory");
        if (directoryPath != null) {
          directory = rep.findDirectory(directoryPath);
          if (directory == null) { // not found: take the root as the default
            directory = new RepositoryDirectory();
          }
        }
      }
      // Read logging table information
      //
      Node logNode = XMLHandler.getSubNode(infonode, "log");
      if (logNode != null) {
        // Backward compatibility...
        //
        Node transLogNode = XMLHandler.getSubNode(logNode, TransLogTable.XML_TAG);
        if (transLogNode == null) {
          // Load the old-style log settings from the individual tags.
          //
          transLogTable.findField(TransLogTable.ID.LINES_READ).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "read")));
          transLogTable.findField(TransLogTable.ID.LINES_WRITTEN).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "write")));
          transLogTable.findField(TransLogTable.ID.LINES_INPUT).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "input")));
          transLogTable.findField(TransLogTable.ID.LINES_OUTPUT).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "output")));
          transLogTable.findField(TransLogTable.ID.LINES_UPDATED).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "update")));
          transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "rejected")));
          transLogTable.setConnectionName(XMLHandler.getTagValue(infonode, "log", "connection"));
          transLogTable.setSchemaName(XMLHandler.getTagValue(infonode, "log", "schema"));
          transLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "table"));
          transLogTable.findField(TransLogTable.ID.ID_BATCH).setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "use_batchid")));
          transLogTable.findField(TransLogTable.ID.LOG_FIELD).setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "USE_LOGFIELD")));
          transLogTable.setLogSizeLimit(XMLHandler.getTagValue(infonode, "log", "size_limit_lines"));
          transLogTable.setLogInterval(XMLHandler.getTagValue(infonode, "log", "interval"));
          transLogTable.findField(TransLogTable.ID.CHANNEL_ID).setEnabled(false);
          transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setEnabled(false);
          performanceLogTable.setConnectionName(transLogTable.getConnectionName());
          performanceLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "step_performance_table"));
        } else {
          transLogTable.loadXML(transLogNode, databases, steps);
        }
        Node perfLogNode = XMLHandler.getSubNode(logNode, PerformanceLogTable.XML_TAG);
        if (perfLogNode != null) {
          performanceLogTable.loadXML(perfLogNode, databases, steps);
        }
        Node channelLogNode = XMLHandler.getSubNode(logNode, ChannelLogTable.XML_TAG);
        if (channelLogNode != null) {
          channelLogTable.loadXML(channelLogNode, databases, steps);
        }
        Node stepLogNode = XMLHandler.getSubNode(logNode, StepLogTable.XML_TAG);
        if (stepLogNode != null) {
          stepLogTable.loadXML(stepLogNode, databases, steps);
        }
        Node metricsLogNode = XMLHandler.getSubNode(logNode, MetricsLogTable.XML_TAG);
        if (metricsLogNode != null) {
          metricsLogTable.loadXML(metricsLogNode, databases, steps);
        }
      }
      // Maxdate range options...
      String maxdatcon = XMLHandler.getTagValue(infonode, "maxdate", "connection");
      maxDateConnection = findDatabase(maxdatcon);
      maxDateTable = XMLHandler.getTagValue(infonode, "maxdate", "table");
      maxDateField = XMLHandler.getTagValue(infonode, "maxdate", "field");
      String offset = XMLHandler.getTagValue(infonode, "maxdate", "offset");
      maxDateOffset = Const.toDouble(offset, 0.0);
      String mdiff = XMLHandler.getTagValue(infonode, "maxdate", "maxdiff");
      maxDateDifference = Const.toDouble(mdiff, 0.0);
      // Check the dependencies as far as dates are concerned...
      // We calculate BEFORE we run the MAX of these dates.
      // If the date is larger than enddate, startdate is set to MIN_DATE.
      //
      Node depsNode = XMLHandler.getSubNode(infonode, XML_TAG_DEPENDENCIES);
      int nrDeps = XMLHandler.countNodes(depsNode, TransDependency.XML_TAG);
      for (int i = 0; i < nrDeps; i++) {
        Node depNode = XMLHandler.getSubNodeByNr(depsNode, TransDependency.XML_TAG, i);
        TransDependency transDependency = new TransDependency(depNode, databases);
        if (transDependency.getDatabase() != null && transDependency.getFieldname() != null) {
          addDependency(transDependency);
        }
      }
      // Read the named parameters.
      Node paramsNode = XMLHandler.getSubNode(infonode, XML_TAG_PARAMETERS);
      int nrParams = XMLHandler.countNodes(paramsNode, "parameter");
      for (int i = 0; i < nrParams; i++) {
        Node paramNode = XMLHandler.getSubNodeByNr(paramsNode, "parameter", i);
        String paramName = XMLHandler.getTagValue(paramNode, "name");
        String defaultValue = XMLHandler.getTagValue(paramNode, "default_value");
        String descr = XMLHandler.getTagValue(paramNode, "description");
        addParameterDefinition(paramName, defaultValue, descr);
      }
      // Read the partitioning schemas
      //
      Node partSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_PARTITIONSCHEMAS);
      int nrPartSchemas = XMLHandler.countNodes(partSchemasNode, PartitionSchema.XML_TAG);
      for (int i = 0; i < nrPartSchemas; i++) {
        Node partSchemaNode = XMLHandler.getSubNodeByNr(partSchemasNode, PartitionSchema.XML_TAG, i);
        PartitionSchema partitionSchema = new PartitionSchema(partSchemaNode);
        // Check if the object exists and if it's a shared object.
        // If so, then we will keep the shared version, not this one.
        // The stored XML is only for backup purposes.
        //
        PartitionSchema check = findPartitionSchema(partitionSchema.getName());
        if (check != null) {
          if (!check.isShared()) {
            // we don't overwrite shared objects.
            if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
              addOrReplacePartitionSchema(partitionSchema);
            }
          }
        } else {
          partitionSchemas.add(partitionSchema);
        }
      }
      // Associate the steps with the loaded partition schemas.
      //
      for (int i = 0; i < nrSteps(); i++) {
        StepPartitioningMeta stepPartitioningMeta = getStep(i).getStepPartitioningMeta();
        if (stepPartitioningMeta != null) {
          stepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas);
        }
        StepPartitioningMeta targetStepPartitioningMeta = getStep(i).getTargetStepPartitioningMeta();
        if (targetStepPartitioningMeta != null) {
          targetStepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas);
        }
      }
      // Read the slave servers...
      //
      Node slaveServersNode = XMLHandler.getSubNode(infonode, XML_TAG_SLAVESERVERS);
      int nrSlaveServers = XMLHandler.countNodes(slaveServersNode, SlaveServer.XML_TAG);
      for (int i = 0; i < nrSlaveServers; i++) {
        Node slaveServerNode = XMLHandler.getSubNodeByNr(slaveServersNode, SlaveServer.XML_TAG, i);
        SlaveServer slaveServer = new SlaveServer(slaveServerNode);
        if (slaveServer.getName() == null) {
          log.logError(BaseMessages.getString(PKG, "TransMeta.Log.WarningWhileCreationSlaveServer", slaveServer.getName()));
          continue;
        }
        slaveServer.shareVariablesWith(this);
        // Check if the object exists and if it's a shared object.
        // If so, then we will keep the shared version, not this one.
        // The stored XML is only for backup purposes.
        SlaveServer check = findSlaveServer(slaveServer.getName());
        if (check != null) {
          if (!check.isShared()) {
            // we don't overwrite shared objects.
            if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
              addOrReplaceSlaveServer(slaveServer);
            }
          }
        } else {
          slaveServers.add(slaveServer);
        }
      }
      // Read the cluster schemas
      //
      Node clusterSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_CLUSTERSCHEMAS);
      int nrClusterSchemas = XMLHandler.countNodes(clusterSchemasNode, ClusterSchema.XML_TAG);
      for (int i = 0; i < nrClusterSchemas; i++) {
        Node clusterSchemaNode = XMLHandler.getSubNodeByNr(clusterSchemasNode, ClusterSchema.XML_TAG, i);
        ClusterSchema clusterSchema = new ClusterSchema(clusterSchemaNode, slaveServers);
        clusterSchema.shareVariablesWith(this);
        // Check if the object exists and if it's a shared object.
        // If so, then we will keep the shared version, not this one.
        // The stored XML is only for backup purposes.
        ClusterSchema check = findClusterSchema(clusterSchema.getName());
        if (check != null) {
          if (!check.isShared()) {
            // we don't overwrite shared objects.
            if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
              addOrReplaceClusterSchema(clusterSchema);
            }
          }
        } else {
          clusterSchemas.add(clusterSchema);
        }
      }
      // Associate the steps with the loaded cluster schemas.
      //
      for (int i = 0; i < nrSteps(); i++) {
        getStep(i).setClusterSchemaAfterLoading(clusterSchemas);
      }
      String srowset = XMLHandler.getTagValue(infonode, "size_rowset");
      sizeRowset = Const.toInt(srowset, Const.ROWS_IN_ROWSET);
      sleepTimeEmpty = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_empty"), Const.TIMEOUT_GET_MILLIS);
      sleepTimeFull = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_full"), Const.TIMEOUT_PUT_MILLIS);
      usingUniqueConnections = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "unique_connections"));
      feedbackShown = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "feedback_shown"));
      feedbackSize = Const.toInt(XMLHandler.getTagValue(infonode, "feedback_size"), Const.ROWS_UPDATE);
      usingThreadPriorityManagment = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "using_thread_priorities"));
      // Performance monitoring for steps...
      //
      capturingStepPerformanceSnapShots = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "capture_step_performance"));
      stepPerformanceCapturingDelay = Const.toLong(XMLHandler.getTagValue(infonode, "step_performance_capturing_delay"), 1000);
      stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue(infonode, "step_performance_capturing_size_limit");
      // Created user/date
      createdUser = XMLHandler.getTagValue(infonode, "created_user");
      String createDate = XMLHandler.getTagValue(infonode, "created_date");
      if (createDate != null) {
        createdDate = XMLHandler.stringToDate(createDate);
      }
      // Changed user/date
      modifiedUser = XMLHandler.getTagValue(infonode, "modified_user");
      String modDate = XMLHandler.getTagValue(infonode, "modified_date");
      if (modDate != null) {
        modifiedDate = XMLHandler.stringToDate(modDate);
      }
      Node partitionDistNode = XMLHandler.getSubNode(transnode, SlaveStepCopyPartitionDistribution.XML_TAG);
      if (partitionDistNode != null) {
        slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(partitionDistNode);
      } else {
        // leave empty
        slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
      }
      // Is this a slave transformation?
      //
      slaveTransformation = "Y".equalsIgnoreCase(XMLHandler.getTagValue(transnode, "slave_transformation"));
      if (log.isDebug()) {
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfStepsReaded") + nrSteps());
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfHopsReaded") + nrTransHops());
      }
      sortSteps();
      // Load the attribute groups map
      //
      attributesMap = AttributesUtil.loadAttributes(XMLHandler.getSubNode(transnode, AttributesUtil.XML_TAG));
      keyForSessionKey = XMLHandler.stringToBinary(XMLHandler.getTagValue(infonode, "key_for_session_key"));
      isKeyPrivate = "Y".equals(XMLHandler.getTagValue(infonode, "is_key_private"));
    } catch (KettleXMLException xe) {
      throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), xe);
    } catch (KettleException e) {
      throw new KettleXMLException(e);
    } finally {
      initializeVariablesFrom(null);
      if (setInternalVariables) {
        setInternalKettleVariables();
      }
      ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationMetaLoaded.id, this);
    }
  } catch (Exception e) {
    // Missing plugins take precedence over any other load error.
    //
    if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) {
      throw missingPluginsException;
    } else {
      throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), e);
    }
  } finally {
    if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) {
      throw missingPluginsException;
    }
  }
}
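Callers rarely invoke loadXML directly; the TransMeta constructors that take a file name parse the document and call it internally. A small sketch, assuming an initialized Kettle environment and a placeholder .ktr file name, that loads a transformation and inspects the StepErrorMeta instances reconstructed by the error-handling block above:

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepErrorMeta;
import org.pentaho.di.trans.step.StepMeta;

public class LoadKtrSketch {
  public static void main(String[] args) throws Exception {
    KettleEnvironment.init();
    // The file name is a placeholder; this constructor runs loadXML() internally.
    TransMeta transMeta = new TransMeta("my_transformation.ktr");
    for (int i = 0; i < transMeta.nrSteps(); i++) {
      StepMeta step = transMeta.getStep(i);
      StepErrorMeta errorMeta = step.getStepErrorMeta();
      if (errorMeta != null && errorMeta.isEnabled()) {
        System.out.println(step.getName() + " diverts error rows to " + errorMeta.getTargetStep().getName());
      }
    }
  }
}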
Use of org.pentaho.di.trans.step.StepErrorMeta in the project pentaho-kettle by pentaho: the class TransMeta, method getStepFields.
/**
 * Returns the fields that are emitted by a certain step.
 *
 * @param stepMeta
 *          The step to be queried.
 * @param targetStep
 *          the target step
 * @param monitor
 *          The progress monitor for the progress dialog (null if not used)
 * @return A row containing the fields emitted.
 * @throws KettleStepException
 *           the kettle step exception
 */
public RowMetaInterface getStepFields(StepMeta stepMeta, StepMeta targetStep, ProgressMonitorListener monitor) throws KettleStepException {
  RowMetaInterface row = new RowMeta();
  if (stepMeta == null) {
    return row;
  }
  String fromToCacheEntry = stepMeta.getName() + (targetStep != null ? ("-" + targetStep.getName()) : "");
  RowMetaInterface rowMeta = stepsFieldsCache.get(fromToCacheEntry);
  if (rowMeta != null) {
    return rowMeta;
  }
  // See if the step is sending error rows to the specified target step.
  //
  if (targetStep != null && stepMeta.isSendingErrorRowsToStep(targetStep)) {
    // The error rows are the same as the input rows for
    // the step, but with the selected error fields added.
    //
    row = getPrevStepFields(stepMeta);
    // Add to this the error fields...
    StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta();
    row.addRowMeta(stepErrorMeta.getErrorFields());
    // Store this row in the cache
    //
    stepsFieldsCache.put(fromToCacheEntry, row);
    return row;
  }
  // Resume the regular program...
  List<StepMeta> prevSteps = findPreviousSteps(stepMeta, false);
  int nrPrevious = prevSteps.size();
  if (log.isDebug()) {
    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), String.valueOf(nrPrevious)));
  }
  for (int i = 0; i < prevSteps.size(); i++) {
    StepMeta prevStepMeta = prevSteps.get(i);
    if (monitor != null) {
      monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName()));
    }
    RowMetaInterface add = getStepFields(prevStepMeta, stepMeta, monitor);
    if (add == null) {
      add = new RowMeta();
    }
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.FoundFieldsToAdd") + add.toString());
    }
    if (i == 0) {
      row.addRowMeta(add);
    } else {
      // Only add fields that are not already in the row.
      for (int x = 0; x < add.size(); x++) {
        ValueMetaInterface v = add.getValueMeta(x);
        ValueMetaInterface s = row.searchValueMeta(v.getName());
        if (s == null) {
          row.addValueMeta(v);
        }
      }
    }
  }
  if (nrPrevious == 0 && stepMeta.getRemoteInputSteps().size() > 0) {
    // Also pick up the fields arriving from remote input steps (clustering).
    //
    for (RemoteStep remoteStep : stepMeta.getRemoteInputSteps()) {
      RowMetaInterface inputFields = remoteStep.getRowMeta();
      for (ValueMetaInterface inputField : inputFields.getValueMetaList()) {
        if (row.searchValueMeta(inputField.getName()) == null) {
          row.addValueMeta(inputField);
        }
      }
    }
  }
  // Finally, see if we need to add/modify/delete fields with this step "name"
  rowMeta = getThisStepFields(stepMeta, targetStep, row, monitor);
  // Store this row in the cache
  //
  stepsFieldsCache.put(fromToCacheEntry, rowMeta);
  return rowMeta;
}
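In other words, for an error hop the emitted layout is the step's input fields plus the fields configured on its StepErrorMeta. A short sketch of querying that layout, reusing the hypothetical steps from the earlier sketches:

import org.pentaho.di.core.exception.KettleStepException;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepMeta;

// transMeta, failingStep and errorTarget are assumed to be wired as in the sketches above.
static void printErrorRowLayout(TransMeta transMeta, StepMeta failingStep, StepMeta errorTarget) throws KettleStepException {
  // Because failingStep sends its error rows to errorTarget, this returns the
  // step's input fields plus the configured error fields (nr_errors, ...).
  RowMetaInterface errorRow = transMeta.getStepFields(failingStep, errorTarget, null);
  System.out.println(errorRow.toStringMeta());
}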
Use of org.pentaho.di.trans.step.StepErrorMeta in the project pentaho-kettle by pentaho: the class KettleDatabaseRepositoryTransDelegate, method saveTransformation.
/**
 * Saves the transformation to a repository.
 *
 * @param transMeta
 *          the transformation metadata to store
 * @param versionComment
 *          the comment to store with this version of the transformation
 * @param monitor
 *          the way we report progress to the user; can be null if no UI is present
 * @param overwriteAssociated
 *          Overwrite existing object(s)?
 * @throws KettleException
 *           if an error occurs.
 */
public void saveTransformation(TransMeta transMeta, String versionComment, ProgressMonitorListener monitor, boolean overwriteAssociated) throws KettleException {
  try {
    if (monitor != null) {
      monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.LockingRepository"));
    }
    repository.insertLogEntry("save transformation '" + transMeta.getName() + "'");
    // Clear the attribute id cache to force a repository lookup.
    repository.connectionDelegate.clearNextIDCounters();
    // Do we have a valid directory?
    if (transMeta.getRepositoryDirectory().getObjectId() == null) {
      throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Exception.PlsSelectAValidDirectoryBeforeSavingTheTransformation"));
    }
    int nrWorks = 2 + transMeta.nrDatabases() + transMeta.nrNotes() + transMeta.nrSteps() + transMeta.nrTransHops();
    if (monitor != null) {
      monitor.beginTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingTransformationTask.Title") + transMeta.getPathAndName(), nrWorks);
    }
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingOfTransformationStarted"));
    }
    if (monitor != null && monitor.isCanceled()) {
      throw new KettleDatabaseException();
    }
    // Deal with any previously saved version of this transformation.
    //
    if (monitor != null) {
      monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.HandlingOldVersionTransformationTask.Title"));
      // transMeta.setObjectId(getTransformationID(transMeta.getName(),
      // transMeta.getRepositoryDirectory().getObjectId()));
    }
    // If no valid id is available in the database, assign one...
    if (transMeta.getObjectId() == null) {
      transMeta.setObjectId(repository.connectionDelegate.getNextTransformationID());
    } else {
      // An id already exists: remove the old rows for this id_transformation
      // from the database before we put the transformation back in...
      if (monitor != null) {
        monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.DeletingOldVersionTransformationTask.Title"));
      }
      if (log.isDebug()) {
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.DeletingOldVersionTransformation"));
      }
      repository.deleteTransformation(transMeta.getObjectId());
      if (log.isDebug()) {
        log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.OldVersionOfTransformationRemoved"));
      }
    }
    if (monitor != null) {
      monitor.worked(1);
    }
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingNotes"));
    }
    for (int i = 0; i < transMeta.nrNotes(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingNoteTask.Title") +
      // (i + 1) + "/" + transMeta.nrNotes());
      NotePadMeta ni = transMeta.getNote(i);
      repository.saveNotePadMeta(ni, transMeta.getObjectId());
      if (ni.getObjectId() != null) {
        repository.insertTransNote(transMeta.getObjectId(), ni.getObjectId());
      }
      if (monitor != null) {
        monitor.worked(1);
      }
    }
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingDatabaseConnections"));
    }
    for (int i = 0; i < transMeta.nrDatabases(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG,
      // "TransMeta.Monitor.SavingDatabaseTask.Title") + (i + 1) + "/" + transMeta.nrDatabases());
      DatabaseMeta databaseMeta = transMeta.getDatabase(i);
      // Only save the connection if it has changed, is new, or if we overwrite associated objects (in the repository).
      if (overwriteAssociated || databaseMeta.hasChanged() || databaseMeta.getObjectId() == null) {
        repository.save(databaseMeta, versionComment, monitor, overwriteAssociated);
      }
      if (monitor != null) {
        monitor.worked(1);
      }
    }
    // It is possible that we received another step type through a plugin.
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.CheckingStepTypes"));
    }
    repository.updateStepTypes();
    repository.updateDatabaseTypes();
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingSteps"));
    }
    for (int i = 0; i < transMeta.nrSteps(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingStepTask.Title") +
      // (i + 1) + "/" + transMeta.nrSteps());
      StepMeta stepMeta = transMeta.getStep(i);
      repository.stepDelegate.saveStepMeta(stepMeta, transMeta.getObjectId());
      if (monitor != null) {
        monitor.worked(1);
      }
    }
    repository.connectionDelegate.closeStepAttributeInsertPreparedStatement();
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingHops"));
    }
    for (int i = 0; i < transMeta.nrTransHops(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingHopTask.Title") +
      // (i + 1) + "/" + transMeta.nrTransHops());
      TransHopMeta hi = transMeta.getTransHop(i);
      saveTransHopMeta(hi, transMeta.getObjectId());
      if (monitor != null) {
        monitor.worked(1);
      }
    }
    // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.FinishingTask.Title"));
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingTransformationInfo"));
    }
    // Save the top-level information for the transformation.
    insertTransformation(transMeta);
    saveTransParameters(transMeta);
    repository.connectionDelegate.closeTransAttributeInsertPreparedStatement();
    // Save the partition schemas.
    //
    for (int i = 0; i < transMeta.getPartitionSchemas().size(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      PartitionSchema partitionSchema = transMeta.getPartitionSchemas().get(i);
      // See if this transformation really is a consumer of this object.
      // It might simply be loaded as a shared object from the repository.
      //
      boolean isUsedByTransformation = transMeta.isUsingPartitionSchema(partitionSchema);
      repository.save(partitionSchema, versionComment, null, transMeta.getObjectId(), isUsedByTransformation, overwriteAssociated);
    }
    // Save the slave servers.
    //
    for (int i = 0; i < transMeta.getSlaveServers().size(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      SlaveServer slaveServer = transMeta.getSlaveServers().get(i);
      boolean isUsedByTransformation = transMeta.isUsingSlaveServer(slaveServer);
      repository.save(slaveServer, versionComment, null, transMeta.getObjectId(), isUsedByTransformation, overwriteAssociated);
    }
    // Save the clustering schemas.
    for (int i = 0; i < transMeta.getClusterSchemas().size(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      ClusterSchema clusterSchema = transMeta.getClusterSchemas().get(i);
      boolean isUsedByTransformation = transMeta.isUsingClusterSchema(clusterSchema);
      repository.save(clusterSchema, versionComment, null, transMeta.getObjectId(), isUsedByTransformation, overwriteAssociated);
    }
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingDependencies"));
    }
    for (int i = 0; i < transMeta.nrDependencies(); i++) {
      if (monitor != null && monitor.isCanceled()) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
      }
      TransDependency td = transMeta.getDependency(i);
      saveTransDependency(td, transMeta.getObjectId());
    }
    saveTransAttributesMap(transMeta.getObjectId(), transMeta.getAttributesMap());
    // Save the step error handling information as well!
    for (int i = 0; i < transMeta.nrSteps(); i++) {
      StepMeta stepMeta = transMeta.getStep(i);
      StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta();
      if (stepErrorMeta != null) {
        repository.stepDelegate.saveStepErrorMeta(stepErrorMeta, transMeta.getObjectId(), stepMeta.getObjectId());
      }
    }
    repository.connectionDelegate.closeStepAttributeInsertPreparedStatement();
    if (log.isDebug()) {
      log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingFinished"));
    }
    if (monitor != null) {
      monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.UnlockingRepository"));
    }
    repository.unlockRepository();
    // Perform a commit!
    repository.commit();
    transMeta.clearChanged();
    if (monitor != null) {
      monitor.worked(1);
    }
    if (monitor != null) {
      monitor.done();
    }
  } catch (KettleDatabaseException dbe) {
    // Oops, roll back!
    repository.rollback();
    log.logError(BaseMessages.getString(PKG, "TransMeta.Log.ErrorSavingTransformationToRepository") + Const.CR + dbe.getMessage());
    throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Log.ErrorSavingTransformationToRepository"), dbe);
  }
}
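This delegate is normally reached through the Repository facade rather than called directly; for a database repository, saving a TransMeta through the facade should end up in saveTransformation above, which persists each step's StepErrorMeta alongside the step. A hedged sketch of that entry point, assuming rep is an already-connected repository (the directory path and version comment are placeholders):

import org.pentaho.di.repository.Repository;
import org.pentaho.di.repository.RepositoryDirectoryInterface;
import org.pentaho.di.trans.TransMeta;

// rep is assumed to be connected; the save also covers step error handling metadata.
static void saveToRepository(Repository rep, TransMeta transMeta) throws Exception {
  RepositoryDirectoryInterface dir = rep.loadRepositoryDirectoryTree().findDirectory("/");
  transMeta.setRepositoryDirectory(dir);
  rep.save(transMeta, "initial import", null); // versionComment, no progress monitor
}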