Use of org.pentaho.di.core.logging.TransLogTable in the pentaho-kettle project by pentaho.
From the class Trans, the method calculateBatchIdAndDateRange:
/**
 * Calculate the batch id and date range for the transformation.
 * <p>
 * When a log table connection is configured, this method connects to it, optionally reserves
 * the next batch id, and derives the date range to process: {@code startDate} comes from the
 * last logged END entry (or {@code Const.MIN_DATE} when there is none) and {@code endDate}
 * defaults to "now" but may be overridden by a max-date table lookup and/or capped by the
 * configured maximum date difference. Dependency tables are also checked: when any dependency
 * changed since the last run, {@code startDate} is reset to {@code Const.MIN_DATE} so the full
 * range is reprocessed.
 * <p>
 * NOTE: the log table database connection opened here is deliberately NOT closed; it is closed
 * later (see the comment at the end of this method).
 *
 * @throws KettleTransException if there are any errors during calculation
 */
public void calculateBatchIdAndDateRange() throws KettleTransException {
  TransLogTable transLogTable = transMeta.getTransLogTable();

  // Stamp the run: currentDate/logDate are "now"; the default range is (MIN_DATE .. now].
  // NOTE(review): endDate aliases currentDate here, so the endDate.setTime(...) call further
  // down mutates currentDate as well — confirm this sharing is intentional.
  currentDate = new Date();
  logDate = new Date();
  startDate = Const.MIN_DATE;
  endDate = currentDate;

  DatabaseMeta logConnection = transLogTable.getDatabaseMeta();
  // Resolve variables in the configured schema/table names before using them.
  String logTable = environmentSubstitute(transLogTable.getActualTableName());
  String logSchema = environmentSubstitute(transLogTable.getActualSchemaName());

  try {
    if (logConnection != null) {

      String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination(logSchema, logTable);
      if (Utils.isEmpty(logTable)) {
        // A log connection was specified but there is no log table to log to: fail early.
        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.NoLogTableDefined"));
      }
      // NOTE(review): "logTable != null" is always true at this point (the empty check above
      // already threw), so only the name check is effective. The KettleException thrown here is
      // caught below and wrapped in a KettleTransException.
      if (Utils.isEmpty(transMeta.getName()) && logTable != null) {
        throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.NoTransnameAvailableForLogging"));
      }
      transLogTableDatabaseConnection = new Database(this, logConnection);
      transLogTableDatabaseConnection.shareVariablesWith(this);
      if (log.isDetailed()) {
        log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningLogConnection", "" + logConnection));
      }
      transLogTableDatabaseConnection.connect();
      transLogTableDatabaseConnection.setCommit(logCommitSize);

      // Reserve the next batch id from the log table, if batch ids are enabled.
      if (transLogTable.isBatchIdUsed()) {
        Long id_batch = logConnection.getNextBatchId(transLogTableDatabaseConnection, logSchema, logTable, transLogTable.getKeyField().getFieldName());
        setBatchId(id_batch.longValue());
      }

      //
      // Get the date range from the logging table: from the last end_date to now. (currentDate)
      //
      Object[] lastr = transLogTableDatabaseConnection.getLastLogDate(logSchemaAndTable, transMeta.getName(), false, LogStatus.END);
      if (lastr != null && lastr.length > 0) {
        startDate = (Date) lastr[0];
        if (log.isDetailed()) {
          log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.StartDateFound") + startDate);
        }
      }

      // Optionally determine the endDate from a max-date table lookup instead of "now".
      if (transMeta.getMaxDateConnection() != null && transMeta.getMaxDateTable() != null && transMeta.getMaxDateTable().length() > 0 && transMeta.getMaxDateField() != null && transMeta.getMaxDateField().length() > 0) {
        if (log.isDetailed()) {
          log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LookingForMaxdateConnection", "" + transMeta.getMaxDateConnection()));
        }
        DatabaseMeta maxcon = transMeta.getMaxDateConnection();
        if (maxcon != null) {
          Database maxdb = new Database(this, maxcon);
          maxdb.shareVariablesWith(this);
          try {
            if (log.isDetailed()) {
              log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningMaximumDateConnection"));
            }
            maxdb.connect();
            maxdb.setCommit(logCommitSize);

            //
            // Determine the endDate by looking at a field in a table...
            //
            String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable();
            RowMetaAndData r1 = maxdb.getOneRow(sql);
            if (r1 != null) {
              // OK, we have a value, what's the offset?
              Date maxvalue = r1.getRowMeta().getDate(r1.getData(), 0);
              if (maxvalue != null) {
                if (log.isDetailed()) {
                  log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection") + r1);
                }
                // Apply the configured offset (seconds) to the found maximum date.
                // NOTE(review): this mutates the shared endDate/currentDate instance (see above).
                endDate.setTime((long) (maxvalue.getTime() + (transMeta.getMaxDateOffset() * 1000)));
              }
            } else {
              if (log.isDetailed()) {
                log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection"));
              }
            }
          } catch (KettleException e) {
            throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorConnectingToDatabase", "" + transMeta.getMaxDateConnection()), e);
          } finally {
            maxdb.disconnect();
          }
        } else {
          throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.MaximumDateConnectionCouldNotBeFound", "" + transMeta.getMaxDateConnection()));
        }
      }

      // Get the maximum in depdate...
      if (transMeta.nrDependencies() > 0) {
        if (log.isDetailed()) {
          log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.CheckingForMaxDependencyDate"));
        }
        //
        // Maybe one of the tables where this transformation is dependent on has changed?
        // If so we need to change the start-date!
        //
        depDate = Const.MIN_DATE;
        Date maxdepdate = Const.MIN_DATE;
        // Seed both dates with the dependency date recorded in the last log entry, if any.
        if (lastr != null && lastr.length > 0) {
          // #1: last depdate
          Date dep = (Date) lastr[1];
          if (dep != null) {
            maxdepdate = dep;
            depDate = dep;
          }
        }
        // Query every dependency table for its maximum date and keep the overall maximum.
        for (int i = 0; i < transMeta.nrDependencies(); i++) {
          TransDependency td = transMeta.getDependency(i);
          DatabaseMeta depcon = td.getDatabase();
          if (depcon != null) {
            Database depdb = new Database(this, depcon);
            try {
              depdb.connect();
              depdb.setCommit(logCommitSize);

              String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename();
              RowMetaAndData r1 = depdb.getOneRow(sql);
              if (r1 != null) {
                // OK, we have a row, get the result!
                Date maxvalue = (Date) r1.getData()[0];
                if (maxvalue != null) {
                  if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.FoundDateFromTable", td.getTablename(), "." + td.getFieldname(), " = " + maxvalue.toString()));
                  }
                  if (maxvalue.getTime() > maxdepdate.getTime()) {
                    maxdepdate = maxvalue;
                  }
                } else {
                  throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td.getTablename() + ".", td.getFieldname()));
                }
              } else {
                throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td.getTablename() + ".", td.getFieldname()));
              }
            } catch (KettleException e) {
              throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorInDatabase", "" + td.getDatabase()), e);
            } finally {
              depdb.disconnect();
            }
          } else {
            throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ConnectionCouldNotBeFound", "" + td.getDatabase()));
          }
          if (log.isDetailed()) {
            log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.Maxdepdate") + (XMLHandler.date2string(maxdepdate)));
          }
        }

        // If any dependency changed since the last recorded depDate, force a full reprocess
        // by resetting the start date to the minimum.
        if (maxdepdate.getTime() > depDate.getTime()) {
          depDate = maxdepdate;
          startDate = Const.MIN_DATE;
        }
      } else {
        depDate = currentDate;
      }
    }

    // OK, now we have a date-range. See if we need to set a maximum!
    // Do we have a difference specified? Is the start date > minimum?
    if (transMeta.getMaxDateDifference() > 0.0 && startDate.getTime() > Const.MIN_DATE.getTime()) {
      // See if the end-date is larger than Start_date + DIFF; if so, cap it.
      Date maxdesired = new Date(startDate.getTime() + ((long) transMeta.getMaxDateDifference() * 1000));

      if (endDate.compareTo(maxdesired) > 0) {
        endDate = maxdesired;
      }
    }
  } catch (KettleException e) {
    throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorCalculatingDateRange", logTable), e);
  }
  // Be careful, We DO NOT close the trans log table database connection!!!
  // It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions.
}
Use of org.pentaho.di.core.logging.TransLogTable in the pentaho-kettle project by pentaho.
From the class Trans, the method endProcessing:
/**
 * End processing: writes the final status record to the transformation log table (when one is
 * configured), cleans up old log records on END/STOP, and commits the log connection when it is
 * not in auto-commit mode.
 * <p>
 * Database errors while writing the log record are logged and counted as transformation errors
 * (PDI-9790) rather than rethrown; any other failure is wrapped in a {@link KettleException}.
 * The shared log connection is disconnected (and the field cleared) when no log interval is set
 * or when the transformation has ended/stopped.
 *
 * @return true if all end processing is successful, false otherwise
 * @throws KettleException if any errors occur during processing
 */
private synchronized boolean endProcessing() throws KettleException {
  // Determine the status to record, in priority order: stopped > finished > paused > running.
  LogStatus status;
  if (isStopped()) {
    status = LogStatus.STOP;
  } else if (isFinished()) {
    status = LogStatus.END;
  } else if (isPaused()) {
    status = LogStatus.PAUSED;
  } else {
    status = LogStatus.RUNNING;
  }

  TransLogTable transLogTable = transMeta.getTransLogTable();
  // A log interval <= 0 means interval logging is disabled.
  int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1);

  logDate = new Date();

  // OK, we have some logging to do...
  //
  DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta();
  String logTable = transMeta.getTransLogTable().getActualTableName();
  if (logcon != null) {
    Database ldb = null;
    try {
      // Reuse the connection opened in calculateBatchIdAndDateRange() when available.
      if (transLogTableDatabaseConnection == null) {
        ldb = new Database(this, logcon);
        ldb.shareVariablesWith(this);
        ldb.connect();
        ldb.setCommit(logCommitSize);
        transLogTableDatabaseConnection = ldb;
      } else {
        ldb = transLogTableDatabaseConnection;
      }

      // Write the log record with the current status...
      if (!Utils.isEmpty(logTable)) {
        ldb.writeLogRecord(transLogTable, status, this, null);
      }

      // When the run is over, clean up (time-out) old log records.
      if (status.equals(LogStatus.END) || status.equals(LogStatus.STOP)) {
        ldb.cleanupLogRecords(transLogTable, getName());
      }

      // Commit the log writes to prevent locking issues on non-auto-commit connections.
      if (!ldb.isAutoCommit()) {
        ldb.commitLog(true, transMeta.getTransLogTable());
      }
    } catch (KettleDatabaseException e) {
      // PDI-9790 error write to log db is transaction error
      log.logError(BaseMessages.getString(PKG, "Database.Error.WriteLogTable", logTable), e);
      errors.incrementAndGet();
      // end PDI-9790
    } catch (Exception e) {
      throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", transMeta.getTransLogTable().getActualTableName()), e);
    } finally {
      // Guard against ldb still being null (e.g. Database construction failed): without the
      // null check, an exception above would be masked by an NPE thrown from this finally block.
      if (ldb != null && (intervalInSeconds <= 0 || (status.equals(LogStatus.END) || status.equals(LogStatus.STOP)))) {
        ldb.disconnect();
        // Remember that the shared connection is now disconnected.
        transLogTableDatabaseConnection = null;
      }
    }
  }
  return true;
}
Use of org.pentaho.di.core.logging.TransLogTable in the pentaho-kettle project by pentaho.
From the class KettleDatabaseRepositoryTransDelegate, the method insertTransformation:
// CHECKSTYLE:LineLength:OFF
/**
 * Inserts the given transformation's header row into R_TRANSFORMATION and writes the related
 * transformation attributes (feedback, performance capture, log tables, sleep times, ...) to the
 * repository. Also records the logging and max-date connection links and saves all log table
 * configurations.
 *
 * @param transMeta the transformation metadata to insert (must already have an object id and a
 *                  repository directory assigned)
 * @throws KettleException if any repository/database operation fails
 */
private synchronized void insertTransformation(TransMeta transMeta) throws KettleException {
  RowMetaAndData table = new RowMetaAndData();
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_TRANSFORMATION), new LongObjectId(transMeta.getObjectId()));
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_NAME), transMeta.getName());
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_DESCRIPTION), transMeta.getDescription());
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_EXTENDED_DESCRIPTION), transMeta.getExtendedDescription());
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_TRANS_VERSION), transMeta.getTransversion());
  // A negative status is normalized to -1 ("unknown").
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_TRANS_STATUS), Long.valueOf(transMeta.getTransstatus() < 0 ? -1L : transMeta.getTransstatus()));

  // Store the object ids of the steps linked to the various line counters of the log table.
  TransLogTable logTable = transMeta.getTransLogTable();
  StepMeta step = (StepMeta) logTable.getSubject(TransLogTable.ID.LINES_READ);
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_STEP_READ), step == null ? null : step.getObjectId());
  step = (StepMeta) logTable.getSubject(TransLogTable.ID.LINES_WRITTEN);
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_STEP_WRITE), step == null ? null : step.getObjectId());
  step = (StepMeta) logTable.getSubject(TransLogTable.ID.LINES_INPUT);
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_STEP_INPUT), step == null ? null : step.getObjectId());
  step = (StepMeta) logTable.getSubject(TransLogTable.ID.LINES_OUTPUT);
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_STEP_OUTPUT), step == null ? null : step.getObjectId());
  step = (StepMeta) logTable.getSubject(TransLogTable.ID.LINES_UPDATED);
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_STEP_UPDATE), step == null ? null : step.getObjectId());

  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DATABASE_LOG), logTable.getDatabaseMeta() == null ? new LongObjectId(-1L).longValue() : new LongObjectId(logTable.getDatabaseMeta().getObjectId()).longValue());
  // BUGFIX: the log table *name* belongs in this string column — the original code passed the
  // DatabaseMeta object here, storing its toString() instead of the table name.
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_TABLE_NAME_LOG), logTable.getTableName());
  table.addValue(new ValueMetaBoolean(KettleDatabaseRepository.FIELD_TRANSFORMATION_USE_BATCHID), Boolean.valueOf(logTable.isBatchIdUsed()));
  table.addValue(new ValueMetaBoolean(KettleDatabaseRepository.FIELD_TRANSFORMATION_USE_LOGFIELD), Boolean.valueOf(logTable.isLogFieldUsed()));
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DATABASE_MAXDATE), transMeta.getMaxDateConnection() == null ? new LongObjectId(-1L).longValue() : new LongObjectId(transMeta.getMaxDateConnection().getObjectId()).longValue());
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_TABLE_NAME_MAXDATE), transMeta.getMaxDateTable());
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_FIELD_NAME_MAXDATE), transMeta.getMaxDateField());
  table.addValue(new ValueMetaNumber(KettleDatabaseRepository.FIELD_TRANSFORMATION_OFFSET_MAXDATE), Double.valueOf(transMeta.getMaxDateOffset()));
  table.addValue(new ValueMetaNumber(KettleDatabaseRepository.FIELD_TRANSFORMATION_DIFF_MAXDATE), Double.valueOf(transMeta.getMaxDateDifference()));
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_CREATED_USER), transMeta.getCreatedUser());
  table.addValue(new ValueMetaDate(KettleDatabaseRepository.FIELD_TRANSFORMATION_CREATED_DATE), transMeta.getCreatedDate());
  table.addValue(new ValueMetaString(KettleDatabaseRepository.FIELD_TRANSFORMATION_MODIFIED_USER), transMeta.getModifiedUser());
  table.addValue(new ValueMetaDate(KettleDatabaseRepository.FIELD_TRANSFORMATION_MODIFIED_DATE), transMeta.getModifiedDate());
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_SIZE_ROWSET), Long.valueOf(transMeta.getSizeRowset()));
  table.addValue(new ValueMetaInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DIRECTORY), transMeta.getRepositoryDirectory().getObjectId());

  // Insert the header row.
  repository.connectionDelegate.getDatabase().prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_TRANSFORMATION);
  repository.connectionDelegate.getDatabase().setValuesInsert(table);
  repository.connectionDelegate.getDatabase().insertRow();
  repository.connectionDelegate.getDatabase().closeInsert();

  // The rejected-lines step is stored as a transformation attribute, not a header column.
  step = (StepMeta) logTable.getSubject(TransLogTable.ID.LINES_REJECTED);
  if (step != null) {
    ObjectId rejectedId = step.getObjectId();
    Preconditions.checkNotNull(rejectedId);
    repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_ID_STEP_REJECTED, Long.valueOf(rejectedId.toString()), null);
  }

  // Miscellaneous transformation settings stored as attributes.
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_UNIQUE_CONNECTIONS, 0, transMeta.isUsingUniqueConnections() ? "Y" : "N");
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_FEEDBACK_SHOWN, 0, transMeta.isFeedbackShown() ? "Y" : "N");
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_FEEDBACK_SIZE, transMeta.getFeedbackSize(), "");
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_USING_THREAD_PRIORITIES, 0, transMeta.isUsingThreadPriorityManagment() ? "Y" : "N");
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_SHARED_FILE, 0, transMeta.getSharedObjectsFile());
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_CAPTURE_STEP_PERFORMANCE, 0, transMeta.isCapturingStepPerformanceSnapShots() ? "Y" : "N");
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_STEP_PERFORMANCE_CAPTURING_DELAY, transMeta.getStepPerformanceCapturingDelay(), "");
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_STEP_PERFORMANCE_CAPTURING_SIZE_LIMIT, 0, transMeta.getStepPerformanceCapturingSizeLimit());
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_STEP_PERFORMANCE_LOG_TABLE, 0, transMeta.getPerformanceLogTable().getTableName());
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_LOG_SIZE_LIMIT, 0, transMeta.getTransLogTable().getLogSizeLimit());
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_LOG_INTERVAL, 0, transMeta.getTransLogTable().getLogInterval());
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_TRANSFORMATION_TYPE, 0, transMeta.getTransformationType().getCode());
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_SLEEP_TIME_EMPTY, transMeta.getSleepTimeEmpty(), null);
  repository.connectionDelegate.insertTransAttribute(transMeta.getObjectId(), 0, KettleDatabaseRepository.TRANS_ATTRIBUTE_SLEEP_TIME_FULL, transMeta.getSleepTimeFull(), null);

  // Save the logging connection link...
  if (logTable.getDatabaseMeta() != null) {
    repository.insertStepDatabase(transMeta.getObjectId(), null, logTable.getDatabaseMeta().getObjectId());
  }
  // Save the maxdate connection link...
  if (transMeta.getMaxDateConnection() != null) {
    repository.insertStepDatabase(transMeta.getObjectId(), null, transMeta.getMaxDateConnection().getObjectId());
  }

  // Save the logging tables too..
  //
  RepositoryAttributeInterface attributeInterface = new KettleDatabaseRepositoryTransAttribute(repository.connectionDelegate, transMeta.getObjectId());
  transMeta.getTransLogTable().saveToRepository(attributeInterface);
  transMeta.getStepLogTable().saveToRepository(attributeInterface);
  transMeta.getPerformanceLogTable().saveToRepository(attributeInterface);
  transMeta.getChannelLogTable().saveToRepository(attributeInterface);
}
Use of org.pentaho.di.core.logging.TransLogTable in the pentaho-kettle project by pentaho.
From the class TransformationHasTransLogConfiguredImportRuleIT, the method testRule:
/**
 * Integration test for the "transformation has trans log table configured" import rule:
 * verifies the rule reports an ERROR when the log table is unconfigured or any of the
 * schema/table/connection constraints mismatch, an APPROVAL when everything matches,
 * and no feedback at all when the rule is disabled.
 */
public void testRule() throws Exception {
  // Create a transformation with a log database to test against.
  TransMeta transMeta = new TransMeta();
  DatabaseMeta logDbMeta = new DatabaseMeta("LOGDB", "MYSQL", "JDBC", "localhost", "test", "3306", "foo", "bar");
  transMeta.addDatabase(logDbMeta);
  TransLogTable logTable = transMeta.getTransLogTable();

  // Load the rule from the plugin registry.
  PluginRegistry registry = PluginRegistry.getInstance();
  PluginInterface plugin = registry.findPluginWithId(ImportRulePluginType.class, "TransformationHasTransLogConfigured");
  assertNotNull("The 'transformation has trans log table configured' rule could not be found in the plugin registry!", plugin);
  TransformationHasTransLogConfiguredImportRule rule = (TransformationHasTransLogConfiguredImportRule) registry.loadClass(plugin);
  // BUGFIX: assert on the loaded rule instance, not on the plugin found above.
  assertNotNull("The 'transformation has trans log table configured' class could not be loaded by the plugin registry!", rule);
  rule.setEnabled(true);

  // With no log table configured, an error is expected.
  List<ImportValidationFeedback> feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't get any feedback from the 'transformation has trans log table configured'", !feedback.isEmpty());
  assertTrue("An error ruling was expected", feedback.get(0).getResultType() == ImportValidationResultType.ERROR);

  // BUGFIX: the first call was setTableName("SCHEMA"), which the second call immediately
  // overwrote — the schema was clearly intended here (mirrors the rule setup below).
  logTable.setSchemaName("SCHEMA");
  logTable.setTableName("LOGTABLE");
  logTable.setConnectionName(logDbMeta.getName());
  feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't get any feedback from the 'transformation has description rule'", !feedback.isEmpty());
  assertTrue("An approval ruling was expected", feedback.get(0).getResultType() == ImportValidationResultType.APPROVAL);

  // Make the rules stricter!
  //
  // BUGFIX: same schema/table typo as above — the first call was setTableName("SCHEMA").
  rule.setSchemaName("SCHEMA");
  rule.setTableName("LOGTABLE");
  rule.setConnectionName(logDbMeta.getName());
  feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't get any feedback from the 'transformation has description rule'", !feedback.isEmpty());
  assertTrue("An approval ruling was expected", feedback.get(0).getResultType() == ImportValidationResultType.APPROVAL);

  // Break the rule: wrong schema.
  //
  rule.setSchemaName("INCORRECT_SCHEMA");
  rule.setTableName("LOGTABLE");
  rule.setConnectionName(logDbMeta.getName());
  feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't get any feedback from the 'transformation has description rule'", !feedback.isEmpty());
  assertTrue("An error ruling was expected", feedback.get(0).getResultType() == ImportValidationResultType.ERROR);

  // Break the rule: wrong table.
  rule.setSchemaName("SCHEMA");
  rule.setTableName("INCORRECT_LOGTABLE");
  rule.setConnectionName(logDbMeta.getName());
  feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't get any feedback from the 'transformation has description rule'", !feedback.isEmpty());
  assertTrue("An error ruling was expected", feedback.get(0).getResultType() == ImportValidationResultType.ERROR);

  // Break the rule: wrong connection.
  rule.setSchemaName("SCHEMA");
  rule.setTableName("LOGTABLE");
  rule.setConnectionName("INCORRECT_DATABASE");
  feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't get any feedback from the 'transformation has description rule'", !feedback.isEmpty());
  assertTrue("An error ruling was expected", feedback.get(0).getResultType() == ImportValidationResultType.ERROR);

  // No feedback expected when the rule is disabled!
  //
  rule.setEnabled(false);
  feedback = rule.verifyRule(transMeta);
  assertTrue("We didn't expect any feedback from the 'transformation has trans " + "log table configured' since the rule is not enabled", feedback.isEmpty());
}
Use of org.pentaho.di.core.logging.TransLogTable in the pentaho-kettle project by pentaho.
From the class SpoonTransformationDelegate, the method addTransGraph:
/**
 * Adds (or focuses) a Spoon UI tab for the given transformation's graph.
 * <p>
 * If the transformation is newly added, this creates a TransGraph tab (disambiguating the tab
 * name with the object's location when a same-named tab already exists, and truncating overlong
 * names with an ellipsis), sets its tooltip/image, registers the tab entry, and — when a log
 * table is defined and this is not a slave transformation — opens the history tabs. If the
 * transformation was already open, its existing tab is simply selected.
 *
 * @param transMeta the transformation whose graph tab should be shown
 */
public void addTransGraph(TransMeta transMeta) {
  boolean added = addTransformation(transMeta);
  if (added) {
    // See if there already is a tab for this graph with the short default name.
    // If there is, set that one to show the location as well.
    // If not, simply add it without
    // If no, add it
    // If yes, select that tab
    //
    boolean showLocation = false;
    boolean addTab = true;
    String tabName = spoon.delegates.tabs.makeTabName(transMeta, showLocation);
    TabMapEntry tabEntry = spoon.delegates.tabs.findTabMapEntry(tabName, ObjectType.TRANSFORMATION_GRAPH);
    if (tabEntry != null) {
      // We change the already loaded transformation to also show the location.
      //
      showLocation = true;

      // Try again, including the location of the object...
      //
      tabName = spoon.delegates.tabs.makeTabName(transMeta, showLocation);
      TabMapEntry exactSameEntry = spoon.delegates.tabs.findTabMapEntry(tabName, ObjectType.TRANSFORMATION_GRAPH);
      if (exactSameEntry != null) {
        // Already loaded, simply select the tab item in question...
        //
        addTab = false;
      } else {
        // We might need to rename the tab of the entry already loaded!
        //
        tabEntry.setShowingLocation(true);
        String newTabName = spoon.delegates.tabs.makeTabName(tabEntry.getObject().getMeta(), showLocation);
        tabEntry.getTabItem().setText(newTabName);
      }
    }

    TransGraph transGraph = null;
    if (addTab) {
      transGraph = new TransGraph(spoon.tabfolder.getSwtTabset(), spoon, transMeta);
      PropsUI props = PropsUI.getInstance();
      // Truncate overlong tab names and append an ellipsis.
      if (tabName.length() >= getMaxTabLength()) {
        tabName = new StringBuilder().append(tabName.substring(0, getMaxTabLength())).append("\u2026").toString();
      }
      TabItem tabItem = new TabItem(spoon.tabfolder, tabName, tabName, props.getSashWeights());
      String toolTipText = BaseMessages.getString(PKG, "Spoon.TabTrans.Tooltip", spoon.delegates.tabs.makeTabName(transMeta, showLocation));
      if (!Utils.isEmpty(transMeta.getFilename())) {
        toolTipText += Const.CR + Const.CR + transMeta.getFilename();
      }
      tabItem.setToolTipText(toolTipText);
      tabItem.setImage(GUIResource.getInstance().getImageTransGraph());
      tabItem.setControl(transGraph);
      // Note: the unused "TransLogTable logTable" local that used to be declared here was
      // removed; the log table is only consulted further down when deciding to open history.
      String versionLabel = transMeta.getObjectRevision() == null ? null : transMeta.getObjectRevision().getName();

      tabEntry = new TabMapEntry(tabItem, transMeta.getFilename(), transMeta.getName(), transMeta.getRepositoryDirectory(), versionLabel, transGraph, ObjectType.TRANSFORMATION_GRAPH, transMeta.getVariable(Spoon.CONNECTION));
      tabEntry.setShowingLocation(showLocation);

      spoon.delegates.tabs.addTab(tabEntry);
    }

    int idx = spoon.tabfolder.indexOf(tabEntry.getTabItem());

    // keep the focus on the graph
    spoon.tabfolder.setSelected(idx);

    if (addTab) {
      TransLogTable logTable = transMeta.getTransLogTable();
      // OK, also see if we need to open a new history window.
      if (isLogTableDefined(logTable) && !transMeta.isSlaveTransformation()) {
        addTabsToTransGraph(transGraph);
      }
    }

    spoon.setUndoMenu(transMeta);
    spoon.enableMenus();
  } else {
    TabMapEntry tabEntry = spoon.delegates.tabs.findTabMapEntry(transMeta);
    if (tabEntry != null) {
      int idx = spoon.tabfolder.indexOf(tabEntry.getTabItem());

      // keep the focus on the graph
      spoon.tabfolder.setSelected(idx);
      spoon.setUndoMenu(transMeta);
      spoon.enableMenus();
    }
  }
}
Aggregations