Use of org.pentaho.di.core.logging.StepLogTable in the pentaho-kettle project by Pentaho.
From the class SpoonExportXmlTest, method savingTransToXmlNotChangesLogTables:
@Test
public void savingTransToXmlNotChangesLogTables() {
  TransMeta transMeta = new TransMeta();
  initTables(transMeta);

  // Snapshot every configured log table before the save operation runs.
  TransLogTable transLogBefore = transMeta.getTransLogTable();
  StepLogTable stepLogBefore = transMeta.getStepLogTable();
  PerformanceLogTable perfLogBefore = transMeta.getPerformanceLogTable();
  ChannelLogTable channelLogBefore = transMeta.getChannelLogTable();
  MetricsLogTable metricsLogBefore = transMeta.getMetricsLogTable();

  // Stub Spoon so saveXMLFile(boolean) runs for real against our transMeta.
  when(spoon.getActiveTransformation()).thenReturn(transMeta);
  when(spoon.saveXMLFile(any(TransMeta.class), anyBoolean())).thenReturn(true);
  when(spoon.saveXMLFile(anyBoolean())).thenCallRealMethod();

  spoon.saveXMLFile(true);

  // Exporting to XML must leave every log table's settings untouched.
  tablesCommonValuesEqual(transLogBefore, transMeta.getTransLogTable());
  assertEquals(transLogBefore.getLogInterval(), transMeta.getTransLogTable().getLogInterval());
  assertEquals(transLogBefore.getLogSizeLimit(), transMeta.getTransLogTable().getLogSizeLimit());

  tablesCommonValuesEqual(stepLogBefore, transMeta.getStepLogTable());

  tablesCommonValuesEqual(perfLogBefore, transMeta.getPerformanceLogTable());
  assertEquals(perfLogBefore.getLogInterval(), transMeta.getPerformanceLogTable().getLogInterval());

  tablesCommonValuesEqual(channelLogBefore, transMeta.getChannelLogTable());
  tablesCommonValuesEqual(metricsLogBefore, transMeta.getMetricsLogTable());
}
Use of org.pentaho.di.core.logging.StepLogTable in the pentaho-kettle project by Pentaho.
From the class Trans, method writeStepLogInformation:
/**
 * Writes step information to a step logging table (if one has been configured).
 *
 * Connects to the step log table's database, writes one START record per step of this
 * transformation, then prunes old log records via cleanupLogRecords. The connection is
 * committed (when not auto-commit) and disconnected in all cases.
 *
 * @throws KettleException
 * if any errors occur during logging
 */
protected void writeStepLogInformation() throws KettleException {
  Database db = null;
  StepLogTable stepLogTable = getTransMeta().getStepLogTable();
  try {
    db = createDataBase(stepLogTable.getDatabaseMeta());
    db.shareVariablesWith(this);
    db.connect();
    db.setCommit(logCommitSize);
    for (StepMetaDataCombi combi : getSteps()) {
      db.writeLogRecord(stepLogTable, LogStatus.START, combi, null);
    }
    // Remove stale log records according to the table's cleanup settings.
    db.cleanupLogRecords(stepLogTable);
  } catch (Exception e) {
    throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToWriteStepInformationToLogTable"), e);
  } finally {
    // BUGFIX: db is still null when createDataBase() throws, so the original
    // db.isAutoCommit()/db.disconnect() calls raised a NullPointerException here,
    // masking the real failure. Guard the cleanup with a null check.
    if (db != null) {
      if (!db.isAutoCommit()) {
        db.commit(true);
      }
      db.disconnect();
    }
  }
}
Use of org.pentaho.di.core.logging.StepLogTable in the pentaho-kettle project by Pentaho.
From the class Trans, method beginProcessing:
/**
 * Begin processing. Also handle logging operations related to the start of the transformation:
 * writes the START record to the transformation log table, schedules interval and performance
 * logging timers, and registers trans-finished listeners that write the final trans, step and
 * channel log records.
 *
 * @throws KettleTransException
 * the kettle trans exception
 */
public void beginProcessing() throws KettleTransException {
TransLogTable transLogTable = transMeta.getTransLogTable();
// -1 signals "no interval logging configured"; a positive value is a period in seconds.
int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1);
try {
String logTable = transLogTable.getActualTableName();
SimpleDateFormat df = new SimpleDateFormat(REPLAY_DATE_FORMAT);
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationCanBeReplayed") + df.format(currentDate));
try {
// Only write the START record when a log connection, an actual table name and a
// transformation name are all available.
if (transLogTableDatabaseConnection != null && !Utils.isEmpty(logTable) && !Utils.isEmpty(transMeta.getName())) {
transLogTableDatabaseConnection.writeLogRecord(transLogTable, LogStatus.START, this, null);
//
// Commit the START record right away when the connection is not auto-committing.
if (!transLogTableDatabaseConnection.isAutoCommit()) {
transLogTableDatabaseConnection.commitLog(true, transLogTable);
}
//
// Interval logging: periodically re-run endProcessing() to refresh the log record.
if (intervalInSeconds > 0) {
final Timer timer = new Timer(getName() + " - interval logging timer");
TimerTask timerTask = new TimerTask() {
@Override
public void run() {
try {
endProcessing();
} catch (Exception e) {
log.logError(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformIntervalLogging"), e);
// Also stop the show...
//
errors.incrementAndGet();
stopAll();
}
}
};
// NOTE(review): intervalInSeconds * 1000 is int arithmetic and could overflow for
// extremely large intervals — confirm whether widening to long is warranted.
timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000);
// Cancel the interval timer when the transformation finishes.
addTransListener(new TransAdapter() {
@Override
public void transFinished(Trans trans) {
timer.cancel();
}
});
}
// Add a listener to make sure that the last record is also written when transformation finishes...
//
addTransListener(new TransAdapter() {
@Override
public void transFinished(Trans trans) throws KettleException {
try {
endProcessing();
lastWrittenStepPerformanceSequenceNr = writeStepPerformanceLogRecords(lastWrittenStepPerformanceSequenceNr, LogStatus.END);
} catch (KettleException e) {
throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
}
}
});
}
// If we need to write out the step logging information, do so at the end of the transformation too...
//
StepLogTable stepLogTable = transMeta.getStepLogTable();
if (stepLogTable.isDefined()) {
addTransListener(new TransAdapter() {
@Override
public void transFinished(Trans trans) throws KettleException {
try {
writeStepLogInformation();
} catch (KettleException e) {
throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
}
}
});
}
// If we need to write the log channel hierarchy and lineage information, add a listener for that too...
//
ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
if (channelLogTable.isDefined()) {
addTransListener(new TransAdapter() {
@Override
public void transFinished(Trans trans) throws KettleException {
try {
writeLogChannelInformation();
} catch (KettleException e) {
throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
}
}
});
}
// See if we need to write the step performance records at intervals too...
//
PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
int perfLogInterval = Const.toInt(environmentSubstitute(performanceLogTable.getLogInterval()), -1);
if (performanceLogTable.isDefined() && perfLogInterval > 0) {
final Timer timer = new Timer(getName() + " - step performance log interval timer");
TimerTask timerTask = new TimerTask() {
@Override
public void run() {
try {
// RUNNING-status snapshot; the END-status records are written by the listener above.
lastWrittenStepPerformanceSequenceNr = writeStepPerformanceLogRecords(lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING);
} catch (Exception e) {
log.logError(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformIntervalPerformanceLogging"), e);
// Also stop the show...
//
errors.incrementAndGet();
stopAll();
}
}
};
timer.schedule(timerTask, perfLogInterval * 1000, perfLogInterval * 1000);
addTransListener(new TransAdapter() {
@Override
public void transFinished(Trans trans) {
timer.cancel();
}
});
}
} catch (KettleException e) {
throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", logTable), e);
} finally {
//
// Disconnect only when interval logging is off: with a positive interval the connection
// is kept open — presumably the periodic endProcessing() calls reuse it (TODO confirm).
if (transLogTableDatabaseConnection != null && (intervalInSeconds <= 0)) {
transLogTableDatabaseConnection.disconnect();
transLogTableDatabaseConnection = null;
}
}
} catch (KettleException e) {
throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToBeginProcessingTransformation"), e);
}
}
Use of org.pentaho.di.core.logging.StepLogTable in the pentaho-kettle project by Pentaho.
From the class TransTest, method testRecordsCleanUpMethodIsCalled:
@Test
public void testRecordsCleanUpMethodIsCalled() throws Exception {
  // Given: a step log table with a connection name, attached to a TransMeta.
  StepLogTable logTable = StepLogTable.getDefault(mock(VariableSpace.class), mock(HasDatabasesInterface.class));
  logTable.setConnectionName("connection");
  TransMeta meta = new TransMeta();
  meta.setStepLogTable(logTable);

  // And: a Trans mock whose writeStepLogInformation() runs for real against a mocked Database.
  Database database = mock(Database.class);
  Trans trans = mock(Trans.class);
  when(trans.getTransMeta()).thenReturn(meta);
  when(trans.createDataBase(any(DatabaseMeta.class))).thenReturn(database);
  when(trans.getSteps()).thenReturn(new ArrayList<StepMetaDataCombi>());
  doCallRealMethod().when(trans).writeStepLogInformation();

  // When
  trans.writeStepLogInformation();

  // Then: old log records must be cleaned up as part of step logging.
  verify(database).cleanupLogRecords(logTable);
}
Use of org.pentaho.di.core.logging.StepLogTable in the pentaho-kettle project by Pentaho.
From the class RepositoryTestBase, method createTransMeta:
/**
 * Builds a fully-populated TransMeta for repository round-trip tests: core attributes,
 * all four log tables, max-date/tuning settings, two steps joined by a hop, and clustering
 * metadata. Shared objects that need repository IDs (databases, slave server, partition
 * schema) are saved to the repository and pushed onto deleteStack for later cleanup.
 *
 * @param dbName suffix used to derive unique names for the transformation and its databases
 * @return the populated transformation metadata
 * @throws Exception if loading the start directory or saving shared objects fails
 */
protected TransMeta createTransMeta(final String dbName) throws Exception {
  RepositoryDirectoryInterface rootDir = loadStartDirectory();
  TransMeta transMeta = new TransMeta();
  setCoreTransAttributes(transMeta, dbName, rootDir);
  configureLogTables(transMeta);

  DatabaseMeta dbMeta = saveDatabaseMetaForTest(dbName);
  applyMaxDateAndTuningSettings(transMeta, dbMeta);
  transMeta.addDependency(new TransDependency(dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME));

  DatabaseMeta stepDbMeta = saveDatabaseMetaForTest(EXP_DBMETA_NAME_STEP.concat(dbName));
  addStepsAndHop(transMeta, stepDbMeta);
  configureClustering(transMeta, dbName);
  return transMeta;
}

// Sets name/description/directory, version, status, audit fields and one named parameter.
private void setCoreTransAttributes(TransMeta transMeta, String dbName, RepositoryDirectoryInterface rootDir) throws Exception {
  transMeta.setName(EXP_TRANS_NAME.concat(dbName));
  transMeta.setDescription(EXP_TRANS_DESC);
  transMeta.setExtendedDescription(EXP_TRANS_EXTENDED_DESC);
  transMeta.setRepositoryDirectory(rootDir.findDirectory(DIR_TRANSFORMATIONS));
  transMeta.setTransversion(EXP_TRANS_VERSION);
  transMeta.setTransstatus(EXP_TRANS_STATUS);
  transMeta.setCreatedUser(EXP_TRANS_CREATED_USER);
  transMeta.setCreatedDate(EXP_TRANS_CREATED_DATE);
  transMeta.setModifiedUser(EXP_TRANS_MOD_USER);
  transMeta.setModifiedDate(EXP_TRANS_MOD_DATE);
  transMeta.addParameterDefinition(EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC);
}

// Attaches trans/performance/channel/step log tables populated with the expected test values.
// TODO mlowery other log table fields could be set for testing here
private void configureLogTables(TransMeta transMeta) {
  TransLogTable transLogTable = TransLogTable.getDefault(transMeta, transMeta, new ArrayList<StepMeta>(0));
  transLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
  transLogTable.setLogInterval(EXP_TRANS_LOG_TABLE_INTERVAL);
  transLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
  transLogTable.setLogSizeLimit(EXP_TRANS_LOG_TABLE_SIZE_LIMIT);
  transLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
  transLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
  transMeta.setTransLogTable(transLogTable);

  PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault(transMeta, transMeta);
  perfLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
  perfLogTable.setLogInterval(EXP_TRANS_LOG_TABLE_INTERVAL);
  perfLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
  perfLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
  perfLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
  transMeta.setPerformanceLogTable(perfLogTable);

  ChannelLogTable channelLogTable = ChannelLogTable.getDefault(transMeta, transMeta);
  channelLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
  channelLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
  channelLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
  channelLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
  transMeta.setChannelLogTable(channelLogTable);

  StepLogTable stepLogTable = StepLogTable.getDefault(transMeta, transMeta);
  stepLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
  stepLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
  stepLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
  stepLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
  transMeta.setStepLogTable(stepLogTable);
}

// Creates a DatabaseMeta, saves it so it gets a repository ID, and registers it for deletion.
private DatabaseMeta saveDatabaseMetaForTest(String name) throws Exception {
  DatabaseMeta dbMeta = createDatabaseMeta(name);
  repository.save(dbMeta, VERSION_COMMENT_V1, null);
  deleteStack.push(dbMeta);
  return dbMeta;
}

// Applies max-date settings plus rowset/sleep/feedback/performance-capture tuning flags.
private void applyMaxDateAndTuningSettings(TransMeta transMeta, DatabaseMeta dbMeta) {
  transMeta.setMaxDateConnection(dbMeta);
  transMeta.setMaxDateTable(EXP_TRANS_MAX_DATE_TABLE);
  transMeta.setMaxDateField(EXP_TRANS_MAX_DATE_FIELD);
  transMeta.setMaxDateOffset(EXP_TRANS_MAX_DATE_OFFSET);
  transMeta.setMaxDateDifference(EXP_TRANS_MAX_DATE_DIFF);
  transMeta.setSizeRowset(EXP_TRANS_SIZE_ROWSET);
  transMeta.setSleepTimeEmpty(EXP_TRANS_SLEEP_TIME_EMPTY);
  transMeta.setSleepTimeFull(EXP_TRANS_SLEEP_TIME_FULL);
  transMeta.setUsingUniqueConnections(EXP_TRANS_USING_UNIQUE_CONN);
  transMeta.setFeedbackShown(EXP_TRANS_FEEDBACK_SHOWN);
  transMeta.setFeedbackSize(EXP_TRANS_FEEDBACK_SIZE);
  transMeta.setUsingThreadPriorityManagment(EXP_TRANS_USING_THREAD_PRIORITY_MGMT);
  transMeta.setSharedObjectsFile(EXP_TRANS_SHARED_OBJECTS_FILE);
  transMeta.setCapturingStepPerformanceSnapShots(EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS);
  transMeta.setStepPerformanceCapturingDelay(EXP_TRANS_STEP_PERF_CAP_DELAY);
}

// Adds two steps sharing a condition and database, connected by a single hop.
private void addStepsAndHop(TransMeta transMeta, DatabaseMeta stepDbMeta) throws Exception {
  Condition cond = new Condition();
  StepMeta step1 = createStepMeta1(transMeta, stepDbMeta, cond);
  transMeta.addStep(step1);
  StepMeta step2 = createStepMeta2(stepDbMeta, cond);
  transMeta.addStep(step2);
  transMeta.addTransHop(createTransHopMeta(step1, step2));
}

// Saves slave server and partition schema (so they get IDs), then wires up the
// slave-step copy partition distribution and marks the trans as a slave transformation.
private void configureClustering(TransMeta transMeta, String dbName) throws Exception {
  SlaveServer slaveServer = createSlaveServer(dbName);
  PartitionSchema partSchema = createPartitionSchema(dbName);
  repository.save(slaveServer, VERSION_COMMENT_V1, null);
  deleteStack.push(slaveServer);
  repository.save(partSchema, VERSION_COMMENT_V1, null);
  deleteStack.push(partSchema);

  SlaveStepCopyPartitionDistribution distribution = new SlaveStepCopyPartitionDistribution();
  distribution.addPartition(EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0);
  distribution.setOriginalPartitionSchemas(Arrays.asList(new PartitionSchema[] { partSchema }));
  transMeta.setSlaveStepCopyPartitionDistribution(distribution);
  transMeta.setSlaveTransformation(EXP_TRANS_SLAVE_TRANSFORMATION);
}
Aggregations