Use of org.pentaho.di.core.logging.ChannelLogTable in project pentaho-kettle by pentaho.
The class Job, method beginProcessing().
/**
* Handle logging at start
*
* @return true if it went OK.
*
* @throws KettleException
*/
public boolean beginProcessing() throws KettleException {
  currentDate = new Date();
  logDate = new Date();
  startDate = Const.MIN_DATE;
  endDate = currentDate;
  resetErrors();

  final JobLogTable jobLogTable = jobMeta.getJobLogTable();
  int intervalInSeconds = Const.toInt(environmentSubstitute(jobLogTable.getLogInterval()), -1);

  if (jobLogTable.isDefined()) {
    DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta();
    String schemaName = environmentSubstitute(jobMeta.getJobLogTable().getActualSchemaName());
    String tableName = environmentSubstitute(jobMeta.getJobLogTable().getActualTableName());
    String schemaAndTable =
        jobMeta.getJobLogTable().getDatabaseMeta().getQuotedSchemaTableCombination(schemaName, tableName);
    Database ldb = new Database(this, logcon);
    ldb.shareVariablesWith(this);
    ldb.connect();
    ldb.setCommit(logCommitSize);
    try {
      // See if we have to add a batch id...
      Long id_batch = 1L;
      if (jobMeta.getJobLogTable().isBatchIdUsed()) {
        id_batch = logcon.getNextBatchId(ldb, schemaName, tableName, jobLogTable.getKeyField().getFieldName());
        setBatchId(id_batch.longValue());
        if (getPassedBatchId() <= 0) {
          setPassedBatchId(id_batch.longValue());
        }
      }

      Object[] lastr = ldb.getLastLogDate(schemaAndTable, jobMeta.getName(), true, LogStatus.END);
      if (!Utils.isEmpty(lastr)) {
        Date last;
        try {
          last = ldb.getReturnRowMeta().getDate(lastr, 0);
        } catch (KettleValueException e) {
          throw new KettleJobException(BaseMessages.getString(PKG, "Job.Log.ConversionError", "" + tableName), e);
        }
        if (last != null) {
          startDate = last;
        }
      }

      depDate = currentDate;

      ldb.writeLogRecord(jobMeta.getJobLogTable(), LogStatus.START, this, null);
      if (!ldb.isAutoCommit()) {
        ldb.commitLog(true, jobMeta.getJobLogTable());
      }
      ldb.disconnect();

      // If a log interval is set, install a timer that periodically writes an interval log record...
      //
      if (intervalInSeconds > 0) {
        final Timer timer = new Timer(getName() + " - interval logging timer");
        TimerTask timerTask = new TimerTask() {
          public void run() {
            try {
              endProcessing();
            } catch (Exception e) {
              log.logError(BaseMessages.getString(PKG, "Job.Exception.UnableToPerformIntervalLogging"), e);
              // Also stop the show...
              //
              errors.incrementAndGet();
              stopAll();
            }
          }
        };
        timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000);

        addJobListener(new JobAdapter() {
          public void jobFinished(Job job) {
            timer.cancel();
          }
        });
      }

      // Add a listener at the end of the job to take care of writing the final job
      // log record...
      //
      addJobListener(new JobAdapter() {
        public void jobFinished(Job job) throws KettleException {
          try {
            endProcessing();
          } catch (KettleJobException e) {
            log.logError(BaseMessages.getString(PKG, "Job.Exception.UnableToWriteToLoggingTable", jobLogTable.toString()), e);
            // The job fails if the final log record cannot be written!
            throw new KettleException(e);
          }
        }
      });
    } catch (KettleDatabaseException dbe) {
      // This happens even before actual execution
      addErrors(1);
      throw new KettleJobException(BaseMessages.getString(PKG, "Job.Log.UnableToProcessLoggingStart", "" + tableName), dbe);
    } finally {
      ldb.disconnect();
    }
  }

  // If we need to write out the job entry logging information, do so at the end of the job:
  //
  JobEntryLogTable jobEntryLogTable = jobMeta.getJobEntryLogTable();
  if (jobEntryLogTable.isDefined()) {
    addJobListener(new JobAdapter() {
      public void jobFinished(Job job) throws KettleException {
        try {
          writeJobEntryLogInformation();
        } catch (KettleException e) {
          throw new KettleException(BaseMessages.getString(PKG, "Job.Exception.UnableToPerformJobEntryLoggingAtJobEnd"), e);
        }
      }
    });
  }

  // If we need to write the log channel hierarchy and lineage information,
  // add a listener for that too...
  //
  ChannelLogTable channelLogTable = jobMeta.getChannelLogTable();
  if (channelLogTable.isDefined()) {
    addJobListener(new JobAdapter() {
      public void jobFinished(Job job) throws KettleException {
        try {
          writeLogChannelInformation();
        } catch (KettleException e) {
          throw new KettleException(BaseMessages.getString(PKG, "Job.Exception.UnableToPerformLoggingAtTransEnd"), e);
        }
      }
    });
  }

  JobExecutionExtension extension = new JobExecutionExtension(this, result, null, false);
  ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobBeginProcessing.id, extension);

  return true;
}
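The database logging above only runs when the job log table is defined. As a minimal sketch of how such a table might be wired up before running the job, reusing the JobLogTable API that also appears in the RepositoryTestBase snippet below (the connection and table names here are hypothetical):

JobLogTable jobLogTable = JobLogTable.getDefault(jobMeta, jobMeta);
jobLogTable.setConnectionName("logging_db"); // hypothetical connection defined in the job metadata
jobLogTable.setTableName("JOB_LOG");         // hypothetical log table name
jobLogTable.setLogInterval("60");            // an interval > 0 makes beginProcessing() schedule the interval timer
jobMeta.setJobLogTable(jobLogTable);

With this configuration, beginProcessing() writes a START record, reuses the date of the last END record as the incremental start date, and registers listeners that write the final record when the job finishes.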
Use of org.pentaho.di.core.logging.ChannelLogTable in project pentaho-kettle by pentaho.
The class AbstractMetaTest, method testGetSetChannelLogTable().
@Test
public void testGetSetChannelLogTable() throws Exception {
  assertNull(meta.getChannelLogTable());
  ChannelLogTable table = mock(ChannelLogTable.class);
  meta.setChannelLogTable(table);
  assertEquals(table, meta.getChannelLogTable());
}
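This test exercises the getter/setter round trip on the abstract meta class: the channel log table is null until one is assigned, and afterwards the getter returns the exact mock instance created with Mockito's mock(ChannelLogTable.class).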
Use of org.pentaho.di.core.logging.ChannelLogTable in project pentaho-kettle by pentaho.
The class Trans, method beginProcessing().
/**
* Begin processing. Also handle logging operations related to the start of the transformation
*
* @throws KettleTransException
* the kettle trans exception
*/
public void beginProcessing() throws KettleTransException {
  TransLogTable transLogTable = transMeta.getTransLogTable();
  int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1);

  try {
    String logTable = transLogTable.getActualTableName();

    SimpleDateFormat df = new SimpleDateFormat(REPLAY_DATE_FORMAT);
    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationCanBeReplayed") + df.format(currentDate));

    try {
      if (transLogTableDatabaseConnection != null && !Utils.isEmpty(logTable) && !Utils.isEmpty(transMeta.getName())) {
        transLogTableDatabaseConnection.writeLogRecord(transLogTable, LogStatus.START, this, null);

        // Pass in a commit to release transaction locks and to allow a user to actually see the log record.
        //
        if (!transLogTableDatabaseConnection.isAutoCommit()) {
          transLogTableDatabaseConnection.commitLog(true, transLogTable);
        }

        // If we need to do periodic logging, make sure to install a timer for this...
        //
        if (intervalInSeconds > 0) {
          final Timer timer = new Timer(getName() + " - interval logging timer");
          TimerTask timerTask = new TimerTask() {
            @Override
            public void run() {
              try {
                endProcessing();
              } catch (Exception e) {
                log.logError(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformIntervalLogging"), e);
                // Also stop the show...
                //
                errors.incrementAndGet();
                stopAll();
              }
            }
          };
          timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000);

          addTransListener(new TransAdapter() {
            @Override
            public void transFinished(Trans trans) {
              timer.cancel();
            }
          });
        }

        // Add a listener to make sure that the last record is also written when the transformation finishes...
        //
        addTransListener(new TransAdapter() {
          @Override
          public void transFinished(Trans trans) throws KettleException {
            try {
              endProcessing();
              lastWrittenStepPerformanceSequenceNr =
                  writeStepPerformanceLogRecords(lastWrittenStepPerformanceSequenceNr, LogStatus.END);
            } catch (KettleException e) {
              throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
            }
          }
        });
      }

      // If we need to write out the step logging information, do so at the end of the transformation too...
      //
      StepLogTable stepLogTable = transMeta.getStepLogTable();
      if (stepLogTable.isDefined()) {
        addTransListener(new TransAdapter() {
          @Override
          public void transFinished(Trans trans) throws KettleException {
            try {
              writeStepLogInformation();
            } catch (KettleException e) {
              throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
            }
          }
        });
      }

      // If we need to write the log channel hierarchy and lineage information, add a listener for that too...
      //
      ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
      if (channelLogTable.isDefined()) {
        addTransListener(new TransAdapter() {
          @Override
          public void transFinished(Trans trans) throws KettleException {
            try {
              writeLogChannelInformation();
            } catch (KettleException e) {
              throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
            }
          }
        });
      }

      // See if we need to write the step performance records at intervals too...
      //
      PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
      int perfLogInterval = Const.toInt(environmentSubstitute(performanceLogTable.getLogInterval()), -1);
      if (performanceLogTable.isDefined() && perfLogInterval > 0) {
        final Timer timer = new Timer(getName() + " - step performance log interval timer");
        TimerTask timerTask = new TimerTask() {
          @Override
          public void run() {
            try {
              lastWrittenStepPerformanceSequenceNr =
                  writeStepPerformanceLogRecords(lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING);
            } catch (Exception e) {
              log.logError(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformIntervalPerformanceLogging"), e);
              // Also stop the show...
              //
              errors.incrementAndGet();
              stopAll();
            }
          }
        };
        timer.schedule(timerTask, perfLogInterval * 1000, perfLogInterval * 1000);

        addTransListener(new TransAdapter() {
          @Override
          public void transFinished(Trans trans) {
            timer.cancel();
          }
        });
      }
    } catch (KettleException e) {
      throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", logTable), e);
    } finally {
      // If we use interval logging, we keep the connection open for the duration of the transformation...
      //
      if (transLogTableDatabaseConnection != null && (intervalInSeconds <= 0)) {
        transLogTableDatabaseConnection.disconnect();
        transLogTableDatabaseConnection = null;
      }
    }
  } catch (KettleException e) {
    throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToBeginProcessingTransformation"), e);
  }
}
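The step performance branch only activates when the performance log table is defined and its interval is positive. A minimal sketch of such a configuration, assuming a setLogInterval(String) setter symmetric to the getLogInterval() call used above (connection and table names are hypothetical):

PerformanceLogTable performanceLogTable = PerformanceLogTable.getDefault(transMeta, transMeta);
performanceLogTable.setConnectionName("logging_db");      // hypothetical logging connection
performanceLogTable.setTableName("STEP_PERFORMANCE_LOG"); // hypothetical table name
performanceLogTable.setLogInterval("30");                 // write RUNNING performance records every 30 seconds
transMeta.setPerformanceLogTable(performanceLogTable);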
Use of org.pentaho.di.core.logging.ChannelLogTable in project pentaho-kettle by pentaho.
The class Trans, method writeLogChannelInformation().
/**
* Writes log channel information to a channel logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeLogChannelInformation() throws KettleException {
  Database db = null;
  ChannelLogTable channelLogTable = transMeta.getChannelLogTable();

  // PDI-7070: If the parent trans or job has the same channel logging info, don't duplicate log entries
  Trans t = getParentTrans();
  if (t != null) {
    if (channelLogTable.equals(t.getTransMeta().getChannelLogTable())) {
      return;
    }
  }
  Job j = getParentJob();
  if (j != null) {
    if (channelLogTable.equals(j.getJobMeta().getChannelLogTable())) {
      return;
    }
  }

  try {
    db = new Database(this, channelLogTable.getDatabaseMeta());
    db.shareVariablesWith(this);
    db.connect();
    db.setCommit(logCommitSize);

    List<LoggingHierarchy> loggingHierarchyList = getLoggingHierarchy();
    for (LoggingHierarchy loggingHierarchy : loggingHierarchyList) {
      db.writeLogRecord(channelLogTable, LogStatus.START, loggingHierarchy, null);
    }

    // Also time-out the log records in here...
    //
    db.cleanupLogRecords(channelLogTable);
  } catch (Exception e) {
    throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToWriteLogChannelInformationToLogTable"), e);
  } finally {
    if (!db.isAutoCommit()) {
      db.commit(true);
    }
    db.disconnect();
  }
}
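For this method to write anything, the transformation's channel log table must be defined, for example along the lines of the ChannelLogTable setup used in the RepositoryTestBase snippet below (connection and table names are hypothetical):

ChannelLogTable channelLogTable = ChannelLogTable.getDefault(transMeta, transMeta);
channelLogTable.setConnectionName("logging_db"); // hypothetical connection name
channelLogTable.setTableName("CHANNEL_LOG");     // hypothetical table name
transMeta.setChannelLogTable(channelLogTable);

Note the PDI-7070 guard at the top: if a parent transformation or job already logs to the same channel table, the method returns early so the channel hierarchy is written only once.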
Use of org.pentaho.di.core.logging.ChannelLogTable in project pentaho-kettle by pentaho.
The class RepositoryTestBase, method createJobMeta().
protected JobMeta createJobMeta(String jobName) throws Exception {
  RepositoryDirectoryInterface rootDir = loadStartDirectory();
  JobMeta jobMeta = new JobMeta();
  jobMeta.setName(jobName);
  jobMeta.setDescription(EXP_JOB_DESC);
  jobMeta.setExtendedDescription(EXP_JOB_EXTENDED_DESC);
  jobMeta.setRepositoryDirectory(rootDir.findDirectory(DIR_JOBS));
  jobMeta.setJobversion(EXP_JOB_VERSION);
  jobMeta.setJobstatus(EXP_JOB_STATUS);
  jobMeta.setCreatedUser(EXP_JOB_CREATED_USER);
  jobMeta.setCreatedDate(EXP_JOB_CREATED_DATE);
  jobMeta.setModifiedUser(EXP_JOB_MOD_USER);
  jobMeta.setModifiedDate(EXP_JOB_MOD_DATE);
  jobMeta.addParameterDefinition(EXP_JOB_PARAM_1_NAME, EXP_JOB_PARAM_1_DEF, EXP_JOB_PARAM_1_DESC);

  // TODO mlowery other jobLogTable fields could be set for testing here
  JobLogTable jobLogTable = JobLogTable.getDefault(jobMeta, jobMeta);
  jobLogTable.setConnectionName(EXP_JOB_LOG_TABLE_CONN_NAME);
  jobLogTable.setLogInterval(EXP_JOB_LOG_TABLE_INTERVAL);
  jobLogTable.setSchemaName(EXP_JOB_LOG_TABLE_SCHEMA_NAME);
  jobLogTable.setLogSizeLimit(EXP_JOB_LOG_TABLE_SIZE_LIMIT);
  jobLogTable.setTableName(EXP_JOB_LOG_TABLE_TABLE_NAME);
  jobLogTable.setTimeoutInDays(EXP_JOB_LOG_TABLE_TIMEOUT_IN_DAYS);
  jobMeta.setJobLogTable(jobLogTable);

  // TODO mlowery other jobEntryLogTable fields could be set for testing here
  JobEntryLogTable jobEntryLogTable = JobEntryLogTable.getDefault(jobMeta, jobMeta);
  jobEntryLogTable.setConnectionName(EXP_JOB_LOG_TABLE_CONN_NAME);
  jobEntryLogTable.setSchemaName(EXP_JOB_LOG_TABLE_SCHEMA_NAME);
  jobEntryLogTable.setTableName(EXP_JOB_LOG_TABLE_TABLE_NAME);
  jobEntryLogTable.setTimeoutInDays(EXP_JOB_LOG_TABLE_TIMEOUT_IN_DAYS);
  jobMeta.setJobEntryLogTable(jobEntryLogTable);

  // TODO mlowery other channelLogTable fields could be set for testing here
  ChannelLogTable channelLogTable = ChannelLogTable.getDefault(jobMeta, jobMeta);
  channelLogTable.setConnectionName(EXP_JOB_LOG_TABLE_CONN_NAME);
  channelLogTable.setSchemaName(EXP_JOB_LOG_TABLE_SCHEMA_NAME);
  channelLogTable.setTableName(EXP_JOB_LOG_TABLE_TABLE_NAME);
  channelLogTable.setTimeoutInDays(EXP_JOB_LOG_TABLE_TIMEOUT_IN_DAYS);
  jobMeta.setChannelLogTable(channelLogTable);

  jobMeta.setBatchIdPassed(EXP_JOB_BATCH_ID_PASSED);
  jobMeta.setSharedObjectsFile(EXP_JOB_SHARED_OBJECTS_FILE);

  DatabaseMeta entryDbMeta = createDatabaseMeta(EXP_DBMETA_NAME_JOB.concat(jobName));
  repository.save(entryDbMeta, VERSION_COMMENT_V1, null);
  deleteStack.push(entryDbMeta);
  JobEntryCopy jobEntryCopy1 = createJobEntry1Copy(entryDbMeta);
  jobMeta.addJobEntry(jobEntryCopy1);
  JobEntryCopy jobEntryCopy2 = createJobEntry2Copy(entryDbMeta);
  jobMeta.addJobEntry(jobEntryCopy2);
  jobMeta.addJobHop(createJobHopMeta(jobEntryCopy1, jobEntryCopy2));
  jobMeta.addNote(createNotePadMeta(jobName));
  return jobMeta;
}
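In the repository tests this helper is typically followed by a save and reload; a minimal usage sketch, assuming the repository field and VERSION_COMMENT_V1 constant visible elsewhere in this class:

JobMeta jobMeta = createJobMeta("testJob");         // hypothetical job name
repository.save(jobMeta, VERSION_COMMENT_V1, null); // persisted the same way as entryDbMeta above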