Use of org.pentaho.di.core.logging.MetricsLogTable in project pentaho-kettle by pentaho.
From the class SpoonExportXmlTest, method savingTransToXmlNotChangesLogTables:
@Test
public void savingTransToXmlNotChangesLogTables() {
  TransMeta transMeta = new TransMeta();
  initTables(transMeta);

  TransLogTable originTransLogTable = transMeta.getTransLogTable();
  StepLogTable originStepLogTable = transMeta.getStepLogTable();
  PerformanceLogTable originPerformanceLogTable = transMeta.getPerformanceLogTable();
  ChannelLogTable originChannelLogTable = transMeta.getChannelLogTable();
  MetricsLogTable originMetricsLogTable = transMeta.getMetricsLogTable();

  when(spoon.getActiveTransformation()).thenReturn(transMeta);
  when(spoon.saveXMLFile(any(TransMeta.class), anyBoolean())).thenReturn(true);
  when(spoon.saveXMLFile(anyBoolean())).thenCallRealMethod();
  spoon.saveXMLFile(true);

  tablesCommonValuesEqual(originTransLogTable, transMeta.getTransLogTable());
  assertEquals(originTransLogTable.getLogInterval(), transMeta.getTransLogTable().getLogInterval());
  assertEquals(originTransLogTable.getLogSizeLimit(), transMeta.getTransLogTable().getLogSizeLimit());
  tablesCommonValuesEqual(originStepLogTable, transMeta.getStepLogTable());
  tablesCommonValuesEqual(originPerformanceLogTable, transMeta.getPerformanceLogTable());
  assertEquals(originPerformanceLogTable.getLogInterval(), transMeta.getPerformanceLogTable().getLogInterval());
  tablesCommonValuesEqual(originChannelLogTable, transMeta.getChannelLogTable());
  tablesCommonValuesEqual(originMetricsLogTable, transMeta.getMetricsLogTable());
}
Use of org.pentaho.di.core.logging.MetricsLogTable in project pentaho-kettle by pentaho.
From the class Trans, method writeMetricsInformation:
protected synchronized void writeMetricsInformation() throws KettleException {
  // Log a few key plugin-registry timings at debug level...
  //
  List<MetricsDuration> metricsList =
    MetricsUtil.getDuration(log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START);
  if (log.isDebug() && !metricsList.isEmpty()) {
    log.logDebug(metricsList.get(0).toString());
  }

  metricsList =
    MetricsUtil.getDuration(log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START);
  if (log.isDebug() && !metricsList.isEmpty()) {
    log.logDebug(metricsList.get(0).toString());
  }

  long total = 0;
  metricsList =
    MetricsUtil.getDuration(log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START);
  if (log.isDebug() && metricsList != null && !metricsList.isEmpty()) {
    for (MetricsDuration duration : metricsList) {
      total += duration.getDuration();
      log.logDebug(" - " + duration.toString() + " Total=" + total);
    }
  }

  Database db = null;
  MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
  try {
    db = new Database(this, metricsLogTable.getDatabaseMeta());
    db.shareVariablesWith(this);
    db.connect();
    db.setCommit(logCommitSize);

    // Write the snapshot lists and snapshot maps of this log channel and all its children...
    //
    List<String> logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren(getLogChannelId());
    for (String logChannelId : logChannelIds) {
      Queue<MetricsSnapshotInterface> snapshotList =
        MetricsRegistry.getInstance().getSnapshotLists().get(logChannelId);
      if (snapshotList != null) {
        Iterator<MetricsSnapshotInterface> iterator = snapshotList.iterator();
        while (iterator.hasNext()) {
          MetricsSnapshotInterface snapshot = iterator.next();
          db.writeLogRecord(metricsLogTable, LogStatus.START, new LoggingMetric(batchId, snapshot), null);
        }
      }

      Map<String, MetricsSnapshotInterface> snapshotMap =
        MetricsRegistry.getInstance().getSnapshotMaps().get(logChannelId);
      if (snapshotMap != null) {
        synchronized (snapshotMap) {
          Iterator<MetricsSnapshotInterface> iterator = snapshotMap.values().iterator();
          while (iterator.hasNext()) {
            MetricsSnapshotInterface snapshot = iterator.next();
            db.writeLogRecord(metricsLogTable, LogStatus.START, new LoggingMetric(batchId, snapshot), null);
          }
        }
      }
    }

    // Also time-out the log records in here...
    //
    db.cleanupLogRecords(metricsLogTable);
  } catch (Exception e) {
    throw new KettleException(
      BaseMessages.getString(PKG, "Trans.Exception.UnableToWriteMetricsInformationToLogTable"), e);
  } finally {
    // The connection is null if the Database constructor or connect() failed above.
    if (db != null) {
      if (!db.isAutoCommit()) {
        db.commit(true);
      }
      db.disconnect();
    }
  }
}
Use of org.pentaho.di.core.logging.MetricsLogTable in project pentaho-kettle by pentaho.
From the class Trans, method startThreads:
/**
 * Starts the threads prepared by prepareThreads(). Before you start the threads, you can add RowListeners to them.
 *
 * @throws KettleException
 *           if there is a communication error with a remote output socket.
 */
public void startThreads() throws KettleException {
  // Now prepare to start all the threads...
  //
  nrOfFinishedSteps = 0;
  nrOfActiveSteps = 0;

  ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationStartThreads.id, this);
  fireTransStartedListeners();

  for (int i = 0; i < steps.size(); i++) {
    final StepMetaDataCombi sid = steps.get(i);
    sid.step.markStart();
    sid.step.initBeforeStart();

    // also attach a Step Listener to detect when we're done...
    //
    StepListener stepListener = new StepListener() {

      @Override
      public void stepActive(Trans trans, StepMeta stepMeta, StepInterface step) {
        nrOfActiveSteps++;
        if (nrOfActiveSteps == 1) {
          // PDI-5229 sync added
          synchronized (transListeners) {
            for (TransListener listener : transListeners) {
              listener.transActive(Trans.this);
            }
          }
        }
      }

      @Override
      public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) {
        synchronized (Trans.this) {
          nrOfFinishedSteps++;
          if (nrOfFinishedSteps >= steps.size()) {
            // Set the finished flag
            //
            setFinished(true);

            // Grab the performance statistics one last time (if enabled)
            //
            addStepPerformanceSnapShot();
            try {
              fireTransFinishedListeners();
            } catch (Exception e) {
              step.setErrors(step.getErrors() + 1L);
              log.logError(getName() + " : "
                + BaseMessages.getString(PKG, "Trans.Log.UnexpectedErrorAtTransformationEnd"), e);
            }
          }

          // If a step finished with one or more errors, kill all the other steps as well.
          //
          if (step.getErrors() > 0) {
            log.logMinimal(BaseMessages.getString(PKG, "Trans.Log.TransformationDetectedErrors"));
            log.logMinimal(BaseMessages.getString(PKG, "Trans.Log.TransformationIsKillingTheOtherSteps"));
            killAllNoWait();
          }
        }
      }
    };

    // Insert the listener at the start of the list so it is called before any other step listener.
    //
    if (sid.step instanceof BaseStep) {
      ((BaseStep) sid.step).getStepListeners().add(0, stepListener);
    } else {
      sid.step.addStepListener(stepListener);
    }
  }
  if (transMeta.isCapturingStepPerformanceSnapShots()) {
    stepPerformanceSnapshotSeqNr = new AtomicInteger(0);
    stepPerformanceSnapShots = new ConcurrentHashMap<>();

    // Calculate the maximum number of snapshots to be kept in memory
    //
    String limitString = environmentSubstitute(transMeta.getStepPerformanceCapturingSizeLimit());
    if (Utils.isEmpty(limitString)) {
      limitString = EnvUtil.getSystemProperty(Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT);
    }
    stepPerformanceSnapshotSizeLimit = Const.toInt(limitString, 0);

    // Set a timer to collect the performance data from the running threads...
    //
    stepPerformanceSnapShotTimer = new Timer("stepPerformanceSnapShot Timer: " + transMeta.getName());
    TimerTask timerTask = new TimerTask() {
      @Override
      public void run() {
        if (!isFinished()) {
          addStepPerformanceSnapShot();
        }
      }
    };
    stepPerformanceSnapShotTimer.schedule(timerTask, 100, transMeta.getStepPerformanceCapturingDelay());
  }
  // Now start a thread to monitor the running transformation...
  //
  setFinished(false);
  setPaused(false);
  setStopped(false);

  transFinishedBlockingQueue = new ArrayBlockingQueue<>(10);

  TransListener transListener = new TransAdapter() {
    @Override
    public void transFinished(Trans trans) {
      try {
        shutdownHeartbeat(trans != null ? trans.heartbeat : null);
        ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationFinish.id, trans);
      } catch (KettleException e) {
        throw new RuntimeException("Error calling extension point at end of transformation", e);
      }

      // Stop the step performance snapshot timer, if one is running.
      //
      if (transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null) {
        stepPerformanceSnapShotTimer.cancel();
      }

      transMeta.disposeEmbeddedMetastoreProvider();
      setFinished(true);

      // no longer running
      setRunning(false);

      log.snap(Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP);

      // If the user ran with metrics gathering enabled and a metrics logging table is configured,
      // write the gathered metrics to that table...
      //
      MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
      if (metricsLogTable.isDefined()) {
        try {
          writeMetricsInformation();
        } catch (Exception e) {
          log.logError("Error writing metrics information", e);
          errors.incrementAndGet();
        }
      }

      // Close the unique database connections if the transformation used them.
      //
      if (transMeta.isUsingUniqueConnections()) {
        trans.closeUniqueDatabaseConnections(getResult());
      }

      // release unused vfs connections
      KettleVFS.freeUnusedResources();
    }
  };

  // This should always be done first so that the other listeners achieve a clean state to start from (setFinished
  // and so on)
  //
  transListeners.add(0, transListener);

  setRunning(true);
  switch (transMeta.getTransformationType()) {
    case Normal:
      // Start all the step threads...
      //
      for (int i = 0; i < steps.size(); i++) {
        final StepMetaDataCombi combi = steps.get(i);
        RunThread runThread = new RunThread(combi);
        Thread thread = new Thread(runThread);
        thread.setName(getName() + " - " + combi.stepname);
        ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepBeforeStart.id, combi);

        // Call an extension point at the end of the step
        //
        combi.step.addStepListener(new StepAdapter() {
          @Override
          public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) {
            try {
              ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepFinished.id, combi);
            } catch (KettleException e) {
              throw new RuntimeException("Unexpected error in calling extension point upon step finish", e);
            }
          }
        });

        thread.start();
      }
      break;

    case SerialSingleThreaded:
      new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            // Thread priority management is of no use in a single-threaded run, so switch it off...
            //
            for (StepMetaDataCombi combi : steps) {
              combi.step.setUsingThreadPriorityManagment(false);
            }

            //
            // This is a single threaded version...
            //

            // Sort the steps from start to finish...
            //
            Collections.sort(steps, new Comparator<StepMetaDataCombi>() {
              @Override
              public int compare(StepMetaDataCombi c1, StepMetaDataCombi c2) {
                boolean c1BeforeC2 = transMeta.findPrevious(c2.stepMeta, c1.stepMeta);
                if (c1BeforeC2) {
                  return -1;
                } else {
                  return 1;
                }
              }
            });

            boolean[] stepDone = new boolean[steps.size()];
            int nrDone = 0;
            while (nrDone < steps.size() && !isStopped()) {
              for (int i = 0; i < steps.size() && !isStopped(); i++) {
                StepMetaDataCombi combi = steps.get(i);
                if (!stepDone[i]) {
                  // if (combi.step.canProcessOneRow() ||
                  // !combi.step.isRunning()) {
                  boolean cont = combi.step.processRow(combi.meta, combi.data);
                  if (!cont) {
                    stepDone[i] = true;
                    nrDone++;
                  }
                  // }
                }
              }
            }
          } catch (Exception e) {
            errors.addAndGet(1);
            log.logError("Error executing single threaded", e);
          } finally {
            for (int i = 0; i < steps.size(); i++) {
              StepMetaDataCombi combi = steps.get(i);
              combi.step.dispose(combi.meta, combi.data);
              combi.step.markStop();
            }
          }
        }
      }).start();
      break;

    case SingleThreaded:
      // Nothing to start here: the steps are driven externally by a single-threaded executor.
      //
      break;

    default:
      break;
  }
  ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationStart.id, this);

  heartbeat = startHeartbeat(getHeartbeatIntervalInSeconds());

  if (steps.isEmpty()) {
    fireTransFinishedListeners();
  }

  if (log.isDetailed()) {
    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationHasAllocated",
      String.valueOf(steps.size()), String.valueOf(rowsets.size())));
  }
}
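The javadoc above describes a specific call order: prepare the execution, attach RowListeners, then start the threads. Below is a minimal usage sketch of that order; it is not part of the pentaho-kettle source, the file path and step name are hypothetical, and the prepareExecution signature has varied across Kettle versions.

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.trans.Trans;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.RowAdapter;
import org.pentaho.di.trans.step.StepInterface;

public class RowListenerSketch {
  public static void main(String[] args) throws KettleException {
    KettleEnvironment.init();
    TransMeta transMeta = new TransMeta("/path/to/transformation.ktr"); // hypothetical path
    Trans trans = new Trans(transMeta);
    trans.prepareExecution(null); // in recent Kettle versions: trans.prepareExecution()

    // RowListeners must be attached between prepareExecution() and startThreads().
    StepInterface step = trans.findRunThread("Output"); // hypothetical step name
    if (step != null) {
      step.addRowListener(new RowAdapter() {
        @Override
        public void rowWrittenEvent(RowMetaInterface rowMeta, Object[] row) {
          // Inspect each row the step writes to its output row sets.
        }
      });
    }

    trans.startThreads();
    trans.waitUntilFinished();
  }
}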
Use of org.pentaho.di.core.logging.MetricsLogTable in project pentaho-kettle by pentaho.
From the class XmlExportHelper, method swapTables:
/**
 * When exporting transformation metadata we should not export the user's global parameters.
 * This method therefore clones each log table and clears all global parameters on the clone;
 * clones are used because the content of the real tables must not be changed.
 *
 * @param transMeta
 *          meta that contains the log tables to be adjusted before export
 */
public static void swapTables(TransMeta transMeta) {
  TransLogTable transLogTable = transMeta.getTransLogTable();
  if (transLogTable != null) {
    TransLogTable cloneTransLogTable = (TransLogTable) transLogTable.clone();
    cloneTransLogTable.setAllGlobalParametersToNull();
    transMeta.setTransLogTable(cloneTransLogTable);
  }

  StepLogTable stepLogTable = transMeta.getStepLogTable();
  if (stepLogTable != null) {
    StepLogTable cloneStepLogTable = (StepLogTable) stepLogTable.clone();
    cloneStepLogTable.setAllGlobalParametersToNull();
    transMeta.setStepLogTable(cloneStepLogTable);
  }

  PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
  if (performanceLogTable != null) {
    PerformanceLogTable clonePerformanceLogTable = (PerformanceLogTable) performanceLogTable.clone();
    clonePerformanceLogTable.setAllGlobalParametersToNull();
    transMeta.setPerformanceLogTable(clonePerformanceLogTable);
  }

  ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
  if (channelLogTable != null) {
    ChannelLogTable cloneChannelLogTable = (ChannelLogTable) channelLogTable.clone();
    cloneChannelLogTable.setAllGlobalParametersToNull();
    transMeta.setChannelLogTable(cloneChannelLogTable);
  }

  MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
  if (metricsLogTable != null) {
    MetricsLogTable cloneMetricsLogTable = (MetricsLogTable) metricsLogTable.clone();
    cloneMetricsLogTable.setAllGlobalParametersToNull();
    transMeta.setMetricsLogTable(cloneMetricsLogTable);
  }
}
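Since swapTables() installs the cleaned clones directly on the TransMeta, a caller that wants the live tables left intact has to remember the originals and restore them after serializing. The following is a hypothetical sketch of such a caller (not the actual Spoon code, imports omitted); this is the behavior the SpoonExportXmlTest above asserts:

public static String exportWithoutGlobalParameters(TransMeta transMeta) throws KettleException {
  // Remember the original tables before the clones are swapped in.
  TransLogTable originTransLogTable = transMeta.getTransLogTable();
  StepLogTable originStepLogTable = transMeta.getStepLogTable();
  // ... remember the performance, channel and metrics tables the same way ...

  XmlExportHelper.swapTables(transMeta);
  String exportXml = transMeta.getXML(); // the exported XML carries no user global parameters

  // Restore the originals so the in-memory TransMeta is unchanged.
  transMeta.setTransLogTable(originTransLogTable);
  transMeta.setStepLogTable(originStepLogTable);
  // ... restore the remaining tables ...
  return exportXml;
}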
Use of org.pentaho.di.core.logging.MetricsLogTable in project pentaho-kettle by pentaho.
From the class SpoonExportXmlTest, method initTables:
private void initTables(TransMeta transMeta) {
  TransLogTable transLogTable = TransLogTable.getDefault(mockedVariableSpace, mockedHasDbInterface, null);
  initTableWithSampleParams(transLogTable);
  transLogTable.setLogInterval(GLOBAL_PARAM);
  transLogTable.setLogSizeLimit(GLOBAL_PARAM);
  transMeta.setTransLogTable(transLogTable);

  StepLogTable stepLogTable = StepLogTable.getDefault(mockedVariableSpace, mockedHasDbInterface);
  initTableWithSampleParams(stepLogTable);
  transMeta.setStepLogTable(stepLogTable);

  PerformanceLogTable performanceLogTable = PerformanceLogTable.getDefault(mockedVariableSpace, mockedHasDbInterface);
  initTableWithSampleParams(performanceLogTable);
  performanceLogTable.setLogInterval(GLOBAL_PARAM);
  transMeta.setPerformanceLogTable(performanceLogTable);

  ChannelLogTable channelLogTable = ChannelLogTable.getDefault(mockedVariableSpace, mockedHasDbInterface);
  initTableWithSampleParams(channelLogTable);
  transMeta.setChannelLogTable(channelLogTable);

  MetricsLogTable metricsLogTable = MetricsLogTable.getDefault(mockedVariableSpace, mockedHasDbInterface);
  initTableWithSampleParams(metricsLogTable);
  transMeta.setMetricsLogTable(metricsLogTable);
}