Use of org.pentaho.di.core.logging.LogTableInterface in project pentaho-kettle by pentaho.
The class Spoon, method saveJobAsXmlFile.
private boolean saveJobAsXmlFile(JobMeta jobMeta, boolean export) {
    // Remember the original log tables so they can be restored after the export.
    JobLogTable origJobLogTable = jobMeta.getJobLogTable();
    JobEntryLogTable origJobEntryLogTable = jobMeta.getJobEntryLogTable();
    ChannelLogTable origChannelLogTable = jobMeta.getChannelLogTable();
    List<LogTableInterface> origExtraLogTables = jobMeta.getExtraLogTables();
    try {
        // Temporarily swap the log tables for export-safe copies before saving.
        XmlExportHelper.swapTables(jobMeta);
        return saveXMLFile(jobMeta, export);
    } finally {
        // Restore the original log tables whether or not the save succeeded.
        jobMeta.setJobLogTable(origJobLogTable);
        jobMeta.setJobEntryLogTable(origJobEntryLogTable);
        jobMeta.setChannelLogTable(origChannelLogTable);
        jobMeta.setExtraLogTables(origExtraLogTables);
    }
}
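The shape of this method is the point: capture the original log tables, let XmlExportHelper.swapTables(jobMeta) mutate the JobMeta for export, and restore the originals in a finally block so in-memory state survives any save failure. Below is a minimal, self-contained sketch of that swap-and-restore pattern; the field and helper names are illustrative, not Kettle API, and the assumption that swapTables strips environment-specific logging settings is inferred from its use here.

// Hypothetical, simplified illustration of the swap-and-restore pattern above.
public final class SwapAndRestoreDemo {

    static String logTable = "PROD_LOG_TABLE"; // stands in for JobMeta state

    static boolean saveWithCleanLogTables() {
        String original = logTable;  // remember current state
        try {
            logTable = null;         // "swap": strip environment-specific detail
            return doSave();         // may throw
        } finally {
            logTable = original;     // always restore the in-memory state
        }
    }

    static boolean doSave() {
        System.out.println("saving with logTable=" + logTable);
        return true;
    }

    public static void main(String[] args) {
        saveWithCleanLogTables();
        System.out.println("after save, logTable=" + logTable); // restored
    }
}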
Use of org.pentaho.di.core.logging.LogTableInterface in project pentaho-kettle by pentaho.
The class TransHistoryDelegate, method getHistoryData.
private boolean getHistoryData(final int index, final Mode mode) {
    final int BATCH_SIZE = Props.getInstance().getLinesInHistoryFetchSize();
    boolean moreRows = false;
    TransHistoryLogTab model = models[index];
    LogTableInterface logTable = model.logTable;
    if (transMeta != null && !Utils.isEmpty(transMeta.getName()) && logTable.isDefined()) {
        Database database = null;
        try {
            DatabaseMeta logConnection = logTable.getDatabaseMeta();
            // Open a connection to the logging database.
            database = new Database(loggingObject, logConnection);
            database.shareVariablesWith(transMeta);
            database.connect();
            int queryLimit = 0;
            switch (mode) {
                case ALL:
                    model.batchCount = 0;
                    queryLimit = Props.getInstance().getMaxNrLinesInHistory();
                    break;
                case NEXT_BATCH:
                    model.batchCount++;
                    queryLimit = BATCH_SIZE * model.batchCount;
                    break;
                case INITIAL:
                    model.batchCount = 1;
                    queryLimit = BATCH_SIZE;
                    break;
                default:
                    break;
            }
            database.setQueryLimit(queryLimit);
            // First, we get the information out of the database table...
            String schemaTable = logTable.getQuotedSchemaTableCombination();
            StringBuilder sql = new StringBuilder("SELECT ");
            boolean first = true;
            for (LogTableField field : logTable.getFields()) {
                if (field.isEnabled() && field.isVisible()) {
                    if (!first) {
                        sql.append(", ");
                    }
                    first = false;
                    sql.append(logConnection.quoteField(field.getFieldName()));
                }
            }
            sql.append(" FROM ").append(schemaTable);
            RowMetaAndData params = new RowMetaAndData();
            // Do we need to limit the amount of data?
            LogTableField nameField = logTable.getNameField();
            LogTableField keyField = logTable.getKeyField();
            // CHECKSTYLE:LineLength:OFF
            if (nameField != null) {
                if (transMeta.isUsingAClusterSchema()) {
                    // Clustered runs log the name with a cluster suffix, hence the second LIKE pattern.
                    sql.append(" WHERE ").append(logConnection.quoteField(nameField.getFieldName())).append(" LIKE ?");
                    params.addValue(new ValueMetaString("transname_literal"), transMeta.getName());
                    sql.append(" OR ").append(logConnection.quoteField(nameField.getFieldName())).append(" LIKE ?");
                    params.addValue(new ValueMetaString("transname_cluster"), transMeta.getName() + " (%");
                } else {
                    sql.append(" WHERE ").append(logConnection.quoteField(nameField.getFieldName())).append(" = ?");
                    params.addValue(new ValueMetaString("transname_literal"), transMeta.getName());
                }
            }
            if (keyField != null && keyField.isEnabled()) {
                sql.append(" ORDER BY ").append(logConnection.quoteField(keyField.getFieldName())).append(" DESC");
            }
            ResultSet resultSet = database.openQuery(sql.toString(), params.getRowMeta(), params.getData());
            List<Object[]> rows = new ArrayList<Object[]>();
            Object[] rowData = database.getRow(resultSet);
            int rowsFetched = 1;
            while (rowData != null) {
                rows.add(rowData);
                rowData = database.getRow(resultSet);
                rowsFetched++;
            }
            // rowsFetched ends at rows.size() + 1; reaching the query limit
            // means more rows may be available in the next batch.
            if (rowsFetched >= queryLimit) {
                moreRows = true;
            }
            database.closeQuery(resultSet);
            model.rows = rows;
        } catch (Exception e) {
            LogChannel.GENERAL.logError("Unable to get rows of data from logging table " + model.logTable, e);
            model.rows = new ArrayList<Object[]>();
        } finally {
            if (database != null) {
                database.disconnect();
            }
        }
    } else {
        model.rows = new ArrayList<Object[]>();
    }
    return moreRows;
}
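The paging arithmetic above deserves a closer look: INITIAL fetches one batch, each NEXT_BATCH re-runs the query with a limit one batch wider, and ALL falls back to the global history cap; because rowsFetched ends at rows.size() + 1, reaching the limit is taken to mean more rows may exist. A standalone sketch of that arithmetic, with assumed constants standing in for the Props-driven sizes:

enum Mode { ALL, NEXT_BATCH, INITIAL }

final class HistoryPagingDemo {
    // Assumed stand-ins for Props.getLinesInHistoryFetchSize() and
    // Props.getMaxNrLinesInHistory().
    static final int BATCH_SIZE = 500;
    static final int MAX_LINES = 50000;

    static int queryLimit(Mode mode, int batchCount) {
        switch (mode) {
            case ALL:
                return MAX_LINES;               // one query, capped by the global maximum
            case NEXT_BATCH:
                return BATCH_SIZE * batchCount; // widen the window by one batch
            case INITIAL:
                return BATCH_SIZE;              // first page
            default:
                return 0;
        }
    }

    public static void main(String[] args) {
        System.out.println(queryLimit(Mode.INITIAL, 1));    // 500
        System.out.println(queryLimit(Mode.NEXT_BATCH, 3)); // 1500
        System.out.println(queryLimit(Mode.ALL, 0));        // 50000
    }
}

Note that NEXT_BATCH re-reads all earlier rows plus one more batch, which keeps the query simple at the cost of refetching; the ORDER BY ... DESC on the key field guarantees the newest log entries arrive first regardless of the limit.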
Use of org.pentaho.di.core.logging.LogTableInterface in project pentaho-kettle by pentaho.
The class TransHistoryDelegate, method clearLogTable.
/**
 * The user requested to clear the log table.<br>
 * Ask for confirmation first.
 */
private void clearLogTable(int index) {
    TransHistoryLogTab model = models[index];
    LogTableInterface logTable = model.logTable;
    if (logTable.isDefined()) {
        String schemaTable = logTable.getQuotedSchemaTableCombination();
        DatabaseMeta databaseMeta = logTable.getDatabaseMeta();
        MessageBox mb = new MessageBox(transGraph.getShell(), SWT.YES | SWT.NO | SWT.ICON_QUESTION);
        mb.setMessage(BaseMessages.getString(PKG, "TransGraph.Dialog.AreYouSureYouWantToRemoveAllLogEntries.Message", schemaTable));
        mb.setText(BaseMessages.getString(PKG, "TransGraph.Dialog.AreYouSureYouWantToRemoveAllLogEntries.Title"));
        if (mb.open() == SWT.YES) {
            Database database = new Database(loggingObject, databaseMeta);
            try {
                database.connect();
                database.truncateTable(schemaTable);
            } catch (Exception e) {
                new ErrorDialog(transGraph.getShell(),
                    BaseMessages.getString(PKG, "TransGraph.Dialog.ErrorClearningLoggingTable.Title"),
                    BaseMessages.getString(PKG, "TransGraph.Dialog.ErrorClearningLoggingTable.Message"), e);
            } finally {
                database.disconnect();
                refreshHistory();
                if (model.logDisplayText != null) {
                    model.logDisplayText.setText("");
                }
            }
        }
    }
}
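Stripped of the SWT plumbing, the flow is: confirm with the user, truncate the table, then always disconnect and refresh the view. A hedged sketch of the same flow using plain JDBC in place of Kettle's Database wrapper; the connection URL, table name, and helper methods are assumptions for illustration only.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

final class TruncateLogDemo {
    static void clearLogTable(String jdbcUrl, String schemaTable) {
        if (!confirm("Remove all log entries from " + schemaTable + "?")) {
            return; // user declined
        }
        try (Connection con = DriverManager.getConnection(jdbcUrl);
             Statement st = con.createStatement()) {
            // schemaTable comes from trusted metadata (an already-quoted
            // schema/table combination), not user input, so it is concatenated.
            st.executeUpdate("TRUNCATE TABLE " + schemaTable);
        } catch (Exception e) {
            System.err.println("Error clearing logging table: " + e.getMessage());
        }
        // Mirror the finally block above: refresh whether or not the truncate succeeded.
        refreshView();
    }

    static boolean confirm(String message) {
        System.out.println(message);
        return true; // stand-in for the SWT MessageBox
    }

    static void refreshView() {
        System.out.println("history view refreshed");
    }

    public static void main(String[] args) {
        clearLogTable("jdbc:h2:mem:demo", "LOG_SCHEMA.TRANS_LOG"); // illustrative URL and table
    }
}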
Use of org.pentaho.di.core.logging.LogTableInterface in project pentaho-kettle by pentaho.
The class JobHistoryDelegate, method getHistoryData.
private boolean getHistoryData(final int index, final Mode mode) {
    final int BATCH_SIZE = Props.getInstance().getLinesInHistoryFetchSize();
    boolean moreRows = false;
    JobHistoryLogTab model = models[index];
    LogTableInterface logTable = model.logTable;
    if (jobMeta != null && !Utils.isEmpty(jobMeta.getName()) && logTable.isDefined()) {
        Database database = null;
        try {
            DatabaseMeta logConnection = logTable.getDatabaseMeta();
            // Open a connection to the logging database.
            database = new Database(loggingObject, logConnection);
            database.shareVariablesWith(jobMeta);
            database.connect();
            int queryLimit = 0;
            switch (mode) {
                case ALL:
                    model.batchCount = 0;
                    queryLimit = Props.getInstance().getMaxNrLinesInHistory();
                    break;
                case NEXT_BATCH:
                    model.batchCount++;
                    queryLimit = BATCH_SIZE * model.batchCount;
                    break;
                case INITIAL:
                    model.batchCount = 1;
                    queryLimit = BATCH_SIZE;
                    break;
                default:
                    break;
            }
            database.setQueryLimit(queryLimit);
            // First, we get the information out of the database table...
            String schemaTable = logTable.getQuotedSchemaTableCombination();
            StringBuilder sql = new StringBuilder("SELECT ");
            boolean first = true;
            for (LogTableField field : logTable.getFields()) {
                if (field.isEnabled() && field.isVisible()) {
                    if (!first) {
                        sql.append(", ");
                    }
                    first = false;
                    sql.append(logConnection.quoteField(field.getFieldName()));
                }
            }
            sql.append(" FROM ").append(schemaTable);
            RowMetaAndData params = new RowMetaAndData();
            // Do we need to limit the amount of data?
            LogTableField nameField = logTable.getNameField();
            LogTableField keyField = logTable.getKeyField();
            if (nameField != null) {
                sql.append(" WHERE ").append(logConnection.quoteField(nameField.getFieldName())).append(" LIKE ?");
                // The parameter label is retained from the transformation variant.
                params.addValue(new ValueMetaString("transname_literal"), jobMeta.getName());
            }
            if (keyField != null && keyField.isEnabled()) {
                sql.append(" ORDER BY ").append(logConnection.quoteField(keyField.getFieldName())).append(" DESC");
            }
            ResultSet resultSet = database.openQuery(sql.toString(), params.getRowMeta(), params.getData());
            List<Object[]> rows = new ArrayList<Object[]>();
            Object[] rowData = database.getRow(resultSet);
            int rowsFetched = 1;
            while (rowData != null) {
                rows.add(rowData);
                rowData = database.getRow(resultSet);
                rowsFetched++;
            }
            // rowsFetched ends at rows.size() + 1; reaching the query limit
            // means more rows may be available in the next batch.
            if (rowsFetched >= queryLimit) {
                moreRows = true;
            }
            database.closeQuery(resultSet);
            model.rows = rows;
        } catch (Exception e) {
            LogChannel.GENERAL.logError("Unable to get rows of data from logging table " + model.logTable, e);
            model.rows = new ArrayList<Object[]>();
        } finally {
            if (database != null) {
                database.disconnect();
            }
        }
    } else {
        model.rows = new ArrayList<Object[]>();
    }
    return moreRows;
}
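This is the same query builder as the transformation variant; only the WHERE clause differs, since jobs have no clustered-name case and a single LIKE on the name column suffices. A self-contained sketch of the SELECT construction, with a fixed double-quote rule standing in for DatabaseMeta.quoteField(), which in Kettle quotes per database dialect:

import java.util.List;

final class LogQueryBuilderDemo {
    // Assumed quoting rule; Kettle delegates this to DatabaseMeta.quoteField().
    static String quote(String field) {
        return "\"" + field + "\"";
    }

    static String buildSql(String schemaTable, List<String> fields, String nameField) {
        StringBuilder sql = new StringBuilder("SELECT ");
        boolean first = true;
        for (String field : fields) {
            if (!first) {
                sql.append(", ");
            }
            first = false;
            sql.append(quote(field));
        }
        sql.append(" FROM ").append(schemaTable);
        if (nameField != null) {
            // The job name is bound later as a prepared-statement parameter.
            sql.append(" WHERE ").append(quote(nameField)).append(" LIKE ?");
        }
        return sql.toString();
    }

    public static void main(String[] args) {
        System.out.println(buildSql("LOG_SCHEMA.JOB_LOG",
                List.of("ID_JOB", "JOBNAME", "STATUS"), "JOBNAME"));
        // SELECT "ID_JOB", "JOBNAME", "STATUS" FROM LOG_SCHEMA.JOB_LOG WHERE "JOBNAME" LIKE ?
    }
}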
Use of org.pentaho.di.core.logging.LogTableInterface in project pentaho-kettle by pentaho.
The class JobHistoryDelegate, method clearLogTable.
/**
 * The user requested to clear the log table.<br>
 * Ask for confirmation first.
 */
private void clearLogTable(int index) {
    JobHistoryLogTab model = models[index];
    LogTableInterface logTable = model.logTable;
    if (logTable.isDefined()) {
        String schemaTable = logTable.getQuotedSchemaTableCombination();
        DatabaseMeta databaseMeta = logTable.getDatabaseMeta();
        MessageBox mb = new MessageBox(jobGraph.getShell(), SWT.YES | SWT.NO | SWT.ICON_QUESTION);
        // CHECKSTYLE:LineLength:OFF
        mb.setMessage(BaseMessages.getString(PKG, "JobGraph.Dialog.AreYouSureYouWantToRemoveAllLogEntries.Message", schemaTable));
        mb.setText(BaseMessages.getString(PKG, "JobGraph.Dialog.AreYouSureYouWantToRemoveAllLogEntries.Title"));
        if (mb.open() == SWT.YES) {
            Database database = new Database(loggingObject, databaseMeta);
            try {
                database.connect();
                database.truncateTable(schemaTable);
            } catch (Exception e) {
                new ErrorDialog(jobGraph.getShell(),
                    BaseMessages.getString(PKG, "JobGraph.Dialog.ErrorClearningLoggingTable.Title"),
                    BaseMessages.getString(PKG, "JobGraph.Dialog.ErrorClearningLoggingTable.Message"), e);
            } finally {
                database.disconnect();
                refreshHistory();
                if (model.logDisplayText != null) {
                    model.logDisplayText.setText("");
                }
            }
        }
    }
}