Use of org.pentaho.di.job.entries.sql.JobEntrySQL in project pentaho-kettle by pentaho.
The class SpoonJobDelegate, method ripDB:
public JobMeta ripDB(final List<DatabaseMeta> databases, final String jobname, final RepositoryDirectoryInterface repdir, final String directory, final DatabaseMeta sourceDbInfo, final DatabaseMeta targetDbInfo, final String[] tables) {
  //
  // Create a new job...
  //
  final JobMeta jobMeta = new JobMeta();
  jobMeta.setDatabases(databases);
  jobMeta.setFilename(null);
  jobMeta.setName(jobname);
  if (spoon.getRepository() != null) {
    jobMeta.setRepositoryDirectory(repdir);
  } else {
    jobMeta.setFilename(Const.createFilename(directory, jobname, "." + Const.STRING_JOB_DEFAULT_EXT));
  }
  spoon.refreshTree();
  spoon.refreshGraph();
  final Point location = new Point(50, 50);
  // The start entry...
  final JobEntryCopy start = JobMeta.createStartEntry();
  start.setLocation(new Point(location.x, location.y));
  start.setDrawn();
  jobMeta.addJobEntry(start);
  // final Thread parentThread = Thread.currentThread();
  // Create a dialog with a progress indicator!
  IRunnableWithProgress op = monitor -> {
    try {
      // This is running in a new process: copy some KettleVariables info
      // LocalVariables.getInstance().createKettleVariables(Thread.currentThread().getName(),
      // parentThread.getName(), true);
      monitor.beginTask(BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.BuildingNewJob"), tables.length);
      monitor.worked(0);
      JobEntryCopy previous = start;
      // Loop over the table-names...
      for (int i = 0; i < tables.length && !monitor.isCanceled(); i++) {
        monitor.setTaskName(BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.ProcessingTable") + tables[i] + "]...");
        //
        // Create the new transformation...
        //
        String transname = BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.Transname1") + sourceDbInfo + "].[" + tables[i] + BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.Transname2") + targetDbInfo + "]";
        TransMeta transMeta = new TransMeta();
        if (repdir != null) {
          transMeta.setRepositoryDirectory(repdir);
        } else {
          transMeta.setFilename(Const.createFilename(directory, transname, "." + Const.STRING_TRANS_DEFAULT_EXT));
        }
        // Add the source & target db
        transMeta.addDatabase(sourceDbInfo);
        transMeta.addDatabase(targetDbInfo);
        //
        // Add a note
        //
        String note = BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.Note1") + tables[i] + BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.Note2") + sourceDbInfo + "]" + Const.CR;
        note += BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.Note3") + tables[i] + BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.Note4") + targetDbInfo + "]";
        NotePadMeta ni = new NotePadMeta(note, 150, 10, -1, -1);
        transMeta.addNote(ni);
        //
        // Add the TableInputMeta step...
        //
        String fromstepname = BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.FromStep.Name") + tables[i] + "]";
        TableInputMeta tii = new TableInputMeta();
        tii.setDefault();
        tii.setDatabaseMeta(sourceDbInfo);
        // It's already quoted!
        tii.setSQL("SELECT * FROM " + tables[i]);
        String fromstepid = PluginRegistry.getInstance().getPluginId(StepPluginType.class, tii);
        StepMeta fromstep = new StepMeta(fromstepid, fromstepname, tii);
        fromstep.setLocation(150, 100);
        fromstep.setDraw(true);
        fromstep.setDescription(BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.FromStep.Description") + tables[i] + BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.FromStep.Description2") + sourceDbInfo + "]");
        transMeta.addStep(fromstep);
        //
        // Add the TableOutputMeta step...
        //
        String tostepname = BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.ToStep.Name") + tables[i] + "]";
        TableOutputMeta toi = new TableOutputMeta();
        toi.setDatabaseMeta(targetDbInfo);
        toi.setTableName(tables[i]);
        toi.setCommitSize(100);
        toi.setTruncateTable(true);
        String tostepid = PluginRegistry.getInstance().getPluginId(StepPluginType.class, toi);
        StepMeta tostep = new StepMeta(tostepid, tostepname, toi);
        tostep.setLocation(500, 100);
        tostep.setDraw(true);
        tostep.setDescription(BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.ToStep.Description1") + tables[i] + BaseMessages.getString(PKG, "Spoon.RipDB.Monitor.ToStep.Description2") + targetDbInfo + "]");
        transMeta.addStep(tostep);
        //
        // Add a hop between the two steps...
        //
        TransHopMeta hi = new TransHopMeta(fromstep, tostep);
        transMeta.addTransHop(hi);
        //
        // Now we generate the SQL needed to run for this transformation.
        //
        // First set the limit to 1 to speed things up!
        String tmpSql = tii.getSQL();
        tii.setSQL(tii.getSQL() + sourceDbInfo.getLimitClause(1));
        String sql;
        try {
          sql = transMeta.getSQLStatementsString();
        } catch (KettleStepException kse) {
          throw new InvocationTargetException(kse, BaseMessages.getString(PKG, "Spoon.RipDB.Exception.ErrorGettingSQLFromTransformation") + transMeta + "] : " + kse.getMessage());
        }
        // remove the limit
        tii.setSQL(tmpSql);
        //
        // Now, save the transformation...
        //
        boolean ok;
        if (spoon.getRepository() != null) {
          ok = spoon.saveToRepository(transMeta, false);
        } else {
          ok = spoon.saveToFile(transMeta);
        }
        if (!ok) {
          throw new InvocationTargetException(new Exception(BaseMessages.getString(PKG, "Spoon.RipDB.Exception.UnableToSaveTransformationToRepository")), BaseMessages.getString(PKG, "Spoon.RipDB.Exception.UnableToSaveTransformationToRepository"));
        }
        // We can now continue with the population of the job...
        // //////////////////////////////////////////////////////////////////////
        location.x = 250;
        if (i > 0) {
          location.y += 100;
        }
        //
        if (!Utils.isEmpty(sql)) {
          String jesqlname = BaseMessages.getString(PKG, "Spoon.RipDB.JobEntrySQL.Name") + tables[i] + "]";
          JobEntrySQL jesql = new JobEntrySQL(jesqlname);
          jesql.setDatabase(targetDbInfo);
          jesql.setSQL(sql);
          jesql.setDescription(BaseMessages.getString(PKG, "Spoon.RipDB.JobEntrySQL.Description") + targetDbInfo + "].[" + tables[i] + "]");
          JobEntryCopy jecsql = new JobEntryCopy();
          jecsql.setEntry(jesql);
          jecsql.setLocation(new Point(location.x, location.y));
          jecsql.setDrawn();
          jobMeta.addJobEntry(jecsql);
          // Add the hop too...
          JobHopMeta jhi = new JobHopMeta(previous, jecsql);
          jobMeta.addJobHop(jhi);
          previous = jecsql;
        }
        //
        // Add the jobentry for the transformation too...
        //
        String jetransname = BaseMessages.getString(PKG, "Spoon.RipDB.JobEntryTrans.Name") + tables[i] + "]";
        JobEntryTrans jetrans = new JobEntryTrans(jetransname);
        jetrans.setTransname(transMeta.getName());
        if (spoon.getRepository() != null) {
          jetrans.setSpecificationMethod(ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME);
          jetrans.setDirectory(transMeta.getRepositoryDirectory().getPath());
        } else {
          jetrans.setSpecificationMethod(ObjectLocationSpecificationMethod.FILENAME);
          jetrans.setFileName(Const.createFilename("${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}", transMeta.getName(), "." + Const.STRING_TRANS_DEFAULT_EXT));
        }
        JobEntryCopy jectrans = new JobEntryCopy(jetrans);
        jectrans.setDescription(BaseMessages.getString(PKG, "Spoon.RipDB.JobEntryTrans.Description1") + Const.CR + BaseMessages.getString(PKG, "Spoon.RipDB.JobEntryTrans.Description2") + sourceDbInfo + "].[" + tables[i] + "]" + Const.CR + BaseMessages.getString(PKG, "Spoon.RipDB.JobEntryTrans.Description3") + targetDbInfo + "].[" + tables[i] + "]");
        jectrans.setDrawn();
        location.x += 400;
        jectrans.setLocation(new Point(location.x, location.y));
        jobMeta.addJobEntry(jectrans);
        // Add a hop between the last 2 job entries.
        JobHopMeta jhi2 = new JobHopMeta(previous, jectrans);
        jobMeta.addJobHop(jhi2);
        previous = jectrans;
        monitor.worked(1);
      }
      monitor.worked(100);
      monitor.done();
    } catch (Exception e) {
      new ErrorDialog(spoon.getShell(), "Error", "An unexpected error occurred!", e);
    }
  };
  try {
    ProgressMonitorDialog pmd = new ProgressMonitorDialog(spoon.getShell());
    pmd.run(false, true, op);
  } catch (InvocationTargetException | InterruptedException e) {
    new ErrorDialog(spoon.getShell(), BaseMessages.getString(PKG, "Spoon.ErrorDialog.RipDB.ErrorRippingTheDatabase.Title"), BaseMessages.getString(PKG, "Spoon.ErrorDialog.RipDB.ErrorRippingTheDatabase.Message"), e);
    return null;
  } finally {
    spoon.refreshGraph();
    spoon.refreshTree();
  }
  return jobMeta;
}
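The JobEntrySQL wiring used inside the loop above distills to a few lines: create the entry, attach the target connection and the generated SQL, wrap it in a JobEntryCopy, and hop it onto the previous entry. The fragment below is a minimal sketch, not part of the method; it assumes jobMeta, targetDbInfo, sql, previous, and location already exist exactly as they do inside ripDB.

// Hypothetical fragment; jobMeta, targetDbInfo, sql, previous and location
// are assumed to exist as in ripDB above.
JobEntrySQL jesql = new JobEntrySQL("create table [customers]"); // entry name is arbitrary
jesql.setDatabase(targetDbInfo); // connection the SQL will run against
jesql.setSQL(sql);               // e.g. the DDL returned by getSQLStatementsString()
JobEntryCopy jecsql = new JobEntryCopy(); // the placeable copy of the entry in the job graph
jecsql.setEntry(jesql);
jecsql.setLocation(new Point(location.x, location.y));
jecsql.setDrawn();
jobMeta.addJobEntry(jecsql);
// Chain it after the previous entry so the DDL runs before the copy transformation.
jobMeta.addJobHop(new JobHopMeta(previous, jecsql));
previous = jecsql;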
Use of org.pentaho.di.job.entries.sql.JobEntrySQL in project pentaho-kettle by pentaho.
The class JobGenerator, method generateSqlJob:
public JobMeta generateSqlJob() throws KettleException {
  DatabaseMeta databaseMeta = findTargetDatabaseMeta();
  Database db = new Database(Spoon.loggingObject, databaseMeta);
  try {
    db.connect();
    JobMeta jobMeta = new JobMeta();
    jobMeta.setName("Create tables for '" + ConceptUtil.getName(domain, locale) + "'");
    jobMeta.setDescription(ConceptUtil.getDescription(domain, locale));
    // Let's not forget to add the database connection
    //
    jobMeta.addDatabase(databaseMeta);
    Point location = new Point(GRAPH_LEFT, GRAPH_TOP);
    // Create a job entry
    //
    JobEntryCopy startEntry = JobMeta.createStartEntry();
    startEntry.setLocation(location.x, location.y);
    startEntry.setDrawn();
    jobMeta.addJobEntry(startEntry);
    JobEntryCopy lastEntry = startEntry;
    nextLocation(location);
    // Create one SQL entry for all the physically unique dimensions and facts.
    // We need to get a list of all known dimensions with physical table name.
    //
    List<LogicalTable> tables = getUniqueLogicalTables();
    for (LogicalTable logicalTable : tables) {
      String phTable = ConceptUtil.getString(logicalTable, DefaultIDs.LOGICAL_TABLE_PHYSICAL_TABLE_NAME);
      String tableName = ConceptUtil.getName(logicalTable, locale);
      String tableDescription = ConceptUtil.getDescription(logicalTable, locale);
      TableType tableType = ConceptUtil.getTableType(logicalTable);
      DimensionType dimensionType = ConceptUtil.getDimensionType(logicalTable);
      boolean isFact = tableType == TableType.FACT;
      boolean isDimension = tableType == TableType.DIMENSION;
      boolean isJunk = isDimension && dimensionType == DimensionType.JUNK_DIMENSION;
      JobEntrySQL sqlEntry = new JobEntrySQL(phTable);
      sqlEntry.setDatabase(databaseMeta);
      // Get the SQL for this table...
      //
      String schemaTable = databaseMeta.getQuotedSchemaTableCombination(null, phTable);
      String phKeyField = null;
      // The technical key is the first KEY field...
      //
      LogicalColumn keyColumn = null;
      if (isDimension) {
        keyColumn = ConceptUtil.findLogicalColumn(logicalTable, AttributeType.TECHNICAL_KEY);
      }
      if (keyColumn != null) {
        phKeyField = ConceptUtil.getString(keyColumn, DefaultIDs.LOGICAL_COLUMN_PHYSICAL_COLUMN_NAME);
      }
      // Get all the fields for the logical table...
      //
      RowMetaInterface fields = getRowForLogicalTable(databaseMeta, logicalTable);
      // Generate the required SQL to make this happen
      //
      String sql = db.getCreateTableStatement(schemaTable, fields, phKeyField, databaseMeta.supportsAutoinc() && !isFact, null, true);
      //
      if (keyColumn != null) {
        ValueMetaInterface keyValueMeta = getValueForLogicalColumn(databaseMeta, keyColumn);
        String indexName = databaseMeta.quoteField("IDX_" + phTable.replace(" ", "_").toUpperCase() + "_" + phKeyField.toUpperCase());
        String indexSql = db.getCreateIndexStatement(schemaTable, indexName, new String[] { keyValueMeta.getName() }, true, false, true, true);
        sql += Const.CR + indexSql;
      }
      //
      if (isFact) {
        List<LogicalColumn> fks = ConceptUtil.findLogicalColumns(logicalTable, AttributeType.TECHNICAL_KEY);
        for (LogicalColumn fk : fks) {
          ValueMetaInterface keyValueMeta = getValueForLogicalColumn(databaseMeta, fk);
          String phColumn = ConceptUtil.getString(fk, DefaultIDs.LOGICAL_COLUMN_PHYSICAL_COLUMN_NAME);
          if (!Utils.isEmpty(phColumn)) {
            String indexName = databaseMeta.quoteField("IDX_" + phTable.replace(" ", "_").toUpperCase() + "_" + phColumn.toUpperCase());
            String indexSql = db.getCreateIndexStatement(schemaTable, indexName, new String[] { keyValueMeta.getName() }, true, false, true, true);
            sql += Const.CR + indexSql;
          }
        }
      }
      //
      if (isDimension) {
        List<LogicalColumn> naturalKeys = ConceptUtil.findLogicalColumns(logicalTable, AttributeType.NATURAL_KEY);
        if (!naturalKeys.isEmpty()) {
          String indexName = databaseMeta.quoteField("IDX_" + phTable.replace(" ", "_").toUpperCase() + "_LOOKUP");
          String[] fieldNames = new String[naturalKeys.size()];
          for (int i = 0; i < fieldNames.length; i++) {
            ValueMetaInterface keyValueMeta = getValueForLogicalColumn(databaseMeta, naturalKeys.get(i));
            fieldNames[i] = keyValueMeta.getName();
          }
          String indexSql = db.getCreateIndexStatement(schemaTable, indexName, fieldNames, false, false, false, true);
          sql += Const.CR + indexSql;
        }
      }
      if (isJunk) {
        List<LogicalColumn> attributes = ConceptUtil.findLogicalColumns(logicalTable, AttributeType.ATTRIBUTE);
        if (!attributes.isEmpty()) {
          String indexName = databaseMeta.quoteField("IDX_" + phTable.replace(" ", "_").toUpperCase() + "_LOOKUP");
          String[] fieldNames = new String[attributes.size()];
          for (int i = 0; i < fieldNames.length; i++) {
            ValueMetaInterface attrValueMeta = getValueForLogicalColumn(databaseMeta, attributes.get(i));
            fieldNames[i] = attrValueMeta.getName();
          }
          String indexSql = db.getCreateIndexStatement(schemaTable, indexName, fieldNames, false, false, false, true);
          sql += Const.CR + indexSql;
        }
      }
      // If it's
      sqlEntry.setSQL(sql);
      sqlEntry.setDescription("Generated based on logical table '" + tableName + "'" + Const.CR + Const.CR + Const.NVL(tableDescription, ""));
      JobEntryCopy sqlCopy = new JobEntryCopy(sqlEntry);
      sqlCopy.setLocation(location.x, location.y);
      sqlCopy.setDrawn();
      nextLocation(location);
      jobMeta.addJobEntry(sqlCopy);
      // Hook up with the previous job entry too...
      //
      JobHopMeta jobHop = new JobHopMeta(lastEntry, sqlCopy);
      jobHop.setEnabled();
      jobHop.setConditional();
      jobHop.setEvaluation(true);
      if (lastEntry.isStart()) {
        jobHop.setUnconditional();
      }
      jobMeta.addJobHop(jobHop);
      lastEntry = sqlCopy;
    }
    return jobMeta;
  } catch (Exception e) {
    throw new KettleException("There was an error during the generation of the SQL job", e);
  } finally {
    if (db != null) {
      db.disconnect();
    }
  }
}
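The SQL handed to each JobEntrySQL above comes from Database.getCreateTableStatement and Database.getCreateIndexStatement. Below is a minimal sketch of that generation step using the same calls as generateSqlJob; the table name dim_customer, the key field customer_tk, and the pre-existing connected Database db and RowMetaInterface fields are hypothetical stand-ins.

// Hypothetical fragment; db is a connected Database and fields describes the
// table's columns, as in generateSqlJob above.
String schemaTable = databaseMeta.getQuotedSchemaTableCombination(null, "dim_customer");
// CREATE TABLE, with customer_tk as an auto-increment technical key where supported
String sql = db.getCreateTableStatement(schemaTable, fields, "customer_tk", databaseMeta.supportsAutoinc(), null, true);
// Append a unique index on the technical key to the same SQL entry
String indexName = databaseMeta.quoteField("IDX_DIM_CUSTOMER_CUSTOMER_TK");
sql += Const.CR + db.getCreateIndexStatement(schemaTable, indexName, new String[] { "customer_tk" }, true, false, true, true);
sqlEntry.setSQL(sql); // sqlEntry is a JobEntrySQL, created as in the loop above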