Usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project (by pentaho): class Spoon, method setMenu.
/**
 * Builds and shows the context (popup) menu that matches the current selection in the
 * navigation tree.
 * <p>
 * Exactly one selected element is supported; for multi-selection the method returns
 * without changing the menu. The selection and its parent are remembered in the
 * {@code selectionObject} / {@code selectionObjectParent} fields so that menu action
 * handlers (e.g. {@link #shareObject(String)}, {@link #delPartitionSchema()}) can pick
 * them up later.
 *
 * @param tree the SWT tree whose selection drives the popup menu
 */
private synchronized void setMenu(Tree tree) {
  TreeSelection[] objects = getTreeObjects(tree);
  if (objects.length != 1) {
    // Multi-selection is not yet supported; we can do this later when the OSX bug goes away.
    return;
  }
  TreeSelection object = objects[0];
  selectionObject = object.getSelection();
  Object selection = selectionObject;
  selectionObjectParent = object.getParent();
  // Not clicked on a real object: category nodes carry a Class token instead of an instance.
  XulMenupopup spoonMenu = null;
  if (selection instanceof Class<?>) {
    if (selection.equals(TransMeta.class)) {
      // New transformation
      spoonMenu = (XulMenupopup) menuMap.get("trans-class");
    } else if (selection.equals(JobMeta.class)) {
      // New job
      spoonMenu = (XulMenupopup) menuMap.get("job-class");
    } else if (selection.equals(TransHopMeta.class)) {
      // New hop
      spoonMenu = (XulMenupopup) menuMap.get("trans-hop-class");
    } else if (selection.equals(DatabaseMeta.class)) {
      spoonMenu = (XulMenupopup) menuMap.get("database-class");
    } else if (selection.equals(PartitionSchema.class)) {
      // New partition schema
      spoonMenu = (XulMenupopup) menuMap.get("partition-schema-class");
    } else if (selection.equals(ClusterSchema.class)) {
      spoonMenu = (XulMenupopup) menuMap.get("cluster-schema-class");
    } else if (selection.equals(SlaveServer.class)) {
      spoonMenu = (XulMenupopup) menuMap.get("slave-cluster-class");
    } else {
      spoonMenu = null;
    }
  } else {
    if (selection instanceof TransMeta) {
      spoonMenu = (XulMenupopup) menuMap.get("trans-inst");
    } else if (selection instanceof JobMeta) {
      spoonMenu = (XulMenupopup) menuMap.get("job-inst");
    } else if (selection instanceof PluginInterface) {
      spoonMenu = (XulMenupopup) menuMap.get("step-plugin");
    } else if (selection instanceof DatabaseMeta) {
      spoonMenu = (XulMenupopup) menuMap.get("database-inst");
      adjustDatabaseMenuItems((DatabaseMeta) selection);
    } else if (selection instanceof StepMeta) {
      spoonMenu = (XulMenupopup) menuMap.get("step-inst");
    } else if (selection instanceof JobEntryCopy) {
      spoonMenu = (XulMenupopup) menuMap.get("job-entry-copy-inst");
    } else if (selection instanceof TransHopMeta) {
      spoonMenu = (XulMenupopup) menuMap.get("trans-hop-inst");
    } else if (selection instanceof PartitionSchema) {
      spoonMenu = (XulMenupopup) menuMap.get("partition-schema-inst");
    } else if (selection instanceof ClusterSchema) {
      spoonMenu = (XulMenupopup) menuMap.get("cluster-schema-inst");
    } else if (selection instanceof SlaveServer) {
      spoonMenu = (XulMenupopup) menuMap.get("slave-server-inst");
    }
  }
  if (spoonMenu != null) {
    ConstUI.displayMenu(spoonMenu, tree);
  } else {
    tree.setMenu(null);
  }
  createPopUpMenuExtension();
}

/**
 * Adjusts the per-connection popup menu items for the given database connection:
 * disables "explore" for non-explorable connection types (e.g. SAP ERP), and sets the
 * labels of the "clear cache" and "share/unshare" items to reflect this connection.
 *
 * @param databaseMeta the selected database connection
 */
private void adjustDatabaseMenuItems(final DatabaseMeta databaseMeta) {
  // Disable exploring for connection types that do not support it (e.g. SAP ERP).
  XulMenuitem item = (XulMenuitem) mainSpoonContainer.getDocumentRoot().getElementById("database-inst-explore");
  if (item != null) {
    item.setDisabled(!databaseMeta.isExplorable());
  }
  item = (XulMenuitem) mainSpoonContainer.getDocumentRoot().getElementById("database-inst-clear-cache");
  if (item != null) {
    // Label reads "Clear DB cache of " + connection name.
    item.setLabel(BaseMessages.getString(PKG, "Spoon.Menu.Popup.CONNECTIONS.ClearDBCache") + // Clear
      databaseMeta.getName());
  }
  item = (XulMenuitem) mainSpoonContainer.getDocumentRoot().getElementById("database-inst-share");
  if (item != null) {
    // Toggle the label between Share / UnShare depending on the current shared state.
    if (databaseMeta.isShared()) {
      item.setLabel(BaseMessages.getString(PKG, "Spoon.Menu.Popup.CONNECTIONS.UnShare"));
    } else {
      item.setLabel(BaseMessages.getString(PKG, "Spoon.Menu.Popup.CONNECTIONS.Share"));
    }
  }
}
Usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project (by pentaho): class Spoon, method shareObject.
/**
 * Toggles or applies sharing for the object currently selected in the tree, based on the
 * id of the menu item that was clicked. The database case toggles between shared and
 * un-shared; all other object types are simply shared. Finishes by reloading the shared
 * objects so all open graphs see the change.
 *
 * @param id the XUL menu item id identifying which object type to share
 */
public void shareObject(String id) {
  if ("database-inst-share".equals(id)) {
    // Databases toggle: un-share if already shared, otherwise share.
    final DatabaseMeta databaseMeta = (DatabaseMeta) selectionObject;
    if (databaseMeta.isShared()) {
      unShareObject(databaseMeta);
    } else {
      shareObject(databaseMeta);
    }
  } else if ("step-inst-share".equals(id)) {
    shareObject((StepMeta) selectionObject);
  } else if ("partition-schema-inst-share".equals(id)) {
    shareObject((PartitionSchema) selectionObject);
  } else if ("cluster-schema-inst-share".equals(id)) {
    shareObject((ClusterSchema) selectionObject);
  } else if ("slave-server-inst-share".equals(id)) {
    shareObject((SlaveServer) selectionObject);
  }
  sharedObjectSyncUtil.reloadSharedObjects();
}
Usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project (by pentaho): class Spoon, method pickupPartitionSchemas.
/**
 * Collects the partition schemas available for the given transformation. When connected
 * to a repository, all partition schemas are loaded from it; otherwise the schemas
 * already attached to the transformation are returned.
 *
 * @param transMeta the transformation used as a fallback source of schemas
 * @return the available partition schemas
 * @throws KettleException if reading from the repository fails
 */
private List<PartitionSchema> pickupPartitionSchemas(TransMeta transMeta) throws KettleException {
  // No repository connection: fall back to the schemas the transformation already carries.
  if (rep == null) {
    return transMeta.getPartitionSchemas();
  }
  ObjectId[] ids = rep.getPartitionSchemaIDs(false);
  List<PartitionSchema> schemas = new ArrayList<>(ids.length);
  for (ObjectId objectId : ids) {
    schemas.add(rep.loadPartitionSchema(objectId, null));
  }
  return schemas;
}
Usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project (by pentaho): class Spoon, method loadSessionInformation.
/**
 * Re-binds every currently loaded job and transformation to the given repository session.
 * <p>
 * For each loaded meta, object ids are cleared (so the meta is treated as unsaved in the
 * new repository), shared objects (databases, slave servers, and for transformations also
 * partition and cluster schemas) are re-read from the new repository (or from the local
 * shared-objects file when {@code repository} is null), old database connections are
 * re-matched by name, and the repository directory is re-resolved.
 *
 * @param repository       the repository to bind to; may be null (local shared objects are used)
 * @param saveOldDatabases when true, connections not present in the new repository are kept
 *                         on the meta so they get saved into the repository on next save
 */
private void loadSessionInformation(Repository repository, boolean saveOldDatabases) {
JobMeta[] jobMetas = getLoadedJobs();
for (JobMeta jobMeta : jobMetas) {
// Clear the per-database object ids: in the new repository these are unknown.
for (int i = 0; i < jobMeta.nrDatabases(); i++) {
jobMeta.getDatabase(i).setObjectId(null);
}
// Set for the existing job the ID at -1!
jobMeta.setObjectId(null);
// Keep track of the old databases for now.
List<DatabaseMeta> oldDatabases = jobMeta.getDatabases();
// In order to re-match the databases on name (not content), we
// need to load the databases from the new repository.
// NOTE: for purposes such as DEVELOP - TEST - PRODUCTION
// cycles.
// first clear the list of databases and slave servers
jobMeta.setDatabases(new ArrayList<DatabaseMeta>());
jobMeta.setSlaveServers(new ArrayList<SlaveServer>());
// Read them from the new repository.
try {
SharedObjects sharedObjects = repository != null ? repository.readJobMetaSharedObjects(jobMeta) : jobMeta.readSharedObjects();
sharedObjectsFileMap.put(sharedObjects.getFilename(), sharedObjects);
} catch (KettleException e) {
// Best-effort: report the failure and continue with whatever shared objects we have.
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Message", makeTabName(jobMeta, true)), e);
}
// Then we need to re-match the databases at save time...
for (DatabaseMeta oldDatabase : oldDatabases) {
DatabaseMeta newDatabase = DatabaseMeta.findDatabase(jobMeta.getDatabases(), oldDatabase.getName());
// If it exists, change the settings...
if (newDatabase != null) {
//
// A database connection with the same name exists in
// the new repository.
// Change the old connections to reflect the settings in
// the new repository
//
oldDatabase.setDatabaseInterface(newDatabase.getDatabaseInterface());
} else {
if (saveOldDatabases) {
//
// The old database is not present in the new
// repository: simply add it to the list.
// When the job gets saved, it will be added
// to the repository.
//
jobMeta.addDatabase(oldDatabase);
}
}
}
if (repository != null) {
try {
// For the existing job, change the directory too:
// Try to find the same directory in the new repository...
RepositoryDirectoryInterface rdi = repository.findDirectory(jobMeta.getRepositoryDirectory().getPath());
if (rdi != null && !rdi.getPath().equals("/")) {
jobMeta.setRepositoryDirectory(rdi);
} else {
// the root is the default!
jobMeta.setRepositoryDirectory(repository.loadRepositoryDirectoryTree());
}
} catch (KettleException ke) {
// NOTE(review): a directory lookup failure drops the repository connection (rep = null)
// for the whole session, not just this job — intentional per the error dialog below.
rep = null;
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Message", Const.CR), ke);
}
}
}
// Same procedure for transformations; these additionally carry partition and cluster schemas.
TransMeta[] transMetas = getLoadedTransformations();
for (TransMeta transMeta : transMetas) {
for (int i = 0; i < transMeta.nrDatabases(); i++) {
transMeta.getDatabase(i).setObjectId(null);
}
// Set for the existing transformation the ID at -1!
transMeta.setObjectId(null);
// Keep track of the old databases for now.
List<DatabaseMeta> oldDatabases = transMeta.getDatabases();
// In order to re-match the databases on name (not content), we
// need to load the databases from the new repository.
// NOTE: for purposes such as DEVELOP - TEST - PRODUCTION
// cycles.
// first clear the list of databases, partition schemas, slave
// servers, clusters
transMeta.setDatabases(new ArrayList<DatabaseMeta>());
transMeta.setPartitionSchemas(new ArrayList<PartitionSchema>());
transMeta.setSlaveServers(new ArrayList<SlaveServer>());
transMeta.setClusterSchemas(new ArrayList<ClusterSchema>());
// Read them from the new repository.
try {
SharedObjects sharedObjects = repository != null ? repository.readTransSharedObjects(transMeta) : transMeta.readSharedObjects();
sharedObjectsFileMap.put(sharedObjects.getFilename(), sharedObjects);
} catch (KettleException e) {
// Best-effort: report the failure and continue with whatever shared objects we have.
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Message", makeTabName(transMeta, true)), e);
}
// Then we need to re-match the databases at save time...
for (DatabaseMeta oldDatabase : oldDatabases) {
DatabaseMeta newDatabase = DatabaseMeta.findDatabase(transMeta.getDatabases(), oldDatabase.getName());
// If it exists, change the settings...
if (newDatabase != null) {
//
// A database connection with the same name exists in
// the new repository.
// Change the old connections to reflect the settings in
// the new repository
//
oldDatabase.setDatabaseInterface(newDatabase.getDatabaseInterface());
} else {
if (saveOldDatabases) {
//
// The old database is not present in the new
// repository: simply add it to the list.
// When the transformation gets saved, it will be added
// to the repository.
//
transMeta.addDatabase(oldDatabase);
}
}
}
if (repository != null) {
try {
// For the existing transformation, change the directory too:
// Try to find the same directory in the new repository...
RepositoryDirectoryInterface rdi = repository.findDirectory(transMeta.getRepositoryDirectory().getPath());
if (rdi != null && !rdi.getPath().equals("/")) {
transMeta.setRepositoryDirectory(rdi);
} else {
// the root is the default!
transMeta.setRepositoryDirectory(repository.loadRepositoryDirectoryTree());
}
} catch (KettleException ke) {
// NOTE(review): same session-wide repository disconnect as in the job loop above.
rep = null;
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Message", Const.CR), ke);
}
}
}
}
Usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project (by pentaho): class Spoon, method delPartitionSchema.
/**
 * Deletes the partition schema currently selected in the tree. Delegates to the
 * two-argument variant, using the selection and its parent transformation that
 * were captured by {@code setMenu(Tree)}.
 */
public void delPartitionSchema() {
  delPartitionSchema((TransMeta) selectionObjectParent, (PartitionSchema) selectionObject);
}
Aggregations