Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project by Pentaho.
From class Spoon, method delSlaveServer.
/**
 * Deletes the slave server that is currently selected in the tree.
 * The tree selection supplies both the owning metadata object and the
 * server itself; the actual removal is delegated to the two-argument overload.
 */
public void delSlaveServer() {
  final HasSlaveServersInterface owner = (HasSlaveServersInterface) selectionObjectParent;
  final SlaveServer selected = (SlaveServer) selectionObject;
  delSlaveServer( owner, selected );
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project by Pentaho.
From class Spoon, method loadSessionInformation.
/**
 * Re-binds every currently loaded job and transformation to the active repository
 * session (or to the locally read shared objects when {@code repository} is null).
 * For each loaded meta this clears the stale repository object ids, reloads shared
 * objects (databases, slave servers, partition/cluster schemas for transformations),
 * re-matches the previously known database connections by name, and points the
 * meta at the matching repository directory.
 *
 * @param repository       the newly connected repository, or {@code null} when no
 *                         repository is connected (shared objects are then read
 *                         from the local shared objects file)
 * @param saveOldDatabases when {@code true}, database connections that are not
 *                         present in the new repository are kept on the meta so
 *                         they get added to the repository on the next save
 */
private void loadSessionInformation(Repository repository, boolean saveOldDatabases) {
// ---- First pass: all loaded jobs ----
JobMeta[] jobMetas = getLoadedJobs();
for (JobMeta jobMeta : jobMetas) {
// Object ids belonged to the previous repository; they are meaningless now.
for (int i = 0; i < jobMeta.nrDatabases(); i++) {
jobMeta.getDatabase(i).setObjectId(null);
}
// Set for the existing job the ID at -1!
jobMeta.setObjectId(null);
// Keep track of the old databases for now.
List<DatabaseMeta> oldDatabases = jobMeta.getDatabases();
// In order to re-match the databases on name (not content), we
// need to load the databases from the new repository.
// NOTE: for purposes such as DEVELOP - TEST - PRODUCTION
// cycles.
// first clear the list of databases and slave servers
jobMeta.setDatabases(new ArrayList<DatabaseMeta>());
jobMeta.setSlaveServers(new ArrayList<SlaveServer>());
// Read them from the new repository.
try {
SharedObjects sharedObjects = repository != null ? repository.readJobMetaSharedObjects(jobMeta) : jobMeta.readSharedObjects();
sharedObjectsFileMap.put(sharedObjects.getFilename(), sharedObjects);
} catch (KettleException e) {
// Reading shared objects is best-effort: report the error and keep going
// with the remaining loaded jobs.
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Message", makeTabName(jobMeta, true)), e);
}
// Then we need to re-match the databases at save time...
for (DatabaseMeta oldDatabase : oldDatabases) {
DatabaseMeta newDatabase = DatabaseMeta.findDatabase(jobMeta.getDatabases(), oldDatabase.getName());
// If it exists, change the settings...
if (newDatabase != null) {
//
// A database connection with the same name exists in
// the new repository.
// Change the old connections to reflect the settings in
// the new repository
//
oldDatabase.setDatabaseInterface(newDatabase.getDatabaseInterface());
} else {
if (saveOldDatabases) {
//
// The old database is not present in the new
// repository: simply add it to the list.
// When the job gets saved, it will be added
// to the repository.
//
jobMeta.addDatabase(oldDatabase);
}
}
}
if (repository != null) {
try {
// For the existing job, change the directory too:
// Try to find the same directory in the new repository...
RepositoryDirectoryInterface rdi = repository.findDirectory(jobMeta.getRepositoryDirectory().getPath());
if (rdi != null && !rdi.getPath().equals("/")) {
jobMeta.setRepositoryDirectory(rdi);
} else {
// the root is the default!
jobMeta.setRepositoryDirectory(repository.loadRepositoryDirectoryTree());
}
} catch (KettleException ke) {
// Directory lookup failed: treat the repository connection as broken.
rep = null;
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Message", Const.CR), ke);
}
}
}
// ---- Second pass: all loaded transformations (mirrors the job handling,
// plus partition schemas and cluster schemas) ----
TransMeta[] transMetas = getLoadedTransformations();
for (TransMeta transMeta : transMetas) {
// Object ids belonged to the previous repository; they are meaningless now.
for (int i = 0; i < transMeta.nrDatabases(); i++) {
transMeta.getDatabase(i).setObjectId(null);
}
// Set for the existing transformation the ID at -1!
transMeta.setObjectId(null);
// Keep track of the old databases for now.
List<DatabaseMeta> oldDatabases = transMeta.getDatabases();
// In order to re-match the databases on name (not content), we
// need to load the databases from the new repository.
// NOTE: for purposes such as DEVELOP - TEST - PRODUCTION
// cycles.
// first clear the list of databases, partition schemas, slave
// servers, clusters
transMeta.setDatabases(new ArrayList<DatabaseMeta>());
transMeta.setPartitionSchemas(new ArrayList<PartitionSchema>());
transMeta.setSlaveServers(new ArrayList<SlaveServer>());
transMeta.setClusterSchemas(new ArrayList<ClusterSchema>());
// Read them from the new repository.
try {
SharedObjects sharedObjects = repository != null ? repository.readTransSharedObjects(transMeta) : transMeta.readSharedObjects();
sharedObjectsFileMap.put(sharedObjects.getFilename(), sharedObjects);
} catch (KettleException e) {
// Same best-effort policy as for jobs: report and continue.
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorReadingSharedObjects.Message", makeTabName(transMeta, true)), e);
}
// Then we need to re-match the databases at save time...
for (DatabaseMeta oldDatabase : oldDatabases) {
DatabaseMeta newDatabase = DatabaseMeta.findDatabase(transMeta.getDatabases(), oldDatabase.getName());
// If it exists, change the settings...
if (newDatabase != null) {
//
// A database connection with the same name exists in
// the new repository.
// Change the old connections to reflect the settings in
// the new repository
//
oldDatabase.setDatabaseInterface(newDatabase.getDatabaseInterface());
} else {
if (saveOldDatabases) {
//
// The old database is not present in the new
// repository: simply add it to the list.
// When the transformation gets saved, it will be added
// to the repository.
//
transMeta.addDatabase(oldDatabase);
}
}
}
if (repository != null) {
try {
// For the existing transformation, change the directory too:
// Try to find the same directory in the new repository...
RepositoryDirectoryInterface rdi = repository.findDirectory(transMeta.getRepositoryDirectory().getPath());
if (rdi != null && !rdi.getPath().equals("/")) {
transMeta.setRepositoryDirectory(rdi);
} else {
// the root is the default!
transMeta.setRepositoryDirectory(repository.loadRepositoryDirectoryTree());
}
} catch (KettleException ke) {
// Directory lookup failed: treat the repository connection as broken.
rep = null;
new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Title"), BaseMessages.getString(PKG, "Spoon.Dialog.ErrorConnectingRepository.Message", Const.CR), ke);
}
}
}
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project by Pentaho.
From class Spoon, method editSlaveServer.
/**
 * Opens the editor for the slave server that is currently selected in the
 * tree, delegating to the one-argument overload.
 */
public void editSlaveServer() {
  final SlaveServer selected = (SlaveServer) selectionObject;
  editSlaveServer( selected );
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project by Pentaho.
From class AbstractMeta, method addOrReplaceSlaveServer.
/**
 * Adds the given slave server to this meta if no equal server is present yet;
 * otherwise the already-registered server is updated in place from the given
 * one via {@code replaceMeta}. The meta is marked as changed either way.
 *
 * @param slaveServer The slave server to add, or to copy onto an existing entry.
 */
public void addOrReplaceSlaveServer(SlaveServer slaveServer) {
  final int existingIndex = slaveServers.indexOf(slaveServer);
  if (existingIndex >= 0) {
    // An equal server is already registered: overwrite its definition.
    slaveServers.get(existingIndex).replaceMeta(slaveServer);
  } else {
    slaveServers.add(slaveServer);
  }
  setChanged();
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project by Pentaho.
From class AbstractMeta, method clear.
/**
 * Resets this meta object to a pristine, unsaved state: clears the name and
 * filename, replaces the notes, databases and slave server lists with empty
 * ones, re-initializes the channel log table and attribute map, wipes the
 * undo history and changed flag, and resets the audit fields (creator,
 * modifier, dates) and the repository directory/description.
 */
public void clear() {
  setName(null);
  setFilename(null);
  notes = new ArrayList<NotePadMeta>();
  databases = new ArrayList<DatabaseMeta>();
  slaveServers = new ArrayList<SlaveServer>();
  // Initialize the channel log table once (the previous version assigned
  // this field twice with the identical default value).
  channelLogTable = ChannelLogTable.getDefault(this, this);
  attributesMap = new HashMap<String, Map<String, String>>();
  max_undo = Const.MAX_UNDO;
  clearUndo();
  clearChanged();
  setChanged(false);
  // Reset audit information to a freshly-created state.
  createdUser = "-";
  createdDate = new Date();
  modifiedUser = "-";
  modifiedDate = new Date();
  directory = new RepositoryDirectory();
  description = null;
  extendedDescription = null;
}
Aggregations