Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
The class RepositoryExplorerDialog, method editSlaveServer:
public void editSlaveServer(String slaveName) {
  try {
    ObjectId id = rep.getSlaveID(slaveName);
    // Load the last version of the slave server definition.
    SlaveServer slaveServer = rep.loadSlaveServer(id, null);
    SlaveServerDialog dd = new SlaveServerDialog(shell, slaveServer);
    if (dd.open()) {
      rep.insertLogEntry("Updating slave server '" + slaveServer.getName() + "'");
      rep.save(slaveServer, Const.VERSION_COMMENT_EDIT_VERSION, null);
      // If the slave server was renamed, refresh the tree to show the new name.
      if (!slaveName.equalsIgnoreCase(slaveServer.getName())) {
        refreshTree();
      }
    }
  } catch (KettleException e) {
    new ErrorDialog(shell,
      BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Edit.UnexpectedError.Title"),
      BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Edit.UnexpectedError.Message") + slaveName + "]", e);
  }
}
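The same load-modify-save round trip can be driven without the dialog. A minimal sketch, assuming a connected Repository instance rep; the slave name and version comment are illustrative:

  // Rename a slave server directly through the Repository API.
  ObjectId id = rep.getSlaveID("slave-1");
  SlaveServer slave = rep.loadSlaveServer(id, null); // null = load the latest revision
  slave.setName("slave-1-renamed");
  rep.save(slave, "Renamed slave server", null);     // null progress monitor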
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
The class StreamToJobNodeConverter, method saveSharedObjects:
public void saveSharedObjects(final Repository repo, final RepositoryElementInterface element) throws KettleException {
  JobMeta jobMeta = (JobMeta) element;
  // First store the databases and other depending objects in the job.
  List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
  int dbIndex = 0;
  int indexToReplace = 0;
  boolean updateMeta = false;
  for (DatabaseMeta databaseMeta : jobMeta.getDatabases()) {
    if (!databaseNames.contains(databaseMeta.getName())) {
      if (databaseMeta.getObjectId() == null || !StringUtils.isEmpty(databaseMeta.getHostname())) {
        repo.save(databaseMeta, null, null);
      }
    } else if (databaseMeta.getObjectId() == null) {
      // Remember the index so the object id can be refreshed below.
      // (Note: only the last matching database is tracked.)
      indexToReplace = dbIndex;
      updateMeta = true;
    }
    dbIndex++;
  }
  // Make sure to update the object id in the jobMeta db collection.
  if (updateMeta) {
    DatabaseMeta dbMetaToReplace = jobMeta.getDatabase(indexToReplace);
    dbMetaToReplace.setObjectId(repo.getDatabaseID(dbMetaToReplace.getName()));
    jobMeta.removeDatabase(indexToReplace);
    jobMeta.addDatabase(dbMetaToReplace);
  }
  // Store any slave servers that are not in the repository yet.
  for (SlaveServer slaveServer : jobMeta.getSlaveServers()) {
    if (slaveServer.getObjectId() == null) {
      repo.save(slaveServer, null, null);
    }
  }
}
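The remove-and-re-add step is how this converter refreshes the repository object id of a single shared database. A standalone sketch of the same pattern, assuming jobMeta and a connected repo are in scope and the database sits at a known index idx (the index is illustrative):

  // Refresh the repository object id of the database at index idx.
  DatabaseMeta db = jobMeta.getDatabase(idx);
  db.setObjectId(repo.getDatabaseID(db.getName())); // look up the stored id by name
  jobMeta.removeDatabase(idx);
  jobMeta.addDatabase(db); // re-adding puts the refreshed copy back into the collection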
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
The class StreamToTransNodeConverter, method saveSharedObjects:
private void saveSharedObjects(final Repository repo, final RepositoryElementInterface element) throws KettleException {
  TransMeta transMeta = (TransMeta) element;
  // First store the databases and other depending objects in the transformation.
  List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
  int dbIndex = 0;
  boolean updateMeta = false;
  List<Integer> transMetaDatabasesToUpdate = new ArrayList<Integer>();
  for (DatabaseMeta databaseMeta : transMeta.getDatabases()) {
    if (!databaseNames.contains(databaseMeta.getName())) {
      if (databaseMeta.getObjectId() == null || !StringUtils.isEmpty(databaseMeta.getHostname())) {
        repo.save(databaseMeta, null, null);
      }
    } else if (databaseMeta.getObjectId() == null) {
      // Add this index to the list so the object ids can be updated later.
      transMetaDatabasesToUpdate.add(dbIndex);
      updateMeta = true;
    }
    dbIndex++;
  }
  if (updateMeta) {
    // Make sure to update the object ids in the transMeta db collection.
    for (Integer databaseMetaIndex : transMetaDatabasesToUpdate) {
      transMeta.getDatabase(databaseMetaIndex).setObjectId(repo.getDatabaseID(transMeta.getDatabase(databaseMetaIndex).getName()));
    }
  }
  // Store slave servers that are new or have unsaved changes.
  for (SlaveServer slaveServer : transMeta.getSlaveServers()) {
    if (slaveServer.hasChanged() || slaveServer.getObjectId() == null) {
      repo.save(slaveServer, null, null);
    }
  }
  // Store cluster schemas that are new or have unsaved changes.
  for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
    if (clusterSchema.hasChanged() || clusterSchema.getObjectId() == null) {
      repo.save(clusterSchema, null, null);
    }
  }
  // Store partition schemas that are new or have unsaved changes.
  for (PartitionSchema partitionSchema : transMeta.getPartitionSchemas()) {
    if (partitionSchema.hasChanged() || partitionSchema.getObjectId() == null) {
      repo.save(partitionSchema, null, null);
    }
  }
}
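The three trailing loops share one guard: persist a shared object only when it is new to the repository (no ObjectId) or locally dirty (hasChanged()). A sketch of that guard factored into a helper for slave servers; the helper name is my own, not part of the converter:

  // Hypothetical helper: save a slave server only when it is new or changed.
  private static void saveSlaveIfNeeded(Repository repo, SlaveServer slave) throws KettleException {
    if (slave.hasChanged() || slave.getObjectId() == null) {
      repo.save(slave, null, null); // null version comment, null progress monitor
    }
  }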
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
The class ClusterDelegate, method elementToDataNode:
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;
  DataNode rootNode = new DataNode(NODE_ROOT);
  // Save the scalar properties...
  rootNode.setProperty(PROP_BASE_PORT, clusterSchema.getBasePort());
  rootNode.setProperty(PROP_SOCKETS_BUFFER_SIZE, clusterSchema.getSocketsBufferSize());
  rootNode.setProperty(PROP_SOCKETS_FLUSH_INTERVAL, clusterSchema.getSocketsFlushInterval());
  rootNode.setProperty(PROP_SOCKETS_COMPRESSED, clusterSchema.isSocketsCompressed());
  rootNode.setProperty(PROP_DYNAMIC, clusterSchema.isDynamic());
  DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
  // Also save the slave server references used by this schema.
  attrNode.setProperty(PROP_NB_SLAVE_SERVERS, clusterSchema.getSlaveServers().size());
  for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
    SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
    DataNodeRef slaveNodeRef = new DataNodeRef(slaveServer.getObjectId().getId());
    // Save the slave server by reference; this makes it impossible to accidentally
    // delete a slave that is still in use.
    attrNode.setProperty(String.valueOf(i), slaveNodeRef);
  }
  return rootNode;
}
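Reading the schema back inverts these writes. A rough sketch of the read side, assuming the usual DataNode/DataProperty accessors (getNode, getProperty, getLong, getRef); the accessor names are an assumption, and resolving each reference back to a SlaveServer is left out:

  // Sketch: walk the attributes node and recover the slave server references.
  DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);
  long nbSlaves = attrNode.getProperty(PROP_NB_SLAVE_SERVERS).getLong();
  for (int i = 0; i < nbSlaves; i++) {
    DataNodeRef ref = attrNode.getProperty(String.valueOf(i)).getRef(); // accessor assumed
    // ... look up the SlaveServer whose node id matches ref ...
  }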
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
The class JobDelegate, method loadSharedObjects:
// ~ Methods =========================================================================================================
@SuppressWarnings("unchecked")
public SharedObjects loadSharedObjects(final RepositoryElementInterface element, final Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType) throws KettleException {
  JobMeta jobMeta = (JobMeta) element;
  jobMeta.setSharedObjects(jobMeta.readSharedObjects());
  // Repository objects take priority, so let's overwrite the local copies...
  readDatabases(jobMeta, true, (List<DatabaseMeta>) sharedObjectsByType.get(RepositoryObjectType.DATABASE));
  readSlaves(jobMeta, true, (List<SlaveServer>) sharedObjectsByType.get(RepositoryObjectType.SLAVE_SERVER));
  return jobMeta.getSharedObjects();
}
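readSlaves merges the repository's slave list into the JobMeta, and the overwrite flag lets the repository copy win on a name clash. A sketch of that merge; the body is my own reconstruction, not the actual JobDelegate code:

  // Sketch (reconstruction): merge repository slave servers into the job metadata.
  protected void readSlaves(JobMeta jobMeta, boolean overwrite, List<SlaveServer> slaveServers) {
    for (SlaveServer slaveServer : slaveServers) {
      SlaveServer existing = jobMeta.findSlaveServer(slaveServer.getName());
      if (existing == null) {
        jobMeta.getSlaveServers().add(slaveServer);
      } else if (overwrite) {
        existing.replaceMeta(slaveServer); // the repository copy takes priority
      }
    }
  }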