Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class RepositoryExplorerDialog, method newCluster:
public void newCluster() {
  try {
    ClusterSchema cluster = new ClusterSchema();
    ClusterSchemaDialog dd = new ClusterSchemaDialog(shell, cluster, rep.getSlaveServers());
    if (dd.open()) {
      // See if a cluster schema with this name already exists...
      ObjectId idCluster = rep.getClusterID(cluster.getName());
      if (idCluster == null) {
        rep.insertLogEntry("Creating new cluster '" + cluster.getName() + "'");
        rep.save(cluster, Const.VERSION_COMMENT_INITIAL_VERSION, null);
      } else {
        MessageBox mb = new MessageBox(shell, SWT.ICON_ERROR | SWT.OK);
        mb.setMessage(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.AlreadyExists.Message"));
        mb.setText(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.AlreadyExists.Title"));
        mb.open();
      }
      // Refresh the tree...
      refreshTree();
    }
  } catch (KettleException e) {
    new ErrorDialog(shell, BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.UnexpectedError.Title"),
        BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.UnexpectedError.Message"), e);
  }
}
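For orientation, here is a minimal sketch of how such a schema might be populated before the save. The setter names match those exercised by ClusterDelegate further down this page, but every value is made up:

// Hypothetical values; the setters mirror the properties ClusterDelegate persists below.
ClusterSchema cluster = new ClusterSchema();
cluster.setName("local-cluster"); // the name rep.getClusterID() is checked against
cluster.setBasePort("40000"); // ports and buffer sizes are stored as strings
cluster.setSocketsBufferSize("2000");
cluster.setSocketsFlushInterval("5000");
cluster.setSocketsCompressed(true);
cluster.setDynamic(false);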
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class StreamToTransNodeConverter, method saveSharedObjects:
private void saveSharedObjects(final Repository repo, final RepositoryElementInterface element) throws KettleException {
  TransMeta transMeta = (TransMeta) element;
  // First store the databases and other depending objects in the transformation.
  List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
  int dbIndex = 0;
  boolean updateMeta = false;
  List<Integer> transMetaDatabasesToUpdate = new ArrayList<Integer>();
  for (DatabaseMeta databaseMeta : transMeta.getDatabases()) {
    if (!databaseNames.contains(databaseMeta.getName())) {
      if (databaseMeta.getObjectId() == null || !StringUtils.isEmpty(databaseMeta.getHostname())) {
        repo.save(databaseMeta, null, null);
      }
    } else if (databaseMeta.getObjectId() == null) {
      // Add this index to the list so the object ids can be updated later.
      transMetaDatabasesToUpdate.add(dbIndex);
      updateMeta = true;
    }
    dbIndex++;
  }
  if (updateMeta) {
    // Make sure to update the object ids in the transMeta database collection.
    for (Integer databaseMetaIndex : transMetaDatabasesToUpdate) {
      transMeta.getDatabase(databaseMetaIndex).setObjectId(repo.getDatabaseID(transMeta.getDatabase(databaseMetaIndex).getName()));
    }
  }
  // Then the slave servers...
  for (SlaveServer slaveServer : transMeta.getSlaveServers()) {
    if (slaveServer.hasChanged() || slaveServer.getObjectId() == null) {
      repo.save(slaveServer, null, null);
    }
  }
  // The cluster schemas...
  for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
    if (clusterSchema.hasChanged() || clusterSchema.getObjectId() == null) {
      repo.save(clusterSchema, null, null);
    }
  }
  // And the partition schemas.
  for (PartitionSchema partitionSchema : transMeta.getPartitionSchemas()) {
    if (partitionSchema.hasChanged() || partitionSchema.getObjectId() == null) {
      repo.save(partitionSchema, null, null);
    }
  }
}
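The three trailing loops share one guard: persist a shared object only when it reports unsaved changes or has never been assigned an ObjectId. A hedged sketch of that pattern as a helper (the helper name and the boolean parameter are ours, not from the source):

// Hypothetical helper capturing the save-if-changed-or-new guard used above.
private void saveIfNeeded(Repository repo, RepositoryElementInterface shared, boolean changed) throws KettleException {
  if (changed || shared.getObjectId() == null) {
    repo.save(shared, null, null); // null version comment and progress monitor, as in the source
  }
}

It would be called as saveIfNeeded(repo, slaveServer, slaveServer.hasChanged()), and likewise for the cluster and partition schemas.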
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class ClusterDelegate, method dataNodeToElement:
public void dataNodeToElement(DataNode rootNode, RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;
  // The metadata...
  clusterSchema.setBasePort(getString(rootNode, PROP_BASE_PORT));
  clusterSchema.setSocketsBufferSize(getString(rootNode, PROP_SOCKETS_BUFFER_SIZE));
  clusterSchema.setSocketsFlushInterval(getString(rootNode, PROP_SOCKETS_FLUSH_INTERVAL));
  clusterSchema.setSocketsCompressed(rootNode.getProperty(PROP_SOCKETS_COMPRESSED).getBoolean());
  clusterSchema.setDynamic(rootNode.getProperty(PROP_DYNAMIC).getBoolean());
  DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);
  // The slaves...
  long nrSlaves = attrNode.getProperty(PROP_NB_SLAVE_SERVERS).getLong();
  for (int i = 0; i < nrSlaves; i++) {
    if (attrNode.hasProperty(String.valueOf(i))) {
      DataNodeRef slaveNodeRef = attrNode.getProperty(String.valueOf(i)).getRef();
      clusterSchema.getSlaveServers().add(findSlaveServer(new StringObjectId(slaveNodeRef.toString())));
    }
  }
}
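One caveat: the slave loop checks hasProperty() before reading, but the property reads above do not, so a stored node missing, say, PROP_SOCKETS_COMPRESSED would presumably yield a null from getProperty() and a NullPointerException from the chained getBoolean(). A defensive sketch, assuming the platform's DataProperty type and that getProperty() returns null for absent properties:

// Hypothetical null-safe read; falls back to "not compressed" when the property is absent.
DataProperty compressed = rootNode.getProperty(PROP_SOCKETS_COMPRESSED);
clusterSchema.setSocketsCompressed(compressed != null && compressed.getBoolean());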
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class ClusterDelegate, method elementToDataNode:
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;
  DataNode rootNode = new DataNode(NODE_ROOT);
  // Save the properties...
  rootNode.setProperty(PROP_BASE_PORT, clusterSchema.getBasePort());
  rootNode.setProperty(PROP_SOCKETS_BUFFER_SIZE, clusterSchema.getSocketsBufferSize());
  rootNode.setProperty(PROP_SOCKETS_FLUSH_INTERVAL, clusterSchema.getSocketsFlushInterval());
  rootNode.setProperty(PROP_SOCKETS_COMPRESSED, clusterSchema.isSocketsCompressed());
  rootNode.setProperty(PROP_DYNAMIC, clusterSchema.isDynamic());
  DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
  // Also save the used slave server references.
  attrNode.setProperty(PROP_NB_SLAVE_SERVERS, clusterSchema.getSlaveServers().size());
  for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
    SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
    DataNodeRef slaveNodeRef = new DataNodeRef(slaveServer.getObjectId().getId());
    // Save the slave server by reference; this way it becomes impossible to delete the slave by accident
    // while it is still in use.
    attrNode.setProperty(String.valueOf(i), slaveNodeRef);
  }
  return rootNode;
}
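Note that getObjectId() is null for a slave server that was never saved to the repository, the very condition saveSharedObjects checks earlier on this page, so building the DataNodeRef would fail with a NullPointerException. A hedged guard one might add (our addition, not in the source):

// Hypothetical guard: fail fast on slaves that were never saved, with a readable message.
ObjectId slaveId = slaveServer.getObjectId();
if (slaveId == null) {
  throw new KettleException("Slave server '" + slaveServer.getName() + "' must be saved before the cluster schema");
}
attrNode.setProperty(String.valueOf(i), new DataNodeRef(slaveId.getId()));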
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class TransDelegate, method loadSharedObjects:
@SuppressWarnings("unchecked")
public SharedObjects loadSharedObjects(final RepositoryElementInterface element, final Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType) throws KettleException {
  TransMeta transMeta = (TransMeta) element;
  transMeta.setSharedObjects(transMeta.readSharedObjects());
  // Repository objects take priority, so let's overwrite them...
  readDatabases(transMeta, true, (List<DatabaseMeta>) sharedObjectsByType.get(RepositoryObjectType.DATABASE));
  readPartitionSchemas(transMeta, true, (List<PartitionSchema>) sharedObjectsByType.get(RepositoryObjectType.PARTITION_SCHEMA));
  readSlaves(transMeta, true, (List<SlaveServer>) sharedObjectsByType.get(RepositoryObjectType.SLAVE_SERVER));
  readClusters(transMeta, true, (List<ClusterSchema>) sharedObjectsByType.get(RepositoryObjectType.CLUSTER_SCHEMA));
  return transMeta.getSharedObjects();
}
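The sharedObjectsByType map is keyed by RepositoryObjectType, one list per kind of shared object. A minimal sketch of how a caller might assemble it (the four list variables are illustrative placeholders, not names from the source):

// Hypothetical assembly of the map passed into loadSharedObjects.
Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType =
    new EnumMap<>(RepositoryObjectType.class);
sharedObjectsByType.put(RepositoryObjectType.DATABASE, databases); // List<DatabaseMeta>
sharedObjectsByType.put(RepositoryObjectType.PARTITION_SCHEMA, partitions); // List<PartitionSchema>
sharedObjectsByType.put(RepositoryObjectType.SLAVE_SERVER, slaves); // List<SlaveServer>
sharedObjectsByType.put(RepositoryObjectType.CLUSTER_SCHEMA, clusters); // List<ClusterSchema>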