Search in sources:

Example 16 with ClusterSchema

use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

In class RepositoryExplorerDialog, method newCluster.

public void newCluster() {
    try {
        ClusterSchema clusterSchema = new ClusterSchema();
        ClusterSchemaDialog dialog = new ClusterSchemaDialog(shell, clusterSchema, rep.getSlaveServers());
        if (!dialog.open()) {
            // User cancelled the dialog: nothing to create, nothing to refresh.
            return;
        }
        // Check whether a cluster schema with this name is already stored in the repository.
        ObjectId existingId = rep.getClusterID(clusterSchema.getName());
        if (existingId != null) {
            // Duplicate name: report it instead of silently overwriting the stored schema.
            MessageBox duplicateBox = new MessageBox(shell, SWT.ICON_ERROR | SWT.OK);
            duplicateBox.setMessage(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.AlreadyExists.Message"));
            duplicateBox.setText(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.AlreadyExists.Title"));
            duplicateBox.open();
        } else {
            rep.insertLogEntry("Creating new cluster '" + clusterSchema.getName() + "'");
            rep.save(clusterSchema, Const.VERSION_COMMENT_INITIAL_VERSION, null);
        }
        // Reflect the repository state in the tree either way.
        refreshTree();
    } catch (KettleException e) {
        new ErrorDialog(shell, BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.UnexpectedError.Title"), BaseMessages.getString(PKG, "RepositoryExplorerDialog.Cluster.Create.UnexpectedError.Message"), e);
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) ObjectId(org.pentaho.di.repository.ObjectId) ErrorDialog(org.pentaho.di.ui.core.dialog.ErrorDialog) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) ClusterSchemaDialog(org.pentaho.di.ui.cluster.dialog.ClusterSchemaDialog) MessageBox(org.eclipse.swt.widgets.MessageBox)

Example 17 with ClusterSchema

use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

In class StreamToTransNodeConverter, method saveSharedObjects.

/**
 * Saves the shared objects referenced by the given transformation (databases, slave
 * servers, cluster schemas and partition schemas) into the repository, so the
 * transformation can be stored without dangling references.
 *
 * @param repo    repository to save the shared objects into
 * @param element the transformation; must be a {@link TransMeta}
 * @throws KettleException if any repository save or lookup fails
 */
private void saveSharedObjects(final Repository repo, final RepositoryElementInterface element) throws KettleException {
    TransMeta transMeta = (TransMeta) element;
    // First store the databases and other depending objects in the transformation.
    List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
    int dbIndex = 0;
    // Use primitive literals instead of Boolean.FALSE/Boolean.TRUE: the variable is a
    // primitive boolean, so boxed constants were just being auto-unboxed.
    boolean updateMeta = false;
    List<Integer> transMetaDatabasesToUpdate = new ArrayList<>();
    for (DatabaseMeta databaseMeta : transMeta.getDatabases()) {
        if (!databaseNames.contains(databaseMeta.getName())) {
            // NOTE(review): saves when the connection has no object id OR has a non-empty
            // hostname — confirm that re-saving an already-identified connection with a
            // hostname is intended.
            if (databaseMeta.getObjectId() == null || !StringUtils.isEmpty(databaseMeta.getHostname())) {
                repo.save(databaseMeta, null, null);
            }
        } else if (databaseMeta.getObjectId() == null) {
            // add this to the list to update object Ids later
            transMetaDatabasesToUpdate.add(dbIndex);
            updateMeta = true;
        }
        dbIndex++;
    }
    if (updateMeta) {
        // make sure to update object ids in the transmeta db collection
        for (Integer databaseMetaIndex : transMetaDatabasesToUpdate) {
            transMeta.getDatabase(databaseMetaIndex).setObjectId(repo.getDatabaseID(transMeta.getDatabase(databaseMetaIndex).getName()));
        }
    }
    // Save slave servers that are new or have unsaved changes.
    for (SlaveServer slaveServer : transMeta.getSlaveServers()) {
        if (slaveServer.hasChanged() || slaveServer.getObjectId() == null) {
            repo.save(slaveServer, null, null);
        }
    }
    // Save cluster schemas that are new or have unsaved changes.
    for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
        if (clusterSchema.hasChanged() || clusterSchema.getObjectId() == null) {
            repo.save(clusterSchema, null, null);
        }
    }
    // Save partition schemas that are new or have unsaved changes.
    for (PartitionSchema partitionSchema : transMeta.getPartitionSchemas()) {
        if (partitionSchema.hasChanged() || partitionSchema.getObjectId() == null) {
            repo.save(partitionSchema, null, null);
        }
    }
}
Also used : PartitionSchema(org.pentaho.di.partition.PartitionSchema) TransMeta(org.pentaho.di.trans.TransMeta) ArrayList(java.util.ArrayList) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) KettleExtensionPoint(org.pentaho.di.core.extension.KettleExtensionPoint) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)

Example 18 with ClusterSchema

use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

In class ClusterDelegate, method dataNodeToElement.

public void dataNodeToElement(DataNode rootNode, RepositoryElementInterface element) throws KettleException {
    final ClusterSchema schema = (ClusterSchema) element;
    // Scalar cluster settings are stored as properties directly on the root node.
    schema.setBasePort(getString(rootNode, PROP_BASE_PORT));
    schema.setSocketsBufferSize(getString(rootNode, PROP_SOCKETS_BUFFER_SIZE));
    schema.setSocketsFlushInterval(getString(rootNode, PROP_SOCKETS_FLUSH_INTERVAL));
    schema.setSocketsCompressed(rootNode.getProperty(PROP_SOCKETS_COMPRESSED).getBoolean());
    schema.setDynamic(rootNode.getProperty(PROP_DYNAMIC).getBoolean());
    // Slave servers live as node references under the attributes child node,
    // keyed by their sequential index ("0", "1", ...).
    final DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);
    final long slaveCount = attrNode.getProperty(PROP_NB_SLAVE_SERVERS).getLong();
    for (int index = 0; index < slaveCount; index++) {
        final String key = String.valueOf(index);
        if (!attrNode.hasProperty(key)) {
            continue;
        }
        final DataNodeRef slaveRef = attrNode.getProperty(key).getRef();
        schema.getSlaveServers().add(findSlaveServer(new StringObjectId(slaveRef.toString())));
    }
}
Also used : DataNodeRef(org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef) DataNode(org.pentaho.platform.api.repository2.unified.data.node.DataNode) StringObjectId(org.pentaho.di.repository.StringObjectId) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)

Example 19 with ClusterSchema

use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

In class ClusterDelegate, method elementToDataNode.

public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
    final ClusterSchema schema = (ClusterSchema) element;
    final DataNode rootNode = new DataNode(NODE_ROOT);
    // Scalar cluster settings go onto the root node as simple properties.
    rootNode.setProperty(PROP_BASE_PORT, schema.getBasePort());
    rootNode.setProperty(PROP_SOCKETS_BUFFER_SIZE, schema.getSocketsBufferSize());
    rootNode.setProperty(PROP_SOCKETS_FLUSH_INTERVAL, schema.getSocketsFlushInterval());
    rootNode.setProperty(PROP_SOCKETS_COMPRESSED, schema.isSocketsCompressed());
    rootNode.setProperty(PROP_DYNAMIC, schema.isDynamic());
    // Slave servers are recorded under a dedicated attributes node: a count plus one
    // reference per slave. Storing references (not copies) means a slave still in use
    // by a cluster cannot be deleted by accident.
    final DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
    final int slaveCount = schema.getSlaveServers().size();
    attrNode.setProperty(PROP_NB_SLAVE_SERVERS, slaveCount);
    for (int index = 0; index < slaveCount; index++) {
        final SlaveServer slave = schema.getSlaveServers().get(index);
        attrNode.setProperty(String.valueOf(index), new DataNodeRef(slave.getObjectId().getId()));
    }
    return rootNode;
}
Also used : DataNodeRef(org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef) DataNode(org.pentaho.platform.api.repository2.unified.data.node.DataNode) SlaveServer(org.pentaho.di.cluster.SlaveServer) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)

Example 20 with ClusterSchema

use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

In class TransDelegate, method loadSharedObjects.

@SuppressWarnings("unchecked")
public SharedObjects loadSharedObjects(final RepositoryElementInterface element, final Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType) throws KettleException {
    // Start from the shared objects defined in the transformation's own shared file...
    final TransMeta meta = (TransMeta) element;
    meta.setSharedObjects(meta.readSharedObjects());
    // ...then overlay the repository's versions, which take priority. The order is
    // preserved deliberately: slaves are read before the clusters that reference them.
    readDatabases(meta, true, (List<DatabaseMeta>) sharedObjectsByType.get(RepositoryObjectType.DATABASE));
    readPartitionSchemas(meta, true, (List<PartitionSchema>) sharedObjectsByType.get(RepositoryObjectType.PARTITION_SCHEMA));
    readSlaves(meta, true, (List<SlaveServer>) sharedObjectsByType.get(RepositoryObjectType.SLAVE_SERVER));
    readClusters(meta, true, (List<ClusterSchema>) sharedObjectsByType.get(RepositoryObjectType.CLUSTER_SCHEMA));
    return meta.getSharedObjects();
}
Also used : PartitionSchema(org.pentaho.di.partition.PartitionSchema) TransMeta(org.pentaho.di.trans.TransMeta) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)

Aggregations

ClusterSchema (org.pentaho.di.cluster.ClusterSchema)52 SlaveServer (org.pentaho.di.cluster.SlaveServer)23 KettleException (org.pentaho.di.core.exception.KettleException)19 PartitionSchema (org.pentaho.di.partition.PartitionSchema)19 TransMeta (org.pentaho.di.trans.TransMeta)16 DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta)15 StepMeta (org.pentaho.di.trans.step.StepMeta)15 ObjectId (org.pentaho.di.repository.ObjectId)11 Test (org.junit.Test)8 ArrayList (java.util.ArrayList)7 KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint)7 ErrorDialog (org.pentaho.di.ui.core.dialog.ErrorDialog)7 NotePadMeta (org.pentaho.di.core.NotePadMeta)6 TransHopMeta (org.pentaho.di.trans.TransHopMeta)6 KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException)5 Point (org.pentaho.di.core.gui.Point)5 ClusterSchemaDialog (org.pentaho.di.ui.cluster.dialog.ClusterSchemaDialog)5 List (java.util.List)4 MessageBox (org.eclipse.swt.widgets.MessageBox)4 JobMeta (org.pentaho.di.job.JobMeta)4