
Example 31 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class RepositoryExplorerDialog, the method editSlaveServer:

public void editSlaveServer(String slaveName) {
    try {
        ObjectId id = rep.getSlaveID(slaveName);
        // Load the latest revision of the slave server (the null version label selects the most recent one)
        SlaveServer slaveServer = rep.loadSlaveServer(id, null);
        SlaveServerDialog dd = new SlaveServerDialog(shell, slaveServer);
        if (dd.open()) {
            rep.insertLogEntry("Updating slave server '" + slaveServer.getName() + "'");
            rep.save(slaveServer, Const.VERSION_COMMENT_EDIT_VERSION, null);
            if (!slaveName.equalsIgnoreCase(slaveServer.getName())) {
                // The slave server was renamed: refresh the tree so it shows the new name.
                refreshTree();
            }
        }
    } catch (KettleException e) {
        new ErrorDialog(shell,
            BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Edit.UnexpectedError.Title"),
            BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Edit.UnexpectedError.Message") + slaveName + "]",
            e);
    }
}
Also used: KettleException(org.pentaho.di.core.exception.KettleException) ObjectId(org.pentaho.di.repository.ObjectId) SlaveServerDialog(org.pentaho.di.ui.cluster.dialog.SlaveServerDialog) ErrorDialog(org.pentaho.di.ui.core.dialog.ErrorDialog) SlaveServer(org.pentaho.di.cluster.SlaveServer)
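
For context, the edit path above assumes the slave server already exists in the repository. Below is a minimal sketch of creating and saving one first, so that rep.getSlaveID(slaveName) can resolve it; the name, host, port, and credentials are hypothetical placeholders, and rep is an already-connected Repository as in the method above.

// Sketch only, not project code: create and persist a slave server so it can be edited later.
// All connection details here are hypothetical placeholders.
SlaveServer slaveServer = new SlaveServer("carte-1", "localhost", "8080", "cluster", "cluster");
rep.insertLogEntry("Creating slave server '" + slaveServer.getName() + "'");
rep.save(slaveServer, "initial version", null);
// The same lookup editSlaveServer performs:
ObjectId id = rep.getSlaveID("carte-1");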

Example 32 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class StreamToJobNodeConverter, the method saveSharedObjects:

public void saveSharedObjects(final Repository repo, final RepositoryElementInterface element) throws KettleException {
    JobMeta jobMeta = (JobMeta) element;
    // First store the databases and other depending objects of the job.
    List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
    int dbIndex = 0;
    int indexToReplace = 0;
    boolean updateMeta = false;
    for (DatabaseMeta databaseMeta : jobMeta.getDatabases()) {
        if (!databaseNames.contains(databaseMeta.getName())) {
            if (databaseMeta.getObjectId() == null || !StringUtils.isEmpty(databaseMeta.getHostname())) {
                repo.save(databaseMeta, null, null);
            }
        } else if (databaseMeta.getObjectId() == null) {
            indexToReplace = dbIndex;
            updateMeta = true;
        }
        dbIndex++;
    }
    // Make sure to update the object id in the jobMeta db collection.
    if (updateMeta) {
        DatabaseMeta dbMetaToReplace = jobMeta.getDatabase(indexToReplace);
        dbMetaToReplace.setObjectId(repo.getDatabaseID(dbMetaToReplace.getName()));
        // Replace the stale entry; note that remove + add moves this database to the end of the list.
        jobMeta.removeDatabase(indexToReplace);
        jobMeta.addDatabase(dbMetaToReplace);
    }
    // Finally, save any slave servers that are not yet known to the repository.
    for (SlaveServer slaveServer : jobMeta.getSlaveServers()) {
        if (slaveServer.getObjectId() == null) {
            repo.save(slaveServer, null, null);
        }
    }
}
Also used: JobMeta(org.pentaho.di.job.JobMeta) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta)
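
One caveat: indexToReplace is overwritten on every iteration, so if several databases match a repository database by name while missing their ObjectId, only the last one is reconciled, and the remove/add pair moves that database to the end of the job's list. A hedged sketch of reconciling every match in place, using the same Repository and JobMeta APIs as above (an illustration, not the project's code):

// Sketch: assign repository ObjectIds to all name-matched databases without reordering the list.
List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
for (int i = 0; i < jobMeta.nrDatabases(); i++) {
    DatabaseMeta db = jobMeta.getDatabase(i);
    if (db.getObjectId() == null && databaseNames.contains(db.getName())) {
        db.setObjectId(repo.getDatabaseID(db.getName()));
    }
}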

Example 33 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class StreamToTransNodeConverter, the method saveSharedObjects:

private void saveSharedObjects(final Repository repo, final RepositoryElementInterface element) throws KettleException {
    TransMeta transMeta = (TransMeta) element;
    // First store the databases and other depending objects in the transformation.
    List<String> databaseNames = Arrays.asList(repo.getDatabaseNames(true));
    int dbIndex = 0;
    boolean updateMeta = false;
    List<Integer> transMetaDatabasesToUpdate = new ArrayList<>();
    for (DatabaseMeta databaseMeta : transMeta.getDatabases()) {
        if (!databaseNames.contains(databaseMeta.getName())) {
            if (databaseMeta.getObjectId() == null || !StringUtils.isEmpty(databaseMeta.getHostname())) {
                repo.save(databaseMeta, null, null);
            }
        } else if (databaseMeta.getObjectId() == null) {
            // add this to the list to update object Ids later
            transMetaDatabasesToUpdate.add(dbIndex);
            updateMeta = true;
        }
        dbIndex++;
    }
    if (updateMeta) {
        // make sure to update object ids in the transmeta db collection
        for (Integer databaseMetaIndex : transMetaDatabasesToUpdate) {
            DatabaseMeta dbMeta = transMeta.getDatabase(databaseMetaIndex);
            dbMeta.setObjectId(repo.getDatabaseID(dbMeta.getName()));
        }
    }
    // Save slave servers that changed or are not yet in the repository.
    for (SlaveServer slaveServer : transMeta.getSlaveServers()) {
        if (slaveServer.hasChanged() || slaveServer.getObjectId() == null) {
            repo.save(slaveServer, null, null);
        }
    }
    // Save cluster schemas that changed or are not yet in the repository.
    for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
        if (clusterSchema.hasChanged() || clusterSchema.getObjectId() == null) {
            repo.save(clusterSchema, null, null);
        }
    }
    // Save partition schemas that changed or are not yet in the repository.
    for (PartitionSchema partitionSchema : transMeta.getPartitionSchemas()) {
        if (partitionSchema.hasChanged() || partitionSchema.getObjectId() == null) {
            repo.save(partitionSchema, null, null);
        }
    }
}
Also used: PartitionSchema(org.pentaho.di.partition.PartitionSchema) TransMeta(org.pentaho.di.trans.TransMeta) ArrayList(java.util.ArrayList) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) KettleExtensionPoint(org.pentaho.di.core.extension.KettleExtensionPoint) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
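
Unlike the job variant in Example 32, this converter also re-saves shared objects whose hasChanged() flag is set. The flag follows Kettle's changed-flag convention: code that mutates a shared object marks it dirty. A minimal, hypothetical sketch of that pattern (the server name and new host are placeholders):

// Hypothetical: change a slave server and mark it dirty so saveSharedObjects above
// re-saves it even though it already has an ObjectId.
SlaveServer slave = transMeta.findSlaveServer("carte-1"); // placeholder name
if (slave != null) {
    slave.setHostname("carte-2.example.org"); // placeholder host
    slave.setChanged(); // picked up by slaveServer.hasChanged() above
}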

Example 34 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class ClusterDelegate, the method elementToDataNode:

public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
    ClusterSchema clusterSchema = (ClusterSchema) element;
    DataNode rootNode = new DataNode(NODE_ROOT);
    // save the properties...
    rootNode.setProperty(PROP_BASE_PORT, clusterSchema.getBasePort());
    rootNode.setProperty(PROP_SOCKETS_BUFFER_SIZE, clusterSchema.getSocketsBufferSize());
    rootNode.setProperty(PROP_SOCKETS_FLUSH_INTERVAL, clusterSchema.getSocketsFlushInterval());
    rootNode.setProperty(PROP_SOCKETS_COMPRESSED, clusterSchema.isSocketsCompressed());
    rootNode.setProperty(PROP_DYNAMIC, clusterSchema.isDynamic());
    DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
    // Also save the used slave server references.
    attrNode.setProperty(PROP_NB_SLAVE_SERVERS, clusterSchema.getSlaveServers().size());
    for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
        SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
        DataNodeRef slaveNodeRef = new DataNodeRef(slaveServer.getObjectId().getId());
        // Save the slave server by reference, this way it becomes impossible to delete the slave by accident when still
        // in use.
        attrNode.setProperty(String.valueOf(i), slaveNodeRef);
    }
    return rootNode;
}
Also used: DataNodeRef(org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef) DataNode(org.pentaho.platform.api.repository2.unified.data.node.DataNode) SlaveServer(org.pentaho.di.cluster.SlaveServer) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
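
Because each DataNodeRef is built from slaveServer.getObjectId().getId(), every referenced slave server must already be saved, and therefore carry an ObjectId, before conversion; otherwise this method fails with a NullPointerException. A hedged usage sketch, assuming a Repository instance named repository and this delegate as clusterDelegate (both names are hypothetical wiring):

// Sketch: persist unsaved slaves first so getObjectId() is non-null, then convert.
for (SlaveServer slaveServer : clusterSchema.getSlaveServers()) {
    if (slaveServer.getObjectId() == null) {
        repository.save(slaveServer, null, null); // saving is expected to assign the ObjectId
    }
}
DataNode rootNode = clusterDelegate.elementToDataNode(clusterSchema);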

Example 35 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class JobDelegate, the method loadSharedObjects:

@SuppressWarnings("unchecked")
public SharedObjects loadSharedObjects(final RepositoryElementInterface element,
        final Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType) throws KettleException {
    JobMeta jobMeta = (JobMeta) element;
    jobMeta.setSharedObjects(jobMeta.readSharedObjects());
    // Repository objects take priority, so let's overwrite them...
    readDatabases(jobMeta, true, (List<DatabaseMeta>) sharedObjectsByType.get(RepositoryObjectType.DATABASE));
    readSlaves(jobMeta, true, (List<SlaveServer>) sharedObjectsByType.get(RepositoryObjectType.SLAVE_SERVER));
    return jobMeta.getSharedObjects();
}
Also used: JobMeta(org.pentaho.di.job.JobMeta) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta)
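
The boolean true passed to readDatabases and readSlaves acts as an overwrite flag: definitions loaded from the repository replace same-named entries that readSharedObjects() read from the shared.xml file, which is what the "take priority" comment refers to. A small illustrative sketch (the database name is a placeholder):

// Hypothetical: shared.xml and the repository both define a database "warehouse".
// After loadSharedObjects(...), the repository definition wins, because the
// readDatabases(jobMeta, true, ...) call above overwrites file-based entries.
DatabaseMeta warehouse = jobMeta.findDatabase("warehouse"); // placeholder name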

Aggregations

SlaveServer (org.pentaho.di.cluster.SlaveServer): 103
KettleException (org.pentaho.di.core.exception.KettleException): 35
DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta): 28
ClusterSchema (org.pentaho.di.cluster.ClusterSchema): 22
KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 18
PartitionSchema (org.pentaho.di.partition.PartitionSchema): 18
Test (org.junit.Test): 17
KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint): 17
JobMeta (org.pentaho.di.job.JobMeta): 16
ObjectId (org.pentaho.di.repository.ObjectId): 15
StepMeta (org.pentaho.di.trans.step.StepMeta): 14
ArrayList (java.util.ArrayList): 13
TransMeta (org.pentaho.di.trans.TransMeta): 11
KettleFileException (org.pentaho.di.core.exception.KettleFileException): 10
NotePadMeta (org.pentaho.di.core.NotePadMeta): 9
UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException): 9
Point (org.pentaho.di.core.gui.Point): 8
KettleXMLException (org.pentaho.di.core.exception.KettleXMLException): 7
DuplicateParamException (org.pentaho.di.core.parameters.DuplicateParamException): 7
JobEntryCopy (org.pentaho.di.job.entry.JobEntryCopy): 7