Example 31 with ClusterSchema

Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

Class KettleFileRepository, method readTransSharedObjects().

@Override
public SharedObjects readTransSharedObjects(TransMeta transMeta) throws KettleException {
    // First the normal shared objects...
    // 
    SharedObjects sharedObjects = transMeta.readSharedObjects();
    // 
    for (ObjectId id : getDatabaseIDs(false)) {
        // Load last version
        DatabaseMeta databaseMeta = loadDatabaseMeta(id, null);
        databaseMeta.shareVariablesWith(transMeta);
        transMeta.addOrReplaceDatabase(databaseMeta);
    }
    for (ObjectId id : getSlaveIDs(false)) {
        // Load last version
        SlaveServer slaveServer = loadSlaveServer(id, null);
        slaveServer.shareVariablesWith(transMeta);
        transMeta.addOrReplaceSlaveServer(slaveServer);
    }
    for (ObjectId id : getClusterIDs(false)) {
        // Load last version
        ClusterSchema clusterSchema = loadClusterSchema(id, transMeta.getSlaveServers(), null);
        clusterSchema.shareVariablesWith(transMeta);
        transMeta.addOrReplaceClusterSchema(clusterSchema);
    }
    for (ObjectId id : getPartitionSchemaIDs(false)) {
        // Load last version
        PartitionSchema partitionSchema = loadPartitionSchema(id, null);
        transMeta.addOrReplacePartitionSchema(partitionSchema);
    }
    return sharedObjects;
}
Also used : StringObjectId(org.pentaho.di.repository.StringObjectId) ObjectId(org.pentaho.di.repository.ObjectId) PartitionSchema(org.pentaho.di.partition.PartitionSchema) SharedObjects(org.pentaho.di.shared.SharedObjects) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
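
A minimal calling sketch, not taken from the project source: the repository location, transformation name, and directory are assumptions, shown only to illustrate where readTransSharedObjects() fits in.

public SharedObjects refreshTransSharedObjects() throws KettleException {
    // Hypothetical file repository rooted at /tmp/kettle-repo; id, name and path are illustrative.
    KettleFileRepositoryMeta repositoryMeta = new KettleFileRepositoryMeta("KettleFileRepository", "FileRep", "File repository", "/tmp/kettle-repo");
    KettleFileRepository repository = new KettleFileRepository();
    repository.init(repositoryMeta);
    // File repositories ignore credentials, so null/null is acceptable here.
    repository.connect(null, null);
    // Load a transformation by name (illustrative) from the repository root, latest revision.
    TransMeta transMeta = repository.loadTransformation("my_transformation", repository.findDirectory("/"), null, true, null);
    // Adds or replaces the latest databases, slave servers, cluster schemas and partition schemas on transMeta.
    return repository.readTransSharedObjects(transMeta);
}
Also used : KettleFileRepository(org.pentaho.di.repository.filerep.KettleFileRepository) KettleFileRepositoryMeta(org.pentaho.di.repository.filerep.KettleFileRepositoryMeta) SharedObjects(org.pentaho.di.shared.SharedObjects) TransMeta(org.pentaho.di.trans.TransMeta) KettleException(org.pentaho.di.core.exception.KettleException)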

Example 32 with ClusterSchema

Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

Class KettleDatabaseRepositoryTransDelegate, method readClusters().

/**
 * Read the cluster schemas in the repository and add them to this transformation if they are not yet present.
 *
 * @param transMeta
 *          The transformation to load the cluster schemas into.
 * @param overWriteShared
 *          if an object with the same name already exists, overwrite it
 * @throws KettleException
 *           if the cluster schemas could not be read from the repository
 */
public void readClusters(TransMeta transMeta, boolean overWriteShared) throws KettleException {
    try {
        ObjectId[] dbids = repository.getClusterIDs(false);
        for (int i = 0; i < dbids.length; i++) {
            ClusterSchema clusterSchema = repository.loadClusterSchema(dbids[i], transMeta.getSlaveServers(), null);
            clusterSchema.shareVariablesWith(transMeta);
            // Check if there already is one in the transformation
            ClusterSchema check = transMeta.findClusterSchema(clusterSchema.getName());
            if (check == null || overWriteShared) {
                if (!Utils.isEmpty(clusterSchema.getName())) {
                    transMeta.addOrReplaceClusterSchema(clusterSchema);
                    if (!overWriteShared) {
                        clusterSchema.setChanged(false);
                    }
                }
            }
        }
    } catch (KettleDatabaseException dbe) {
        throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Log.UnableToReadClustersFromRepository"), dbe);
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) LongObjectId(org.pentaho.di.repository.LongObjectId) ObjectId(org.pentaho.di.repository.ObjectId) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
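
A minimal usage sketch, not taken from the project source: repository is assumed to be a connected KettleDatabaseRepository and transDelegate its KettleDatabaseRepositoryTransDelegate. Slave servers are placed on the TransMeta first, because readClusters() resolves each cluster's members against transMeta.getSlaveServers().

public void mergeRepositoryClusters(KettleDatabaseRepository repository, KettleDatabaseRepositoryTransDelegate transDelegate) throws KettleException {
    TransMeta transMeta = new TransMeta();
    // Cluster schemas reference slave servers by name, so make those available first.
    transMeta.setSlaveServers(repository.getSlaveServers());
    // overWriteShared = false: schemas already defined on transMeta keep precedence,
    // only cluster schemas missing from transMeta are added from the repository.
    transDelegate.readClusters(transMeta, false);
    for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
        System.out.println(clusterSchema.getName() + ": " + clusterSchema.getSlaveServers().size() + " slave server(s)");
    }
}
Also used : KettleDatabaseRepository(org.pentaho.di.repository.kdr.KettleDatabaseRepository) KettleDatabaseRepositoryTransDelegate(org.pentaho.di.repository.kdr.delegates.KettleDatabaseRepositoryTransDelegate) TransMeta(org.pentaho.di.trans.TransMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) KettleException(org.pentaho.di.core.exception.KettleException)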

Example 33 with ClusterSchema

Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

Class KettleDatabaseRepositoryTest, method testInsertClusterSlave().

@Test
public void testInsertClusterSlave() throws KettleException {
    ArgumentCaptor<String> argumentTableName = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RowMetaAndData> argumentTableData = ArgumentCaptor.forClass(RowMetaAndData.class);
    doNothing().when(repo.connectionDelegate).insertTableRow(argumentTableName.capture(), argumentTableData.capture());
    doReturn(new LongObjectId(357)).when(repo.connectionDelegate).getNextClusterSlaveID();
    SlaveServer testSlave = new SlaveServer("slave1", "fakelocal", "9081", "fakeuser", "fakepass");
    testSlave.setObjectId(new LongObjectId(864));
    ClusterSchema testSchema = new ClusterSchema("schema1", Arrays.asList(testSlave));
    testSchema.setObjectId(new LongObjectId(159));
    ObjectId result = repo.insertClusterSlave(testSchema, testSlave);
    RowMetaAndData insertRecord = argumentTableData.getValue();
    assertEquals(KettleDatabaseRepository.TABLE_R_CLUSTER_SLAVE, argumentTableName.getValue());
    assertEquals(3, insertRecord.size());
    assertEquals(ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta(0).getType());
    assertEquals(KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_CLUSTER_SLAVE, insertRecord.getValueMeta(0).getName());
    assertEquals(Long.valueOf(357), insertRecord.getInteger(0));
    assertEquals(ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta(1).getType());
    assertEquals(KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_CLUSTER, insertRecord.getValueMeta(1).getName());
    assertEquals(Long.valueOf(159), insertRecord.getInteger(1));
    assertEquals(ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta(2).getType());
    assertEquals(KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_SLAVE, insertRecord.getValueMeta(2).getName());
    assertEquals(Long.valueOf(864), insertRecord.getInteger(2));
    assertEquals(new LongObjectId(357), result);
}
Also used : RowMetaAndData(org.pentaho.di.core.RowMetaAndData) LongObjectId(org.pentaho.di.repository.LongObjectId) ObjectId(org.pentaho.di.repository.ObjectId) Matchers.anyString(org.mockito.Matchers.anyString) SlaveServer(org.pentaho.di.cluster.SlaveServer) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) Test(org.junit.Test)
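
The three asserted columns form the R_CLUSTER_SLAVE link row: the generated cluster-slave ID (357), the cluster schema's ObjectId (159) and the slave server's ObjectId (864). A small standalone sketch of the objects being linked; host, port and credentials are placeholders:

SlaveServer slaveServer = new SlaveServer("slave1", "localhost", "8081", "cluster", "cluster");
slaveServer.setObjectId(new LongObjectId(864));
ClusterSchema clusterSchema = new ClusterSchema("schema1", Arrays.asList(slaveServer));
clusterSchema.setObjectId(new LongObjectId(159));
// insertClusterSlave(clusterSchema, slaveServer) then writes one R_CLUSTER_SLAVE row per pair,
// storing FIELD_CLUSTER_SLAVE_ID_CLUSTER (159) and FIELD_CLUSTER_SLAVE_ID_SLAVE (864)
// under a freshly generated FIELD_CLUSTER_SLAVE_ID_CLUSTER_SLAVE key.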

Example 34 with ClusterSchema

Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

Class RepositoryTestBase, method createClusterSchema().

protected ClusterSchema createClusterSchema(String clusterName) throws Exception {
    ClusterSchema clusterSchema = new ClusterSchema();
    clusterSchema.setName(clusterName);
    clusterSchema.setBasePort(EXP_CLUSTER_SCHEMA_BASE_PORT);
    clusterSchema.setSocketsBufferSize(EXP_CLUSTER_SCHEMA_SOCKETS_BUFFER_SIZE);
    clusterSchema.setSocketsFlushInterval(EXP_CLUSTER_SCHEMA_SOCKETS_FLUSH_INTERVAL);
    clusterSchema.setSocketsCompressed(EXP_CLUSTER_SCHEMA_SOCKETS_COMPRESSED);
    clusterSchema.setDynamic(EXP_CLUSTER_SCHEMA_DYN);
    SlaveServer slaveServer = createSlaveServer(clusterName);
    repository.save(slaveServer, VERSION_COMMENT_V1, null);
    deleteStack.push(slaveServer);
    clusterSchema.setSlaveServers(Collections.singletonList(slaveServer));
    return clusterSchema;
}
Also used : SlaveServer(org.pentaho.di.cluster.SlaveServer) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
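
The EXP_* values are test fixtures defined elsewhere in RepositoryTestBase; the setters themselves are plain ClusterSchema API. A sketch with illustrative literal values; the port, buffer size and flush interval are kept as Strings by ClusterSchema, which is what allows Kettle variables such as ${SOME_VARIABLE} to be used in place of literals:

ClusterSchema clusterSchema = new ClusterSchema();
clusterSchema.setName("local cluster");
// Illustrative values only; the String-typed settings may also hold variable expressions.
clusterSchema.setBasePort("40000");
clusterSchema.setSocketsBufferSize("2000");
clusterSchema.setSocketsFlushInterval("5000");
clusterSchema.setSocketsCompressed(true);
clusterSchema.setDynamic(false);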

Example 35 with ClusterSchema

Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.

Class PurRepositoryIT, method doesNotChangeFileWhenFailsToRename_clusters().

@Test
public void doesNotChangeFileWhenFailsToRename_clusters() throws Exception {
    final ClusterSchema schema1 = new ClusterSchema();
    final ClusterSchema schema2 = new ClusterSchema();
    try {
        testDoesNotChangeFileWhenFailsToRename(schema1, schema2, new Callable<RepositoryElementInterface>() {

            @Override
            public RepositoryElementInterface call() throws Exception {
                return repository.loadClusterSchema(schema2.getObjectId(), null, null);
            }
        });
    } finally {
        repository.deleteClusterSchema(schema1.getObjectId());
        repository.deleteClusterSchema(schema2.getObjectId());
    }
}
Also used : RepositoryElementInterface(org.pentaho.di.repository.RepositoryElementInterface) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) MetaStoreException(org.pentaho.metastore.api.exceptions.MetaStoreException) BeansException(org.springframework.beans.BeansException) MetaStoreNamespaceExistsException(org.pentaho.metastore.api.exceptions.MetaStoreNamespaceExistsException) MetaStoreDependenciesExistsException(org.pentaho.metastore.api.exceptions.MetaStoreDependenciesExistsException) SAXException(org.xml.sax.SAXException) KettleException(org.pentaho.di.core.exception.KettleException) SAXParseException(org.xml.sax.SAXParseException) Test(org.junit.Test)

Aggregations

ClusterSchema (org.pentaho.di.cluster.ClusterSchema): 52
SlaveServer (org.pentaho.di.cluster.SlaveServer): 23
KettleException (org.pentaho.di.core.exception.KettleException): 19
PartitionSchema (org.pentaho.di.partition.PartitionSchema): 19
TransMeta (org.pentaho.di.trans.TransMeta): 16
DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta): 15
StepMeta (org.pentaho.di.trans.step.StepMeta): 15
ObjectId (org.pentaho.di.repository.ObjectId): 11
Test (org.junit.Test): 8
ArrayList (java.util.ArrayList): 7
KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint): 7
ErrorDialog (org.pentaho.di.ui.core.dialog.ErrorDialog): 7
NotePadMeta (org.pentaho.di.core.NotePadMeta): 6
TransHopMeta (org.pentaho.di.trans.TransHopMeta): 6
KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 5
Point (org.pentaho.di.core.gui.Point): 5
ClusterSchemaDialog (org.pentaho.di.ui.cluster.dialog.ClusterSchemaDialog): 5
List (java.util.List): 4
MessageBox (org.eclipse.swt.widgets.MessageBox): 4
JobMeta (org.pentaho.di.job.JobMeta): 4