Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class KettleFileRepository, method readTransSharedObjects.
@Override
public SharedObjects readTransSharedObjects(TransMeta transMeta) throws KettleException {
  // First the normal shared objects...
  //
  SharedObjects sharedObjects = transMeta.readSharedObjects();

  // Then the objects stored in this repository: databases, slave servers,
  // cluster schemas and partition schemas...
  //
  for (ObjectId id : getDatabaseIDs(false)) {
    // Load the last version
    DatabaseMeta databaseMeta = loadDatabaseMeta(id, null);
    databaseMeta.shareVariablesWith(transMeta);
    transMeta.addOrReplaceDatabase(databaseMeta);
  }

  for (ObjectId id : getSlaveIDs(false)) {
    // Load the last version
    SlaveServer slaveServer = loadSlaveServer(id, null);
    slaveServer.shareVariablesWith(transMeta);
    transMeta.addOrReplaceSlaveServer(slaveServer);
  }

  for (ObjectId id : getClusterIDs(false)) {
    // Load the last version
    ClusterSchema clusterSchema = loadClusterSchema(id, transMeta.getSlaveServers(), null);
    clusterSchema.shareVariablesWith(transMeta);
    transMeta.addOrReplaceClusterSchema(clusterSchema);
  }

  for (ObjectId id : getPartitionSchemaIDs(false)) {
    // Load the last version
    PartitionSchema partitionSchema = loadPartitionSchema(id, null);
    transMeta.addOrReplacePartitionSchema(partitionSchema);
  }

  return sharedObjects;
}
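A minimal usage sketch for the method above, assuming a connected KettleFileRepository named fileRepository and an already loaded TransMeta named transMeta; both names are placeholders, not part of the snippet.

  // Assumed setup: fileRepository is a connected KettleFileRepository,
  // transMeta is a transformation loaded or created elsewhere.
  SharedObjects sharedObjects = fileRepository.readTransSharedObjects(transMeta);

  // At this point transMeta already holds the repository's databases, slave servers,
  // cluster schemas and partition schemas; keeping the SharedObjects handle lets the
  // caller save shared-object changes back later.
  transMeta.setSharedObjects(sharedObjects);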
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class KettleDatabaseRepositoryTransDelegate, method readClusters.
/**
 * Read the clusters in the repository and add them to this transformation if they are not yet present.
 *
 * @param transMeta
 *          The transformation to load into.
 * @param overWriteShared
 *          if an object with the same name exists, overwrite it
 * @throws KettleException
 */
public void readClusters(TransMeta transMeta, boolean overWriteShared) throws KettleException {
  try {
    ObjectId[] dbids = repository.getClusterIDs(false);
    for (int i = 0; i < dbids.length; i++) {
      ClusterSchema clusterSchema = repository.loadClusterSchema(dbids[i], transMeta.getSlaveServers(), null);
      clusterSchema.shareVariablesWith(transMeta);

      // Check if there already is one in the transformation
      ClusterSchema check = transMeta.findClusterSchema(clusterSchema.getName());
      if (check == null || overWriteShared) {
        if (!Utils.isEmpty(clusterSchema.getName())) {
          transMeta.addOrReplaceClusterSchema(clusterSchema);
          if (!overWriteShared) {
            clusterSchema.setChanged(false);
          }
        }
      }
    }
  } catch (KettleDatabaseException dbe) {
    throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Log.UnableToReadClustersFromRepository"), dbe);
  }
}
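A hedged calling sketch for readClusters; the public transDelegate field on KettleDatabaseRepository and the surrounding setup are assumptions rather than part of the snippet above.

  // Assumed setup: repository is a connected KettleDatabaseRepository and transMeta is
  // the transformation being opened; transDelegate is taken to be its
  // KettleDatabaseRepositoryTransDelegate instance.
  // overWriteShared = true lets repository definitions replace cluster schemas of the
  // same name that are already present in the transformation; with false, only missing
  // schemas are added and they are marked unchanged.
  repository.transDelegate.readClusters(transMeta, true);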
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class KettleDatabaseRepositoryTest, method testInsertClusterSlave.
@Test
public void testInsertClusterSlave() throws KettleException {
  ArgumentCaptor<String> argumentTableName = ArgumentCaptor.forClass(String.class);
  ArgumentCaptor<RowMetaAndData> argumentTableData = ArgumentCaptor.forClass(RowMetaAndData.class);
  doNothing().when(repo.connectionDelegate).insertTableRow(argumentTableName.capture(), argumentTableData.capture());
  doReturn(new LongObjectId(357)).when(repo.connectionDelegate).getNextClusterSlaveID();

  SlaveServer testSlave = new SlaveServer("slave1", "fakelocal", "9081", "fakeuser", "fakepass");
  testSlave.setObjectId(new LongObjectId(864));
  ClusterSchema testSchema = new ClusterSchema("schema1", Arrays.asList(testSlave));
  testSchema.setObjectId(new LongObjectId(159));

  ObjectId result = repo.insertClusterSlave(testSchema, testSlave);

  RowMetaAndData insertRecord = argumentTableData.getValue();
  assertEquals(KettleDatabaseRepository.TABLE_R_CLUSTER_SLAVE, argumentTableName.getValue());
  assertEquals(3, insertRecord.size());
  assertEquals(ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta(0).getType());
  assertEquals(KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_CLUSTER_SLAVE, insertRecord.getValueMeta(0).getName());
  assertEquals(Long.valueOf(357), insertRecord.getInteger(0));
  assertEquals(ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta(1).getType());
  assertEquals(KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_CLUSTER, insertRecord.getValueMeta(1).getName());
  assertEquals(Long.valueOf(159), insertRecord.getInteger(1));
  assertEquals(ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta(2).getType());
  assertEquals(KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_SLAVE, insertRecord.getValueMeta(2).getName());
  assertEquals(Long.valueOf(864), insertRecord.getInteger(2));
  assertEquals(new LongObjectId(357), result);
}
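Read as a specification, the test pins down the row layout produced by insertClusterSlave: a single record inserted into the cluster-slave table (KettleDatabaseRepository.TABLE_R_CLUSTER_SLAVE) whose three integer columns carry the newly generated cluster-slave ID (357), the cluster schema's object ID (159) and the slave server's object ID (864), with the generated ID also returned to the caller.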
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class RepositoryTestBase, method createClusterSchema.
protected ClusterSchema createClusterSchema(String clusterName) throws Exception {
  ClusterSchema clusterSchema = new ClusterSchema();
  clusterSchema.setName(clusterName);
  clusterSchema.setBasePort(EXP_CLUSTER_SCHEMA_BASE_PORT);
  clusterSchema.setSocketsBufferSize(EXP_CLUSTER_SCHEMA_SOCKETS_BUFFER_SIZE);
  clusterSchema.setSocketsFlushInterval(EXP_CLUSTER_SCHEMA_SOCKETS_FLUSH_INTERVAL);
  clusterSchema.setSocketsCompressed(EXP_CLUSTER_SCHEMA_SOCKETS_COMPRESSED);
  clusterSchema.setDynamic(EXP_CLUSTER_SCHEMA_DYN);

  SlaveServer slaveServer = createSlaveServer(clusterName);
  repository.save(slaveServer, VERSION_COMMENT_V1, null);
  deleteStack.push(slaveServer);
  clusterSchema.setSlaveServers(Collections.singletonList(slaveServer));

  return clusterSchema;
}
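A hedged sketch of how a repository test might exercise this helper; the literal cluster name, the extra deleteStack push and the assertions are illustrative assumptions, not code from RepositoryTestBase.

  // Assumed usage inside a repository test: build, save and reload a cluster schema.
  ClusterSchema clusterSchema = createClusterSchema("testCluster");
  repository.save(clusterSchema, VERSION_COMMENT_V1, null);
  deleteStack.push(clusterSchema);

  ClusterSchema loaded =
      repository.loadClusterSchema(clusterSchema.getObjectId(), clusterSchema.getSlaveServers(), null);

  // The reloaded schema should come back with the name and base port set above.
  assertEquals("testCluster", loaded.getName());
  assertEquals(EXP_CLUSTER_SCHEMA_BASE_PORT, loaded.getBasePort());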
Use of org.pentaho.di.cluster.ClusterSchema in project pentaho-kettle by pentaho.
The class PurRepositoryIT, method doesNotChangeFileWhenFailsToRename_clusters.
@Test
public void doesNotChangeFileWhenFailsToRename_clusters() throws Exception {
  final ClusterSchema schema1 = new ClusterSchema();
  final ClusterSchema schema2 = new ClusterSchema();
  try {
    testDoesNotChangeFileWhenFailsToRename(schema1, schema2, new Callable<RepositoryElementInterface>() {
      @Override
      public RepositoryElementInterface call() throws Exception {
        return repository.loadClusterSchema(schema2.getObjectId(), null, null);
      }
    });
  } finally {
    repository.deleteClusterSchema(schema1.getObjectId());
    repository.deleteClusterSchema(schema2.getObjectId());
  }
}
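The shared testDoesNotChangeFileWhenFailsToRename helper is presumably what saves both cluster schemas, provokes a rename collision and then reloads schema2 through the supplied callback to verify that the underlying repository file was left untouched; the finally block deletes both schemas regardless of the outcome.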