Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project (by Pentaho).
Class KettleFileRepository, method readTransSharedObjects:
/**
 * Reads the shared objects available to a transformation.
 *
 * The regular shared objects file is read first; afterwards the latest
 * repository version of every database, slave server, cluster schema and
 * partition schema is loaded and overlaid on the transformation, replacing
 * any same-named entry that came from the file.
 *
 * @param transMeta the transformation to attach the shared objects to
 * @return the shared objects read from the transformation's shared objects file
 * @throws KettleException if any shared object cannot be loaded
 */
@Override
public SharedObjects readTransSharedObjects(TransMeta transMeta) throws KettleException {
  // Regular shared objects from the shared objects file come first.
  SharedObjects sharedObjects = transMeta.readSharedObjects();

  // Overlay the repository-level objects; a null version means "last version".
  for (ObjectId databaseId : getDatabaseIDs(false)) {
    DatabaseMeta databaseMeta = loadDatabaseMeta(databaseId, null);
    databaseMeta.shareVariablesWith(transMeta);
    transMeta.addOrReplaceDatabase(databaseMeta);
  }
  for (ObjectId slaveId : getSlaveIDs(false)) {
    SlaveServer slaveServer = loadSlaveServer(slaveId, null);
    slaveServer.shareVariablesWith(transMeta);
    transMeta.addOrReplaceSlaveServer(slaveServer);
  }
  for (ObjectId clusterId : getClusterIDs(false)) {
    // Cluster schemas resolve their member slave servers against the ones
    // already attached to the transformation above.
    ClusterSchema clusterSchema = loadClusterSchema(clusterId, transMeta.getSlaveServers(), null);
    clusterSchema.shareVariablesWith(transMeta);
    transMeta.addOrReplaceClusterSchema(clusterSchema);
  }
  for (ObjectId partitionId : getPartitionSchemaIDs(false)) {
    PartitionSchema partitionSchema = loadPartitionSchema(partitionId, null);
    transMeta.addOrReplacePartitionSchema(partitionSchema);
  }
  return sharedObjects;
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project (by Pentaho).
Class KettleFileRepository, method readJobMetaSharedObjects:
/**
 * Reads the shared objects available to a job.
 *
 * The regular shared objects file is read first; afterwards the latest
 * repository version of every database and slave server is loaded and
 * overlaid on the job, replacing any same-named entry from the file.
 *
 * @param jobMeta the job to attach the shared objects to
 * @return the shared objects read from the job's shared objects file
 * @throws KettleException if any shared object cannot be loaded
 */
@Override
public SharedObjects readJobMetaSharedObjects(JobMeta jobMeta) throws KettleException {
  // Regular shared objects from the shared objects file come first.
  SharedObjects sharedObjects = jobMeta.readSharedObjects();

  // Overlay the repository-level objects; a null version means "last version".
  for (ObjectId databaseId : getDatabaseIDs(false)) {
    DatabaseMeta databaseMeta = loadDatabaseMeta(databaseId, null);
    databaseMeta.shareVariablesWith(jobMeta);
    jobMeta.addOrReplaceDatabase(databaseMeta);
  }
  for (ObjectId slaveId : getSlaveIDs(false)) {
    SlaveServer slaveServer = loadSlaveServer(slaveId, null);
    slaveServer.shareVariablesWith(jobMeta);
    jobMeta.addOrReplaceSlaveServer(slaveServer);
  }
  return sharedObjects;
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project (by Pentaho).
Class StepMetaTest, method createTestMeta:
/**
 * Builds a fully populated {@link StepMeta} fixture for clone/equality tests.
 *
 * Fix: the original called {@code meta.setDistributes(false)} early on and
 * then unconditionally overwrote that value with a random boolean below; the
 * redundant first call has been removed.
 *
 * @return a StepMeta with every relevant property set to a test value
 * @throws Exception if the mocked step meta interface cannot be created
 */
private static StepMeta createTestMeta() throws Exception {
  // clone() on the mock returns the mock itself so StepMeta.clone() keeps
  // pointing at the same interface instance.
  StepMetaInterface stepMetaInterface = mock(AbstractStepMeta.class);
  when(stepMetaInterface.clone()).thenReturn(stepMetaInterface);

  StepMeta meta = new StepMeta(STEP_ID, "stepname", stepMetaInterface);
  meta.setSelected(true);
  meta.setCopiesString("2");
  meta.setLocation(1, 2);
  meta.setDraw(true);
  meta.setDescription("description");
  meta.setTerminator(true);
  meta.setClusterSchemaName("clusterSchemaName");

  // Randomize distribution so both code paths get exercised across runs;
  // a row distribution is only meaningful when distributing.
  boolean shouldDistribute = rand.nextBoolean();
  meta.setDistributes(shouldDistribute);
  if (shouldDistribute) {
    meta.setRowDistribution(selectRowDistribution());
  }

  // Two distinct attribute groups verify deep copying of the nested map.
  Map<String, Map<String, String>> attributes = new HashMap<String, Map<String, String>>();
  Map<String, String> map1 = new HashMap<String, String>();
  map1.put("1", "1");
  Map<String, String> map2 = new HashMap<String, String>();
  map2.put("2", "2");
  attributes.put("qwerty", map1);
  attributes.put("asdfg", map2);
  meta.setAttributesMap(attributes);

  meta.setStepPartitioningMeta(createStepPartitioningMeta("stepMethod", "stepSchema"));
  meta.setTargetStepPartitioningMeta(createStepPartitioningMeta("targetMethod", "targetSchema"));
  meta.setClusterSchema(new ClusterSchema("cluster_schema", Collections.<SlaveServer>emptyList()));
  return meta;
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project (by Pentaho).
Class PurRepository_SharedObjects_IT, method slaveServer:
/** Creates a minimal slave-server fixture named {@code "testSlaveServer"}. */
private static SlaveServer slaveServer() {
  final SlaveServer testServer = new SlaveServer();
  testServer.setName("testSlaveServer");
  return testServer;
}
Use of org.pentaho.di.cluster.SlaveServer in the pentaho-kettle project (by Pentaho).
Class PurRepository_SharedObjects_Links_IT, method testReadSharedObjects:
// Verifies that a trans/job reloaded from its XML does NOT hold identity
// references (==) to the SlaveServer instances kept in the repository's
// shared-object cache — it must get its own copies.
@SuppressWarnings("unchecked")
private void testReadSharedObjects(GenericMeta gMeta) throws Exception {
PurRepository pur = (PurRepository) repository;
// Prepare the repo directory tree and persist two slave servers.
RepositoryDirectoryInterface rootDir = initRepo();
SlaveServer slave1 = createSlaveServer("slave1");
SlaveServer slave2 = createSlaveServer("slave2");
pur.save(slave1, VERSION_COMMENT_V1, null);
pur.save(slave2, VERSION_COMMENT_V1, null);
// Attach both slaves to a filled meta, save it, and capture its XML form.
AbstractMeta meta = gMeta.createFilled();
meta.getSlaveServers().add(slave1);
meta.getSlaveServers().add(slave2);
// NOTE(review): return value is discarded — presumably called only for its
// side effect of locating/ensuring the directory; confirm against initRepo().
rootDir.findDirectory(DIR_TRANSFORMATIONS);
pur.save(meta, VERSION_COMMENT_V1, null);
String xmlText = meta.getXML();
try {
// Round-trip the meta through its XML representation into a fresh object.
meta = gMeta.createEmpty();
DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
Document doc = dBuilder.parse(IOUtils.toInputStream(xmlText));
// NOTE(review): Document.getParentNode() is null per the DOM spec; verify
// loadFromXml tolerates null or whether getDocumentElement() was intended.
gMeta.loadFromXml(doc.getParentNode());
// Compare every reloaded slave against every cached slave by identity (==);
// any alias means the meta leaked references into/out of the cache.
List<SharedObjectInterface> sharedObjects = (List<SharedObjectInterface>) pur.loadAndCacheSharedObjects(false).get(RepositoryObjectType.SLAVE_SERVER);
for (int i = 0; i < meta.getSlaveServers().size(); i++) {
for (int j = 0; j < sharedObjects.size(); j++) {
SlaveServer s1 = meta.getSlaveServers().get(i);
SlaveServer s2 = (SlaveServer) sharedObjects.get(j);
if (s1 == s2) {
fail("Trans/job has direct links on slave servers from cache");
}
}
}
} finally {
// Clean up the persisted slaves and the shared-object cache regardless of outcome.
pur.deleteSlave(slave1.getObjectId());
pur.deleteSlave(slave2.getObjectId());
pur.clearSharedObjectCache();
}
}
Aggregations