Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
From the class KettleDatabaseRepository, method getSlaveServers:
/**
 * @return a list of all the slave servers in the repository.
 * @throws KettleException
 */
public List<SlaveServer> getSlaveServers() throws KettleException {
  List<SlaveServer> list = new ArrayList<>();
  ObjectId[] slaveIDs = getSlaveIDs(false);
  for (int i = 0; i < slaveIDs.length; i++) {
    // Load last version
    SlaveServer slaveServer = loadSlaveServer(slaveIDs[i], null);
    list.add(slaveServer);
  }
  return list;
}
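As a minimal sketch of calling this method (the repository variable is hypothetical and assumed to be an already-connected KettleDatabaseRepository), the returned list can be iterated directly; getName(), toString() and isMaster() are the same accessors used by the dialog code further below:

// Hypothetical usage: print every slave server defined in the repository.
// "repository" is assumed to be a connected KettleDatabaseRepository instance.
List<SlaveServer> servers = repository.getSlaveServers();
for (SlaveServer server : servers) {
  System.out.println(server.getName() + " (" + server.toString() + "), master=" + server.isMaster());
}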
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
From the class ClusterSchemaDialog, method refreshSlaveServers:
private void refreshSlaveServers() {
  wServers.clearAll(false);
  List<SlaveServer> slServers = clusterSchema.getSlaveServers();
  for (int i = 0; i < slServers.size(); i++) {
    TableItem item = new TableItem(wServers.table, SWT.NONE);
    SlaveServer slaveServer = slServers.get(i);
    item.setText(1, Const.NVL(slaveServer.getName(), ""));
    item.setText(2, Const.NVL(slaveServer.toString(), ""));
    item.setText(3, slaveServer.isMaster() ? "Y" : "N");
  }
  wServers.removeEmptyRows();
  wServers.setRowNums();
  wServers.optWidth(true);
}
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
From the class ClusterSchemaDialog, method selectSlaveServers:
private void selectSlaveServers() {
  String[] names = SlaveServer.getSlaveServerNames(slaveServers);
  int[] idx = Const.indexsOfFoundStrings(wServers.getItems(0), names);
  EnterSelectionDialog dialog = new EnterSelectionDialog(shell, names, BaseMessages.getString(PKG, "ClusterSchemaDialog.SelectServers.Label"), BaseMessages.getString(PKG, "ClusterSchemaDialog.SelectServersCluster.Label"));
  dialog.setAvoidQuickSearch();
  dialog.setSelectedNrs(idx);
  dialog.setMulti(true);
  if (dialog.open() != null) {
    clusterSchema.getSlaveServers().clear();
    int[] indeces = dialog.getSelectionIndeces();
    for (int i = 0; i < indeces.length; i++) {
      SlaveServer slaveServer = SlaveServer.findSlaveServer(slaveServers, names[indeces[i]]);
      clusterSchema.getSlaveServers().add(slaveServer);
    }
    refreshSlaveServers();
  }
}
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
From the class ClusterSchemaDialog, method getInfo:
private void getInfo() {
  clusterSchema.setName(wName.getText());
  clusterSchema.setBasePort(wPort.getText());
  clusterSchema.setSocketsBufferSize(wBufferSize.getText());
  clusterSchema.setSocketsFlushInterval(wFlushInterval.getText());
  clusterSchema.setSocketsCompressed(wCompressed.getSelection());
  clusterSchema.setDynamic(wDynamic.getSelection());
  String[] names = SlaveServer.getSlaveServerNames(slaveServers);
  int[] idx = Const.indexsOfFoundStrings(wServers.getItems(0), names);
  clusterSchema.getSlaveServers().clear();
  for (int i = 0; i < idx.length; i++) {
    SlaveServer slaveServer = SlaveServer.findSlaveServer(slaveServers, names[idx[i]]);
    clusterSchema.getSlaveServers().add(slaveServer);
  }
}
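The dialog code above reads its values from SWT widgets. As a minimal sketch of assembling the same cluster schema programmatically (the server names, host, port and credentials are hypothetical, and the five-argument SlaveServer constructor, setMaster(), and the two-argument ClusterSchema constructor are assumptions; the ClusterSchema setters are the ones used in getInfo() above):

// Hypothetical, hard-coded equivalent of what getInfo() collects from the dialog.
SlaveServer master = new SlaveServer("master1", "localhost", "8080", "cluster", "cluster");
master.setMaster(true);
SlaveServer worker = new SlaveServer("slave1", "localhost", "8081", "cluster", "cluster");

ClusterSchema schema = new ClusterSchema("local cluster", new ArrayList<>(Arrays.asList(master, worker)));
schema.setBasePort("40000");
schema.setSocketsBufferSize("2000");
schema.setSocketsFlushInterval("5000");
schema.setSocketsCompressed(true);
schema.setDynamic(false);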
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
From the class JobEntryJobIT, method testLogfileWritesFromRemote:
@Test
public void testLogfileWritesFromRemote() throws Exception {
  JobEntryJob job = spy(new JobEntryJob(JOB_ENTRY_JOB_NAME));
  doCallRealMethod().when(job).execute(any(Result.class), anyInt());

  // Mock the parent job, the job metadata, and the remote slave server plumbing.
  Job parentJob = mock(Job.class);
  JobMeta parentJobMeta = mock(JobMeta.class);
  JobMeta jobMeta = mock(JobMeta.class);
  SlaveServer slaveServer = mock(SlaveServer.class);
  LogChannelInterface log = mock(LogChannelInterface.class);
  SlaveServerJobStatus status = mock(SlaveServerJobStatus.class);
  when(parentJob.getLogLevel()).thenReturn(LogLevel.BASIC);
  when(parentJobMeta.getRepositoryDirectory()).thenReturn(null);
  when(jobMeta.getRepositoryDirectory()).thenReturn(mock(RepositoryDirectoryInterface.class));
  when(jobMeta.getName()).thenReturn(JOB_META_NAME);
  when(parentJob.getJobMeta()).thenReturn(parentJobMeta);
  when(parentJobMeta.findSlaveServer(REMOTE_SLAVE_SERVER_NAME)).thenReturn(slaveServer);
  when(slaveServer.getLogChannel()).thenReturn(log);
  when(log.getLogLevel()).thenReturn(LogLevel.BASIC);
  when(slaveServer.sendXML(anyString(), anyString())).thenReturn(REPLY);
  when(slaveServer.execService(anyString())).thenReturn(REPLY);
  // The slave server reports a finished job whose log contains the expected text.
  when(slaveServer.getJobStatus(anyString(), anyString(), anyInt())).thenReturn(status);
  when(status.getResult()).thenReturn(mock(Result.class));
  when(status.getLoggingString()).thenReturn(LOG);

  // Route the job entry's log file to a temporary file we can inspect afterwards.
  file = Files.createTempFile("file", "");
  doReturn(LOG_FILE_NAME).when(job).getLogFilename();
  doReturn(file.toString()).when(job).environmentSubstitute(LOG_FILE_NAME);
  doReturn(REMOTE_SLAVE_SERVER_NAME).when(job).environmentSubstitute(REMOTE_SLAVE_SERVER_NAME);
  doReturn(jobMeta).when(job).getJobMeta(any(Repository.class), any(VariableSpace.class));
  doNothing().when(job).copyVariablesFrom(anyObject());
  doNothing().when(job).setParentVariableSpace(anyObject());

  // Configure the job entry to log to a file, run on the remote slave server, and wait for it to finish.
  job.setLogfile = true;
  job.createParentFolder = false;
  job.logFileLevel = LogLevel.BASIC;
  job.execPerRow = false;
  job.paramsFromPrevious = false;
  job.argFromPrevious = false;
  job.waitingToFinish = true;
  job.setSpecificationMethod(ObjectLocationSpecificationMethod.FILENAME);
  job.setRemoteSlaveServerName(REMOTE_SLAVE_SERVER_NAME);
  job.setParentJob(parentJob);
  job.setParentJobMeta(parentJobMeta);

  job.execute(new Result(), 0);

  // The remote log lines must have been written to the local log file.
  String result = Files.lines(file).collect(Collectors.joining(""));
  assertTrue(result.contains(LOG));
}
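The mocks above stand in for real Carte calls. As a minimal sketch of the same status-polling API against a live slave server (the server coordinates, credentials, job name, and Carte object id are hypothetical, and the five-argument SlaveServer constructor is an assumption; getJobStatus(), getResult(), and getLoggingString() are the calls mocked in the test):

// Hypothetical: query a remote Carte server for a job's status and read back its log.
SlaveServer server = new SlaveServer("remote1", "carte.example.com", "8080", "cluster", "cluster");
SlaveServerJobStatus jobStatus = server.getJobStatus("myJob", "carte-object-id", 0);
if (jobStatus.getResult() != null) {
  System.out.println(jobStatus.getLoggingString());
}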