Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
In the class JobEntryTransTest, the method testJobEntry:
@SuppressWarnings( "unchecked" )
private void testJobEntry( Repository rep, boolean includeJobName, ObjectLocationSpecificationMethod method,
    ObjectLocationSpecificationMethod expectedMethod )
  throws KettleXMLException, ParserConfigurationException, SAXException, IOException {
  List<DatabaseMeta> databases = mock( List.class );
  List<SlaveServer> slaveServers = mock( List.class );
  IMetaStore metaStore = mock( IMetaStore.class );
  JobEntryTrans jobEntryTrans = getJobEntryTrans();
  jobEntryTrans.loadXML( getEntryNode( includeJobName, method ), databases, slaveServers, rep, metaStore );
  assertEquals( "If we connect to repository then we use rep_name method", expectedMethod,
      jobEntryTrans.getSpecificationMethod() );
}
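The helpers getJobEntryTrans() and getEntryNode(...) are not shown in this excerpt. A hedged sketch of how such a parameterized helper is typically driven from @Test methods follows; the enum values are real ObjectLocationSpecificationMethod constants, but the exact test matrix and expected outcomes here are assumptions, not the project's actual cases:
@Test
public void testRepositoryByNameWhenConnected() throws Exception {
  // assumption: with a connected repository, the entry resolves to
  // REPOSITORY_BY_NAME regardless of the method stored in the XML
  testJobEntry( mock( Repository.class ), true,
      ObjectLocationSpecificationMethod.FILENAME,
      ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME );
}
@Test
public void testFilenameKeptWithoutRepository() throws Exception {
  // assumption: with no repository connection, the XML's method is kept as-is
  testJobEntry( null, true,
      ObjectLocationSpecificationMethod.FILENAME,
      ObjectLocationSpecificationMethod.FILENAME );
}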
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
In the class KettleDatabaseRepositoryTest, the method testInsertClusterSlave:
@Test
public void testInsertClusterSlave() throws KettleException {
  ArgumentCaptor<String> argumentTableName = ArgumentCaptor.forClass( String.class );
  ArgumentCaptor<RowMetaAndData> argumentTableData = ArgumentCaptor.forClass( RowMetaAndData.class );
  doNothing().when( repo.connectionDelegate ).insertTableRow( argumentTableName.capture(), argumentTableData.capture() );
  doReturn( new LongObjectId( 357 ) ).when( repo.connectionDelegate ).getNextClusterSlaveID();

  SlaveServer testSlave = new SlaveServer( "slave1", "fakelocal", "9081", "fakeuser", "fakepass" );
  testSlave.setObjectId( new LongObjectId( 864 ) );
  ClusterSchema testSchema = new ClusterSchema( "schema1", Arrays.asList( testSlave ) );
  testSchema.setObjectId( new LongObjectId( 159 ) );

  ObjectId result = repo.insertClusterSlave( testSchema, testSlave );

  // The captured row should target the cluster-slave link table and carry three ID columns.
  RowMetaAndData insertRecord = argumentTableData.getValue();
  assertEquals( KettleDatabaseRepository.TABLE_R_CLUSTER_SLAVE, argumentTableName.getValue() );
  assertEquals( 3, insertRecord.size() );
  // Column 0: the freshly generated cluster-slave ID.
  assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 0 ).getType() );
  assertEquals( KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_CLUSTER_SLAVE, insertRecord.getValueMeta( 0 ).getName() );
  assertEquals( Long.valueOf( 357 ), insertRecord.getInteger( 0 ) );
  // Column 1: the ID of the cluster schema.
  assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 1 ).getType() );
  assertEquals( KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_CLUSTER, insertRecord.getValueMeta( 1 ).getName() );
  assertEquals( Long.valueOf( 159 ), insertRecord.getInteger( 1 ) );
  // Column 2: the ID of the slave server itself.
  assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 2 ).getType() );
  assertEquals( KettleDatabaseRepository.FIELD_CLUSTER_SLAVE_ID_SLAVE, insertRecord.getValueMeta( 2 ).getName() );
  assertEquals( Long.valueOf( 864 ), insertRecord.getInteger( 2 ) );
  assertEquals( new LongObjectId( 357 ), result );
}
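The core pattern here is Mockito's ArgumentCaptor wired into a doNothing() stub, which records what insertClusterSlave() passes to the connection delegate. A minimal self-contained sketch of that pattern, with a hypothetical RowInserter interface standing in for the delegate (only the Mockito and JUnit calls are real API):
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;

import org.junit.Test;
import org.mockito.ArgumentCaptor;

public class ArgumentCaptorSketchTest {
  // hypothetical collaborator, standing in for repo.connectionDelegate
  interface RowInserter {
    void insertTableRow( String tableName, Object row );
  }

  @Test
  public void capturesWhatTheCodeUnderTestPassed() {
    RowInserter inserter = mock( RowInserter.class );
    ArgumentCaptor<String> tableName = ArgumentCaptor.forClass( String.class );
    ArgumentCaptor<Object> row = ArgumentCaptor.forClass( Object.class );
    // stubbing with captor.capture() both matches any argument and records it
    doNothing().when( inserter ).insertTableRow( tableName.capture(), row.capture() );

    inserter.insertTableRow( "R_CLUSTER_SLAVE", new Object() );

    // after the call, the captors expose the actual arguments for assertions
    assertEquals( "R_CLUSTER_SLAVE", tableName.getValue() );
  }
}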
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
In the class GetSlavesServletTest, the method testUpdateActivityStatusInDoGet:
@Test
@SuppressWarnings( "ResultOfMethodCallIgnored" )
public void testUpdateActivityStatusInDoGet() throws Exception {
  LogChannelInterface log = mock( LogChannelInterface.class );
  ServletOutputStream outputStream = mock( ServletOutputStream.class );
  SlaveServerDetection activeServerDetection = mock( SlaveServerDetection.class );
  SlaveServerDetection inactiveServerDetection = mock( SlaveServerDetection.class );
  SlaveServer activeSlaveServer = mock( SlaveServer.class );
  SlaveServer inactiveSlaveServer = mock( SlaveServer.class );
  servlet.log = log;

  List<SlaveServerDetection> detections = new ArrayList<>();
  detections.add( activeServerDetection );
  detections.add( inactiveServerDetection );

  doReturn( false ).when( log ).isDebug();
  doReturn( outputStream ).when( response ).getOutputStream();
  doReturn( detections ).when( servlet ).getDetections();
  doReturn( activeSlaveServer ).when( activeServerDetection ).getSlaveServer();
  doReturn( inactiveSlaveServer ).when( inactiveServerDetection ).getSlaveServer();
  // Simulate an unreachable slave: its status check throws.
  doThrow( new Exception() ).when( inactiveSlaveServer ).getStatus();
  doCallRealMethod().when( servlet ).doGet( request, response );

  servlet.doGet( request, response );

  // The reachable slave keeps its detection record untouched.
  verify( activeSlaveServer ).getStatus();
  verify( activeServerDetection, never() ).setActive( false );
  verify( activeServerDetection, never() ).setLastInactiveDate( anyObject() );
  verify( activeServerDetection ).getXML();
  // The failing slave is marked inactive and stamped with a last-inactive date.
  verify( inactiveSlaveServer ).getStatus();
  verify( inactiveServerDetection ).setActive( false );
  verify( inactiveServerDetection ).setLastInactiveDate( anyObject() );
  verify( inactiveServerDetection ).getXML();
}
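The inactive path hinges on doThrow(): stubbing a checked exception out of getStatus() simulates an unreachable slave, and the servlet's catch block is what flips the detection record to inactive. The same stubbing pattern in isolation (a sketch; the exception message is invented):
SlaveServer unreachable = mock( SlaveServer.class );
doThrow( new Exception( "connection refused" ) ).when( unreachable ).getStatus();
try {
  unreachable.getStatus();
  fail( "the stubbed call should have thrown" );
} catch ( Exception e ) {
  // in GetSlavesServlet.doGet this is where the detection record
  // would be marked inactive and time-stamped
}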
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
In the class RepositoryTestBase, the method createTransMeta:
protected TransMeta createTransMeta( final String dbName ) throws Exception {
  RepositoryDirectoryInterface rootDir = loadStartDirectory();
  TransMeta transMeta = new TransMeta();
  transMeta.setName( EXP_TRANS_NAME.concat( dbName ) );
  transMeta.setDescription( EXP_TRANS_DESC );
  transMeta.setExtendedDescription( EXP_TRANS_EXTENDED_DESC );
  transMeta.setRepositoryDirectory( rootDir.findDirectory( DIR_TRANSFORMATIONS ) );
  transMeta.setTransversion( EXP_TRANS_VERSION );
  transMeta.setTransstatus( EXP_TRANS_STATUS );
  transMeta.setCreatedUser( EXP_TRANS_CREATED_USER );
  transMeta.setCreatedDate( EXP_TRANS_CREATED_DATE );
  transMeta.setModifiedUser( EXP_TRANS_MOD_USER );
  transMeta.setModifiedDate( EXP_TRANS_MOD_DATE );
  transMeta.addParameterDefinition( EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC );
  // TODO mlowery other transLogTable fields could be set for testing here
  TransLogTable transLogTable = TransLogTable.getDefault( transMeta, transMeta, new ArrayList<StepMeta>( 0 ) );
  transLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  transLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  transLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  transLogTable.setLogSizeLimit( EXP_TRANS_LOG_TABLE_SIZE_LIMIT );
  transLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  transLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setTransLogTable( transLogTable );
  // TODO mlowery other perfLogTable fields could be set for testing here
  PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault( transMeta, transMeta );
  perfLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  perfLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  perfLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  perfLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  perfLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setPerformanceLogTable( perfLogTable );
  // TODO mlowery other channelLogTable fields could be set for testing here
  ChannelLogTable channelLogTable = ChannelLogTable.getDefault( transMeta, transMeta );
  channelLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  channelLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  channelLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  channelLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setChannelLogTable( channelLogTable );
  // TODO mlowery other stepLogTable fields could be set for testing here
  StepLogTable stepLogTable = StepLogTable.getDefault( transMeta, transMeta );
  stepLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  stepLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  stepLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  stepLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setStepLogTable( stepLogTable );
  DatabaseMeta dbMeta = createDatabaseMeta( dbName );
  // dbMeta must be saved so that it gets an ID
  repository.save( dbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( dbMeta );
  transMeta.setMaxDateConnection( dbMeta );
  transMeta.setMaxDateTable( EXP_TRANS_MAX_DATE_TABLE );
  transMeta.setMaxDateField( EXP_TRANS_MAX_DATE_FIELD );
  transMeta.setMaxDateOffset( EXP_TRANS_MAX_DATE_OFFSET );
  transMeta.setMaxDateDifference( EXP_TRANS_MAX_DATE_DIFF );
  transMeta.setSizeRowset( EXP_TRANS_SIZE_ROWSET );
  transMeta.setSleepTimeEmpty( EXP_TRANS_SLEEP_TIME_EMPTY );
  transMeta.setSleepTimeFull( EXP_TRANS_SLEEP_TIME_FULL );
  transMeta.setUsingUniqueConnections( EXP_TRANS_USING_UNIQUE_CONN );
  transMeta.setFeedbackShown( EXP_TRANS_FEEDBACK_SHOWN );
  transMeta.setFeedbackSize( EXP_TRANS_FEEDBACK_SIZE );
  transMeta.setUsingThreadPriorityManagment( EXP_TRANS_USING_THREAD_PRIORITY_MGMT );
  transMeta.setSharedObjectsFile( EXP_TRANS_SHARED_OBJECTS_FILE );
  transMeta.setCapturingStepPerformanceSnapShots( EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS );
  transMeta.setStepPerformanceCapturingDelay( EXP_TRANS_STEP_PERF_CAP_DELAY );
  transMeta.addDependency( new TransDependency( dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME ) );
  DatabaseMeta stepDbMeta = createDatabaseMeta( EXP_DBMETA_NAME_STEP.concat( dbName ) );
  repository.save( stepDbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( stepDbMeta );
  Condition cond = new Condition();
  StepMeta step1 = createStepMeta1( transMeta, stepDbMeta, cond );
  transMeta.addStep( step1 );
  StepMeta step2 = createStepMeta2( stepDbMeta, cond );
  transMeta.addStep( step2 );
  transMeta.addTransHop( createTransHopMeta( step1, step2 ) );
  SlaveServer slaveServer = createSlaveServer( dbName );
  PartitionSchema partSchema = createPartitionSchema( dbName );
  // slaveServer, partSchema must be saved so that they get IDs
  repository.save( slaveServer, VERSION_COMMENT_V1, null );
  deleteStack.push( slaveServer );
  repository.save( partSchema, VERSION_COMMENT_V1, null );
  deleteStack.push( partSchema );
  SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
  slaveStepCopyPartitionDistribution.addPartition( EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0 );
  slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas( Arrays.asList( new PartitionSchema[] { partSchema } ) );
  transMeta.setSlaveStepCopyPartitionDistribution( slaveStepCopyPartitionDistribution );
  transMeta.setSlaveTransformation( EXP_TRANS_SLAVE_TRANSFORMATION );
  return transMeta;
}
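A builder like this is normally paired with a save/load round trip. A hedged sketch of such a round trip follows; the loadTransformation call matches the standard Repository interface, but the choice of EXP_DBMETA_NAME as input and the particular assertions are assumptions, since the actual checks in RepositoryTestBase are not shown in this excerpt:
TransMeta transMeta = createTransMeta( EXP_DBMETA_NAME );
repository.save( transMeta, VERSION_COMMENT_V1, null );
deleteStack.push( transMeta );
// load it back and spot-check a few of the fields set above
TransMeta fetched = repository.loadTransformation( transMeta.getName(),
    transMeta.getRepositoryDirectory(), null, false, null );
assertEquals( transMeta.getName(), fetched.getName() );
assertEquals( EXP_TRANS_DESC, fetched.getDescription() );
assertEquals( EXP_TRANS_SIZE_ROWSET, fetched.getSizeRowset() );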
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.
In the class RepositoryTestBase, the method createClusterSchema:
protected ClusterSchema createClusterSchema( String clusterName ) throws Exception {
  ClusterSchema clusterSchema = new ClusterSchema();
  clusterSchema.setName( clusterName );
  clusterSchema.setBasePort( EXP_CLUSTER_SCHEMA_BASE_PORT );
  clusterSchema.setSocketsBufferSize( EXP_CLUSTER_SCHEMA_SOCKETS_BUFFER_SIZE );
  clusterSchema.setSocketsFlushInterval( EXP_CLUSTER_SCHEMA_SOCKETS_FLUSH_INTERVAL );
  clusterSchema.setSocketsCompressed( EXP_CLUSTER_SCHEMA_SOCKETS_COMPRESSED );
  clusterSchema.setDynamic( EXP_CLUSTER_SCHEMA_DYN );
  // the slave server must be saved first so that it gets an ID
  SlaveServer slaveServer = createSlaveServer( clusterName );
  repository.save( slaveServer, VERSION_COMMENT_V1, null );
  deleteStack.push( slaveServer );
  clusterSchema.setSlaveServers( Collections.singletonList( slaveServer ) );
  return clusterSchema;
}
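As with the transformation fixture, the schema only becomes interesting once it survives a save/reload. A sketch under similar assumptions (loadClusterSchema and getSlaveServers are standard Repository methods, but this exact usage is illustrative, not taken from RepositoryTestBase):
ClusterSchema schema = createClusterSchema( "testCluster" );
repository.save( schema, VERSION_COMMENT_V1, null );
deleteStack.push( schema );
// reload by ID and verify the fields set by the builder round-tripped
ClusterSchema fetched =
    repository.loadClusterSchema( schema.getObjectId(), repository.getSlaveServers(), null );
assertEquals( EXP_CLUSTER_SCHEMA_BASE_PORT, fetched.getBasePort() );
assertEquals( 1, fetched.getSlaveServers().size() );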