Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by Pentaho.
From class SlavesController, method editSlave().
/**
 * Edits the first slave server selected in the table.
 * <p>
 * Verifies the slave still exists in the repository, opens the edit dialog and,
 * when confirmed with a valid name, saves the slave back to the repository and
 * synchronizes the shared copies. The table is refreshed in all cases.
 */
public void editSlave() {
  String slaveServerName = "";
  try {
    Collection<UISlave> slaves = slavesTable.getSelectedItems();
    if ( slaves == null || slaves.isEmpty() ) {
      // Nothing selected in the table: tell the user and bail out.
      showEditError( BaseMessages.getString( PKG, "RepositoryExplorerDialog.Slave.NoItemSelected.Message" ) );
      return;
    }
    // Only the first selected item is sent to the slave dialog.
    SlaveServer slaveServer = slaves.iterator().next().getSlaveServer();
    slaveServerName = slaveServer.getName();
    // Make sure the slave already exists in the repository.
    ObjectId slaveId = repository.getSlaveID( slaveServer.getName() );
    if ( slaveId == null ) {
      showEditError( BaseMessages.getString( PKG, "RepositoryExplorerDialog.Slave.DoesNotExists.Message", slaveServerName ) );
      return;
    }
    SlaveServerDialog dialog = new SlaveServerDialog( shell, slaveServer );
    if ( !dialog.open() ) {
      // User cancelled the dialog: nothing to save.
      return;
    }
    if ( slaveServer.getName() == null || slaveServer.getName().equals( "" ) ) {
      showEditError( BaseMessages.getString( PKG, "RepositoryExplorerDialog.Slave.Edit.InvalidName.Message" ) );
      return;
    }
    repository.insertLogEntry( BaseMessages.getString( PKG, "SlavesController.Message.UpdatingSlave", slaveServer.getName() ) );
    repository.save( slaveServer, Const.VERSION_COMMENT_EDIT_VERSION, null );
    if ( getSharedObjectSyncUtil() != null ) {
      // Propagate the edit (including a possible rename) to shared copies.
      getSharedObjectSyncUtil().synchronizeSlaveServers( slaveServer, slaveServerName );
    }
  } catch ( KettleException e ) {
    if ( mainController == null || !mainController.handleLostRepository( e ) ) {
      new ErrorDialog( shell, BaseMessages.getString( PKG, "RepositoryExplorerDialog.Slave.Edit.Title" ), BaseMessages.getString( PKG, "RepositoryExplorerDialog.Slave.Edit.UnexpectedError.Message" ) + slaveServerName + "]", e );
    }
  } finally {
    refreshSlaves();
  }
}

/** Pops up a modal error box using the standard "edit slave" dialog title. */
private void showEditError( String message ) {
  MessageBox messageBox = new MessageBox( shell, SWT.ICON_ERROR | SWT.OK );
  messageBox.setMessage( message );
  messageBox.setText( BaseMessages.getString( PKG, "RepositoryExplorerDialog.Slave.Edit.Title" ) );
  messageBox.open();
}
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by Pentaho.
From class Job, method sendToSlaveServer().
/**
 * Sends a job to a slave (Carte) server and starts it there.
 * <p>
 * Depending on {@code executionConfiguration.isPassingExport()}, either the complete job
 * export (a zip with its dependencies) is shipped to the server, or only the job XML is
 * posted. In both cases the registered job is subsequently started remotely.
 *
 * @param jobMeta
 *          the job meta
 * @param executionConfiguration
 *          the execution configuration (remote server, log level, variables, ...)
 * @param repository
 *          the repository, used while serializing the export
 * @param metaStore
 *          the metaStore
 * @return the Carte object id identifying the job on the slave server
 * @throws KettleException
 *           if no remote server is specified, the job has no name, or any step of the
 *           remote hand-off reports an error
 */
public static String sendToSlaveServer( JobMeta jobMeta, JobExecutionConfiguration executionConfiguration, Repository repository, IMetaStore metaStore ) throws KettleException {
  String carteObjectId;
  SlaveServer slaveServer = executionConfiguration.getRemoteServer();
  if ( slaveServer == null ) {
    throw new KettleException( BaseMessages.getString( PKG, "Job.Log.NoSlaveServerSpecified" ) );
  }
  if ( Utils.isEmpty( jobMeta.getName() ) ) {
    // The name is required: it is used to address the job on the slave server below.
    throw new KettleException( BaseMessages.getString( PKG, "Job.Log.UniqueJobName" ) );
  }
  // Align logging levels between execution configuration and remote server
  slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() );
  try {
    // Pass the internal transformation and job variables along to the remote execution.
    for ( String var : Const.INTERNAL_TRANS_VARIABLES ) {
      executionConfiguration.getVariables().put( var, jobMeta.getVariable( var ) );
    }
    for ( String var : Const.INTERNAL_JOB_VARIABLES ) {
      executionConfiguration.getVariables().put( var, jobMeta.getVariable( var ) );
    }
    if ( executionConfiguration.isPassingExport() ) {
      // First export the job to a temporary zip file...
      FileObject tempFile = KettleVFS.createTempFile( "jobExport", ".zip", System.getProperty( "java.io.tmpdir" ), jobMeta );
      TopLevelResource topLevelResource = ResourceUtil.serializeResourceExportInterface( tempFile.getName().toString(), jobMeta, jobMeta, repository, metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME );
      // ...then send the zip file over to the slave server.
      String result = slaveServer.sendExport( topLevelResource.getArchiveName(), RegisterPackageServlet.TYPE_JOB, topLevelResource.getBaseResourceName() );
      carteObjectId = extractCarteObjectId( result, "There was an error passing the exported job to the remote server: " );
    } else {
      // Only post the job definition XML to the slave server.
      String xml = new JobConfiguration( jobMeta, executionConfiguration ).getXML();
      String reply = slaveServer.sendXML( xml, RegisterJobServlet.CONTEXT_PATH + "/?xml=Y" );
      carteObjectId = extractCarteObjectId( reply, "There was an error posting the job on the remote server: " );
    }
    // Start the job we just registered on the slave server.
    String reply = slaveServer.execService( StartJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( jobMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
    extractCarteObjectId( reply, "There was an error starting the job on the remote server: " );
    return carteObjectId;
  } catch ( KettleException ke ) {
    // Re-throw KettleExceptions untouched so their messages are not double-wrapped.
    throw ke;
  } catch ( Exception e ) {
    throw new KettleException( e );
  }
}

/**
 * Parses a web reply from the slave server and verifies that it reports OK.
 *
 * @param replyXml the XML reply as returned by the slave server
 * @param errorPrefix human-readable prefix for the exception message when the reply is not OK
 * @return the Carte object id contained in the reply
 * @throws KettleException when the reply cannot be parsed or does not report OK
 */
private static String extractCarteObjectId( String replyXml, String errorPrefix ) throws KettleException {
  WebResult webResult = WebResult.fromXMLString( replyXml );
  if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
    throw new KettleException( errorPrefix + Const.CR + webResult.getMessage() );
  }
  return webResult.getId();
}
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by Pentaho.
From class JobMeta, method loadXML().
/**
 * Load a block of XML from a DOM node (a job definition).
 * <p>
 * The current contents of this JobMeta are cleared first. Shared objects are read from the
 * shared objects file or the repository, after which the job's own connections, slave servers,
 * log tables, job entries, hops, notes and attribute groups are parsed from the node.
 * The internal Kettle variables are always recomputed, even when loading fails.
 *
 * @param jobnode The node to load from
 * @param fname The filename; used to set this job's filename when no repository is given
 * @param rep The reference to a repository to load additional information from (may be null)
 * @param metaStore the MetaStore to use
 * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately
 * @param prompter The prompter to use in case a shared object gets overwritten
 * @throws KettleXMLException when anything goes wrong while parsing the node
 */
public void loadXML(Node jobnode, String fname, Repository rep, IMetaStore metaStore, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter) throws KettleXMLException {
// Props (when initialized) are handed to shouldOverwrite() below — presumably backing the
// "don't show this again" option of the overwrite prompter; TODO confirm.
Props props = null;
if (Props.isInitialized()) {
props = Props.getInstance();
}
try {
// clear the jobs;
clear();
// Set the filename here so it can be used in variables for ALL aspects of the job FIX: PDI-8890
if (null == rep) {
setFilename(fname);
} else {
// Set the repository here so it can be used in variables for ALL aspects of the job FIX: PDI-16441
setRepository(rep);
}
//
// get job info:
//
setName(XMLHandler.getTagValue(jobnode, "name"));
//
// When attached to a repository, resolve the directory this job lives in.
if (rep != null) {
String directoryPath = XMLHandler.getTagValue(jobnode, "directory");
if (directoryPath != null) {
directory = rep.findDirectory(directoryPath);
if (directory == null) {
// not found
// The root as default
directory = new RepositoryDirectory();
}
}
}
// description
description = XMLHandler.getTagValue(jobnode, "description");
// extended description
extendedDescription = XMLHandler.getTagValue(jobnode, "extended_description");
// job version
jobVersion = XMLHandler.getTagValue(jobnode, "job_version");
// job status; -1 when the tag is absent or not a number
jobStatus = Const.toInt(XMLHandler.getTagValue(jobnode, "job_status"), -1);
// Created user/date
createdUser = XMLHandler.getTagValue(jobnode, "created_user");
String createDate = XMLHandler.getTagValue(jobnode, "created_date");
if (createDate != null) {
createdDate = XMLHandler.stringToDate(createDate);
}
// Changed user/date
modifiedUser = XMLHandler.getTagValue(jobnode, "modified_user");
String modDate = XMLHandler.getTagValue(jobnode, "modified_date");
if (modDate != null) {
modifiedDate = XMLHandler.stringToDate(modDate);
}
// Read objects from the shared XML file & the repository.
// A failure here is logged but does not abort the load.
try {
sharedObjectsFile = XMLHandler.getTagValue(jobnode, "shared_objects_file");
if (rep == null || ignoreRepositorySharedObjects) {
sharedObjects = readSharedObjects();
} else {
sharedObjects = rep.readJobMetaSharedObjects(this);
}
} catch (Exception e) {
LogChannel.GENERAL.logError(BaseMessages.getString(PKG, "JobMeta.ErrorReadingSharedObjects.Message", e.toString()));
LogChannel.GENERAL.logError(Const.getStackTracker(e));
}
// Load the database connections, slave servers, cluster schemas & partition schemas into this object.
//
importFromMetaStore();
// Read the named parameters.
Node paramsNode = XMLHandler.getSubNode(jobnode, XML_TAG_PARAMETERS);
int nrParams = XMLHandler.countNodes(paramsNode, "parameter");
for (int i = 0; i < nrParams; i++) {
Node paramNode = XMLHandler.getSubNodeByNr(paramsNode, "parameter", i);
String paramName = XMLHandler.getTagValue(paramNode, "name");
String defValue = XMLHandler.getTagValue(paramNode, "default_value");
String descr = XMLHandler.getTagValue(paramNode, "description");
addParameterDefinition(paramName, defValue, descr);
}
//
// Read the database connections. Connections embedded in the job XML are a backup:
// an already-loaded shared connection is only replaced when the user agrees.
//
int nr = XMLHandler.countNodes(jobnode, "connection");
Set<String> privateDatabases = new HashSet<String>(nr);
for (int i = 0; i < nr; i++) {
Node dbnode = XMLHandler.getSubNodeByNr(jobnode, "connection", i);
DatabaseMeta dbcon = new DatabaseMeta(dbnode);
dbcon.shareVariablesWith(this);
if (!dbcon.isShared()) {
privateDatabases.add(dbcon.getName());
}
DatabaseMeta exist = findDatabase(dbcon.getName());
if (exist == null) {
addDatabase(dbcon);
} else {
if (!exist.isShared()) {
// skip shared connections
if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.Message", dbcon.getName()), BaseMessages.getString(PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"))) {
int idx = indexOfDatabase(exist);
removeDatabase(idx);
addDatabase(idx, dbcon);
}
}
}
}
setPrivateDatabases(privateDatabases);
// Read the slave servers...
//
Node slaveServersNode = XMLHandler.getSubNode(jobnode, XML_TAG_SLAVESERVERS);
int nrSlaveServers = XMLHandler.countNodes(slaveServersNode, SlaveServer.XML_TAG);
for (int i = 0; i < nrSlaveServers; i++) {
Node slaveServerNode = XMLHandler.getSubNodeByNr(slaveServersNode, SlaveServer.XML_TAG, i);
SlaveServer slaveServer = new SlaveServer(slaveServerNode);
slaveServer.shareVariablesWith(this);
// Check if the object exists and if it's a shared object.
// If so, then we will keep the shared version, not this one.
// The stored XML is only for backup purposes.
SlaveServer check = findSlaveServer(slaveServer.getName());
if (check != null) {
if (!check.isShared()) {
// we don't overwrite shared objects.
if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "JobMeta.Dialog.SlaveServerExistsOverWrite.Message", slaveServer.getName()), BaseMessages.getString(PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"))) {
addOrReplaceSlaveServer(slaveServer);
}
}
} else {
slaveServers.add(slaveServer);
}
}
/*
 * Get the log database connection & log table
 */
// Backward compatibility: older jobs stored the log settings in flat tags
// instead of a dedicated job-log-table node.
//
Node jobLogNode = XMLHandler.getSubNode(jobnode, JobLogTable.XML_TAG);
if (jobLogNode == null) {
// Load the XML
//
jobLogTable.setConnectionName(XMLHandler.getTagValue(jobnode, "logconnection"));
jobLogTable.setTableName(XMLHandler.getTagValue(jobnode, "logtable"));
jobLogTable.setBatchIdUsed("Y".equalsIgnoreCase(XMLHandler.getTagValue(jobnode, "use_batchid")));
jobLogTable.setLogFieldUsed("Y".equalsIgnoreCase(XMLHandler.getTagValue(jobnode, "use_logfield")));
// These fields did not exist in the old format, so disable them.
jobLogTable.findField(JobLogTable.ID.CHANNEL_ID).setEnabled(false);
jobLogTable.findField(JobLogTable.ID.LINES_REJECTED).setEnabled(false);
} else {
jobLogTable.loadXML(jobLogNode, databases, null);
}
Node channelLogTableNode = XMLHandler.getSubNode(jobnode, ChannelLogTable.XML_TAG);
if (channelLogTableNode != null) {
channelLogTable.loadXML(channelLogTableNode, databases, null);
}
jobEntryLogTable.loadXML(jobnode, databases, null);
for (LogTableInterface extraLogTable : extraLogTables) {
extraLogTable.loadXML(jobnode, databases, null);
}
batchIdPassed = "Y".equalsIgnoreCase(XMLHandler.getTagValue(jobnode, "pass_batchid"));
/*
 * read the job entries...
 */
Node entriesnode = XMLHandler.getSubNode(jobnode, "entries");
int tr = XMLHandler.countNodes(entriesnode, "entry");
for (int i = 0; i < tr; i++) {
Node entrynode = XMLHandler.getSubNodeByNr(entriesnode, "entry", i);
// System.out.println("Reading entry:\n"+entrynode);
JobEntryCopy je = new JobEntryCopy(entrynode, databases, slaveServers, rep, metaStore);
if (je.isSpecial() && je.isMissing()) {
addMissingEntry((MissingEntry) je.getEntry());
}
// Multiple copies of the same entry share one underlying JobEntryInterface;
// copy nr 0 replaces an earlier load, higher numbers reuse the existing entry.
JobEntryCopy prev = findJobEntry(je.getName(), 0, true);
if (prev != null) {
//
if (je.getNr() == 0) {
// Replace previous version with this one: remove it first
//
int idx = indexOfJobEntry(prev);
removeJobEntry(idx);
} else if (je.getNr() > 0) {
// Use previously defined JobEntry info!
//
je.setEntry(prev.getEntry());
// See if entry already exists...
prev = findJobEntry(je.getName(), je.getNr(), true);
if (prev != null) {
// remove the old one!
//
int idx = indexOfJobEntry(prev);
removeJobEntry(idx);
}
}
}
// Add the JobEntryCopy...
addJobEntry(je);
}
// Read the hops between the entries loaded above.
Node hopsnode = XMLHandler.getSubNode(jobnode, "hops");
int ho = XMLHandler.countNodes(hopsnode, "hop");
for (int i = 0; i < ho; i++) {
Node hopnode = XMLHandler.getSubNodeByNr(hopsnode, "hop", i);
JobHopMeta hi = new JobHopMeta(hopnode, this);
jobhops.add(hi);
}
// Read the notes...
Node notepadsnode = XMLHandler.getSubNode(jobnode, "notepads");
int nrnotes = XMLHandler.countNodes(notepadsnode, "notepad");
for (int i = 0; i < nrnotes; i++) {
Node notepadnode = XMLHandler.getSubNodeByNr(notepadsnode, "notepad", i);
NotePadMeta ni = new NotePadMeta(notepadnode);
notes.add(ni);
}
// Load the attribute groups map
//
attributesMap = AttributesUtil.loadAttributes(XMLHandler.getSubNode(jobnode, AttributesUtil.XML_TAG));
// Let plugins react to the fully loaded job.
ExtensionPointHandler.callExtensionPoint(LogChannel.GENERAL, KettleExtensionPoint.JobMetaLoaded.id, this);
clearChanged();
} catch (Exception e) {
throw new KettleXMLException(BaseMessages.getString(PKG, "JobMeta.Exception.UnableToLoadJobFromXMLNode"), e);
} finally {
// Always recompute the internal variables, even when loading failed.
setInternalKettleVariables();
}
}
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by Pentaho.
From class JobMeta, method realClone().
/**
 * Perform a real clone of the job meta-data object, including cloning all lists and copying all values. If the
 * doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied
 * fields will be cleared.
 *
 * @param doClear Whether to clear all of the clone's data before copying from the source object
 * @return a real clone of the calling object, or {@code null} when cloning fails
 */
public Object realClone(boolean doClear) {
  try {
    JobMeta jobMeta = (JobMeta) super.clone();
    if (doClear) {
      jobMeta.clear();
    } else {
      // Only reset the collections that are repopulated below, so the shallow
      // references produced by super.clone() are not shared with this object.
      jobMeta.jobcopies = new ArrayList<JobEntryCopy>();
      jobMeta.jobhops = new ArrayList<JobHopMeta>();
      jobMeta.notes = new ArrayList<NotePadMeta>();
      jobMeta.databases = new ArrayList<DatabaseMeta>();
      jobMeta.slaveServers = new ArrayList<SlaveServer>();
      jobMeta.namedParams = new NamedParamsDefault();
    }
    // Deep-copy every owned object into the clone.
    for (JobEntryCopy entry : jobcopies) {
      jobMeta.jobcopies.add((JobEntryCopy) entry.clone_deep());
    }
    for (JobHopMeta entry : jobhops) {
      jobMeta.jobhops.add((JobHopMeta) entry.clone());
    }
    for (NotePadMeta entry : notes) {
      jobMeta.notes.add((NotePadMeta) entry.clone());
    }
    for (DatabaseMeta entry : databases) {
      jobMeta.databases.add((DatabaseMeta) entry.clone());
    }
    for (SlaveServer slave : slaveServers) {
      jobMeta.getSlaveServers().add((SlaveServer) slave.clone());
    }
    for (String key : listParameters()) {
      jobMeta.addParameterDefinition(key, getParameterDefault(key), getParameterDescription(key));
    }
    return jobMeta;
  } catch (Exception e) {
    // Preserve the historical "null on failure" contract for callers,
    // but log the cause instead of swallowing it silently.
    LogChannel.GENERAL.logError("Error performing a real clone of job \"" + getName() + "\"", e);
    return null;
  }
}
Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by Pentaho.
From class RepositoryImporter, method replaceSharedObjects().
/**
 * Merges the shared database connections and slave servers into the given meta.
 * <p>
 * Objects not yet present are simply added. For objects that already exist, the
 * shared definition replaces the existing one either when both are equal or when
 * the user does not object via the prompt; otherwise the existing object is
 * flagged as changed.
 */
private void replaceSharedObjects( AbstractMeta abstractMeta ) {
  // Database connections...
  for ( DatabaseMeta sharedDatabase : getSharedObjects( DatabaseMeta.class ) ) {
    int existingIndex = abstractMeta.indexOfDatabase( sharedDatabase );
    if ( existingIndex < 0 ) {
      abstractMeta.addDatabase( sharedDatabase );
      continue;
    }
    DatabaseMeta existing = abstractMeta.getDatabase( existingIndex );
    // Preserve the object id so we can update without having to look up the id
    existing.setObjectId( sharedDatabase.getObjectId() );
    boolean replaceExisting = equals( sharedDatabase, existing ) || !getPromptResult( BaseMessages.getString( PKG, "RepositoryImporter.Dialog.ConnectionExistsOverWrite.Message", existing.getName() ), BaseMessages.getString( PKG, "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage" ), IMPORT_ASK_ABOUT_REPLACE_DB );
    if ( replaceExisting ) {
      existing.replaceMeta( sharedDatabase );
      // We didn't actually change anything
      existing.clearChanged();
    } else {
      existing.setChanged();
    }
  }
  // Slave servers...
  for ( SlaveServer sharedSlave : getSharedObjects( SlaveServer.class ) ) {
    int existingIndex = abstractMeta.getSlaveServers().indexOf( sharedSlave );
    if ( existingIndex < 0 ) {
      abstractMeta.getSlaveServers().add( sharedSlave );
      continue;
    }
    SlaveServer existing = abstractMeta.getSlaveServers().get( existingIndex );
    // Preserve the object id so we can update without having to look up the id
    existing.setObjectId( sharedSlave.getObjectId() );
    boolean replaceExisting = equals( sharedSlave, existing ) || !getPromptResult( BaseMessages.getString( PKG, "RepositoryImporter.Dialog.SlaveServerExistsOverWrite.Message", existing.getName() ), BaseMessages.getString( PKG, "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage" ), IMPORT_ASK_ABOUT_REPLACE_SS );
    if ( replaceExisting ) {
      existing.replaceMeta( sharedSlave );
      // We didn't actually change anything
      existing.clearChanged();
    } else {
      existing.setChanged();
    }
  }
}
Aggregations