Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in the pentaho-kettle project (by Pentaho): class AttributesMapUtil, method loadAttributesMap.
/**
 * Reads the attribute groups stored under the {@code NODE_ATTRIBUTE_GROUPS} child of the given
 * data node and installs them as the attributes map of the supplied object.
 * <p>
 * A fresh map is always created and set on {@code attributesInterface}, even when the data node
 * carries no attribute-groups child, so the target never keeps a stale map. Each child node of the
 * groups node becomes one group entry keyed by the node name; each property of that child becomes
 * one key/value attribute. Properties with a {@code null} name or {@code null} string value are
 * skipped.
 *
 * @param dataNode            the repository node to read the attribute groups from
 * @param attributesInterface the object that receives the freshly built attributes map
 * @throws KettleException declared for interface compatibility with other load methods
 */
public static final void loadAttributesMap(DataNode dataNode, AttributesInterface attributesInterface) throws KettleException {
  Map<String, Map<String, String>> attributesMap = new HashMap<String, Map<String, String>>();
  attributesInterface.setAttributesMap(attributesMap);

  DataNode groupsNode = dataNode.getNode(NODE_ATTRIBUTE_GROUPS);
  if (groupsNode != null) {
    // One child node per attribute group; each property on it is a key/value attribute.
    for (DataNode groupNode : groupsNode.getNodes()) {
      HashMap<String, String> attributes = new HashMap<String, String>();
      attributesMap.put(groupNode.getName(), attributes);
      for (DataProperty dataProperty : groupNode.getProperties()) {
        String key = dataProperty.getName();
        String value = dataProperty.getString();
        if (key != null && value != null) {
          attributes.put(key, value);
        }
      }
    }
  }
}
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in the pentaho-kettle project (by Pentaho): class ClusterDelegate, method dataNodeToElement.
/**
 * Restores a {@link ClusterSchema} from its repository representation: reads the scalar
 * cluster settings from the root node's properties, then re-attaches the slave servers
 * referenced from the attributes child node.
 *
 * @param rootNode the repository node holding the serialized cluster schema
 * @param element  the {@link ClusterSchema} instance to populate (cast from the interface)
 * @throws KettleException if a referenced slave server cannot be resolved
 */
public void dataNodeToElement(DataNode rootNode, RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;

  // Scalar metadata first.
  clusterSchema.setBasePort(getString(rootNode, PROP_BASE_PORT));
  clusterSchema.setSocketsBufferSize(getString(rootNode, PROP_SOCKETS_BUFFER_SIZE));
  clusterSchema.setSocketsFlushInterval(getString(rootNode, PROP_SOCKETS_FLUSH_INTERVAL));
  clusterSchema.setSocketsCompressed(rootNode.getProperty(PROP_SOCKETS_COMPRESSED).getBoolean());
  clusterSchema.setDynamic(rootNode.getProperty(PROP_DYNAMIC).getBoolean());

  DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);

  // Slave servers were stored as node references under numeric property names "0".."n-1";
  // skip any index whose property is absent.
  long slaveCount = attrNode.getProperty(PROP_NB_SLAVE_SERVERS).getLong();
  for (int index = 0; index < slaveCount; index++) {
    String propertyName = String.valueOf(index);
    if (attrNode.hasProperty(propertyName)) {
      DataNodeRef slaveRef = attrNode.getProperty(propertyName).getRef();
      // NOTE(review): the id is derived via DataNodeRef.toString() — presumably that yields the
      // referenced node id; confirm against the DataNodeRef contract.
      clusterSchema.getSlaveServers().add(findSlaveServer(new StringObjectId(slaveRef.toString())));
    }
  }
}
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in the pentaho-kettle project (by Pentaho): class ClusterDelegate, method elementToDataNode.
/**
 * Serializes a {@link ClusterSchema} into a repository data node: the scalar cluster settings
 * become properties of the root node, and each slave server is recorded as a node reference
 * under an attributes child node.
 *
 * @param element the {@link ClusterSchema} to serialize (cast from the interface)
 * @return the root data node representing the cluster schema
 * @throws KettleException declared for interface compatibility
 */
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;
  DataNode rootNode = new DataNode(NODE_ROOT);

  // Persist the scalar settings of the schema.
  rootNode.setProperty(PROP_BASE_PORT, clusterSchema.getBasePort());
  rootNode.setProperty(PROP_SOCKETS_BUFFER_SIZE, clusterSchema.getSocketsBufferSize());
  rootNode.setProperty(PROP_SOCKETS_FLUSH_INTERVAL, clusterSchema.getSocketsFlushInterval());
  rootNode.setProperty(PROP_SOCKETS_COMPRESSED, clusterSchema.isSocketsCompressed());
  rootNode.setProperty(PROP_DYNAMIC, clusterSchema.isDynamic());

  // Store the slave servers by reference under numeric property names. Using references makes
  // it impossible to delete a slave server by accident while it is still in use here.
  DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
  attrNode.setProperty(PROP_NB_SLAVE_SERVERS, clusterSchema.getSlaveServers().size());
  int index = 0;
  for (SlaveServer slaveServer : clusterSchema.getSlaveServers()) {
    attrNode.setProperty(String.valueOf(index++), new DataNodeRef(slaveServer.getObjectId().getId()));
  }

  return rootNode;
}
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in the pentaho-kettle project (by Pentaho): class JobDelegate, method saveJobDetails.
/**
 * Writes the job-level metadata of the given {@link JobMeta} onto the repository root node:
 * descriptive fields, job-log-table settings, created/modified audit fields, the log tables'
 * own attributes, and finally the job's generic attributes map.
 *
 * @param rootNode the repository node to write the job details onto
 * @param jobMeta  the job metadata being saved
 * @throws KettleException if saving a log table or the attributes map fails
 */
private void saveJobDetails(DataNode rootNode, JobMeta jobMeta) throws KettleException {
rootNode.setProperty(PROP_EXTENDED_DESCRIPTION, jobMeta.getExtendedDescription());
rootNode.setProperty(PROP_JOB_VERSION, jobMeta.getJobversion());
// Negative status values are normalized to -1 before storing as a long.
rootNode.setProperty(PROP_JOB_STATUS, jobMeta.getJobstatus() < 0 ? -1L : jobMeta.getJobstatus());
// The log database is stored by reference so it cannot be deleted while still in use.
if (jobMeta.getJobLogTable().getDatabaseMeta() != null) {
DataNodeRef ref = new DataNodeRef(jobMeta.getJobLogTable().getDatabaseMeta().getObjectId().getId());
rootNode.setProperty(PROP_DATABASE_LOG, ref);
}
rootNode.setProperty(PROP_TABLE_NAME_LOG, jobMeta.getJobLogTable().getTableName());
rootNode.setProperty(PROP_CREATED_USER, jobMeta.getCreatedUser());
rootNode.setProperty(PROP_CREATED_DATE, jobMeta.getCreatedDate());
rootNode.setProperty(PROP_MODIFIED_USER, jobMeta.getModifiedUser());
rootNode.setProperty(PROP_MODIFIED_DATE, jobMeta.getModifiedDate());
rootNode.setProperty(PROP_USE_BATCH_ID, jobMeta.getJobLogTable().isBatchIdUsed());
rootNode.setProperty(PROP_PASS_BATCH_ID, jobMeta.isBatchIdPassed());
rootNode.setProperty(PROP_USE_LOGFIELD, jobMeta.getJobLogTable().isLogFieldUsed());
rootNode.setProperty(PROP_SHARED_FILE, jobMeta.getSharedObjectsFile());
rootNode.setProperty(PROP_LOG_SIZE_LIMIT, jobMeta.getJobLogTable().getLogSizeLimit());
// Save the logging tables too..
//
RepositoryAttributeInterface attributeInterface = new PurRepositoryAttribute(rootNode, jobMeta.getDatabases());
for (LogTableInterface logTable : jobMeta.getLogTables()) {
logTable.saveToRepository(attributeInterface);
}
// Save the attributes map (the previous comment said "Load", but this persists the map).
//
AttributesMapUtil.saveAttributesMap(rootNode, jobMeta);
}
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in the pentaho-kettle project (by Pentaho): class SlaveDelegate, method elementToDataNode.
/**
 * Serializes a {@link SlaveServer} into a repository data node. All connection settings are
 * stored as properties of the root node; the password is run through
 * {@link Encr#encryptPasswordIfNotUsingVariables(String)} so it is encrypted unless it is
 * expressed via variables.
 *
 * @param element the {@link SlaveServer} to serialize (cast from the interface)
 * @return the root data node representing the slave server
 * @throws KettleException declared for interface compatibility
 */
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  SlaveServer slaveServer = (SlaveServer) element;

  // Create (or version) the node that carries the slave server's connection settings.
  DataNode rootNode = new DataNode(NODE_ROOT);
  rootNode.setProperty(PROP_HOST_NAME, slaveServer.getHostname());
  rootNode.setProperty(PROP_PORT, slaveServer.getPort());
  rootNode.setProperty(PROP_WEBAPP_NAME, slaveServer.getWebAppName());
  rootNode.setProperty(PROP_USERNAME, slaveServer.getUsername());
  rootNode.setProperty(PROP_PASSWORD, Encr.encryptPasswordIfNotUsingVariables(slaveServer.getPassword()));
  rootNode.setProperty(PROP_PROXY_HOST_NAME, slaveServer.getProxyHostname());
  rootNode.setProperty(PROP_PROXY_PORT, slaveServer.getProxyPort());
  rootNode.setProperty(PROP_NON_PROXY_HOSTS, slaveServer.getNonProxyHosts());
  rootNode.setProperty(PROP_MASTER, slaveServer.isMaster());
  return rootNode;
}
Aggregations