Use of org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef in project pentaho-kettle by pentaho.
Class ClusterDelegate, method dataNodeToElement:
public void dataNodeToElement(DataNode rootNode, RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;
  // The metadata...
  clusterSchema.setBasePort(getString(rootNode, PROP_BASE_PORT));
  clusterSchema.setSocketsBufferSize(getString(rootNode, PROP_SOCKETS_BUFFER_SIZE));
  clusterSchema.setSocketsFlushInterval(getString(rootNode, PROP_SOCKETS_FLUSH_INTERVAL));
  clusterSchema.setSocketsCompressed(rootNode.getProperty(PROP_SOCKETS_COMPRESSED).getBoolean());
  clusterSchema.setDynamic(rootNode.getProperty(PROP_DYNAMIC).getBoolean());
  DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);
  // The slaves...
  long nrSlaves = attrNode.getProperty(PROP_NB_SLAVE_SERVERS).getLong();
  for (int i = 0; i < nrSlaves; i++) {
    if (attrNode.hasProperty(String.valueOf(i))) {
      // DataNodeRef.toString() yields the id of the referenced slave server node
      DataNodeRef slaveNodeRef = attrNode.getProperty(String.valueOf(i)).getRef();
      clusterSchema.getSlaveServers().add(findSlaveServer(new StringObjectId(slaveNodeRef.toString())));
    }
  }
}
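For context, a minimal sketch of how this deserialization side might be driven. Only dataNodeToElement itself comes from the source above; the delegate and node variables are hypothetical stand-ins.

// Hypothetical usage sketch: `delegate` and `clusterNode` are assumed to exist.
ClusterSchema schema = new ClusterSchema();
delegate.dataNodeToElement(clusterNode, schema);
// The slave servers were stored as DataNodeRefs, so they come back resolved by object id:
for (SlaveServer slave : schema.getSlaveServers()) {
  System.out.println("cluster member: " + slave.getName());
}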
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef in project pentaho-kettle by pentaho.
Class ClusterDelegate, method elementToDataNode:
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  ClusterSchema clusterSchema = (ClusterSchema) element;
  DataNode rootNode = new DataNode(NODE_ROOT);
  // Save the properties...
  rootNode.setProperty(PROP_BASE_PORT, clusterSchema.getBasePort());
  rootNode.setProperty(PROP_SOCKETS_BUFFER_SIZE, clusterSchema.getSocketsBufferSize());
  rootNode.setProperty(PROP_SOCKETS_FLUSH_INTERVAL, clusterSchema.getSocketsFlushInterval());
  rootNode.setProperty(PROP_SOCKETS_COMPRESSED, clusterSchema.isSocketsCompressed());
  rootNode.setProperty(PROP_DYNAMIC, clusterSchema.isDynamic());
  DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
  // Also save the references to the slave servers that are used.
  attrNode.setProperty(PROP_NB_SLAVE_SERVERS, clusterSchema.getSlaveServers().size());
  for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
    SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
    DataNodeRef slaveNodeRef = new DataNodeRef(slaveServer.getObjectId().getId());
    // Save the slave server by reference; that way it becomes impossible to accidentally
    // delete the slave server while it is still in use.
    attrNode.setProperty(String.valueOf(i), slaveNodeRef);
  }
  return rootNode;
}
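Together with dataNodeToElement above, this gives a full round trip. A hedged sketch, assuming a delegate wired to a repository that can resolve the slave-server ids:

// Illustrative round trip through an assumed delegate instance.
DataNode node = delegate.elementToDataNode(clusterSchema); // slaves written as DataNodeRefs
ClusterSchema copy = new ClusterSchema();
delegate.dataNodeToElement(node, copy); // refs resolved back to SlaveServer objects

The point of a DataNodeRef over a plain name property is that the unified repository tracks the reference itself, which is what makes the accidental-deletion protection mentioned in the comment possible.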
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef in project pentaho-kettle by pentaho.
Class JobDelegate, method saveJobDetails:
private void saveJobDetails(DataNode rootNode, JobMeta jobMeta) throws KettleException {
  rootNode.setProperty(PROP_EXTENDED_DESCRIPTION, jobMeta.getExtendedDescription());
  rootNode.setProperty(PROP_JOB_VERSION, jobMeta.getJobversion());
  rootNode.setProperty(PROP_JOB_STATUS, jobMeta.getJobstatus() < 0 ? -1L : jobMeta.getJobstatus());
  if (jobMeta.getJobLogTable().getDatabaseMeta() != null) {
    // Store the log connection by reference so it cannot be deleted while still in use
    DataNodeRef ref = new DataNodeRef(jobMeta.getJobLogTable().getDatabaseMeta().getObjectId().getId());
    rootNode.setProperty(PROP_DATABASE_LOG, ref);
  }
  rootNode.setProperty(PROP_TABLE_NAME_LOG, jobMeta.getJobLogTable().getTableName());
  rootNode.setProperty(PROP_CREATED_USER, jobMeta.getCreatedUser());
  rootNode.setProperty(PROP_CREATED_DATE, jobMeta.getCreatedDate());
  rootNode.setProperty(PROP_MODIFIED_USER, jobMeta.getModifiedUser());
  rootNode.setProperty(PROP_MODIFIED_DATE, jobMeta.getModifiedDate());
  rootNode.setProperty(PROP_USE_BATCH_ID, jobMeta.getJobLogTable().isBatchIdUsed());
  rootNode.setProperty(PROP_PASS_BATCH_ID, jobMeta.isBatchIdPassed());
  rootNode.setProperty(PROP_USE_LOGFIELD, jobMeta.getJobLogTable().isLogFieldUsed());
  rootNode.setProperty(PROP_SHARED_FILE, jobMeta.getSharedObjectsFile());
  rootNode.setProperty(PROP_LOG_SIZE_LIMIT, jobMeta.getJobLogTable().getLogSizeLimit());
  // Save the logging tables too...
  //
  RepositoryAttributeInterface attributeInterface = new PurRepositoryAttribute(rootNode, jobMeta.getDatabases());
  for (LogTableInterface logTable : jobMeta.getLogTables()) {
    logTable.saveToRepository(attributeInterface);
  }
  // Save the attributes map as well
  //
  AttributesMapUtil.saveAttributesMap(rootNode, jobMeta);
}
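The read-back counterpart for the PROP_DATABASE_LOG reference is not part of this snippet. A minimal sketch of what it might look like, assuming the job's database list is already loaded; the resolution step is illustrative, not the actual JobDelegate code:

// Hypothetical read-side sketch for the PROP_DATABASE_LOG reference above.
if (rootNode.hasProperty(PROP_DATABASE_LOG)) {
  DataNodeRef ref = rootNode.getProperty(PROP_DATABASE_LOG).getRef();
  DatabaseMeta logDb = DatabaseMeta.findDatabase(jobMeta.getDatabases(), new StringObjectId(ref.toString()));
  if (logDb != null) {
    jobMeta.getJobLogTable().setConnectionName(logDb.getName());
  }
}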
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef in project pentaho-kettle by pentaho.
Class RepositoryProxy, method saveDatabaseMetaJobEntryAttribute:
public void saveDatabaseMetaJobEntryAttribute(ObjectId idJob, ObjectId idJobentry, int nr, String nameCode, String code, DatabaseMeta database) throws KettleException {
  if (database != null && database.getObjectId() != null) {
    DataNodeRef ref = new DataNodeRef(database.getObjectId().getId());
    node.setProperty(code + PROP_CODE_NR_SEPARATOR + nr, ref);
  }
}
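The matching load path is not shown here either; a hypothetical read-side counterpart might look like the following. The method name, signature, and lookup are assumptions for illustration, not the actual RepositoryProxy API:

// Hypothetical counterpart; `node` is the same DataNode the proxy wraps.
public DatabaseMeta loadDatabaseMetaJobEntryAttribute(int nr, String code, List<DatabaseMeta> databases) throws KettleException {
  String key = code + PROP_CODE_NR_SEPARATOR + nr;
  if (node.hasProperty(key)) {
    DataNodeRef ref = node.getProperty(key).getRef();
    return DatabaseMeta.findDatabase(databases, new StringObjectId(ref.toString()));
  }
  return null;
}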
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNodeRef in project pentaho-kettle by pentaho.
Class TransDelegate, method elementToDataNode:
public DataNode elementToDataNode(final RepositoryElementInterface element) throws KettleException {
  TransMeta transMeta = (TransMeta) element;
  DataNode rootNode = new DataNode(NODE_TRANS);
  if (transMeta.getPrivateDatabases() != null) {
    // Save the names of all the transformation's private databases, see http://jira.pentaho.com/browse/PPP-3405
    String privateDatabaseNames = StringUtils.join(transMeta.getPrivateDatabases(), TRANS_PRIVATE_DATABASE_DELIMITER);
    DataNode privateDatabaseNode = rootNode.addNode(NODE_TRANS_PRIVATE_DATABASES);
    privateDatabaseNode.setProperty(PROP_TRANS_PRIVATE_DATABASE_NAMES, privateDatabaseNames);
  }
  DataNode stepsNode = rootNode.addNode(NODE_STEPS);
  // Also save all the steps in the transformation!
  //
  int stepNr = 0;
  for (StepMeta step : transMeta.getSteps()) {
    stepNr++;
    DataNode stepNode = stepsNode.addNode(sanitizeNodeName(step.getName()) + "_" + stepNr + EXT_STEP); // $NON-NLS-1$
    // Store the main data
    //
    stepNode.setProperty(PROP_NAME, step.getName());
    stepNode.setProperty(PROP_DESCRIPTION, step.getDescription());
    stepNode.setProperty(PROP_STEP_TYPE, step.getStepID());
    stepNode.setProperty(PROP_STEP_DISTRIBUTE, step.isDistributes());
    stepNode.setProperty(PROP_STEP_ROW_DISTRIBUTION, step.getRowDistribution() == null ? null : step.getRowDistribution().getCode());
    stepNode.setProperty(PROP_STEP_COPIES, step.getCopies());
    stepNode.setProperty(PROP_STEP_COPIES_STRING, step.getCopiesString());
    stepNode.setProperty(PROP_STEP_GUI_LOCATION_X, step.getLocation().x);
    stepNode.setProperty(PROP_STEP_GUI_LOCATION_Y, step.getLocation().y);
    stepNode.setProperty(PROP_STEP_GUI_DRAW, step.isDrawn());
    // Also save the step group attributes map
    //
    AttributesMapUtil.saveAttributesMap(stepNode, step);
    // Save the step metadata using the repository save method, NOT XML.
    // That is because we want to keep the links to databases, conditions, etc. by ID, not by name.
    //
    StepMetaInterface stepMetaInterface = step.getStepMetaInterface();
    DataNode stepCustomNode = new DataNode(NODE_STEP_CUSTOM);
    Repository proxy = new RepositoryProxy(stepCustomNode);
    compatibleSaveRep(stepMetaInterface, proxy, null, null);
    stepMetaInterface.saveRep(proxy, proxy.getMetaStore(), null, null);
    stepNode.addNode(stepCustomNode);
    // Save the partitioning information by reference as well...
    //
    StepPartitioningMeta partitioningMeta = step.getStepPartitioningMeta();
    if (partitioningMeta != null && partitioningMeta.getPartitionSchema() != null && partitioningMeta.isPartitioned()) {
      DataNodeRef ref = new DataNodeRef(partitioningMeta.getPartitionSchema().getObjectId().getId());
      stepNode.setProperty(PROP_PARTITIONING_SCHEMA, ref);
      // method of partitioning
      stepNode.setProperty(PROP_PARTITIONING_METHOD, partitioningMeta.getMethodCode());
      if (partitioningMeta.getPartitioner() != null) {
        DataNode partitionerCustomNode = new DataNode(NODE_PARTITIONER_CUSTOM);
        proxy = new RepositoryProxy(partitionerCustomNode);
        partitioningMeta.getPartitioner().saveRep(proxy, null, null);
        stepNode.addNode(partitionerCustomNode);
      }
    }
    // Save the clustering information as well...
    //
    stepNode.setProperty(PROP_CLUSTER_SCHEMA, step.getClusterSchema() == null ? "" : step.getClusterSchema().getName()); // $NON-NLS-1$
    // Save the error hop metadata
    //
    StepErrorMeta stepErrorMeta = step.getStepErrorMeta();
    if (stepErrorMeta != null) {
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_SOURCE_STEP, stepErrorMeta.getSourceStep() != null ? stepErrorMeta.getSourceStep().getName() : ""); // $NON-NLS-1$
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_TARGET_STEP, stepErrorMeta.getTargetStep() != null ? stepErrorMeta.getTargetStep().getName() : ""); // $NON-NLS-1$
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_IS_ENABLED, stepErrorMeta.isEnabled());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_NR_VALUENAME, stepErrorMeta.getNrErrorsValuename());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_DESCRIPTIONS_VALUENAME, stepErrorMeta.getErrorDescriptionsValuename());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_FIELDS_VALUENAME, stepErrorMeta.getErrorFieldsValuename());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_CODES_VALUENAME, stepErrorMeta.getErrorCodesValuename());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_MAX_ERRORS, stepErrorMeta.getMaxErrors());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_MAX_PCT_ERRORS, stepErrorMeta.getMaxPercentErrors());
      stepNode.setProperty(PROP_STEP_ERROR_HANDLING_MIN_PCT_ROWS, stepErrorMeta.getMinPercentRows());
    }
  }
  // Save the notes
  //
  DataNode notesNode = rootNode.addNode(NODE_NOTES);
  notesNode.setProperty(PROP_NR_NOTES, transMeta.nrNotes());
  for (int i = 0; i < transMeta.nrNotes(); i++) {
    NotePadMeta note = transMeta.getNote(i);
    DataNode noteNode = notesNode.addNode(NOTE_PREFIX + i);
    noteNode.setProperty(PROP_XML, note.getXML());
  }
  // Finally, save the hops
  //
  DataNode hopsNode = rootNode.addNode(NODE_HOPS);
  hopsNode.setProperty(PROP_NR_HOPS, transMeta.nrTransHops());
  for (int i = 0; i < transMeta.nrTransHops(); i++) {
    TransHopMeta hop = transMeta.getTransHop(i);
    DataNode hopNode = hopsNode.addNode(TRANS_HOP_PREFIX + i);
    hopNode.setProperty(TRANS_HOP_FROM, hop.getFromStep().getName());
    hopNode.setProperty(TRANS_HOP_TO, hop.getToStep().getName());
    hopNode.setProperty(TRANS_HOP_ENABLED, hop.isEnabled());
  }
  // Parameters
  //
  String[] paramKeys = transMeta.listParameters();
  DataNode paramsNode = rootNode.addNode(NODE_PARAMETERS);
  paramsNode.setProperty(PROP_NR_PARAMETERS, paramKeys == null ? 0 : paramKeys.length);
  // Guard against a null key array, consistent with the null check above
  for (int idx = 0; paramKeys != null && idx < paramKeys.length; idx++) {
    DataNode paramNode = paramsNode.addNode(TRANS_PARAM_PREFIX + idx);
    String key = paramKeys[idx];
    String description = transMeta.getParameterDescription(paramKeys[idx]);
    String defaultValue = transMeta.getParameterDefault(paramKeys[idx]);
    paramNode.setProperty(PARAM_KEY, key != null ? key : ""); // $NON-NLS-1$
    paramNode.setProperty(PARAM_DEFAULT, defaultValue != null ? defaultValue : ""); // $NON-NLS-1$
    paramNode.setProperty(PARAM_DESC, description != null ? description : ""); // $NON-NLS-1$
  }
  // Let's not forget to save the details of the transformation itself.
  // This includes logging information, parameters, etc.
  //
  saveTransformationDetails(rootNode, transMeta);
  return rootNode;
}
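As with the cluster and database examples above, the PROP_PARTITIONING_SCHEMA property is written as a DataNodeRef keyed by object id, so on load it has to be resolved against the repository. A hedged sketch of that resolution; findPartitionSchema is an assumed helper, analogous to findSlaveServer in ClusterDelegate:

// Hypothetical load-side sketch for the PROP_PARTITIONING_SCHEMA reference.
if (stepNode.hasProperty(PROP_PARTITIONING_SCHEMA)) {
  DataNodeRef ref = stepNode.getProperty(PROP_PARTITIONING_SCHEMA).getRef();
  // findPartitionSchema is assumed to look the schema up by its ObjectId:
  PartitionSchema partitionSchema = findPartitionSchema(new StringObjectId(ref.toString()));
  step.getStepPartitioningMeta().setPartitionSchema(partitionSchema);
}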