Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in project pentaho-kettle by pentaho.
The class JobDelegate, method elementToDataNode:
public DataNode elementToDataNode(final RepositoryElementInterface element) throws KettleException {
  JobMeta jobMeta = (JobMeta) element;
  DataNode rootNode = new DataNode(NODE_JOB);
  if (jobMeta.getPrivateDatabases() != null) {
    // save all private database names http://jira.pentaho.com/browse/PPP-3413
    String privateDatabaseNames = StringUtils.join(jobMeta.getPrivateDatabases(), JOB_PRIVATE_DATABASE_DELIMITER);
    DataNode privateDatabaseNode = rootNode.addNode(NODE_JOB_PRIVATE_DATABASES);
    privateDatabaseNode.setProperty(PROP_JOB_PRIVATE_DATABASE_NAMES, privateDatabaseNames);
  }
  // Save the notes
  //
  DataNode notesNode = rootNode.addNode(NODE_NOTES);
  notesNode.setProperty(PROP_NR_NOTES, jobMeta.nrNotes());
  for (int i = 0; i < jobMeta.nrNotes(); i++) {
    NotePadMeta note = jobMeta.getNote(i);
    DataNode noteNode = notesNode.addNode(NOTE_PREFIX + i);
    noteNode.setProperty(PROP_XML, note.getXML());
  }
  //
  if (log.isDetailed()) {
    log.logDetailed(toString(), "Saving " + jobMeta.nrJobEntries() + " job entry copies to repository..."); //$NON-NLS-1$ //$NON-NLS-2$
  }
  DataNode entriesNode = rootNode.addNode(NODE_ENTRIES);
  entriesNode.setProperty(PROP_NR_JOB_ENTRY_COPIES, jobMeta.nrJobEntries());
  for (int i = 0; i < jobMeta.nrJobEntries(); i++) {
    JobEntryCopy copy = jobMeta.getJobEntry(i);
    JobEntryInterface entry = copy.getEntry();
    // Create a new node for each entry...
    //
    DataNode copyNode = entriesNode.addNode(sanitizeNodeName(copy.getName()) + "_" + (i + 1) + EXT_JOB_ENTRY_COPY); //$NON-NLS-1$
    copyNode.setProperty(PROP_NAME, copy.getName());
    copyNode.setProperty(PROP_DESCRIPTION, copy.getDescription());
    copyNode.setProperty(PROP_NR, copy.getNr());
    copyNode.setProperty(PROP_GUI_LOCATION_X, copy.getLocation().x);
    copyNode.setProperty(PROP_GUI_LOCATION_Y, copy.getLocation().y);
    copyNode.setProperty(PROP_GUI_DRAW, copy.isDrawn());
    copyNode.setProperty(PROP_PARALLEL, copy.isLaunchingInParallel());
    //
    if (entry instanceof JobEntryBase) {
      AttributesMapUtil.saveAttributesMap(copyNode, (JobEntryBase) entry);
    }
    AttributesMapUtil.saveAttributesMap(copyNode, copy);
    // Save the entry information here as well, for completeness.
    // TODO: since this stores slightly duplicate information, figure out how to store it separately.
    //
    copyNode.setProperty(PROP_JOBENTRY_TYPE, entry.getPluginId());
    DataNode customNode = new DataNode(NODE_CUSTOM);
    RepositoryProxy proxy = new RepositoryProxy(customNode);
    entry.saveRep(proxy, proxy.getMetaStore(), null);
    compatibleEntrySaveRep(entry, proxy, null);
    copyNode.addNode(customNode);
  }
  // Finally, save the hops
  //
  DataNode hopsNode = rootNode.addNode(NODE_HOPS);
  hopsNode.setProperty(PROP_NR_HOPS, jobMeta.nrJobHops());
  for (int i = 0; i < jobMeta.nrJobHops(); i++) {
    JobHopMeta hop = jobMeta.getJobHop(i);
    DataNode hopNode = hopsNode.addNode(JOB_HOP_PREFIX + i);
    hopNode.setProperty(JOB_HOP_FROM, hop.getFromEntry().getName());
    hopNode.setProperty(JOB_HOP_FROM_NR, hop.getFromEntry().getNr());
    hopNode.setProperty(JOB_HOP_TO, hop.getToEntry().getName());
    hopNode.setProperty(JOB_HOP_TO_NR, hop.getToEntry().getNr());
    hopNode.setProperty(JOB_HOP_ENABLED, hop.isEnabled());
    hopNode.setProperty(JOB_HOP_EVALUATION, hop.getEvaluation());
    hopNode.setProperty(JOB_HOP_UNCONDITIONAL, hop.isUnconditional());
  }
  String[] paramKeys = jobMeta.listParameters();
  DataNode paramsNode = rootNode.addNode(NODE_PARAMETERS);
  paramsNode.setProperty(PROP_NR_PARAMETERS, paramKeys == null ? 0 : paramKeys.length);
  // paramKeys can be null (the count above already guards for it), so guard the loop as well
  for (int idx = 0; paramKeys != null && idx < paramKeys.length; idx++) {
    DataNode paramNode = paramsNode.addNode(PARAM_PREFIX + idx);
    String key = paramKeys[idx];
    String description = jobMeta.getParameterDescription(paramKeys[idx]);
    String defaultValue = jobMeta.getParameterDefault(paramKeys[idx]);
    paramNode.setProperty(PARAM_KEY, key != null ? key : ""); //$NON-NLS-1$
    paramNode.setProperty(PARAM_DEFAULT, defaultValue != null ? defaultValue : ""); //$NON-NLS-1$
    paramNode.setProperty(PARAM_DESC, description != null ? description : ""); //$NON-NLS-1$
  }
  // Let's not forget to save the details of the job itself.
  // This includes logging information, parameters, etc.
  //
  saveJobDetails(rootNode, jobMeta);
  return rootNode;
}
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in project pentaho-kettle by pentaho.
The class JobDelegate, method dataNodeToElement:
public void dataNodeToElement(final DataNode rootNode, final RepositoryElementInterface element) throws KettleException {
  JobMeta jobMeta = (JobMeta) element;
  Set<String> privateDatabases = null;
  // read the private databases
  DataNode privateDbsNode = rootNode.getNode(NODE_JOB_PRIVATE_DATABASES);
  // BACKLOG-6635
  if (privateDbsNode != null) {
    privateDatabases = new HashSet<String>();
    if (privateDbsNode.hasProperty(PROP_JOB_PRIVATE_DATABASE_NAMES)) {
      for (String privateDatabaseName : getString(privateDbsNode, PROP_JOB_PRIVATE_DATABASE_NAMES).split(JOB_PRIVATE_DATABASE_DELIMITER)) {
        if (!privateDatabaseName.isEmpty()) {
          privateDatabases.add(privateDatabaseName);
        }
      }
    } else {
      for (DataNode privateDatabase : privateDbsNode.getNodes()) {
        privateDatabases.add(privateDatabase.getName());
      }
    }
  }
  jobMeta.setPrivateDatabases(privateDatabases);
  jobMeta.setSharedObjectsFile(getString(rootNode, PROP_SHARED_FILE));
  // Keep a unique list of job entries to facilitate loading.
  //
  List<JobEntryInterface> jobentries = new ArrayList<JobEntryInterface>();
  // Read the job entry copies
  //
  DataNode entriesNode = rootNode.getNode(NODE_ENTRIES);
  int nrCopies = (int) entriesNode.getProperty(PROP_NR_JOB_ENTRY_COPIES).getLong();
  //
  for (DataNode copyNode : entriesNode.getNodes()) {
    // String name = copyNode.getName();
    // Read the entry...
    //
    JobEntryInterface jobEntry = readJobEntry(copyNode, jobMeta, jobentries);
    JobEntryCopy copy = new JobEntryCopy(jobEntry);
    copy.setName(getString(copyNode, PROP_NAME));
    copy.setDescription(getString(copyNode, PROP_DESCRIPTION));
    copy.setObjectId(new StringObjectId(copyNode.getId().toString()));
    copy.setNr((int) copyNode.getProperty(PROP_NR).getLong());
    int x = (int) copyNode.getProperty(PROP_GUI_LOCATION_X).getLong();
    int y = (int) copyNode.getProperty(PROP_GUI_LOCATION_Y).getLong();
    copy.setLocation(x, y);
    copy.setDrawn(copyNode.getProperty(PROP_GUI_DRAW).getBoolean());
    copy.setLaunchingInParallel(copyNode.getProperty(PROP_PARALLEL).getBoolean());
    //
    if (jobEntry instanceof JobEntryBase) {
      AttributesMapUtil.loadAttributesMap(copyNode, (JobEntryBase) jobEntry);
    }
    AttributesMapUtil.loadAttributesMap(copyNode, copy);
    jobMeta.getJobCopies().add(copy);
  }
  if (jobMeta.getJobCopies().size() != nrCopies) {
    throw new KettleException("The number of job entry copies read [" + jobMeta.getJobCopies().size() + "] was not the number we expected [" + nrCopies + "]");
  }
  // Read the notes...
  //
  DataNode notesNode = rootNode.getNode(NODE_NOTES);
  int nrNotes = (int) notesNode.getProperty(PROP_NR_NOTES).getLong();
  for (DataNode noteNode : notesNode.getNodes()) {
    // String name = noteNode.getName();
    String xml = getString(noteNode, PROP_XML);
    jobMeta.addNote(new NotePadMeta(XMLHandler.getSubNode(XMLHandler.loadXMLString(xml), NotePadMeta.XML_TAG)));
  }
  if (jobMeta.nrNotes() != nrNotes) {
    throw new KettleException("The number of notes read [" + jobMeta.nrNotes() + "] was not the number we expected [" + nrNotes + "]");
  }
  // Read the hops...
  //
  DataNode hopsNode = rootNode.getNode(NODE_HOPS);
  int nrHops = (int) hopsNode.getProperty(PROP_NR_HOPS).getLong();
  for (DataNode hopNode : hopsNode.getNodes()) {
    // String name = hopNode.getName();
    String copyFromName = getString(hopNode, JOB_HOP_FROM);
    int copyFromNr = (int) hopNode.getProperty(JOB_HOP_FROM_NR).getLong();
    String copyToName = getString(hopNode, JOB_HOP_TO);
    int copyToNr = (int) hopNode.getProperty(JOB_HOP_TO_NR).getLong();
    boolean enabled = true;
    if (hopNode.hasProperty(JOB_HOP_ENABLED)) {
      enabled = hopNode.getProperty(JOB_HOP_ENABLED).getBoolean();
    }
    boolean evaluation = true;
    if (hopNode.hasProperty(JOB_HOP_EVALUATION)) {
      evaluation = hopNode.getProperty(JOB_HOP_EVALUATION).getBoolean();
    }
    boolean unconditional = true;
    if (hopNode.hasProperty(JOB_HOP_UNCONDITIONAL)) {
      unconditional = hopNode.getProperty(JOB_HOP_UNCONDITIONAL).getBoolean();
    }
    JobEntryCopy copyFrom = jobMeta.findJobEntry(copyFromName, copyFromNr, true);
    JobEntryCopy copyTo = jobMeta.findJobEntry(copyToName, copyToNr, true);
    JobHopMeta jobHopMeta = new JobHopMeta(copyFrom, copyTo);
    jobHopMeta.setEnabled(enabled);
    jobHopMeta.setEvaluation(evaluation);
    jobHopMeta.setUnconditional(unconditional);
    jobMeta.addJobHop(jobHopMeta);
  }
  if (jobMeta.nrJobHops() != nrHops) {
    throw new KettleException("The number of hops read [" + jobMeta.nrJobHops() + "] was not the number we expected [" + nrHops + "]");
  }
  // Load the details at the end, to make sure we reference the databases correctly, etc.
  //
  loadJobMetaDetails(rootNode, jobMeta);
  jobMeta.eraseParameters();
  DataNode paramsNode = rootNode.getNode(NODE_PARAMETERS);
  int count = (int) paramsNode.getProperty(PROP_NR_PARAMETERS).getLong();
  for (int idx = 0; idx < count; idx++) {
    DataNode paramNode = paramsNode.getNode(PARAM_PREFIX + idx);
    String key = getString(paramNode, PARAM_KEY);
    String def = getString(paramNode, PARAM_DEFAULT);
    String desc = getString(paramNode, PARAM_DESC);
    jobMeta.addParameterDefinition(key, def, desc);
  }
}
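Two read idioms in the method above are worth isolating: properties added in later releases (the hop flags) are fetched through hasProperty() with a default so nodes written by older versions still load, and numeric properties come back as longs that are narrowed explicitly. A sketch of both as standalone helpers; the helper names are hypothetical:

import org.pentaho.platform.api.repository2.unified.data.node.DataNode;

public class DataNodeReadSketch {
  // Backward-compatible read: fall back to a default when the property is absent.
  static boolean getBoolean(DataNode node, String prop, boolean defaultValue) {
    return node.hasProperty(prop) ? node.getProperty(prop).getBoolean() : defaultValue;
  }

  // DataNode stores numbers as longs; narrow explicitly, as the loader above does.
  static int getInt(DataNode node, String prop) {
    return (int) node.getProperty(prop).getLong();
  }
}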
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in project pentaho-kettle by pentaho.
The class PartitionDelegate, method elementToDataNode:
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  PartitionSchema partitionSchema = (PartitionSchema) element;
  DataNode rootNode = new DataNode(NODE_ROOT);
  // Check for naming collision
  ObjectId partitionId = repo.getPartitionSchemaID(partitionSchema.getName());
  if (partitionId != null && !partitionSchema.getObjectId().equals(partitionId)) {
    // We have a naming collision, abort the save
    throw new KettleException("Failed to save object to repository. Object [" + partitionSchema.getName() + "] already exists.");
  }
  rootNode.setProperty(PROP_DYNAMIC_DEFINITION, partitionSchema.isDynamicallyDefined());
  rootNode.setProperty(PROP_PARTITIONS_PER_SLAVE, partitionSchema.getNumberOfPartitionsPerSlave());
  // Save the partition IDs
  DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
  attrNode.setProperty(PROP_NB_PARTITION_SCHEMA, partitionSchema.getPartitionIDs().size());
  for (int i = 0; i < partitionSchema.getPartitionIDs().size(); i++) {
    attrNode.setProperty(String.valueOf(i), partitionSchema.getPartitionIDs().get(i));
  }
  return rootNode;
}
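The attributes node flattens the partition ID list into properties named "0", "1", ... plus a count. A hypothetical generic helper for that indexed-property pattern; saveStringList and countProp are illustrative names, not part of the delegate:

import java.util.List;
import org.pentaho.platform.api.repository2.unified.data.node.DataNode;

public class IndexedListSaveSketch {
  // Flatten a list into properties "0", "1", ... plus a count property,
  // mirroring how the partition IDs are stored above.
  static void saveStringList(DataNode node, String countProp, List<String> values) {
    node.setProperty(countProp, values.size());
    for (int i = 0; i < values.size(); i++) {
      node.setProperty(String.valueOf(i), values.get(i));
    }
  }
}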
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in project pentaho-kettle by pentaho.
The class PartitionDelegate, method dataNodeToElement:
public void dataNodeToElement(DataNode rootNode, RepositoryElementInterface element) throws KettleException {
  PartitionSchema partitionSchema = (PartitionSchema) element;
  partitionSchema.setDynamicallyDefined(rootNode.getProperty(PROP_DYNAMIC_DEFINITION).getBoolean());
  partitionSchema.setNumberOfPartitionsPerSlave(getString(rootNode, PROP_PARTITIONS_PER_SLAVE));
  // Also, load all the properties we can find...
  DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);
  long partitionSchemaSize = attrNode.getProperty(PROP_NB_PARTITION_SCHEMA).getLong();
  for (int i = 0; i < partitionSchemaSize; i++) {
    DataProperty property = attrNode.getProperty(String.valueOf(i));
    partitionSchema.getPartitionIDs().add(Const.NVL(property.getString(), ""));
  }
}
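The load side reads the count back and substitutes "" for missing values via Const.NVL. A hypothetical counterpart to the save helper sketched earlier, with the same illustrative names:

import java.util.ArrayList;
import java.util.List;
import org.pentaho.platform.api.repository2.unified.data.node.DataNode;
import org.pentaho.platform.api.repository2.unified.data.node.DataProperty;

public class IndexedListLoadSketch {
  // Read the count, then the indexed properties "0", "1", ...,
  // tolerating null values as the delegate does above.
  static List<String> loadStringList(DataNode node, String countProp) {
    long size = node.getProperty(countProp).getLong();
    List<String> values = new ArrayList<String>();
    for (int i = 0; i < size; i++) {
      DataProperty property = node.getProperty(String.valueOf(i));
      values.add(property == null || property.getString() == null ? "" : property.getString());
    }
    return values;
  }
}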
Use of org.pentaho.platform.api.repository2.unified.data.node.DataNode in project pentaho-kettle by pentaho.
The class PurRepository, method readDatabases:
@Override
public List<DatabaseMeta> readDatabases() throws KettleException {
  try {
    List<RepositoryFile> children = getAllFilesOfType(null, RepositoryObjectType.DATABASE, false);
    List<DatabaseMeta> dbMetas = new ArrayList<DatabaseMeta>();
    for (RepositoryFile file : children) {
      DataNode node = pur.getDataForRead(file.getId(), NodeRepositoryFileData.class).getNode();
      DatabaseMeta databaseMeta = (DatabaseMeta) databaseMetaTransformer.dataNodeToElement(node);
      databaseMeta.setName(file.getTitle());
      dbMetas.add(databaseMeta);
    }
    return dbMetas;
  } catch (Exception e) {
    throw new KettleException("Unable to read all databases", e);
  }
}
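readDatabases() shows the read path: pur.getDataForRead() returns a NodeRepositoryFileData whose getNode() yields the DataNode tree. A hedged sketch of the symmetric write path, assuming IUnifiedRepository.updateFile() and the NodeRepositoryFileData(DataNode) constructor behave as in the pentaho-platform API; this is illustrative, not how PurRepository itself saves:

import org.pentaho.platform.api.repository2.unified.IUnifiedRepository;
import org.pentaho.platform.api.repository2.unified.RepositoryFile;
import org.pentaho.platform.api.repository2.unified.data.node.DataNode;
import org.pentaho.platform.api.repository2.unified.data.node.NodeRepositoryFileData;

public class DataNodeWriteSketch {
  // Wrap a DataNode in NodeRepositoryFileData and hand it to the unified
  // repository, mirroring the getDataForRead() call above.
  static RepositoryFile updateNode(IUnifiedRepository pur, RepositoryFile file, DataNode node, String versionMessage) {
    return pur.updateFile(file, new NodeRepositoryFileData(node), versionMessage);
  }
}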