Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.
In class PartitionDelegate, method dataNodeToElement(DataNode, RepositoryElementInterface):
public void dataNodeToElement(DataNode rootNode, RepositoryElementInterface element) throws KettleException {
  PartitionSchema partitionSchema = (PartitionSchema) element;
  partitionSchema.setDynamicallyDefined(rootNode.getProperty(PROP_DYNAMIC_DEFINITION).getBoolean());
  partitionSchema.setNumberOfPartitionsPerSlave(getString(rootNode, PROP_PARTITIONS_PER_SLAVE));
  // Also, load all the properties we can find...
  DataNode attrNode = rootNode.getNode(NODE_ATTRIBUTES);
  long partitionSchemaSize = attrNode.getProperty(PROP_NB_PARTITION_SCHEMA).getLong();
  for (int i = 0; i < partitionSchemaSize; i++) {
    DataProperty property = attrNode.getProperty(String.valueOf(i));
    partitionSchema.getPartitionIDs().add(Const.NVL(property.getString(), ""));
  }
}
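A minimal usage sketch of this two-argument overload, assuming delegate is a PartitionDelegate instance and rootNode is a DataNode previously produced by elementToDataNode (shown further below); the variable names are illustrative, not part of the project:

PartitionSchema target = new PartitionSchema();
delegate.dataNodeToElement(rootNode, target);
// target now carries the dynamic-definition flag, the partitions-per-slave setting,
// and every partition ID stored as a numbered property under the attributes node.
for (String partitionId : target.getPartitionIDs()) {
  System.out.println(partitionId);
}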
Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.
In class PartitionDelegate, method dataNodeToElement(DataNode):
public RepositoryElementInterface dataNodeToElement(DataNode rootNode) throws KettleException {
  PartitionSchema partitionSchema = new PartitionSchema();
  dataNodeToElement(rootNode, partitionSchema);
  return partitionSchema;
}
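The single-argument overload is a convenience wrapper that allocates the PartitionSchema itself and delegates to the two-argument method above. A one-line usage sketch, with the same illustrative delegate and rootNode as before:

PartitionSchema loaded = (PartitionSchema) delegate.dataNodeToElement(rootNode);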
Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.
In class PartitionDelegate, method assemble:
public PartitionSchema assemble(RepositoryFile file, NodeRepositoryFileData data, VersionSummary version) throws KettleException {
  PartitionSchema partitionSchema = (PartitionSchema) dataNodeToElement(data.getNode());
  partitionSchema.setName(file.getTitle());
  partitionSchema.setObjectId(new StringObjectId(file.getId().toString()));
  partitionSchema.setObjectRevision(repo.createObjectRevision(version));
  partitionSchema.clearChanged();
  return partitionSchema;
}
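assemble() layers file-level metadata on top of the converted data node: the repository file title becomes the schema name, the file id becomes the ObjectId, and the version summary becomes the object revision. A sketch of how it might be driven from an IUnifiedRepository; the repository path, the variable names, and the use of a null version id to mean the latest version are assumptions for illustration, not taken from the project:

// unifiedRepository is an IUnifiedRepository and delegate a PartitionDelegate; the path is illustrative.
RepositoryFile file = unifiedRepository.getFile("/etc/pdi/partitionSchemas/demoSchema");
NodeRepositoryFileData data = unifiedRepository.getDataForRead(file.getId(), NodeRepositoryFileData.class);
VersionSummary version = unifiedRepository.getVersionSummary(file.getId(), null);
PartitionSchema schema = delegate.assemble(file, data, version);
// schema.getName() now equals file.getTitle(); schema.getObjectId() wraps file.getId().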
Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.
In class PartitionDelegate, method elementToDataNode:
public DataNode elementToDataNode(RepositoryElementInterface element) throws KettleException {
  PartitionSchema partitionSchema = (PartitionSchema) element;
  DataNode rootNode = new DataNode(NODE_ROOT);
  // Check for a naming collision
  ObjectId partitionId = repo.getPartitionSchemaID(partitionSchema.getName());
  if (partitionId != null && !partitionSchema.getObjectId().equals(partitionId)) {
    // We have a naming collision, abort the save
    throw new KettleException("Failed to save object to repository. Object [" + partitionSchema.getName() + "] already exists.");
  }
  rootNode.setProperty(PROP_DYNAMIC_DEFINITION, partitionSchema.isDynamicallyDefined());
  rootNode.setProperty(PROP_PARTITIONS_PER_SLAVE, partitionSchema.getNumberOfPartitionsPerSlave());
  // Save the partition IDs as numbered properties under the attributes node
  DataNode attrNode = rootNode.addNode(NODE_ATTRIBUTES);
  attrNode.setProperty(PROP_NB_PARTITION_SCHEMA, partitionSchema.getPartitionIDs().size());
  for (int i = 0; i < partitionSchema.getPartitionIDs().size(); i++) {
    attrNode.setProperty(String.valueOf(i), partitionSchema.getPartitionIDs().get(i));
  }
  return rootNode;
}
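Taken together, elementToDataNode and dataNodeToElement form a round trip: the count written to PROP_NB_PARTITION_SCHEMA and the numbered properties under the attributes node are exactly what the loading loop reads back. A minimal round-trip sketch, assuming delegate is a PartitionDelegate wired to a repository in which the schema name does not already exist (names and values are illustrative):

PartitionSchema schema = new PartitionSchema();
schema.setName("demoSchema");
schema.setDynamicallyDefined(false);
schema.setNumberOfPartitionsPerSlave("4");
schema.getPartitionIDs().add("P1");
schema.getPartitionIDs().add("P2");

DataNode node = delegate.elementToDataNode(schema);
PartitionSchema reloaded = (PartitionSchema) delegate.dataNodeToElement(node);
// reloaded.getPartitionIDs() again contains "P1" and "P2".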
Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.
In class PurRepository, method deepCopy:
private Map<RepositoryObjectType, List<? extends SharedObjectInterface>> deepCopy(Map<RepositoryObjectType, List<? extends SharedObjectInterface>> orig) throws KettleException {
  Map<RepositoryObjectType, List<? extends SharedObjectInterface>> copy = new EnumMap<RepositoryObjectType, List<? extends SharedObjectInterface>>(RepositoryObjectType.class);
  sharedObjectsLock.writeLock().lock();
  try {
    for (Entry<RepositoryObjectType, List<? extends SharedObjectInterface>> entry : orig.entrySet()) {
      RepositoryObjectType type = entry.getKey();
      List<? extends SharedObjectInterface> value = entry.getValue();
      List<SharedObjectInterface> newValue = new ArrayList<SharedObjectInterface>(value.size());
      for (SharedObjectInterface obj : value) {
        SharedObjectInterface newValueItem;
        if (obj instanceof DatabaseMeta) {
          DatabaseMeta databaseMeta = (DatabaseMeta) ((DatabaseMeta) obj).clone();
          databaseMeta.setObjectId(((DatabaseMeta) obj).getObjectId());
          databaseMeta.setChangedDate(obj.getChangedDate());
          databaseMeta.clearChanged();
          newValueItem = databaseMeta;
        } else if (obj instanceof SlaveServer) {
          SlaveServer slaveServer = (SlaveServer) ((SlaveServer) obj).clone();
          slaveServer.setObjectId(((SlaveServer) obj).getObjectId());
          slaveServer.clearChanged();
          newValueItem = slaveServer;
        } else if (obj instanceof PartitionSchema) {
          PartitionSchema partitionSchema = (PartitionSchema) ((PartitionSchema) obj).clone();
          partitionSchema.setObjectId(((PartitionSchema) obj).getObjectId());
          partitionSchema.clearChanged();
          newValueItem = partitionSchema;
        } else if (obj instanceof ClusterSchema) {
          ClusterSchema clusterSchema = ((ClusterSchema) obj).clone();
          clusterSchema.setObjectId(((ClusterSchema) obj).getObjectId());
          clusterSchema.clearChanged();
          newValueItem = clusterSchema;
        } else {
          throw new KettleException("unknown shared object class");
        }
        newValue.add(newValueItem);
      }
      copy.put(type, newValue);
    }
  } finally {
    sharedObjectsLock.writeLock().unlock();
  }
  return copy;
}
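deepCopy() gives callers a snapshot of the shared-object cache that they cannot mutate from the outside: each branch clones the object and then restores the ObjectId on the clone before handing it out. A minimal sketch of the property this provides for the PartitionSchema branch, assuming (as the method does) that PartitionSchema.clone() copies the partition ID list rather than sharing it:

PartitionSchema original = new PartitionSchema();
original.setName("demo");
original.getPartitionIDs().add("P1");

// Same clone-then-restore-ObjectId pattern as the branch above.
PartitionSchema copy = (PartitionSchema) original.clone();
copy.setObjectId(original.getObjectId());
copy.clearChanged();

copy.getPartitionIDs().add("P2");
// original.getPartitionIDs() still contains only "P1"; the cached object is untouched.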