Usage of org.pentaho.di.cluster.ClusterSchema in the pentaho-kettle project (by Pentaho): the loadClusterSchema method of the KettleDatabaseRepositoryClusterSchemaDelegate class.
/**
 * Loads a cluster schema from the repository by id, resolving its slave servers
 * against the supplied shared list so that already-known instances are reused.
 *
 * @param id_cluster_schema the repository object id of the cluster schema to load
 * @param slaveServers the shared slave servers already known to the caller
 * @return the populated ClusterSchema
 * @throws KettleException if the schema row or its slave servers cannot be read
 */
public ClusterSchema loadClusterSchema(ObjectId id_cluster_schema, List<SlaveServer> slaveServers) throws KettleException {
    ClusterSchema result = new ClusterSchema();
    // Copy the persisted columns onto the fresh instance.
    RowMetaAndData schemaRow = getClusterSchema(id_cluster_schema);
    result.setObjectId(id_cluster_schema);
    result.setName(schemaRow.getString(KettleDatabaseRepository.FIELD_CLUSTER_NAME, null));
    result.setBasePort(schemaRow.getString(KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, null));
    result.setSocketsBufferSize(schemaRow.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE, null));
    result.setSocketsFlushInterval(schemaRow.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL, null));
    result.setSocketsCompressed(schemaRow.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED, true));
    result.setDynamic(schemaRow.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, true));
    // Resolve each referenced slave server, preferring the instance from the
    // caller-supplied shared list (matched by name) over a freshly loaded copy.
    for (ObjectId slaveId : repository.getClusterSlaveIDs(id_cluster_schema)) {
        // Load last version
        SlaveServer loaded = repository.loadSlaveServer(slaveId, null);
        SlaveServer shared = SlaveServer.findSlaveServer(slaveServers, loaded.getName());
        result.getSlaveServers().add(shared != null ? shared : loaded);
    }
    return result;
}
Usage of org.pentaho.di.cluster.ClusterSchema in the pentaho-kettle project (by Pentaho): the replaceSharedObjects method of the RepositoryImporter class.
/**
 * Merges the importer's shared cluster and partition schemas into the given
 * transformation, prompting the user (once per object type, unless suppressed)
 * before overwriting an existing schema that differs from the imported one.
 *
 * @param transMeta the transformation whose shared objects are being replaced
 * @throws KettleException if replacing the common shared objects fails
 */
protected void replaceSharedObjects(TransMeta transMeta) throws KettleException {
    // Let the common implementation handle the shared objects every AbstractMeta has.
    replaceSharedObjects((AbstractMeta) transMeta);
    // Merge the shared cluster schemas into this transformation.
    for (ClusterSchema clusterSchema : getSharedObjects(ClusterSchema.class)) {
        int pos = transMeta.getClusterSchemas().indexOf(clusterSchema);
        if (pos >= 0) {
            ClusterSchema existing = transMeta.getClusterSchemas().get(pos);
            // Preserve the object id so we can update without having to look up the id
            existing.setObjectId(clusterSchema.getObjectId());
            // Overwrite silently when nothing differs; otherwise ask the user first.
            boolean overwrite = equals(clusterSchema, existing) || !getPromptResult(BaseMessages.getString(PKG, "RepositoryImporter.Dialog.ClusterSchemaExistsOverWrite.Message", existing.getName()), BaseMessages.getString(PKG, "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"), IMPORT_ASK_ABOUT_REPLACE_CS);
            if (overwrite) {
                existing.replaceMeta(clusterSchema);
                // We didn't actually change anything
                existing.clearChanged();
            } else {
                existing.setChanged();
            }
        } else {
            transMeta.getClusterSchemas().add(clusterSchema);
        }
    }
    // Merge the shared partition schemas the same way.
    for (PartitionSchema partitionSchema : getSharedObjects(PartitionSchema.class)) {
        int pos = transMeta.getPartitionSchemas().indexOf(partitionSchema);
        if (pos >= 0) {
            PartitionSchema existing = transMeta.getPartitionSchemas().get(pos);
            // Preserve the object id so we can update without having to look up the id
            existing.setObjectId(partitionSchema.getObjectId());
            // Overwrite silently when nothing differs; otherwise ask the user first.
            boolean overwrite = equals(partitionSchema, existing) || !getPromptResult(BaseMessages.getString(PKG, "RepositoryImporter.Dialog.PartitionSchemaExistsOverWrite.Message", existing.getName()), BaseMessages.getString(PKG, "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"), IMPORT_ASK_ABOUT_REPLACE_PS);
            if (overwrite) {
                existing.replaceMeta(partitionSchema);
                // We didn't actually change anything
                existing.clearChanged();
            } else {
                existing.setChanged();
            }
        } else {
            transMeta.getPartitionSchemas().add(partitionSchema);
        }
    }
}
Usage of org.pentaho.di.cluster.ClusterSchema in the pentaho-kettle project (by Pentaho): the loadAllShared test method of the PurRepository_SharedObjects_IT class.
@Test
public void loadAllShared() throws Exception {
    // One shared object of each supported repository type.
    ClusterSchema clusterSchema = cluster();
    DatabaseMeta databaseMeta = database();
    SlaveServer slave = slaveServer();
    PartitionSchema partitionSchema = partition();
    RepositoryElementInterface[] saved = new RepositoryElementInterface[] { clusterSchema, databaseMeta, slave, partitionSchema };
    // Persist all of them (no version comment, no progress monitor).
    for (RepositoryElementInterface element : saved) {
        purRepository.save(element, null, null);
    }
    // Read them all back in a single call.
    Map<RepositoryObjectType, List<? extends SharedObjectInterface>> loaded = map();
    purRepository.readSharedObjects(loaded, RepositoryObjectType.CLUSTER_SCHEMA, RepositoryObjectType.DATABASE, RepositoryObjectType.SLAVE_SERVER, RepositoryObjectType.PARTITION_SCHEMA);
    // Every saved object must come back as the sole entry under its own type.
    assertEquals(saved.length, loaded.size());
    for (RepositoryElementInterface element : saved) {
        List<? extends SharedObjectInterface> entries = loaded.get(element.getRepositoryElementType());
        assertEquals(1, entries.size());
        assertEquals(element, entries.get(0));
    }
}
Usage of org.pentaho.di.cluster.ClusterSchema in the pentaho-kettle project (by Pentaho): the testClusterSchemas test method of the RepositoryTestBase class.
/**
 * End-to-end exercise of the cluster-schema repository API:
 * save(clusterSchema), exists(), loadClusterSchema(), deleteClusterSchema(), getClusterID(), getClusterIDs(),
 * getClusterNames() — including versioned save/load.
 */
@Test
public void testClusterSchemas() throws Exception {
    // RepositoryDirectoryInterface rootDir =
    initRepo();
    // Save a new cluster schema and verify it gets an id, a revision, and the V1 comment.
    ClusterSchema clusterSchema = createClusterSchema(EXP_CLUSTER_SCHEMA_NAME);
    repository.save(clusterSchema, VERSION_COMMENT_V1, null);
    assertNotNull(clusterSchema.getObjectId());
    ObjectRevision version = clusterSchema.getObjectRevision();
    assertNotNull(version);
    assertTrue(hasVersionWithComment(clusterSchema, VERSION_COMMENT_V1));
    assertTrue(repository.exists(EXP_CLUSTER_SCHEMA_NAME, null, RepositoryObjectType.CLUSTER_SCHEMA));
    // Load it back (latest version) and check every persisted field round-tripped.
    ClusterSchema fetchedClusterSchema = repository.loadClusterSchema(clusterSchema.getObjectId(), repository.getSlaveServers(), null);
    assertEquals(EXP_CLUSTER_SCHEMA_NAME, fetchedClusterSchema.getName());
    // TODO mlowery clusterSchema.getXML doesn't output desc either; should it?
    // assertEquals(EXP_CLUSTER_SCHEMA_DESC, fetchedClusterSchema.getDescription());
    assertEquals(EXP_CLUSTER_SCHEMA_BASE_PORT, fetchedClusterSchema.getBasePort());
    assertEquals(EXP_CLUSTER_SCHEMA_SOCKETS_BUFFER_SIZE, fetchedClusterSchema.getSocketsBufferSize());
    assertEquals(EXP_CLUSTER_SCHEMA_SOCKETS_FLUSH_INTERVAL, fetchedClusterSchema.getSocketsFlushInterval());
    assertEquals(EXP_CLUSTER_SCHEMA_SOCKETS_COMPRESSED, fetchedClusterSchema.isSocketsCompressed());
    assertEquals(EXP_CLUSTER_SCHEMA_DYN, fetchedClusterSchema.isDynamic());
    assertEquals(1, fetchedClusterSchema.getSlaveServers().size());
    assertTrue(fetchedClusterSchema.getSlaveServers().get(0).getName().startsWith(EXP_SLAVE_NAME));
    // versioning test: a second save creates a V2 revision; loading without a
    // version label returns V2, loading with the V1 label returns the old value.
    clusterSchema.setBasePort(EXP_CLUSTER_SCHEMA_BASE_PORT_V2);
    repository.save(clusterSchema, VERSION_COMMENT_V2, null);
    assertEquals(VERSION_COMMENT_V2, clusterSchema.getObjectRevision().getComment());
    fetchedClusterSchema = repository.loadClusterSchema(clusterSchema.getObjectId(), repository.getSlaveServers(), null);
    assertEquals(EXP_CLUSTER_SCHEMA_BASE_PORT_V2, fetchedClusterSchema.getBasePort());
    fetchedClusterSchema = repository.loadClusterSchema(clusterSchema.getObjectId(), repository.getSlaveServers(), VERSION_LABEL_V1);
    assertEquals(EXP_CLUSTER_SCHEMA_BASE_PORT, fetchedClusterSchema.getBasePort());
    // Lookup helpers: by name, and the id/name listings (with and without deleted).
    assertEquals(clusterSchema.getObjectId(), repository.getClusterID(EXP_CLUSTER_SCHEMA_NAME));
    assertEquals(1, repository.getClusterIDs(false).length);
    assertEquals(1, repository.getClusterIDs(true).length);
    assertEquals(clusterSchema.getObjectId(), repository.getClusterIDs(false)[0]);
    assertEquals(1, repository.getClusterNames(false).length);
    assertEquals(1, repository.getClusterNames(true).length);
    assertEquals(EXP_CLUSTER_SCHEMA_NAME, repository.getClusterNames(false)[0]);
    // Deletion removes the schema from existence checks and from both listings.
    repository.deleteClusterSchema(clusterSchema.getObjectId());
    assertFalse(repository.exists(EXP_CLUSTER_SCHEMA_NAME, null, RepositoryObjectType.CLUSTER_SCHEMA));
    assertEquals(0, repository.getClusterIDs(false).length);
    // shared object deletion is permanent by default
    assertEquals(0, repository.getClusterIDs(true).length);
    assertEquals(0, repository.getClusterNames(false).length);
    // shared object deletion is permanent by default
    assertEquals(0, repository.getClusterNames(true).length);
}
Usage of org.pentaho.di.cluster.ClusterSchema in the pentaho-kettle project (by Pentaho): the getXML method of the TransMeta class.
/**
 * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster,
 * or partition information as specified by the parameters
 *
 * @param includeSteps
 *          whether to include step data
 * @param includeDatabase
 *          whether to include database data
 * @param includeSlaves
 *          whether to include slave server data
 * @param includeClusters
 *          whether to include cluster data
 * @param includePartitions
 *          whether to include partition data
 * @param includeNamedParameters
 *          whether to include named parameters data
 * @param includeLog
 *          whether to include log data
 * @param includeDependencies
 *          whether to include dependencies data
 * @param includeNotePads
 *          whether to include notepads data
 * @param includeAttributeGroups
 *          whether to include attributes map data
 * @return the XML representation of this transformation
 * @throws KettleException
 *           if any errors occur during generation of the XML
 */
public String getXML(boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters, boolean includePartitions, boolean includeNamedParameters, boolean includeLog, boolean includeDependencies, boolean includeNotePads, boolean includeAttributeGroups) throws KettleException {
    // Clear the embedded named clusters. We will be repopulating from steps that used named clusters
    getNamedClusterEmbedManager().clear();
    // Props may be uninitialized outside a full Kettle environment; used below to
    // decide whether only used database connections are serialized.
    Props props = null;
    if (Props.isInitialized()) {
        props = Props.getInstance();
    }
    StringBuilder retval = new StringBuilder(800);
    // Open the root transformation tag and the <info> header section.
    retval.append(XMLHandler.openTag(XML_TAG)).append(Const.CR);
    retval.append(" ").append(XMLHandler.openTag(XML_TAG_INFO)).append(Const.CR);
    retval.append(" ").append(XMLHandler.addTagValue("name", name));
    retval.append(" ").append(XMLHandler.addTagValue("description", description));
    retval.append(" ").append(XMLHandler.addTagValue("extended_description", extendedDescription));
    retval.append(" ").append(XMLHandler.addTagValue("trans_version", trans_version));
    retval.append(" ").append(XMLHandler.addTagValue("trans_type", transformationType.getCode()));
    // A negative status means "not set"; only serialize real statuses.
    if (trans_status >= 0) {
        retval.append(" ").append(XMLHandler.addTagValue("trans_status", trans_status));
    }
    retval.append(" ").append(XMLHandler.addTagValue("directory", directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR));
    // Named parameters: name, default value and description for each.
    if (includeNamedParameters) {
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_PARAMETERS)).append(Const.CR);
        String[] parameters = listParameters();
        for (int idx = 0; idx < parameters.length; idx++) {
            retval.append(" ").append(XMLHandler.openTag("parameter")).append(Const.CR);
            retval.append(" ").append(XMLHandler.addTagValue("name", parameters[idx]));
            retval.append(" ").append(XMLHandler.addTagValue("default_value", getParameterDefault(parameters[idx])));
            retval.append(" ").append(XMLHandler.addTagValue("description", getParameterDescription(parameters[idx])));
            retval.append(" ").append(XMLHandler.closeTag("parameter")).append(Const.CR);
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_PARAMETERS)).append(Const.CR);
    }
    if (includeLog) {
        retval.append(" ").append(XMLHandler.openTag("log")).append(Const.CR);
        // Add the metadata for the various logging tables
        //
        retval.append(transLogTable.getXML());
        retval.append(performanceLogTable.getXML());
        retval.append(channelLogTable.getXML());
        retval.append(stepLogTable.getXML());
        retval.append(metricsLogTable.getXML());
        retval.append(" ").append(XMLHandler.closeTag("log")).append(Const.CR);
    }
    // Max-date settings: connection/table/field plus offset and allowed difference.
    retval.append(" ").append(XMLHandler.openTag("maxdate")).append(Const.CR);
    retval.append(" ").append(XMLHandler.addTagValue("connection", maxDateConnection == null ? "" : maxDateConnection.getName()));
    retval.append(" ").append(XMLHandler.addTagValue("table", maxDateTable));
    retval.append(" ").append(XMLHandler.addTagValue("field", maxDateField));
    retval.append(" ").append(XMLHandler.addTagValue("offset", maxDateOffset));
    retval.append(" ").append(XMLHandler.addTagValue("maxdiff", maxDateDifference));
    retval.append(" ").append(XMLHandler.closeTag("maxdate")).append(Const.CR);
    // Runtime/engine settings.
    retval.append(" ").append(XMLHandler.addTagValue("size_rowset", sizeRowset));
    retval.append(" ").append(XMLHandler.addTagValue("sleep_time_empty", sleepTimeEmpty));
    retval.append(" ").append(XMLHandler.addTagValue("sleep_time_full", sleepTimeFull));
    retval.append(" ").append(XMLHandler.addTagValue("unique_connections", usingUniqueConnections));
    retval.append(" ").append(XMLHandler.addTagValue("feedback_shown", feedbackShown));
    retval.append(" ").append(XMLHandler.addTagValue("feedback_size", feedbackSize));
    retval.append(" ").append(XMLHandler.addTagValue("using_thread_priorities", usingThreadPriorityManagment));
    retval.append(" ").append(XMLHandler.addTagValue("shared_objects_file", sharedObjectsFile));
    // Performance monitoring
    //
    retval.append(" ").append(XMLHandler.addTagValue("capture_step_performance", capturingStepPerformanceSnapShots));
    retval.append(" ").append(XMLHandler.addTagValue("step_performance_capturing_delay", stepPerformanceCapturingDelay));
    retval.append(" ").append(XMLHandler.addTagValue("step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit));
    // Transformation dependencies.
    if (includeDependencies) {
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_DEPENDENCIES)).append(Const.CR);
        for (int i = 0; i < nrDependencies(); i++) {
            TransDependency td = getDependency(i);
            retval.append(td.getXML());
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_DEPENDENCIES)).append(Const.CR);
    }
    // Partition schemas.
    if (includePartitions) {
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR);
        for (int i = 0; i < partitionSchemas.size(); i++) {
            PartitionSchema partitionSchema = partitionSchemas.get(i);
            retval.append(partitionSchema.getXML());
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR);
    }
    // Slave servers.
    if (includeSlaves) {
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
        for (int i = 0; i < slaveServers.size(); i++) {
            SlaveServer slaveServer = slaveServers.get(i);
            retval.append(slaveServer.getXML());
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
    }
    // Cluster schemas.
    if (includeClusters) {
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR);
        for (int i = 0; i < clusterSchemas.size(); i++) {
            ClusterSchema clusterSchema = clusterSchemas.get(i);
            retval.append(clusterSchema.getXML());
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR);
    }
    // Audit fields.
    retval.append(" ").append(XMLHandler.addTagValue("created_user", createdUser));
    retval.append(" ").append(XMLHandler.addTagValue("created_date", XMLHandler.date2string(createdDate)));
    retval.append(" ").append(XMLHandler.addTagValue("modified_user", modifiedUser));
    retval.append(" ").append(XMLHandler.addTagValue("modified_date", XMLHandler.date2string(modifiedDate)));
    try {
        retval.append(" ").append(XMLHandler.addTagValue("key_for_session_key", keyForSessionKey));
    } catch (Exception ex) {
        // Serializing the session key is best-effort: log the failure and keep
        // generating the rest of the XML rather than aborting the whole export.
        log.logError("Unable to decode key", ex);
    }
    retval.append(" ").append(XMLHandler.addTagValue("is_key_private", isKeyPrivate));
    retval.append(" ").append(XMLHandler.closeTag(XML_TAG_INFO)).append(Const.CR);
    if (includeNotePads) {
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_NOTEPADS)).append(Const.CR);
        if (notes != null) {
            for (int i = 0; i < nrNotes(); i++) {
                NotePadMeta ni = getNote(i);
                retval.append(ni.getXML());
            }
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_NOTEPADS)).append(Const.CR);
    }
    // The database connections...
    if (includeDatabase) {
        for (int i = 0; i < nrDatabases(); i++) {
            DatabaseMeta dbMeta = getDatabase(i);
            // Optionally skip connections no step actually uses (a Props setting).
            if (props != null && props.areOnlyUsedConnectionsSavedToXML()) {
                if (isDatabaseConnectionUsed(dbMeta)) {
                    retval.append(dbMeta.getXML());
                }
            } else {
                retval.append(dbMeta.getXML());
            }
        }
    }
    if (includeSteps) {
        // Hop order first, then the step definitions themselves.
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_ORDER)).append(Const.CR);
        for (int i = 0; i < nrTransHops(); i++) {
            TransHopMeta transHopMeta = getTransHop(i);
            retval.append(transHopMeta.getXML());
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_ORDER)).append(Const.CR);
        /* The steps... */
        for (int i = 0; i < nrSteps(); i++) {
            StepMeta stepMeta = getStep(i);
            // Give repository-aware steps access to the repository before serializing.
            if (stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface) {
                ((HasRepositoryInterface) stepMeta.getStepMetaInterface()).setRepository(repository);
            }
            retval.append(stepMeta.getXML());
        }
        /* The error handling metadata on the steps */
        retval.append(" ").append(XMLHandler.openTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR);
        for (int i = 0; i < nrSteps(); i++) {
            StepMeta stepMeta = getStep(i);
            if (stepMeta.getStepErrorMeta() != null) {
                retval.append(stepMeta.getStepErrorMeta().getXML());
            }
        }
        retval.append(" ").append(XMLHandler.closeTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR);
    }
    // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment.
    retval.append(slaveStepCopyPartitionDistribution.getXML());
    // Is this a slave transformation or not?
    retval.append(" ").append(XMLHandler.addTagValue("slave_transformation", slaveTransformation));
    // Arbitrary attribute groups attached to the transformation.
    if (includeAttributeGroups) {
        retval.append(AttributesUtil.getAttributesXml(attributesMap));
    }
    retval.append(XMLHandler.closeTag(XML_TAG)).append(Const.CR);
    return XMLFormatter.format(retval.toString());
}
Aggregations