Example usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project: class PurRepositoryStressTest, method testLockSavePartitionSchema.
public void testLockSavePartitionSchema() throws Exception {
// Stub an ObjectId carrying a fixed repository id.
ObjectId schemaId = mock(ObjectId.class);
doReturn("id1").when(schemaId).getId();
// Stub a PartitionSchema whose clone() returns the mock itself, so the
// save path keeps operating on the same stubbed instance.
PartitionSchema schema = mock(PartitionSchema.class);
doReturn(RepositoryObjectType.PARTITION_SCHEMA).when(schema).getRepositoryElementType();
doReturn("partitionID").when(schema).getName();
doReturn(schemaId).when(schema).getObjectId();
doReturn(schema).when(schema).clone();
// Exercise the save path under test; calendar and monitor are throwaway mocks.
purRepository.save(schema, "vComment", mock(Calendar.class), mock(ProgressMonitorListener.class), false);
}
Example usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project: class RepositoryTestBase, method createTransMeta.
/**
 * Builds a fully populated {@link TransMeta} suitable for repository round-trip tests.
 * Supporting shared objects (two database metas, a slave server, a partition schema)
 * are saved to the repository first so they receive object IDs; each is pushed onto
 * {@code deleteStack} for teardown. The returned transformation itself is NOT saved.
 *
 * @param dbName suffix used to derive the transformation name and database names
 * @return the populated, unsaved transformation meta
 * @throws Exception if saving any supporting shared object fails
 */
protected TransMeta createTransMeta(final String dbName) throws Exception {
RepositoryDirectoryInterface rootDir = loadStartDirectory();
// Identity and descriptive fields, all set to the EXP_* constants the tests assert on.
TransMeta transMeta = new TransMeta();
transMeta.setName(EXP_TRANS_NAME.concat(dbName));
transMeta.setDescription(EXP_TRANS_DESC);
transMeta.setExtendedDescription(EXP_TRANS_EXTENDED_DESC);
transMeta.setRepositoryDirectory(rootDir.findDirectory(DIR_TRANSFORMATIONS));
transMeta.setTransversion(EXP_TRANS_VERSION);
transMeta.setTransstatus(EXP_TRANS_STATUS);
transMeta.setCreatedUser(EXP_TRANS_CREATED_USER);
transMeta.setCreatedDate(EXP_TRANS_CREATED_DATE);
transMeta.setModifiedUser(EXP_TRANS_MOD_USER);
transMeta.setModifiedDate(EXP_TRANS_MOD_DATE);
transMeta.addParameterDefinition(EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC);
// Transformation log table. TODO mlowery other transLogTable fields could be set for testing here
TransLogTable transLogTable = TransLogTable.getDefault(transMeta, transMeta, new ArrayList<StepMeta>(0));
transLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
transLogTable.setLogInterval(EXP_TRANS_LOG_TABLE_INTERVAL);
transLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
transLogTable.setLogSizeLimit(EXP_TRANS_LOG_TABLE_SIZE_LIMIT);
transLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
transLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
transMeta.setTransLogTable(transLogTable);
// Performance log table. TODO mlowery other perfLogTable fields could be set for testing here
PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault(transMeta, transMeta);
perfLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
perfLogTable.setLogInterval(EXP_TRANS_LOG_TABLE_INTERVAL);
perfLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
perfLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
perfLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
transMeta.setPerformanceLogTable(perfLogTable);
// Channel log table. TODO mlowery other channelLogTable fields could be set for testing here
ChannelLogTable channelLogTable = ChannelLogTable.getDefault(transMeta, transMeta);
channelLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
channelLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
channelLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
channelLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
transMeta.setChannelLogTable(channelLogTable);
// Step log table. TODO mlowery other stepLogTable fields could be set for testing here
StepLogTable stepLogTable = StepLogTable.getDefault(transMeta, transMeta);
stepLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
stepLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
stepLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
stepLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
transMeta.setStepLogTable(stepLogTable);
DatabaseMeta dbMeta = createDatabaseMeta(dbName);
// dbMeta must be saved so that it gets an ID
repository.save(dbMeta, VERSION_COMMENT_V1, null);
deleteStack.push(dbMeta);
// Max-date and tuning fields exercised by the round-trip assertions.
transMeta.setMaxDateConnection(dbMeta);
transMeta.setMaxDateTable(EXP_TRANS_MAX_DATE_TABLE);
transMeta.setMaxDateField(EXP_TRANS_MAX_DATE_FIELD);
transMeta.setMaxDateOffset(EXP_TRANS_MAX_DATE_OFFSET);
transMeta.setMaxDateDifference(EXP_TRANS_MAX_DATE_DIFF);
transMeta.setSizeRowset(EXP_TRANS_SIZE_ROWSET);
transMeta.setSleepTimeEmpty(EXP_TRANS_SLEEP_TIME_EMPTY);
transMeta.setSleepTimeFull(EXP_TRANS_SLEEP_TIME_FULL);
transMeta.setUsingUniqueConnections(EXP_TRANS_USING_UNIQUE_CONN);
transMeta.setFeedbackShown(EXP_TRANS_FEEDBACK_SHOWN);
transMeta.setFeedbackSize(EXP_TRANS_FEEDBACK_SIZE);
transMeta.setUsingThreadPriorityManagment(EXP_TRANS_USING_THREAD_PRIORITY_MGMT);
transMeta.setSharedObjectsFile(EXP_TRANS_SHARED_OBJECTS_FILE);
transMeta.setCapturingStepPerformanceSnapShots(EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS);
transMeta.setStepPerformanceCapturingDelay(EXP_TRANS_STEP_PERF_CAP_DELAY);
transMeta.addDependency(new TransDependency(dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME));
// Two steps sharing a step-level database meta (saved first so it has an ID), joined by one hop.
DatabaseMeta stepDbMeta = createDatabaseMeta(EXP_DBMETA_NAME_STEP.concat(dbName));
repository.save(stepDbMeta, VERSION_COMMENT_V1, null);
deleteStack.push(stepDbMeta);
Condition cond = new Condition();
StepMeta step1 = createStepMeta1(transMeta, stepDbMeta, cond);
transMeta.addStep(step1);
StepMeta step2 = createStepMeta2(stepDbMeta, cond);
transMeta.addStep(step2);
transMeta.addTransHop(createTransHopMeta(step1, step2));
// Clustering/partitioning setup for the slave-transformation assertions.
SlaveServer slaveServer = createSlaveServer(dbName);
PartitionSchema partSchema = createPartitionSchema(dbName);
// slaveServer, partSchema must be saved so that they get IDs
repository.save(slaveServer, VERSION_COMMENT_V1, null);
deleteStack.push(slaveServer);
repository.save(partSchema, VERSION_COMMENT_V1, null);
deleteStack.push(partSchema);
SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
slaveStepCopyPartitionDistribution.addPartition(EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0);
slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas(Arrays.asList(new PartitionSchema[] { partSchema }));
transMeta.setSlaveStepCopyPartitionDistribution(slaveStepCopyPartitionDistribution);
transMeta.setSlaveTransformation(EXP_TRANS_SLAVE_TRANSFORMATION);
return transMeta;
}
Example usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project: class RepositoryTestBase, method createPartitionSchema.
/**
 * Builds an unsaved {@link PartitionSchema} populated with the expected test values.
 *
 * @param partName suffix appended to the expected schema name
 * @return a fully populated, not-yet-persisted partition schema
 * @throws Exception declared for overriding subclasses; not thrown here
 */
protected PartitionSchema createPartitionSchema(String partName) throws Exception {
PartitionSchema schema = new PartitionSchema();
schema.setName(EXP_PART_SCHEMA_NAME.concat(partName));
schema.setDescription(EXP_PART_SCHEMA_DESC);
schema.setPartitionIDs(Arrays.asList(EXP_PART_SCHEMA_PARTID_1, EXP_PART_SCHEMA_PARTID_2));
schema.setDynamicallyDefined(EXP_PART_SCHEMA_DYN_DEF);
schema.setNumberOfPartitionsPerSlave(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT);
return schema;
}
Example usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project: class RepositoryTestBase, method testPartitionSchemas.
/**
 * Round-trip test for partition schemas, covering: save(partitionSchema), exists(),
 * loadPartitionSchema(), deletePartitionSchema(), getPartitionSchemaID(),
 * getPartitionSchemaIDs() and getPartitionSchemaNames().
 */
@Test
public void testPartitionSchemas() throws Exception {
// RepositoryDirectoryInterface rootDir =
initRepo();
// Save v1 and verify it received an id and a revision with the v1 comment.
PartitionSchema partSchema = createPartitionSchema("");
repository.save(partSchema, VERSION_COMMENT_V1, null);
assertNotNull(partSchema.getObjectId());
ObjectRevision version = partSchema.getObjectRevision();
assertNotNull(version);
assertTrue(hasVersionWithComment(partSchema, VERSION_COMMENT_V1));
assertTrue(repository.exists(EXP_PART_SCHEMA_NAME, null, RepositoryObjectType.PARTITION_SCHEMA));
// Reload and verify the persisted fields survived the round trip.
PartitionSchema fetchedPartSchema = repository.loadPartitionSchema(partSchema.getObjectId(), null);
assertEquals(EXP_PART_SCHEMA_NAME, fetchedPartSchema.getName());
// TODO mlowery partitionSchema.getXML doesn't output desc either; should it?
// assertEquals(EXP_PART_SCHEMA_DESC, fetchedPartSchema.getDescription());
assertEquals(Arrays.asList(new String[] { EXP_PART_SCHEMA_PARTID_1, EXP_PART_SCHEMA_PARTID_2 }), fetchedPartSchema.getPartitionIDs());
assertEquals(EXP_PART_SCHEMA_DYN_DEF, fetchedPartSchema.isDynamicallyDefined());
assertEquals(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT, fetchedPartSchema.getNumberOfPartitionsPerSlave());
// Save v2 with a changed field; latest load must show v2, explicit v1 load must show v1.
partSchema.setNumberOfPartitionsPerSlave(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT_V2);
repository.save(partSchema, VERSION_COMMENT_V2, null);
assertEquals(VERSION_COMMENT_V2, partSchema.getObjectRevision().getComment());
fetchedPartSchema = repository.loadPartitionSchema(partSchema.getObjectId(), null);
assertEquals(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT_V2, fetchedPartSchema.getNumberOfPartitionsPerSlave());
fetchedPartSchema = repository.loadPartitionSchema(partSchema.getObjectId(), VERSION_LABEL_V1);
assertEquals(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT, fetchedPartSchema.getNumberOfPartitionsPerSlave());
// Lookup by name, and id/name listings (the boolean flag includes deleted objects).
assertEquals(partSchema.getObjectId(), repository.getPartitionSchemaID(EXP_PART_SCHEMA_NAME));
assertEquals(1, repository.getPartitionSchemaIDs(false).length);
assertEquals(1, repository.getPartitionSchemaIDs(true).length);
assertEquals(partSchema.getObjectId(), repository.getPartitionSchemaIDs(false)[0]);
assertEquals(1, repository.getPartitionSchemaNames(false).length);
assertEquals(1, repository.getPartitionSchemaNames(true).length);
assertEquals(EXP_PART_SCHEMA_NAME, repository.getPartitionSchemaNames(false)[0]);
// Delete and verify the schema is gone from all listings.
repository.deletePartitionSchema(partSchema.getObjectId());
assertFalse(repository.exists(EXP_PART_SCHEMA_NAME, null, RepositoryObjectType.PARTITION_SCHEMA));
assertEquals(0, repository.getPartitionSchemaIDs(false).length);
// shared object deletion is permanent by default
assertEquals(0, repository.getPartitionSchemaIDs(true).length);
assertEquals(0, repository.getPartitionSchemaNames(false).length);
// shared object deletion is permanent by default
assertEquals(0, repository.getPartitionSchemaNames(true).length);
}
Example usage of org.pentaho.di.partition.PartitionSchema in the pentaho-kettle project: class PurRepository, method updateSharedObjectCache.
/**
 * Updates the in-memory shared-object cache for a single element: when {@code element} is
 * non-null, stores a clone of it (replacing any cached entry with the same object id, or
 * adding it if none exists); when {@code element} is null, removes the cached entry of the
 * given {@code type} with the given {@code id}. Callers are expected to go through the
 * public update/remove wrappers rather than invoking this overload directly.
 *
 * @param element element to cache (must carry a non-null object id), or null to remove
 * @param type    object type to remove from; only consulted when element is null
 * @param id      object id to remove; only consulted when element is null
 * @throws KettleException if the resolved type is not a cached shared-object type
 */
private void updateSharedObjectCache(final RepositoryElementInterface element, final RepositoryObjectType type, final ObjectId id) throws KettleException {
if (element != null && (element.getObjectId() == null || element.getObjectId().getId() == null)) {
throw new IllegalArgumentException(element.getName() + " has a null id");
}
// Make sure the cache is populated before mutating it.
loadAndCacheSharedObjects(false);
boolean remove = element == null;
ObjectId idToFind = element != null ? element.getObjectId() : id;
RepositoryObjectType typeToUpdate = element != null ? element.getRepositoryElementType() : type;
RepositoryElementInterface elementToUpdate = null;
List<? extends SharedObjectInterface> origSharedObjects = null;
sharedObjectsLock.writeLock().lock();
try {
// Resolve the per-type cache bucket; clone the element so the cache never
// aliases the caller's (possibly still-mutating) instance.
switch(typeToUpdate) {
case DATABASE:
origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.DATABASE);
if (!remove) {
elementToUpdate = (RepositoryElementInterface) ((DatabaseMeta) element).clone();
}
break;
case SLAVE_SERVER:
origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.SLAVE_SERVER);
if (!remove) {
elementToUpdate = (RepositoryElementInterface) ((SlaveServer) element).clone();
}
break;
case CLUSTER_SCHEMA:
origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.CLUSTER_SCHEMA);
if (!remove) {
elementToUpdate = ((ClusterSchema) element).clone();
}
break;
case PARTITION_SCHEMA:
origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.PARTITION_SCHEMA);
if (!remove) {
elementToUpdate = (RepositoryElementInterface) ((PartitionSchema) element).clone();
}
break;
default:
throw new KettleException("unknown type [" + typeToUpdate + "]");
}
List<SharedObjectInterface> newSharedObjects = new ArrayList<SharedObjectInterface>(origSharedObjects);
// If there's a match on id, replace (or remove) that element. Object ids are unique,
// so we stop at the first hit; breaking also avoids operating with stale indices —
// after newSharedObjects.remove(i), indices taken from origSharedObjects no longer
// line up with newSharedObjects, and a later "match" would touch the wrong element.
boolean found = false;
for (int i = 0; i < origSharedObjects.size(); i++) {
RepositoryElementInterface repositoryElementInterface = (RepositoryElementInterface) origSharedObjects.get(i);
if (repositoryElementInterface == null) {
continue;
}
ObjectId objectId = repositoryElementInterface.getObjectId();
if (objectId != null && objectId.equals(idToFind)) {
if (remove) {
newSharedObjects.remove(i);
} else {
// because some clones don't clone the ID!!!
elementToUpdate.setObjectId(idToFind);
newSharedObjects.set(i, (SharedObjectInterface) elementToUpdate);
}
found = true;
break;
}
}
// No cached entry with this id: append the clone.
if (!remove && !found) {
// because some clones don't clone the ID!!!
elementToUpdate.setObjectId(idToFind);
newSharedObjects.add((SharedObjectInterface) elementToUpdate);
}
// Publish the rebuilt list atomically under the write lock.
sharedObjectsByType.put(typeToUpdate, newSharedObjects);
} finally {
sharedObjectsLock.writeLock().unlock();
}
}
Aggregations