Search in sources :

Example 56 with PartitionSchema

Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.

From the class PurRepositoryStressTest, method testLockSavePartitionSchema:

public void testLockSavePartitionSchema() throws Exception {
    // Stub id so save() can resolve the element's repository identity.
    ObjectId objectId = mock(ObjectId.class);
    doReturn("id1").when(objectId).getId();
    // Partition-schema mock wired with everything save() touches:
    // element type, name, object id, and a clone() that returns itself.
    PartitionSchema schemaMock = mock(PartitionSchema.class);
    doReturn(RepositoryObjectType.PARTITION_SCHEMA).when(schemaMock).getRepositoryElementType();
    doReturn("partitionID").when(schemaMock).getName();
    doReturn(objectId).when(schemaMock).getObjectId();
    doReturn(schemaMock).when(schemaMock).clone();
    // Version date is opaque to this test; a bare mock suffices.
    Calendar versionDate = mock(Calendar.class);
    purRepository.save(schemaMock, "vComment", versionDate, mock(ProgressMonitorListener.class), false);
}
Also used : StringObjectId(org.pentaho.di.repository.StringObjectId) ObjectId(org.pentaho.di.repository.ObjectId) PartitionSchema(org.pentaho.di.partition.PartitionSchema) Calendar(java.util.Calendar) ProgressMonitorListener(org.pentaho.di.core.ProgressMonitorListener)

Example 57 with PartitionSchema

Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.

From the class RepositoryTestBase, method createTransMeta:

/**
 * Builds a fully-populated {@code TransMeta} fixture whose attribute values come from the
 * {@code EXP_TRANS_*} test constants, so repository round-trip tests can verify every field.
 * Side effects: saves the two database metas, a slave server, and a partition schema to
 * {@code repository} (they need IDs before being referenced) and pushes each onto
 * {@code deleteStack} for teardown.
 *
 * @param dbName suffix appended to the transformation name and used to derive fixture names
 * @return the populated transformation metadata (not yet saved itself)
 * @throws Exception if any repository save or fixture creation fails
 */
protected TransMeta createTransMeta(final String dbName) throws Exception {
    RepositoryDirectoryInterface rootDir = loadStartDirectory();
    TransMeta transMeta = new TransMeta();
    transMeta.setName(EXP_TRANS_NAME.concat(dbName));
    transMeta.setDescription(EXP_TRANS_DESC);
    transMeta.setExtendedDescription(EXP_TRANS_EXTENDED_DESC);
    transMeta.setRepositoryDirectory(rootDir.findDirectory(DIR_TRANSFORMATIONS));
    transMeta.setTransversion(EXP_TRANS_VERSION);
    transMeta.setTransstatus(EXP_TRANS_STATUS);
    transMeta.setCreatedUser(EXP_TRANS_CREATED_USER);
    transMeta.setCreatedDate(EXP_TRANS_CREATED_DATE);
    transMeta.setModifiedUser(EXP_TRANS_MOD_USER);
    transMeta.setModifiedDate(EXP_TRANS_MOD_DATE);
    transMeta.addParameterDefinition(EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC);
    // --- the four log tables share the same connection/schema/table fixture values ---
    // TODO mlowery other transLogTable fields could be set for testing here
    TransLogTable transLogTable = TransLogTable.getDefault(transMeta, transMeta, new ArrayList<StepMeta>(0));
    transLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
    transLogTable.setLogInterval(EXP_TRANS_LOG_TABLE_INTERVAL);
    transLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
    transLogTable.setLogSizeLimit(EXP_TRANS_LOG_TABLE_SIZE_LIMIT);
    transLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
    transLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
    transMeta.setTransLogTable(transLogTable);
    // TODO mlowery other perfLogTable fields could be set for testing here
    PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault(transMeta, transMeta);
    perfLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
    perfLogTable.setLogInterval(EXP_TRANS_LOG_TABLE_INTERVAL);
    perfLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
    perfLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
    perfLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
    transMeta.setPerformanceLogTable(perfLogTable);
    // TODO mlowery other channelLogTable fields could be set for testing here
    ChannelLogTable channelLogTable = ChannelLogTable.getDefault(transMeta, transMeta);
    channelLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
    channelLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
    channelLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
    channelLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
    transMeta.setChannelLogTable(channelLogTable);
    // TODO mlowery other stepLogTable fields could be set for testing here
    StepLogTable stepLogTable = StepLogTable.getDefault(transMeta, transMeta);
    stepLogTable.setConnectionName(EXP_TRANS_LOG_TABLE_CONN_NAME);
    stepLogTable.setSchemaName(EXP_TRANS_LOG_TABLE_SCHEMA_NAME);
    stepLogTable.setTableName(EXP_TRANS_LOG_TABLE_TABLE_NAME);
    stepLogTable.setTimeoutInDays(EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS);
    transMeta.setStepLogTable(stepLogTable);
    DatabaseMeta dbMeta = createDatabaseMeta(dbName);
    // dbMeta must be saved so that it gets an ID
    repository.save(dbMeta, VERSION_COMMENT_V1, null);
    deleteStack.push(dbMeta);
    transMeta.setMaxDateConnection(dbMeta);
    transMeta.setMaxDateTable(EXP_TRANS_MAX_DATE_TABLE);
    transMeta.setMaxDateField(EXP_TRANS_MAX_DATE_FIELD);
    transMeta.setMaxDateOffset(EXP_TRANS_MAX_DATE_OFFSET);
    transMeta.setMaxDateDifference(EXP_TRANS_MAX_DATE_DIFF);
    transMeta.setSizeRowset(EXP_TRANS_SIZE_ROWSET);
    transMeta.setSleepTimeEmpty(EXP_TRANS_SLEEP_TIME_EMPTY);
    transMeta.setSleepTimeFull(EXP_TRANS_SLEEP_TIME_FULL);
    transMeta.setUsingUniqueConnections(EXP_TRANS_USING_UNIQUE_CONN);
    transMeta.setFeedbackShown(EXP_TRANS_FEEDBACK_SHOWN);
    transMeta.setFeedbackSize(EXP_TRANS_FEEDBACK_SIZE);
    transMeta.setUsingThreadPriorityManagment(EXP_TRANS_USING_THREAD_PRIORITY_MGMT);
    transMeta.setSharedObjectsFile(EXP_TRANS_SHARED_OBJECTS_FILE);
    transMeta.setCapturingStepPerformanceSnapShots(EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS);
    transMeta.setStepPerformanceCapturingDelay(EXP_TRANS_STEP_PERF_CAP_DELAY);
    transMeta.addDependency(new TransDependency(dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME));
    // a second database meta backs the steps; saved first so it has an ID
    DatabaseMeta stepDbMeta = createDatabaseMeta(EXP_DBMETA_NAME_STEP.concat(dbName));
    repository.save(stepDbMeta, VERSION_COMMENT_V1, null);
    deleteStack.push(stepDbMeta);
    // two steps joined by a hop, sharing one Condition instance
    Condition cond = new Condition();
    StepMeta step1 = createStepMeta1(transMeta, stepDbMeta, cond);
    transMeta.addStep(step1);
    StepMeta step2 = createStepMeta2(stepDbMeta, cond);
    transMeta.addStep(step2);
    transMeta.addTransHop(createTransHopMeta(step1, step2));
    SlaveServer slaveServer = createSlaveServer(dbName);
    PartitionSchema partSchema = createPartitionSchema(dbName);
    // slaveServer, partSchema must be saved so that they get IDs
    repository.save(slaveServer, VERSION_COMMENT_V1, null);
    deleteStack.push(slaveServer);
    repository.save(partSchema, VERSION_COMMENT_V1, null);
    deleteStack.push(partSchema);
    SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
    slaveStepCopyPartitionDistribution.addPartition(EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0);
    slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas(Arrays.asList(new PartitionSchema[] { partSchema }));
    transMeta.setSlaveStepCopyPartitionDistribution(slaveStepCopyPartitionDistribution);
    transMeta.setSlaveTransformation(EXP_TRANS_SLAVE_TRANSFORMATION);
    return transMeta;
}
Also used : Condition(org.pentaho.di.core.Condition) PerformanceLogTable(org.pentaho.di.core.logging.PerformanceLogTable) SlaveStepCopyPartitionDistribution(org.pentaho.di.trans.SlaveStepCopyPartitionDistribution) PartitionSchema(org.pentaho.di.partition.PartitionSchema) TransMeta(org.pentaho.di.trans.TransMeta) TransDependency(org.pentaho.di.trans.TransDependency) SlaveServer(org.pentaho.di.cluster.SlaveServer) StepMeta(org.pentaho.di.trans.step.StepMeta) BaseStepMeta(org.pentaho.di.trans.step.BaseStepMeta) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) ChannelLogTable(org.pentaho.di.core.logging.ChannelLogTable) StepLogTable(org.pentaho.di.core.logging.StepLogTable) TransLogTable(org.pentaho.di.core.logging.TransLogTable)

Example 58 with PartitionSchema

Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.

From the class RepositoryTestBase, method createPartitionSchema:

/**
 * Builds a {@code PartitionSchema} fixture populated from the {@code EXP_PART_SCHEMA_*}
 * test constants; the schema is returned unsaved.
 *
 * @param partName suffix appended to the fixture schema name
 * @return the populated partition schema
 * @throws Exception declared for subclass overrides; this implementation does not throw
 */
protected PartitionSchema createPartitionSchema(String partName) throws Exception {
    PartitionSchema partSchema = new PartitionSchema();
    partSchema.setName(EXP_PART_SCHEMA_NAME.concat(partName));
    partSchema.setDescription(EXP_PART_SCHEMA_DESC);
    // Arrays.asList varargs form avoids the redundant explicit String[] allocation.
    partSchema.setPartitionIDs(Arrays.asList(EXP_PART_SCHEMA_PARTID_1, EXP_PART_SCHEMA_PARTID_2));
    partSchema.setDynamicallyDefined(EXP_PART_SCHEMA_DYN_DEF);
    partSchema.setNumberOfPartitionsPerSlave(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT);
    return partSchema;
}
Also used : PartitionSchema(org.pentaho.di.partition.PartitionSchema)

Example 59 with PartitionSchema

Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.

From the class RepositoryTestBase, method testPartitionSchemas:

/**
 * End-to-end exercise of the repository's partition-schema API:
 * save(partitionSchema), exists(), loadPartitionSchema(), deletePartitionSchema(),
 * getPartitionSchemaID(), getPartitionSchemaIDs(), getPartitionSchemaNames().
 * Verifies versioning (two saves, load of each revision), lookup by id/name,
 * and that deletion removes the schema from all listings.
 */
@Test
public void testPartitionSchemas() throws Exception {
    // RepositoryDirectoryInterface rootDir =
    initRepo();
    // v1: save and verify id, revision, comment, existence
    PartitionSchema partSchema = createPartitionSchema("");
    repository.save(partSchema, VERSION_COMMENT_V1, null);
    assertNotNull(partSchema.getObjectId());
    ObjectRevision version = partSchema.getObjectRevision();
    assertNotNull(version);
    assertTrue(hasVersionWithComment(partSchema, VERSION_COMMENT_V1));
    assertTrue(repository.exists(EXP_PART_SCHEMA_NAME, null, RepositoryObjectType.PARTITION_SCHEMA));
    // reload and compare every round-tripped attribute
    PartitionSchema fetchedPartSchema = repository.loadPartitionSchema(partSchema.getObjectId(), null);
    assertEquals(EXP_PART_SCHEMA_NAME, fetchedPartSchema.getName());
    // TODO mlowery partitionSchema.getXML doesn't output desc either; should it?
    // assertEquals(EXP_PART_SCHEMA_DESC, fetchedPartSchema.getDescription());
    assertEquals(Arrays.asList(new String[] { EXP_PART_SCHEMA_PARTID_1, EXP_PART_SCHEMA_PARTID_2 }), fetchedPartSchema.getPartitionIDs());
    assertEquals(EXP_PART_SCHEMA_DYN_DEF, fetchedPartSchema.isDynamicallyDefined());
    assertEquals(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT, fetchedPartSchema.getNumberOfPartitionsPerSlave());
    // v2: mutate one field, save again, confirm the new revision comment
    partSchema.setNumberOfPartitionsPerSlave(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT_V2);
    repository.save(partSchema, VERSION_COMMENT_V2, null);
    assertEquals(VERSION_COMMENT_V2, partSchema.getObjectRevision().getComment());
    // latest revision carries the v2 value; loading by v1 label restores the v1 value
    fetchedPartSchema = repository.loadPartitionSchema(partSchema.getObjectId(), null);
    assertEquals(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT_V2, fetchedPartSchema.getNumberOfPartitionsPerSlave());
    fetchedPartSchema = repository.loadPartitionSchema(partSchema.getObjectId(), VERSION_LABEL_V1);
    assertEquals(EXP_PART_SCHEMA_PART_PER_SLAVE_COUNT, fetchedPartSchema.getNumberOfPartitionsPerSlave());
    // id/name listings: the boolean flag means "include deleted" — TODO confirm against Repository API
    assertEquals(partSchema.getObjectId(), repository.getPartitionSchemaID(EXP_PART_SCHEMA_NAME));
    assertEquals(1, repository.getPartitionSchemaIDs(false).length);
    assertEquals(1, repository.getPartitionSchemaIDs(true).length);
    assertEquals(partSchema.getObjectId(), repository.getPartitionSchemaIDs(false)[0]);
    assertEquals(1, repository.getPartitionSchemaNames(false).length);
    assertEquals(1, repository.getPartitionSchemaNames(true).length);
    assertEquals(EXP_PART_SCHEMA_NAME, repository.getPartitionSchemaNames(false)[0]);
    // deletion removes the schema from existence checks and from both listing variants
    repository.deletePartitionSchema(partSchema.getObjectId());
    assertFalse(repository.exists(EXP_PART_SCHEMA_NAME, null, RepositoryObjectType.PARTITION_SCHEMA));
    assertEquals(0, repository.getPartitionSchemaIDs(false).length);
    // shared object deletion is permanent by default
    assertEquals(0, repository.getPartitionSchemaIDs(true).length);
    assertEquals(0, repository.getPartitionSchemaNames(false).length);
    // shared object deletion is permanent by default
    assertEquals(0, repository.getPartitionSchemaNames(true).length);
}
Also used : PartitionSchema(org.pentaho.di.partition.PartitionSchema) Test(org.junit.Test)

Example 60 with PartitionSchema

Use of org.pentaho.di.partition.PartitionSchema in project pentaho-kettle by pentaho.

From the class PurRepository, method updateSharedObjectCache:

/**
 * Replaces, inserts, or removes one entry in the in-memory shared-object cache,
 * matched by object id within the bucket for the element's repository type.
 * When {@code element} is null the entry identified by {@code type}/{@code id} is removed;
 * otherwise a clone of {@code element} replaces the matching entry (or is appended).
 * <p>
 * Do not call this method directly. Instead call updateSharedObjectCache or removeFromSharedObjectCache.
 *
 * @param element the element to cache, or null to remove an entry
 * @param type    the repository type, used only when {@code element} is null
 * @param id      the object id to remove, used only when {@code element} is null
 * @throws KettleException if the type is not a cached shared-object type
 */
private void updateSharedObjectCache(final RepositoryElementInterface element, final RepositoryObjectType type, final ObjectId id) throws KettleException {
    if (element != null && (element.getObjectId() == null || element.getObjectId().getId() == null)) {
        throw new IllegalArgumentException(element.getName() + " has a null id");
    }
    loadAndCacheSharedObjects(false);
    boolean remove = element == null;
    ObjectId idToFind = element != null ? element.getObjectId() : id;
    RepositoryObjectType typeToUpdate = element != null ? element.getRepositoryElementType() : type;
    RepositoryElementInterface elementToUpdate = null;
    List<? extends SharedObjectInterface> origSharedObjects = null;
    sharedObjectsLock.writeLock().lock();
    try {
        // Clone the element so cache mutations never alias the caller's instance.
        switch(typeToUpdate) {
            case DATABASE:
                origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.DATABASE);
                if (!remove) {
                    elementToUpdate = (RepositoryElementInterface) ((DatabaseMeta) element).clone();
                }
                break;
            case SLAVE_SERVER:
                origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.SLAVE_SERVER);
                if (!remove) {
                    elementToUpdate = (RepositoryElementInterface) ((SlaveServer) element).clone();
                }
                break;
            case CLUSTER_SCHEMA:
                origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.CLUSTER_SCHEMA);
                if (!remove) {
                    elementToUpdate = ((ClusterSchema) element).clone();
                }
                break;
            case PARTITION_SCHEMA:
                origSharedObjects = sharedObjectsByType.get(RepositoryObjectType.PARTITION_SCHEMA);
                if (!remove) {
                    elementToUpdate = (RepositoryElementInterface) ((PartitionSchema) element).clone();
                }
                break;
            default:
                throw new KettleException("unknown type [" + typeToUpdate + "]");
        }
        List<SharedObjectInterface> newSharedObjects = new ArrayList<SharedObjectInterface>(origSharedObjects);
        // if there's a match on id, replace the element
        boolean found = false;
        // BUGFIX: iterate over the mutable copy rather than the original list.
        // The previous code indexed origSharedObjects while calling
        // newSharedObjects.remove(i), so after one removal the two lists
        // desynchronized and any later set(i, ...)/remove(i) hit the wrong slot.
        for (int i = 0; i < newSharedObjects.size(); i++) {
            RepositoryElementInterface repositoryElementInterface = (RepositoryElementInterface) newSharedObjects.get(i);
            if (repositoryElementInterface == null) {
                continue;
            }
            ObjectId objectId = repositoryElementInterface.getObjectId();
            if (objectId != null && objectId.equals(idToFind)) {
                if (remove) {
                    newSharedObjects.remove(i);
                    // step back so the element shifted into slot i is not skipped
                    i--;
                } else {
                    // because some clones don't clone the ID!!!
                    elementToUpdate.setObjectId(idToFind);
                    newSharedObjects.set(i, (SharedObjectInterface) elementToUpdate);
                }
                found = true;
            }
        }
        // otherwise, add it
        if (!remove && !found) {
            // because some clones don't clone the ID!!!
            elementToUpdate.setObjectId(idToFind);
            newSharedObjects.add((SharedObjectInterface) elementToUpdate);
        }
        sharedObjectsByType.put(typeToUpdate, newSharedObjects);
    } finally {
        sharedObjectsLock.writeLock().unlock();
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) StringObjectId(org.pentaho.di.repository.StringObjectId) ObjectId(org.pentaho.di.repository.ObjectId) PartitionSchema(org.pentaho.di.partition.PartitionSchema) ArrayList(java.util.ArrayList) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) KettleExtensionPoint(org.pentaho.di.core.extension.KettleExtensionPoint) RepositoryElementInterface(org.pentaho.di.repository.RepositoryElementInterface) RepositoryObjectType(org.pentaho.di.repository.RepositoryObjectType) SharedObjectInterface(org.pentaho.di.shared.SharedObjectInterface)

Aggregations

PartitionSchema (org.pentaho.di.partition.PartitionSchema)74 KettleException (org.pentaho.di.core.exception.KettleException)26 TransMeta (org.pentaho.di.trans.TransMeta)19 StepMeta (org.pentaho.di.trans.step.StepMeta)19 ClusterSchema (org.pentaho.di.cluster.ClusterSchema)18 SlaveServer (org.pentaho.di.cluster.SlaveServer)18 DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta)17 StepPartitioningMeta (org.pentaho.di.trans.step.StepPartitioningMeta)17 Test (org.junit.Test)16 ArrayList (java.util.ArrayList)15 ObjectId (org.pentaho.di.repository.ObjectId)14 ErrorDialog (org.pentaho.di.ui.core.dialog.ErrorDialog)10 List (java.util.List)8 KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint)8 KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException)6 StringObjectId (org.pentaho.di.repository.StringObjectId)6 PartitionSchemaDialog (org.pentaho.di.ui.partition.dialog.PartitionSchemaDialog)6 MessageBox (org.eclipse.swt.widgets.MessageBox)5 Point (org.pentaho.di.core.gui.Point)5 IOException (java.io.IOException)4