Example 51 with KettleDatabaseException

Use of org.pentaho.di.core.exception.KettleDatabaseException in project pentaho-kettle by pentaho.

The class KettleDatabaseRepositorySlaveServerDelegate, method loadSlaveServer.

public SlaveServer loadSlaveServer(ObjectId id_slave_server) throws KettleException {
    SlaveServer slaveServer = new SlaveServer();
    slaveServer.setObjectId(id_slave_server);
    RowMetaAndData row = getSlaveServer(id_slave_server);
    if (row == null) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "SlaveServer.SlaveCouldNotBeFound", id_slave_server.toString()));
    }
    slaveServer.setName(row.getString(KettleDatabaseRepository.FIELD_SLAVE_NAME, null));
    slaveServer.setHostname(row.getString(KettleDatabaseRepository.FIELD_SLAVE_HOST_NAME, null));
    slaveServer.setPort(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PORT, null));
    slaveServer.setWebAppName(row.getString(KettleDatabaseRepository.FIELD_SLAVE_WEB_APP_NAME, null));
    slaveServer.setUsername(row.getString(KettleDatabaseRepository.FIELD_SLAVE_USERNAME, null));
    slaveServer.setPassword(Encr.decryptPasswordOptionallyEncrypted(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PASSWORD, null)));
    slaveServer.setProxyHostname(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PROXY_HOST_NAME, null));
    slaveServer.setProxyPort(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PROXY_PORT, null));
    slaveServer.setNonProxyHosts(row.getString(KettleDatabaseRepository.FIELD_SLAVE_NON_PROXY_HOSTS, null));
    slaveServer.setMaster(row.getBoolean(KettleDatabaseRepository.FIELD_SLAVE_MASTER, false));
    return slaveServer;
}
Also used: RowMetaAndData (org.pentaho.di.core.RowMetaAndData), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), SlaveServer (org.pentaho.di.cluster.SlaveServer)
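For orientation, a minimal calling sketch (not from the source): 'slaveDelegate' is assumed to be the KettleDatabaseRepositorySlaveServerDelegate of an open repository connection, and the id value is purely illustrative.

// Hypothetical caller; the delegate and the id are assumptions, not taken from the example above.
ObjectId slaveId = new LongObjectId(42L);
try {
    SlaveServer slave = slaveDelegate.loadSlaveServer(slaveId);
    // The returned object is fully populated, including the decrypted password.
    System.out.println("Loaded slave server '" + slave.getName() + "' at " + slave.getHostname() + ":" + slave.getPort());
} catch (KettleException e) {
    // A KettleDatabaseException is thrown when no row exists for the given id.
    e.printStackTrace();
}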

Example 52 with KettleDatabaseException

Use of org.pentaho.di.core.exception.KettleDatabaseException in project pentaho-kettle by pentaho.

The class KettleDatabaseRepositoryStepDelegate, method loadStepMeta.

/**
 * Create a new step by loading its metadata from the repository.
 *
 * @param stepId
 *          the object id of the step to load
 * @param databases
 *          the database connections to resolve references against
 * @param partitionSchemas
 *          the partition schemas to resolve references against
 * @throws KettleException
 *           if the step could not be loaded
 */
public StepMeta loadStepMeta(ObjectId stepId, List<DatabaseMeta> databases, List<PartitionSchema> partitionSchemas) throws KettleException {
    StepMeta stepMeta = new StepMeta();
    PluginRegistry registry = PluginRegistry.getInstance();
    try {
        RowMetaAndData r = getStep(stepId);
        if (r != null) {
            stepMeta.setObjectId(stepId);
            stepMeta.setName(r.getString(KettleDatabaseRepository.FIELD_STEP_NAME, null));
            stepMeta.setDescription(r.getString(KettleDatabaseRepository.FIELD_STEP_DESCRIPTION, null));
            long id_step_type = r.getInteger(KettleDatabaseRepository.FIELD_STEP_ID_STEP_TYPE, -1L);
            RowMetaAndData steptyperow = getStepType(new LongObjectId(id_step_type));
            stepMeta.setStepID(steptyperow.getString(KettleDatabaseRepository.FIELD_STEP_TYPE_CODE, null));
            stepMeta.setDistributes(r.getBoolean(KettleDatabaseRepository.FIELD_STEP_DISTRIBUTE, true));
            int copies = (int) r.getInteger(KettleDatabaseRepository.FIELD_STEP_COPIES, 1);
            String copiesString = r.getString(KettleDatabaseRepository.FIELD_STEP_COPIES_STRING, null);
            if (!Utils.isEmpty(copiesString)) {
                stepMeta.setCopiesString(copiesString);
            } else {
                stepMeta.setCopies(copies);
            }
            int x = (int) r.getInteger(KettleDatabaseRepository.FIELD_STEP_GUI_LOCATION_X, 0);
            int y = (int) r.getInteger(KettleDatabaseRepository.FIELD_STEP_GUI_LOCATION_Y, 0);
            stepMeta.setLocation(new Point(x, y));
            stepMeta.setDraw(r.getBoolean(KettleDatabaseRepository.FIELD_STEP_GUI_DRAW, false));
            // Generate the appropriate class...
            PluginInterface sp = registry.findPluginWithId(StepPluginType.class, stepMeta.getStepID());
            if (sp == null) {
                stepMeta.setStepMetaInterface(new MissingTrans(stepMeta.getName(), stepMeta.getStepID()));
            } else {
                stepMeta.setStepMetaInterface((StepMetaInterface) registry.loadClass(sp));
            }
            if (stepMeta.getStepMetaInterface() != null) {
                // Read the step info from the repository!
                readRepCompatibleStepMeta(stepMeta.getStepMetaInterface(), repository, stepMeta.getObjectId(), databases);
                stepMeta.getStepMetaInterface().readRep(repository, repository.metaStore, stepMeta.getObjectId(), databases);
            }
            // Get the partitioning as well...
            // 
            stepMeta.setStepPartitioningMeta(loadStepPartitioningMeta(stepMeta.getObjectId()));
            stepMeta.getStepPartitioningMeta().setPartitionSchemaAfterLoading(partitionSchemas);
            // Get the cluster schema name
            // 
            stepMeta.setClusterSchemaName(repository.getStepAttributeString(stepId, "cluster_schema"));
            // Are we using a custom row distribution plugin?
            // 
            String rowDistributionCode = repository.getStepAttributeString(stepId, 0, "row_distribution_code");
            RowDistributionInterface rowDistribution = PluginRegistry.getInstance().loadClass(RowDistributionPluginType.class, rowDistributionCode, RowDistributionInterface.class);
            stepMeta.setRowDistribution(rowDistribution);
            // Load the attribute groups map
            // 
            stepMeta.setAttributesMap(loadStepAttributesMap(stepId));
            // 
            return stepMeta;
        } else {
            throw new KettleException(BaseMessages.getString(PKG, "StepMeta.Exception.StepInfoCouldNotBeFound", String.valueOf(stepId)));
        }
    } catch (KettleDatabaseException dbe) {
        throw new KettleException(BaseMessages.getString(PKG, "StepMeta.Exception.StepCouldNotBeLoaded", String.valueOf(stepMeta.getObjectId())), dbe);
    }
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), PluginInterface (org.pentaho.di.core.plugins.PluginInterface), LongObjectId (org.pentaho.di.repository.LongObjectId), ValueMetaString (org.pentaho.di.core.row.value.ValueMetaString), Point (org.pentaho.di.core.gui.Point), StepMeta (org.pentaho.di.trans.step.StepMeta), RowMetaAndData (org.pentaho.di.core.RowMetaAndData), PluginRegistry (org.pentaho.di.core.plugins.PluginRegistry), MissingTrans (org.pentaho.di.trans.steps.missing.MissingTrans), RowDistributionInterface (org.pentaho.di.trans.step.RowDistributionInterface)
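A hedged usage sketch, assuming 'stepDelegate' is the repository's KettleDatabaseRepositoryStepDelegate, 'stepId' identifies an existing step, and 'transMeta' is a transformation that already holds the shared databases and partition schemas; the names are illustrative only.

// Hypothetical caller: resolve shared objects from an already loaded TransMeta.
List<DatabaseMeta> databases = transMeta.getDatabases();
List<PartitionSchema> partitionSchemas = transMeta.getPartitionSchemas();
StepMeta stepMeta = stepDelegate.loadStepMeta(stepId, databases, partitionSchemas);
// Unknown plugin ids do not fail the load: the step comes back as MissingTrans,
// so the transformation can still be opened and repaired later.
System.out.println("Loaded step '" + stepMeta.getName() + "' of type " + stepMeta.getStepID());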

Example 53 with KettleDatabaseException

Use of org.pentaho.di.core.exception.KettleDatabaseException in project pentaho-kettle by pentaho.

The class KettleDatabaseRepositoryTransDelegate, method readSlaves.

/**
 * Read the slave servers in the repository and add them to this transformation if they are not yet present.
 *
 * @param transMeta
 *          the transformation to load the slave servers into
 * @param overWriteShared
 *          if true, overwrite an existing shared object with the same name
 * @throws KettleException
 *           if the slave servers could not be read
 */
public void readSlaves(TransMeta transMeta, boolean overWriteShared) throws KettleException {
    try {
        ObjectId[] dbids = repository.getSlaveIDs(false);
        for (int i = 0; i < dbids.length; i++) {
            // Load last version
            SlaveServer slaveServer = repository.loadSlaveServer(dbids[i], null);
            slaveServer.shareVariablesWith(transMeta);
            // Check if there already is one in the transformation
            SlaveServer check = transMeta.findSlaveServer(slaveServer.getName());
            if (check == null || overWriteShared) {
                if (!Utils.isEmpty(slaveServer.getName())) {
                    transMeta.addOrReplaceSlaveServer(slaveServer);
                    if (!overWriteShared) {
                        slaveServer.setChanged(false);
                    }
                }
            }
        }
    } catch (KettleDatabaseException dbe) {
        throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Log.UnableToReadSlaveServersFromRepository"), dbe);
    }
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), LongObjectId (org.pentaho.di.repository.LongObjectId), ObjectId (org.pentaho.di.repository.ObjectId), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), SlaveServer (org.pentaho.di.cluster.SlaveServer)
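A short usage sketch under the assumption that 'transDelegate' is the KettleDatabaseRepositoryTransDelegate of a connected repository and 'transMeta' is the transformation being loaded; the overwrite flag mirrors the 'overWriteShared' parameter above.

// Hypothetical caller: merge the repository's slave servers into the
// transformation, overwriting shared objects that carry the same name.
transDelegate.readSlaves(transMeta, true);
for (SlaveServer slave : transMeta.getSlaveServers()) {
    System.out.println("Available slave server: " + slave.getName());
}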

Example 54 with KettleDatabaseException

Use of org.pentaho.di.core.exception.KettleDatabaseException in project pentaho-kettle by pentaho.

The class KettleDatabaseRepositoryTransDelegate, method saveTransformation.

/**
 * Saves the transformation to a repository.
 *
 * @param transMeta
 *          the transformation metadata to store
 * @param versionComment
 *          the comment to store with this version
 * @param monitor
 *          the way we report progress to the user, can be null if no UI is present
 * @param overwriteAssociated
 *          overwrite existing associated shared object(s)?
 * @throws KettleException
 *           if an error occurs.
 */
public void saveTransformation(TransMeta transMeta, String versionComment, ProgressMonitorListener monitor, boolean overwriteAssociated) throws KettleException {
    try {
        if (monitor != null) {
            monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.LockingRepository"));
        }
        repository.insertLogEntry("save transformation '" + transMeta.getName() + "'");
        // Clear the attribute id cache to force a repository lookup.
        repository.connectionDelegate.clearNextIDCounters();
        // Do we have a valid directory?
        if (transMeta.getRepositoryDirectory().getObjectId() == null) {
            throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Exception.PlsSelectAValidDirectoryBeforeSavingTheTransformation"));
        }
        int nrWorks = 2 + transMeta.nrDatabases() + transMeta.nrNotes() + transMeta.nrSteps() + transMeta.nrTransHops();
        if (monitor != null) {
            monitor.beginTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingTransformationTask.Title") + transMeta.getPathAndName(), nrWorks);
        }
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingOfTransformationStarted"));
        }
        if (monitor != null && monitor.isCanceled()) {
            throw new KettleDatabaseException();
        }
        // 
        if (monitor != null) {
            monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.HandlingOldVersionTransformationTask.Title"));
        // transMeta.setObjectId(getTransformationID(transMeta.getName(),
        // transMeta.getRepositoryDirectory().getObjectId()));
        }
        // If no valid id is available in the database, assign one...
        if (transMeta.getObjectId() == null) {
            transMeta.setObjectId(repository.connectionDelegate.getNextTransformationID());
        } else {
            // Remove the existing information stored for this id_transformation before we put it back in...
            if (monitor != null) {
                monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.DeletingOldVersionTransformationTask.Title"));
            }
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.DeletingOldVersionTransformation"));
            }
            repository.deleteTransformation(transMeta.getObjectId());
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.OldVersionOfTransformationRemoved"));
            }
        }
        if (monitor != null) {
            monitor.worked(1);
        }
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingNotes"));
        }
        for (int i = 0; i < transMeta.nrNotes(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingNoteTask.Title") +
            // (i + 1) + "/" + transMeta.nrNotes());
            NotePadMeta ni = transMeta.getNote(i);
            repository.saveNotePadMeta(ni, transMeta.getObjectId());
            if (ni.getObjectId() != null) {
                repository.insertTransNote(transMeta.getObjectId(), ni.getObjectId());
            }
            if (monitor != null) {
                monitor.worked(1);
            }
        }
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingDatabaseConnections"));
        }
        for (int i = 0; i < transMeta.nrDatabases(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG,
            // "TransMeta.Monitor.SavingDatabaseTask.Title") + (i + 1) + "/" + transMeta.nrDatabases());
            DatabaseMeta databaseMeta = transMeta.getDatabase(i);
            // Only save the connection if it changed, has no id yet, or associated objects must be overwritten in the repository
            if (overwriteAssociated || databaseMeta.hasChanged() || databaseMeta.getObjectId() == null) {
                repository.save(databaseMeta, versionComment, monitor, overwriteAssociated);
            }
            if (monitor != null) {
                monitor.worked(1);
            }
        }
        // It is possible that we received another step through a plugin.
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.CheckingStepTypes"));
        }
        repository.updateStepTypes();
        repository.updateDatabaseTypes();
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingSteps"));
        }
        for (int i = 0; i < transMeta.nrSteps(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingStepTask.Title") +
            // (i + 1) + "/" + transMeta.nrSteps());
            StepMeta stepMeta = transMeta.getStep(i);
            repository.stepDelegate.saveStepMeta(stepMeta, transMeta.getObjectId());
            if (monitor != null) {
                monitor.worked(1);
            }
        }
        repository.connectionDelegate.closeStepAttributeInsertPreparedStatement();
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingHops"));
        }
        for (int i = 0; i < transMeta.nrTransHops(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.SavingHopTask.Title") +
            // (i + 1) + "/" + transMeta.nrTransHops());
            TransHopMeta hi = transMeta.getTransHop(i);
            saveTransHopMeta(hi, transMeta.getObjectId());
            if (monitor != null) {
                monitor.worked(1);
            }
        }
        // if (monitor != null) monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.FinishingTask.Title"));
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingTransformationInfo"));
        }
        // save the top level information for the transformation
        insertTransformation(transMeta);
        saveTransParameters(transMeta);
        repository.connectionDelegate.closeTransAttributeInsertPreparedStatement();
        // Save the partition schemas used by this transformation
        //
        for (int i = 0; i < transMeta.getPartitionSchemas().size(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            PartitionSchema partitionSchema = transMeta.getPartitionSchemas().get(i);
            // See if this transformation really is a consumer of this object
            // It might be simply loaded as a shared object from the repository
            // 
            boolean isUsedByTransformation = transMeta.isUsingPartitionSchema(partitionSchema);
            repository.save(partitionSchema, versionComment, null, transMeta.getObjectId(), isUsedByTransformation, overwriteAssociated);
        }
        // Save the slave servers used by this transformation
        //
        for (int i = 0; i < transMeta.getSlaveServers().size(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            SlaveServer slaveServer = transMeta.getSlaveServers().get(i);
            boolean isUsedByTransformation = transMeta.isUsingSlaveServer(slaveServer);
            repository.save(slaveServer, versionComment, null, transMeta.getObjectId(), isUsedByTransformation, overwriteAssociated);
        }
        // Save the clustering schemas
        for (int i = 0; i < transMeta.getClusterSchemas().size(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            ClusterSchema clusterSchema = transMeta.getClusterSchemas().get(i);
            boolean isUsedByTransformation = transMeta.isUsingClusterSchema(clusterSchema);
            repository.save(clusterSchema, versionComment, null, transMeta.getObjectId(), isUsedByTransformation, overwriteAssociated);
        }
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingDependencies"));
        }
        for (int i = 0; i < transMeta.nrDependencies(); i++) {
            if (monitor != null && monitor.isCanceled()) {
                throw new KettleDatabaseException(BaseMessages.getString(PKG, "TransMeta.Log.UserCancelledTransSave"));
            }
            TransDependency td = transMeta.getDependency(i);
            saveTransDependency(td, transMeta.getObjectId());
        }
        saveTransAttributesMap(transMeta.getObjectId(), transMeta.getAttributesMap());
        // Save the step error handling information as well!
        for (int i = 0; i < transMeta.nrSteps(); i++) {
            StepMeta stepMeta = transMeta.getStep(i);
            StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta();
            if (stepErrorMeta != null) {
                repository.stepDelegate.saveStepErrorMeta(stepErrorMeta, transMeta.getObjectId(), stepMeta.getObjectId());
            }
        }
        repository.connectionDelegate.closeStepAttributeInsertPreparedStatement();
        if (log.isDebug()) {
            log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.SavingFinished"));
        }
        if (monitor != null) {
            monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.UnlockingRepository"));
        }
        repository.unlockRepository();
        // Perform a commit!
        repository.commit();
        transMeta.clearChanged();
        if (monitor != null) {
            monitor.worked(1);
        }
        if (monitor != null) {
            monitor.done();
        }
    } catch (KettleDatabaseException dbe) {
        // Oops, roll back!
        repository.rollback();
        log.logError(BaseMessages.getString(PKG, "TransMeta.Log.ErrorSavingTransformationToRepository") + Const.CR + dbe.getMessage());
        throw new KettleException(BaseMessages.getString(PKG, "TransMeta.Log.ErrorSavingTransformationToRepository"), dbe);
    }
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), PartitionSchema (org.pentaho.di.partition.PartitionSchema), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), StepErrorMeta (org.pentaho.di.trans.step.StepErrorMeta), TransDependency (org.pentaho.di.trans.TransDependency), SlaveServer (org.pentaho.di.cluster.SlaveServer), DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta), StepMeta (org.pentaho.di.trans.step.StepMeta), NotePadMeta (org.pentaho.di.core.NotePadMeta), TransHopMeta (org.pentaho.di.trans.TransHopMeta), ClusterSchema (org.pentaho.di.cluster.ClusterSchema)
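A minimal save sketch (illustrative names, not from the source): 'transDelegate' is assumed to be the repository's KettleDatabaseRepositoryTransDelegate, and the transformation must already point at a valid repository directory, otherwise the method throws before saving anything.

// Hypothetical caller: headless save, no progress monitor, shared objects untouched.
try {
    transDelegate.saveTransformation(transMeta, "nightly check-in", null, false);
} catch (KettleException e) {
    // The delegate has already rolled back the repository transaction before
    // rethrowing, so the caller only needs to report the failure.
    System.err.println("Saving '" + transMeta.getName() + "' failed: " + e.getMessage());
}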

Example 55 with KettleDatabaseException

Use of org.pentaho.di.core.exception.KettleDatabaseException in project pentaho-kettle by pentaho.

The class KettleDatabaseRepositoryTransDelegate, method saveTransHopMeta.

public void saveTransHopMeta(TransHopMeta transHopMeta, ObjectId id_transformation) throws KettleException {
    try {
        // See if a transformation hop with the same fromstep and tostep is
        // already available...
        ObjectId id_step_from = transHopMeta.getFromStep() == null ? null : transHopMeta.getFromStep().getObjectId();
        ObjectId id_step_to = transHopMeta.getToStep() == null ? null : transHopMeta.getToStep().getObjectId();
        // Insert new transMeta hop in repository
        transHopMeta.setObjectId(insertTransHop(id_transformation, id_step_from, id_step_to, transHopMeta.isEnabled()));
    } catch (KettleDatabaseException dbe) {
        throw new KettleException(BaseMessages.getString(PKG, "TransHopMeta.Exception.UnableToSaveTransformationHopInfo") + id_transformation, dbe);
    }
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), LongObjectId (org.pentaho.di.repository.LongObjectId), ObjectId (org.pentaho.di.repository.ObjectId), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException)
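Finally, a hedged sketch of persisting a single hop; 'fromStep', 'toStep', 'transDelegate' and 'transMeta' are assumed to exist and to have been saved already, so their object ids are available.

// Hypothetical caller: create an enabled hop between two saved steps and store it.
TransHopMeta hop = new TransHopMeta(fromStep, toStep, true);
transDelegate.saveTransHopMeta(hop, transMeta.getObjectId());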

Aggregations

KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 279 usages
KettleException (org.pentaho.di.core.exception.KettleException): 176 usages
SQLException (java.sql.SQLException): 69 usages
Database (org.pentaho.di.core.database.Database): 46 usages
ValueMetaString (org.pentaho.di.core.row.value.ValueMetaString): 41 usages
KettleValueException (org.pentaho.di.core.exception.KettleValueException): 39 usages
DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta): 37 usages
KettleDatabaseBatchException (org.pentaho.di.core.exception.KettleDatabaseBatchException): 33 usages
RowMetaInterface (org.pentaho.di.core.row.RowMetaInterface): 31 usages
BatchUpdateException (java.sql.BatchUpdateException): 27 usages
ResultSet (java.sql.ResultSet): 27 usages
ValueMetaInterface (org.pentaho.di.core.row.ValueMetaInterface): 26 usages
KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint): 25 usages
RowMetaAndData (org.pentaho.di.core.RowMetaAndData): 24 usages
RowMeta (org.pentaho.di.core.row.RowMeta): 22 usages
FileObject (org.apache.commons.vfs2.FileObject): 18 usages
LongObjectId (org.pentaho.di.repository.LongObjectId): 17 usages
Savepoint (java.sql.Savepoint): 16 usages
ArrayList (java.util.ArrayList): 16 usages
Test (org.junit.Test): 14 usages