Example use of org.pentaho.di.core.database.DatabaseMeta in the pentaho-kettle project: class DatabaseLogExceptionFactoryTest, method testExceptionStrategyWithMaxAllowedPacketException.
/**
 * PDI-5153
 * Test that in case of MaxAllowedPacketException exception there will be no stack trace in log (MariaDB)
 */
@Test
public void testExceptionStrategyWithMaxAllowedPacketException() {
  // Wrap the driver-level packet error the way the logging code receives it.
  KettleDatabaseException wrapped = new KettleDatabaseException( new MaxAllowedPacketException() );

  DatabaseMeta dbMeta = mock( DatabaseMeta.class );
  when( dbMeta.getDatabaseInterface() ).thenReturn( new MariaDBDatabaseMeta() );
  when( logTable.getDatabaseMeta() ).thenReturn( dbMeta );

  LogExceptionBehaviourInterface strategy =
      DatabaseLogExceptionFactory.getExceptionStrategy( logTable, wrapped );

  // The factory must pick the short-message strategy (no stack trace in the log).
  assertEquals( SUPPRESSABLE_WITH_SHORT_MESSAGE, strategy.getClass().getName() );
}
Example use of org.pentaho.di.core.database.DatabaseMeta in the pentaho-kettle project: class OraBulkLoader, method getControlFileContents.
/**
 * Get the contents of the SQL*Loader control file as specified in the meta object.
 *
 * @param meta
 *          the meta object to model the control file after
 * @param rm
 *          row metadata describing the incoming stream row
 * @param r
 *          the current row (unused; kept for interface compatibility)
 *
 * @return a string containing the control file contents
 * @throws KettleException
 *           when no fields are defined, when a stream field cannot be found in the row,
 *           or when the stream/table field mappings are inconsistent
 */
public String getControlFileContents(OraBulkLoaderMeta meta, RowMetaInterface rm, Object[] r) throws KettleException {
  DatabaseMeta dm = meta.getDatabaseMeta();
  // Resolve the data file name up front so any VFS lookup happens before we start building.
  String inputName = "'" + getFilename(getFileObject(meta.getDataFile(), getTransMeta())) + "'";
  StringBuilder contents = new StringBuilder(500);

  appendOptionsSection(contents, meta);
  appendLoadDataSection(contents, meta, inputName);

  contents.append("INTO TABLE ")
      .append(dm.getQuotedSchemaTableCombination(
          environmentSubstitute(meta.getSchemaName()), environmentSubstitute(meta.getTableName())))
      .append(Const.CR)
      .append(meta.getLoadAction()).append(Const.CR)
      .append("FIELDS TERMINATED BY ',' ENCLOSED BY '\"'").append(Const.CR)
      .append("TRAILING NULLCOLS").append(Const.CR)
      .append('(');
  appendFieldList(contents, meta, dm, rm);
  contents.append(")");
  return contents.toString();
}

/** Appends the OPTIONS(...) header: error limit plus optional ROWS/BINDSIZE/READSIZE settings. */
private void appendOptionsSection(StringBuilder contents, OraBulkLoaderMeta meta) {
  contents.append("OPTIONS(").append(Const.CR);
  contents.append(" ERRORS=\'").append(meta.getMaxErrors()).append("\'").append(Const.CR);
  if (meta.getCommitSizeAsInt(this) != 0 && !(meta.isDirectPath() && getStepMeta().getCopies() > 1)) {
    // For the second part of the above expression: ROWS is not supported
    // in parallel mode (by sqlldr).
    contents.append(" , ROWS=\'").append(meta.getCommitSize()).append("\'").append(Const.CR);
  }
  if (meta.getBindSizeAsInt(this) != 0) {
    contents.append(" , BINDSIZE=\'").append(meta.getBindSize()).append("\'").append(Const.CR);
  }
  if (meta.getReadSizeAsInt(this) != 0) {
    contents.append(" , READSIZE=\'").append(meta.getReadSize()).append("\'").append(Const.CR);
  }
  contents.append(")").append(Const.CR);
}

/** Appends the LOAD DATA clause with optional CHARACTERSET and INFILE lines. */
private void appendLoadDataSection(StringBuilder contents, OraBulkLoaderMeta meta, String inputName) throws KettleException {
  contents.append("LOAD DATA").append(Const.CR);
  if (!Utils.isEmpty(meta.getCharacterSetName())) {
    contents.append("CHARACTERSET ").append(meta.getCharacterSetName()).append(Const.CR);
  }
  if (!OraBulkLoaderMeta.METHOD_AUTO_CONCURRENT.equals(meta.getLoadMethod()) || !Utils.isEmpty(meta.getAltRecordTerm())) {
    String infile = inputName;
    if (OraBulkLoaderMeta.METHOD_AUTO_CONCURRENT.equals(meta.getLoadMethod())) {
      // For concurrent input, data command line argument must be specified
      infile = "''";
    }
    contents.append("INFILE ").append(infile);
    if (!Utils.isEmpty(meta.getAltRecordTerm())) {
      contents.append(" \"STR x'").append(encodeRecordTerminator(meta.getAltRecordTerm(), meta.getEncoding())).append("'\"");
    }
    contents.append(Const.CR);
  }
}

/** Appends the per-column field list, mapping each stream value type to a sqlldr datatype. */
private void appendFieldList(StringBuilder contents, OraBulkLoaderMeta meta, DatabaseMeta dm, RowMetaInterface rm) throws KettleException {
  String[] streamFields = meta.getFieldStream();
  String[] tableFields = meta.getFieldTable();
  String[] dateMask = meta.getDateMask();
  if (streamFields == null || streamFields.length == 0) {
    throw new KettleException("No fields defined to load to database");
  }
  // Fail with a clear message instead of an ArrayIndexOutOfBoundsException on a broken mapping.
  if (tableFields == null || tableFields.length < streamFields.length
      || dateMask == null || dateMask.length < streamFields.length) {
    throw new KettleException("Stream fields, table fields and date masks are not consistently defined");
  }
  for (int i = 0; i < streamFields.length; i++) {
    if (i != 0) {
      contents.append(", ").append(Const.CR);
    }
    contents.append(dm.quoteField(tableFields[i]));
    int pos = rm.indexOfValue(streamFields[i]);
    if (pos < 0) {
      throw new KettleException("Could not find field " + streamFields[i] + " in stream");
    }
    appendSqlldrType(contents, rm.getValueMeta(pos), dateMask[i]);
  }
}

/** Appends the sqlldr datatype suffix for a single column based on the stream value type. */
private void appendSqlldrType(StringBuilder contents, ValueMetaInterface v, String mask) {
  switch(v.getType()) {
    case ValueMetaInterface.TYPE_STRING:
      if (v.getLength() > 255) {
        contents.append(" CHAR(").append(v.getLength()).append(")");
      } else {
        contents.append(" CHAR");
      }
      break;
    case ValueMetaInterface.TYPE_INTEGER:
    case ValueMetaInterface.TYPE_NUMBER:
    case ValueMetaInterface.TYPE_BIGNUMBER:
      // Numeric types need no explicit sqlldr datatype.
      break;
    case ValueMetaInterface.TYPE_DATE:
      if (OraBulkLoaderMeta.DATE_MASK_DATE.equals(mask)) {
        contents.append(" DATE 'yyyy-mm-dd'");
      } else if (OraBulkLoaderMeta.DATE_MASK_DATETIME.equals(mask)) {
        contents.append(" TIMESTAMP 'yyyy-mm-dd hh24:mi:ss.ff'");
      } else {
        // If not specified the default is date.
        contents.append(" DATE 'yyyy-mm-dd'");
      }
      break;
    case ValueMetaInterface.TYPE_BINARY:
      contents.append(" ENCLOSED BY '<startlob>' AND '<endlob>'");
      break;
    case ValueMetaInterface.TYPE_TIMESTAMP:
      contents.append(" TIMESTAMP 'yyyy-mm-dd hh24:mi:ss.ff'");
      break;
    default:
      break;
  }
}
Example use of org.pentaho.di.core.database.DatabaseMeta in the pentaho-kettle project: class PGBulkLoader, method getDatabase.
/**
 * Creates the {@link Database} to load into, honoring an optional database-name
 * override configured on the step meta.
 */
@VisibleForTesting
Database getDatabase(LoggingObjectInterface parentObject, PGBulkLoaderMeta pgBulkLoaderMeta) {
  String overrideName = environmentSubstitute(pgBulkLoaderMeta.getDbNameOverride());
  if (Utils.isEmpty(overrideName)) {
    // No override configured: use the shared meta as-is.
    return new Database(parentObject, pgBulkLoaderMeta.getDatabaseMeta());
  }
  // Override present: clone the origin db meta so the shared instance is not mutated,
  // then apply the overridden DB name.
  DatabaseMeta clonedMeta = (DatabaseMeta) pgBulkLoaderMeta.getDatabaseMeta().clone();
  clonedMeta.setDBName(overrideName.trim());
  logDebug("DB name overridden to the value: " + overrideName);
  return new Database(parentObject, clonedMeta);
}
Example use of org.pentaho.di.core.database.DatabaseMeta in the pentaho-kettle project: class PGBulkLoader, method processTruncate.
/**
 * Truncates the target table when the step's load action resolves to "truncate"
 * (case-insensitive). Does nothing for any other load action.
 *
 * @throws Exception if the TRUNCATE statement fails; wrapped in a KettleException
 *           carrying the resolved table name for context
 */
void processTruncate() throws Exception {
  Connection connection = data.db.getConnection();
  String loadAction = environmentSubstitute(meta.getLoadAction());
  if (loadAction.equalsIgnoreCase("truncate")) {
    DatabaseMeta dm = meta.getDatabaseMeta();
    String tableName = dm.getQuotedSchemaTableCombination(environmentSubstitute(meta.getSchemaName()), environmentSubstitute(meta.getTableName()));
    logBasic("Launching command: " + "TRUNCATE " + tableName);
    // try-with-resources closes the statement on every path and cannot mask the
    // original failure with an exception thrown from a manual finally-close.
    try (Statement statement = connection.createStatement()) {
      statement.executeUpdate("TRUNCATE " + tableName);
    } catch (Exception ex) {
      throw new KettleException("Error while truncating " + tableName, ex);
    }
  }
}
Example use of org.pentaho.di.core.database.DatabaseMeta in the pentaho-kettle project: class RepositoriesMetaTest, method testReadData.
@Test
public void testReadData() throws Exception {
LogChannel log = mock(LogChannel.class);
doReturn(getClass().getResource("repositories.xml").getPath()).when(repoMeta).getKettleUserRepositoriesFile();
doReturn(log).when(repoMeta).newLogChannel();
repoMeta.readData();
String repositoriesXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + Const.CR + "<repositories>" + Const.CR + " <connection>" + Const.CR + " <name>local postgres</name>" + Const.CR + " <server>localhost</server>" + Const.CR + " <type>POSTGRESQL</type>" + Const.CR + " <access>Native</access>" + Const.CR + " <database>hibernate</database>" + Const.CR + " <port>5432</port>" + Const.CR + " <username>auser</username>" + Const.CR + " <password>Encrypted 2be98afc86aa7f285bb18bd63c99dbdde</password>" + Const.CR + " <servername/>" + Const.CR + " <data_tablespace/>" + Const.CR + " <index_tablespace/>" + Const.CR + " <attributes>" + Const.CR + " <attribute><code>FORCE_IDENTIFIERS_TO_LOWERCASE</code><attribute>N</attribute></attribute>" + Const.CR + " <attribute><code>FORCE_IDENTIFIERS_TO_UPPERCASE</code><attribute>N</attribute></attribute>" + Const.CR + " <attribute><code>IS_CLUSTERED</code><attribute>N</attribute></attribute>" + Const.CR + " <attribute><code>PORT_NUMBER</code><attribute>5432</attribute></attribute>" + Const.CR + " <attribute><code>PRESERVE_RESERVED_WORD_CASE</code><attribute>N</attribute></attribute>" + Const.CR + " <attribute><code>QUOTE_ALL_FIELDS</code><attribute>N</attribute></attribute>" + Const.CR + " <attribute><code>SUPPORTS_BOOLEAN_DATA_TYPE</code><attribute>Y</attribute></attribute>" + Const.CR + " <attribute><code>SUPPORTS_TIMESTAMP_DATA_TYPE</code><attribute>Y</attribute></attribute>" + Const.CR + " <attribute><code>USE_POOLING</code><attribute>N</attribute></attribute>" + Const.CR + " </attributes>" + Const.CR + " </connection>" + Const.CR + " <repository> <id>KettleFileRepository</id>" + Const.CR + " <name>Test Repository</name>" + Const.CR + " <description>Test Repository Description</description>" + Const.CR + " <is_default>false</is_default>" + Const.CR + " <base_directory>test-repository</base_directory>" + Const.CR + " <read_only>N</read_only>" + Const.CR + " <hides_hidden_files>N</hides_hidden_files>" + Const.CR + " </repository> 
</repositories>" + Const.CR;
assertEquals(repositoriesXml, repoMeta.getXML());
RepositoriesMeta clone = repoMeta.clone();
assertEquals(repositoriesXml, repoMeta.getXML());
assertNotSame(clone, repoMeta);
assertEquals(1, repoMeta.nrRepositories());
RepositoryMeta repository = repoMeta.getRepository(0);
assertEquals("Test Repository", repository.getName());
assertEquals("Test Repository Description", repository.getDescription());
assertEquals(" <repository> <id>KettleFileRepository</id>" + Const.CR + " <name>Test Repository</name>" + Const.CR + " <description>Test Repository Description</description>" + Const.CR + " <is_default>false</is_default>" + Const.CR + " <base_directory>test-repository</base_directory>" + Const.CR + " <read_only>N</read_only>" + Const.CR + " <hides_hidden_files>N</hides_hidden_files>" + Const.CR + " </repository>", repository.getXML());
assertSame(repository, repoMeta.searchRepository("Test Repository"));
assertSame(repository, repoMeta.findRepositoryById("KettleFileRepository"));
assertSame(repository, repoMeta.findRepository("Test Repository"));
assertNull(repoMeta.findRepository("not found"));
assertNull(repoMeta.findRepositoryById("not found"));
assertEquals(0, repoMeta.indexOfRepository(repository));
repoMeta.removeRepository(0);
assertEquals(0, repoMeta.nrRepositories());
assertNull(repoMeta.searchRepository("Test Repository"));
repoMeta.addRepository(0, repository);
assertEquals(1, repoMeta.nrRepositories());
repoMeta.removeRepository(1);
assertEquals(1, repoMeta.nrRepositories());
assertEquals(1, repoMeta.nrDatabases());
assertEquals("local postgres", repoMeta.getDatabase(0).getName());
DatabaseMeta searchDatabase = repoMeta.searchDatabase("local postgres");
assertSame(searchDatabase, repoMeta.getDatabase(0));
assertEquals(0, repoMeta.indexOfDatabase(searchDatabase));
repoMeta.removeDatabase(0);
assertEquals(0, repoMeta.nrDatabases());
assertNull(repoMeta.searchDatabase("local postgres"));
repoMeta.addDatabase(0, searchDatabase);
assertEquals(1, repoMeta.nrDatabases());
repoMeta.removeDatabase(1);
assertEquals(1, repoMeta.nrDatabases());
assertEquals("Unable to read repository with id [junk]. RepositoryMeta is not available.", repoMeta.getErrorMessage());
}
Aggregations