Use of `org.pentaho.di.core.database.MSSQLServerDatabaseMeta` in the pentaho-kettle project by Pentaho.
The `execute` method of the `JobEntryMssqlBulkLoad` class.
/**
 * Executes the MSSQL bulk-load job entry: validates the input file and the
 * database connection, then builds and runs a T-SQL {@code BULK INSERT}
 * statement (optionally preceded by {@code TRUNCATE TABLE}) against the
 * configured SQL Server table.
 *
 * @param previousResult the result of the previous job entry; updated in place
 * @param nr             the job entry number (unused here)
 * @return the same {@link Result} instance, with {@code result=true} on success
 *         and {@code nrErrors} incremented on failure
 */
public Result execute(Result previousResult, int nr) {
    String takeFirstNbrLines = "";
    String lineTerminatedby = "";
    String fieldTerminatedby = "";
    boolean useFieldSeparator = false;
    String useCodepage = "";
    String errorfileName = "";

    Result result = previousResult;
    result.setResult(false);

    String vfsFilename = environmentSubstitute(filename);
    if (Utils.isEmpty(vfsFilename)) {
        // No file was specified
        result.setNrErrors(1);
        logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Nofilename.Label"));
        return result;
    }

    FileObject fileObject = null;
    try {
        // This is running over VFS but BULK INSERT needs a normal file, so
        // verify that it is a local file and convert the VFS FileObject to a
        // regular platform-specific file name.
        fileObject = KettleVFS.getFileObject(vfsFilename, this);
        if (!(fileObject instanceof LocalFile)) {
            throw new KettleException(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.OnlyLocalFileSupported", vfsFilename));
        }
        String realFilename = KettleVFS.getFilename(fileObject);

        File file = new File(realFilename);
        if (!(file.exists() && file.canRead())) {
            // The file doesn't exist (or is not readable)
            result.setNrErrors(1);
            logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.FileNotExists", realFilename));
            return result;
        }
        if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobMssqlBulkLoad.FileExists.Label", realFilename));
        }

        if (connection == null) {
            // No database connection is defined
            result.setNrErrors(1);
            logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Nodatabase.Label"));
            return result;
        }

        Database db = new Database(this, connection);
        // BULK INSERT is SQL Server specific: refuse any other database type.
        if (!(db.getDatabaseMeta().getDatabaseInterface() instanceof MSSQLServerDatabaseMeta)) {
            logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.DbNotMSSQL", connection.getDatabaseName()));
            return result;
        }
        db.shareVariablesWith(this);
        try {
            db.connect(parentJob.getTransactionId(), null);

            String realSchemaname = environmentSubstitute(schemaname);
            String realTablename = environmentSubstitute(tablename);
            // Qualify the table with the schema when one resolves to a value
            // (most of the time Schemaname.Tablename). BUGFIX: the original
            // tested the raw field for null, so an empty substitution
            // produced ".tablename".
            if (!Utils.isEmpty(realSchemaname)) {
                realTablename = realSchemaname + "." + realTablename;
            }

            // The table must already exist before the bulk load operation.
            if (!db.checkTableExists(realTablename)) {
                result.setNrErrors(1);
                logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.TableNotExists", realTablename));
                return result;
            }
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobMssqlBulkLoad.TableExists.Label", realTablename));
            }

            // FIELDTERMINATOR is mandatory for char/widechar data files.
            String fieldterminator = getRealFieldTerminator();
            if (datafiletype.equals("char") || datafiletype.equals("widechar")) {
                if (Utils.isEmpty(fieldterminator)) {
                    logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.FieldTerminatorMissing"));
                    return result;
                }
                useFieldSeparator = true;
                fieldTerminatedby = "FIELDTERMINATOR='" + fieldterminator + "'";
            }

            // CODEPAGE: either one of the named options or a user-specified page.
            if (codepage.equals("Specific")) {
                // BUGFIX: substitute the specific-code-page field, not the
                // literal "Specific" (the original substituted `codepage`).
                String realCodePage = environmentSubstitute(specificcodepage);
                // BUGFIX: the original tested length() < 0, which is always
                // false, so a missing value was never reported.
                if (Utils.isEmpty(realCodePage)) {
                    logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.SpecificCodePageMissing"));
                    return result;
                }
                useCodepage = "CODEPAGE = '" + realCodePage + "'";
            } else {
                useCodepage = "CODEPAGE = '" + codepage + "'";
            }

            // ERRORFILE: BULK INSERT creates the error file itself and fails
            // if it already exists, so either refuse a pre-existing file or
            // make the name unique with a timestamp suffix.
            String realErrorFile = environmentSubstitute(errorfilename);
            if (realErrorFile != null) {
                File errorfile = new File(realErrorFile);
                if (errorfile.exists() && !adddatetime) {
                    logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.ErrorFileExists"));
                    return result;
                }
                if (adddatetime) {
                    // BUGFIX: pattern was "yyyMMdd_HHmmss" (three-y typo).
                    SimpleDateFormat daf = new SimpleDateFormat("yyyyMMdd_HHmmss");
                    errorfileName = "ERRORFILE ='" + realErrorFile + "_" + daf.format(new Date()) + "'";
                } else {
                    errorfileName = "ERRORFILE ='" + realErrorFile + "'";
                }
            }

            // ROWTERMINATOR
            String rowterminator = getRealLineterminated();
            if (!Utils.isEmpty(rowterminator)) {
                lineTerminatedby = "ROWTERMINATOR='" + rowterminator + "'";
            }

            // FIRSTROW / LASTROW. BUGFIX: the original overwrote FIRSTROW
            // with LASTROW when both were set; they are independent options.
            if (startfile > 0) {
                takeFirstNbrLines = "FIRSTROW=" + startfile;
            }
            if (endfile > 0) {
                takeFirstNbrLines = takeFirstNbrLines
                    + (takeFirstNbrLines.length() > 0 ? "," : "")
                    + "LASTROW=" + endfile;
            }

            // Build the BULK INSERT command (optionally preceded by TRUNCATE).
            StringBuilder sqlBulkLoad = new StringBuilder();
            if (truncate) {
                sqlBulkLoad.append("TRUNCATE TABLE ").append(realTablename).append(";");
            }
            sqlBulkLoad.append("BULK INSERT ").append(realTablename)
                .append(" FROM ").append("'").append(realFilename.replace('\\', '/')).append("'");
            sqlBulkLoad.append(" WITH (");
            if (useFieldSeparator) {
                sqlBulkLoad.append(fieldTerminatedby);
            } else {
                sqlBulkLoad.append("DATAFILETYPE ='").append(datafiletype).append("'");
            }
            if (lineTerminatedby.length() > 0) {
                sqlBulkLoad.append(",").append(lineTerminatedby);
            }
            if (takeFirstNbrLines.length() > 0) {
                sqlBulkLoad.append(",").append(takeFirstNbrLines);
            }
            if (useCodepage.length() > 0) {
                sqlBulkLoad.append(",").append(useCodepage);
            }
            String realFormatFile = environmentSubstitute(formatfilename);
            if (realFormatFile != null) {
                sqlBulkLoad.append(", FORMATFILE='").append(realFormatFile).append("'");
            }
            if (firetriggers) {
                sqlBulkLoad.append(",FIRE_TRIGGERS");
            }
            if (keepnulls) {
                sqlBulkLoad.append(",KEEPNULLS");
            }
            if (keepidentity) {
                sqlBulkLoad.append(",KEEPIDENTITY");
            }
            if (checkconstraints) {
                sqlBulkLoad.append(",CHECK_CONSTRAINTS");
            }
            if (tablock) {
                sqlBulkLoad.append(",TABLOCK");
            }
            if (orderby != null) {
                sqlBulkLoad.append(",ORDER ( ").append(orderby).append(" ").append(orderdirection).append(")");
            }
            if (errorfileName.length() > 0) {
                sqlBulkLoad.append(", ").append(errorfileName);
            }
            if (maxerrors > 0) {
                sqlBulkLoad.append(", MAXERRORS=").append(maxerrors);
            }
            if (batchsize > 0) {
                sqlBulkLoad.append(", BATCHSIZE=").append(batchsize);
            }
            if (rowsperbatch > 0) {
                sqlBulkLoad.append(", ROWS_PER_BATCH=").append(rowsperbatch);
            }
            // End of the BULK command
            sqlBulkLoad.append(")");

            try {
                // Run the SQL
                db.execStatement(sqlBulkLoad.toString());
                if (isAddFileToResult()) {
                    // Add the loaded file to the result's output files
                    ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(realFilename, this), parentJob.getJobname(), toString());
                    result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
                }
                result.setResult(true);
            } catch (KettleDatabaseException je) {
                result.setNrErrors(1);
                logError("An error occurred executing this job entry : " + je.getMessage(), je);
            } catch (KettleFileException e) {
                logError("An error occurred executing this job entry : " + e.getMessage(), e);
                result.setNrErrors(1);
            }
        } catch (KettleDatabaseException dbe) {
            // Connect / table-existence check failed
            result.setNrErrors(1);
            logError("An error occurred executing this entry: " + dbe.getMessage());
        } finally {
            // Disconnect exactly once, on every exit path (the original
            // scattered disconnect calls across four places, including a
            // redundant double-disconnect on success).
            db.disconnect();
        }
    } catch (Exception e) {
        // An unexpected error occurred
        result.setNrErrors(1);
        logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.UnexpectedError.Label"), e);
    } finally {
        try {
            if (fileObject != null) {
                fileObject.close();
            }
        } catch (Exception ignored) {
            // Best effort: close errors on the VFS handle are not actionable.
        }
    }
    return result;
}
Aggregations