Usage of org.apache.commons.vfs2.VFS in the pentaho-kettle project (by Pentaho):
the execute method of the JobEntryMssqlBulkLoad class.
/**
 * Executes the MS SQL Server bulk-load job entry: validates the (local) input file, the database
 * connection and the target table, builds a T-SQL {@code BULK INSERT} statement from the entry's
 * settings and runs it.
 *
 * @param previousResult the result of the previous job entry; updated in place and returned
 * @param nr             the job entry number (not used here)
 * @return the result; {@code result=true} only when the bulk load succeeded
 */
public Result execute(Result previousResult, int nr) {
  String firstLastRowClause = "";
  String rowTerminatorClause = "";
  String fieldTerminatorClause = "";
  boolean useFieldSeparator = false;
  String codepageClause = "";
  String errorFileClause = "";

  Result result = previousResult;
  result.setResult(false);

  String vfsFilename = environmentSubstitute(filename);
  FileObject fileObject = null;
  // Let's check the filename ...
  if (!Utils.isEmpty(vfsFilename)) {
    try {
      // BULK INSERT needs a plain file path on the server side, so the VFS object must be a
      // local file; convert it to a regular platform-specific file name.
      fileObject = KettleVFS.getFileObject(vfsFilename, this);
      if (!(fileObject instanceof LocalFile)) {
        throw new KettleException(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.OnlyLocalFileSupported", vfsFilename));
      }
      String realFilename = KettleVFS.getFilename(fileObject);
      File file = new File(realFilename);
      if (file.exists() && file.canRead()) {
        // User has specified an existing, readable file: we can continue.
        if (log.isDetailed()) {
          logDetailed(BaseMessages.getString(PKG, "JobMssqlBulkLoad.FileExists.Label", realFilename));
        }
        if (connection != null) {
          Database db = new Database(this, connection);
          // BULK INSERT is MS SQL Server specific syntax; refuse any other database type.
          if (!(db.getDatabaseMeta().getDatabaseInterface() instanceof MSSQLServerDatabaseMeta)) {
            logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.DbNotMSSQL", connection.getDatabaseName()));
            return result;
          }
          db.shareVariablesWith(this);
          try {
            db.connect(parentJob.getTransactionId(), null);
            String realSchemaname = environmentSubstitute(schemaname);
            String realTablename = environmentSubstitute(tablename);
            // Qualify the table name with the schema when one is given.
            // FIX: test the substituted value; the original tested the raw field, so a schema
            // resolving to empty produced ".tablename".
            if (!Utils.isEmpty(realSchemaname)) {
              realTablename = realSchemaname + "." + realTablename;
            }
            if (db.checkTableExists(realTablename)) {
              if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobMssqlBulkLoad.TableExists.Label", realTablename));
              }
              // FIELDTERMINATOR is mandatory for (wide)char data files.
              String fieldTerminator = getRealFieldTerminator();
              if (Utils.isEmpty(fieldTerminator) && (datafiletype.equals("char") || datafiletype.equals("widechar"))) {
                logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.FieldTerminatorMissing"));
                return result;
              }
              if (datafiletype.equals("char") || datafiletype.equals("widechar")) {
                useFieldSeparator = true;
                fieldTerminatorClause = "FIELDTERMINATOR='" + fieldTerminator + "'";
              }
              // CODEPAGE: either a named code page or an explicitly specified one.
              if (codepage.equals("Specific")) {
                // FIX: substitute the user-supplied code page; the original substituted the
                // literal selector value "Specific".
                String realCodePage = environmentSubstitute(specificcodepage);
                // FIX: the original test was "length() < 0", which can never be true, so the
                // missing-code-page error was unreachable.
                if (Utils.isEmpty(realCodePage)) {
                  logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.SpecificCodePageMissing"));
                  return result;
                }
                codepageClause = "CODEPAGE = '" + realCodePage + "'";
              } else {
                codepageClause = "CODEPAGE = '" + codepage + "'";
              }
              // ERRORFILE: the server creates it; it must not pre-exist unless we make the
              // name unique with a timestamp suffix.
              String realErrorFile = environmentSubstitute(errorfilename);
              if (realErrorFile != null) {
                File errorfile = new File(realErrorFile);
                if (errorfile.exists() && !adddatetime) {
                  // The error file is created when the command is executed. An error occurs
                  // if the file already exists.
                  logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.ErrorFileExists"));
                  return result;
                }
                if (adddatetime) {
                  // FIX: four-digit year "yyyy"; the original pattern had a typo ("yyyMMdd").
                  SimpleDateFormat daf = new SimpleDateFormat("yyyyMMdd_HHmmss");
                  errorFileClause = "ERRORFILE ='" + realErrorFile + "_" + daf.format(new Date()) + "'";
                } else {
                  errorFileClause = "ERRORFILE ='" + realErrorFile + "'";
                }
              }
              // ROWTERMINATOR
              String rowTerminator = getRealLineterminated();
              if (!Utils.isEmpty(rowTerminator)) {
                rowTerminatorClause = "ROWTERMINATOR='" + rowTerminator + "'";
              }
              // FIRSTROW / LASTROW.
              // FIX: the original overwrote FIRSTROW with LASTROW when both were configured,
              // silently dropping the start-row setting.
              if (startfile > 0) {
                firstLastRowClause = "FIRSTROW=" + startfile;
              }
              if (endfile > 0) {
                firstLastRowClause += (firstLastRowClause.length() > 0 ? "," : "") + "LASTROW=" + endfile;
              }
              // Optionally truncate the table first, then build the BULK INSERT command.
              String sqlBulkLoad = "";
              if (truncate) {
                sqlBulkLoad = "TRUNCATE TABLE " + realTablename + ";";
              }
              sqlBulkLoad += "BULK INSERT " + realTablename + " FROM '" + realFilename.replace('\\', '/') + "'";
              sqlBulkLoad += " WITH (";
              if (useFieldSeparator) {
                sqlBulkLoad += fieldTerminatorClause;
              } else {
                sqlBulkLoad += "DATAFILETYPE ='" + datafiletype + "'";
              }
              if (rowTerminatorClause.length() > 0) {
                sqlBulkLoad += "," + rowTerminatorClause;
              }
              if (firstLastRowClause.length() > 0) {
                sqlBulkLoad += "," + firstLastRowClause;
              }
              if (codepageClause.length() > 0) {
                sqlBulkLoad += "," + codepageClause;
              }
              String realFormatFile = environmentSubstitute(formatfilename);
              if (realFormatFile != null) {
                sqlBulkLoad += ", FORMATFILE='" + realFormatFile + "'";
              }
              if (firetriggers) {
                sqlBulkLoad += ",FIRE_TRIGGERS";
              }
              if (keepnulls) {
                sqlBulkLoad += ",KEEPNULLS";
              }
              if (keepidentity) {
                sqlBulkLoad += ",KEEPIDENTITY";
              }
              if (checkconstraints) {
                sqlBulkLoad += ",CHECK_CONSTRAINTS";
              }
              if (tablock) {
                sqlBulkLoad += ",TABLOCK";
              }
              if (orderby != null) {
                sqlBulkLoad += ",ORDER ( " + orderby + " " + orderdirection + ")";
              }
              if (errorFileClause.length() > 0) {
                sqlBulkLoad += ", " + errorFileClause;
              }
              if (maxerrors > 0) {
                sqlBulkLoad += ", MAXERRORS=" + maxerrors;
              }
              if (batchsize > 0) {
                sqlBulkLoad += ", BATCHSIZE=" + batchsize;
              }
              if (rowsperbatch > 0) {
                sqlBulkLoad += ", ROWS_PER_BATCH=" + rowsperbatch;
              }
              // End of the BULK INSERT command
              sqlBulkLoad += ")";
              try {
                // Run the SQL; the finally below takes care of disconnecting.
                // FIX: the original disconnected twice (explicitly and again in finally).
                db.execStatement(sqlBulkLoad);
                if (isAddFileToResult()) {
                  // Add the loaded file to the result's output files.
                  ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(realFilename, this), parentJob.getJobname(), toString());
                  result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
                }
                result.setResult(true);
              } catch (KettleDatabaseException je) {
                result.setNrErrors(1);
                logError("An error occurred executing this job entry : " + je.getMessage(), je);
              } catch (KettleFileException e) {
                logError("An error occurred executing this job entry : " + e.getMessage(), e);
                result.setNrErrors(1);
              } finally {
                db.disconnect();
              }
            } else {
              // The table must already exist before the bulk load operation.
              db.disconnect();
              result.setNrErrors(1);
              logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.TableNotExists", realTablename));
            }
          } catch (KettleDatabaseException dbe) {
            db.disconnect();
            result.setNrErrors(1);
            // FIX: preserve the cause for the log instead of the message only.
            logError("An error occurred executing this entry: " + dbe.getMessage(), dbe);
          }
        } else {
          // No database connection is defined
          result.setNrErrors(1);
          logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Nodatabase.Label"));
        }
      } else {
        // The file doesn't exist or is not readable
        result.setNrErrors(1);
        logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Error.FileNotExists", realFilename));
      }
    } catch (Exception e) {
      // An unexpected error occurred
      result.setNrErrors(1);
      logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.UnexpectedError.Label"), e);
    } finally {
      try {
        if (fileObject != null) {
          fileObject.close();
        }
      } catch (Exception e) {
        // Ignore close errors
      }
    }
  } else {
    // No file was specified
    result.setNrErrors(1);
    logError(BaseMessages.getString(PKG, "JobMssqlBulkLoad.Nofilename.Label"));
  }
  return result;
}
Usage of org.apache.commons.vfs2.VFS in the pentaho-kettle project (by Pentaho):
the execute method of the JobEntryMysqlBulkLoad class.
/**
 * Executes the MySQL bulk-load job entry: validates the input file (which must be readable
 * locally unless LOCAL INFILE is disabled), checks the database connection and the target table,
 * then builds and runs a {@code LOAD DATA ... INFILE} statement from the entry's settings.
 *
 * @param previousResult the result of the previous job entry; updated in place and returned
 * @param nr             the job entry number (not used here)
 * @return the result; {@code result=true} only when the load succeeded
 */
public Result execute(Result previousResult, int nr) {
  String replaceIgnore;
  String ignoreLinesClause = "";
  String columnListClause = "";
  String localKeyword = "";
  String priorityKeyword = "";
  String linesClause = "";
  String fieldsClause = "";

  Result result = previousResult;
  result.setResult(false);

  String vfsFilename = environmentSubstitute(filename);
  // Let's check the filename ...
  if (!Utils.isEmpty(vfsFilename)) {
    FileObject fileObject = null;
    try {
      // LOAD DATA needs a plain file path, so the VFS object must be a local file; convert it
      // to a regular platform-specific file name.
      fileObject = KettleVFS.getFileObject(vfsFilename, this);
      if (!(fileObject instanceof LocalFile)) {
        throw new KettleException("Only local files are supported at this time, file [" + vfsFilename + "] is not a local file.");
      }
      String realFilename = KettleVFS.getFilename(fileObject);
      File file = new File(realFilename);
      // When LOCAL INFILE is disabled the file lives on the server, so we cannot check it here.
      if ((file.exists() && file.canRead()) || !isLocalInfile()) {
        if (log.isDetailed()) {
          logDetailed("File [" + realFilename + "] exists.");
        }
        if (connection != null) {
          Database db = new Database(this, connection);
          db.shareVariablesWith(this);
          try {
            db.connect(parentJob.getTransactionId(), null);
            String realSchemaname = environmentSubstitute(schemaname);
            String realTablename = environmentSubstitute(tablename);
            if (db.checkTableExists(realTablename)) {
              if (log.isDetailed()) {
                logDetailed("Table [" + realTablename + "] exists.");
              }
              // Qualify the table name with the schema when one is given.
              // FIX: test the substituted value; the original tested the raw field, so a
              // schema resolving to empty produced ".tablename".
              if (!Utils.isEmpty(realSchemaname)) {
                realTablename = realSchemaname + "." + realTablename;
              }
              // REPLACE or IGNORE duplicate-key handling
              replaceIgnore = isReplacedata() ? "REPLACE" : "IGNORE";
              // IGNORE n LINES
              if (Const.toInt(getRealIgnorelines(), 0) > 0) {
                ignoreLinesClause = "IGNORE " + getRealIgnorelines() + " LINES";
              }
              // Explicit column list
              if (getRealListattribut() != null) {
                columnListClause = "(" + MysqlString(getRealListattribut()) + ")";
              }
              // LOCAL keyword for client-side files
              if (isLocalInfile()) {
                localKeyword = "LOCAL";
              }
              // Priority: 1 = LOW_PRIORITY, 2 = CONCURRENT, anything else = default
              if (prorityvalue == 1) {
                priorityKeyword = "LOW_PRIORITY";
              } else if (prorityvalue == 2) {
                priorityKeyword = "CONCURRENT";
              }
              // FIELDS clause: separator, enclosure and escape character (quotes doubled)
              if (getRealSeparator() != null || getRealEnclosed() != null || getRealEscaped() != null) {
                fieldsClause = "FIELDS ";
                if (getRealSeparator() != null) {
                  fieldsClause += "TERMINATED BY '" + Const.replace(getRealSeparator(), "'", "''") + "'";
                }
                if (getRealEnclosed() != null) {
                  fieldsClause += " ENCLOSED BY '" + Const.replace(getRealEnclosed(), "'", "''") + "'";
                }
                if (getRealEscaped() != null) {
                  fieldsClause += " ESCAPED BY '" + Const.replace(getRealEscaped(), "'", "''") + "'";
                }
              }
              // LINES clause: starting/terminating strings (quotes doubled)
              if (getRealLinestarted() != null || getRealLineterminated() != null) {
                linesClause = "LINES ";
                if (getRealLinestarted() != null) {
                  linesClause += "STARTING BY '" + Const.replace(getRealLinestarted(), "'", "''") + "'";
                }
                if (getRealLineterminated() != null) {
                  linesClause += " TERMINATED BY '" + Const.replace(getRealLineterminated(), "'", "''") + "'";
                }
              }
              String sqlBulkLoad = "LOAD DATA " + priorityKeyword + " " + localKeyword + " INFILE '" + realFilename.replace('\\', '/') + "' " + replaceIgnore + " INTO TABLE " + realTablename + " " + fieldsClause + " " + linesClause + " " + ignoreLinesClause + " " + columnListClause + ";";
              try {
                // Run the SQL; the finally below takes care of disconnecting.
                db.execStatement(sqlBulkLoad);
                if (isAddFileToResult()) {
                  // Add the loaded file to the result's output files.
                  ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(realFilename, this), parentJob.getJobname(), toString());
                  result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
                }
                result.setResult(true);
              } catch (KettleDatabaseException je) {
                result.setNrErrors(1);
                // FIX: preserve the cause for the log instead of the message only.
                logError("An error occurred executing this job entry : " + je.getMessage(), je);
              } catch (KettleFileException e) {
                logError("An error occurred executing this job entry : " + e.getMessage(), e);
                result.setNrErrors(1);
              } finally {
                // FIX: always disconnect; the original leaked the connection when a
                // KettleFileException was thrown.
                db.disconnect();
              }
            } else {
              // The table must already exist before the bulk load operation.
              db.disconnect();
              result.setNrErrors(1);
              if (log.isDetailed()) {
                logDetailed("Table [" + realTablename + "] doesn't exist!");
              }
            }
          } catch (KettleDatabaseException dbe) {
            db.disconnect();
            result.setNrErrors(1);
            logError("An error occurred executing this entry: " + dbe.getMessage(), dbe);
          }
        } else {
          // No database connection is defined
          result.setNrErrors(1);
          logError(BaseMessages.getString(PKG, "JobMysqlBulkLoad.Nodatabase.Label"));
        }
      } else {
        // The file doesn't exist or is not readable
        result.setNrErrors(1);
        logError("File [" + realFilename + "] doesn't exist!");
      }
    } catch (Exception e) {
      // An unexpected error occurred
      result.setNrErrors(1);
      logError(BaseMessages.getString(PKG, "JobMysqlBulkLoad.UnexpectedError.Label"), e);
    } finally {
      // FIX: close the VFS file object; the original never released it (the MSSQL bulk-load
      // sibling entry already does this).
      try {
        if (fileObject != null) {
          fileObject.close();
        }
      } catch (Exception e) {
        // Ignore close errors
      }
    }
  } else {
    // No file was specified
    result.setNrErrors(1);
    logError(BaseMessages.getString(PKG, "JobMysqlBulkLoad.Nofilename.Label"));
  }
  return result;
}
Usage of org.apache.commons.vfs2.VFS in the pentaho-kettle project (by Pentaho):
the execute method of the JobEntryFilesExist class.
/**
 * Verifies that every path listed in this entry's arguments exists (and is readable).
 *
 * @param previousResult the result of the previous job entry; updated in place and returned
 * @param nr             the job entry number (not used here)
 * @return the result; {@code result=true} when no path is missing. The error count is the
 *         number of VFS errors, or the number of missing files when the PDI-10270
 *         compatibility flag is set
 */
public Result execute(Result previousResult, int nr) {
  Result result = previousResult;
  result.setResult(false);
  result.setNrErrors(0);

  int missingCount = 0;
  int errorCount = 0;

  // see PDI-10270 for details
  boolean compatibilityMode = "Y".equalsIgnoreCase(getVariable(Const.KETTLE_COMPATIBILITY_SET_ERROR_ON_SPECIFIC_JOB_ENTRIES, "N"));

  if (arguments != null) {
    for (int idx = 0; idx < arguments.length; idx++) {
      if (parentJob.isStopped()) {
        break;
      }
      FileObject candidate = null;
      try {
        String realPath = environmentSubstitute(arguments[idx]);
        // Set Embedded NamedCluter MetatStore Provider Key so that it can be passed to VFS
        if (parentJobMeta.getNamedClusterEmbedManager() != null) {
          parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
        }
        candidate = KettleVFS.getFileObject(realPath, this);
        // TODO: is it needed to check file for readability?
        if (candidate.exists() && candidate.isReadable()) {
          if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobEntryFilesExist.File_Exists", realPath));
          }
        } else {
          missingCount++;
          if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobEntryFilesExist.File_Does_Not_Exist", realPath));
          }
        }
      } catch (Exception e) {
        // A VFS failure counts both as an error and as a missing file.
        errorCount++;
        missingCount++;
        logError(BaseMessages.getString(PKG, "JobEntryFilesExist.ERROR_0004_IO_Exception", e.toString()), e);
      } finally {
        if (candidate != null) {
          try {
            candidate.close();
          } catch (IOException ignored) {
            /* Ignore */
          }
        }
      }
    }
  }

  result.setNrErrors(compatibilityMode ? missingCount : errorCount);
  result.setResult(missingCount == 0);
  return result;
}
Usage of org.apache.commons.vfs2.VFS in the pentaho-kettle project (by Pentaho):
the execute method of the JobEntryFolderIsEmpty class.
/**
 * Succeeds when the configured folder exists and contains no files (the walk uses a
 * {@code TextFileSelector}, which updates the {@code filescount}/{@code folderscount} fields,
 * honouring the optional wildcard {@code pattern}).
 *
 * @param previousResult the result of the previous job entry; updated in place and returned
 * @param nr             the job entry number (not used here)
 * @return the result; {@code result=true} when the folder is empty, with the folder count
 *         reported as input lines
 */
public Result execute(Result previousResult, int nr) {
  // see PDI-10270 for details
  boolean compatibilityMode = "Y".equalsIgnoreCase(getVariable(Const.KETTLE_COMPATIBILITY_SET_ERROR_ON_SPECIFIC_JOB_ENTRIES, "N"));

  Result result = previousResult;
  result.setResult(false);
  result.setNrErrors(compatibilityMode ? 1 : 0);

  // Reset the per-run counters and the optional wildcard matcher used by the file selector.
  filescount = 0;
  folderscount = 0;
  pattern = Utils.isEmpty(getWildcard()) ? null : Pattern.compile(getRealWildcard());

  if (foldername == null) {
    logError("No Foldername is defined.");
    result.setNrErrors(1);
    return result;
  }

  // Set Embedded NamedCluter MetatStore Provider Key so that it can be passed to VFS
  if (parentJobMeta.getNamedClusterEmbedManager() != null) {
    parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
  }

  String realFoldername = getRealFoldername();
  FileObject folderObject = null;
  try {
    folderObject = KettleVFS.getFileObject(realFoldername, this);
    if (!folderObject.exists()) {
      // No folder found
      if (log.isBasic()) {
        logBasic("we can not find [" + realFoldername + "] !");
      }
      result.setNrErrors(1);
    } else if (folderObject.getType() != FileType.FOLDER) {
      // Not a folder, fail
      log.logError("[" + realFoldername + "] is not a folder, failing.");
      result.setNrErrors(1);
    } else {
      // Walk the folder; the selector counts the files and folders it visits.
      try {
        folderObject.findFiles(new TextFileSelector(folderObject.toString()));
      } catch (Exception ex) {
        // The selector may abort the walk with an ExpectedException; anything else is real.
        if (!(ex.getCause() instanceof ExpectedException)) {
          throw ex;
        }
      }
      if (log.isBasic()) {
        log.logBasic("Total files", "We found : " + filescount + " file(s)");
      }
      if (filescount == 0) {
        result.setResult(true);
        result.setNrLinesInput(folderscount);
      }
    }
  } catch (Exception e) {
    logError("Error checking folder [" + realFoldername + "]", e);
    result.setResult(false);
    result.setNrErrors(1);
  } finally {
    if (folderObject != null) {
      try {
        folderObject.close();
      } catch (IOException ignored) {
        /* Ignore */
      }
    }
  }
  return result;
}
Usage of org.apache.commons.vfs2.VFS in the pentaho-kettle project (by Pentaho):
the execute method of the JobEntryFoldersCompare class.
/**
 * Compares two files or two folders. For folders: both must contain the same number of files,
 * every file of folder 1 must exist in folder 2 with the same type, and optionally the same
 * size ({@code comparefilesize}) and content ({@code comparefilecontent}).
 *
 * @param previousResult the result of the previous job entry; updated in place and returned
 * @param nr             the job entry number (not used here)
 * @return the result; {@code result=true} when the two files/folders are considered equal
 */
public Result execute(Result previousResult, int nr) {
  Result result = previousResult;
  result.setResult(false);
  boolean ok = true;
  String realFilename1 = getRealFilename1();
  String realFilename2 = getRealFilename2();
  FileObject folder1 = null;
  FileObject folder2 = null;
  FileObject filefolder1 = null;
  FileObject filefolder2 = null;
  try {
    if (filename1 != null && filename2 != null) {
      // Set Embedded NamedCluter MetatStore Provider Key so that it can be passed to VFS
      if (parentJobMeta.getNamedClusterEmbedManager() != null) {
        parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
      }
      // Get the folders/files to compare
      folder1 = KettleVFS.getFileObject(realFilename1, this);
      folder2 = KettleVFS.getFileObject(realFilename2, this);
      if (folder1.exists() && folder2.exists()) {
        if (!folder1.getType().equals(folder2.getType())) {
          // Comparing a file with a folder makes no sense: report both types and fail.
          logError(BaseMessages.getString(PKG, "JobFoldersCompare.Log.CanNotCompareFilesFolders"));
          logFileObjectType(folder1.getType(), realFilename1);
          logFileObjectType(folder2.getType(), realFilename2);
        } else {
          if (folder1.getType() == FileType.FILE) {
            // Two plain files: just compare their contents.
            result.setResult(equalFileContents(folder1, folder2));
          } else if (folder1.getType() == FileType.FOLDER) {
            // Two folders: list both sides.
            FileObject[] list1 = folder1.findFiles(new TextFileSelector(folder1.toString()));
            FileObject[] list2 = folder2.findFiles(new TextFileSelector(folder2.toString()));
            int lenList1 = list1.length;
            int lenList2 = list2.length;
            if (log.isDetailed()) {
              logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FolderContains", realFilename1, "" + lenList1));
              logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FolderContains", realFilename2, "" + lenList2));
            }
            if (lenList1 == lenList2) {
              // Index both folders by base file name.
              HashMap<String, String> collection1 = new HashMap<String, String>();
              HashMap<String, String> collection2 = new HashMap<String, String>();
              for (FileObject f : list1) {
                collection1.put(f.getName().getBaseName(), f.toString());
              }
              for (FileObject f : list2) {
                collection2.put(f.getName().getBaseName(), f.toString());
              }
              // For each entry of folder 1, look it up in folder 2; when found, compare the
              // entry type, then (optionally) size and content.
              for (Map.Entry<String, String> entry : collection1.entrySet()) {
                if (!collection2.containsKey(entry.getKey())) {
                  ok = false;
                  if (log.isDetailed()) {
                    logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FileCanNotBeFoundIn", entry.getKey(), realFilename2));
                  }
                } else {
                  if (log.isDebug()) {
                    logDebug(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FileIsFoundIn", entry.getKey(), realFilename2));
                  }
                  filefolder1 = KettleVFS.getFileObject(entry.getValue(), this);
                  filefolder2 = KettleVFS.getFileObject(collection2.get(entry.getKey()), this);
                  if (!filefolder2.getType().equals(filefolder1.getType())) {
                    // Same name but different types (file vs. folder): report both and fail.
                    ok = false;
                    if (log.isDetailed()) {
                      logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FilesNotSameType", filefolder1.toString(), filefolder2.toString()));
                    }
                    logFileObjectType(filefolder1.getType(), filefolder1.toString());
                    logFileObjectType(filefolder2.getType(), filefolder2.toString());
                  } else {
                    // Entries have the same type ...
                    if (filefolder2.getType() == FileType.FILE) {
                      // Optionally compare file sizes
                      if (comparefilesize) {
                        long fileSize1 = filefolder1.getContent().getSize();
                        long fileSize2 = filefolder2.getContent().getSize();
                        if (fileSize1 != fileSize2) {
                          ok = false;
                          if (log.isDetailed()) {
                            logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FilesNotSameSize", filefolder1.toString(), filefolder2.toString()));
                            logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.SizeFileIs", filefolder1.toString(), "" + fileSize1));
                            logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.SizeFileIs", filefolder2.toString(), "" + fileSize2));
                          }
                        }
                      }
                      // Optionally compare file contents (skipped once a mismatch was found,
                      // matching the original behavior)
                      if (ok && comparefilecontent) {
                        if (!equalFileContents(filefolder1, filefolder2)) {
                          ok = false;
                          if (log.isDetailed()) {
                            logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FilesNotSameContent", filefolder1.toString(), filefolder2.toString()));
                          }
                        }
                      }
                    }
                  }
                }
              }
              result.setResult(ok);
            } else {
              // The 2 folders don't have the same number of files
              if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobFoldersCompare.Log.FoldersDifferentFiles", realFilename1, realFilename2));
              }
            }
          }
          // else: unknown file type; nothing to compare
        }
      } else {
        if (!folder1.exists()) {
          logError(BaseMessages.getString(PKG, "JobFileCompare.Log.FileNotExist", realFilename1));
        }
        if (!folder2.exists()) {
          logError(BaseMessages.getString(PKG, "JobFileCompare.Log.FileNotExist", realFilename2));
        }
        result.setResult(false);
        result.setNrErrors(1);
      }
    } else {
      logError(BaseMessages.getString(PKG, "JobFoldersCompare.Log.Need2Files"));
    }
  } catch (Exception e) {
    result.setResult(false);
    result.setNrErrors(1);
    // FIX: the original passed realFilename2 twice, never naming the first file in the message.
    logError(BaseMessages.getString(PKG, "JobFoldersCompare.Log.ErrorComparing", realFilename1, realFilename2, e.getMessage()));
  } finally {
    // FIX: close each object independently; the original used a single try, so one failing
    // close leaked the remaining objects.
    for (FileObject fo : new FileObject[] { folder1, folder2, filefolder1, filefolder2 }) {
      if (fo != null) {
        try {
          fo.close();
        } catch (IOException e) {
          // Ignore close errors
        }
      }
    }
  }
  return result;
}

/** Logs whether the given VFS object is a file, a folder or of unknown type. */
private void logFileObjectType(FileType type, String name) {
  if (type == FileType.FILE) {
    logError(BaseMessages.getString(PKG, "JobFoldersCompare.Log.IsAFile", name));
  } else if (type == FileType.FOLDER) {
    logError(BaseMessages.getString(PKG, "JobFoldersCompare.Log.IsAFolder", name));
  } else {
    logError(BaseMessages.getString(PKG, "JobFoldersCompare.Log.IsUnknownFileType", name));
  }
}
End of aggregated usage examples.