use of org.apache.commons.vfs2.FileObject in project pentaho-kettle by pentaho.
In class PartitioningIT, the method testPartitioningSwimmingLanesOnCluster:
/**
 * This test reads a CSV file in parallel on the cluster, one copy per slave.<br>
 * It then partitions the data on id in 12 partitions (4 per slave) and keeps the data partitioned until written to
 * file.<br>
 * As such we expect 12 files on disk.<br>
 * File: "partitioning-swimming-lanes-on-cluster.ktr"<br>
 */
public void testPartitioningSwimmingLanesOnCluster() throws Exception {
  init();
  ClusterGenerator clusterGenerator = new ClusterGenerator();
  try {
    clusterGenerator.launchSlaveServers();
    TransMeta transMeta = loadAndModifyTestTransformation( clusterGenerator,
      "src/it/resources/org/pentaho/di/cluster/partitioning-swimming-lanes-on-cluster.ktr" );
    TransExecutionConfiguration config = createClusteredTransExecutionConfiguration();
    TransSplitter transSplitter = Trans.executeClustered( transMeta, config );
    // Fixed copy-paste bug: the log channel used to be named after a different test
    // (<testParallelFileReadOnMaster>), which made cluster log output misleading.
    long nrErrors = Trans.monitorClusteredTransformation(
      new LogChannel( "cluster unit test <testPartitioningSwimmingLanesOnCluster>" ), transSplitter, null, 1 );
    assertEquals( 0L, nrErrors );
    // Expected row counts for each of the 12 partition files (000..011).
    String[] results = new String[] { "8", "9", "9", "9", "9", "8", "8", "8", "8", "8", "8", "8" };
    String[] files = new String[] { "000", "001", "002", "003", "004", "005", "006", "007", "008", "009", "010", "011" };
    for ( int i = 0; i < results.length; i++ ) {
      String filename = "${java.io.tmpdir}/partitioning-swimming-lanes-on-cluster-" + files[i] + ".txt";
      String result = loadFileContent( transMeta, filename );
      assertEqualsIgnoreWhitespacesAndCase( results[i], result );
      // Remove the output file : we don't want to leave too much clutter around
      //
      FileObject file = KettleVFS.getFileObject( transMeta.environmentSubstitute( filename ) );
      file.delete();
    }
  } catch ( Exception e ) {
    e.printStackTrace();
    fail( e.toString() );
  } finally {
    // Always stop the slave servers, even when the test body failed.
    try {
      clusterGenerator.stopSlaveServers();
    } catch ( Exception e ) {
      e.printStackTrace();
      fail( e.toString() );
    }
  }
}
use of org.apache.commons.vfs2.FileObject in project pentaho-kettle by pentaho.
In class BlockingStep, the method getBuffer:
/**
 * Returns the next buffered row, or {@code null} when no row is available.
 * <p>
 * Rows come from one of two places: the in-memory {@code data.buffer} (used when
 * no temp files were spilled), or {@code data.rowbuffer}, which holds one
 * look-ahead row read from the first temp file in {@code data.files}. The lists
 * {@code data.files}, {@code data.dis}, {@code data.fis} and (when compression is
 * on) {@code data.gzis} are parallel: index 0 of each refers to the same file.
 */
private Object[] getBuffer() {
Object[] retval;
// Open all files at once and read one row from each file...
// NOTE(review): despite the comment, only files.get(0) is opened here; the
// dis/fis emptiness check means this runs once to prime the first stream.
if (data.files.size() > 0 && (data.dis.size() == 0 || data.fis.size() == 0)) {
if (log.isBasic()) {
logBasic(BaseMessages.getString(PKG, "BlockingStep.Log.Openfiles"));
}
try {
FileObject fileObject = data.files.get(0);
String filename = KettleVFS.getFilename(fileObject);
if (log.isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "BlockingStep.Log.Openfilename1") + filename + BaseMessages.getString(PKG, "BlockingStep.Log.Openfilename2"));
}
InputStream fi = KettleVFS.getInputStream(fileObject);
DataInputStream di;
data.fis.add(fi);
// Temp files may be GZIP-compressed; wrap the raw stream accordingly and
// remember the GZIP stream so it can be closed later.
if (meta.getCompress()) {
GZIPInputStream gzfi = new GZIPInputStream(new BufferedInputStream(fi));
di = new DataInputStream(gzfi);
data.gzis.add(gzfi);
} else {
di = new DataInputStream(fi);
}
data.dis.add(di);
// How long is the buffer?
// The temp file starts with an int row count written when it was spilled.
int buffersize = di.readInt();
if (log.isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "BlockingStep.Log.BufferSize1") + filename + BaseMessages.getString(PKG, "BlockingStep.Log.BufferSize2") + buffersize + " " + BaseMessages.getString(PKG, "BlockingStep.Log.BufferSize3"));
}
if (buffersize > 0) {
// Read a row from temp-file
data.rowbuffer.add(data.outputRowMeta.readData(di));
}
} catch (Exception e) {
logError(BaseMessages.getString(PKG, "BlockingStepMeta.ErrorReadingFile") + e.toString());
logError(Const.getStackTracker(e));
}
}
if (data.files.size() == 0) {
// No temp files: serve rows straight from the in-memory buffer (FIFO).
if (data.buffer.size() > 0) {
retval = data.buffer.get(0);
data.buffer.remove(0);
} else {
retval = null;
}
} else {
if (data.rowbuffer.size() == 0) {
retval = null;
} else {
// Hand out the look-ahead row and immediately read its replacement,
// keeping exactly one row buffered per open file.
retval = data.rowbuffer.get(0);
data.rowbuffer.remove(0);
// now get another
FileObject file = data.files.get(0);
DataInputStream di = data.dis.get(0);
InputStream fi = data.fis.get(0);
GZIPInputStream gzfi = (meta.getCompress()) ? data.gzis.get(0) : null;
try {
data.rowbuffer.add(0, data.outputRowMeta.readData(di));
} catch (SocketTimeoutException e) {
logError(BaseMessages.getString(PKG, "System.Log.UnexpectedError") + " : " + e.toString());
logError(Const.getStackTracker(e));
setErrors(1);
stopAll();
} catch (KettleFileException fe) {
// empty file or EOF mostly
// The file is exhausted: close every stream layer, delete the temp
// file, and drop index 0 from all parallel bookkeeping lists.
try {
di.close();
fi.close();
if (gzfi != null) {
gzfi.close();
}
file.delete();
} catch (IOException e) {
logError(BaseMessages.getString(PKG, "BlockingStepMeta.UnableDeleteFile") + file.toString());
setErrors(1);
stopAll();
return null;
}
data.files.remove(0);
data.dis.remove(0);
data.fis.remove(0);
if (gzfi != null) {
data.gzis.remove(0);
}
}
}
}
return retval;
}
use of org.apache.commons.vfs2.FileObject in project pentaho-kettle by pentaho.
In class SftpFileObjectWithWindowsSupportTest, the method isWritableLinuxCase:
@Test
public void isWritableLinuxCase() throws Exception {
  // A file with POSIX read+write bits set must report itself as writeable.
  FileObject writable = getLinuxFileObject( true, true );
  // A read-only file (write bit cleared) must not.
  FileObject readOnly = getLinuxFileObject( true, false );
  assertTrue( writable.isWriteable() );
  assertFalse( readOnly.isWriteable() );
}
use of org.apache.commons.vfs2.FileObject in project pentaho-kettle by pentaho.
In class SftpFileObjectWithWindowsSupportTest, the method isReadableWindowsCase:
@Test
public void isReadableWindowsCase() throws Exception {
  // On a Windows remote host, ACL-readable files must report readable...
  FileObject readable = getWindowsFileObject( true, false );
  // ...and files without the read ACL must not.
  FileObject unreadable = getWindowsFileObject( false, false );
  assertTrue( readable.isReadable() );
  assertFalse( unreadable.isReadable() );
}
use of org.apache.commons.vfs2.FileObject in project pentaho-kettle by pentaho.
In class SftpFileObjectWithWindowsSupportTest, the helper method getLinuxFileObject:
/**
 * Builds a test double of an SFTP file object whose remote host is Linux.
 *
 * @param posixReadable whether the owner-read permission bit should be set
 * @param posixWritable whether the owner-write permission bit should be set
 * @return a FileObject of type FILE backed by the requested POSIX permissions
 * @throws Exception if the underlying mocks cannot be created
 */
private static FileObject getLinuxFileObject( boolean posixReadable, boolean posixWritable ) throws Exception {
  GenericFileName fileName = mock( GenericFileName.class );
  doReturn( PATH ).when( fileName ).getPath();
  Session session = mock( Session.class );
  SftpFileSystemWindows sftpFileSystem = spy( new SftpFileSystemWindows( fileName, session, null ) );
  // Force the Linux (POSIX permission) code path.
  doReturn( false ).when( sftpFileSystem ).isRemoteHostWindows();
  // Use octal POSIX bit masks instead of the former magic decimals 256/128:
  // 0400 = owner read (S_IRUSR), 0200 = owner write (S_IWUSR).
  int permissions = 0;
  if ( posixReadable ) {
    permissions |= 0400;
  }
  if ( posixWritable ) {
    permissions |= 0200;
  }
  PosixPermissions posixPermissions = new PosixPermissions( permissions, true, true );
  return new SftpFileObjectWithWindowsSupport( fileName, sftpFileSystem ) {
    @Override
    public PosixPermissions getPermissions( boolean checkIds ) {
      return posixPermissions;
    }
    @Override
    public FileType getType() {
      return FileType.FILE;
    }
  };
}
Aggregations