Use of java.io.FileDescriptor in project hadoop by apache.
From class TestNativeIO, method testSetFilePointer.
@Test(timeout = 30000)
public void testSetFilePointer() throws Exception {
  assumeWindows();
  LOG.info("Set a file pointer on Windows");
  try {
    File testfile = new File(TEST_DIR, "testSetFilePointer");
    assertTrue("Create test subject",
        testfile.exists() || testfile.createNewFile());
    FileWriter writer = new FileWriter(testfile);
    try {
      for (int i = 0; i < 200; i++) {
        if (i < 100) {
          writer.write('a');
        } else {
          writer.write('b');
        }
      }
      writer.flush();
    } catch (Exception writerException) {
      fail("Got unexpected exception: " + writerException.getMessage());
    } finally {
      writer.close();
    }
    FileDescriptor fd = NativeIO.Windows.createFile(
        testfile.getCanonicalPath(),
        NativeIO.Windows.GENERIC_READ,
        NativeIO.Windows.FILE_SHARE_READ |
            NativeIO.Windows.FILE_SHARE_WRITE |
            NativeIO.Windows.FILE_SHARE_DELETE,
        NativeIO.Windows.OPEN_EXISTING);
    NativeIO.Windows.setFilePointer(fd, 120, NativeIO.Windows.FILE_BEGIN);
    FileReader reader = new FileReader(fd);
    try {
      int c = reader.read();
      assertTrue("Unexpected character: " + c, c == 'b');
    } catch (Exception readerException) {
      fail("Got unexpected exception: " + readerException.getMessage());
    } finally {
      reader.close();
    }
  } catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}
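The writer produces 100 'a' bytes followed by 100 'b' bytes, so after moving the file pointer to offset 120 the first character read must be a 'b'. For readers not on Windows, the same seek-then-read behavior can be sketched with the portable java.io API (a minimal illustration, not part of the Hadoop test):

// Portable analogue of setFilePointer(fd, 120, FILE_BEGIN), using
// java.io.RandomAccessFile instead of the native Windows call.
try (RandomAccessFile raf = new RandomAccessFile(testfile, "r")) {
  raf.seek(120);        // position the file pointer at byte offset 120
  int c = raf.read();   // offset 120 falls inside the run of 'b' characters
  assertTrue("Unexpected character: " + c, c == 'b');
}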
Use of java.io.FileDescriptor in project hadoop by apache.
From class TestNativeIO, method testCreateFile.
@Test(timeout = 30000)
public void testCreateFile() throws Exception {
  assumeWindows();
  LOG.info("Open a file on Windows with SHARE_DELETE shared mode");
  try {
    File testfile = new File(TEST_DIR, "testCreateFile");
    assertTrue("Create test subject",
        testfile.exists() || testfile.createNewFile());
    FileDescriptor fd = NativeIO.Windows.createFile(
        testfile.getCanonicalPath(),
        NativeIO.Windows.GENERIC_READ,
        NativeIO.Windows.FILE_SHARE_READ |
            NativeIO.Windows.FILE_SHARE_WRITE |
            NativeIO.Windows.FILE_SHARE_DELETE,
        NativeIO.Windows.OPEN_EXISTING);
    FileInputStream fin = new FileInputStream(fd);
    try {
      fin.read();
      File newfile = new File(TEST_DIR, "testRenamedFile");
      boolean renamed = testfile.renameTo(newfile);
      assertTrue("Rename failed.", renamed);
      fin.read();
    } catch (Exception e) {
      fail("Got unexpected exception: " + e.getMessage());
    } finally {
      fin.close();
    }
  } catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}
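The rename succeeds only because the handle was opened with FILE_SHARE_DELETE; under standard Windows sharing semantics, renaming or deleting a file that has an open handle without that flag is refused. A hedged sketch of the contrasting case, reusing the same NativeIO.Windows.createFile API (expected behavior, not taken from the test):

// Open WITHOUT FILE_SHARE_DELETE: while this handle stays open, a rename
// of the file is expected to be rejected by Windows.
FileDescriptor fd2 = NativeIO.Windows.createFile(
    testfile.getCanonicalPath(),
    NativeIO.Windows.GENERIC_READ,
    NativeIO.Windows.FILE_SHARE_READ | NativeIO.Windows.FILE_SHARE_WRITE,
    NativeIO.Windows.OPEN_EXISTING);
assertFalse("Rename should be blocked", testfile.renameTo(newfile));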
Use of java.io.FileDescriptor in project hadoop by apache.
From class TestNativeIO, method testFDDoesntLeak.
/**
* Test that opens and closes a file 10000 times - this would crash with
* "Too many open files" if we leaked fds using this access pattern.
*/
@Test(timeout = 30000)
public void testFDDoesntLeak() throws IOException {
  assumeNotWindows();
  for (int i = 0; i < 10000; i++) {
    FileDescriptor fd = NativeIO.POSIX.open(
        new File(TEST_DIR, "testNoFdLeak").getAbsolutePath(),
        O_WRONLY | O_CREAT, 0700);
    assertNotNull(fd);
    assertTrue(fd.valid());
    FileOutputStream fos = new FileOutputStream(fd);
    fos.write("foo".getBytes());
    fos.close();
  }
}
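The descriptor is released by FileOutputStream.close(), which is why the loop never exhausts the fd table. A slightly more defensive variant of the loop body (a sketch using the same APIs) closes the stream with try-with-resources, so the descriptor is released even if write() throws:

FileDescriptor fd = NativeIO.POSIX.open(
    new File(TEST_DIR, "testNoFdLeak").getAbsolutePath(),
    O_WRONLY | O_CREAT, 0700);
try (FileOutputStream fos = new FileOutputStream(fd)) {
  fos.write("foo".getBytes());
}  // close() runs here and releases the underlying descriptor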
Use of java.io.FileDescriptor in project android_frameworks_base by ParanoidAndroid.
From class BackupHelperDispatcher, method doOneBackup.
private void doOneBackup(ParcelFileDescriptor oldState, BackupDataOutput data,
    ParcelFileDescriptor newState, Header header, BackupHelper helper)
    throws IOException {
  int err;
  FileDescriptor newStateFD = newState.getFileDescriptor();
  // allocate space for the header in the file
  int pos = allocateHeader_native(header, newStateFD);
  if (pos < 0) {
    throw new IOException("allocateHeader_native failed (error " + pos + ")");
  }
  data.setKeyPrefix(header.keyPrefix);
  // do the backup
  helper.performBackup(oldState, data, newState);
  // fill in the header (seeking back to pos). The file pointer will be returned to
  // where it was at the end of performBackup. Header.chunkSize will not be filled in.
  err = writeHeader_native(header, newStateFD, pos);
  if (err != 0) {
    throw new IOException("writeHeader_native failed (error " + err + ")");
  }
}
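The two native calls implement a reserve-then-backfill pattern: space for the header is claimed before the chunk body is written, and the header is completed afterwards, once values such as the chunk size are known. A minimal sketch of the same pattern in plain Java, where raf, HEADER_SIZE, writeBody, and encodeHeader are hypothetical stand-ins for the native code:

long headerPos = raf.getFilePointer();   // like allocateHeader_native: remember the slot
raf.write(new byte[HEADER_SIZE]);        // reserve space for the header
writeBody(raf);                          // like helper.performBackup(...)
long endPos = raf.getFilePointer();
raf.seek(headerPos);
raf.write(encodeHeader(header));         // like writeHeader_native: backfill the header
raf.seek(endPos);                        // restore the pointer, as the native code does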
Use of java.io.FileDescriptor in project android_frameworks_base by ParanoidAndroid.
From class BackupHelperDispatcher, method performBackup.
public void performBackup(ParcelFileDescriptor oldState, BackupDataOutput data,
    ParcelFileDescriptor newState) throws IOException {
  // First, do the helpers that we've already done, since they're already in the state
  // file.
  int err;
  Header header = new Header();
  TreeMap<String, BackupHelper> helpers =
      (TreeMap<String, BackupHelper>) mHelpers.clone();
  FileDescriptor oldStateFD = null;
  FileDescriptor newStateFD = newState.getFileDescriptor();
  if (oldState != null) {
    oldStateFD = oldState.getFileDescriptor();
    while ((err = readHeader_native(header, oldStateFD)) >= 0) {
      if (err == 0) {
        BackupHelper helper = helpers.get(header.keyPrefix);
        Log.d(TAG, "handling existing helper '" + header.keyPrefix + "' " + helper);
        if (helper != null) {
          doOneBackup(oldState, data, newState, header, helper);
          helpers.remove(header.keyPrefix);
        } else {
          skipChunk_native(oldStateFD, header.chunkSize);
        }
      }
    }
  }
  // Then go through and do the rest that we haven't done.
  for (Map.Entry<String, BackupHelper> entry : helpers.entrySet()) {
    header.keyPrefix = entry.getKey();
    Log.d(TAG, "handling new helper '" + header.keyPrefix + "'");
    BackupHelper helper = entry.getValue();
    doOneBackup(oldState, data, newState, header, helper);
  }
}