Use of java.nio.channels.ReadableByteChannel in project neo4j by neo4j.
From the class PageCacheTest, method readableByteChannelMustReadAllBytesInFileConsistently:
@RepeatRule.Repeat(times = 20)
@Test(timeout = SHORT_TIMEOUT_MILLIS)
public void readableByteChannelMustReadAllBytesInFileConsistently() throws Exception {
    File file = file("a");
    generateFileWithRecords(file, recordCount, recordSize);
    configureStandardPageCache();
    try (PagedFile pf = pageCache.map(file, filePageSize)) {
        RandomAdversary adversary = new RandomAdversary(0.9, 0, 0);
        AdversarialPagedFile apf = new AdversarialPagedFile(pf, adversary);
        try (ReadableByteChannel channel = apf.openReadableByteChannel()) {
            verifyRecordsInFile(channel, recordCount);
        }
    }
}
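The adversary makes most calls misbehave in benign ways (for example, filling only part of the destination buffer), so the verification leans on the basic ReadableByteChannel contract: a single read may return fewer bytes than requested. A minimal sketch of a loop that tolerates short reads, assuming nothing beyond java.nio (the helper name is mine, not neo4j's):

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

final class ChannelReads {
    // Keep reading until the buffer is full; fail loudly if the channel
    // reaches end-of-stream first.
    static void readFully(ReadableByteChannel channel, ByteBuffer dst) throws IOException {
        while (dst.hasRemaining()) {
            if (channel.read(dst) == -1) {
                throw new EOFException("channel ended with " + dst.remaining() + " bytes still expected");
            }
        }
    }
}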
Use of java.nio.channels.ReadableByteChannel in project neo4j by neo4j.
From the class PageCacheTest, method writableByteChannelMustWriteAllBytesInFile:
@Test(timeout = SHORT_TIMEOUT_MILLIS)
public void writableByteChannelMustWriteAllBytesInFile() throws Exception {
    File file = file("a");
    configureStandardPageCache();
    try (PagedFile pf = pageCache.map(file, filePageSize)) {
        try (WritableByteChannel channel = pf.openWritableByteChannel()) {
            generateFileWithRecords(channel, recordCount, recordSize);
        }
        try (ReadableByteChannel channel = pf.openReadableByteChannel()) {
            verifyRecordsInFile(channel, recordCount);
        }
    }
}
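generateFileWithRecords and verifyRecordsInFile are test helpers not shown in this excerpt. The write side of the round trip follows the standard channel idiom: fill a ByteBuffer, flip it, then drain it, remembering that write may accept only part of the buffer per call. A hedged sketch of that pattern (the repeating-byte payload below is a made-up stand-in, not neo4j's record format):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

final class RecordWrites {
    // Write recordCount records of recordSize bytes each through the channel.
    static void writeRecords(WritableByteChannel channel, int recordCount, int recordSize) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(recordSize);
        for (int record = 0; record < recordCount; record++) {
            buffer.clear();
            for (int i = 0; i < recordSize; i++) {
                buffer.put((byte) (record + i)); // placeholder payload
            }
            buffer.flip();
            while (buffer.hasRemaining()) {
                channel.write(buffer); // write() may drain only part of the buffer
            }
        }
    }
}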
Use of java.nio.channels.ReadableByteChannel in project neo4j by neo4j.
From the class TestBlockLogBuffer, method readSmallPortions:
@Test
public void readSmallPortions() throws IOException {
    byte[] bytes = new byte[255];
    ChannelBuffer wrappedBuffer = ChannelBuffers.wrappedBuffer(bytes);
    wrappedBuffer.resetWriterIndex();
    BlockLogBuffer buffer = new BlockLogBuffer(wrappedBuffer, new Monitors().newMonitor(ByteCounterMonitor.class));
    byte byteValue = 5;
    int intValue = 1234;
    long longValue = 574853;
    buffer.put(byteValue);
    buffer.putInt(intValue);
    buffer.putLong(longValue);
    buffer.close();
    ReadableByteChannel reader = new BlockLogReader(wrappedBuffer);
    ByteBuffer verificationBuffer = ByteBuffer.wrap(new byte[1]);
    reader.read(verificationBuffer);
    verificationBuffer.flip();
    assertEquals(byteValue, verificationBuffer.get());
    verificationBuffer = ByteBuffer.wrap(new byte[4]);
    reader.read(verificationBuffer);
    verificationBuffer.flip();
    assertEquals(intValue, verificationBuffer.getInt());
    verificationBuffer = ByteBuffer.wrap(new byte[8]);
    reader.read(verificationBuffer);
    verificationBuffer.flip();
    assertEquals(longValue, verificationBuffer.getLong());
}
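The test repeats one NIO idiom three times: wrap a fixed-size array, read into it, flip, then get the value back. A small helper capturing that idiom (the name and the read loop are mine; in the test itself each single read evidently suffices, since the assertions pass without looping):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

final class Verification {
    // Read exactly 'size' bytes and return a buffer positioned at zero,
    // ready for relative get()/getInt()/getLong() calls.
    static ByteBuffer readExact(ReadableByteChannel reader, int size) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(size);
        while (buffer.hasRemaining() && reader.read(buffer) != -1) {
            // loop until the buffer is full or the channel is exhausted
        }
        buffer.flip();
        return buffer;
    }
}

With it, each assertion collapses to a line such as assertEquals(intValue, Verification.readExact(reader, 4).getInt()).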
Use of java.nio.channels.ReadableByteChannel in project neo4j by neo4j.
From the class StoreCopyServer, method flushStoresAndStreamStoreFiles:
/**
 * Trigger a store flush (checkpoint) and write {@link NeoStoreDataSource#listStoreFiles(boolean) store files} to the
 * given {@link StoreWriter}.
 *
 * @param triggerName name of the component that asks for store files.
 * @param writer store writer to write files to.
 * @param includeLogs <code>true</code> if transaction logs should be copied, <code>false</code> otherwise.
 * @return a {@link RequestContext} specifying at which point the store copy started.
 */
public RequestContext flushStoresAndStreamStoreFiles(String triggerName, StoreWriter writer, boolean includeLogs) {
    try {
        ThrowingAction<IOException> checkPointAction = () -> {
            monitor.startTryCheckPoint();
            checkPointer.tryCheckPoint(new SimpleTriggerInfo(triggerName));
            monitor.finishTryCheckPoint();
        };
        // Copy the store files
        long lastAppliedTransaction;
        try (Resource lock = mutex.storeCopy(checkPointAction);
                ResourceIterator<StoreFileMetadata> files = dataSource.listStoreFiles(includeLogs)) {
            lastAppliedTransaction = checkPointer.lastCheckPointedTransactionId();
            monitor.startStreamingStoreFiles();
            ByteBuffer temporaryBuffer = ByteBuffer.allocateDirect((int) ByteUnit.mebiBytes(1));
            while (files.hasNext()) {
                StoreFileMetadata meta = files.next();
                File file = meta.file();
                int recordSize = meta.recordSize();
                // Read from the paged file if a mapping exists; otherwise read through the file system.
                // A file is mapped if it is a store and we have a running database, which will be the case
                // for both online backup and when we are the master of an HA cluster.
                final Optional<PagedFile> optionalPagedFile = pageCache.getExistingMapping(file);
                if (optionalPagedFile.isPresent()) {
                    try (PagedFile pagedFile = optionalPagedFile.get()) {
                        long fileSize = pagedFile.fileSize();
                        try (ReadableByteChannel fileChannel = pagedFile.openReadableByteChannel()) {
                            doWrite(writer, temporaryBuffer, file, recordSize, fileChannel, fileSize);
                        }
                    }
                } else {
                    try (ReadableByteChannel fileChannel = fileSystem.open(file, "r")) {
                        long fileSize = fileSystem.getFileSize(file);
                        doWrite(writer, temporaryBuffer, file, recordSize, fileChannel, fileSize);
                    }
                }
            }
        } finally {
            monitor.finishStreamingStoreFiles();
        }
        return anonymous(lastAppliedTransaction);
    } catch (IOException e) {
        throw new ServerFailureException(e);
    }
}
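doWrite is referenced above but not part of this excerpt. Judging from the call sites, it streams one file's channel into the StoreWriter through the shared one-mebibyte temporaryBuffer. A hedged reconstruction (the signature is inferred from the calls; the body and the StoreWriter.write parameters are assumptions, not verified neo4j code):

private void doWrite(StoreWriter writer, ByteBuffer temporaryBuffer, File file, int recordSize,
        ReadableByteChannel fileChannel, long fileSize) throws IOException {
    // Assumption: the writer copies the entire channel through temporaryBuffer;
    // recordSize would let the receiver preserve record alignment, and
    // 'fileSize > 0' would signal whether any data is expected for this file.
    writer.write(file.getName(), fileChannel, temporaryBuffer, fileSize > 0, recordSize);
}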
Use of java.nio.channels.ReadableByteChannel in project jodd by oblac.
From the class NetUtil, method downloadFile:
/**
 * Downloads a resource to a file, potentially very efficiently.
 */
public static void downloadFile(String url, File file) throws IOException {
    try (InputStream inputStream = new URL(url).openStream();
            ReadableByteChannel rbc = Channels.newChannel(inputStream);
            FileOutputStream fos = new FileOutputStream(file)) {
        // transferFrom copies at most 1 << 24 bytes (16 MiB) in this call,
        // so larger resources are truncated at that size.
        fos.getChannel().transferFrom(rbc, 0, 1 << 24);
    }
}
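A quick usage sketch (the URL and target file are placeholders). Note the 1 << 24 cap in transferFrom above: anything past 16 MiB is silently dropped, so a caller handling large resources would loop on transferFrom until it stops making progress.

NetUtil.downloadFile("https://example.com/archive.zip", new File("archive.zip"));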