Usage of org.neo4j.storageengine.api.StoreFileMetadata in project neo4j by neo4j:
the method flushStoresAndStreamStoreFiles of the class StoreCopyServer.
/**
 * Trigger store flush (checkpoint) and write {@link NeoStoreDataSource#listStoreFiles(boolean) store files} to the
 * given {@link StoreWriter}.
 *
 * @param triggerName name of the component that asks for store files.
 * @param writer store writer to write files to.
 * @param includeLogs <code>true</code> if transaction logs should be copied, <code>false</code> otherwise.
 * @return a {@link RequestContext} specifying at which point the store copy started.
 * @throws ServerFailureException if an {@link IOException} occurs while checkpointing or streaming the files.
 */
public RequestContext flushStoresAndStreamStoreFiles(String triggerName, StoreWriter writer, boolean includeLogs) {
    try {
        // Checkpoint action handed to the store-copy mutex; the mutex decides when (and whether)
        // to run it relative to acquiring the lock, so the streamed files reflect a flushed state.
        ThrowingAction<IOException> checkPointAction = () -> {
            monitor.startTryCheckPoint();
            checkPointer.tryCheckPoint(new SimpleTriggerInfo(triggerName));
            monitor.finishTryCheckPoint();
        };
        // Copy the store files
        long lastAppliedTransaction;
        // The file listing is taken while the store-copy lock is held, so the set of files
        // and the last-checkpointed transaction id observed below are mutually consistent.
        try (Resource lock = mutex.storeCopy(checkPointAction);
             ResourceIterator<StoreFileMetadata> files = dataSource.listStoreFiles(includeLogs)) {
            lastAppliedTransaction = checkPointer.lastCheckPointedTransactionId();
            monitor.startStreamingStoreFiles();
            // 1 MiB direct scratch buffer reused for every file transfer below.
            ByteBuffer temporaryBuffer = ByteBuffer.allocateDirect((int) ByteUnit.mebiBytes(1));
            while (files.hasNext()) {
                StoreFileMetadata meta = files.next();
                File file = meta.file();
                int recordSize = meta.recordSize();
                // Read from paged file if mapping exists. Otherwise read through file system.
                // A file is mapped if it is a store, and we have a running database, which will be the case for
                // both online backup, and when we are the master of an HA cluster.
                final Optional<PagedFile> optionalPagedFile = pageCache.getExistingMapping(file);
                if (optionalPagedFile.isPresent()) {
                    // Route reads through the page cache so we see pages not yet flushed to disk.
                    try (PagedFile pagedFile = optionalPagedFile.get()) {
                        long fileSize = pagedFile.fileSize();
                        try (ReadableByteChannel fileChannel = pagedFile.openReadableByteChannel()) {
                            doWrite(writer, temporaryBuffer, file, recordSize, fileChannel, fileSize);
                        }
                    }
                } else {
                    // Unmapped files (e.g. non-store files) are read straight from the file system.
                    try (ReadableByteChannel fileChannel = fileSystem.open(file, "r")) {
                        long fileSize = fileSystem.getFileSize(file);
                        doWrite(writer, temporaryBuffer, file, recordSize, fileChannel, fileSize);
                    }
                }
            }
        } finally {
            // Always signal end-of-streaming to the monitor, even when streaming fails midway.
            monitor.finishStreamingStoreFiles();
        }
        return anonymous(lastAppliedTransaction);
    } catch (IOException e) {
        throw new ServerFailureException(e);
    }
}
Usage of org.neo4j.storageengine.api.StoreFileMetadata in project neo4j by neo4j:
the method listStoreFiles of the class NeoStoreFileListing.
/**
 * Lists metadata for all files that make up this store, optionally including transaction logs.
 * The index stores contribute snapshot resources that must remain open while the returned
 * iterator is consumed; closing the iterator releases them.
 *
 * @param includeLogs whether transaction log files should be part of the listing.
 * @return a resource iterator over the gathered {@link StoreFileMetadata}; must be closed by the caller.
 * @throws IOException if gathering any of the file groups fails.
 */
public ResourceIterator<StoreFileMetadata> listStoreFiles(boolean includeLogs) throws IOException {
    Collection<StoreFileMetadata> metadata = new ArrayList<>();
    // Plain file groups first: non-record stores (optionally with logs) and the neo store files.
    gatherNonRecordStores(metadata, includeLogs);
    gatherNeoStoreFiles(metadata);
    // Index file groups hand back snapshot resources tied to the lifetime of the iterator.
    Resource labelScanSnapshot = gatherLabelScanStoreFiles(metadata);
    Resource schemaIndexSnapshot = gatherSchemaIndexFiles(metadata);
    Resource legacyIndexSnapshot = gatherLegacyIndexFiles(metadata);
    MultiResource combinedSnapshots = new MultiResource(asList(labelScanSnapshot, schemaIndexSnapshot, legacyIndexSnapshot));
    return resourceIterator(metadata.iterator(), combinedSnapshots);
}
Usage of org.neo4j.storageengine.api.StoreFileMetadata in project neo4j by neo4j:
the method gatherLuceneFiles of the class TestBranchedData.
/**
 * Collects the store files of the given database whose path mentions the given index name
 * (presumably the Lucene index directory files — the match is a plain substring check).
 *
 * @param db database to pull the data source from.
 * @param indexName substring that selects the index files of interest.
 * @return the matching files, in listing order.
 * @throws IOException if listing the store files fails.
 */
private Collection<File> gatherLuceneFiles(HighlyAvailableGraphDatabase db, String indexName) throws IOException {
    Collection<File> luceneFiles = new ArrayList<>();
    NeoStoreDataSource dataSource = db.getDependencyResolver().resolveDependency(NeoStoreDataSource.class);
    // Logs are irrelevant here, so list without them; the iterator owns snapshot
    // resources and must be closed, hence try-with-resources.
    try (ResourceIterator<StoreFileMetadata> storeFiles = dataSource.listStoreFiles(false)) {
        while (storeFiles.hasNext()) {
            File candidate = storeFiles.next().file();
            if (!candidate.getPath().contains(indexName)) {
                continue;
            }
            luceneFiles.add(candidate);
        }
    }
    return luceneFiles;
}
Aggregations