Use of org.apache.hyracks.storage.common.file.BufferedFileHandle in project asterixdb by apache.
The class BufferCache, method openFile.
@Override
public void openFile(int fileId) throws HyracksDataException {
    if (LOGGER.isLoggable(fileOpsLevel)) {
        LOGGER.log(fileOpsLevel, "Opening file: " + fileId + " in cache: " + this);
    }
    synchronized (fileInfoMap) {
        BufferedFileHandle fInfo;
        fInfo = fileInfoMap.get(fileId);
        if (fInfo == null) {
            // if the map is full, make room by cleaning up unreferenced files
            boolean unreferencedFileFound = true;
            while (fileInfoMap.size() >= maxOpenFiles && unreferencedFileFound) {
                unreferencedFileFound = false;
                for (Map.Entry<Integer, BufferedFileHandle> entry : fileInfoMap.entrySet()) {
                    if (entry.getValue().getReferenceCount() <= 0) {
                        int entryFileId = entry.getKey();
                        boolean fileHasBeenDeleted = entry.getValue().fileHasBeenDeleted();
                        sweepAndFlush(entryFileId, !fileHasBeenDeleted);
                        if (!fileHasBeenDeleted) {
                            ioManager.close(entry.getValue().getFileHandle());
                        }
                        fileInfoMap.remove(entryFileId);
                        unreferencedFileFound = true;
                        // fileInfoMap was modified; break out of the for-each and re-check the while condition
                        break;
                    }
                }
            }
            if (fileInfoMap.size() >= maxOpenFiles) {
                throw new HyracksDataException("Could not open fileId " + fileId + ". Max number of files "
                        + maxOpenFiles + " already opened and referenced.");
            }
            // create, open, and map new file reference
            FileReference fileRef = fileMapManager.lookupFileName(fileId);
            IFileHandle fh = ioManager.open(fileRef, IIOManager.FileReadWriteMode.READ_WRITE,
                    IIOManager.FileSyncMode.METADATA_ASYNC_DATA_ASYNC);
            fInfo = new BufferedFileHandle(fileId, fh);
            fileInfoMap.put(fileId, fInfo);
        }
        fInfo.incReferenceCount();
    }
}
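For context, a caller is expected to balance each openFile with a closeFile so that the reference count taken above eventually returns to zero and the file becomes eligible for the unreferenced-file sweep. Below is a minimal sketch of that pairing, assuming the IBufferCache interface from org.apache.hyracks.storage.common.buffercache; the withOpenFile helper is illustrative, not part of the codebase.

import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;

final class FileOpenExample {
    // Open the file, use it, and always release the reference so the cache can evict it later.
    static void withOpenFile(IBufferCache bufferCache, int fileId) throws HyracksDataException {
        bufferCache.openFile(fileId); // bumps the file's reference count
        try {
            // ... pin and unpin pages of fileId here ...
        } finally {
            bufferCache.closeFile(fileId); // drops the count; at zero the file can be swept out
        }
    }
}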
Use of org.apache.hyracks.storage.common.file.BufferedFileHandle in project asterixdb by apache.
The class BufferCache, method read.
private void read(CachedPage cPage) throws HyracksDataException {
    BufferedFileHandle fInfo = getFileInfo(cPage);
    cPage.buffer.clear();
    BufferCacheHeaderHelper header = checkoutHeaderHelper();
    try {
        long bytesRead = ioManager.syncRead(fInfo.getFileHandle(),
                getOffsetForPage(BufferedFileHandle.getPageId(cPage.dpid)), header.prepareRead());
        if (bytesRead != getPageSizeWithHeader()) {
            if (bytesRead == -1) {
                // disk order scan code seems to rely on this behavior, so silently return
                return;
            }
            throw new HyracksDataException("Failed to read a complete page: " + bytesRead);
        }
        int totalPages = header.processRead(cPage);
        if (totalPages > 1) {
            pageReplacementStrategy.fixupCapacityOnLargeRead(cPage);
            cPage.buffer.position(pageSize);
            cPage.buffer.limit(totalPages * pageSize);
            ioManager.syncRead(fInfo.getFileHandle(), getOffsetForPage(cPage.getExtraBlockPageId()), cPage.buffer);
        }
    } finally {
        returnHeaderHelper(header);
    }
}
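The read path above recovers the on-disk page id from cPage.dpid via BufferedFileHandle.getPageId, and pinSanityCheck below recovers the file id via getFileId. The sketch that follows shows the packing this implies, assuming the conventional split of the file id into the high 32 bits and the page id into the low 32 bits; the helper names are illustrative and not the actual BufferedFileHandle code.

final class DiskPageIdSketch {
    // Pack a file id and a page id into one 64-bit disk page id (dpid).
    static long diskPageId(int fileId, int pageId) {
        return ((long) fileId << 32) | (pageId & 0xFFFFFFFFL);
    }

    // Recover the file id from the high 32 bits.
    static int fileIdOf(long dpid) {
        return (int) (dpid >>> 32);
    }

    // Recover the page id from the low 32 bits.
    static int pageIdOf(long dpid) {
        return (int) dpid;
    }
}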
Use of org.apache.hyracks.storage.common.file.BufferedFileHandle in project asterixdb by apache.
The class BufferCache, method pinSanityCheck.
private void pinSanityCheck(long dpid) throws HyracksDataException {
    if (closed) {
        throw new HyracksDataException("pin called on a closed cache");
    }
    // check whether file has been created and opened
    int fileId = BufferedFileHandle.getFileId(dpid);
    BufferedFileHandle fInfo;
    synchronized (fileInfoMap) {
        fInfo = fileInfoMap.get(fileId);
    }
    if (fInfo == null) {
        throw new HyracksDataException("pin called on a fileId " + fileId + " that has not been created.");
    } else if (fInfo.getReferenceCount() <= 0) {
        throw new HyracksDataException("pin called on a fileId " + fileId + " that has not been opened.");
    }
}
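The check above implies a usage contract: a page may only be pinned after its file has been registered (created) and openFile has been called (opened). A hedged sketch of a conforming caller follows, assuming the usual IBufferCache pin/unpin API and BufferedFileHandle.getDiskPageId; the readPage helper is illustrative, not part of the codebase.

import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;
import org.apache.hyracks.storage.common.file.BufferedFileHandle;

final class PinExample {
    // Pin an existing page of an already-opened file, read it under a latch, and unpin it.
    static void readPage(IBufferCache bufferCache, int fileId, int pageId) throws HyracksDataException {
        long dpid = BufferedFileHandle.getDiskPageId(fileId, pageId);
        ICachedPage page = bufferCache.pin(dpid, false); // false: the page already exists on disk
        page.acquireReadLatch();
        try {
            // ... read page.getBuffer() ...
        } finally {
            page.releaseReadLatch();
            bufferCache.unpin(page);
        }
    }
}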
Use of org.apache.hyracks.storage.common.file.BufferedFileHandle in project asterixdb by apache.
The class BufferCache, method force.
@Override
public void force(int fileId, boolean metadata) throws HyracksDataException {
    BufferedFileHandle fInfo;
    synchronized (fileInfoMap) {
        fInfo = fileInfoMap.get(fileId);
    }
    ioManager.sync(fInfo.getFileHandle(), metadata);
}
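Note that force only syncs the underlying file handle; it does not by itself write out dirty cached pages. A hedged sketch of a typical durability sequence follows, assuming IBufferCache exposes flushDirtyPage as in this codebase; the flushAndForce helper and the dirtyPages collection are illustrative.

import java.util.List;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.buffercache.ICachedPage;

final class ForceExample {
    // Flush dirty pages to the file first, then force the file (and optionally its metadata) to disk.
    static void flushAndForce(IBufferCache bufferCache, int fileId, List<ICachedPage> dirtyPages)
            throws HyracksDataException {
        for (ICachedPage page : dirtyPages) {
            bufferCache.flushDirtyPage(page); // writes the page through to the file
        }
        bufferCache.force(fileId, true); // true: also sync file metadata
    }
}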
Use of org.apache.hyracks.storage.common.file.BufferedFileHandle in project asterixdb by apache.
The class BufferCache, method write.
void write(CachedPage cPage) throws HyracksDataException {
    BufferedFileHandle fInfo = getFileInfo(cPage);
    // synchronize on fInfo to prevent the file handle from being deleted until the page is written
    synchronized (fInfo) {
        if (!fInfo.fileHasBeenDeleted()) {
            ByteBuffer buf = cPage.buffer.duplicate();
            final int totalPages = cPage.getFrameSizeMultiplier();
            final int extraBlockPageId = cPage.getExtraBlockPageId();
            final boolean contiguousLargePages = (BufferedFileHandle.getPageId(cPage.dpid) + 1) == extraBlockPageId;
            BufferCacheHeaderHelper header = checkoutHeaderHelper();
            try {
                buf.limit(contiguousLargePages ? pageSize * totalPages : pageSize);
                buf.position(0);
                long bytesWritten = ioManager.syncWrite(fInfo.getFileHandle(),
                        getOffsetForPage(BufferedFileHandle.getPageId(cPage.dpid)), header.prepareWrite(cPage, buf));
                if (bytesWritten != (contiguousLargePages ? pageSize * (totalPages - 1) : 0)
                        + getPageSizeWithHeader()) {
                    throw new HyracksDataException("Failed to write completely: " + bytesWritten);
                }
            } finally {
                returnHeaderHelper(header);
            }
            if (totalPages > 1 && !contiguousLargePages) {
                buf.limit(totalPages * pageSize);
                ioManager.syncWrite(fInfo.getFileHandle(), getOffsetForPage(extraBlockPageId), buf);
            }
            assert buf.capacity() == (pageSize * totalPages);
        }
    }
}
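The write path above distinguishes two layouts for a multi-page (large) frame: when the extra block starts at the page id immediately after the frame's first page, everything is written in one syncWrite; otherwise the overflow pages get a second syncWrite at the extra block's offset. The sketch below restates the two checks the method performs; the helpers are illustrative, not actual BufferCache methods.

final class LargePageWriteSketch {
    // Mirrors the contiguousLargePages test above: the overflow pages of a large frame are
    // contiguous when they begin at the page id immediately after the frame's first page.
    static boolean contiguousLargePages(int firstPageId, int extraBlockPageId) {
        return firstPageId + 1 == extraBlockPageId;
    }

    // Mirrors the bytesWritten check above: one header plus the first page always goes out,
    // and the remaining (totalPages - 1) pages are included only when they are contiguous.
    static long expectedBytesWritten(boolean contiguous, int totalPages, int pageSize, int pageSizeWithHeader) {
        return (contiguous ? (long) pageSize * (totalPages - 1) : 0) + pageSizeWithHeader;
    }
}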