Use of org.h2.mvstore.FileStore in project h2database by h2database: class ValueLob, method getInputStream.
@Override
public InputStream getInputStream(long oneBasedOffset, long length) {
    if (fileName == null) {
        return super.getInputStream(oneBasedOffset, length);
    }
    FileStore store = handler.openFile(fileName, "r", true);
    boolean alwaysClose = SysProperties.lobCloseBetweenReads;
    InputStream inputStream = new BufferedInputStream(
            new FileStoreInputStream(store, handler, compressed, alwaysClose),
            Constants.IO_BUFFER_SIZE);
    return rangeInputStream(inputStream, oneBasedOffset, length, store.length());
}
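
The rangeInputStream helper clips the underlying stream to the requested window (H2's actual helper also validates the range before wrapping). Below is a minimal illustrative sketch of the same skip-then-limit idea; SimpleRangeInputStream is a hypothetical name, not an H2 class:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Hypothetical sketch in the spirit of ValueLob.rangeInputStream():
// convert the 1-based offset to a 0-based position, skip to it, and
// stop returning bytes once 'length' is exhausted.
public class SimpleRangeInputStream extends FilterInputStream {

    private long remaining; // bytes the caller may still read

    public SimpleRangeInputStream(InputStream in, long oneBasedOffset, long length)
            throws IOException {
        super(in);
        in.skipNBytes(oneBasedOffset - 1); // 1-based offset -> 0-based position (Java 12+)
        this.remaining = length;
    }

    @Override
    public int read() throws IOException {
        if (remaining <= 0) {
            return -1; // window exhausted
        }
        int b = in.read();
        if (b >= 0) {
            remaining--;
        }
        return b;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        if (remaining <= 0) {
            return -1;
        }
        int n = in.read(b, off, (int) Math.min(len, remaining));
        if (n > 0) {
            remaining -= n;
        }
        return n;
    }
}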
Use of org.h2.mvstore.FileStore in project h2database by h2database: class ValueLobDb, method getInputStream.
@Override
public InputStream getInputStream(long oneBasedOffset, long length) {
    long byteCount;
    InputStream inputStream;
    if (small != null) {
        return super.getInputStream(oneBasedOffset, length);
    } else if (fileName != null) {
        FileStore store = handler.openFile(fileName, "r", true);
        boolean alwaysClose = SysProperties.lobCloseBetweenReads;
        byteCount = store.length();
        inputStream = new BufferedInputStream(
                new FileStoreInputStream(store, handler, false, alwaysClose),
                Constants.IO_BUFFER_SIZE);
    } else {
        byteCount = (type == Value.BLOB) ? precision : -1;
        try {
            inputStream = handler.getLobStorage().getInputStream(this, hmac, byteCount);
        } catch (IOException e) {
            throw DbException.convertIOException(e, toString());
        }
    }
    return ValueLob.rangeInputStream(inputStream, oneBasedOffset, length, byteCount);
}
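
From application code these paths are usually reached through the JDBC LOB API rather than called directly. A minimal sketch, assuming a hypothetical in-memory database and table; Blob.getBinaryStream(pos, length) uses a 1-based position, matching the oneBasedOffset parameter above:

import java.io.InputStream;
import java.sql.Blob;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

public class BlobRangeExample {
    public static void main(String[] args) throws Exception {
        // hypothetical in-memory database; any H2 URL works the same way
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement stat = conn.createStatement()) {
            stat.execute("create table test(data blob)");
            try (PreparedStatement prep =
                    conn.prepareStatement("insert into test values(?)")) {
                prep.setBytes(1, new byte[100_000]);
                prep.execute();
            }
            try (ResultSet rs = stat.executeQuery("select data from test")) {
                rs.next();
                Blob blob = rs.getBlob(1);
                // read 1024 bytes starting at 1-based position 50000
                try (InputStream in = blob.getBinaryStream(50_000, 1_024)) {
                    System.out.println("read " + in.readAllBytes().length + " bytes");
                }
            }
        }
    }
}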
Use of org.h2.mvstore.FileStore in project h2database by h2database: class MVStore, method readStoreHeader.
private void readStoreHeader() {
    Chunk newest = null;
    boolean assumeCleanShutdown = true;
    boolean validStoreHeader = false;
    // find out which chunk and version are the newest
    // read the first two blocks
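    // (the store header is duplicated across the first two blocks, so a
    // torn write to one copy still leaves the other readable)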
    ByteBuffer fileHeaderBlocks = fileStore.readFully(0, 2 * BLOCK_SIZE);
    byte[] buff = new byte[BLOCK_SIZE];
    for (int i = 0; i <= BLOCK_SIZE; i += BLOCK_SIZE) {
        fileHeaderBlocks.get(buff);
        // the following can fail for various reasons
        try {
            HashMap<String, String> m = DataUtils.parseChecksummedMap(buff);
            if (m == null) {
                assumeCleanShutdown = false;
                continue;
            }
            long version = DataUtils.readHexLong(m, HDR_VERSION, 0);
            // if both header blocks agree on the version,
            // we'll continue on the happy path and assume the previous shutdown was clean
            assumeCleanShutdown = assumeCleanShutdown &&
                    (newest == null || version == newest.version);
            if (newest == null || version > newest.version) {
                validStoreHeader = true;
                storeHeader.putAll(m);
                creationTime = DataUtils.readHexLong(m, HDR_CREATED, 0);
                int chunkId = DataUtils.readHexInt(m, HDR_CHUNK, 0);
                long block = DataUtils.readHexLong(m, HDR_BLOCK, 2);
                Chunk test = readChunkHeaderAndFooter(block, chunkId);
                if (test != null) {
                    newest = test;
                }
            }
        } catch (Exception ignore) {
            assumeCleanShutdown = false;
        }
    }
    if (!validStoreHeader) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT,
                "Store header is corrupt: {0}", fileStore);
    }
    int blockSize = DataUtils.readHexInt(storeHeader, HDR_BLOCK_SIZE, BLOCK_SIZE);
    if (blockSize != BLOCK_SIZE) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT,
                "Block size {0} is currently not supported", blockSize);
    }
    long format = DataUtils.readHexLong(storeHeader, HDR_FORMAT, 1);
    if (!fileStore.isReadOnly()) {
        if (format > FORMAT_WRITE_MAX) {
            throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MAX,
                    "The write format {0} is larger than the supported format {1}");
        } else if (format < FORMAT_WRITE_MIN) {
            throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MIN,
                    "The write format {0} is smaller than the supported format {1}");
        }
    }
    format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format);
    if (format > FORMAT_READ_MAX) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT,
                "The read format {0} is larger than the supported format {1}",
                format, FORMAT_READ_MAX);
    } else if (format < FORMAT_READ_MIN) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT,
                "The read format {0} is smaller than the supported format {1}",
                format, FORMAT_READ_MIN);
    }
    assumeCleanShutdown = assumeCleanShutdown && newest != null && !recoveryMode;
    if (assumeCleanShutdown) {
        assumeCleanShutdown = DataUtils.readHexInt(storeHeader, HDR_CLEAN, 0) != 0;
    }
    chunks.clear();
    long now = System.currentTimeMillis();
    // calculate the year (doesn't have to be exact;
    // we assume 365.25 days per year, * 4 = 1461)
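    // (so 1000L * 60 * 60 * 6 * 1461 = ms per hour * 8766 hours,
    // where 8766 = 6 * 1461 = 24 * 365.25 hours per average year)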
    int year = 1970 + (int) (now / (1000L * 60 * 60 * 6 * 1461));
    if (year < 2014) {
        // if the year is before 2014,
        // we assume the system doesn't have a real-time clock,
        // and we set the creationTime to the past, so that
        // existing chunks are overwritten
        creationTime = now - fileStore.getDefaultRetentionTime();
    } else if (now < creationTime) {
        // the system time was set to the past:
        // we change the creation time
        creationTime = now;
        storeHeader.put(HDR_CREATED, creationTime);
    }
    long fileSize = fileStore.size();
    long blocksInStore = fileSize / BLOCK_SIZE;
    Comparator<Chunk> chunkComparator = (one, two) -> {
        int result = Long.compare(two.version, one.version);
        if (result == 0) {
            // out of two copies of the same chunk we prefer the one
            // closer to the beginning of the file (presumably the later version)
            result = Long.compare(one.block, two.block);
        }
        return result;
    };
    Map<Long, Chunk> validChunksByLocation = new HashMap<>();
    if (!assumeCleanShutdown) {
        Chunk tailChunk = discoverChunk(blocksInStore);
        if (tailChunk != null) {
            // for a possible full scan later on
            blocksInStore = tailChunk.block;
            if (newest == null || tailChunk.version > newest.version) {
                newest = tailChunk;
            }
        }
        if (newest != null) {
            // and follow the chain of next chunks
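            // (each chunk header records the predicted location of its
            // successor, so walking this chain can surface chunks newer
            // than the one referenced from the store header)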
            while (true) {
                validChunksByLocation.put(newest.block, newest);
                if (newest.next == 0 || newest.next >= blocksInStore) {
                    // no (valid) next
                    break;
                }
                Chunk test = readChunkHeaderAndFooter(newest.next, newest.id + 1);
                if (test == null || test.version <= newest.version) {
                    break;
                }
                // if the shutdown was really clean, this chain should be empty
                assumeCleanShutdown = false;
                newest = test;
            }
        }
    }
    if (assumeCleanShutdown) {
        // quickly check the latest 20 chunks referenced in the meta table
        Queue<Chunk> chunksToVerify =
                new PriorityQueue<>(20, Collections.reverseOrder(chunkComparator));
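        // (under the reversed comparator the queue's head is the oldest
        // chunk, so polling once size reaches 20 evicts the oldest and
        // keeps roughly the 20 newest for verification)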
        try {
            setLastChunk(newest);
            // load the chunk metadata: although meta's root page resides in the lastChunk,
            // traversing the meta map might recursively load other chunks
            Cursor<String, String> cursor = layout.cursor(DataUtils.META_CHUNK);
            while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) {
                Chunk c = Chunk.fromString(cursor.getValue());
                assert c.version <= currentVersion;
                // might be there already, due to meta traversal
                // see readPage() ... getChunkIfFound()
                chunks.putIfAbsent(c.id, c);
                chunksToVerify.offer(c);
                if (chunksToVerify.size() == 20) {
                    chunksToVerify.poll();
                }
            }
            Chunk c;
            while (assumeCleanShutdown && (c = chunksToVerify.poll()) != null) {
                Chunk test = readChunkHeaderAndFooter(c.block, c.id);
                assumeCleanShutdown = test != null;
                if (assumeCleanShutdown) {
                    validChunksByLocation.put(test.block, test);
                }
            }
        } catch (MVStoreException ignored) {
            assumeCleanShutdown = false;
        }
    }
    if (!assumeCleanShutdown) {
        boolean quickRecovery = false;
        if (!recoveryMode) {
            // now we know that the previous shutdown did not go well and the file
            // is possibly corrupted, but there is still hope for a quick recovery
            // this collection will hold potential candidates for lastChunk to fall back to,
            // in order from the most to the least likely
            Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]);
            Arrays.sort(lastChunkCandidates, chunkComparator);
            Map<Integer, Chunk> validChunksById = new HashMap<>();
            for (Chunk chunk : lastChunkCandidates) {
                validChunksById.put(chunk.id, chunk);
            }
            quickRecovery = findLastChunkWithCompleteValidChunkSet(lastChunkCandidates,
                    validChunksByLocation, validChunksById, false);
        }
        if (!quickRecovery) {
            // scan the whole file and try to fetch a chunk header and/or footer out of
            // every block; matching pairs with nothing in between are considered a valid chunk
            long block = blocksInStore;
            Chunk tailChunk;
            while ((tailChunk = discoverChunk(block)) != null) {
                block = tailChunk.block;
                validChunksByLocation.put(block, tailChunk);
            }
            // this collection will hold potential candidates for lastChunk to fall back to,
            // in order from the most to the least likely
            Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]);
            Arrays.sort(lastChunkCandidates, chunkComparator);
            Map<Integer, Chunk> validChunksById = new HashMap<>();
            for (Chunk chunk : lastChunkCandidates) {
                validChunksById.put(chunk.id, chunk);
            }
            if (!findLastChunkWithCompleteValidChunkSet(lastChunkCandidates,
                    validChunksByLocation, validChunksById, true) && lastChunk != null) {
                throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT,
                        "File is corrupted - unable to recover a valid set of chunks");
            }
        }
    }
    fileStore.clear();
    // build the free space list
    for (Chunk c : chunks.values()) {
        if (c.isSaved()) {
            long start = c.block * BLOCK_SIZE;
            int length = c.len * BLOCK_SIZE;
            fileStore.markUsed(start, length);
        }
        if (!c.isLive()) {
            deadChunks.offer(c);
        }
    }
    assert validateFileLength("on open");
}
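
The recoveryMode flag consulted above comes from the store builder. A minimal sketch of opening a store with recovery forced on, assuming the H2 2.x MVStore.Builder API (the file name is illustrative); this makes readStoreHeader skip the clean-shutdown fast path and validate chunks instead:

import org.h2.mvstore.MVStore;

public class RecoveryOpenExample {
    public static void main(String[] args) {
        // recoveryMode() forces assumeCleanShutdown to false in readStoreHeader
        MVStore store = new MVStore.Builder()
                .fileName("data.mv.db") // illustrative file name
                .recoveryMode()
                .open();
        try {
            System.out.println("store version: " + store.getCurrentVersion());
        } finally {
            store.close();
        }
    }
}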
Use of org.h2.mvstore.FileStore in project h2database by h2database: class SessionRemote, method openFile.
@Override
public FileStore openFile(String name, String mode, boolean mustExist) {
    if (mustExist && !FileUtils.exists(name)) {
        throw DbException.get(ErrorCode.FILE_NOT_FOUND_1, name);
    }
    FileStore store;
    if (cipher == null) {
        store = FileStore.open(this, name, mode);
    } else {
        store = FileStore.open(this, name, mode, cipher, fileEncryptionKey, 0);
    }
    store.setCheckedWriting(false);
    try {
        store.init();
    } catch (DbException e) {
        store.closeSilently();
        throw e;
    }
    return store;
}
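
A caller reads the returned store with explicit positioning. A minimal sketch, assuming FileStore's seek/readFully/closeSilently methods and HEADER_LENGTH constant; the session variable and file name are illustrative:

// illustrative helper; 'session' would be an open SessionRemote
static byte[] readFirstBlock(SessionRemote session) {
    FileStore store = session.openFile("lob/1.lob.db", "r", true);
    try {
        byte[] buff = new byte[4096];
        // skip the fixed-size file header before reading payload bytes
        // (assumes the file contains at least this much data)
        store.seek(FileStore.HEADER_LENGTH);
        store.readFully(buff, 0, buff.length);
        return buff;
    } finally {
        store.closeSilently();
    }
}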
Use of org.h2.mvstore.FileStore in project h2database by h2database: class ValueBlob, method createTemporary.
/**
 * Create a BLOB in a temporary file.
 */
private static ValueBlob createTemporary(DataHandler handler, byte[] buff, int len,
        InputStream in, long remaining) throws IOException {
    String fileName = ValueLob.createTempLobFileName(handler);
    FileStore tempFile = handler.openFile(fileName, "rw", false);
    tempFile.autoDelete();
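    // autoDelete() registers the file with the handler's temp-file deleter,
    // so it is removed once the LOB is no longer referenced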
    long tmpPrecision = 0;
    try (FileStoreOutputStream out = new FileStoreOutputStream(tempFile, null)) {
        while (true) {
            tmpPrecision += len;
            out.write(buff, 0, len);
            remaining -= len;
            if (remaining <= 0) {
                break;
            }
            len = ValueLob.getBufferSize(handler, remaining);
            len = IOUtils.readFully(in, buff, len);
            if (len <= 0) {
                break;
            }
        }
    }
    return new ValueBlob(new LobDataFile(handler, fileName, tempFile), tmpPrecision);
}
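
The loop above follows a common spool pattern: the first buffer was already filled by the caller, so it is written before the next read. A self-contained sketch of the same pattern using only java.io (names are illustrative, not H2 APIs):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

class SpoolExample {
    // Write the pre-filled first buffer, then alternate read/write until
    // 'remaining' bytes have been consumed or the input ends early;
    // returns the number of bytes written, mirroring tmpPrecision above.
    static long spool(byte[] buff, int len, InputStream in, long remaining,
            OutputStream out) throws IOException {
        long written = 0;
        while (true) {
            written += len;
            out.write(buff, 0, len);
            remaining -= len;
            if (remaining <= 0) {
                break; // requested byte count fully consumed
            }
            int target = (int) Math.min(buff.length, remaining);
            len = in.read(buff, 0, target);
            if (len <= 0) {
                break; // input ended before 'remaining' was exhausted
            }
        }
        return written;
    }
}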