Use of org.h2.mvstore.MVStoreException in project mxTune by AeronicaMC.
In class ModDataStore, method start:
// TODO: See about hooking the world gather event to force a commit of the music storage.
// TODO: Set up a simple music storage backup that maintains only a user-defined number of backups.
/**
 * Opens (or creates) the compressed MVStore file that backs the server-side mxTune data store,
 * then runs a read self-test and reaps orphaned sheet music.
 *
 * @throws MXTuneRuntimeException if the cache file cannot be resolved or the store cannot be opened
 */
public static void start() {
// Fallback description used in the error log in case resolving the cache file itself fails.
String pathFileName = String.format("Folder: '%s', Filename: '%s'", SERVER_FOLDER, SERVER_DATA_STORE_FILENAME);
try {
pathFileName = getCacheFile(SERVER_FOLDER, SERVER_DATA_STORE_FILENAME, LogicalSide.SERVER).toString();
mvStore = new MVStore.Builder().fileName(pathFileName).compress().open();
} catch (IOException | MVStoreException e) {
// Single parameterized call: message, offending path, and stack trace together
// (the previous separate LOGGER.error(e) call lost the surrounding context).
LOGGER.error("Big OOPS here! Out of disk space? {}", pathFileName, e);
throw new MXTuneRuntimeException("Unable to create mxtune data store.", e);
} finally {
if (getMvStore() != null)
LOGGER.debug("MVStore Started. Commit Version: {}, file: {}", getMvStore().getCurrentVersion(), getMvStore().getFileStore());
}
testGet();
// TODO: Remember to set whatIf to false for production!
// Return value (number of reaped entries) was assigned to an unused local; drop the assignment.
reapSheetMusic(true);
}
Use of org.h2.mvstore.MVStoreException in project SpringStudy by myounghaklee.
In class TestReorderWrites, method testMVStore:
/**
 * Simulates power-off failures at random points while writing to an MVStore and
 * verifies the store can always be reopened with its committed data intact.
 *
 * @param partialWrite whether the simulated file system should also reorder/split
 *                     writes (partial-write simulation) before the power-off point
 */
private void testMVStore(final boolean partialWrite) {
// Add partial write test
// @since 2019-07-31 little-pan
println(String.format("testMVStore(): %s partial write", partialWrite ? "Enable" : "Disable"));
FilePathReorderWrites.setPartialWrites(partialWrite);
FilePathReorderWrites fs = FilePathReorderWrites.register();
String fileName = "reorder:memFS:test.mv";
try {
// Each iteration i is an independent crash/recovery round, seeded with i for reproducibility.
for (int i = 0; i < (config.big ? 1000 : 100); i++) {
log(i + " --------------------------------");
// this test is not interested in power off failures during
// initial creation
fs.setPowerOffCountdown(0, 0);
// release the static data this test generates
FileUtils.delete("memFS:test.mv");
FileUtils.delete("memFS:test.mv.copy");
MVStore store = new MVStore.Builder().fileName(fileName).autoCommitDisabled().open();
// store.setRetentionTime(10);
Map<Integer, byte[]> map = store.openMap("data");
// Sentinel entry committed and synced BEFORE the power-off countdown starts;
// it must survive the simulated crash and is checked after reopen below.
map.put(-1, new byte[1]);
store.commit();
store.getFileStore().sync();
Random r = new Random(i);
int stop = 4 + r.nextInt(config.big ? 150 : 20);
log("countdown start");
// After 'stop' further writes the simulated file system will start failing.
fs.setPowerOffCountdown(stop, i);
try {
for (int j = 1; j < 100; j++) {
Map<Integer, Integer> newMap = store.openMap("d" + j);
newMap.put(j, j * 10);
int key = r.nextInt(10);
int len = 10 * r.nextInt(1000);
if (r.nextBoolean()) {
map.remove(key);
} else {
map.put(key, new byte[len]);
}
log("op " + j + ": ");
store.commit();
// Occasionally interleave maintenance operations so the crash can also
// hit compaction/chunk-movement code paths.
switch(r.nextInt(10)) {
case 0:
log("op compact");
store.compact(100, 10 * 1024);
break;
case 1:
log("op compactMoveChunks");
store.compactMoveChunks();
log("op compactMoveChunks done");
break;
}
}
// write has to fail at some point
fail();
} catch (MVStoreException e) {
log("stop " + e + ", cause: " + e.getCause());
// expected
}
try {
store.close();
} catch (MVStoreException e) {
// expected
store.closeImmediately();
}
log("verify");
// Disable the failure injection (large countdown) before reopening for verification.
fs.setPowerOffCountdown(100, 0);
if (LOG) {
MVStoreTool.dump(fileName, true);
}
store = new MVStore.Builder().fileName(fileName).autoCommitDisabled().open();
map = store.openMap("data");
// The pre-crash committed sentinel must have survived.
if (!map.containsKey(-1)) {
fail("key not found, size=" + map.size() + " i=" + i);
} else {
assertEquals("i=" + i, 1, map.get(-1).length);
}
// Touch every per-iteration map to make sure all pages are readable after recovery.
for (int j = 0; j < 100; j++) {
Map<Integer, Integer> newMap = store.openMap("d" + j);
newMap.get(j);
}
map.keySet();
store.close();
}
} finally {
// release the static data this test generates
FileUtils.delete("memFS:test.mv");
FileUtils.delete("memFS:test.mv.copy");
}
}
Use of org.h2.mvstore.MVStoreException in project SpringStudy by myounghaklee.
In class TestTransactionStore, method testConcurrentAddRemove:
/**
 * Hammers a small key space from several threads, each repeatedly removing and
 * re-inserting a random key in its own transaction; MVStoreException from a
 * concurrent-update conflict is swallowed and the loop simply retries.
 */
private static void testConcurrentAddRemove() throws InterruptedException {
try (MVStore s = MVStore.open(null)) {
int threadCount = 3;
int keyCount = 2;
TransactionStore ts = new TransactionStore(s);
ts.init();
final Random r = new Random(1);
Task[] tasks = new Task[threadCount];
for (int i = 0; i < threadCount; i++) {
Task worker = new Task() {
@Override
public void call() {
while (!stop) {
Transaction txn = ts.begin();
TransactionMap<Integer, Integer> data = txn.openMap("data");
int slot = r.nextInt(keyCount);
try {
data.remove(slot);
data.put(slot, r.nextInt());
} catch (MVStoreException e) {
// conflict with another thread - drop this attempt and retry
}
// commit whatever succeeded; may be an empty transaction after a conflict
txn.commit();
}
}
};
worker.execute();
tasks[i] = worker;
}
// let the workers race for a second, then stop and surface any failure
Thread.sleep(1000);
for (Task worker : tasks) {
worker.get();
}
}
}
Use of org.h2.mvstore.MVStoreException in project SpringStudy by myounghaklee.
In class TestTransactionStore, method testConcurrentAdd:
/**
 * Two writers (one background task, plus this thread) race to put the same key
 * into one transactional map; conflicts raise MVStoreException and are counted.
 * Verifies that some operations fail (contention happened) but fewer than 90%
 * do (progress is still being made).
 */
private void testConcurrentAdd() {
try (MVStore s = MVStore.open(null)) {
TransactionStore ts = new TransactionStore(s);
ts.init();
Random r = new Random(1);
// 'key' is the contended key both threads write; 'failCount' counts conflicts from either side.
AtomicInteger key = new AtomicInteger();
AtomicInteger failCount = new AtomicInteger();
Task task = new Task() {
@Override
public void call() {
while (!stop) {
int k = key.get();
Transaction tx = ts.begin();
TransactionMap<Integer, Integer> map = tx.openMap("data");
try {
map.put(k, r.nextInt());
} catch (MVStoreException e) {
failCount.incrementAndGet();
// ignore and retry
}
// commit regardless; after a conflict this may be an empty transaction
tx.commit();
}
}
};
task.execute();
int count = 100000;
for (int i = 0; i < count; i++) {
// advance the contended key so both threads target the same fresh slot
key.set(i);
Transaction tx = ts.begin();
TransactionMap<Integer, Integer> map = tx.openMap("data");
try {
map.put(i, r.nextInt());
} catch (MVStoreException e) {
failCount.incrementAndGet();
// ignore and retry
}
tx.commit();
if (failCount.get() > 0 && i > 4000) {
// stop earlier, if possible
// note: 'count' is shrunk to the actual iteration count for the assertions below
count = i;
break;
}
}
task.get();
// we expect at least 10% the operations were successful
assertTrue(failCount + " >= " + (count * 0.9), failCount.get() < count * 0.9);
// we expect at least a few failures
assertTrue(failCount.toString(), failCount.get() > 0);
}
}
Use of org.h2.mvstore.MVStoreException in project SpringStudy by myounghaklee.
In class MVStore, method readStoreHeader:
/**
 * Reads and validates the two store header copies, determines the newest chunk
 * and version, and rebuilds the chunk metadata — either quickly (clean shutdown)
 * or via progressively more expensive recovery scans (quick recovery, then a
 * full-file scan). Finally rebuilds the free-space map from the surviving chunks.
 *
 * @throws MVStoreException if the header is corrupt, the format is unsupported,
 *         or no valid set of chunks can be recovered
 */
private void readStoreHeader() {
Chunk newest = null;
boolean assumeCleanShutdown = true;
boolean validStoreHeader = false;
// find out which chunk and version are the newest
// read the first two blocks
ByteBuffer fileHeaderBlocks = fileStore.readFully(0, 2 * BLOCK_SIZE);
byte[] buff = new byte[BLOCK_SIZE];
// Two redundant header copies live in the first two blocks; this loop runs for
// i == 0 and i == BLOCK_SIZE (exactly two iterations).
for (int i = 0; i <= BLOCK_SIZE; i += BLOCK_SIZE) {
fileHeaderBlocks.get(buff);
// the following can fail for various reasons
try {
HashMap<String, String> m = DataUtils.parseChecksummedMap(buff);
if (m == null) {
// checksum mismatch: this header copy is unusable
assumeCleanShutdown = false;
continue;
}
long version = DataUtils.readHexLong(m, HDR_VERSION, 0);
// if both header blocks do agree on version
// we'll continue on happy path - assume that previous shutdown was clean
assumeCleanShutdown = assumeCleanShutdown && (newest == null || version == newest.version);
if (newest == null || version > newest.version) {
validStoreHeader = true;
storeHeader.putAll(m);
creationTime = DataUtils.readHexLong(m, HDR_CREATED, 0);
int chunkId = DataUtils.readHexInt(m, HDR_CHUNK, 0);
long block = DataUtils.readHexLong(m, HDR_BLOCK, 2);
// Only trust this header copy if the chunk it points at actually checks out.
Chunk test = readChunkHeaderAndFooter(block, chunkId);
if (test != null) {
newest = test;
}
}
} catch (Exception ignore) {
// any parse failure of one copy just disqualifies the clean-shutdown fast path
assumeCleanShutdown = false;
}
}
if (!validStoreHeader) {
throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, "Store header is corrupt: {0}", fileStore);
}
int blockSize = DataUtils.readHexInt(storeHeader, HDR_BLOCK_SIZE, BLOCK_SIZE);
if (blockSize != BLOCK_SIZE) {
throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, "Block size {0} is currently not supported", blockSize);
}
// Write-format check is skipped for read-only stores; read format is checked below for both.
long format = DataUtils.readHexLong(storeHeader, HDR_FORMAT, 1);
if (!fileStore.isReadOnly()) {
if (format > FORMAT_WRITE_MAX) {
throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MAX, "The write format {0} is larger than the supported format {1}");
} else if (format < FORMAT_WRITE_MIN) {
throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MIN, "The write format {0} is smaller than the supported format {1}");
}
}
format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format);
if (format > FORMAT_READ_MAX) {
throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, "The read format {0} is larger than the supported format {1}", format, FORMAT_READ_MAX);
} else if (format < FORMAT_READ_MIN) {
throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, "The read format {0} is smaller than the supported format {1}", format, FORMAT_READ_MIN);
}
// Recovery mode always forces the full validation path, even if headers looked clean.
assumeCleanShutdown = assumeCleanShutdown && newest != null && !recoveryMode;
if (assumeCleanShutdown) {
// the header's explicit "clean" flag is the final arbiter for the fast path
assumeCleanShutdown = DataUtils.readHexInt(storeHeader, HDR_CLEAN, 0) != 0;
}
chunks.clear();
long now = System.currentTimeMillis();
// calculate the year (doesn't have to be exact;
// we assume 365.25 days per year, * 4 = 1461)
int year = 1970 + (int) (now / (1000L * 60 * 60 * 6 * 1461));
if (year < 2014) {
// if the year is before 2014,
// we assume the system doesn't have a real-time clock,
// and we set the creationTime to the past, so that
// existing chunks are overwritten
creationTime = now - fileStore.getDefaultRetentionTime();
} else if (now < creationTime) {
// the system time was set to the past:
// we change the creation time
creationTime = now;
storeHeader.put(HDR_CREATED, creationTime);
}
long fileSize = fileStore.size();
long blocksInStore = fileSize / BLOCK_SIZE;
// Orders chunks newest-version-first; ties broken by smaller block offset.
Comparator<Chunk> chunkComparator = (one, two) -> {
int result = Long.compare(two.version, one.version);
if (result == 0) {
// out of two copies of the same chunk we prefer the one
// close to the beginning of file (presumably later version)
result = Long.compare(one.block, two.block);
}
return result;
};
// Chunks verified on disk, keyed by their block offset; feeds the recovery paths below.
Map<Long, Chunk> validChunksByLocation = new HashMap<>();
if (!assumeCleanShutdown) {
// look for a chunk at the tail of the file that may be newer than the header says
Chunk tailChunk = discoverChunk(blocksInStore);
if (tailChunk != null) {
// for a possible full scan later on
blocksInStore = tailChunk.block;
if (newest == null || tailChunk.version > newest.version) {
newest = tailChunk;
}
}
if (newest != null) {
// and follow the chain of next chunks
while (true) {
validChunksByLocation.put(newest.block, newest);
if (newest.next == 0 || newest.next >= blocksInStore) {
// no (valid) next
break;
}
Chunk test = readChunkHeaderAndFooter(newest.next, newest.id + 1);
if (test == null || test.version <= newest.version) {
break;
}
// if shutdown was really clean then chain should be empty
assumeCleanShutdown = false;
newest = test;
}
}
}
if (assumeCleanShutdown) {
// quickly check latest 20 chunks referenced in meta table
Queue<Chunk> chunksToVerify = new PriorityQueue<>(20, Collections.reverseOrder(chunkComparator));
try {
setLastChunk(newest);
// load the chunk metadata: although meta's root page resides in the lastChunk,
// traversing meta map might recursively load another chunk(s)
Cursor<String, String> cursor = layout.cursor(DataUtils.META_CHUNK);
while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) {
Chunk c = Chunk.fromString(cursor.getValue());
assert c.version <= currentVersion;
// might be there already, due to meta traversal
// see readPage() ... getChunkIfFound()
chunks.putIfAbsent(c.id, c);
// reversed comparator + poll() on overflow keeps the 20 NEWEST chunks in the queue
chunksToVerify.offer(c);
if (chunksToVerify.size() == 20) {
chunksToVerify.poll();
}
}
Chunk c;
while (assumeCleanShutdown && (c = chunksToVerify.poll()) != null) {
Chunk test = readChunkHeaderAndFooter(c.block, c.id);
// any unverifiable chunk falls back to the recovery path below
assumeCleanShutdown = test != null;
if (assumeCleanShutdown) {
validChunksByLocation.put(test.block, test);
}
}
} catch (MVStoreException ignored) {
assumeCleanShutdown = false;
}
}
if (!assumeCleanShutdown) {
boolean quickRecovery = false;
if (!recoveryMode) {
// now we know, that previous shutdown did not go well and file
// is possibly corrupted but there is still hope for a quick
// recovery
// this collection will hold potential candidates for lastChunk to fall back to,
// in order from the most to least likely
Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]);
Arrays.sort(lastChunkCandidates, chunkComparator);
Map<Integer, Chunk> validChunksById = new HashMap<>();
for (Chunk chunk : lastChunkCandidates) {
validChunksById.put(chunk.id, chunk);
}
quickRecovery = findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, validChunksById, false);
}
if (!quickRecovery) {
// scan whole file and try to fetch chunk header and/or footer out of every block
// matching pairs with nothing in-between are considered as valid chunk
// walk backwards from the end of the file, discovering one chunk at a time
long block = blocksInStore;
Chunk tailChunk;
while ((tailChunk = discoverChunk(block)) != null) {
block = tailChunk.block;
validChunksByLocation.put(block, tailChunk);
}
// this collection will hold potential candidates for lastChunk to fall back to,
// in order from the most to least likely
Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]);
Arrays.sort(lastChunkCandidates, chunkComparator);
Map<Integer, Chunk> validChunksById = new HashMap<>();
for (Chunk chunk : lastChunkCandidates) {
validChunksById.put(chunk.id, chunk);
}
// NOTE(review): the '&& lastChunk != null' guard means an empty store does not
// throw here - presumably intentional (a brand-new/empty file is not "corrupt");
// confirm against upstream H2 sources.
if (!findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, validChunksById, true) && lastChunk != null) {
throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, "File is corrupted - unable to recover a valid set of chunks");
}
}
}
fileStore.clear();
// build the free space list
for (Chunk c : chunks.values()) {
if (c.isSaved()) {
long start = c.block * BLOCK_SIZE;
int length = c.len * BLOCK_SIZE;
fileStore.markUsed(start, length);
}
// chunks with no live pages are queued for later removal/compaction
if (!c.isLive()) {
deadChunks.offer(c);
}
}
assert validateFileLength("on open");
}
Aggregations