
Example 16 with FileStore

use of org.h2.store.FileStore in project SpringStudy by myounghaklee.

From the class CreateScriptFile, the method openScriptReader:

/**
 * Open a script reader.
 *
 * @param fileName the name of the script file to read
 * @param compressionAlgorithm the compression algorithm (uppercase)
 * @param cipher the encryption algorithm or null
 * @param password the encryption password
 * @param charset the character set (for example UTF-8)
 * @return the script reader
 * @throws IOException on failure
 */
public static LineNumberReader openScriptReader(String fileName, String compressionAlgorithm, String cipher, String password, String charset) throws IOException {
    try {
        InputStream in;
        if (cipher != null) {
            byte[] key = SHA256.getKeyPasswordHash("script", password.toCharArray());
            FileStore store = FileStore.open(null, fileName, "rw", cipher, key);
            store.init();
            in = new FileStoreInputStream(store, compressionAlgorithm != null, false);
            in = new BufferedInputStream(in, Constants.IO_BUFFER_SIZE_COMPRESS);
        } else {
            in = FileUtils.newInputStream(fileName);
            in = new BufferedInputStream(in, Constants.IO_BUFFER_SIZE);
            in = CompressTool.wrapInputStream(in, compressionAlgorithm, "script.sql");
            if (in == null) {
                throw new IOException("Entry not found: script.sql in " + fileName);
            }
        }
        return new LineNumberReader(new InputStreamReader(in, charset));
    } catch (Exception e) {
        throw new IOException(e.getMessage(), e);
    }
}
Also used : FileStore(org.h2.store.FileStore) InputStreamReader(java.io.InputStreamReader) FileStoreInputStream(org.h2.store.FileStoreInputStream) BufferedInputStream(java.io.BufferedInputStream) InputStream(java.io.InputStream) IOException(java.io.IOException) LineNumberReader(java.io.LineNumberReader)
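
A minimal usage sketch for the method above, assuming the CreateScriptFile class shown here is visible from the caller's package; the file name, compression algorithm, cipher and password are placeholder values (pass null for the cipher to read an unencrypted script):

import java.io.IOException;
import java.io.LineNumberReader;

public class ScriptReaderUsage {
    public static void main(String[] args) throws IOException {
        // "backup.sql", "DEFLATE", "AES" and "secret" are hypothetical values
        try (LineNumberReader reader = CreateScriptFile.openScriptReader(
                "backup.sql", "DEFLATE", "AES", "secret", "UTF-8")) {
            String line;
            while ((line = reader.readLine()) != null) {
                // LineNumberReader tracks the current line for free
                System.out.println(reader.getLineNumber() + ": " + line);
            }
        }
    }
}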

Example 17 with FileStore

use of org.h2.mvstore.FileStore in project SpringStudy by myounghaklee.

From the class TestMVStore, the method testOffHeapStorage:

private void testOffHeapStorage() {
    OffHeapStore offHeap = new OffHeapStore();
    int count = 1000;
    try (MVStore s = new MVStore.Builder().fileStore(offHeap).open()) {
        Map<Integer, String> map = s.openMap("data");
        for (int i = 0; i < count; i++) {
            map.put(i, "Hello " + i);
            s.commit();
        }
        assertTrue(offHeap.getWriteCount() > count);
    }
    try (MVStore s = new MVStore.Builder().fileStore(offHeap).open()) {
        Map<Integer, String> map = s.openMap("data");
        for (int i = 0; i < count; i++) {
            assertEquals("Hello " + i, map.get(i));
        }
    }
}
Also used : MVStore(org.h2.mvstore.MVStore) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) OffHeapStore(org.h2.mvstore.OffHeapStore)
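
For comparison with the off-heap variant above, a minimal sketch of the same map persisted through a regular file-backed store instead of an OffHeapStore; the file name "data.mv" is a placeholder:

import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;

public class FileBackedStoreSketch {
    public static void main(String[] args) {
        try (MVStore s = new MVStore.Builder()
                .fileName("data.mv")   // persisted to disk instead of off-heap memory
                .compress()            // optional page compression
                .open()) {
            MVMap<Integer, String> map = s.openMap("data");
            map.put(1, "Hello 1");
            s.commit();                // changes are durable after commit
        }
    }
}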

Example 18 with FileStore

use of org.h2.store.FileStore in project SpringStudy by myounghaklee.

From the class TestFile, the method doTest:

private void doTest(boolean nioMem, boolean compress) {
    int len = getSize(1000, 10000);
    Random random = new Random();
    FileStore mem = null, file = null;
    byte[] buffMem = null;
    byte[] buffFile = null;
    String prefix = nioMem ? (compress ? "nioMemLZF:" : "nioMemFS:") : (compress ? "memLZF:" : "memFS:");
    FileUtils.delete(prefix + "test");
    FileUtils.delete("~/testFile");
    for (int i = 0; i < len; i++) {
        if (buffMem == null) {
            int l = 1 + random.nextInt(1000);
            buffMem = new byte[l];
            buffFile = new byte[l];
        }
        if (file == null) {
            mem = FileStore.open(this, prefix + "test", "rw");
            file = FileStore.open(this, "~/testFile", "rw");
        }
        assertEquals(file.getFilePointer(), mem.getFilePointer());
        assertEquals(file.length(), mem.length());
        int x = random.nextInt(100);
        if ((x -= 20) < 0) {
            if (file.length() > 0) {
                long pos = random.nextInt((int) (file.length() / 16)) * 16;
                trace("seek " + pos);
                mem.seek(pos);
                file.seek(pos);
            }
        } else if ((x -= 20) < 0) {
            trace("close");
            mem.close();
            file.close();
            mem = null;
            file = null;
        } else if ((x -= 20) < 0) {
            if (buffFile.length > 16) {
                random.nextBytes(buffFile);
                System.arraycopy(buffFile, 0, buffMem, 0, buffFile.length);
                int off = random.nextInt(buffFile.length - 16);
                int l = random.nextInt((buffFile.length - off) / 16) * 16;
                trace("write " + off + " " + l);
                mem.write(buffMem, off, l);
                file.write(buffFile, off, l);
            }
        } else if ((x -= 20) < 0) {
            if (buffFile.length > 16) {
                int off = random.nextInt(buffFile.length - 16);
                int l = random.nextInt((buffFile.length - off) / 16) * 16;
                l = (int) Math.min(l, file.length() - file.getFilePointer());
                trace("read " + off + " " + l);
                Exception a = null, b = null;
                try {
                    file.readFully(buffFile, off, l);
                } catch (Exception e) {
                    a = e;
                }
                try {
                    mem.readFully(buffMem, off, l);
                } catch (Exception e) {
                    b = e;
                }
                if (a != b) {
                    if (a == null || b == null) {
                        fail("only one threw an exception");
                    }
                }
                assertEquals(buffMem, buffFile);
            }
        } else if ((x -= 10) < 0) {
            trace("reset buffers");
            buffMem = null;
            buffFile = null;
        } else {
            int l = random.nextInt(10000) * 16;
            long p = file.getFilePointer();
            file.setLength(l);
            mem.setLength(l);
            trace("setLength " + l);
            if (p > l) {
                file.seek(l);
                mem.seek(l);
            }
        }
    }
    if (mem != null) {
        mem.close();
        file.close();
    }
    FileUtils.delete(prefix + "test");
    FileUtils.delete("~/testFile");
}
Also used : FileStore(org.h2.store.FileStore) Random(java.util.Random)
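
A minimal sketch of the org.h2.store.FileStore calls exercised randomly above, using the same in-memory "memFS:" file system and passing null for the DataHandler (as the encrypted variant in Example 16 also does); positions and lengths are kept at multiples of 16 bytes, matching the test:

import org.h2.store.FileStore;
import org.h2.store.fs.FileUtils;

public class FileStoreSketch {
    public static void main(String[] args) {
        String name = "memFS:demo";                 // in-memory file, as in the test above
        FileUtils.delete(name);
        FileStore store = FileStore.open(null, name, "rw");
        try {
            byte[] block = new byte[16];            // the store works in 16-byte blocks
            for (int i = 0; i < block.length; i++) {
                block[i] = (byte) i;
            }
            store.write(block, 0, block.length);    // write at the current file pointer
            store.seek(0);                          // rewind to the start
            byte[] read = new byte[16];
            store.readFully(read, 0, read.length);  // read the block back
        } finally {
            store.close();
            FileUtils.delete(name);
        }
    }
}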

Example 19 with FileStore

use of org.h2.mvstore.FileStore in project SpringStudy by myounghaklee.

From the class InformationSchemaTable, the method settings:

private void settings(SessionLocal session, ArrayList<Row> rows) {
    for (Setting s : database.getAllSettings()) {
        String value = s.getStringValue();
        if (value == null) {
            value = Integer.toString(s.getIntValue());
        }
        add(session, rows, identifier(s.getName()), value);
    }
    add(session, rows, "info.BUILD_ID", "" + Constants.BUILD_ID);
    add(session, rows, "info.VERSION_MAJOR", "" + Constants.VERSION_MAJOR);
    add(session, rows, "info.VERSION_MINOR", "" + Constants.VERSION_MINOR);
    add(session, rows, "info.VERSION", Constants.FULL_VERSION);
    if (session.getUser().isAdmin()) {
        String[] settings = { "java.runtime.version", "java.vm.name", "java.vendor", "os.name", "os.arch", "os.version", "sun.os.patch.level", "file.separator", "path.separator", "line.separator", "user.country", "user.language", "user.variant", "file.encoding" };
        for (String s : settings) {
            add(session, rows, "property." + s, Utils.getProperty(s, ""));
        }
    }
    add(session, rows, "DEFAULT_NULL_ORDERING", database.getDefaultNullOrdering().name());
    add(session, rows, "EXCLUSIVE", database.getExclusiveSession() == null ? "FALSE" : "TRUE");
    add(session, rows, "MODE", database.getMode().getName());
    add(session, rows, "QUERY_TIMEOUT", Integer.toString(session.getQueryTimeout()));
    add(session, rows, "TIME ZONE", session.currentTimeZone().getId());
    add(session, rows, "TRUNCATE_LARGE_LENGTH", session.isTruncateLargeLength() ? "TRUE" : "FALSE");
    add(session, rows, "VARIABLE_BINARY", session.isVariableBinary() ? "TRUE" : "FALSE");
    add(session, rows, "OLD_INFORMATION_SCHEMA", session.isOldInformationSchema() ? "TRUE" : "FALSE");
    BitSet nonKeywords = session.getNonKeywords();
    if (nonKeywords != null) {
        add(session, rows, "NON_KEYWORDS", Parser.formatNonKeywords(nonKeywords));
    }
    add(session, rows, "RETENTION_TIME", Integer.toString(database.getRetentionTime()));
    // database settings
    for (Map.Entry<String, String> entry : database.getSettings().getSortedSettings()) {
        add(session, rows, entry.getKey(), entry.getValue());
    }
    Store store = database.getStore();
    MVStore mvStore = store.getMvStore();
    FileStore fs = mvStore.getFileStore();
    if (fs != null) {
        add(session, rows, "info.FILE_WRITE", Long.toString(fs.getWriteCount()));
        add(session, rows, "info.FILE_WRITE_BYTES", Long.toString(fs.getWriteBytes()));
        add(session, rows, "info.FILE_READ", Long.toString(fs.getReadCount()));
        add(session, rows, "info.FILE_READ_BYTES", Long.toString(fs.getReadBytes()));
        add(session, rows, "info.UPDATE_FAILURE_PERCENT", String.format(Locale.ENGLISH, "%.2f%%", 100 * mvStore.getUpdateFailureRatio()));
        add(session, rows, "info.FILL_RATE", Integer.toString(mvStore.getFillRate()));
        add(session, rows, "info.CHUNKS_FILL_RATE", Integer.toString(mvStore.getChunksFillRate()));
        add(session, rows, "info.CHUNKS_FILL_RATE_RW", Integer.toString(mvStore.getRewritableChunksFillRate()));
        try {
            add(session, rows, "info.FILE_SIZE", Long.toString(fs.getFile().size()));
        } catch (IOException ignore) {
        /**/
        }
        add(session, rows, "info.CHUNK_COUNT", Long.toString(mvStore.getChunkCount()));
        add(session, rows, "info.PAGE_COUNT", Long.toString(mvStore.getPageCount()));
        add(session, rows, "info.PAGE_COUNT_LIVE", Long.toString(mvStore.getLivePageCount()));
        add(session, rows, "info.PAGE_SIZE", Integer.toString(mvStore.getPageSplitSize()));
        add(session, rows, "info.CACHE_MAX_SIZE", Integer.toString(mvStore.getCacheSize()));
        add(session, rows, "info.CACHE_SIZE", Integer.toString(mvStore.getCacheSizeUsed()));
        add(session, rows, "info.CACHE_HIT_RATIO", Integer.toString(mvStore.getCacheHitRatio()));
        add(session, rows, "info.TOC_CACHE_HIT_RATIO", Integer.toString(mvStore.getTocCacheHitRatio()));
        add(session, rows, "info.LEAF_RATIO", Integer.toString(mvStore.getLeafRatio()));
    }
}
Also used : MVStore(org.h2.mvstore.MVStore) FileStore(org.h2.mvstore.FileStore) Setting(org.h2.engine.Setting) BitSet(java.util.BitSet) Store(org.h2.mvstore.db.Store) IOException(java.io.IOException) Map(java.util.Map)
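
The info.* rows populated above can be read back over JDBC; a minimal sketch against INFORMATION_SCHEMA.SETTINGS, where the database URL and credentials are placeholders and the column names are assumed from the H2 2.x information schema:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ShowStoreStatistics {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:./demo", "sa", "");
             Statement stat = conn.createStatement();
             // the info.FILE_* and info.CACHE_* rows only appear when the
             // database is backed by a persistent FileStore (fs != null above)
             ResultSet rs = stat.executeQuery(
                     "SELECT SETTING_NAME, SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS"
                     + " WHERE SETTING_NAME LIKE 'info.%' ORDER BY SETTING_NAME")) {
            while (rs.next()) {
                System.out.println(rs.getString(1) + " = " + rs.getString(2));
            }
        }
    }
}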

Example 20 with FileStore

use of org.h2.mvstore.FileStore in project SpringStudy by myounghaklee.

From the class MVStore, the method readStoreHeader:

private void readStoreHeader() {
    Chunk newest = null;
    boolean assumeCleanShutdown = true;
    boolean validStoreHeader = false;
    // find out which chunk and version are the newest
    // read the first two blocks
    ByteBuffer fileHeaderBlocks = fileStore.readFully(0, 2 * BLOCK_SIZE);
    byte[] buff = new byte[BLOCK_SIZE];
    for (int i = 0; i <= BLOCK_SIZE; i += BLOCK_SIZE) {
        fileHeaderBlocks.get(buff);
        // the following can fail for various reasons
        try {
            HashMap<String, String> m = DataUtils.parseChecksummedMap(buff);
            if (m == null) {
                assumeCleanShutdown = false;
                continue;
            }
            long version = DataUtils.readHexLong(m, HDR_VERSION, 0);
            // if both header blocks do agree on version
            // we'll continue on happy path - assume that previous shutdown was clean
            assumeCleanShutdown = assumeCleanShutdown && (newest == null || version == newest.version);
            if (newest == null || version > newest.version) {
                validStoreHeader = true;
                storeHeader.putAll(m);
                creationTime = DataUtils.readHexLong(m, HDR_CREATED, 0);
                int chunkId = DataUtils.readHexInt(m, HDR_CHUNK, 0);
                long block = DataUtils.readHexLong(m, HDR_BLOCK, 2);
                Chunk test = readChunkHeaderAndFooter(block, chunkId);
                if (test != null) {
                    newest = test;
                }
            }
        } catch (Exception ignore) {
            assumeCleanShutdown = false;
        }
    }
    if (!validStoreHeader) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, "Store header is corrupt: {0}", fileStore);
    }
    int blockSize = DataUtils.readHexInt(storeHeader, HDR_BLOCK_SIZE, BLOCK_SIZE);
    if (blockSize != BLOCK_SIZE) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, "Block size {0} is currently not supported", blockSize);
    }
    long format = DataUtils.readHexLong(storeHeader, HDR_FORMAT, 1);
    if (!fileStore.isReadOnly()) {
        if (format > FORMAT_WRITE_MAX) {
            throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MAX, "The write format {0} is larger than the supported format {1}");
        } else if (format < FORMAT_WRITE_MIN) {
            throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MIN, "The write format {0} is smaller than the supported format {1}");
        }
    }
    format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format);
    if (format > FORMAT_READ_MAX) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, "The read format {0} is larger than the supported format {1}", format, FORMAT_READ_MAX);
    } else if (format < FORMAT_READ_MIN) {
        throw DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, "The read format {0} is smaller than the supported format {1}", format, FORMAT_READ_MIN);
    }
    assumeCleanShutdown = assumeCleanShutdown && newest != null && !recoveryMode;
    if (assumeCleanShutdown) {
        assumeCleanShutdown = DataUtils.readHexInt(storeHeader, HDR_CLEAN, 0) != 0;
    }
    chunks.clear();
    long now = System.currentTimeMillis();
    // calculate the year (doesn't have to be exact;
    // we assume 365.25 days per year, * 4 = 1461)
    int year = 1970 + (int) (now / (1000L * 60 * 60 * 6 * 1461));
    if (year < 2014) {
        // if the year is before 2014,
        // we assume the system doesn't have a real-time clock,
        // and we set the creationTime to the past, so that
        // existing chunks are overwritten
        creationTime = now - fileStore.getDefaultRetentionTime();
    } else if (now < creationTime) {
        // the system time was set to the past:
        // we change the creation time
        creationTime = now;
        storeHeader.put(HDR_CREATED, creationTime);
    }
    long fileSize = fileStore.size();
    long blocksInStore = fileSize / BLOCK_SIZE;
    Comparator<Chunk> chunkComparator = (one, two) -> {
        int result = Long.compare(two.version, one.version);
        if (result == 0) {
            // out of two copies of the same chunk we prefer the one
            // close to the beginning of file (presumably later version)
            result = Long.compare(one.block, two.block);
        }
        return result;
    };
    Map<Long, Chunk> validChunksByLocation = new HashMap<>();
    if (!assumeCleanShutdown) {
        Chunk tailChunk = discoverChunk(blocksInStore);
        if (tailChunk != null) {
            // for a possible full scan later on
            blocksInStore = tailChunk.block;
            if (newest == null || tailChunk.version > newest.version) {
                newest = tailChunk;
            }
        }
        if (newest != null) {
            // and follow the chain of next chunks
            while (true) {
                validChunksByLocation.put(newest.block, newest);
                if (newest.next == 0 || newest.next >= blocksInStore) {
                    // no (valid) next
                    break;
                }
                Chunk test = readChunkHeaderAndFooter(newest.next, newest.id + 1);
                if (test == null || test.version <= newest.version) {
                    break;
                }
                // if shutdown was really clean then chain should be empty
                assumeCleanShutdown = false;
                newest = test;
            }
        }
    }
    if (assumeCleanShutdown) {
        // quickly check latest 20 chunks referenced in meta table
        Queue<Chunk> chunksToVerify = new PriorityQueue<>(20, Collections.reverseOrder(chunkComparator));
        try {
            setLastChunk(newest);
            // load the chunk metadata: although meta's root page resides in the lastChunk,
            // traversing meta map might recursively load another chunk(s)
            Cursor<String, String> cursor = layout.cursor(DataUtils.META_CHUNK);
            while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) {
                Chunk c = Chunk.fromString(cursor.getValue());
                assert c.version <= currentVersion;
                // might be there already, due to meta traversal
                // see readPage() ... getChunkIfFound()
                chunks.putIfAbsent(c.id, c);
                chunksToVerify.offer(c);
                if (chunksToVerify.size() == 20) {
                    chunksToVerify.poll();
                }
            }
            Chunk c;
            while (assumeCleanShutdown && (c = chunksToVerify.poll()) != null) {
                Chunk test = readChunkHeaderAndFooter(c.block, c.id);
                assumeCleanShutdown = test != null;
                if (assumeCleanShutdown) {
                    validChunksByLocation.put(test.block, test);
                }
            }
        } catch (MVStoreException ignored) {
            assumeCleanShutdown = false;
        }
    }
    if (!assumeCleanShutdown) {
        boolean quickRecovery = false;
        if (!recoveryMode) {
            // now we know, that previous shutdown did not go well and file
            // is possibly corrupted but there is still hope for a quick
            // recovery
            // this collection will hold potential candidates for lastChunk to fall back to,
            // in order from the most to least likely
            Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]);
            Arrays.sort(lastChunkCandidates, chunkComparator);
            Map<Integer, Chunk> validChunksById = new HashMap<>();
            for (Chunk chunk : lastChunkCandidates) {
                validChunksById.put(chunk.id, chunk);
            }
            quickRecovery = findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, validChunksById, false);
        }
        if (!quickRecovery) {
            // scan whole file and try to fetch chunk header and/or footer out of every block
            // matching pairs with nothing in-between are considered as valid chunk
            long block = blocksInStore;
            Chunk tailChunk;
            while ((tailChunk = discoverChunk(block)) != null) {
                block = tailChunk.block;
                validChunksByLocation.put(block, tailChunk);
            }
            // this collection will hold potential candidates for lastChunk to fall back to,
            // in order from the most to least likely
            Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]);
            Arrays.sort(lastChunkCandidates, chunkComparator);
            Map<Integer, Chunk> validChunksById = new HashMap<>();
            for (Chunk chunk : lastChunkCandidates) {
                validChunksById.put(chunk.id, chunk);
            }
            if (!findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, validChunksById, true) && lastChunk != null) {
                throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, "File is corrupted - unable to recover a valid set of chunks");
            }
        }
    }
    fileStore.clear();
    // build the free space list
    for (Chunk c : chunks.values()) {
        if (c.isSaved()) {
            long start = c.block * BLOCK_SIZE;
            int length = c.len * BLOCK_SIZE;
            fileStore.markUsed(start, length);
        }
        if (!c.isLive()) {
            deadChunks.offer(c);
        }
    }
    assert validateFileLength("on open");
}
Also used : Arrays(java.util.Arrays) AtomicIntegerFieldUpdater(java.util.concurrent.atomic.AtomicIntegerFieldUpdater) UncaughtExceptionHandler(java.lang.Thread.UncaughtExceptionHandler) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) PriorityQueue(java.util.PriorityQueue) HashMap(java.util.HashMap) Utils(org.h2.util.Utils) Compressor(org.h2.compress.Compressor) Deque(java.util.Deque) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) PriorityBlockingQueue(java.util.concurrent.PriorityBlockingQueue) HashSet(java.util.HashSet) Future(java.util.concurrent.Future) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) CompressDeflate(org.h2.compress.CompressDeflate) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) LinkedList(java.util.LinkedList) CacheLongKeyLIRS(org.h2.mvstore.cache.CacheLongKeyLIRS) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) Predicate(java.util.function.Predicate) MathUtils(org.h2.util.MathUtils) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) StringDataType(org.h2.mvstore.type.StringDataType) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) INITIAL_VERSION(org.h2.mvstore.MVMap.INITIAL_VERSION) BitSet(java.util.BitSet) Queue(java.util.Queue) ArrayDeque(java.util.ArrayDeque) Comparator(java.util.Comparator) Collections(java.util.Collections) CompressLZF(org.h2.compress.CompressLZF)
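
readStoreHeader() runs whenever an existing store file is opened; a minimal sketch that triggers it and prints the parsed header, where "data.mv" is a placeholder for an existing store file:

import org.h2.mvstore.MVStore;

public class InspectStoreHeader {
    public static void main(String[] args) {
        try (MVStore s = new MVStore.Builder()
                .fileName("data.mv")
                .readOnly()            // open without writing; only the header and chunk chain are read
                .open()) {
            System.out.println("current version: " + s.getCurrentVersion());
            System.out.println("store header:    " + s.getStoreHeader());
        }
    }
}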

Aggregations

FileStore (org.h2.store.FileStore): 20
FileStore (org.h2.mvstore.FileStore): 16
IOException (java.io.IOException): 14
MVStore (org.h2.mvstore.MVStore): 11
BufferedInputStream (java.io.BufferedInputStream): 8
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 8
FileStoreInputStream (org.h2.store.FileStoreInputStream): 8
ByteArrayInputStream (java.io.ByteArrayInputStream): 7
BitSet (java.util.BitSet): 6
HashSet (java.util.HashSet): 6
DbException (org.h2.message.DbException): 6
FileStoreOutputStream (org.h2.store.FileStoreOutputStream): 6
InputStream (java.io.InputStream): 5
InputStreamReader (java.io.InputStreamReader): 5
ArrayList (java.util.ArrayList): 5
HashMap (java.util.HashMap): 5
Setting (org.h2.engine.Setting): 5
Map (java.util.Map): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
OutputStream (java.io.OutputStream): 3