Example 56 with Page

use of org.h2.mvstore.Page in project h2database by h2database.

the class MVStoreTool method dump.

/**
 * Read the contents of the file and display them in a human-readable
 * format.
 *
 * @param fileName the name of the file
 * @param writer the print writer
 * @param details print the page details
 */
public static void dump(String fileName, Writer writer, boolean details) {
    PrintWriter pw = new PrintWriter(writer, true);
    if (!FilePath.get(fileName).exists()) {
        pw.println("File not found: " + fileName);
        return;
    }
    long size = FileUtils.size(fileName);
    pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024);
    FileChannel file = null;
    int blockSize = MVStore.BLOCK_SIZE;
    TreeMap<Integer, Long> mapSizesTotal = new TreeMap<>();
    long pageSizeTotal = 0;
    try {
        file = FilePath.get(fileName).open("r");
        long fileSize = file.size();
        int len = Long.toHexString(fileSize).length();
        ByteBuffer block = ByteBuffer.allocate(4096);
        long pageCount = 0;
        for (long pos = 0; pos < fileSize; ) {
            block.rewind();
            DataUtils.readFully(file, pos, block);
            block.rewind();
            int headerType = block.get();
            if (headerType == 'H') {
                String header = new String(block.array(), StandardCharsets.ISO_8859_1).trim();
                pw.printf("%0" + len + "x fileHeader %s%n", pos, header);
                pos += blockSize;
                continue;
            }
            if (headerType != 'c') {
                pos += blockSize;
                continue;
            }
            block.position(0);
            Chunk c = null;
            try {
                c = Chunk.readChunkHeader(block, pos);
            } catch (IllegalStateException e) {
                pos += blockSize;
                continue;
            }
            if (c.len <= 0) {
                // not a chunk
                pos += blockSize;
                continue;
            }
            int length = c.len * MVStore.BLOCK_SIZE;
            pw.printf("%n%0" + len + "x chunkHeader %s%n", pos, c.toString());
            ByteBuffer chunk = ByteBuffer.allocate(length);
            DataUtils.readFully(file, pos, chunk);
            int p = block.position();
            pos += length;
            int remaining = c.pageCount;
            pageCount += c.pageCount;
            TreeMap<Integer, Integer> mapSizes = new TreeMap<>();
            int pageSizeSum = 0;
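            // Each page in the chunk starts with: length (int), check value (short),
            // map id (varint), entry count (varint), and a type byte
            // (bit 0 = node/leaf, PAGE_COMPRESSED bits = compression).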
            while (remaining > 0) {
                int start = p;
                try {
                    chunk.position(p);
                } catch (IllegalArgumentException e) {
                    // too far
                    pw.printf("ERROR illegal position %d%n", p);
                    break;
                }
                int pageSize = chunk.getInt();
                // check value (ignored)
                chunk.getShort();
                int mapId = DataUtils.readVarInt(chunk);
                int entries = DataUtils.readVarInt(chunk);
                int type = chunk.get();
                boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
                boolean node = (type & 1) != 0;
                if (details) {
                    pw.printf("+%0" + len + "x %s, map %x, %d entries, %d bytes, maxLen %x%n", p, (node ? "node" : "leaf") + (compressed ? " compressed" : ""), mapId, node ? entries + 1 : entries, pageSize, DataUtils.getPageMaxLength(DataUtils.getPagePos(0, 0, pageSize, 0)));
                }
                p += pageSize;
                Integer mapSize = mapSizes.get(mapId);
                if (mapSize == null) {
                    mapSize = 0;
                }
                mapSizes.put(mapId, mapSize + pageSize);
                Long total = mapSizesTotal.get(mapId);
                if (total == null) {
                    total = 0L;
                }
                mapSizesTotal.put(mapId, total + pageSize);
                pageSizeSum += pageSize;
                pageSizeTotal += pageSize;
                remaining--;
                long[] children = null;
                long[] counts = null;
                if (node) {
                    children = new long[entries + 1];
                    for (int i = 0; i <= entries; i++) {
                        children[i] = chunk.getLong();
                    }
                    counts = new long[entries + 1];
                    for (int i = 0; i <= entries; i++) {
                        long s = DataUtils.readVarLong(chunk);
                        counts[i] = s;
                    }
                }
                String[] keys = new String[entries];
                if (mapId == 0 && details) {
                    ByteBuffer data;
                    if (compressed) {
                        boolean fast = (type & DataUtils.PAGE_COMPRESSED_HIGH) != DataUtils.PAGE_COMPRESSED_HIGH;
                        Compressor compressor = getCompressor(fast);
                        int lenAdd = DataUtils.readVarInt(chunk);
                        int compLen = pageSize + start - chunk.position();
                        byte[] comp = Utils.newBytes(compLen);
                        chunk.get(comp);
                        int l = compLen + lenAdd;
                        data = ByteBuffer.allocate(l);
                        compressor.expand(comp, 0, compLen, data.array(), 0, l);
                    } else {
                        data = chunk;
                    }
                    for (int i = 0; i < entries; i++) {
                        String k = StringDataType.INSTANCE.read(data);
                        keys[i] = k;
                    }
                    if (node) {
                        // meta map node
                        for (int i = 0; i < entries; i++) {
                            long cp = children[i];
                            pw.printf("    %d children < %s @ " + "chunk %x +%0" + len + "x%n", counts[i], keys[i], DataUtils.getPageChunkId(cp), DataUtils.getPageOffset(cp));
                        }
                        long cp = children[entries];
                        pw.printf("    %d children >= %s @ chunk %x +%0" + len + "x%n", counts[entries], keys.length >= entries ? null : keys[entries], DataUtils.getPageChunkId(cp), DataUtils.getPageOffset(cp));
                    } else {
                        // meta map leaf
                        String[] values = new String[entries];
                        for (int i = 0; i < entries; i++) {
                            String v = StringDataType.INSTANCE.read(data);
                            values[i] = v;
                        }
                        for (int i = 0; i < entries; i++) {
                            pw.println("    " + keys[i] + " = " + values[i]);
                        }
                    }
                } else {
                    if (node && details) {
                        for (int i = 0; i <= entries; i++) {
                            long cp = children[i];
                            pw.printf("    %d children @ chunk %x +%0" + len + "x%n", counts[i], DataUtils.getPageChunkId(cp), DataUtils.getPageOffset(cp));
                        }
                    }
                }
            }
            pageSizeSum = Math.max(1, pageSizeSum);
            for (Integer mapId : mapSizes.keySet()) {
                int percent = 100 * mapSizes.get(mapId) / pageSizeSum;
                pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizes.get(mapId), percent);
            }
            int footerPos = chunk.limit() - Chunk.FOOTER_LENGTH;
            try {
                chunk.position(footerPos);
                pw.printf("+%0" + len + "x chunkFooter %s%n", footerPos, new String(chunk.array(), chunk.position(), Chunk.FOOTER_LENGTH, StandardCharsets.ISO_8859_1).trim());
            } catch (IllegalArgumentException e) {
                // too far
                pw.printf("ERROR illegal footer position %d%n", footerPos);
            }
        }
        pw.printf("%n%0" + len + "x eof%n", fileSize);
        pw.printf("\n");
        pageCount = Math.max(1, pageCount);
        pw.printf("page size total: %d bytes, page count: %d, average page size: %d bytes\n", pageSizeTotal, pageCount, pageSizeTotal / pageCount);
        pageSizeTotal = Math.max(1, pageSizeTotal);
        for (Integer mapId : mapSizesTotal.keySet()) {
            int percent = (int) (100 * mapSizesTotal.get(mapId) / pageSizeTotal);
            pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizesTotal.get(mapId), percent);
        }
    } catch (IOException e) {
        pw.println("ERROR: " + e);
        e.printStackTrace(pw);
    } finally {
        if (file != null) {
            try {
                file.close();
            } catch (IOException e) {
            // ignore
            }
        }
    }
    pw.flush();
}
Also used : FileChannel(java.nio.channels.FileChannel) Compressor(org.h2.compress.Compressor) IOException(java.io.IOException) TreeMap(java.util.TreeMap) ByteBuffer(java.nio.ByteBuffer) PrintWriter(java.io.PrintWriter)
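
A minimal usage sketch for the dump method above (the file path is hypothetical; any existing MVStore file will do):

import java.io.PrintWriter;
import org.h2.mvstore.MVStoreTool;

public class DumpExample {
    public static void main(String[] args) {
        // Write a human-readable dump of the store file to stdout, including page details.
        MVStoreTool.dump("data/test.mv.db", new PrintWriter(System.out), true);
    }
}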

Example 57 with Page

use of org.h2.mvstore.Page in project h2database by h2database.

the class Page method read.

/**
 * Read the page from the buffer.
 *
 * @param buff the buffer
 * @param chunkId the chunk id
 * @param offset the offset within the chunk
 * @param maxLength the maximum length
 */
void read(ByteBuffer buff, int chunkId, int offset, int maxLength) {
    int start = buff.position();
    int pageLength = buff.getInt();
    if (pageLength > maxLength || pageLength < 4) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "File corrupted in chunk {0}, expected page length 4..{1}, got {2}",
                chunkId, maxLength, pageLength);
    }
    buff.limit(start + pageLength);
    short check = buff.getShort();
    int mapId = DataUtils.readVarInt(buff);
    if (mapId != map.getId()) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "File corrupted in chunk {0}, expected map id {1}, got {2}",
                chunkId, map.getId(), mapId);
    }
    int checkTest = DataUtils.getCheckValue(chunkId)
            ^ DataUtils.getCheckValue(offset)
            ^ DataUtils.getCheckValue(pageLength);
    if (check != (short) checkTest) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "File corrupted in chunk {0}, expected check value {1}, got {2}",
                chunkId, checkTest, check);
    }
    int len = DataUtils.readVarInt(buff);
    keys = new Object[len];
    int type = buff.get();
    boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE;
    if (node) {
        children = new PageReference[len + 1];
        long[] p = new long[len + 1];
        for (int i = 0; i <= len; i++) {
            p[i] = buff.getLong();
        }
        long total = 0;
        for (int i = 0; i <= len; i++) {
            long s = DataUtils.readVarLong(buff);
            total += s;
            children[i] = new PageReference(null, p[i], s);
        }
        totalCount = total;
    }
    boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
    if (compressed) {
        Compressor compressor;
        if ((type & DataUtils.PAGE_COMPRESSED_HIGH) == DataUtils.PAGE_COMPRESSED_HIGH) {
            compressor = map.getStore().getCompressorHigh();
        } else {
            compressor = map.getStore().getCompressorFast();
        }
        int lenAdd = DataUtils.readVarInt(buff);
        int compLen = pageLength + start - buff.position();
        byte[] comp = Utils.newBytes(compLen);
        buff.get(comp);
        int l = compLen + lenAdd;
        buff = ByteBuffer.allocate(l);
        compressor.expand(comp, 0, compLen, buff.array(), buff.arrayOffset(), l);
    }
    map.getKeyType().read(buff, keys, len, true);
    if (!node) {
        values = new Object[len];
        map.getValueType().read(buff, values, len, false);
        totalCount = len;
    }
    recalculateMemory();
}
Also used : Compressor(org.h2.compress.Compressor)
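
Page.read is internal to MVStore and runs whenever a stored page is deserialized, for example when a value is fetched from a reopened store. A minimal sketch of such a trigger (the file name is hypothetical):

import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;

public class ReadExample {
    public static void main(String[] args) {
        // Opening the store and reading a key loads the root and leaf pages
        // on the path to that key, each deserialized via Page.read(...).
        MVStore s = MVStore.open("data/test.mv.db");
        MVMap<Integer, String> map = s.openMap("data");
        System.out.println(map.get(1));
        s.close();
    }
}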

Example 58 with Page

use of org.h2.mvstore.Page in project h2database by h2database.

the class TestMVStore method testFastDelete.

private void testFastDelete() {
    String fileName = getBaseDir() + "/" + getTestName();
    FileUtils.delete(fileName);
    MVStore s;
    MVMap<Integer, String> m;
    s = openStore(fileName, 700);
    m = s.openMap("data");
    for (int i = 0; i < 1000; i++) {
        m.put(i, "Hello World");
        assertEquals(i + 1, m.size());
    }
    assertEquals(1000, m.size());
    // previously (131896) we failed to account for the initial root page of every map;
    // there are two such maps here (meta and "data"), so the old value was 256 bytes too low
    assertEquals(132152, s.getUnsavedMemory());
    s.commit();
    assertEquals(2, s.getFileStore().getWriteCount());
    s.close();
    s = openStore(fileName);
    m = s.openMap("data");
    m.clear();
    assertEquals(0, m.size());
    s.commit();
    // ensure only nodes are read, but not leaves
    assertEquals(45, s.getFileStore().getReadCount());
    assertTrue(s.getFileStore().getWriteCount() < 5);
    s.close();
}
Also used : MVStore(org.h2.mvstore.MVStore) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
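
The openStore(fileName, 700) call is a helper of the test class; a plausible sketch of it, assuming it only sets a small page split size through MVStore.Builder (the real helper may also configure compression or caching):

import org.h2.mvstore.MVStore;

// Hypothetical stand-in for the test helper: a small pageSplitSize forces many
// small pages, which the unsaved-memory and write-count assertions above rely on.
static MVStore openStore(String fileName, int pageSplitSize) {
    return new MVStore.Builder()
            .fileName(fileName)
            .pageSplitSize(pageSplitSize)
            .open();
}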

Example 59 with Page

use of org.h2.mvstore.Page in project h2database by h2database.

the class TestMVStoreStopCompact method testStopCompact.

private void testStopCompact(int retentionTime, int timeout) throws InterruptedException {
    String fileName = getBaseDir() + "/testStopCompact.h3";
    FileUtils.createDirectories(getBaseDir());
    FileUtils.delete(fileName);
    // store with a very small page size, to make sure
    // there are many leaf pages
    MVStore s = new MVStore.Builder().fileName(fileName).open();
    s.setRetentionTime(retentionTime);
    MVMap<Integer, String> map = s.openMap("data");
    long start = System.currentTimeMillis();
    Random r = new Random(1);
    for (int i = 0; i < 4000000; i++) {
        long time = System.currentTimeMillis() - start;
        if (time > timeout) {
            break;
        }
        int x = r.nextInt(10000000);
        map.put(x, "Hello World " + i * 10);
    }
    s.setAutoCommitDelay(100);
    long oldWriteCount = s.getFileStore().getWriteCount();
    // expect background write to stop after 5 seconds
    Thread.sleep(5000);
    long newWriteCount = s.getFileStore().getWriteCount();
    // expect that compaction didn't cause many writes
    assertTrue(newWriteCount - oldWriteCount < 30);
    s.close();
}
Also used : MVStore(org.h2.mvstore.MVStore) Random(java.util.Random)
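
Illustrative only: the enclosing test presumably drives this scenario with a few retention-time / timeout combinations, along these lines (the concrete values are made up, not the test's actual ones):

public void test() throws InterruptedException {
    // A short and a long retention time, each with a 2 second fill phase.
    testStopCompact(0, 2000);
    testStopCompact(60000, 2000);
}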

Aggregations

CreateTableData (org.h2.command.ddl.CreateTableData) 8
Page (org.h2.mvstore.Page) 7
Data (org.h2.store.Data) 7
Column (org.h2.table.Column) 5
IndexColumn (org.h2.table.IndexColumn) 5
Value (org.h2.value.Value) 5
MVStore (org.h2.mvstore.MVStore) 4
Row (org.h2.result.Row) 4
SearchRow (org.h2.result.SearchRow) 4
IOException (java.io.IOException) 3
Connection (java.sql.Connection) 3
CRC32 (java.util.zip.CRC32) 3
PageBtreeIndex (org.h2.index.PageBtreeIndex) 3
PageDataIndex (org.h2.index.PageDataIndex) 3
PageIndex (org.h2.index.PageIndex) 3
DbException (org.h2.message.DbException) 3
Page (org.h2.store.Page) 3
ValueString (org.h2.value.ValueString) 3
PrintWriter (java.io.PrintWriter) 2
ResultSet (java.sql.ResultSet) 2