
Example 16 with Page

Use of org.h2.mvstore.Page in project h2database by h2database.

The class PageStore, method readPage.

/**
 * Read a page.
 *
 * @param pos the page id
 * @return the page
 */
public synchronized Data readPage(int pos) {
    Data page = createData();
    readPage(pos, page);
    return page;
}
Also used: CreateTableData (org.h2.command.ddl.CreateTableData)
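
A minimal caller sketch (the pageStore variable and page id are hypothetical; readByte() is a standard org.h2.store.Data accessor, and the assumption here is that, as in the classic PageStore format, the first byte of a page encodes its type):

// Hypothetical usage: fetch the raw bytes of one page and inspect the
// type byte; the remaining layout depends on the concrete page type.
int pageId = 3; // hypothetical page id
Data page = pageStore.readPage(pageId);
int type = page.readByte();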

Example 17 with Page

Use of org.h2.mvstore.Page in project h2database by h2database.

The class PageStore, method writeStaticHeader.

private void writeStaticHeader() {
    Data page = Data.create(database, new byte[pageSize - FileStore.HEADER_LENGTH]);
    page.writeInt(pageSize);
    page.writeByte((byte) WRITE_VERSION);
    page.writeByte((byte) READ_VERSION);
    file.seek(FileStore.HEADER_LENGTH);
    file.write(page.getBytes(), 0, pageSize - FileStore.HEADER_LENGTH);
    writeCount++;
}
Also used: CreateTableData (org.h2.command.ddl.CreateTableData)
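
A sketch of the matching read path, loosely modeled on how PageStore reads this header back (same fields and offsets as above; readFully is a standard org.h2.store.FileStore method):

// Read the static header back in the order writeStaticHeader wrote it.
Data page = Data.create(database, new byte[pageSize - FileStore.HEADER_LENGTH]);
file.seek(FileStore.HEADER_LENGTH);
file.readFully(page.getBytes(), 0, pageSize - FileStore.HEADER_LENGTH);
int storedPageSize = page.readInt();
int writeVersion = page.readByte();
int readVersion = page.readByte();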

Example 18 with Page

Use of org.h2.mvstore.Page in project h2database by h2database.

The class MVRTreeMap, method remove.

@Override
protected synchronized Object remove(Page p, long writeVersion, Object key) {
    Object result = null;
    if (p.isLeaf()) {
        for (int i = 0; i < p.getKeyCount(); i++) {
            if (keyType.equals(p.getKey(i), key)) {
                result = p.getValue(i);
                p.remove(i);
                break;
            }
        }
        return result;
    }
    for (int i = 0; i < p.getKeyCount(); i++) {
        if (contains(p, i, key)) {
            Page cOld = p.getChildPage(i);
            // this will mark the old page as deleted
            // so we need to update the parent in any case
            // (otherwise the old page might be deleted again)
            Page c = cOld.copy(writeVersion);
            long oldSize = c.getTotalCount();
            result = remove(c, writeVersion, key);
            p.setChild(i, c);
            if (oldSize == c.getTotalCount()) {
                continue;
            }
            if (c.getTotalCount() == 0) {
                // this child was deleted
                p.remove(i);
                if (p.getKeyCount() == 0) {
                    c.removePage();
                }
                break;
            }
            Object oldBounds = p.getKey(i);
            if (!keyType.isInside(key, oldBounds)) {
                p.setKey(i, getBounds(c));
            }
            break;
        }
    }
    return result;
}
Also used: Page (org.h2.mvstore.Page)
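
The protected remove above is normally reached through the public map API. A self-contained sketch using an in-memory store (SpatialKey takes an id followed by min/max pairs per dimension):

// Self-contained sketch (imports: org.h2.mvstore.MVStore,
// org.h2.mvstore.rtree.MVRTreeMap, org.h2.mvstore.rtree.SpatialKey).
MVStore s = MVStore.open(null); // null file name: purely in-memory store
MVRTreeMap<String> map = s.openMap("data", new MVRTreeMap.Builder<String>());
// SpatialKey: id, then min/max per dimension (two dimensions by default)
SpatialKey key = new SpatialKey(0, 1f, 2f, 3f, 4f);
map.put(key, "box");
map.remove(key); // descends the tree via the copy-on-write remove above
s.close();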

Example 19 with Page

Use of org.h2.mvstore.Page in project h2database by h2database.

The class MVRTreeMap, method putOrAdd.

private synchronized Object putOrAdd(SpatialKey key, V value, boolean alwaysAdd) {
    beforeWrite();
    long v = writeVersion;
    Page p = root.copy(v);
    Object result;
    if (alwaysAdd || get(key) == null) {
        if (p.getMemory() > store.getPageSplitSize() && p.getKeyCount() > 3) {
            // only possible if this is the root, else we would have
            // split earlier (this requires that pageSplitSize is fixed)
            long totalCount = p.getTotalCount();
            Page split = split(p, v);
            Object k1 = getBounds(p);
            Object k2 = getBounds(split);
            Object[] keys = { k1, k2 };
            Page.PageReference[] children = {
                    new Page.PageReference(p, p.getPos(), p.getTotalCount()),
                    new Page.PageReference(split, split.getPos(), split.getTotalCount()),
                    new Page.PageReference(null, 0, 0)
            };
            p = Page.create(this, v, keys, null, children, totalCount, 0);
            // now p is a node; continue with the add below
        }
        add(p, v, key, value);
        result = null;
    } else {
        result = set(p, v, key, value);
    }
    newRoot(p);
    return result;
}
Also used: Page (org.h2.mvstore.Page)
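
putOrAdd backs both public entry points: add() calls it with alwaysAdd = true, so duplicate keys are allowed in the R-tree, while put() takes the set path and replaces the existing value. A short sketch, reusing the store and map from the previous sketch:

SpatialKey k = new SpatialKey(1, 0f, 1f, 0f, 1f);
map.add(k, "first");  // alwaysAdd = true: inserts unconditionally
map.put(k, "second"); // replaces the value stored under an equal key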

Example 20 with Page

Use of org.h2.mvstore.Page in project h2database by h2database.

The class Page, method write.

/**
 * Store the page and update the position.
 *
 * @param chunk the chunk
 * @param buff the target buffer
 * @return the position of the buffer just after the type
 */
private int write(Chunk chunk, WriteBuffer buff) {
    int start = buff.position();
    int len = keys.length;
    int type = children != null ? DataUtils.PAGE_TYPE_NODE : DataUtils.PAGE_TYPE_LEAF;
    // reserve room for the page length (int) and check value (short);
    // both are patched in near the end of this method
    buff.putInt(0).putShort((byte) 0).putVarInt(map.getId()).putVarInt(len);
    int typePos = buff.position();
    buff.put((byte) type);
    if (type == DataUtils.PAGE_TYPE_NODE) {
        writeChildren(buff);
        for (int i = 0; i <= len; i++) {
            buff.putVarLong(children[i].count);
        }
    }
    int compressStart = buff.position();
    map.getKeyType().write(buff, keys, len, true);
    if (type == DataUtils.PAGE_TYPE_LEAF) {
        map.getValueType().write(buff, values, len, false);
    }
    MVStore store = map.getStore();
    int expLen = buff.position() - compressStart;
    if (expLen > 16) {
        int compressionLevel = store.getCompressionLevel();
        if (compressionLevel > 0) {
            Compressor compressor;
            int compressType;
            if (compressionLevel == 1) {
                compressor = map.getStore().getCompressorFast();
                compressType = DataUtils.PAGE_COMPRESSED;
            } else {
                compressor = map.getStore().getCompressorHigh();
                compressType = DataUtils.PAGE_COMPRESSED_HIGH;
            }
            byte[] exp = new byte[expLen];
            buff.position(compressStart).get(exp);
            byte[] comp = new byte[expLen * 2];
            int compLen = compressor.compress(exp, expLen, comp, 0);
            int plus = DataUtils.getVarIntLen(compLen - expLen);
            if (compLen + plus < expLen) {
                buff.position(typePos).put((byte) (type + compressType));
                buff.position(compressStart).putVarInt(expLen - compLen).put(comp, 0, compLen);
            }
        }
    }
    int pageLength = buff.position() - start;
    int chunkId = chunk.id;
    int check = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(start) ^ DataUtils.getCheckValue(pageLength);
    buff.putInt(start, pageLength).putShort(start + 4, (short) check);
    if (pos != 0) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, "Page already stored");
    }
    pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
    store.cachePage(pos, this, getMemory());
    if (type == DataUtils.PAGE_TYPE_NODE) {
        // cache again - this will make sure nodes stay in the cache
        // for a longer time
        store.cachePage(pos, this, getMemory());
    }
    long max = DataUtils.getPageMaxLength(pos);
    chunk.maxLen += max;
    chunk.maxLenLive += max;
    chunk.pageCount++;
    chunk.pageCountLive++;
    if (removedInMemory) {
        // if the page was removed _before_ the position was assigned, we
        // need to mark it removed here, so the fields are updated
        // when the next chunk is stored
        map.removePage(pos, memory);
    }
    return typePos + 1;
}
Also used: Compressor (org.h2.compress.Compressor)
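
The position assigned near the end of write packs the chunk id, the offset within the chunk, a length code, and the page type into a single long. A short sketch using the DataUtils accessors that invert this encoding:

// Decode a page position produced by DataUtils.getPagePos (chunkId,
// start, pageLength and type as in the write method above).
long pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
int decodedChunkId = DataUtils.getPageChunkId(pos);
int offset = DataUtils.getPageOffset(pos);
int maxLength = DataUtils.getPageMaxLength(pos); // coarse length code, rounded up
int pageType = DataUtils.getPageType(pos);       // 0 = leaf, 1 = node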

Aggregations (types used together with org.h2.mvstore.Page across the examples, with usage counts):

CreateTableData (org.h2.command.ddl.CreateTableData): 9
Value (org.h2.value.Value): 8
Data (org.h2.store.Data): 7
Column (org.h2.table.Column): 6
Page (org.h2.mvstore.Page): 5
IndexColumn (org.h2.table.IndexColumn): 5
ResultSet (java.sql.ResultSet): 4
SQLException (java.sql.SQLException): 4
QueryCancelledException (org.apache.ignite.cache.query.QueryCancelledException): 4
DataRegionConfiguration (org.apache.ignite.configuration.DataRegionConfiguration): 4
UnsafeMemoryProvider (org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider): 4
PageMemory (org.apache.ignite.internal.pagemem.PageMemory): 4
PageMemoryNoStoreImpl (org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl): 4
DataRegionMetricsImpl (org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl): 4
GridCacheSqlQuery (org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery): 4
Row (org.h2.result.Row): 4
Connection (java.sql.Connection): 3
ArrayList (java.util.ArrayList): 3
CRC32 (java.util.zip.CRC32): 3
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 3