Example 1 with IntIntHashMap

Use of org.h2.util.IntIntHashMap in the h2database project.

From class AggregateDataSelectivity, method add():

@Override
void add(Database database, int dataType, boolean distinct, Value v) {
    count++;
    if (distinctHashes == null) {
        distinctHashes = new IntIntHashMap();
    }
    int size = distinctHashes.size();
    if (size > Constants.SELECTIVITY_DISTINCT_COUNT) {
        distinctHashes = new IntIntHashMap();
        m2 += size;
    }
    int hash = v.hashCode();
    // the value -1 is not supported: it is IntIntHashMap.NOT_FOUND, so store 1
    distinctHashes.put(hash, 1);
}
Also used: IntIntHashMap (org.h2.util.IntIntHashMap)
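
The state accumulated here (count, m2, and the current hash set) feeds H2's selectivity estimate, a percentage from 0 to 100 that the optimizer uses when choosing indexes. As a rough sketch of how such an estimate could be derived from these fields; estimateSelectivity() is a hypothetical helper, not the actual AggregateDataSelectivity code:

// Hypothetical helper (not the real H2 method): estimate selectivity as the
// percentage of rows with distinct values, using the fields from add() above.
int estimateSelectivity() {
    if (count == 0) {
        return 0;
    }
    // m2 counts hashes from discarded sets; add the hashes still in the map
    long distinct = m2 + (distinctHashes == null ? 0 : distinctHashes.size());
    distinct = Math.min(distinct, count);
    int s = (int) (100 * distinct / count);
    // report at least 1 so the optimizer never assumes "no distinct values"
    return Math.max(1, Math.min(100, s));
}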

Example 2 with IntIntHashMap

Use of org.h2.util.IntIntHashMap in the h2database project.

From class TestIntIntHashMap, method test():

@Override
public void test() {
    IntIntHashMap map = new IntIntHashMap();
    map.put(1, 1);
    map.put(1, 2); // same key again: must overwrite, not grow the map
    assertEquals(1, map.size());
    map.put(0, 1);
    map.put(0, 2); // key 0 must behave like any other key
    assertEquals(2, map.size());
    rand.setSeed(10);
    test(true);
    test(false);
}
Also used: IntIntHashMap (org.h2.util.IntIntHashMap)
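
For reference, the contract these assertions rely on, as a standalone sketch (only the put/get behavior shown in these examples is assumed): put() overwrites an existing key, key 0 is an ordinary key, and get() returns IntIntHashMap.NOT_FOUND, which is -1, for absent keys; this is also why the value -1 itself must not be stored.

IntIntHashMap map = new IntIntHashMap();
map.put(1, 1);
map.put(1, 2);             // overwrite: size stays 1, value becomes 2
map.put(0, 5);             // key 0 works like any other key
int missing = map.get(99); // IntIntHashMap.NOT_FOUND, i.e. -1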

Example 3 with IntIntHashMap

Use of org.h2.util.IntIntHashMap in the h2database project.

From class PageStore, method compact():

/**
 * Shrink the file so there are no empty pages at the end.
 *
 * @param compactMode 0 if no compacting should happen, otherwise
 * CommandInterface.SHUTDOWN_COMPACT or CommandInterface.SHUTDOWN_DEFRAG
 */
public synchronized void compact(int compactMode) {
    if (!database.getSettings().pageStoreTrim) {
        return;
    }
    if (SysProperties.MODIFY_ON_WRITE && readMode && compactMode == 0) {
        return;
    }
    openForWriting();
    // find the last used page
    int lastUsed = -1;
    for (int i = getFreeListId(pageCount); i >= 0; i--) {
        lastUsed = getFreeList(i).getLastUsed();
        if (lastUsed != -1) {
            break;
        }
    }
    // open a new log at the very end
    // (to be truncated later)
    writeBack();
    log.free();
    recoveryRunning = true;
    try {
        logFirstTrunkPage = lastUsed + 1;
        allocatePage(logFirstTrunkPage);
        log.openForWriting(logFirstTrunkPage, true);
        // ensure the free list is backed up again
        log.checkpoint();
    } finally {
        recoveryRunning = false;
    }
    long start = System.nanoTime();
    boolean isCompactFully = compactMode == CommandInterface.SHUTDOWN_COMPACT;
    boolean isDefrag = compactMode == CommandInterface.SHUTDOWN_DEFRAG;
    if (database.getSettings().defragAlways) {
        isCompactFully = isDefrag = true;
    }
    int maxCompactTime = database.getSettings().maxCompactTime;
    int maxMove = database.getSettings().maxCompactCount;
    if (isCompactFully || isDefrag) {
        maxCompactTime = Integer.MAX_VALUE;
        maxMove = Integer.MAX_VALUE;
    }
    int blockSize = isCompactFully ? COMPACT_BLOCK_SIZE : 1;
    int firstFree = MIN_PAGE_COUNT;
    for (int x = lastUsed, j = 0; x > MIN_PAGE_COUNT && j < maxMove; x -= blockSize) {
        for (int full = x - blockSize + 1; full <= x; full++) {
            if (full > MIN_PAGE_COUNT && isUsed(full)) {
                synchronized (this) {
                    firstFree = getFirstFree(firstFree);
                    if (firstFree == -1 || firstFree >= full) {
                        j = maxMove;
                        break;
                    }
                    if (compact(full, firstFree)) {
                        j++;
                        long now = System.nanoTime();
                        // compare elapsed nanos; subtract first, since
                        // nanoTime() values themselves may wrap around
                        if (now - start > TimeUnit.MILLISECONDS.toNanos(maxCompactTime)) {
                            j = maxMove;
                            break;
                        }
                    }
                }
            }
        }
    }
    if (isDefrag) {
        // defrag: scan all tables and indexes to record the page access
        // order, then rearrange the pages to match that order
        log.checkpoint();
        writeBack();
        cache.clear();
        ArrayList<Table> tables = database.getAllTablesAndViews(false);
        recordedPagesList = New.arrayList();
        recordedPagesIndex = new IntIntHashMap();
        recordPageReads = true;
        Session sysSession = database.getSystemSession();
        for (Table table : tables) {
            if (!table.isTemporary() && TableType.TABLE == table.getTableType()) {
                Index scanIndex = table.getScanIndex(sysSession);
                Cursor cursor = scanIndex.find(sysSession, null, null);
                while (cursor.next()) {
                    cursor.get();
                }
                for (Index index : table.getIndexes()) {
                    if (index != scanIndex && index.canScan()) {
                        cursor = index.find(sysSession, null, null);
                        while (cursor.next()) {
                            // the data is already read; iterating records the page reads
                        }
                    }
                }
            }
        }
        recordPageReads = false;
        int target = MIN_PAGE_COUNT - 1;
        int temp = 0;
        for (int i = 0, size = recordedPagesList.size(); i < size; i++) {
            log.checkpoint();
            writeBack();
            int source = recordedPagesList.get(i);
            Page pageSource = getPage(source);
            if (!pageSource.canMove()) {
                continue;
            }
            while (true) {
                Page pageTarget = getPage(++target);
                if (pageTarget == null || pageTarget.canMove()) {
                    break;
                }
            }
            if (target == source) {
                continue;
            }
            temp = getFirstFree(temp);
            if (temp == -1) {
                DbException.throwInternalError("no free page for defrag");
            }
            cache.clear();
            swap(source, target, temp);
            int index = recordedPagesIndex.get(target);
            if (index != IntIntHashMap.NOT_FOUND) {
                recordedPagesList.set(index, source);
                recordedPagesIndex.put(source, index);
            }
            recordedPagesList.set(i, target);
            recordedPagesIndex.put(target, i);
        }
        recordedPagesList = null;
        recordedPagesIndex = null;
    }
    // TODO can most likely be simplified
    checkpoint();
    log.checkpoint();
    writeIndexRowCounts();
    log.checkpoint();
    writeBack();
    commit(pageStoreSession);
    writeBack();
    log.checkpoint();
    log.free();
    // truncate the log
    recoveryRunning = true;
    try {
        setLogFirstPage(++logKey, 0, 0);
    } finally {
        recoveryRunning = false;
    }
    writeBack();
    for (int i = getFreeListId(pageCount); i >= 0; i--) {
        lastUsed = getFreeList(i).getLastUsed();
        if (lastUsed != -1) {
            break;
        }
    }
    int newPageCount = lastUsed + 1;
    if (newPageCount < pageCount) {
        freed.set(newPageCount, pageCount, false);
    }
    pageCount = newPageCount;
    // the easiest way to remove superfluous entries
    freeLists.clear();
    trace.debug("pageCount: " + pageCount);
    long newLength = (long) pageCount << pageSizeShift;
    if (file.length() != newLength) {
        file.setLength(newLength);
        writeCount++;
    }
}
Also used: IntIntHashMap (org.h2.util.IntIntHashMap), RegularTable (org.h2.table.RegularTable), Table (org.h2.table.Table), Index (org.h2.index.Index), PageIndex (org.h2.index.PageIndex), PageDelegateIndex (org.h2.index.PageDelegateIndex), MultiVersionIndex (org.h2.index.MultiVersionIndex), PageBtreeIndex (org.h2.index.PageBtreeIndex), PageDataIndex (org.h2.index.PageDataIndex), Cursor (org.h2.index.Cursor), Session (org.h2.engine.Session)
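
Both compact modes can be reached from SQL: H2's SHUTDOWN COMPACT and SHUTDOWN DEFRAG statements close the database with the corresponding compactMode. A minimal JDBC sketch (the database URL jdbc:h2:./test is a placeholder; Connection, DriverManager, and Statement come from java.sql):

// Compact (or defragment) the database file while shutting it down.
try (Connection conn = DriverManager.getConnection("jdbc:h2:./test");
        Statement stat = conn.createStatement()) {
    stat.execute("SHUTDOWN COMPACT"); // or: stat.execute("SHUTDOWN DEFRAG")
}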

Example 4 with IntIntHashMap

Use of org.h2.util.IntIntHashMap in the h2database project.

From class TestIntIntHashMap, method test(boolean):

private void test(boolean random) {
    int len = 2000;
    int[] x = new int[len];
    for (int i = 0; i < len; i++) {
        int key = random ? rand.nextInt() : i;
        x[i] = key;
    }
    IntIntHashMap map = new IntIntHashMap();
    for (int i = 0; i < len; i++) {
        map.put(x[i], i);
    }
    for (int i = 0; i < len; i++) {
        if (map.get(x[i]) != i) {
            throw new AssertionError("get " + x[i] + " = " + map.get(x[i]) + " should be " + i);
        }
    }
    for (int i = 1; i < len; i += 2) {
        map.remove(x[i]);
    }
    for (int i = 1; i < len; i += 2) {
        if (map.get(x[i]) != -1) {
            throw new AssertionError("get " + x[i] + " = " + map.get(x[i]) + " should be -1");
        }
    }
    for (int i = 1; i < len; i += 2) {
        map.put(x[i], i);
    }
    for (int i = 0; i < len; i++) {
        if (map.get(x[i]) != i) {
            throw new AssertionError("get " + x[i] + " = " + map.get(x[i]) + " should be " + i);
        }
    }
}
Also used: IntIntHashMap (org.h2.util.IntIntHashMap)
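
The middle loops of this method exercise deleted-slot handling: after remove(), a key must read as absent, and a later put() of the same key must be found again. Isolated as a minimal round trip, assuming only the put/get/remove contract shown above:

IntIntHashMap map = new IntIntHashMap();
map.put(7, 70);
map.remove(7);
assert map.get(7) == IntIntHashMap.NOT_FOUND; // reads as absent (-1)
map.put(7, 71);
assert map.get(7) == 71;                      // re-inserted key is found again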

Aggregations

IntIntHashMap (org.h2.util.IntIntHashMap): 4
Session (org.h2.engine.Session): 1
Cursor (org.h2.index.Cursor): 1
Index (org.h2.index.Index): 1
MultiVersionIndex (org.h2.index.MultiVersionIndex): 1
PageBtreeIndex (org.h2.index.PageBtreeIndex): 1
PageDataIndex (org.h2.index.PageDataIndex): 1
PageDelegateIndex (org.h2.index.PageDelegateIndex): 1
PageIndex (org.h2.index.PageIndex): 1
RegularTable (org.h2.table.RegularTable): 1
Table (org.h2.table.Table): 1