Example 1 with MemoryMARW

Use of io.questdb.cairo.vm.api.MemoryMARW in project questdb by bluestreak01.

From the class TxnTest, method testFailedTxWriterDoesNotCorruptTable:

@Test
public void testFailedTxWriterDoesNotCorruptTable() throws Exception {
    TestUtils.assertMemoryLeak(() -> {
        FilesFacade errorFf = new FilesFacadeImpl() {

            @Override
            public long mremap(long fd, long addr, long previousSize, long newSize, long offset, int mode, int memoryTag) {
                // simulate an OS-level remap failure so the tx file can never be extended
                return -1;
            }
        };
        FilesFacadeImpl cleanFf = new FilesFacadeImpl();
        assertMemoryLeak(() -> {
            String tableName = "txntest";
            try (Path path = new Path()) {
                try (MemoryMARW mem = Vm.getCMARWInstance();
                    TableModel model = new TableModel(configuration, tableName, PartitionBy.DAY)) {
                    model.timestamp();
                    TableUtils.createTable(configuration, mem, path, model, 1);
                }
            }
            try (Path path = new Path()) {
                path.of(configuration.getRoot()).concat(tableName);
                int testPartitionCount = 3000;
                try (TxWriter txWriter = new TxWriter(cleanFf, path, PartitionBy.DAY)) {
                    // Add lots of partitions
                    for (int i = 0; i < testPartitionCount; i++) {
                        txWriter.updatePartitionSizeByTimestamp(i * Timestamps.DAY_MICROS, i + 1);
                    }
                    txWriter.updateMaxTimestamp(testPartitionCount * Timestamps.DAY_MICROS + 1);
                    txWriter.finishPartitionSizeUpdate();
                    txWriter.commit(CommitMode.SYNC, new ObjList<>());
                }
                // Reopen without OS errors
                try (TxWriter txWriter = new TxWriter(cleanFf, path, PartitionBy.DAY)) {
                    // Read lots of partitions
                    Assert.assertEquals(testPartitionCount, txWriter.getPartitionCount());
                    for (int i = 0; i < testPartitionCount - 1; i++) {
                        Assert.assertEquals(i + 1, txWriter.getPartitionSize(i));
                    }
                }
                // Open with an OS error on file extend
                try (TxWriter ignored = new TxWriter(errorFf, path, PartitionBy.DAY)) {
                    Assert.fail("Should not be able to extend on opening");
                } catch (CairoException ex) {
                    // expected
                }
                // Reopen without OS errors
                try (TxWriter txWriter = new TxWriter(cleanFf, path, PartitionBy.DAY)) {
                    // Read lots of partitions
                    Assert.assertEquals(testPartitionCount, txWriter.getPartitionCount());
                    for (int i = 0; i < testPartitionCount - 1; i++) {
                        Assert.assertEquals(i + 1, txWriter.getPartitionSize(i));
                    }
                }
            }
        });
    });
}
Also used: Path (io.questdb.std.str.Path), FilesFacade (io.questdb.std.FilesFacade), MemoryMARW (io.questdb.cairo.vm.api.MemoryMARW), FilesFacadeImpl (io.questdb.std.FilesFacadeImpl), Test (org.junit.Test)
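
The error injection above generalizes: any FilesFacade method can be overridden to fail on demand, letting a test drive one specific I/O error path while everything else behaves normally. A minimal sketch of a variation, assuming only the FilesFacadeImpl base class used above (the fail-once counter is our own addition, not from the source):

FilesFacade failOnceFf = new FilesFacadeImpl() {
    private final java.util.concurrent.atomic.AtomicInteger remaps = new java.util.concurrent.atomic.AtomicInteger();

    @Override
    public long mremap(long fd, long addr, long previousSize, long newSize, long offset, int mode, int memoryTag) {
        // fail only the first remap, so a subsequent reopen succeeds
        if (remaps.getAndIncrement() == 0) {
            return -1;
        }
        return super.mremap(fd, addr, previousSize, newSize, offset, mode, memoryTag);
    }
};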

Example 2 with MemoryMARW

Use of io.questdb.cairo.vm.api.MemoryMARW in project questdb by bluestreak01.

From the class EngineMigration, method migrateEngineTo:

public static void migrateEngineTo(CairoEngine engine, int latestVersion, boolean force) {
    final FilesFacade ff = engine.getConfiguration().getFilesFacade();
    final CairoConfiguration configuration = engine.getConfiguration();
    int tempMemSize = 8;
    long mem = Unsafe.malloc(tempMemSize, MemoryTag.NATIVE_DEFAULT);
    try (MemoryARW virtualMem = Vm.getARWInstance(ff.getPageSize(), Integer.MAX_VALUE, MemoryTag.NATIVE_DEFAULT);
        Path path = new Path();
        MemoryMARW rwMemory = Vm.getMARWInstance()) {
        MigrationContext context = new MigrationContext(engine, mem, tempMemSize, virtualMem, rwMemory);
        path.of(configuration.getRoot());
        // check if all tables have been upgraded already
        path.concat(TableUtils.UPGRADE_FILE_NAME).$();
        final boolean existed = !force && ff.exists(path);
        long upgradeFd = openFileRWOrFail(ff, path);
        LOG.debug().$("open [fd=").$(upgradeFd).$(", path=").$(path).$(']').$();
        if (existed) {
            int currentVersion = TableUtils.readIntOrFail(ff, upgradeFd, 0, mem, path);
            if (currentVersion >= latestVersion) {
                LOG.info().$("table structures are up to date").$();
                ff.close(upgradeFd);
                upgradeFd = -1;
            }
        }
        if (upgradeFd != -1) {
            try {
                LOG.info().$("upgrading database [version=").$(latestVersion).I$();
                if (upgradeTables(context, latestVersion)) {
                    TableUtils.writeIntOrFail(ff, upgradeFd, 0, latestVersion, mem, path);
                }
            } finally {
                Vm.bestEffortClose(ff, LOG, upgradeFd, true, Integer.BYTES);
            }
        }
    } finally {
        Unsafe.free(mem, tempMemSize, MemoryTag.NATIVE_DEFAULT);
    }
}
Also used: Path (io.questdb.std.str.Path), MemoryMARW (io.questdb.cairo.vm.api.MemoryMARW), MemoryARW (io.questdb.cairo.vm.api.MemoryARW)
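
A minimal usage sketch, assuming CairoEngine is constructible from a CairoConfiguration and that ColumnType.VERSION carries the latest metadata version (both are assumptions, not shown in this example):

// Hypothetical call site: upgrade all tables under configuration.getRoot().
// With force=false the method is a no-op when the TableUtils.UPGRADE_FILE_NAME
// marker already records a version >= the target.
try (CairoEngine engine = new CairoEngine(configuration)) {
    EngineMigration.migrateEngineTo(engine, ColumnType.VERSION, false);
}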

Example 3 with MemoryMARW

Use of io.questdb.cairo.vm.api.MemoryMARW in project questdb by bluestreak01.

From the class Mig506, method migrate:

static void migrate(MigrationContext migrationContext) {
    // Update the transaction file.
    // Before: 1 int per symbol and a list of removed partitions.
    // Now: 2 ints per symbol and 4 longs per each non-removed partition.
    MigrationActions.LOG.info().$("rebuilding tx file [table=").$(migrationContext.getTablePath()).I$();
    Path path = migrationContext.getTablePath();
    final FilesFacade ff = migrationContext.getFf();
    int pathDirLen = path.length();
    path.concat(TXN_FILE_NAME).$();
    if (!ff.exists(path)) {
        MigrationActions.LOG.error().$("tx file does not exist, nothing to migrate [path=").$(path).I$();
        return;
    }
    EngineMigration.backupFile(ff, path, migrationContext.getTablePath2(), TXN_FILE_NAME, 417);
    MigrationActions.LOG.debug().$("opening for rw [path=").$(path).I$();
    try (MemoryMARW txMem = migrationContext.createRwMemoryOf(ff, path.$())) {
        long tempMem8b = migrationContext.getTempMemory(8);
        MemoryARW txFileUpdate = migrationContext.getTempVirtualMem();
        txFileUpdate.jumpTo(0);
        int symbolColumnCount = txMem.getInt(MigrationActions.TX_OFFSET_MAP_WRITER_COUNT_505);
        for (int i = 0; i < symbolColumnCount; i++) {
            final int symbolCount = txMem.getInt(MigrationActions.prefixedBlockOffset(MigrationActions.TX_OFFSET_MAP_WRITER_COUNT_505, i + 1L, Integer.BYTES));
            txFileUpdate.putInt(symbolCount);
            txFileUpdate.putInt(symbolCount);
        }
        // Write the partition segment size as 0 for now; it is patched in below once known
        long partitionSegmentOffset = txFileUpdate.getAppendOffset();
        txFileUpdate.putInt(0);
        final int partitionBy = TableUtils.readIntOrFail(ff, migrationContext.getMetadataFd(), TX_STRUCT_UPDATE_1_META_OFFSET_PARTITION_BY, tempMem8b, path);
        if (partitionBy != PartitionBy.NONE) {
            path.trimTo(pathDirLen);
            writeAttachedPartitions(ff, tempMem8b, path, txMem, partitionBy, symbolColumnCount, txFileUpdate);
        }
        long updateSize = txFileUpdate.getAppendOffset();
        long partitionSegmentSize = updateSize - partitionSegmentOffset - Integer.BYTES;
        txFileUpdate.putInt(partitionSegmentOffset, (int) partitionSegmentSize);
        // Save txFileUpdate to the tx file starting at TX_OFFSET_MAP_WRITER_COUNT_505 + 4
        long writeOffset = MigrationActions.TX_OFFSET_MAP_WRITER_COUNT_505 + Integer.BYTES;
        txMem.jumpTo(writeOffset);
        // copy the update into the tx file, page by page
        for (int i = 0; updateSize > 0; i++) {
            long writeSize = Math.min(updateSize, txFileUpdate.getPageSize());
            txMem.putBlockOfBytes(txFileUpdate.getPageAddress(i), writeSize);
            updateSize -= writeSize;
        }
        assert updateSize == 0;
    }
}
Also used: Path (io.questdb.std.str.Path), FilesFacade (io.questdb.std.FilesFacade), MemoryMARW (io.questdb.cairo.vm.api.MemoryMARW), MemoryARW (io.questdb.cairo.vm.api.MemoryARW)
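
As a rough illustration of the layout the opening comment describes, the size of the region Mig506 rewrites can be computed as follows (a sketch; the helper and its name are ours, not from the source):

// 2 ints per symbol column, then a 4-byte partition segment length,
// then 4 longs per attached (non-removed) partition.
static long txUpdateSize(int symbolColumnCount, int attachedPartitionCount) {
    long symbolBytes = 2L * Integer.BYTES * symbolColumnCount;
    long partitionBytes = 4L * Long.BYTES * attachedPartitionCount;
    return symbolBytes + Integer.BYTES + partitionBytes;
}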

Example 4 with MemoryMARW

Use of io.questdb.cairo.vm.api.MemoryMARW in project questdb by bluestreak01.

From the class Mig607, method migrate:

static void migrate(MigrationContext migrationContext) {
    final FilesFacade ff = migrationContext.getFf();
    Path path = migrationContext.getTablePath();
    int plen = path.length();
    path.trimTo(plen).concat(META_FILE_NAME).$();
    long metaFileSize;
    long txFileSize;
    try (MemoryMARW metaMem = migrationContext.getRwMemory()) {
        metaMem.of(ff, path, ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT);
        final int columnCount = metaMem.getInt(0);
        final int partitionBy = metaMem.getInt(4);
        final long columnNameOffset = MigrationActions.prefixedBlockOffset(MigrationActions.META_OFFSET_COLUMN_TYPES_606, columnCount, MigrationActions.META_COLUMN_DATA_SIZE_606);
        try (MemoryMARW txMem = new MemoryCMARWImpl(ff, path.trimTo(plen).concat(TXN_FILE_NAME).$(), ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT)) {
            // this is a variable-length file; we need the symbol map count before we can
            // locate the partition table data
            final int symbolMapCount = txMem.getInt(MigrationActions.TX_OFFSET_MAP_WRITER_COUNT_505);
            final long partitionCountOffset = MigrationActions.TX_OFFSET_MAP_WRITER_COUNT_505 + 4 + symbolMapCount * 8L;
            int partitionCount = txMem.getInt(partitionCountOffset) / Long.BYTES / LONGS_PER_TX_ATTACHED_PARTITION;
            final long transientRowCount = txMem.getLong(TX_OFFSET_TRANSIENT_ROW_COUNT);
            if (partitionBy != PartitionBy.NONE) {
                for (int partitionIndex = 0; partitionIndex < partitionCount; partitionIndex++) {
                    final long partitionDataOffset = partitionCountOffset + Integer.BYTES + partitionIndex * 8L * LONGS_PER_TX_ATTACHED_PARTITION;
                    setPathForPartition(path.trimTo(plen), partitionBy, txMem.getLong(partitionDataOffset), false);
                    // the row count may not be stored in the _txn file for the last partition;
                    // use the transient row count instead
                    long rowCount = partitionIndex < partitionCount - 1 ? txMem.getLong(partitionDataOffset + Long.BYTES) : transientRowCount;
                    long txSuffix = txMem.getLong(MigrationActions.prefixedBlockOffset(partitionDataOffset, 2, Long.BYTES));
                    if (txSuffix > -1) {
                        txnPartition(path, txSuffix);
                    }
                    migrate(ff, path, migrationContext, metaMem, columnCount, rowCount, columnNameOffset);
                }
            } else {
                path.trimTo(plen).concat(DEFAULT_PARTITION_NAME);
                migrate(ff, path, migrationContext, metaMem, columnCount, transientRowCount, columnNameOffset);
            }
            // update symbol maps
            long tmpMem = migrationContext.getTempMemory();
            int denseSymbolCount = 0;
            long currentColumnNameOffset = columnNameOffset;
            for (int i = 0; i < columnCount; i++) {
                final CharSequence columnName = metaMem.getStr(currentColumnNameOffset);
                currentColumnNameOffset += Vm.getStorageLength(columnName.length());
                if (ColumnType.tagOf(metaMem.getInt(MigrationActions.prefixedBlockOffset(MigrationActions.META_OFFSET_COLUMN_TYPES_606, i, MigrationActions.META_COLUMN_DATA_SIZE_606))) == ColumnType.SYMBOL) {
                    final int symbolCount = txMem.getInt(MigrationActions.TX_OFFSET_MAP_WRITER_COUNT_505 + 8 + denseSymbolCount * 8L);
                    final long offset = MigrationActions.prefixedBlockOffset(SymbolMapWriter.HEADER_SIZE, symbolCount, 8L);
                    SymbolMapWriter.offsetFileName(path.trimTo(plen), columnName);
                    long fd = TableUtils.openRW(ff, path, MigrationActions.LOG);
                    try {
                        long fileLen = ff.length(fd);
                        if (symbolCount > 0) {
                            if (fileLen < offset) {
                                MigrationActions.LOG.error().$("file is too short [path=").$(path).I$();
                            } else {
                                TableUtils.allocateDiskSpace(ff, fd, offset + 8);
                                long dataOffset = TableUtils.readLongOrFail(ff, fd, offset - 8L, tmpMem, path);
                                // read the last string's length from the char file to size the new offset entry
                                SymbolMapWriter.charFileName(path.trimTo(plen), columnName);
                                long fd2 = TableUtils.openRO(ff, path, MigrationActions.LOG);
                                try {
                                    long len = TableUtils.readIntOrFail(ff, fd2, dataOffset, tmpMem, path);
                                    if (len == -1) {
                                        dataOffset += 4;
                                    } else {
                                        dataOffset += 4 + len * 2L;
                                    }
                                    TableUtils.writeLongOrFail(ff, fd, offset, dataOffset, tmpMem, path);
                                } finally {
                                    ff.close(fd2);
                                }
                            }
                        }
                    } finally {
                        Vm.bestEffortClose(ff, MigrationActions.LOG, fd, true, offset + 8);
                    }
                    denseSymbolCount++;
                }
            }
            txFileSize = txMem.getAppendOffset();
        }
        metaFileSize = metaMem.getAppendOffset();
    }
    // When this migration was originally written, the MemoryMARW implementation truncated files to
    // their exact size on close; MemoryMARW now truncates to page size. To test old migrations we
    // simulate the migration as originally released, so trim the TX and META files to their sizes.
    path.trimTo(plen).concat(META_FILE_NAME).$();
    trimFile(ff, path, metaFileSize);
    path.trimTo(plen).concat(TXN_FILE_NAME).$();
    trimFile(ff, path, txFileSize);
}
Also used: Path (io.questdb.std.str.Path), FilesFacade (io.questdb.std.FilesFacade), MemoryMARW (io.questdb.cairo.vm.api.MemoryMARW), MemoryCMARWImpl (io.questdb.cairo.vm.MemoryCMARWImpl)
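
The symbol-map repair in the loop above rests on two pieces of arithmetic: the offset (.o) file stores one long per symbol after a fixed header, and in the char file each string is a 4-byte length followed by UTF-16 characters. A sketch restating both (the helper names are ours; prefixedBlockOffset and HEADER_SIZE are the ones used above):

// entry i of the offset file sits i longs past the header
static long offsetFileEntry(int symbolIndex) {
    return MigrationActions.prefixedBlockOffset(SymbolMapWriter.HEADER_SIZE, symbolIndex, 8L);
}

// a length of -1 marks a null string, which occupies the length field only
static long nextCharFileOffset(long dataOffset, int len) {
    return len == -1 ? dataOffset + 4 : dataOffset + 4 + len * 2L;
}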

Example 5 with MemoryMARW

Use of io.questdb.cairo.vm.api.MemoryMARW in project questdb by bluestreak01.

From the class Mig605, method migrate:

static void migrate(MigrationContext migrationContext) {
    MigrationActions.LOG.info().$("updating column type IDs [table=").$(migrationContext.getTablePath()).I$();
    final FilesFacade ff = migrationContext.getFf();
    Path path = migrationContext.getTablePath();
    path.concat(META_FILE_NAME).$();
    if (!ff.exists(path)) {
        MigrationActions.LOG.error().$("meta file does not exist, nothing to migrate [path=").$(path).I$();
        return;
    }
    // Metadata file should already be backed up
    try (final MemoryMARW rwMem = migrationContext.getRwMemory()) {
        rwMem.of(ff, path, ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT);
        // column count
        final int columnCount = rwMem.getInt(TableUtils.META_OFFSET_COUNT);
        long offset = TableUtils.META_OFFSET_COLUMN_TYPES;
        for (int i = 0; i < columnCount; i++) {
            final byte oldTypeId = rwMem.getByte(offset);
            final long oldFlags = rwMem.getLong(offset + 1);
            final int blockCapacity = rwMem.getInt(offset + 1 + 8);
            // the column type id is an int now; we grabbed 3 reserved bytes for extra type info
            // (for old types the extra bytes are zeros)
            // type ids shifted by 1 between ColumnType.VERSION_419 and ColumnType.VERSION_420,
            // except BINARY: old id 13, new id 18
            rwMem.putInt(offset, oldTypeId == 13 ? 18 : oldTypeId + 1);
            rwMem.putLong(offset + 4, oldFlags);
            rwMem.putInt(offset + 4 + 8, blockCapacity);
            // advance by the old TableUtils.META_COLUMN_DATA_SIZE (16 bytes)
            offset += 16;
        }
    }
}
Also used: Path (io.questdb.std.str.Path), FilesFacade (io.questdb.std.FilesFacade), MemoryMARW (io.questdb.cairo.vm.api.MemoryMARW)
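
Both the old and the new per-column metadata record occupy 16 bytes; only the field layout changes. A minimal sketch of the mapping, derived from the loop above (the helper name is ours):

// One 16-byte column record, offsets relative to the record start:
//   v419: [0] byte typeId, [1..8] long flags, [9..12] int blockCapacity, [13..15] reserved
//   v420: [0..3] int typeId, [4..11] long flags, [12..15] int blockCapacity
static int newTypeId(byte oldTypeId) {
    // every type id shifts up by 1, except BINARY: old 13, new 18
    return oldTypeId == 13 ? 18 : oldTypeId + 1;
}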

Aggregations

MemoryMARW (io.questdb.cairo.vm.api.MemoryMARW): 7
Path (io.questdb.std.str.Path): 7
FilesFacade (io.questdb.std.FilesFacade): 6
MemoryCMARWImpl (io.questdb.cairo.vm.MemoryCMARWImpl): 2
MemoryARW (io.questdb.cairo.vm.api.MemoryARW): 2
FilesFacadeImpl (io.questdb.std.FilesFacadeImpl): 1
Rnd (io.questdb.std.Rnd): 1
Test (org.junit.Test): 1