Usage of io.questdb.std.str.Path in project questdb by bluestreak01.
Class Mig605, method migrate:
/**
 * Mig605: rewrites each column entry of the _meta file in place, widening the
 * column type id from one byte to four bytes (the three freed reserved bytes
 * become extra type info, zero for legacy types). The 16-byte entry size is
 * unchanged. The metadata file is expected to be backed up already.
 */
static void migrate(MigrationContext migrationContext) {
    MigrationActions.LOG.info().$("updating column type IDs [table=").$(migrationContext.getTablePath()).I$();
    final FilesFacade ff = migrationContext.getFf();
    final Path metaPath = migrationContext.getTablePath();
    metaPath.concat(META_FILE_NAME).$();
    if (!ff.exists(metaPath)) {
        MigrationActions.LOG.error().$("meta file does not exist, nothing to migrate [path=").$(metaPath).I$();
        return;
    }
    // Metadata file should already be backed up
    try (final MemoryMARW metaMem = migrationContext.getRwMemory()) {
        metaMem.of(ff, metaPath, ff.getPageSize(), ff.length(metaPath), MemoryTag.NATIVE_DEFAULT);
        final int columnCount = metaMem.getInt(TableUtils.META_OFFSET_COUNT);
        long entryOffset = TableUtils.META_OFFSET_COLUMN_TYPES;
        for (int col = 0; col < columnCount; col++) {
            // read the whole legacy entry before overwriting it in place:
            // byte type id, long flags at +1, int index block capacity at +9
            final byte legacyTypeId = metaMem.getByte(entryOffset);
            final long flags = metaMem.getLong(entryOffset + 1);
            final int indexBlockCapacity = metaMem.getInt(entryOffset + 1 + 8);
            // ColumnType.VERSION_420 ids are VERSION_419 ids + 1,
            // except BINARY which moves from 13 to 18
            metaMem.putInt(entryOffset, legacyTypeId == 13 ? 18 : legacyTypeId + 1);
            metaMem.putLong(entryOffset + 4, flags);
            metaMem.putInt(entryOffset + 4 + 8, indexBlockCapacity);
            // old TableUtils.META_COLUMN_DATA_SIZE
            entryOffset += 16;
        }
    }
}
Usage of io.questdb.std.str.Path in project questdb by bluestreak01.
Class Mig608, method migrate:
// Mig608: grows each _meta column entry from 16 to 32 bytes (adding a random
// column hash) and grows the _txn header area before the symbol-map counts
// from offset 72 to 128, zeroing the newly reserved bytes.
static void migrate(MigrationContext migrationContext) {
// META_COLUMN_DATA_SIZE = 16 -> 32;
// TX_OFFSET_MAP_WRITER_COUNT = 72 -> 128
final FilesFacade ff = migrationContext.getFf();
final Path path = migrationContext.getTablePath();
// remember table-root length so the same Path can be re-trimmed for _txn below
final int plen = path.length();
path.concat(META_FILE_NAME).$();
if (!ff.exists(path)) {
MigrationActions.LOG.error().$("meta file does not exist, nothing to migrate [path=").$(path).I$();
return;
}
// modify metadata
try (final MemoryMARW rwMem = migrationContext.getRwMemory()) {
final long thatMetaColumnDataSize = 16;
final long thisMetaColumnDataSize = 32;
rwMem.of(ff, path, ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT);
// column count
final int columnCount = rwMem.getInt(TableUtils.META_OFFSET_COUNT);
long offset = TableUtils.META_OFFSET_COLUMN_TYPES;
// 32L here is TableUtils.META_COLUMN_DATA_SIZE at the time of writing this migration
long newNameOffset = offset + thisMetaColumnDataSize * columnCount;
// the intent is to resize the _meta file and move the variable length (names) segment
// to do that we need to work out size of the variable length segment first
long oldNameOffset = offset + thatMetaColumnDataSize * columnCount;
long o = oldNameOffset;
for (int i = 0; i < columnCount; i++) {
// each name is a length-prefixed string; advance by its storage footprint
int len = rwMem.getStrLen(o);
o += Vm.getStorageLength(len);
}
final long nameSegmentLen = o - oldNameOffset;
// resize the file so the relocated name segment fits; must happen before
// addressOf() below so both addresses are valid
rwMem.extend(newNameOffset + nameSegmentLen);
// move name segment first (regions may overlap; memmove is overlap-safe)
Vect.memmove(rwMem.addressOf(newNameOffset), rwMem.addressOf(oldNameOffset), nameSegmentLen);
// copy column information in reverse order — destination entries sit at
// higher offsets than their sources, so forward copying would clobber
// not-yet-copied source entries
o = offset + thatMetaColumnDataSize * (columnCount - 1);
long o2 = offset + thisMetaColumnDataSize * (columnCount - 1);
final Rnd rnd = SharedRandom.getRandom(migrationContext.getConfiguration());
while (o >= offset) {
// type
rwMem.putInt(o2, rwMem.getInt(o));
// flags
// NOTE(review): flags occupy a long in the entry layout, but only an int is
// read here before widening — truncates any bits above 32. Presumably all
// defined flag bits fit in the low 32 bits; confirm against the meta format.
rwMem.putLong(o2 + 4, rwMem.getInt(o + 4));
// index block capacity
rwMem.putInt(o2 + 12, rwMem.getInt(o + 12));
// column hash — newly introduced field, seeded with a random value
rwMem.putLong(o2 + 20, rnd.nextLong());
o -= thatMetaColumnDataSize;
o2 -= thisMetaColumnDataSize;
}
// set logical file size to the new end of the name segment
rwMem.jumpTo(newNameOffset + nameSegmentLen);
}
// update _txn file
path.trimTo(plen).concat(TXN_FILE_NAME).$();
if (!ff.exists(path)) {
MigrationActions.LOG.error().$("tx file does not exist, nothing to migrate [path=").$(path).I$();
return;
}
EngineMigration.backupFile(ff, path, migrationContext.getTablePath2(), TXN_FILE_NAME, 422);
MigrationActions.LOG.debug().$("opening for rw [path=").$(path).I$();
try (MemoryMARW txMem = migrationContext.createRwMemoryOf(ff, path.$())) {
// calculate size of the _txn file
final long thatTxOffsetMapWriterCount = 72;
final long thisTxOffsetMapWriterCount = 128;
final int longsPerAttachedPartition = 4;
// symbol count sits at the old writer-count offset; the partition table
// entry count follows the symbol data (8 bytes per symbol)
int symbolCount = txMem.getInt(thatTxOffsetMapWriterCount);
int partitionTableSize = txMem.getInt(thatTxOffsetMapWriterCount + 4 + symbolCount * 8L) * 8 * longsPerAttachedPartition;
// resize existing file:
// thisTxOffsetMapWriterCount + symbolCount + symbolData + partitionTableEntryCount + partitionTableSize
long thatSize = thatTxOffsetMapWriterCount + 4 + symbolCount * 8L + 4L + partitionTableSize;
long thisSize = thisTxOffsetMapWriterCount + 4 + symbolCount * 8L + 4L + partitionTableSize;
txMem.extend(thisSize);
txMem.jumpTo(thisSize);
// shift everything from the old writer-count offset up to the new one
// (overlap-safe move)
Vect.memmove(txMem.addressOf(thisTxOffsetMapWriterCount), txMem.addressOf(thatTxOffsetMapWriterCount), thatSize - thatTxOffsetMapWriterCount);
// zero out reserved area
Vect.memset(txMem.addressOf(thatTxOffsetMapWriterCount), thisTxOffsetMapWriterCount - thatTxOffsetMapWriterCount, 0);
}
}
Usage of io.questdb.std.str.Path in project questdb by bluestreak01.
Class Mig609, method migrate:
// Mig609: recomputes the fixed row count of a partitioned table by summing the
// per-partition row counts stored in the _txn partition table, and patches the
// stored value if it disagrees. Non-partitioned tables are skipped.
static void migrate(MigrationContext migrationContext) {
final FilesFacade ff = migrationContext.getFf();
final Path path = migrationContext.getTablePath();
final int plen = path.length();
path.trimTo(plen).concat(META_FILE_NAME).$();
try (MemoryMARW metaMem = migrationContext.getRwMemory()) {
metaMem.of(ff, path, ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT);
// we require partition by value to avoid processing non-partitioned tables
final int partitionBy = metaMem.getInt(4);
// NOTE: path is mutated by the trimTo/concat argument before ff.length(path)
// is evaluated (left-to-right), so length is taken of the _txn file as intended
try (MemoryMARW txMem = new MemoryCMARWImpl(ff, path.trimTo(plen).concat(TXN_FILE_NAME).$(), ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT)) {
// _txn is a variable-length file; we need the symbol map count (8 bytes
// of data each) to locate the partition table that follows it
final int symbolMapCount = txMem.getInt(TX_OFFSET_MAP_WRITER_COUNT_608);
final long partitionCountOffset = TX_OFFSET_MAP_WRITER_COUNT_608 + 4 + symbolMapCount * 8L;
// walk only non-active partitions to extract sizes; the partition table
// stores its byte size, hence the division by entry footprint
final int partitionCount = txMem.getInt(partitionCountOffset) / Long.BYTES / LONGS_PER_TX_ATTACHED_PARTITION - 1;
if (partitionBy != PartitionBy.NONE) {
long calculatedFixedRowCount = 0;
for (int partitionIndex = 0; partitionIndex < partitionCount; partitionIndex++) {
final long partitionDataOffset = partitionCountOffset + Integer.BYTES + partitionIndex * 8L * LONGS_PER_TX_ATTACHED_PARTITION;
// the row count may not be stored in _txn file for the last partition
// we need to use transient row count instead
calculatedFixedRowCount += txMem.getLong(partitionDataOffset + Long.BYTES);
}
long currentFixedRowCount = txMem.getLong(TX_OFFSET_FIXED_ROW_COUNT_505);
// patch the stored value only when it is actually out of sync
if (currentFixedRowCount != calculatedFixedRowCount) {
txMem.putLong(TX_OFFSET_FIXED_ROW_COUNT_505, calculatedFixedRowCount);
LOG.info().$("fixed row count is out [table=").$(path.trimTo(plen).$()).$(", currentFixedRowCount=").$(currentFixedRowCount).$(", calculatedFixedRowCount=").$(calculatedFixedRowCount).I$();
}
}
}
}
}
Usage of io.questdb.std.str.Path in project questdb by bluestreak01.
Class Mig505, method migrate:
/**
 * Mig505: assigns the table its id by writing the next available id into the
 * metadata file at META_OFFSET_TABLE_ID, via an already-open metadata fd.
 */
static void migrate(MigrationContext migrationContext) {
    MigrationActions.LOG.info().$("assigning table ID [table=").$(migrationContext.getTablePath()).I$();
    final FilesFacade ff = migrationContext.getFf();
    final Path tablePath = migrationContext.getTablePath();
    final long metaFd = migrationContext.getMetadataFd();
    // 8-byte scratch buffer used by writeIntOrFail for the pwrite
    final long scratchMem = migrationContext.getTempMemory(8);
    MigrationActions.LOG.info().$("setting table id in [path=").$(tablePath).I$();
    TableUtils.writeIntOrFail(ff, metaFd, META_OFFSET_TABLE_ID, migrationContext.getNextTableId(), scratchMem, tablePath);
}
Usage of io.questdb.std.str.Path in project questdb by bluestreak01.
Class LineTcpReceiverTest, method runInContext:
/**
 * Runs the supplied test body against a live LineTcpReceiver inside a
 * memory-leak-checked scope. The receiver and shared worker pool are torn
 * down (and Path thread-locals cleared) regardless of test outcome; errors
 * are logged and rethrown so the test still fails.
 */
private void runInContext(LineTcpServerAwareContext r) throws Exception {
    minIdleMsBeforeWriterRelease = 250;
    assertMemoryLeak(() -> {
        // freed in the outer finally; 4096 bytes of native path buffer
        path = new Path(4096);
        try (LineTcpReceiver lineTcpReceiver = LineTcpReceiver.create(lineConfiguration, sharedWorkerPool, LOG, engine)) {
            sharedWorkerPool.assignCleaner(Path.CLEANER);
            sharedWorkerPool.start(LOG);
            try {
                r.run(lineTcpReceiver);
            } catch (Throwable t) {
                LOG.error().$("Stopping ILP worker pool because of an error").$();
                throw t;
            } finally {
                // pool must halt before the receiver closes
                sharedWorkerPool.halt();
                Path.clearThreadLocals();
            }
        } catch (Throwable t) {
            LOG.error().$("Stopping ILP receiver because of an error").$();
            throw t;
        } finally {
            Misc.free(path);
        }
    });
}
Aggregations