Use of io.questdb.std.FilesFacade in project questdb by bluestreak01.
The class Mig608, method migrate.
static void migrate(MigrationContext migrationContext) {
    // META_COLUMN_DATA_SIZE = 16 -> 32;
    // TX_OFFSET_MAP_WRITER_COUNT = 72 -> 128
    final FilesFacade ff = migrationContext.getFf();
    final Path path = migrationContext.getTablePath();
    final int plen = path.length();
    path.concat(META_FILE_NAME).$();
    if (!ff.exists(path)) {
        MigrationActions.LOG.error().$("meta file does not exist, nothing to migrate [path=").$(path).I$();
        return;
    }
    // modify metadata
    try (final MemoryMARW rwMem = migrationContext.getRwMemory()) {
        final long thatMetaColumnDataSize = 16;
        final long thisMetaColumnDataSize = 32;
        rwMem.of(ff, path, ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT);
        // column count
        final int columnCount = rwMem.getInt(TableUtils.META_OFFSET_COUNT);
        long offset = TableUtils.META_OFFSET_COLUMN_TYPES;
        // 32L here is TableUtils.META_COLUMN_DATA_SIZE at the time of writing this migration
        long newNameOffset = offset + thisMetaColumnDataSize * columnCount;
        // the intent is to resize the _meta file and move the variable-length (names) segment;
        // to do that we need to work out the size of the variable-length segment first
        long oldNameOffset = offset + thatMetaColumnDataSize * columnCount;
        long o = oldNameOffset;
        for (int i = 0; i < columnCount; i++) {
            int len = rwMem.getStrLen(o);
            o += Vm.getStorageLength(len);
        }
        final long nameSegmentLen = o - oldNameOffset;
        // resize the file
        rwMem.extend(newNameOffset + nameSegmentLen);
        // move name segment
        Vect.memmove(rwMem.addressOf(newNameOffset), rwMem.addressOf(oldNameOffset), nameSegmentLen);
        // copy column information in reverse order
        o = offset + thatMetaColumnDataSize * (columnCount - 1);
        long o2 = offset + thisMetaColumnDataSize * (columnCount - 1);
        final Rnd rnd = SharedRandom.getRandom(migrationContext.getConfiguration());
        while (o >= offset) {
            // type
            rwMem.putInt(o2, rwMem.getInt(o));
            // flags
            rwMem.putLong(o2 + 4, rwMem.getInt(o + 4));
            // index block capacity
            rwMem.putInt(o2 + 12, rwMem.getInt(o + 12));
            // column hash
            rwMem.putLong(o2 + 20, rnd.nextLong());
            o -= thatMetaColumnDataSize;
            o2 -= thisMetaColumnDataSize;
        }
        rwMem.jumpTo(newNameOffset + nameSegmentLen);
    }
    // update _txn file
    path.trimTo(plen).concat(TXN_FILE_NAME).$();
    if (!ff.exists(path)) {
        MigrationActions.LOG.error().$("tx file does not exist, nothing to migrate [path=").$(path).I$();
        return;
    }
    EngineMigration.backupFile(ff, path, migrationContext.getTablePath2(), TXN_FILE_NAME, 422);
    MigrationActions.LOG.debug().$("opening for rw [path=").$(path).I$();
    try (MemoryMARW txMem = migrationContext.createRwMemoryOf(ff, path.$())) {
        // calculate the size of the _txn file
        final long thatTxOffsetMapWriterCount = 72;
        final long thisTxOffsetMapWriterCount = 128;
        final int longsPerAttachedPartition = 4;
        int symbolCount = txMem.getInt(thatTxOffsetMapWriterCount);
        int partitionTableSize = txMem.getInt(thatTxOffsetMapWriterCount + 4 + symbolCount * 8L) * 8 * longsPerAttachedPartition;
        // resize the existing file:
        // thisTxOffsetMapWriterCount + symbolCount + symbolData + partitionTableEntryCount + partitionTableSize
        long thatSize = thatTxOffsetMapWriterCount + 4 + symbolCount * 8L + 4L + partitionTableSize;
        long thisSize = thisTxOffsetMapWriterCount + 4 + symbolCount * 8L + 4L + partitionTableSize;
        txMem.extend(thisSize);
        txMem.jumpTo(thisSize);
        Vect.memmove(txMem.addressOf(thisTxOffsetMapWriterCount), txMem.addressOf(thatTxOffsetMapWriterCount), thatSize - thatTxOffsetMapWriterCount);
        // zero out the reserved area
        Vect.memset(txMem.addressOf(thatTxOffsetMapWriterCount), thisTxOffsetMapWriterCount - thatTxOffsetMapWriterCount, 0);
    }
}
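For orientation, the resize arithmetic above can be reproduced in isolation. The following is a minimal standalone sketch, not QuestDB code: the column-types offset, column count, and name-segment length are hypothetical values, and it only shows how far the variable-length name segment shifts when each column record grows from 16 to 32 bytes and what the extended _meta size becomes.

public class Mig608SizeSketch {
    public static void main(String[] args) {
        final long oldRecordSize = 16;      // META_COLUMN_DATA_SIZE before the migration
        final long newRecordSize = 32;      // META_COLUMN_DATA_SIZE after the migration
        final long columnTypesOffset = 128; // hypothetical stand-in for TableUtils.META_OFFSET_COLUMN_TYPES
        final int columnCount = 5;          // hypothetical table
        final long nameSegmentLen = 120;    // hypothetical total length of the stored column names

        long oldNameOffset = columnTypesOffset + oldRecordSize * columnCount;
        long newNameOffset = columnTypesOffset + newRecordSize * columnCount;

        // the variable-length name segment moves forward by 16 bytes per column
        System.out.println("name segment shift = " + (newNameOffset - oldNameOffset));
        // the file is extended to hold the widened records plus the relocated names
        System.out.println("new _meta size     = " + (newNameOffset + nameSegmentLen));
    }
}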
Use of io.questdb.std.FilesFacade in project questdb by bluestreak01.
The class Mig609, method migrate.
static void migrate(MigrationContext migrationContext) {
    final FilesFacade ff = migrationContext.getFf();
    final Path path = migrationContext.getTablePath();
    final int plen = path.length();
    path.trimTo(plen).concat(META_FILE_NAME).$();
    try (MemoryMARW metaMem = migrationContext.getRwMemory()) {
        metaMem.of(ff, path, ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT);
        // we require the partitionBy value to avoid processing non-partitioned tables
        final int partitionBy = metaMem.getInt(4);
        try (MemoryMARW txMem = new MemoryCMARWImpl(ff, path.trimTo(plen).concat(TXN_FILE_NAME).$(), ff.getPageSize(), ff.length(path), MemoryTag.NATIVE_DEFAULT)) {
            // this is a variable-length file; we need the symbol map count before we get to the partition
            // table data
            final int symbolMapCount = txMem.getInt(TX_OFFSET_MAP_WRITER_COUNT_608);
            final long partitionCountOffset = TX_OFFSET_MAP_WRITER_COUNT_608 + 4 + symbolMapCount * 8L;
            // walk only non-active partitions to extract sizes
            final int partitionCount = txMem.getInt(partitionCountOffset) / Long.BYTES / LONGS_PER_TX_ATTACHED_PARTITION - 1;
            if (partitionBy != PartitionBy.NONE) {
                long calculatedFixedRowCount = 0;
                for (int partitionIndex = 0; partitionIndex < partitionCount; partitionIndex++) {
                    final long partitionDataOffset = partitionCountOffset + Integer.BYTES + partitionIndex * 8L * LONGS_PER_TX_ATTACHED_PARTITION;
                    // the row count may not be stored in the _txn file for the last partition;
                    // we need to use the transient row count instead
                    calculatedFixedRowCount += txMem.getLong(partitionDataOffset + Long.BYTES);
                }
                long currentFixedRowCount = txMem.getLong(TX_OFFSET_FIXED_ROW_COUNT_505);
                if (currentFixedRowCount != calculatedFixedRowCount) {
                    txMem.putLong(TX_OFFSET_FIXED_ROW_COUNT_505, calculatedFixedRowCount);
                    LOG.info().$("fixed row count is out [table=").$(path.trimTo(plen).$()).$(", currentFixedRowCount=").$(currentFixedRowCount).$(", calculatedFixedRowCount=").$(calculatedFixedRowCount).I$();
                }
            }
        }
    }
}
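A minimal sketch of the repair logic, in plain Java rather than the QuestDB memory API: the fixed row count is recomputed as the sum of the row counts of all partitions except the last (active) one, whose rows are tracked by the transient row count. The per-partition row counts below are hypothetical.

public class Mig609RowCountSketch {
    public static void main(String[] args) {
        // hypothetical per-partition row counts as they might appear in a _txn partition table;
        // the last entry belongs to the active partition and is excluded from the fixed count
        long[] partitionRowCounts = {1_000, 2_000, 3_000, 500};

        long calculatedFixedRowCount = 0;
        for (int i = 0; i < partitionRowCounts.length - 1; i++) {
            calculatedFixedRowCount += partitionRowCounts[i];
        }
        System.out.println("calculatedFixedRowCount = " + calculatedFixedRowCount); // 6000
    }
}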
Use of io.questdb.std.FilesFacade in project questdb by bluestreak01.
The class Mig505, method migrate.
static void migrate(MigrationContext migrationContext) {
    MigrationActions.LOG.info().$("assigning table ID [table=").$(migrationContext.getTablePath()).I$();
    final long mem = migrationContext.getTempMemory(8);
    final FilesFacade ff = migrationContext.getFf();
    final Path path = migrationContext.getTablePath();
    final long fd = migrationContext.getMetadataFd();
    MigrationActions.LOG.info().$("setting table id in [path=").$(path).I$();
    TableUtils.writeIntOrFail(ff, fd, META_OFFSET_TABLE_ID, migrationContext.getNextTableId(), mem, path);
}
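The write itself is delegated to TableUtils.writeIntOrFail. As a rough standalone analogue, assuming nothing about the QuestDB call beyond what the snippet shows, a positional 4-byte write at a fixed metadata offset can be sketched with plain java.nio; the file name, offset, and id below are hypothetical.

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;

public class WriteTableIdSketch {
    public static void main(String[] args) throws Exception {
        final long metaOffsetTableId = 16; // hypothetical offset of the table id field
        final int nextTableId = 42;        // hypothetical id
        try (RandomAccessFile raf = new RandomAccessFile("_meta", "rw");
             FileChannel ch = raf.getChannel()) {
            ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN);
            buf.putInt(nextTableId).flip();
            ch.write(buf, metaOffsetTableId); // positional write; throws IOException on failure
        }
    }
}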
Use of io.questdb.std.FilesFacade in project questdb by bluestreak01.
The class TxnScoreboardTest, method testCleanFailsNoResourceLeak.
@Test
public void testCleanFailsNoResourceLeak() throws Exception {
    TestUtils.assertMemoryLeak(() -> {
        FilesFacade ff = new FilesFacadeImpl() {
            @Override
            public long openCleanRW(LPSZ name, long fd) {
                return -1;
            }
        };
        assertMemoryLeak(() -> {
            try (final Path shmPath = new Path()) {
                try (TxnScoreboard ignored = new TxnScoreboard(ff, shmPath.of(root), 2048)) {
                    Assert.fail();
                } catch (CairoException ex) {
                    TestUtils.assertContains(ex.getFlyweightMessage(), "could not open read-write with clean allocation");
                }
            }
        });
    });
}
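The test illustrates a failure-injection pattern: override a single FilesFacade method so the low-level call fails, then assert that construction throws and nothing leaks. A framework-free sketch of the same pattern follows; all types and names in it are hypothetical stand-ins rather than QuestDB classes.

public class FailureInjectionSketch {
    // hypothetical stand-in for a FilesFacade-like dependency
    interface Files {
        long openCleanRW(String name);
    }

    // hypothetical resource that fails fast when the open call fails
    static final class Scoreboard implements AutoCloseable {
        Scoreboard(Files files, String name) {
            if (files.openCleanRW(name) < 0) {
                throw new IllegalStateException("could not open read-write with clean allocation");
            }
        }

        @Override
        public void close() {
        }
    }

    public static void main(String[] args) {
        Files failing = name -> -1; // inject the failure, as the test does via openCleanRW
        try (Scoreboard ignored = new Scoreboard(failing, "scoreboard")) {
            throw new AssertionError("constructor should have failed");
        } catch (IllegalStateException expected) {
            System.out.println("failed as expected: " + expected.getMessage());
        }
    }
}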
Use of io.questdb.std.FilesFacade in project questdb by bluestreak01.
The class LineTCPSenderMainVarLenStrings, method main.
public static void main(String[] args) {
    final long count = 2_000_000_000L;
    String hostIPv4 = "127.0.0.1";
    int port = 9009;
    int bufferCapacity = 64;
    final Rnd rnd = new Rnd();
    long start = System.nanoTime();
    FilesFacade ff = new FilesFacadeImpl();
    try (Path path = new Path()) {
        long logFd = -1;
        if (args.length == 1) {
            path.put(args[0]).$();
            logFd = ff.openRW(path);
        }
        try (LineTcpSender sender = new LoggingLineTcpSender(Net.parseIPv4(hostIPv4), port, bufferCapacity, logFd, ff)) {
            for (int i = 0; i < count; i++) {
                sender.metric("md_msgs");
                sender.field("ts_nsec", rnd.nextPositiveLong())
                        .field("pkt_size", rnd.nextPositiveInt())
                        .field("pcap_file", nextString(rnd.nextPositiveInt() % 64, rnd))
                        .field("raw_msg", nextString(rnd.nextPositiveInt() % 512, rnd))
                        .field("Length", rnd.nextInt())
                        .field("MsgSeqNum", i)
                        .field("MsgType", rnd.nextInt() % 1000)
                        .field("src_ip", rnd.nextString(rnd.nextPositiveInt() % 16))
                        .field("dst_ip", rnd.nextString(rnd.nextPositiveInt() % 16))
                        .field("src_port", rnd.nextInt() % 10000)
                        .field("dst_port", rnd.nextInt() % 10000)
                        .field("first_dir", rnd.nextBoolean())
                        .$(i * 10_000_000L);
            }
            sender.flush();
        } finally {
            if (logFd > 0) {
                ff.close(logFd);
            }
        }
    }
    System.out.println("Actual rate: " + (count * 1_000_000_000L / (System.nanoTime() - start)));
}
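The closing print derives a rows-per-second figure from the nanosecond clock. A tiny sketch of that arithmetic with hypothetical numbers:

public class RateSketch {
    public static void main(String[] args) {
        final long rows = 2_000_000_000L;             // rows sent, as in the generator above
        final long elapsedNanos = 1_800_000_000_000L; // hypothetical 30 minutes of wall-clock time
        // multiply first so integer division does not truncate the result
        long rowsPerSecond = rows * 1_000_000_000L / elapsedNanos;
        System.out.println("Actual rate: " + rowsPerSecond);
    }
}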