Use of io.questdb.WorkerPoolAwareConfiguration in project questdb by bluestreak01.
Class LatestByParallelTest, method executeWithPool:
protected static void executeWithPool(int workerCount, int queueCapacity, LatestByRunnable runnable) throws Exception {
    executeVanilla(() -> {
        if (workerCount > 0) {
            int[] affinity = new int[workerCount];
            for (int i = 0; i < workerCount; i++) {
                affinity[i] = -1;
            }
            WorkerPool pool = new WorkerPool(new WorkerPoolAwareConfiguration() {
                @Override
                public int[] getWorkerAffinity() {
                    return affinity;
                }
                @Override
                public int getWorkerCount() {
                    return workerCount;
                }
                @Override
                public boolean haltOnError() {
                    return false;
                }
                @Override
                public boolean isEnabled() {
                    return true;
                }
            });
            final CairoConfiguration configuration = new DefaultCairoConfiguration(root) {
                @Override
                public FilesFacade getFilesFacade() {
                    return FilesFacadeImpl.INSTANCE;
                }
            };
            execute(pool, runnable, configuration);
        } else {
            // we need to create the entire engine
            final CairoConfiguration configuration = new DefaultCairoConfiguration(root) {
                @Override
                public FilesFacade getFilesFacade() {
                    return FilesFacadeImpl.INSTANCE;
                }
                @Override
                public int getLatestByQueueCapacity() {
                    return queueCapacity;
                }
            };
            execute(null, runnable, configuration);
        }
    });
}
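The anonymous WorkerPoolAwareConfiguration above is repeated almost verbatim in every example on this page; only the worker count changes. A minimal sketch of a shared factory is shown below, assuming no such helper already exists in questdb — the method name testWorkerPoolConfiguration is hypothetical, while the four overridden methods are exactly the ones these tests override.

// Hypothetical helper (not questdb API): builds the unpinned, non-halting,
// enabled test configuration that the examples on this page declare inline.
private static WorkerPoolAwareConfiguration testWorkerPoolConfiguration(int workerCount) {
    final int[] affinity = new int[workerCount];
    for (int i = 0; i < workerCount; i++) {
        // -1 leaves the worker thread unpinned (no CPU affinity)
        affinity[i] = -1;
    }
    return new WorkerPoolAwareConfiguration() {
        @Override
        public int[] getWorkerAffinity() {
            return affinity;
        }
        @Override
        public int getWorkerCount() {
            return workerCount;
        }
        @Override
        public boolean haltOnError() {
            return false;
        }
        @Override
        public boolean isEnabled() {
            return true;
        }
    };
}

With it, the pool construction above would read WorkerPool pool = new WorkerPool(testWorkerPoolConfiguration(workerCount)).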
Use of io.questdb.WorkerPoolAwareConfiguration in project questdb by bluestreak01.
Class O3FailureTest, method testOutOfFileHandles0:
private static void testOutOfFileHandles0(CairoEngine engine, SqlCompiler compiler, SqlExecutionContext executionContext) throws SqlException {
    compiler.compile("create table x as (" + "select" + " rnd_str(5,16,2) i," + " rnd_str(5,16,2) sym," + " rnd_str(5,16,2) amt," + " rnd_str(5,16,2) timestamp," + " rnd_str(5,16,2) b," + " rnd_str('ABC', 'CDE', null, 'XYZ') c," + " rnd_str(5,16,2) d," + " rnd_str(5,16,2) e," + " rnd_str(5,16,2) f," + " rnd_str(5,16,2) g," + " rnd_str(5,16,2) ik," + " rnd_str(5,16,2) j," + " timestamp_sequence(500000000000L,100000000L) ts," + " rnd_str(5,16,2) l," + " rnd_str(5,16,2) m," + " rnd_str(5,16,2) n," + " rnd_str(5,16,2) t," + " rnd_str(5,16,2) l256" + " from long_sequence(10000)" + ") timestamp (ts) partition by DAY", executionContext);
    compiler.compile("create table x1 as (x) timestamp(ts) partition by DAY", executionContext);
    compiler.compile("create table y as (" + "select" + " rnd_str(5,16,2) i," + " rnd_str(5,16,2) sym," + " rnd_str(5,16,2) amt," + " rnd_str(5,16,2) timestamp," + " rnd_str(5,16,2) b," + " rnd_str('ABC', 'CDE', null, 'XYZ') c," + " rnd_str(5,16,2) d," + " rnd_str(5,16,2) e," + " rnd_str(5,16,2) f," + " rnd_str(5,16,2) g," + " rnd_str(5,16,2) ik," + " rnd_str(5,16,2) j," + " timestamp_sequence(500000080000L,79999631L) ts," + " rnd_str(5,16,2) l," + " rnd_str(5,16,2) m," + " rnd_str(5,16,2) n," + " rnd_str(5,16,2) t," + " rnd_str(5,16,2) l256" + " from long_sequence(10000)" + ") timestamp (ts) partition by DAY", executionContext);
    compiler.compile("create table y1 as (y)", executionContext);
    // create expected result sets
    compiler.compile("create table z as (x union all y)", executionContext);
    // create another compiler to be used by second pool
    try (SqlCompiler compiler2 = new SqlCompiler(engine)) {
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final SOCountDownLatch haltLatch = new SOCountDownLatch(2);
        final AtomicInteger errorCount = new AtomicInteger();
        // we have two pairs of tables (x,y) and (x1,y1)
        WorkerPool pool1 = new WorkerPool(new WorkerPoolAwareConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }
            @Override
            public int getWorkerCount() {
                return 1;
            }
            @Override
            public boolean haltOnError() {
                return false;
            }
            @Override
            public boolean isEnabled() {
                return true;
            }
        });
        pool1.assign(new Job() {
            private boolean toRun = true;
            @Override
            public boolean run(int workerId) {
                if (toRun) {
                    try {
                        toRun = false;
                        barrier.await();
                        compiler.compile("insert into x select * from y", executionContext);
                    } catch (Throwable e) {
                        e.printStackTrace();
                        errorCount.incrementAndGet();
                    } finally {
                        haltLatch.countDown();
                    }
                }
                return false;
            }
        });
        pool1.assignCleaner(Path.CLEANER);
        final WorkerPool pool2 = new WorkerPool(new WorkerPoolConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }
            @Override
            public int getWorkerCount() {
                return 1;
            }
            @Override
            public boolean haltOnError() {
                return false;
            }
        });
        pool2.assign(new Job() {
            private boolean toRun = true;
            @Override
            public boolean run(int workerId) {
                if (toRun) {
                    try {
                        toRun = false;
                        barrier.await();
                        compiler2.compile("insert into x1 select * from y1", executionContext);
                    } catch (Throwable e) {
                        e.printStackTrace();
                        errorCount.incrementAndGet();
                    } finally {
                        haltLatch.countDown();
                    }
                }
                return false;
            }
        });
        pool2.assignCleaner(Path.CLEANER);
        pool1.start(null);
        pool2.start(null);
        haltLatch.await();
        pool1.halt();
        pool2.halt();
        Assert.assertTrue(errorCount.get() > 0);
    }
}
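Both two-pool races on this page (testOutOfFileHandles0 above and testTwoTablesCompeteForBuffer0 below) hand each pool the same kind of run-once Job: the first call to run() flips a flag, waits on the shared CyclicBarrier so the two inserts start at the same time, and reports the outcome through the AtomicInteger and the SOCountDownLatch. A minimal sketch of that pattern as a reusable helper follows; the runOnce name and the InsertTask interface are assumptions for illustration, everything else is the machinery already used above.

// Hypothetical task shape (not questdb API): allows checked exceptions such as
// SqlException from compile() to propagate into the catch block below.
interface InsertTask {
    void run() throws Exception;
}

// Hypothetical helper (not questdb API): wraps a task in a Job that runs exactly
// once, synchronised with the other pool via the barrier. The toRun flag is not
// thread-safe; like the jobs above, it relies on the pool having a single worker.
static Job runOnce(CyclicBarrier barrier, SOCountDownLatch haltLatch, AtomicInteger errorCount, InsertTask task) {
    return new Job() {
        private boolean toRun = true;
        @Override
        public boolean run(int workerId) {
            if (toRun) {
                try {
                    toRun = false;
                    barrier.await();
                    task.run();
                } catch (Throwable e) {
                    e.printStackTrace();
                    errorCount.incrementAndGet();
                } finally {
                    haltLatch.countDown();
                }
            }
            // false signals that this job produced no further work on this iteration
            return false;
        }
    };
}

With it, the first assignment above would become pool1.assign(runOnce(barrier, haltLatch, errorCount, () -> compiler.compile("insert into x select * from y", executionContext))).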
Use of io.questdb.WorkerPoolAwareConfiguration in project questdb by bluestreak01.
Class AbstractO3Test, method executeWithPool:
protected static void executeWithPool(int workerCount, O3Runnable runnable, FilesFacade ff) throws Exception {
    executeVanilla(() -> {
        if (workerCount > 0) {
            int[] affinity = new int[workerCount];
            for (int i = 0; i < workerCount; i++) {
                affinity[i] = -1;
            }
            WorkerPool pool = new WorkerPool(new WorkerPoolAwareConfiguration() {
                @Override
                public int[] getWorkerAffinity() {
                    return affinity;
                }
                @Override
                public int getWorkerCount() {
                    return workerCount;
                }
                @Override
                public boolean haltOnError() {
                    return false;
                }
                @Override
                public boolean isEnabled() {
                    return true;
                }
            });
            final CairoConfiguration configuration = new DefaultCairoConfiguration(root) {
                @Override
                public FilesFacade getFilesFacade() {
                    return ff;
                }
            };
            execute(pool, runnable, configuration);
        } else {
            // we need to create the entire engine
            final CairoConfiguration configuration = new DefaultCairoConfiguration(root) {
                @Override
                public int getO3PurgeDiscoveryQueueCapacity() {
                    return 0;
                }
                @Override
                public int getO3PurgeQueueCapacity() {
                    return 0;
                }
                @Override
                public FilesFacade getFilesFacade() {
                    return ff;
                }
                @Override
                public int getO3CallbackQueueCapacity() {
                    return 0;
                }
                @Override
                public int getO3PartitionQueueCapacity() {
                    return 0;
                }
                @Override
                public int getO3OpenColumnQueueCapacity() {
                    return 0;
                }
                @Override
                public int getO3CopyQueueCapacity() {
                    return 0;
                }
                @Override
                public int getO3PartitionUpdateQueueCapacity() {
                    return 0;
                }
            };
            execute(null, runnable, configuration);
        }
    });
}
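This method is the harness the O3 tests plug into: a test body shaped like (CairoEngine, SqlCompiler, SqlExecutionContext) — testOutOfFileHandles0 above and testTwoTablesCompeteForBuffer0 below have exactly that shape — is passed in as the O3Runnable, together with a FilesFacade through which failures can be injected. A usage sketch follows, assuming O3Runnable is a functional interface matching that parameter list; the @Test method name is hypothetical.

// Usage sketch (assumptions: the test name is made up, O3Runnable accepts the
// (engine, compiler, executionContext) method reference, and FilesFacadeImpl.INSTANCE
// means no file-system failures are injected).
@Test
public void testTwoTablesCompeteForBufferParallel() throws Exception {
    // workerCount > 0 takes the shared WorkerPool branch; 0 takes the branch
    // that also shrinks all O3 queue capacities to zero
    executeWithPool(4, O3Test::testTwoTablesCompeteForBuffer0, FilesFacadeImpl.INSTANCE);
}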
Use of io.questdb.WorkerPoolAwareConfiguration in project questdb by bluestreak01.
Class O3Test, method testTwoTablesCompeteForBuffer0:
private static void testTwoTablesCompeteForBuffer0(CairoEngine engine, SqlCompiler compiler, SqlExecutionContext executionContext) throws SqlException {
    compiler.compile("create table x as (" + "select" + " rnd_str(5,16,10) i," + " rnd_str(5,16,10) sym," + " rnd_str(5,16,10) amt," + " rnd_str(5,16,10) timestamp," + " rnd_str(5,16,10) b," + " rnd_str('ABC', 'CDE', null, 'XYZ') c," + " rnd_str(5,16,10) d," + " rnd_str(5,16,10) e," + " rnd_str(5,16,10) f," + " rnd_str(5,16,10) g," + " rnd_str(5,16,10) ik," + " rnd_str(5,16,10) j," + " timestamp_sequence(500000000000L,100000000L) ts," + " rnd_str(5,16,10) l," + " rnd_str(5,16,10) m," + " rnd_str(5,16,10) n," + " rnd_str(5,16,10) t," + " rnd_str(5,16,10) l256" + " from long_sequence(10000)" + ") timestamp (ts) partition by DAY", executionContext);
    compiler.compile("create table x1 as (x) timestamp(ts) partition by DAY", executionContext);
    compiler.compile("create table y as (" + "select" + " rnd_str(5,16,10) i," + " rnd_str(5,16,10) sym," + " rnd_str(5,16,10) amt," + " rnd_str(5,16,10) timestamp," + " rnd_str(5,16,10) b," + " rnd_str('ABC', 'CDE', null, 'XYZ') c," + " rnd_str(5,16,10) d," + " rnd_str(5,16,10) e," + " rnd_str(5,16,10) f," + " rnd_str(5,16,10) g," + " rnd_str(5,16,10) ik," + " rnd_str(5,16,10) j," + " timestamp_sequence(500000080000L,79999631L) ts," + " rnd_str(5,16,10) l," + " rnd_str(5,16,10) m," + " rnd_str(5,16,10) n," + " rnd_str(5,16,10) t," + " rnd_str(5,16,10) l256" + " from long_sequence(10000)" + ") timestamp (ts) partition by DAY", executionContext);
    compiler.compile("create table y1 as (y)", executionContext);
    // create expected result sets
    compiler.compile("create table z as (x union all y)", executionContext);
    // create another compiler to be used by second pool
    try (SqlCompiler compiler2 = new SqlCompiler(engine)) {
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final SOCountDownLatch haltLatch = new SOCountDownLatch(2);
        final AtomicInteger errorCount = new AtomicInteger();
        // we have two pairs of tables (x,y) and (x1,y1)
        WorkerPool pool1 = new WorkerPool(new WorkerPoolAwareConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }
            @Override
            public int getWorkerCount() {
                return 1;
            }
            @Override
            public boolean haltOnError() {
                return false;
            }
            @Override
            public boolean isEnabled() {
                return true;
            }
        });
        pool1.assign(new Job() {
            private boolean toRun = true;
            @Override
            public boolean run(int workerId) {
                if (toRun) {
                    try {
                        toRun = false;
                        barrier.await();
                        compiler.compile("insert into x select * from y", executionContext);
                    } catch (Throwable e) {
                        e.printStackTrace();
                        errorCount.incrementAndGet();
                    } finally {
                        haltLatch.countDown();
                    }
                }
                return false;
            }
        });
        pool1.assignCleaner(Path.CLEANER);
        final WorkerPool pool2 = new WorkerPool(new WorkerPoolConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }
            @Override
            public int getWorkerCount() {
                return 1;
            }
            @Override
            public boolean haltOnError() {
                return false;
            }
        });
        pool2.assign(new Job() {
            private boolean toRun = true;
            @Override
            public boolean run(int workerId) {
                if (toRun) {
                    try {
                        toRun = false;
                        barrier.await();
                        compiler2.compile("insert into x1 select * from y1", executionContext);
                    } catch (Throwable e) {
                        e.printStackTrace();
                        errorCount.incrementAndGet();
                    } finally {
                        haltLatch.countDown();
                    }
                }
                return false;
            }
        });
        pool2.assignCleaner(Path.CLEANER);
        pool1.start(null);
        pool2.start(null);
        haltLatch.await();
        pool1.halt();
        pool2.halt();
        Assert.assertEquals(0, errorCount.get());
        TestUtils.assertSqlCursors(compiler, executionContext, "z order by ts", "x", LOG);
        TestUtils.assertSqlCursors(compiler, executionContext, "z order by ts", "x1", LOG);
    }
}
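Unlike testOutOfFileHandles0, which expects the injected file-handle exhaustion to produce failures (errorCount > 0), this test expects both concurrent inserts to succeed and then verifies that x and x1 each match the precomputed union z once ordered by the designated timestamp. The final comparison could be captured in a small helper like the sketch below; the method name is hypothetical, while the TestUtils.assertSqlCursors call is the one used above.

// Hypothetical wrapper around the verification step above: the table should now
// contain exactly the rows of the expected union "z", in timestamp order.
private static void assertMatchesExpectedUnion(SqlCompiler compiler, SqlExecutionContext executionContext, String table) throws SqlException {
    TestUtils.assertSqlCursors(compiler, executionContext, "z order by ts", table, LOG);
}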