Use of io.questdb.mp.WorkerPool in project questdb by bluestreak01.
Example: class WorkerPoolAwareConfiguration, method create.
@Nullable
static <T extends Closeable, C extends WorkerPoolAwareConfiguration> T create(
        C configuration,
        WorkerPool sharedWorkerPool,
        Log log,
        CairoEngine cairoEngine,
        ServerFactory<T, C> factory,
        FunctionFactoryCache functionFactoryCache,
        Metrics metrics
) {
    final T server;
    if (configuration.isEnabled()) {
        final WorkerPool localPool = configureWorkerPool(configuration, sharedWorkerPool);
        final boolean local = localPool != sharedWorkerPool;
        server = factory.create(configuration, cairoEngine, localPool, local, functionFactoryCache, metrics);
        if (local) {
            localPool.start(log);
        }
        return server;
    }
    return null;
}
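For orientation, the sketch below shows one way a caller might invoke this helper. It is an illustration only: MyServer and serverConfiguration are hypothetical names, and it assumes ServerFactory can be supplied as a lambda with the six parameters passed to factory.create above.

// Hypothetical call site; MyServer and serverConfiguration are placeholders.
final MyServer server = WorkerPoolAwareConfiguration.create(
        serverConfiguration,   // supplies isEnabled() and the pool settings
        sharedWorkerPool,      // reused unless the configuration asks for its own pool
        log,
        cairoEngine,
        (conf, engine, pool, poolIsLocal, ffCache, m) -> new MyServer(conf, engine, pool),
        functionFactoryCache,
        metrics);
// create() returns null when the configuration is disabled, so check the result.
if (server != null) {
    // server is live; a dedicated ("local") pool, if one was created, is already started
}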
Use of io.questdb.mp.WorkerPool in project questdb by bluestreak01.
Example: class EmbeddedApiTest, method testConcurrentSQLExec.
@Test
public void testConcurrentSQLExec() throws Exception {
    final CairoConfiguration configuration = new DefaultCairoConfiguration(temp.getRoot().getAbsolutePath());
    final Log log = LogFactory.getLog("testConcurrentSQLExec");
    TestUtils.assertMemoryLeak(() -> {
        WorkerPool workerPool = new WorkerPool(new WorkerPoolConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1, -1 };
            }

            @Override
            public int getWorkerCount() {
                return 2;
            }

            @Override
            public boolean haltOnError() {
                return false;
            }
        });
        Rnd rnd = new Rnd();
        try (final CairoEngine engine = new CairoEngine(configuration)) {
            workerPool.assign(new GroupByJob(engine.getMessageBus()));
            workerPool.start(log);
            try {
                // number of cores is current thread + workers in the pool
                final SqlExecutionContextImpl ctx = new SqlExecutionContextImpl(engine, 2);
                try (SqlCompiler compiler = new SqlCompiler(engine)) {
                    compiler.compile("create table abc (g double, ts timestamp) timestamp(ts) partition by DAY", ctx);
                    long timestamp = 0;
                    try (TableWriter writer = engine.getWriter(ctx.getCairoSecurityContext(), "abc", "testing")) {
                        for (int i = 0; i < 10_000_000; i++) {
                            TableWriter.Row row = writer.newRow(timestamp);
                            row.putDouble(0, rnd.nextDouble());
                            row.append();
                            timestamp += 1_000_000;
                        }
                        writer.commit();
                    }
                    try (RecordCursorFactory factory = compiler.compile("select sum(g) from abc", ctx).getRecordCursorFactory()) {
                        try (RecordCursor cursor = factory.getCursor(ctx)) {
                            final Record record = cursor.getRecord();
                            //noinspection StatementWithEmptyBody
                            while (cursor.hasNext()) {
                                // access the 'record' instance for field values
                            }
                        }
                    }
                }
            } finally {
                workerPool.halt();
            }
        }
    });
}
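Stripped of the table setup and query, the WorkerPool usage in this test reduces to the sketch below. It uses only the calls that appear above; MyJob is a placeholder for any io.questdb.mp.Job implementation.

// Minimal WorkerPool lifecycle; MyJob is a hypothetical Job implementation.
WorkerPool pool = new WorkerPool(new WorkerPoolConfiguration() {
    @Override
    public int[] getWorkerAffinity() {
        return new int[] { -1, -1 }; // -1: do not pin workers to a CPU
    }

    @Override
    public int getWorkerCount() {
        return 2;
    }

    @Override
    public boolean haltOnError() {
        return false;
    }
});
pool.assign(new MyJob());
pool.start(log);
try {
    // run queries while the pool's workers pick up the parallel parts
} finally {
    pool.halt(); // always stop the workers, even if the body throws
}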
Use of io.questdb.mp.WorkerPool in project questdb by bluestreak01.
Example: class LatestByParallelTest, method executeWithPool.
protected static void executeWithPool(int workerCount, int queueCapacity, LatestByRunnable runnable) throws Exception {
    executeVanilla(() -> {
        if (workerCount > 0) {
            int[] affinity = new int[workerCount];
            for (int i = 0; i < workerCount; i++) {
                affinity[i] = -1;
            }

            WorkerPool pool = new WorkerPool(new WorkerPoolAwareConfiguration() {
                @Override
                public int[] getWorkerAffinity() {
                    return affinity;
                }

                @Override
                public int getWorkerCount() {
                    return workerCount;
                }

                @Override
                public boolean haltOnError() {
                    return false;
                }

                @Override
                public boolean isEnabled() {
                    return true;
                }
            });

            final CairoConfiguration configuration = new DefaultCairoConfiguration(root) {
                @Override
                public FilesFacade getFilesFacade() {
                    return FilesFacadeImpl.INSTANCE;
                }
            };
            execute(pool, runnable, configuration);
        } else {
            // we need to create the entire engine
            final CairoConfiguration configuration = new DefaultCairoConfiguration(root) {
                @Override
                public FilesFacade getFilesFacade() {
                    return FilesFacadeImpl.INSTANCE;
                }

                @Override
                public int getLatestByQueueCapacity() {
                    return queueCapacity;
                }
            };
            execute(null, runnable, configuration);
        }
    });
}
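Callers would exercise both branches roughly as sketched below. The three-argument lambda shape for LatestByRunnable is an assumption inferred from the execute(pool, runnable, configuration) call, not taken verbatim from the QuestDB sources.

// Assumed LatestByRunnable shape: (engine, compiler, sqlExecutionContext) -> { ... }
executeWithPool(0, 32, (engine, compiler, sqlExecutionContext) -> {
    // workerCount == 0: runs without a pool; getLatestByQueueCapacity() returns 32
});
executeWithPool(4, 32, (engine, compiler, sqlExecutionContext) -> {
    // workerCount == 4: runs with a four-worker pool, every worker unpinned (affinity -1)
});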
Use of io.questdb.mp.WorkerPool in project questdb by bluestreak01.
Example: class O3FailureTest, method testOutOfFileHandles0.
private static void testOutOfFileHandles0(CairoEngine engine, SqlCompiler compiler, SqlExecutionContext executionContext) throws SqlException {
    compiler.compile(
            "create table x as (" +
                    "select" +
                    " rnd_str(5,16,2) i," +
                    " rnd_str(5,16,2) sym," +
                    " rnd_str(5,16,2) amt," +
                    " rnd_str(5,16,2) timestamp," +
                    " rnd_str(5,16,2) b," +
                    " rnd_str('ABC', 'CDE', null, 'XYZ') c," +
                    " rnd_str(5,16,2) d," +
                    " rnd_str(5,16,2) e," +
                    " rnd_str(5,16,2) f," +
                    " rnd_str(5,16,2) g," +
                    " rnd_str(5,16,2) ik," +
                    " rnd_str(5,16,2) j," +
                    " timestamp_sequence(500000000000L,100000000L) ts," +
                    " rnd_str(5,16,2) l," +
                    " rnd_str(5,16,2) m," +
                    " rnd_str(5,16,2) n," +
                    " rnd_str(5,16,2) t," +
                    " rnd_str(5,16,2) l256" +
                    " from long_sequence(10000)" +
                    ") timestamp (ts) partition by DAY",
            executionContext
    );
    compiler.compile("create table x1 as (x) timestamp(ts) partition by DAY", executionContext);
    compiler.compile(
            "create table y as (" +
                    "select" +
                    " rnd_str(5,16,2) i," +
                    " rnd_str(5,16,2) sym," +
                    " rnd_str(5,16,2) amt," +
                    " rnd_str(5,16,2) timestamp," +
                    " rnd_str(5,16,2) b," +
                    " rnd_str('ABC', 'CDE', null, 'XYZ') c," +
                    " rnd_str(5,16,2) d," +
                    " rnd_str(5,16,2) e," +
                    " rnd_str(5,16,2) f," +
                    " rnd_str(5,16,2) g," +
                    " rnd_str(5,16,2) ik," +
                    " rnd_str(5,16,2) j," +
                    " timestamp_sequence(500000080000L,79999631L) ts," +
                    " rnd_str(5,16,2) l," +
                    " rnd_str(5,16,2) m," +
                    " rnd_str(5,16,2) n," +
                    " rnd_str(5,16,2) t," +
                    " rnd_str(5,16,2) l256" +
                    " from long_sequence(10000)" +
                    ") timestamp (ts) partition by DAY",
            executionContext
    );
    compiler.compile("create table y1 as (y)", executionContext);

    // create the expected result set
    compiler.compile("create table z as (x union all y)", executionContext);

    // create another compiler to be used by the second pool
    try (SqlCompiler compiler2 = new SqlCompiler(engine)) {
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final SOCountDownLatch haltLatch = new SOCountDownLatch(2);
        final AtomicInteger errorCount = new AtomicInteger();

        // we have two pairs of tables: (x, y) and (x1, y1)
        WorkerPool pool1 = new WorkerPool(new WorkerPoolAwareConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }

            @Override
            public int getWorkerCount() {
                return 1;
            }

            @Override
            public boolean haltOnError() {
                return false;
            }

            @Override
            public boolean isEnabled() {
                return true;
            }
        });
        pool1.assign(new Job() {
            private boolean toRun = true;

            @Override
            public boolean run(int workerId) {
                if (toRun) {
                    try {
                        toRun = false;
                        barrier.await();
                        compiler.compile("insert into x select * from y", executionContext);
                    } catch (Throwable e) {
                        e.printStackTrace();
                        errorCount.incrementAndGet();
                    } finally {
                        haltLatch.countDown();
                    }
                }
                return false;
            }
        });
        pool1.assignCleaner(Path.CLEANER);

        final WorkerPool pool2 = new WorkerPool(new WorkerPoolConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }

            @Override
            public int getWorkerCount() {
                return 1;
            }

            @Override
            public boolean haltOnError() {
                return false;
            }
        });
        pool2.assign(new Job() {
            private boolean toRun = true;

            @Override
            public boolean run(int workerId) {
                if (toRun) {
                    try {
                        toRun = false;
                        barrier.await();
                        compiler2.compile("insert into x1 select * from y1", executionContext);
                    } catch (Throwable e) {
                        e.printStackTrace();
                        errorCount.incrementAndGet();
                    } finally {
                        haltLatch.countDown();
                    }
                }
                return false;
            }
        });
        pool2.assignCleaner(Path.CLEANER);

        pool1.start(null);
        pool2.start(null);

        haltLatch.await();

        pool1.halt();
        pool2.halt();

        Assert.assertTrue(errorCount.get() > 0);
    }
}
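Both pools use the same run-exactly-once Job idiom. The sketch below factors that idiom into a reusable helper for clarity; onceJob and its ThrowingRunnable parameter are illustrative and not part of QuestDB.

// Sketch of the run-once Job idiom; onceJob and ThrowingRunnable are hypothetical.
interface ThrowingRunnable {
    void run() throws Exception;
}

static Job onceJob(CyclicBarrier barrier, SOCountDownLatch haltLatch, AtomicInteger errorCount, ThrowingRunnable task) {
    return new Job() {
        private boolean toRun = true;

        @Override
        public boolean run(int workerId) {
            if (toRun) {
                try {
                    toRun = false;
                    barrier.await(); // line up with the job on the other pool
                    task.run();      // e.g. compile an "insert into ... select ..." statement
                } catch (Throwable e) {
                    errorCount.incrementAndGet();
                } finally {
                    haltLatch.countDown(); // lets the test proceed to halt the pools
                }
            }
            return false; // as in the test above, the job always reports no further work
        }
    };
}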
Use of io.questdb.mp.WorkerPool in project questdb by bluestreak01.
Example: class IODispatcherTest, method testJsonQuerySyntaxError.
@Test
public void testJsonQuerySyntaxError() throws Exception {
    assertMemoryLeak(() -> {
        final String baseDir = temp.getRoot().getAbsolutePath();
        final DefaultHttpServerConfiguration httpConfiguration = createHttpServerConfiguration(baseDir, false);
        final WorkerPool workerPool = new WorkerPool(new WorkerPoolConfiguration() {
            @Override
            public int[] getWorkerAffinity() {
                return new int[] { -1 };
            }

            @Override
            public int getWorkerCount() {
                return 1;
            }

            @Override
            public boolean haltOnError() {
                return false;
            }
        });
        try (CairoEngine engine = new CairoEngine(new DefaultCairoConfiguration(baseDir));
             HttpServer httpServer = new HttpServer(httpConfiguration, workerPool, false)) {
            httpServer.bind(new HttpRequestProcessorFactory() {
                @Override
                public HttpRequestProcessor newInstance() {
                    return new StaticContentProcessor(httpConfiguration);
                }

                @Override
                public String getUrl() {
                    return HttpServerConfiguration.DEFAULT_PROCESSOR_URL;
                }
            });
            httpServer.bind(new HttpRequestProcessorFactory() {
                @Override
                public HttpRequestProcessor newInstance() {
                    return new JsonQueryProcessor(httpConfiguration.getJsonQueryProcessorConfiguration(), engine, workerPool.getWorkerCount(), metrics);
                }

                @Override
                public String getUrl() {
                    return "/query";
                }
            });
            workerPool.start(LOG);
            try {
                // create a table with all column types
                CairoTestUtils.createTestTable(engine.getConfiguration(), 20, new Rnd(), new TestRecord.ArrayBinarySequence());

                // send the request to the server
                final String request = "GET /query?query=x%20where2%20i%20%3D%20(%27EHNRX%27) HTTP/1.1\r\n" +
                        "Host: localhost:9001\r\n" +
                        "Connection: keep-alive\r\n" +
                        "Cache-Control: max-age=0\r\n" +
                        "Upgrade-Insecure-Requests: 1\r\n" +
                        "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\r\n" +
                        "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\r\n" +
                        "Accept-Encoding: gzip, deflate, br\r\n" +
                        "Accept-Language: en-GB,en-US;q=0.9,en;q=0.8\r\n" +
                        "\r\n";
                String expectedResponse = "HTTP/1.1 200 OK\r\n" +
                        "Server: questDB/1.0\r\n" +
                        "Date: Thu, 1 Jan 1970 00:00:00 GMT\r\n" +
                        "Transfer-Encoding: chunked\r\n" +
                        "Content-Type: application/json; charset=utf-8\r\n" +
                        "Keep-Alive: timeout=5, max=10000\r\n" +
                        "\r\n" +
                        "4d\r\n" +
                        "{\"query\":\"x where2 i = ('EHNRX')\",\"error\":\"unexpected token: i\",\"position\":9}\r\n" +
                        "00\r\n" +
                        "\r\n";
                sendAndReceive(NetworkFacadeImpl.INSTANCE, request, expectedResponse, 10, 0, false);
            } finally {
                workerPool.halt();
            }
        }
    });
}
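The query parameter in the request is URL-encoded. The plain-JDK snippet below (java.net.URLDecoder, JDK 10+ overload) decodes it to show the deliberately malformed SQL that produces the error payload in the expected response.

// Decode the query parameter sent in the request above.
String decoded = java.net.URLDecoder.decode(
        "x%20where2%20i%20%3D%20(%27EHNRX%27)",
        java.nio.charset.StandardCharsets.UTF_8);
// decoded == "x where2 i = ('EHNRX')": "where2" is not the WHERE keyword, so the
// parser stops at the next token, matching the expected JSON error
// "unexpected token: i" at position 9.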