Use of com.bakdata.conquery.io.storage.WorkerStorage in project conquery by bakdata:
class Worker, method newWorker.
/**
 * Creates a fresh {@link Worker} for the given dataset, backed by a newly opened
 * {@link WorkerStorage} rooted at {@code "worker_" + directory}.
 */
public static Worker newWorker(@NonNull Dataset dataset, @NonNull ThreadPoolDefinition queryThreadPoolDefinition, @NonNull ExecutorService jobsExecutorService, @NonNull StoreFactory config, @NonNull String directory, @NonNull Validator validator, boolean failOnError, int entityBucketSize) {
final WorkerStorage storage = new WorkerStorage(validator, "worker_" + directory);

// The object writer for ForwardToWorkerMessages in WorkerInformation does not need to be set on the worker side.
final WorkerInformation workerInfo = new WorkerInformation();
workerInfo.setDataset(dataset.getId());
workerInfo.setName(directory);
workerInfo.setEntityBucketSize(entityBucketSize);

// Open and populate the storage before registering the worker metadata.
storage.openStores(config);
storage.loadData();
storage.updateDataset(dataset);
storage.setWorker(workerInfo);

return new Worker(queryThreadPoolDefinition, storage, jobsExecutorService, failOnError, entityBucketSize);
}
Use of com.bakdata.conquery.io.storage.WorkerStorage in project conquery by bakdata:
class ShardNode, method run.
@Override
protected void run(Environment environment, Namespace namespace, ConqueryConfig config) throws Exception {
// Starts the shard node: wires up lifecycle, job management, the scheduler,
// the binary object mapper, and one Worker per persisted WorkerStorage.
this.environment = environment;
connector = new NioSocketConnector();
jobManager = new JobManager(getName(), config.isFailOnError());
// Registration with the Dropwizard lifecycle and scheduler creation are guarded
// by the environment monitor. NOTE(review): synchronizing on an externally
// visible object is unusual — presumably other components coordinate on the
// same Environment instance; verify against callers.
synchronized (environment) {
environment.lifecycle().manage(this);
validator = environment.getValidator();
scheduler = environment.lifecycle().scheduledExecutorService("Scheduled Messages").build();
}
this.config = config;
config.initialize(this);
// Periodic status reports: first after 30s, then every second.
scheduler.scheduleAtFixedRate(this::reportJobManagerStatus, 30, 1, TimeUnit.SECONDS);
// Copy the shared binary mapper so injected values stay local to this node,
// and make the environment's Validator available to deserialization.
final ObjectMapper binaryMapper = config.configureObjectMapper(Jackson.copyMapperAndInjectables(Jackson.BINARY_MAPPER));
((MutableInjectableValues) binaryMapper.getInjectableValues()).add(Validator.class, environment.getValidator());
workers = new Workers(getConfig().getQueries().getExecutionPool(), binaryMapper, getConfig().getCluster().getEntityBucketSize());
// Re-create one Worker for every storage persisted from a previous run.
final Collection<WorkerStorage> workerStorages = config.getStorage().loadWorkerStorages();
for (WorkerStorage workerStorage : workerStorages) {
workers.createWorker(workerStorage, config.isFailOnError());
}
log.info("All Worker Storages loaded: {}", workers.getWorkers().size());
}
Use of com.bakdata.conquery.io.storage.WorkerStorage in project conquery by bakdata:
class XodusStoreFactory, method loadNamespacedStores.
/**
 * Scans the storage base directory for sub-directories whose name starts with {@code prefix}
 * and loads each as a storage of type {@code T}, in parallel on a fixed-size thread pool.
 *
 * @param prefix       directory-name prefix identifying candidate storages (e.g. "worker_").
 * @param creator      factory turning a directory name into an (unopened) storage instance.
 * @param storesToTest store names that must exist in a directory for it to be considered valid.
 * @return the successfully loaded storages; directories that fail to load are logged and skipped.
 * @throws InterruptedException if interrupted while waiting for the loaders; pending loads are cancelled.
 */
private <T extends NamespacedStorage> Queue<T> loadNamespacedStores(String prefix, Function<String, T> creator, Set<String> storesToTest) throws InterruptedException {
File baseDir = getDirectory().toFile();
if (baseDir.mkdirs()) {
log.warn("Had to create Storage Dir at `{}`", baseDir);
}
Queue<T> storages = new ConcurrentLinkedQueue<>();
ExecutorService loaders = Executors.newFixedThreadPool(getNThreads());
try {
for (File directory : Objects.requireNonNull(baseDir.listFiles((file, name) -> file.isDirectory() && name.startsWith(prefix)))) {
final String name = directory.getName();
loaders.submit(() -> {
try {
ConqueryMDC.setLocation(directory.toString());
if (!environmentHasStores(directory, storesToTest)) {
// The method is generic, so don't claim a specific storage kind in the message.
log.warn("No valid storage found in `{}`.", directory);
return;
}
T namespacedStorage = creator.apply(name);
namespacedStorage.openStores(this);
log.debug("BEGIN reading Storage");
namespacedStorage.loadData();
storages.add(namespacedStorage);
}
catch (Exception e) {
// Best-effort: a single broken storage must not abort loading the others.
log.error("Failed reading Storage", e);
}
finally {
log.debug("DONE reading Storage");
ConqueryMDC.clearLocation();
}
});
}
loaders.shutdown();
while (!loaders.awaitTermination(1, TimeUnit.MINUTES)) {
log.debug("Waiting for storages to load. {} are already finished.", storages.size());
}
}
catch (InterruptedException e) {
// Don't leak still-running loader threads when the caller is interrupted.
loaders.shutdownNow();
throw e;
}
// Log the count, consistent with the progress message above (the full collection can be huge).
log.info("All storages loaded: {}", storages.size());
return storages;
}
Aggregations