Use of com.bakdata.conquery.models.config.XodusStoreFactory in project conquery by bakdata.
The class MigrateCommand, method run:
@Override
protected void run(io.dropwizard.setup.Environment environment, Namespace namespace, ConqueryConfig configuration) throws Exception {
	final File inStoreDirectory = namespace.get("in");
	final File outStoreDirectory = namespace.get("out");
	final long logsize = ((XodusStoreFactory) configuration.getStorage()).getXodus().getLogFileSize().toKilobytes();
	final File[] environments = inStoreDirectory.listFiles(File::isDirectory);
	if (environments == null) {
		log.error("In Store is empty");
		return;
	}
	// Create Groovy Shell and parse script
	CompilerConfiguration config = new CompilerConfiguration();
	config.setScriptBaseClass(MigrationScriptFactory.class.getName());
	GroovyShell groovy = new GroovyShell(config);
	MigrationScriptFactory factory = (MigrationScriptFactory) groovy.parse(In.file((File) namespace.get("script")).readAll());
	final Function4<String, String, String, ObjectNode, Tuple> migrator = factory.run();
	final ObjectMapper mapper = Jackson.BINARY_MAPPER;
	final ObjectReader keyReader = mapper.readerFor(String.class);
	final ObjectReader valueReader = mapper.readerFor(ObjectNode.class);
	final ObjectWriter keyWriter = mapper.writerFor(String.class);
	final ObjectWriter valueWriter = mapper.writerFor(ObjectNode.class);
	Arrays.stream(environments).parallel().forEach(xenv -> {
		final File environmentDirectory = new File(outStoreDirectory, xenv.getName());
		environmentDirectory.mkdirs();
		processEnvironment(xenv, logsize, environmentDirectory, migrator, keyReader, valueReader, keyWriter, valueWriter);
	});
}
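The migration script itself is Groovy, parsed with MigrationScriptFactory as its base class, and its run() result is a four-argument function over environment name, store name, decoded key and decoded ObjectNode value. As a rough orientation, the sketch below shows what a migrator of that shape could do. The Migrator interface, the store name "datasets", and the field rename are illustrative assumptions, not conquery API.

import com.fasterxml.jackson.databind.node.ObjectNode;

// Hypothetical stand-in for the four-argument migrator returned by the Groovy script:
// it receives environment name, store name, decoded key and decoded value, and returns
// the (possibly rewritten) key/value pair. All names and logic here are assumptions.
@FunctionalInterface
interface Migrator {
	Object[] apply(String environment, String store, String key, ObjectNode value);
}

class ExampleMigration {
	// Illustrative migration: rename the field "label" to "name" in every value of a store called "datasets".
	static final Migrator RENAME_LABEL = (env, store, key, value) -> {
		if ("datasets".equals(store) && value.has("label")) {
			value.set("name", value.remove("label"));
		}
		return new Object[]{key, value};
	};
}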
Use of com.bakdata.conquery.models.config.XodusStoreFactory in project conquery by bakdata.
The class BigStoreTest, method testFull:
@Test
public void testFull() throws JSONException, IOException {
	BigStore<DictionaryId, Dictionary> store =
			new BigStore<>(new XodusStoreFactory(), Validators.newValidator(), env, StoreMappings.DICTIONARIES.storeInfo(), (e) -> {
			}, (e) -> {
			}, MAPPER);
	store.setChunkByteSize(Ints.checkedCast(DataSize.megabytes(1).toBytes()));
	Dictionary nDict = new MapDictionary(Dataset.PLACEHOLDER, "dict");
	for (int v = 0; v < 1000000; v++) {
		nDict.add(Integer.toHexString(v).getBytes());
	}
	// check if manual serialization/deserialization works
	byte[] bytes = Jackson.BINARY_MAPPER.writeValueAsBytes(nDict);
	Dictionary simpleCopy = MAPPER.readValue(bytes, Dictionary.class);
	for (int v = 0; v < 1000000; v++) {
		assertThat(simpleCopy.getId(Integer.toHexString(v).getBytes())).isEqualTo(v);
	}
	// check if the store works
	store.add(nDict.getId(), nDict);
	// check that the bytes in the store are the same as the manually serialized bytes
	assertThat(new SequenceInputStream(Iterators.asEnumeration(
			store.getMetaStore().get(nDict.getId()).loadData(store.getDataStore()).map(ByteArrayInputStream::new).iterator())))
			.hasSameContentAs(new ByteArrayInputStream(bytes));
	EncodedDictionary copy = new EncodedDictionary(store.get(nDict.getId()), StringTypeEncoded.Encoding.UTF8);
	for (int v = 0; v < 1000000; v++) {
		assertThat(copy.getId(Integer.toHexString(v))).isEqualTo(v);
	}
}
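The test forces one-megabyte chunks for a dictionary whose serialized form exceeds that size, so the value has to be split across several entries in the data store and reassembled on read (hence the SequenceInputStream over the loaded chunks). Below is a conceptual sketch of such a byte-range split; it is an assumption for illustration only, not BigStore's actual implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class ChunkingSketch {
	// Split a serialized value into chunks of at most chunkByteSize bytes (conceptual only).
	static List<byte[]> chunk(byte[] serialized, int chunkByteSize) {
		List<byte[]> chunks = new ArrayList<>();
		for (int offset = 0; offset < serialized.length; offset += chunkByteSize) {
			chunks.add(Arrays.copyOfRange(serialized, offset, Math.min(serialized.length, offset + chunkByteSize)));
		}
		return chunks;
	}
}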
Use of com.bakdata.conquery.models.config.XodusStoreFactory in project conquery by bakdata.
The class SerializingStoreDumpTest, method init:
@BeforeEach
public void init() {
	tmpDir = Files.createTempDir();
	config = new XodusStoreFactory();
	env = Environments.newInstance(tmpDir, config.getXodus().createConfig());
}
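Since the setup opens a fresh Xodus environment in a temporary directory for every test, a matching teardown is needed to avoid leaking file handles and disk space. A minimal sketch, assuming commons-io is on the classpath and that the actual test class does not already clean up elsewhere:

@AfterEach
public void cleanup() throws IOException {
	env.close();                       // close the Xodus environment opened in init()
	FileUtils.deleteDirectory(tmpDir); // delete the temporary store directory (org.apache.commons.io.FileUtils)
}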
Use of com.bakdata.conquery.models.config.XodusStoreFactory in project conquery by bakdata.
The class StandaloneCommand, method startStandalone:
protected void startStandalone(Environment environment, Namespace namespace, ConqueryConfig config) throws Exception {
	// start ManagerNode
	ConqueryMDC.setLocation("ManagerNode");
	log.debug("Starting ManagerNode");
	ConqueryConfig managerConfig = config;
	if (config.getStorage() instanceof XodusStoreFactory) {
		final Path managerDir = ((XodusStoreFactory) config.getStorage()).getDirectory().resolve("manager");
		managerConfig = config.withStorage(((XodusStoreFactory) config.getStorage()).withDirectory(managerDir));
	}
	conquery.setManager(manager);
	conquery.run(managerConfig, environment);
	// create a thread pool to start multiple ShardNodes at the same time
	ExecutorService starterPool = Executors.newFixedThreadPool(
			config.getStandalone().getNumberOfShardNodes(),
			new ThreadFactoryBuilder()
					.setNameFormat("ShardNode Storage Loader %d")
					.setUncaughtExceptionHandler((t, e) -> {
						ConqueryMDC.setLocation(t.getName());
						log.error(t.getName() + " failed to init storage of ShardNode", e);
					})
					.build());
	List<Future<ShardNode>> tasks = new ArrayList<>();
	for (int i = 0; i < config.getStandalone().getNumberOfShardNodes(); i++) {
		final int id = i;
		tasks.add(starterPool.submit(() -> {
			ShardNode sc = new ShardNode(ShardNode.DEFAULT_NAME + id);
			shardNodes.add(sc);
			ConqueryMDC.setLocation(sc.getName());
			ConqueryConfig clone = config;
			if (config.getStorage() instanceof XodusStoreFactory) {
				final Path shardDir = ((XodusStoreFactory) config.getStorage()).getDirectory().resolve("shard-node" + id);
				clone = config.withStorage(((XodusStoreFactory) config.getStorage()).withDirectory(shardDir));
			}
			sc.run(environment, namespace, clone);
			return sc;
		}));
	}
	ConqueryMDC.setLocation("ManagerNode");
	log.debug("Waiting for ShardNodes to start");
	starterPool.shutdown();
	starterPool.awaitTermination(1, TimeUnit.HOURS);
	// catch exceptions on tasks
	boolean failed = false;
	for (Future<ShardNode> f : tasks) {
		try {
			f.get();
		}
		catch (ExecutionException e) {
			log.error("during ShardNodes creation", e);
			failed = true;
		}
	}
	if (failed) {
		System.exit(-1);
	}
	// start the Jersey server
	log.debug("Starting REST Server");
	ConqueryMDC.setLocation(null);
	super.run(environment, namespace, config);
}
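The withDirectory/withStorage calls give every node its own Xodus directory under the configured base path: "manager" for the ManagerNode and "shard-node0", "shard-node1", ... for the ShardNodes. The small sketch below only illustrates that layout; the base path is made up and not taken from any real configuration.

import java.nio.file.Path;
import java.nio.file.Paths;

class StorageLayoutSketch {
	public static void main(String[] args) {
		Path base = Paths.get("/var/conquery/storage");           // assumed base directory from XodusStoreFactory#getDirectory()
		System.out.println(base.resolve("manager"));              // storage of the ManagerNode
		for (int id = 0; id < 2; id++) {
			System.out.println(base.resolve("shard-node" + id));  // one directory per ShardNode
		}
	}
}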
Use of com.bakdata.conquery.models.config.XodusStoreFactory in project conquery by bakdata.
The class BigStoreTest, method testEmpty:
@Test
public void testEmpty() throws JSONException, IOException {
	BigStore<DictionaryId, Dictionary> store =
			new BigStore<>(new XodusStoreFactory(), Validators.newValidator(), env, StoreMappings.DICTIONARIES.storeInfo(), (e) -> {
			}, (e) -> {
			}, MAPPER);
	store.setChunkByteSize(Ints.checkedCast(DataSize.megabytes(1).toBytes()));
	Dictionary nDict = new MapDictionary(Dataset.PLACEHOLDER, "dict");
	// check if manual serialization/deserialization works
	byte[] bytes = MAPPER.writeValueAsBytes(nDict);
	Dictionary simpleCopy = MAPPER.readValue(bytes, Dictionary.class);
	assertThat(simpleCopy).isEmpty();
	// check if the store works
	store.add(nDict.getId(), nDict);
	// check that the bytes in the store are the same as the manually serialized bytes
	assertThat(new SequenceInputStream(Iterators.asEnumeration(
			store.getMetaStore().get(nDict.getId()).loadData(store.getDataStore()).map(ByteArrayInputStream::new).iterator())))
			.hasSameContentAs(new ByteArrayInputStream(bytes));
	Dictionary copy = store.get(nDict.getId());
	assertThat(copy).isEmpty();
}