Use of com.bakdata.conquery.io.storage.NamespaceStorage in the project conquery by bakdata:
class AdminDatasetProcessor, method addDataset.
/**
 * Creates and initializes a new dataset if it does not already exist.
 *
 * @param dataset the dataset to register; its name must be unique across the registry.
 * @return the dataset that was passed in, after registration.
 * @throws WebApplicationException with status 409 (CONFLICT) if a dataset of that name is already registered.
 */
public synchronized Dataset addDataset(Dataset dataset) {
	final String name = dataset.getName();

	// Reject duplicates up front; the registry is the single source of truth for existing datasets.
	if (datasetRegistry.get(new DatasetId(name)) != null) {
		throw new WebApplicationException("Dataset already exists", Response.Status.CONFLICT);
	}

	// Open a dedicated storage for the new dataset and seed it with the dataset
	// entity and an empty id-mapping.
	final NamespaceStorage namespaceStorage = new NamespaceStorage(validator, "dataset_" + name);
	namespaceStorage.openStores(config.getStorage());
	namespaceStorage.loadData();
	namespaceStorage.setMetaStorage(storage);
	namespaceStorage.updateDataset(dataset);
	namespaceStorage.updateIdMapping(new EntityIdMap());

	// Register the namespace; the writer serializes internal-only views in the binary format.
	datasetRegistry.add(new Namespace(
			namespaceStorage,
			config.isFailOnError(),
			config.configureObjectMapper(Jackson.copyMapperAndInjectables(Jackson.BINARY_MAPPER)).writerWithView(InternalOnly.class)
	));

	// for now we just add one worker to every ShardNode
	for (ShardNodeInformation node : datasetRegistry.getShardNodes().values()) {
		node.send(new AddWorker(dataset));
	}

	return dataset;
}
Use of com.bakdata.conquery.io.storage.NamespaceStorage in the project conquery by bakdata:
class UIProcessor, method getTableStatistics.
/**
 * Collects size statistics for a table: total entry count, estimated memory of its
 * dictionaries, entries and CBlocks, aggregated over all imports of the table.
 *
 * @param table the table to summarize.
 * @return a {@link TableStatistics} built from the table's imports.
 */
public TableStatistics getTableStatistics(Table table) {
	final NamespaceStorage storage = getDatasetRegistry().get(table.getDataset().getId()).getStorage();
	final List<Import> imports = table.findImports(storage).collect(Collectors.toList());

	final long totalEntries = imports.stream()
									 .mapToLong(Import::getNumberOfEntries)
									 .sum();

	// total size of dictionaries
	final long dictionariesSize = imports.stream()
										 .flatMap(imp -> imp.getDictionaries().stream())
										 .filter(Objects::nonNull)
										 .map(storage::getDictionary)
										 .mapToLong(Dictionary::estimateMemoryConsumption)
										 .sum();

	// total size of entries
	final long entriesSize = imports.stream()
									.mapToLong(Import::estimateMemoryConsumption)
									.sum();

	// Total size of CBlocks
	final long cBlocksSize = imports.stream()
									.mapToLong(imp -> calculateCBlocksSizeBytes(imp, storage.getAllConcepts()))
									.sum();

	return new TableStatistics(table, totalEntries, dictionariesSize, entriesSize, cBlocksSize, imports);
}
Use of com.bakdata.conquery.io.storage.NamespaceStorage in the project conquery by bakdata:
class UIProcessor, method getImportStatistics.
/**
 * Computes statistics for a single import, currently the estimated CBlock size in bytes.
 *
 * @param imp the import to summarize.
 * @return an {@link ImportStatistics} for the given import.
 */
public ImportStatistics getImportStatistics(Import imp) {
	// Resolve the import's namespace storage to reach the concepts of its dataset.
	final NamespaceStorage storage = getDatasetRegistry().get(imp.getDataset().getId()).getStorage();
	return new ImportStatistics(imp, calculateCBlocksSizeBytes(imp, storage.getAllConcepts()));
}
Use of com.bakdata.conquery.io.storage.NamespaceStorage in the project conquery by bakdata:
class ManagerNode, method loadNamespaces.
/**
 * Loads all persisted namespace storages from the configured storage backend and
 * registers a {@link Namespace} for each of them in the dataset registry.
 */
public void loadNamespaces() {
	// One shared writer for all namespaces; it serializes internal-only views in the binary format.
	final ObjectWriter objectWriter =
			config.configureObjectMapper(Jackson.copyMapperAndInjectables(Jackson.BINARY_MAPPER))
				  .writerWithView(InternalOnly.class);

	for (NamespaceStorage namespaceStorage : config.getStorage().loadNamespaceStorages()) {
		datasetRegistry.add(new Namespace(namespaceStorage, config.isFailOnError(), objectWriter));
	}
}
Use of com.bakdata.conquery.io.storage.NamespaceStorage in the project conquery by bakdata:
class RestartTest, method execute.
/**
 * End-to-end restart test: runs a query test, populates the id-mapping and a set of
 * auth entities (users/roles/groups) plus dataset permissions, restarts the whole
 * cluster, and then asserts that everything survived persistence — executions, the
 * id-mapping, remaining auth entities and their permissions — while deleted entities
 * stay deleted.
 */
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
	// read test specification
	String testJson = In.resource("/tests/query/RESTART_TEST_DATA/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();

	Validator validator = Validators.newValidator();
	EntityIdMap entityIdMap = IdMapSerialisationTest.createTestPersistentMap();

	// Grab the admin processors of the currently running manager node.
	ManagerNode manager = testConquery.getStandaloneCommand().getManager();
	AdminDatasetProcessor adminDatasetProcessor = manager.getAdmin().getAdminDatasetProcessor();
	AdminProcessor adminProcessor = manager.getAdmin().getAdminProcessor();

	StandaloneSupport conquery = testConquery.getSupport(name);
	DatasetId dataset = conquery.getDataset().getId();

	// Parse, validate and run the query test once before the restart.
	ConqueryTestSpec test = JsonIntegrationTest.readJson(dataset, testJson);
	ValidatorHelper.failOnError(log, validator.validate(test));

	test.importRequiredData(conquery);

	test.executeTest(conquery);

	// Remember the execution count so it can be compared after the restart.
	final int numberOfExecutions = conquery.getMetaStorage().getAllExecutions().size();

	// IDMapping Testing
	NamespaceStorage namespaceStorage = conquery.getNamespaceStorage();

	namespaceStorage.updateIdMapping(entityIdMap);

	// Create several datasets; each one receives a permission for a different auth entity below.
	final Dataset dataset1 = adminDatasetProcessor.addDataset(TEST_DATASET_1);
	final Dataset dataset2 = adminDatasetProcessor.addDataset(TEST_DATASET_2);
	final Dataset dataset3 = adminDatasetProcessor.addDataset(TEST_DATASET_3);
	final Dataset dataset4 = adminDatasetProcessor.addDataset(TEST_DATASET_4);
	final Dataset dataset5 = adminDatasetProcessor.addDataset(TEST_DATASET_5);
	final Dataset dataset6 = adminDatasetProcessor.addDataset(TEST_DATASET_6);

	MetaStorage storage = conquery.getMetaStorage();

	// Pairs of entities: one of each kind is kept, one is deleted before the restart.
	Role role = new Role("role", "ROLE", storage);
	Role roleToDelete = new Role("roleDelete", "ROLE_DELETE", storage);
	User user = new User("user@test.email", "USER", storage);
	User userToDelete = new User("userDelete@test.email", "USER_DELETE", storage);
	Group group = new Group("group", "GROUP", storage);
	Group groupToDelete = new Group("groupDelete", "GROUP_DELETE", storage);

	{
		// Auth testing (deletion and permission grant)
		// build constellation
		// TODO USE APIS
		adminProcessor.addUser(user);
		adminProcessor.addUser(userToDelete);
		adminProcessor.addRole(role);
		adminProcessor.addRole(roleToDelete);
		adminProcessor.addGroup(group);
		adminProcessor.addGroup(groupToDelete);

		// Cross-wire every role with every user and group, so deletions must be cleaned up everywhere.
		adminProcessor.addRoleTo(user, role);
		adminProcessor.addRoleTo(user, roleToDelete);
		adminProcessor.addRoleTo(userToDelete, role);
		adminProcessor.addRoleTo(userToDelete, roleToDelete);
		adminProcessor.addRoleTo(group, role);
		adminProcessor.addRoleTo(group, roleToDelete);
		adminProcessor.addRoleTo(groupToDelete, role);
		adminProcessor.addRoleTo(groupToDelete, roleToDelete);
		adminProcessor.addUserToGroup(group, user);
		adminProcessor.addUserToGroup(group, userToDelete);
		adminProcessor.addUserToGroup(groupToDelete, user);
		adminProcessor.addUserToGroup(groupToDelete, userToDelete);

		// Adding Permissions — one dataset per entity, so the post-restart
		// checks can tell exactly which grant path survived.
		adminProcessor.createPermission(user, dataset1.createPermission(Ability.READ.asSet()));
		adminProcessor.createPermission(userToDelete, dataset2.createPermission(Ability.READ.asSet()));
		adminProcessor.createPermission(role, dataset3.createPermission(Ability.READ.asSet()));
		adminProcessor.createPermission(roleToDelete, dataset4.createPermission(Ability.READ.asSet()));
		adminProcessor.createPermission(group, dataset5.createPermission(Ability.READ.asSet()));
		adminProcessor.createPermission(groupToDelete, dataset6.createPermission(Ability.READ.asSet()));

		// Delete entities
		// TODO use API
		adminProcessor.deleteUser(userToDelete);
		adminProcessor.deleteRole(roleToDelete);
		adminProcessor.deleteGroup(groupToDelete);
	}

	// Full stop-and-start cycle; everything after this point must come from persisted state.
	log.info("Shutting down for restart");
	testConquery.shutdown();
	log.info("Restarting");
	testConquery.beforeAll();

	final StandaloneSupport support = testConquery.openDataset(dataset);
	log.info("Restart complete");

	DatasetRegistry datasetRegistry = support.getDatasetsProcessor().getDatasetRegistry();

	// Executions created before the restart must still be there.
	assertThat(support.getMetaStorage().getAllExecutions().size()).as("Executions after restart").isEqualTo(numberOfExecutions);

	// The query test must also pass against the restarted cluster.
	test.executeTest(support);

	{
		// Auth actual tests
		User userStored = storage.getUser(user.getId());
		assertThat(userStored).isEqualTo(user);
		assertThat(storage.getRole(role.getId())).isEqualTo(role);
		assertThat(storage.getGroup(group.getId())).isEqualTo(group);

		assertThat(storage.getUser(userToDelete.getId())).as("deleted user should stay deleted").isNull();
		assertThat(storage.getRole(roleToDelete.getId())).as("deleted role should stay deleted").isNull();
		assertThat(storage.getGroup(groupToDelete.getId())).as("deleted group should stay deleted").isNull();

		// Permissions granted via surviving entities (direct, role, group) remain effective;
		// permissions that were only reachable through deleted entities are gone.
		assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_1.getId()).getDataset(), Ability.READ)).isTrue();
		assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_2.getId()).getDataset(), Ability.READ)).isFalse();
		assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_3.getId()).getDataset(), Ability.READ)).isTrue();
		assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_4.getId()).getDataset(), Ability.READ)).isFalse();
		assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_5.getId()).getDataset(), Ability.READ)).isTrue();
		assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_6.getId()).getDataset(), Ability.READ)).isFalse();
	}

	// The id-mapping written before the restart must have been persisted unchanged.
	EntityIdMap entityIdMapAfterRestart = conquery.getNamespaceStorage().getIdMapping();
	assertThat(entityIdMapAfterRestart).isEqualTo(entityIdMap);

	// We need to reassign the dataset processor because the instance prio to the restart became invalid
	adminDatasetProcessor = testConquery.getStandaloneCommand().getManager().getAdmin().getAdminDatasetProcessor();

	// Cleanup
	adminDatasetProcessor.deleteDataset(dataset1);
	adminDatasetProcessor.deleteDataset(dataset2);
	adminDatasetProcessor.deleteDataset(dataset3);
	adminDatasetProcessor.deleteDataset(dataset4);
	adminDatasetProcessor.deleteDataset(dataset5);
	adminDatasetProcessor.deleteDataset(dataset6);
}
Aggregations