use of com.bakdata.conquery.io.storage.ModificationShieldedWorkerStorage in project conquery by bakdata.
the class ImportUpdateTest method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
MetaStorage storage = conquery.getMetaStorage();
String testJson = In.resource("/tests/query/UPDATE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final Namespace namespace = conquery.getNamespace();
final ImportId importId1 = ImportId.Parser.INSTANCE.parse(dataset.getName(), "table1", "table1");
final ImportId importId2 = ImportId.Parser.INSTANCE.parse(dataset.getName(), "table2", "table2");
QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
final List<RequiredTable> tables = test.getContent().getTables();
assertThat(tables.size()).isEqualTo(2);
List<File> cqpps;
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, tables);
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
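// generateCqpp preprocesses each table's CSV into a binary .cqpp file; only the first one is imported below, the second is kept for the failing update attempt.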
cqpps = LoadingUtil.generateCqpp(conquery, tables);
conquery.waitUntilWorkDone();
assertThat(cqpps.size()).isEqualTo(tables.size());
LoadingUtil.importCqppFiles(conquery, List.of(cqpps.get(0)));
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
// State before update.
{
log.info("Checking state before update");
assertThat(namespace.getStorage().getAllImports()).hasSize(1);
// Must contain the import.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().equals(importId1)).isNotEmpty();
assertThat(namespace.getStorage().getCentralRegistry().getOptional(importId1)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", worker.getInfo().getId()).filteredOn(block -> block.getBucket().getId().getDataset().equals(dataset.getId())).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).describedAs("Buckets for Worker %s", worker.getInfo().getId()).isNotEmpty();
// Must contain the import.
assertThat(workerStorage.getImport(importId1)).isNotNull();
}
}
assertThat(namespace.getNumberOfEntities()).isEqualTo(4);
// assert that the query can be executed after the import
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Trying to update an import that does not exist should throw a Not-Found WebApplicationException
LoadingUtil.updateCqppFile(conquery, cqpps.get(1), Response.Status.Family.CLIENT_ERROR, "Not Found");
conquery.waitUntilWorkDone();
// Manually load new data and update the concerned import
{
log.info("Manually loading new data for import");
final RequiredTable importTable = test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(importId1.getTable().getTable())).findFirst().orElseThrow();
final String csvName = importTable.getCsv().getName();
final String path = importTable.getCsv().getPath();
// copy the new content for the importTable into the CSV file used by the preprocessor, to avoid creating multiple files with the same name
FileUtils.copyInputStreamToFile(In.resource(path.substring(0, path.lastIndexOf('/')) + "/" + csvName.replace(".csv", ".update.csv")).asStream(), new File(conquery.getTmpDir(), csvName));
File descriptionFile = new File(conquery.getTmpDir(), importTable.getName() + ConqueryConstants.EXTENSION_DESCRIPTION);
File newPreprocessedFile = new File(conquery.getTmpDir(), importTable.getName() + ConqueryConstants.EXTENSION_PREPROCESSED);
// create import descriptor
{
TableImportDescriptor desc = new TableImportDescriptor();
desc.setName(importTable.getName());
desc.setTable(importTable.getName());
TableInputDescriptor input = new TableInputDescriptor();
{
input.setPrimary(importTable.getPrimaryColumn().createOutput());
input.setSourceFile(csvName);
input.setOutput(new OutputDescription[importTable.getColumns().length]);
for (int i = 0; i < importTable.getColumns().length; i++) {
input.getOutput()[i] = importTable.getColumns()[i].createOutput();
}
}
desc.setInputs(new TableInputDescriptor[] { input });
Jackson.MAPPER.writeValue(descriptionFile, desc);
}
// preprocess
conquery.preprocessTmp(conquery.getTmpDir(), List.of(descriptionFile));
log.info("updating import");
// correct update of the import
LoadingUtil.updateCqppFile(conquery, newPreprocessedFile, Response.Status.Family.SUCCESSFUL, "No Content");
conquery.waitUntilWorkDone();
}
// State after update.
{
log.info("Checking state after update");
assertThat(namespace.getStorage().getAllImports()).hasSize(1);
// Must contain the import.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().equals(importId1)).isNotEmpty();
assertThat(namespace.getStorage().getCentralRegistry().getOptional(importId1)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", worker.getInfo().getId()).filteredOn(block -> block.getBucket().getId().getDataset().equals(dataset.getId())).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).describedAs("Buckets for Worker %s", worker.getInfo().getId()).isNotEmpty();
// Must contain the import.
assertThat(workerStorage.getImport(importId1)).isNotNull();
}
}
assertThat(namespace.getNumberOfEntities()).isEqualTo(9);
// Issue a query and assert that it has more content.
IntegrationUtils.assertQueryResult(conquery, query, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
}
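The before/after blocks in this test are identical apart from the expected entity count. A minimal helper sketch (hypothetical, not part of the actual test class) that factors out the repeated per-worker assertions, using only calls that appear above:
private static void assertWorkersContainImport(StandaloneSupport conquery, Dataset dataset, ImportId importId) {
	for (ShardNode node : conquery.getShardNodes()) {
		for (Worker worker : node.getWorkers().getWorkers().values()) {
			// Only inspect workers that serve the dataset under test.
			if (!worker.getInfo().getDataset().equals(dataset.getId())) {
				continue;
			}
			final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
			assertThat(workerStorage.getAllCBlocks())
					.describedAs("CBlocks for Worker %s", worker.getInfo().getId())
					.filteredOn(block -> block.getBucket().getId().getDataset().equals(dataset.getId()))
					.isNotEmpty();
			assertThat(workerStorage.getAllBuckets())
					.describedAs("Buckets for Worker %s", worker.getInfo().getId())
					.filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId()))
					.isNotEmpty();
			// The worker must also hold the import itself.
			assertThat(workerStorage.getImport(importId)).isNotNull();
		}
	}
}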
use of com.bakdata.conquery.io.storage.ModificationShieldedWorkerStorage in project conquery by bakdata.
the class DatasetDeletionTest method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
final MetaStorage storage = conquery.getMetaStorage();
final Dataset dataset = conquery.getDataset();
Namespace namespace = conquery.getNamespace();
final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final int nImports = namespace.getStorage().getAllImports().size();
log.info("Checking state before deletion");
// Assert state before deletion.
{
// Must contain the dataset.
assertThat(namespace.getStorage().getCentralRegistry().getOptional(dataset.getId())).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query before deletion");
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Delete Dataset.
{
log.info("Issuing deletion of import {}", dataset);
// Delete the import.
// But, we do not allow deletion of tables with associated connectors, so this should throw!
assertThatThrownBy(() -> conquery.getDatasetsProcessor().deleteDataset(dataset)).isInstanceOf(WebApplicationException.class);
// TODO use api
conquery.getNamespace().getStorage().getTables().forEach(tableId -> conquery.getDatasetsProcessor().deleteTable(tableId, true));
conquery.waitUntilWorkDone();
// Finally delete dataset
conquery.getDatasetsProcessor().deleteDataset(dataset);
conquery.waitUntilWorkDone();
assertThat(storage.getCentralRegistry().getOptional(dataset.getId())).isEmpty();
}
// State after deletion.
{
log.info("Checking state after deletion");
// The whole dataset was deleted, so no imports should remain!
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(0);
// The deleted import should not be found.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().getTable().getDataset().equals(dataset.getId())).isEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
// No bucket should be found referencing the import.
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).filteredOn(bucket -> bucket.getTable().getDataset().getId().equals(dataset.getId())).isEmpty();
// No CBlock associated with import may exist
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getBucket().getTable().getDataset().getId().equals(dataset.getId())).isEmpty();
}
}
// It's not possible to issue a query for a non-existent dataset, so we assert that parsing the query fails.
assertThatThrownBy(() -> {
IntegrationUtils.parseQuery(conquery, test.getRawQuery());
}).isNotNull();
IntegrationUtils.assertQueryResult(conquery, query, 0, ExecutionState.FAILED, conquery.getTestUser(), 404);
}
// Reload the dataset and assert the state.
// We have to do some tricks with StandaloneSupport to open it with another Dataset
final StandaloneSupport conqueryReimport = testConquery.getSupport(namespace.getDataset().getName());
{
// only import the deleted import/table
LoadingUtil.importTables(conqueryReimport, test.getContent().getTables());
assertThat(conqueryReimport.getNamespace().getStorage().getTables()).isNotEmpty();
conqueryReimport.waitUntilWorkDone();
LoadingUtil.importTableContents(conqueryReimport, test.getContent().getTables());
conqueryReimport.waitUntilWorkDone();
LoadingUtil.importConcepts(conqueryReimport, test.getRawConcepts());
conqueryReimport.waitUntilWorkDone();
assertThat(conqueryReimport.getDatasetsProcessor().getDatasetRegistry().get(conqueryReimport.getDataset().getId())).describedAs("Dataset after re-import.").isNotNull();
assertThat(conqueryReimport.getNamespace().getStorage().getAllImports().size()).isEqualTo(nImports);
for (ShardNode node : conqueryReimport.getShardNodes()) {
assertThat(node.getWorkers().getWorkers().values()).filteredOn(w -> w.getInfo().getDataset().equals(conqueryReimport.getDataset().getId())).describedAs("Workers for node {}", node.getName()).isNotEmpty();
}
log.info("Executing query after re-import");
final Query query2 = IntegrationUtils.parseQuery(conqueryReimport, test.getRawQuery());
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conqueryReimport, query2, 2L, ExecutionState.DONE, conqueryReimport.getTestUser(), 201);
}
// Finally, restart conquery and assert again, that the data is correct.
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
final StandaloneSupport conqueryRestart = testConquery.openDataset(conqueryReimport.getDataset().getId());
log.info("Checking state after re-start");
assertThat(conqueryRestart.getNamespace().getStorage().getAllImports().size()).isEqualTo(2);
for (ShardNode node : conqueryRestart.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllBuckets().stream().filter(bucket -> bucket.getTable().getDataset().getId().equals(dataset.getId()))).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query after restart");
final Query query3 = IntegrationUtils.parseQuery(conqueryRestart, test.getRawQuery());
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conqueryRestart, query3, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
}
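The key constraint exercised here is the deletion order: tables with associated connectors block dataset deletion. A condensed sketch of that ordering, using the same processor calls as the test (hypothetical as a standalone snippet):
// Deleting a dataset that still has tables must fail.
assertThatThrownBy(() -> conquery.getDatasetsProcessor().deleteDataset(dataset))
		.isInstanceOf(WebApplicationException.class);
// Force-delete all tables first (the test notes this should eventually go through the API).
conquery.getNamespace().getStorage().getTables()
		.forEach(tableId -> conquery.getDatasetsProcessor().deleteTable(tableId, true));
conquery.waitUntilWorkDone();
// Now the dataset itself can be removed.
conquery.getDatasetsProcessor().deleteDataset(dataset);
conquery.waitUntilWorkDone();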
use of com.bakdata.conquery.io.storage.ModificationShieldedWorkerStorage in project conquery by bakdata.
the class ConceptUpdateAndDeletionTest method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
StandaloneSupport conquery = testConquery.getSupport(name);
// Read two JSONs with different Trees
final String testJson = In.resource("/tests/query/UPDATE_CONCEPT_TESTS/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();
final String testJson2 = In.resource("/tests/query/UPDATE_CONCEPT_TESTS/SIMPLE_TREECONCEPT_2_Query.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final Namespace namespace = conquery.getNamespace();
final ConceptId conceptId = ConceptId.Parser.INSTANCE.parse(dataset.getName(), "test_tree");
final Concept<?> concept;
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
final QueryTest test2 = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson2);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
assertThat(namespace.getStorage().getConcept(conceptId)).isNotNull();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
// State before update.
{
log.info("Checking state before update");
// Must contain the concept.
assertThat(namespace.getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isNotEmpty();
assertThat(namespace.getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)).isNotEmpty();
}
}
log.info("Executing query before update");
IntegrationUtils.assertQueryResult(conquery, query, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
conquery.waitUntilWorkDone();
log.info("Query before update executed");
}
// Load a different concept with the same id (its child "C1" matches more entities than "A1" did)
// To perform the update, the old concept is deleted first and the new one is added; concept deletion is therefore also covered here
{
log.info("Executing update");
LoadingUtil.updateConcepts(conquery, test2.getRawConcepts(), Response.Status.Family.SUCCESSFUL);
conquery.waitUntilWorkDone();
log.info("Update executed");
}
// Check state after update.
{
log.info("Checking state after update");
// Must contain the concept now.
assertThat(namespace.getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isNotEmpty();
assertThat(namespace.getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)).isNotEmpty();
}
}
log.info("Executing query after update");
// Assert that it now contains 2 instead of 1.
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
conquery.waitUntilWorkDone();
log.info("Query after update executed");
}
// A new StandaloneSupport is generated after restarting.
// Restart conquery and assert again, that the data is correct.
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
conquery = testConquery.openDataset(dataset.getId());
log.info("Checking state after re-start");
{
// Must contain the concept.
assertThat(conquery.getNamespace().getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isNotEmpty();
assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)).isNotEmpty();
}
}
log.info("Executing query after restart.");
// Re-assert state.
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
conquery.waitUntilWorkDone();
}
}
// Delete the Concept.
{
log.info("Issuing deletion of import {}", conceptId);
concept = Objects.requireNonNull(conquery.getNamespace().getStorage().getConcept(conceptId));
conquery.getDatasetsProcessor().deleteConcept(concept);
conquery.waitUntilWorkDone();
}
// Check state after deletion.
{
log.info("Checking state after deletion");
// We've deleted the concept, so it and its associated CBlocks should be gone.
assertThat(conquery.getNamespace().getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isEmpty();
assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isEmpty();
assertThat(conquery.getShardNodes().stream().flatMap(node -> node.getWorkers().getWorkers().values().stream()).filter(worker -> worker.getInfo().getDataset().equals(dataset.getId())).map(Worker::getStorage)).noneMatch(workerStorage -> workerStorage.getConcept(conceptId) != null).noneMatch(workerStorage -> workerStorage.getAllCBlocks().stream().anyMatch(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)));
log.info("Executing query after deletion (EXPECTING AN EXCEPTION IN THE LOGS!)");
// Issue a query and assert that it is failing.
IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
}
// Restart conquery and assert again, that the state after deletion was maintained.
{
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
conquery = testConquery.openDataset(dataset.getId());
}
// Check state after restart.
{
log.info("Checking state after restart");
// We've deleted the concept, so it and its associated CBlocks should be gone.
assertThat(conquery.getNamespace().getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isEmpty();
assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isEmpty();
assertThat(conquery.getShardNodes().stream().flatMap(node -> node.getWorkers().getWorkers().values().stream()).filter(worker -> worker.getInfo().getDataset().equals(dataset.getId())).map(Worker::getStorage)).noneMatch(workerStorage -> workerStorage.getConcept(conceptId) != null).noneMatch(workerStorage -> workerStorage.getAllCBlocks().stream().anyMatch(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)));
log.info("Executing query after restart (EXPECTING AN EXCEPTION IN THE LOGS!)");
// Issue a query and assert that it is failing.
IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
}
}
}
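Read end to end, the expected query results trace the concept lifecycle. A hypothetical condensed view of the assertions above:
// 1 result with the original concept, 2 after the update, failure after deletion.
IntegrationUtils.assertQueryResult(conquery, query, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
LoadingUtil.updateConcepts(conquery, test2.getRawConcepts(), Response.Status.Family.SUCCESSFUL);
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
conquery.getDatasetsProcessor().deleteConcept(concept);
IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);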
use of com.bakdata.conquery.io.storage.ModificationShieldedWorkerStorage in project conquery by bakdata.
the class ImportDeletionTest method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
MetaStorage storage = conquery.getMetaStorage();
final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final Namespace namespace = conquery.getNamespace();
final ImportId importId = ImportId.Parser.INSTANCE.parse(dataset.getName(), "test_table2", "test_table2");
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final int nImports = namespace.getStorage().getAllImports().size();
// State before deletion.
{
log.info("Checking state before deletion");
// Must contain the import.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().equals(importId)).isNotEmpty();
assertThat(namespace.getStorage().getCentralRegistry().getOptional(importId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", worker.getInfo().getId()).filteredOn(block -> block.getBucket().getId().getDataset().equals(dataset.getId())).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).describedAs("Buckets for Worker %s", worker.getInfo().getId()).isNotEmpty();
// Must contain the import.
assertThat(workerStorage.getImport(importId)).isNotNull();
}
}
log.info("Executing query before deletion");
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Delete the import.
{
log.info("Issuing deletion of import {}", importId);
final URI deleteImportUri = HierarchyHelper.hierarchicalPath(conquery.defaultAdminURIBuilder(), AdminTablesResource.class, "deleteImport").buildFromMap(Map.of(ResourceConstants.DATASET, conquery.getDataset().getId(), ResourceConstants.TABLE, importId.getTable(), ResourceConstants.IMPORT_ID, importId));
final Response delete = conquery.getClient().target(deleteImportUri).request(MediaType.APPLICATION_JSON).delete();
assertThat(delete.getStatusInfo().getFamily()).isEqualTo(Response.Status.Family.SUCCESSFUL);
conquery.waitUntilWorkDone();
}
// State after deletion.
{
log.info("Checking state after deletion");
// We have deleted an import, so there should be one less!
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports - 1);
// The deleted import should not be found.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().equals(importId)).isEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
// No bucket should be found referencing the import.
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", worker.getInfo().getId()).filteredOn(bucket -> bucket.getImp().getId().equals(importId)).isEmpty();
// No CBlock associated with import may exist
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", worker.getInfo().getId()).filteredOn(cBlock -> cBlock.getBucket().getId().getImp().equals(importId)).isEmpty();
// The import should not exist anymore
assertThat(workerStorage.getImport(importId)).describedAs("Import for Worker %s", worker.getInfo().getId()).isNull();
}
}
log.info("Executing query after deletion");
// Issue a query and assert that it has less content.
IntegrationUtils.assertQueryResult(conquery, query, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
conquery.waitUntilWorkDone();
// Load more data under the same name into the same table, re-importing only the deleted import/table
{
// only import the deleted import/table
final RequiredTable import2Table = test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(importId.getTable().getTable())).findFirst().orElseThrow();
final ResourceFile csv = import2Table.getCsv();
final String path = csv.getPath();
// copy csv to tmp folder
// Content 2.2 contains an extra entry of a value that hasn't been seen before.
FileUtils.copyInputStreamToFile(In.resource(path.substring(0, path.lastIndexOf('/')) + "/" + "content2.2.csv").asStream(), new File(conquery.getTmpDir(), csv.getName()));
File descriptionFile = new File(conquery.getTmpDir(), import2Table.getName() + ConqueryConstants.EXTENSION_DESCRIPTION);
File preprocessedFile = new File(conquery.getTmpDir(), import2Table.getName() + ConqueryConstants.EXTENSION_PREPROCESSED);
// create import descriptor
TableImportDescriptor desc = new TableImportDescriptor();
desc.setName(import2Table.getName());
desc.setTable(import2Table.getName());
TableInputDescriptor input = new TableInputDescriptor();
{
input.setPrimary(import2Table.getPrimaryColumn().createOutput());
input.setSourceFile(import2Table.getCsv().getName());
input.setOutput(new OutputDescription[import2Table.getColumns().length]);
for (int i = 0; i < import2Table.getColumns().length; i++) {
input.getOutput()[i] = import2Table.getColumns()[i].createOutput();
}
}
desc.setInputs(new TableInputDescriptor[] { input });
Jackson.MAPPER.writeValue(descriptionFile, desc);
// preprocess
conquery.preprocessTmp(conquery.getTmpDir(), List.of(descriptionFile));
// import preprocessedFiles
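// Preprocessed .cqpp files are gzip-compressed, hence the GZIPInputStream wrapper.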
conquery.getDatasetsProcessor().addImport(conquery.getNamespace(), new GZIPInputStream(new FileInputStream(preprocessedFile)));
conquery.waitUntilWorkDone();
}
// State after reimport.
{
log.info("Checking state after re-import");
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports);
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", worker.getInfo().getId()).filteredOn(bucket -> bucket.getImp().getId().equals(importId)).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Finally, restart conquery and assert again, that the data is correct.
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
StandaloneSupport conquery2 = testConquery.openDataset(dataset.getId());
log.info("Checking state after re-start");
{
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(2);
for (ShardNode node : conquery2.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId()))
continue;
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", worker.getInfo().getId()).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).filteredOn(bucket -> bucket.getImp().getId().equals(importId)).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery2, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
}
}
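The descriptor-building block appears almost verbatim in ImportUpdateTest and ImportDeletionTest. A hypothetical helper that factors it out, using only the setters shown above:
private static TableImportDescriptor createDescriptor(RequiredTable table) {
	final TableImportDescriptor desc = new TableImportDescriptor();
	desc.setName(table.getName());
	desc.setTable(table.getName());
	final TableInputDescriptor input = new TableInputDescriptor();
	input.setPrimary(table.getPrimaryColumn().createOutput());
	input.setSourceFile(table.getCsv().getName());
	// One output per column, each created from the column definition.
	input.setOutput(new OutputDescription[table.getColumns().length]);
	for (int i = 0; i < table.getColumns().length; i++) {
		input.getOutput()[i] = table.getColumns()[i].createOutput();
	}
	desc.setInputs(new TableInputDescriptor[] { input });
	return desc;
}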
use of com.bakdata.conquery.io.storage.ModificationShieldedWorkerStorage in project conquery by bakdata.
the class TableDeletionTest method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
final MetaStorage storage = conquery.getMetaStorage();
final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final Namespace namespace = conquery.getNamespace();
final TableId tableId = TableId.Parser.INSTANCE.parse(dataset.getName(), "test_table2");
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final int nImports = namespace.getStorage().getAllImports().size();
// State before deletion.
{
log.info("Checking state before deletion");
// Must contain the table.
assertThat(namespace.getStorage().getCentralRegistry().getOptional(tableId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query before deletion");
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Delete the import.
{
log.info("Issuing deletion of import {}", tableId);
// Delete the import via API.
// But, we do not allow deletion of tables with associated connectors, so this should throw!
final URI deleteTable = HierarchyHelper.hierarchicalPath(conquery.defaultAdminURIBuilder(), AdminTablesResource.class, "remove").buildFromMap(Map.of(ResourceConstants.DATASET, conquery.getDataset().getName(), ResourceConstants.TABLE, tableId.toString()));
final Response failed = conquery.getClient().target(deleteTable).request().delete();
assertThat(failed.getStatusInfo().getFamily()).isEqualTo(Response.Status.Family.CLIENT_ERROR);
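// Deleting the concept removes the connectors referencing the table, which unblocks the table deletion below.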
conquery.getDatasetsProcessor().deleteConcept(conquery.getNamespace().getStorage().getAllConcepts().iterator().next());
Thread.sleep(100);
conquery.waitUntilWorkDone();
final Response success = conquery.getClient().target(deleteTable).request().delete();
assertThat(success.getStatusInfo().getStatusCode()).isEqualTo(Response.Status.OK.getStatusCode());
Thread.sleep(100);
conquery.waitUntilWorkDone();
}
// State after deletion.
{
log.info("Checking state after deletion");
// We have deleted the table together with its import, so there should be one less!
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports - 1);
// The deleted import should not be found.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().getTable().equals(tableId)).isEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
// No bucket should be found referencing the import.
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).filteredOn(bucket -> bucket.getImp().getTable().getId().equals(tableId)).isEmpty();
// No CBlock associated with import may exist
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getBucket().getImp().getTable().getId().equals(tableId)).isEmpty();
}
}
log.info("Executing query after deletion");
// Issue a query and assert that it now fails, since its table is gone.
IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
}
conquery.waitUntilWorkDone();
// Load the same import into the same table, with only the deleted import/table
{
// only import the deleted import/table
LoadingUtil.importTables(conquery, test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(tableId.getTable())).collect(Collectors.toList()));
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(tableId.getTable())).collect(Collectors.toList()));
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
assertThat(namespace.getStorage().getTable(tableId)).describedAs("Table after re-import.").isNotNull();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
assertThat(value.getStorage().getCentralRegistry().resolve(tableId)).describedAs("Table in worker storage.").isNotNull();
}
}
}
// Test state after reimport.
{
log.info("Checking state after re-import");
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports);
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllBuckets().stream().filter(bucket -> bucket.getImp().getTable().getId().equals(tableId))).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Finally, restart conquery and assert again, that the data is correct.
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
StandaloneSupport conquery2 = testConquery.openDataset(dataset.getId());
log.info("Checking state after re-start");
{
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(2);
for (ShardNode node : conquery2.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllBuckets().stream().filter(bucket -> bucket.getImp().getTable().getId().equals(tableId))).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery2, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
}
}
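ImportDeletionTest and TableDeletionTest drive the admin REST interface the same way. A minimal sketch of that pattern (hypothetical as standalone code, with names taken from the tests above):
// Build the hierarchical admin URI for the endpoint, issue an HTTP DELETE,
// then verify the status family and wait for the cluster to settle.
final URI deleteTable = HierarchyHelper
		.hierarchicalPath(conquery.defaultAdminURIBuilder(), AdminTablesResource.class, "remove")
		.buildFromMap(Map.of(ResourceConstants.DATASET, dataset.getName(), ResourceConstants.TABLE, tableId.toString()));
final Response response = conquery.getClient().target(deleteTable).request().delete();
assertThat(response.getStatusInfo().getFamily()).isEqualTo(Response.Status.Family.SUCCESSFUL);
conquery.waitUntilWorkDone();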