Use of io.datarouter.scanner.Scanner in project datarouter by hotpads.
The class JobletHandler, method list().
@Handler
private Mav list(
		@Param(PARAM_whereStatus) OptionalString pStatus,
		@Param(PARAM_type) OptionalString pType){
	Scanner<JobletRequest> requests = jobletRequestDao.scan();
	if(pStatus.isPresent() && pType.isPresent()){
		JobletStatus status = JobletStatus.fromPersistentStringStatic(pStatus.get());
		requests = requests
				.include(request -> status == request.getStatus())
				.include(request -> request.getKey().getType().equals(pType.get()));
	}else if(pStatus.isPresent() && pType.isEmpty()){
		JobletStatus status = JobletStatus.fromPersistentStringStatic(pStatus.get());
		requests = requests.include(request -> status == request.getStatus());
	}else if(pStatus.isEmpty() && pType.isPresent()){
		requests = requests.include(request -> request.getKey().getType().equals(pType.get()));
	}
	Collection<JobletSummary> summaries = JobletSummary.summarizeByTypeExecutionOrderStatus(requests);
	return pageFactory.startBuilder(request)
			.withTitle(TITLE)
			.withRequires(DatarouterWebRequireJsV2.SORTTABLE)
			.withContent(makeContent(summaries))
			.buildMav();
}
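A minimal standalone sketch of the same include(..) filtering idiom, using only Scanner operations that appear above (of, include, list); the class name and data here are hypothetical:

import io.datarouter.scanner.Scanner;
import java.util.List;

public class ScannerIncludeExample{

	public static void main(String[] args){
		// include(..) keeps only the elements matching the predicate, similar to Stream.filter(..)
		List<String> bNames = Scanner.of("alpha", "beta", "gamma")
				.include(name -> name.startsWith("b"))
				.list();
		System.out.println(bNames);// [beta]
	}
}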
Use of io.datarouter.scanner.Scanner in project datarouter by hotpads.
The class BaseDatarouterServletContextListener, method buildExecuteOnActionsLists().
private void buildExecuteOnActionsLists(ServletContextEvent event){
	DatarouterInjector injector = getInjector(event.getServletContext());
	Scanner.of(listenerClasses)
			.map(injector::getInstance)
			.forEach(allListeners::add);
	Scanner.of(webListenerClasses)
			.map(injector::getInstance)
			.each(listener -> listener.setServletContext(event.getServletContext()))
			.forEach(allListeners::add);
	Scanner.of(allListeners)
			.splitBy(DatarouterAppListener::safeToExecuteInParallel)
			.map(Scanner::list)
			.map(listeners -> new Pair<>(
					listeners.get(0).safeToExecuteInParallel() ? ExecutionMode.PARALLEL : ExecutionMode.SYNCHRONOUS,
					listeners))
			.forEach(listenersByExecutionMods::add);
}
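The splitBy(..) call above groups consecutive listeners by whether they can run in parallel. A small hedged sketch of that grouping behavior, assuming splitBy starts a new sub-scanner each time the key function's value changes (consistent with the usage above); the class name and integer data are hypothetical:

import io.datarouter.scanner.Scanner;
import java.util.List;

public class ScannerSplitByExample{

	public static void main(String[] args){
		// splitBy(..) emits a sub-scanner for each run of consecutive elements sharing a key,
		// which the code above converts to lists via map(Scanner::list)
		List<List<Integer>> groups = Scanner.of(1, 1, 2, 2, 2, 1)
				.splitBy(i -> i)
				.map(Scanner::list)
				.list();
		System.out.println(groups);// [[1, 1], [2, 2, 2], [1]]
	}
}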
Use of io.datarouter.scanner.Scanner in project datarouter by hotpads.
The class SpannerSingleTableSchemaUpdateService, method performSchemaUpdate().
public Optional<SchemaUpdateResult> performSchemaUpdate(
		ClientId clientId,
		Supplier<List<String>> existingTableNames,
		PhysicalNode<?,?,?> physicalNode){
	String tableName = physicalNode.getFieldInfo().getTableName();
	List<Field<?>> primaryKeyFields = physicalNode.getFieldInfo().getSamplePrimaryKey().getFields();
	List<? extends SpannerBaseFieldCodec<?,?>> primaryKeyCodecs = fieldCodecRegistry.createCodecs(primaryKeyFields);
	// Spanner does not allow array columns in a primary key
	for(SpannerBaseFieldCodec<?,?> codec : primaryKeyCodecs){
		if(codec.getSpannerColumnType().isArray()){
			throw new RuntimeException("Invalid field type used for primary key: "
					+ codec.getField().getKey().getName());
		}
	}
	List<SpannerIndex> indexes = new ArrayList<>();
	List<SpannerIndex> uniqueIndexes = Scanner.of(physicalNode.getFieldInfo().getUniqueIndexes().entrySet())
			.map(entry -> new SpannerIndex(tableName, entry.getKey(), entry.getValue(), Collections.emptyList(), true))
			.list();
	var statements = new SpannerUpdateStatements();
	String entityTableName = null;
	if(physicalNode instanceof IndexedStorage){
		IndexedStorage<?,?> indexedStorage = (IndexedStorage<?,?>)physicalNode;
		indexes = Scanner.of(indexedStorage.getManagedNodes())
				.map(node -> new SpannerIndex(
						tableName,
						node.getName(),
						node.getIndexEntryFieldInfo().getPrimaryKeyFields(),
						node.getIndexEntryFieldInfo().getFields(),
						false))
				.list();
	}
	List<SpannerColumn> primaryKeyColumns = Scanner.of(primaryKeyCodecs)
			.map(codec -> codec.getSpannerColumn(false))
			.list();
	List<SpannerColumn> nonKeyColumns = Scanner.of(fieldCodecRegistry.createCodecs(physicalNode.getFieldInfo()
			.getNonKeyFields()))
			.map(codec -> codec.getSpannerColumn(true))
			.list();
	if(!existingTableNames.get().contains(tableName)){
		// table is missing: create it along with every declared index
		statements.updateFunction(
				tableOperationsGenerator.createTable(tableName, primaryKeyColumns, nonKeyColumns, entityTableName),
				updateOptions::getCreateTables,
				true);
		Scanner.of(indexes, uniqueIndexes)
				.concat(Scanner::of)
				.map(index -> createIndex(index, primaryKeyColumns))
				.forEach(statement -> statements.updateFunction(statement, updateOptions::getCreateTables, true));
	}else{
		// table exists: diff the declared columns and indexes against the live schema
		DatabaseClient databaseClient = clientsHolder.getDatabaseClient(clientId);
		List<SpannerColumn> allColumns = Scanner.of(primaryKeyColumns, nonKeyColumns)
				.concat(Scanner::of)
				.list();
		ResultSet columnRs = databaseClient.singleUse()
				.executeQuery(Statement.of(tableOperationsGenerator.getTableSchema(tableName)));
		ResultSet primaryKeyRs = databaseClient.singleUse()
				.executeQuery(Statement.of(tableOperationsGenerator.getTableIndexColumnsSchema(
						tableName,
						"PRIMARY_KEY")));
		tableAlterSchemaService.generateUpdateStatementColumns(
				tableName,
				allColumns,
				primaryKeyColumns,
				columnRs,
				primaryKeyRs,
				statements);
		ResultSet indexesRs = databaseClient.singleUse()
				.executeQuery(Statement.of(tableOperationsGenerator.getTableIndexSchema(tableName)));
		Set<String> currentIndexes = tableAlterSchemaService.getIndexes(indexesRs);
		Scanner.of(indexes, uniqueIndexes)
				.concat(Scanner::of)
				.forEach(index -> {
					Statement tableIndexColumnsSchema = Statement.of(tableOperationsGenerator
							.getTableIndexColumnsSchema(tableName, index.getIndexName()));
					ResultSet indexRs = databaseClient.singleUse().executeQuery(tableIndexColumnsSchema);
					// a changed index must be dropped and recreated
					if(!tableAlterSchemaService.indexEqual(index, indexRs)){
						if(currentIndexes.contains(index.getIndexName())){
							statements.updateFunction(
									tableOperationsGenerator.dropIndex(index.getIndexName()),
									updateOptions::getDropIndexes,
									false);
						}
						statements.updateFunction(
								createIndex(index, primaryKeyColumns),
								updateOptions::getAddIndexes,
								true);
					}
					currentIndexes.remove(index.getIndexName());
				});
		// anything left in currentIndexes exists in Spanner but is no longer declared, so drop it
		currentIndexes.forEach(name -> statements.updateFunction(
				tableOperationsGenerator.dropIndex(name),
				updateOptions::getDropIndexes,
				false));
	}
	String errorMessage = null;
	if(!statements.getExecuteStatements().isEmpty()){
		logger.info(SchemaUpdateTool.generateFullWidthMessage(
				"Executing Spanner " + getClass().getSimpleName() + " SchemaUpdate"));
		logger.info(String.join("\n\n", statements.getExecuteStatements()));
		Database database = clientsHolder.getDatabase(clientId);
		OperationFuture<Void,UpdateDatabaseDdlMetadata> future = database.updateDdl(
				statements.getExecuteStatements(),
				null);
		errorMessage = FutureTool.get(future.getPollingFuture().getAttemptResult()).getErrorMessage();
		if(StringTool.notNullNorEmptyNorWhitespace(errorMessage)){
			logger.error(errorMessage);
		}
	}
	if(statements.getPreventStartUp()){
		errorMessage = "an alter on Spanner table " + tableName + " is required";
	}
	if(statements.getPrintStatements().isEmpty()){
		return Optional.empty();
	}
	String printStatement = statements.getPrintStatements().stream()
			.map(statement -> statement + ";")
			.collect(Collectors.joining("\n"));
	SchemaUpdateTool.printSchemaUpdate(logger, printStatement);
	return Optional.of(new SchemaUpdateResult(printStatement, errorMessage, clientId));
}
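The method above repeatedly uses Scanner.of(listA, listB).concat(Scanner::of) to walk two lists as a single sequence. A minimal sketch of that flattening idiom, using only operations shown above; the class name and data are hypothetical:

import io.datarouter.scanner.Scanner;
import java.util.List;

public class ScannerConcatExample{

	public static void main(String[] args){
		List<String> indexes = List.of("indexA", "indexB");
		List<String> uniqueIndexes = List.of("uniqueIndexC");
		// Scanner.of(indexes, uniqueIndexes) scans the two lists themselves as elements;
		// concat(Scanner::of) flattens them into one scanner of their contents, like Stream.flatMap(..)
		List<String> all = Scanner.of(indexes, uniqueIndexes)
				.concat(Scanner::of)
				.list();
		System.out.println(all);// [indexA, indexB, uniqueIndexC]
	}
}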
Use of io.datarouter.scanner.Scanner in project datarouter by hotpads.
The class ClusterSettingService, method scanClusterSettingAndValidityWithPrefix().
public Scanner<ClusterSettingAndValidityJspDto> scanClusterSettingAndValidityWithPrefix(String prefix){
	WebappInstance currentWebappInstance = webappInstanceDao.get(webappInstanceService.buildCurrentWebappInstanceKey());
	Range<ClusterSettingKey> range = prefix == null
			? Range.everything()
			: KeyRangeTool.forPrefixWithWildcard(prefix, name -> new ClusterSettingKey(name, null, null, null));
	return clusterSettingDao.scan(range)
			.map(setting -> {
				ClusterSettingValidity validity = getValidityForWebappInstance(setting, currentWebappInstance);
				return new ClusterSettingAndValidityJspDto(setting, validity);
			});
}
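Because Scanner operations are lazy, the method above can return the mapped Scanner without touching the datastore until a caller consumes it. A small sketch of returning a lazily mapped Scanner under that assumption; ScannerLazyMapExample and NameLength are hypothetical stand-ins (NameLength playing the role of ClusterSettingAndValidityJspDto):

import io.datarouter.scanner.Scanner;

public class ScannerLazyMapExample{

	// hypothetical DTO standing in for ClusterSettingAndValidityJspDto
	record NameLength(String name, int length){
	}

	public static Scanner<NameLength> scanNameLengths(){
		// nothing is computed here; map(..) runs only when a terminal op consumes the scanner
		return Scanner.of("a", "bb", "ccc")
				.map(name -> new NameLength(name, name.length()));
	}

	public static void main(String[] args){
		scanNameLengths().forEach(System.out::println);
	}
}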
Use of io.datarouter.scanner.Scanner in project datarouter by hotpads.
The class ScanningBlockReader, method scanLeafBlockKeys().
private Scanner<BlockKey> scanLeafBlockKeys(long fromRecordIdInclusive){
	BlockKey topBranchBlockKey = rootBlock.rootBranchBlockKey(snapshotKey);
	BranchBlock topBranchBlock = blockLoader.branch(topBranchBlockKey);
	return scanDescendantBranchBlocks(topBranchBlock, fromRecordIdInclusive)
			.include(branchBlock -> branchBlock.level() == 0)
			.concat(branchBlock -> Scanner.iterate(0, i -> i + 1)
					.limit(branchBlock.numRecords())
					.include(index -> branchBlock.recordId(index) >= fromRecordIdInclusive)
					.map(branchBlock::childBlock)
					.map(leafBlockId -> branchBlock.leafBlockKey(snapshotKey, leafBlockId)));
}
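The inner scan above generates record indexes 0, 1, 2, ... with Scanner.iterate and bounds them with limit(..). A minimal sketch of that counting idiom using only operations shown above; the class name and cutoff values are hypothetical:

import io.datarouter.scanner.Scanner;
import java.util.List;

public class ScannerIterateExample{

	public static void main(String[] args){
		// iterate(seed, fn) yields seed, fn(seed), fn(fn(seed)), ... indefinitely;
		// limit(..) bounds it and include(..) filters, mirroring scanLeafBlockKeys above
		List<Integer> indexes = Scanner.iterate(0, i -> i + 1)
				.limit(5)
				.include(i -> i >= 2)
				.list();
		System.out.println(indexes);// [2, 3, 4]
	}
}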