Use of io.quarkus.deployment.builditem.SystemPropertyBuildItem in project quarkus by quarkusio.
The class HibernateOrmProcessor, method producePersistenceUnitDescriptorFromConfig.
private static void producePersistenceUnitDescriptorFromConfig(HibernateOrmConfig hibernateOrmConfig, String persistenceUnitName, HibernateOrmConfigPersistenceUnit persistenceUnitConfig, Set<String> modelClassesAndPackages, List<RecordableXmlMapping> xmlMappings, List<JdbcDataSourceBuildItem> jdbcDataSources, ApplicationArchivesBuildItem applicationArchivesBuildItem, LaunchMode launchMode, Capabilities capabilities, BuildProducer<SystemPropertyBuildItem> systemProperties, BuildProducer<NativeImageResourceBuildItem> nativeImageResources, BuildProducer<HotDeploymentWatchedFileBuildItem> hotDeploymentWatchedFiles, BuildProducer<PersistenceUnitDescriptorBuildItem> persistenceUnitDescriptors, Set<String> storageEngineCollector) {
Optional<JdbcDataSourceBuildItem> jdbcDataSource = findJdbcDataSource(persistenceUnitName, persistenceUnitConfig, jdbcDataSources);
Optional<String> explicitDialect = persistenceUnitConfig.dialect.dialect;
String dialect;
MultiTenancyStrategy multiTenancyStrategy = getMultiTenancyStrategy(persistenceUnitConfig.multitenant);
if (multiTenancyStrategy == MultiTenancyStrategy.DATABASE) {
// No datasource is required in this branch, since the datasource will be resolved separately for each tenant.
if (explicitDialect.isPresent()) {
dialect = explicitDialect.get();
} else if (jdbcDataSource.isPresent()) {
dialect = Dialects.guessDialect(persistenceUnitName, jdbcDataSource.get().getDbKind());
} else {
throw new ConfigurationException(String.format(Locale.ROOT, "The Hibernate ORM extension could not infer the dialect for persistence unit '%s'." + " When using database multi-tenancy, you must either configure a datasource for that persistence unit" + " (refer to https://quarkus.io/guides/datasource for guidance)," + " or set the dialect explicitly through property '" + HibernateOrmConfig.puPropertyKey(persistenceUnitName, "dialect") + "'.", persistenceUnitName));
}
} else {
if (!jdbcDataSource.isPresent()) {
throw new ConfigurationException(String.format(Locale.ROOT, "Datasource must be defined for persistence unit '%s'." + " Refer to https://quarkus.io/guides/datasource for guidance.", persistenceUnitName), new HashSet<>(Arrays.asList("quarkus.datasource.db-kind", "quarkus.datasource.username", "quarkus.datasource.password", "quarkus.datasource.jdbc.url")));
}
if (explicitDialect.isPresent()) {
dialect = explicitDialect.get();
} else {
dialect = Dialects.guessDialect(persistenceUnitName, jdbcDataSource.get().getDbKind());
}
}
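// Summary of the resolution rules above: an explicitly configured dialect always wins;
// otherwise the dialect is guessed from the datasource's db-kind; and a datasource is
// mandatory except under DATABASE multi-tenancy, where each tenant's datasource is
// resolved separately at runtime.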
// Build the persistence unit descriptor.
// TODO: pass the real persistence unit root URL instead of null.
ParsedPersistenceXmlDescriptor descriptor = new ParsedPersistenceXmlDescriptor(null);
descriptor.setName(persistenceUnitName);
descriptor.setExcludeUnlistedClasses(true);
if (modelClassesAndPackages.isEmpty()) {
LOG.warnf("Could not find any entities assigned to the persistence unit '%s'.", persistenceUnitName);
} else {
// That's right, we're pushing both class names and package names
// to a method called "addClasses".
// It's a misnomer: while the method populates the set that backs getManagedClasses(),
// that method is also poorly named because it can actually return both class names
// and package names.
// See for proof:
// - how org.hibernate.boot.archive.scan.internal.ScanResultCollector.isListedOrDetectable
// is used for packages too, even though it relies (indirectly) on getManagedClassNames().
// - the comment at org/hibernate/boot/model/process/internal/ScanningCoordinator.java:246:
// "IMPL NOTE : "explicitlyListedClassNames" can contain class or package names..."
descriptor.addClasses(new ArrayList<>(modelClassesAndPackages));
}
descriptor.setTransactionType(PersistenceUnitTransactionType.JTA);
descriptor.getProperties().setProperty(AvailableSettings.DIALECT, dialect);
// The storage engine has to be set as a system property.
if (persistenceUnitConfig.dialect.storageEngine.isPresent()) {
systemProperties.produce(new SystemPropertyBuildItem(AvailableSettings.STORAGE_ENGINE, persistenceUnitConfig.dialect.storageEngine.get()));
}
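// (Hibernate ORM reads the storage engine setting when the MySQL/MariaDB dialect is
// constructed, presumably from system/environment properties rather than from the
// persistence unit configuration, which would explain why a SystemPropertyBuildItem
// is used here instead of a descriptor property.)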
// Physical Naming Strategy
persistenceUnitConfig.physicalNamingStrategy.ifPresent(namingStrategy -> descriptor.getProperties().setProperty(AvailableSettings.PHYSICAL_NAMING_STRATEGY, namingStrategy));
// Implicit Naming Strategy
persistenceUnitConfig.implicitNamingStrategy.ifPresent(namingStrategy -> descriptor.getProperties().setProperty(AvailableSettings.IMPLICIT_NAMING_STRATEGY, namingStrategy));
// Metadata builder contributor
persistenceUnitConfig.metadataBuilderContributor.ifPresent(className -> descriptor.getProperties().setProperty(EntityManagerFactoryBuilderImpl.METADATA_BUILDER_CONTRIBUTOR, className));
// charset
descriptor.getProperties().setProperty(AvailableSettings.HBM2DDL_CHARSET_NAME, persistenceUnitConfig.database.charset.name());
if (persistenceUnitConfig.database.globallyQuotedIdentifiers) {
descriptor.getProperties().setProperty(AvailableSettings.GLOBALLY_QUOTED_IDENTIFIERS, "true");
}
// Query
int batchSize = firstPresent(persistenceUnitConfig.fetch.batchSize, persistenceUnitConfig.batchFetchSize).orElse(16);
if (batchSize > 0) {
descriptor.getProperties().setProperty(AvailableSettings.DEFAULT_BATCH_FETCH_SIZE, Integer.toString(batchSize));
descriptor.getProperties().setProperty(AvailableSettings.BATCH_FETCH_STYLE, BatchFetchStyle.PADDED.toString());
}
if (persistenceUnitConfig.fetch.maxDepth.isPresent()) {
setMaxFetchDepth(descriptor, persistenceUnitConfig.fetch.maxDepth);
} else if (persistenceUnitConfig.maxFetchDepth.isPresent()) {
setMaxFetchDepth(descriptor, persistenceUnitConfig.maxFetchDepth);
}
descriptor.getProperties().setProperty(AvailableSettings.QUERY_PLAN_CACHE_MAX_SIZE, Integer.toString(persistenceUnitConfig.query.queryPlanCacheMaxSize));
descriptor.getProperties().setProperty(AvailableSettings.DEFAULT_NULL_ORDERING, persistenceUnitConfig.query.defaultNullOrdering.name().toLowerCase(Locale.ROOT));
// Disable sequence validations: they are reportedly slow, and people already get the same validation from normal schema validation
descriptor.getProperties().put(AvailableSettings.SEQUENCE_INCREMENT_SIZE_MISMATCH_STRATEGY, SequenceMismatchStrategy.NONE);
// JDBC
persistenceUnitConfig.jdbc.timezone.ifPresent(timezone -> descriptor.getProperties().setProperty(AvailableSettings.JDBC_TIME_ZONE, timezone));
persistenceUnitConfig.jdbc.statementFetchSize.ifPresent(fetchSize -> descriptor.getProperties().setProperty(AvailableSettings.STATEMENT_FETCH_SIZE, String.valueOf(fetchSize)));
persistenceUnitConfig.jdbc.statementBatchSize.ifPresent(fetchSize -> descriptor.getProperties().setProperty(AvailableSettings.STATEMENT_BATCH_SIZE, String.valueOf(fetchSize)));
// Statistics
if (hibernateOrmConfig.metricsEnabled || (hibernateOrmConfig.statistics.isPresent() && hibernateOrmConfig.statistics.get())) {
descriptor.getProperties().setProperty(AvailableSettings.GENERATE_STATISTICS, "true");
// When statistics are enabled, the default in Hibernate ORM is to also log them after each
// session; turn that off by default as it's very noisy:
descriptor.getProperties().setProperty(AvailableSettings.LOG_SESSION_METRICS, String.valueOf(hibernateOrmConfig.logSessionMetrics.orElse(false)));
}
// sql-load-scripts
List<String> importFiles = getSqlLoadScript(persistenceUnitConfig.sqlLoadScript, launchMode);
if (!importFiles.isEmpty()) {
for (String importFile : importFiles) {
Path loadScriptPath;
try {
loadScriptPath = applicationArchivesBuildItem.getRootArchive().getChildPath(importFile);
} catch (RuntimeException e) {
throw new ConfigurationException("Unable to interpret path referenced in '" + HibernateOrmConfig.puPropertyKey(persistenceUnitName, "sql-load-script") + "=" + String.join(",", persistenceUnitConfig.sqlLoadScript.get()) + "': " + e.getMessage());
}
if (loadScriptPath != null && !Files.isDirectory(loadScriptPath)) {
// enlist resource if present
nativeImageResources.produce(new NativeImageResourceBuildItem(importFile));
} else if (persistenceUnitConfig.sqlLoadScript.isPresent()) {
// raise exception if explicit file is not present (i.e. not the default)
throw new ConfigurationException("Unable to find file referenced in '" + HibernateOrmConfig.puPropertyKey(persistenceUnitName, "sql-load-script") + "=" + String.join(",", persistenceUnitConfig.sqlLoadScript.get()) + "'. Remove property or add file to your path.");
}
// In dev mode we want to make sure that we watch the file for changes even if it doesn't currently exist,
// as a user could still add it after performing the initial configuration.
hotDeploymentWatchedFiles.produce(new HotDeploymentWatchedFileBuildItem(importFile));
}
// only set the found import files if configured
if (persistenceUnitConfig.sqlLoadScript.isPresent()) {
descriptor.getProperties().setProperty(AvailableSettings.HBM2DDL_IMPORT_FILES, String.join(",", importFiles));
}
} else {
// Disable implicit loading of the default import script (import.sql)
descriptor.getProperties().setProperty(AvailableSettings.HBM2DDL_IMPORT_FILES, "");
}
// Caching
if (persistenceUnitConfig.secondLevelCachingEnabled) {
Properties p = descriptor.getProperties();
// Only set these if the user isn't making an explicit choice:
p.putIfAbsent(USE_DIRECT_REFERENCE_CACHE_ENTRIES, Boolean.TRUE);
p.putIfAbsent(USE_SECOND_LEVEL_CACHE, Boolean.TRUE);
p.putIfAbsent(USE_QUERY_CACHE, Boolean.TRUE);
p.putIfAbsent(JPA_SHARED_CACHE_MODE, SharedCacheMode.ENABLE_SELECTIVE);
Map<String, String> cacheConfigEntries = HibernateConfigUtil.getCacheConfigEntries(persistenceUnitConfig);
for (Entry<String, String> entry : cacheConfigEntries.entrySet()) {
descriptor.getProperties().setProperty(entry.getKey(), entry.getValue());
}
} else {
// Unless the global switch is explicitly set to off, in which case we disable all caching:
Properties p = descriptor.getProperties();
p.put(USE_DIRECT_REFERENCE_CACHE_ENTRIES, Boolean.FALSE);
p.put(USE_SECOND_LEVEL_CACHE, Boolean.FALSE);
p.put(USE_QUERY_CACHE, Boolean.FALSE);
p.put(JPA_SHARED_CACHE_MODE, SharedCacheMode.NONE);
}
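// Note the asymmetry between the two branches: when caching is enabled, putIfAbsent()
// respects any explicit user overrides, while the disabled branch uses put() to force
// caching off unconditionally.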
// Hibernate Validator integration: force CALLBACK validation mode so that bootstrap fails loudly
// if there is any issue when bootstrapping Hibernate Validator (AUTO would silently skip validation).
if (capabilities.isPresent(Capability.HIBERNATE_VALIDATOR)) {
descriptor.getProperties().setProperty(AvailableSettings.JPA_VALIDATION_MODE, ValidationMode.CALLBACK.name());
}
// Collect the storage engines if MySQL or MariaDB
if (isMySQLOrMariaDB(dialect) && persistenceUnitConfig.dialect.storageEngine.isPresent()) {
storageEngineCollector.add(persistenceUnitConfig.dialect.storageEngine.get());
}
// Discriminator Column
descriptor.getProperties().setProperty(AvailableSettings.IGNORE_EXPLICIT_DISCRIMINATOR_COLUMNS_FOR_JOINED_SUBCLASS, String.valueOf(persistenceUnitConfig.discriminator.ignoreExplicitForJoined));
persistenceUnitDescriptors.produce(new PersistenceUnitDescriptorBuildItem(descriptor, jdbcDataSource.map(JdbcDataSourceBuildItem::getName), multiTenancyStrategy, persistenceUnitConfig.multitenantSchemaDatasource.orElse(null), xmlMappings, false, false));
}
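Stripped of the Hibernate specifics, the SystemPropertyBuildItem usage above follows the standard Quarkus pattern: a build step declares a BuildProducer<SystemPropertyBuildItem> parameter and produces key/value pairs that Quarkus applies as JVM system properties at application startup. A minimal sketch, using an illustrative key and value rather than the real configuration plumbing:

@BuildStep
void exposeStorageEngine(BuildProducer<SystemPropertyBuildItem> systemProperties) {
    // Illustrative only: the key is assumed to correspond to AvailableSettings.STORAGE_ENGINE,
    // and "innodb" stands in for whatever value the user configured.
    systemProperties.produce(new SystemPropertyBuildItem("hibernate.dialect.storage_engine", "innodb"));
}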
Use of io.quarkus.deployment.builditem.SystemPropertyBuildItem in project quarkus by quarkusio.
The class InfinispanClientProcessor, method setup.
@BuildStep
InfinispanPropertiesBuildItem setup(ApplicationArchivesBuildItem applicationArchivesBuildItem, BuildProducer<ReflectiveClassBuildItem> reflectiveClass, BuildProducer<HotDeploymentWatchedFileBuildItem> hotDeployment, BuildProducer<SystemPropertyBuildItem> systemProperties, BuildProducer<FeatureBuildItem> feature, BuildProducer<AdditionalBeanBuildItem> additionalBeans, BuildProducer<ExtensionSslNativeSupportBuildItem> sslNativeSupport, BuildProducer<NativeImageSecurityProviderBuildItem> nativeImageSecurityProviders, BuildProducer<NativeImageConfigBuildItem> nativeImageConfig, CombinedIndexBuildItem applicationIndexBuildItem) throws ClassNotFoundException, IOException {
feature.produce(new FeatureBuildItem(Feature.INFINISPAN_CLIENT));
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(InfinispanClientProducer.class));
systemProperties.produce(new SystemPropertyBuildItem("io.netty.noUnsafe", "true"));
hotDeployment.produce(new HotDeploymentWatchedFileBuildItem(META_INF + File.separator + HOTROD_CLIENT_PROPERTIES));
// Enable SSL support by default
sslNativeSupport.produce(new ExtensionSslNativeSupportBuildItem(Feature.INFINISPAN_CLIENT));
nativeImageSecurityProviders.produce(new NativeImageSecurityProviderBuildItem(SASL_SECURITY_PROVIDER));
InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream(META_INF + "/" + HOTROD_CLIENT_PROPERTIES);
Properties properties;
if (stream == null) {
properties = new Properties();
if (log.isTraceEnabled()) {
log.trace("There was no hotrod-client.properties file found - using defaults");
}
} else {
try {
properties = loadFromStream(stream);
if (log.isDebugEnabled()) {
log.debugf("Found HotRod properties of %s", properties);
}
} finally {
Util.close(stream);
}
// We use Caffeine for the bounded near cache, so register its classes for reflection when a bounded near cache is configured
if (properties.containsKey(ConfigurationProperties.NEAR_CACHE_MAX_ENTRIES)) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, "com.github.benmanes.caffeine.cache.SSMS"));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, "com.github.benmanes.caffeine.cache.PSMS"));
}
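// (SSMS and PSMS are internal cache implementation classes that Caffeine selects at runtime
// based on the cache's configuration, which is presumably why they must be registered for
// reflection explicitly here.)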
}
InfinispanClientProducer.replaceProperties(properties);
IndexView index = applicationIndexBuildItem.getIndex();
// This is always non-null
Object marshaller = properties.get(ConfigurationProperties.MARSHALLER);
if (marshaller instanceof ProtoStreamMarshaller) {
for (ApplicationArchive applicationArchive : applicationArchivesBuildItem.getAllApplicationArchives()) {
// If there is a META-INF directory, it may contain proto files we have to care about
Path metaPath = applicationArchive.getChildPath(META_INF);
if (metaPath != null) {
try (Stream<Path> dirElements = Files.list(metaPath)) {
Iterator<Path> protoFiles = dirElements.filter(Files::isRegularFile).filter(p -> p.toString().endsWith(PROTO_EXTENSION)).iterator();
// Ideally we would monitor the entire META-INF directory for new proto files,
// but Quarkus doesn't currently support hot-deployment watching of directories:
// hotDeployment.produce(new HotDeploymentConfigFileBuildItem(META_INF));
while (protoFiles.hasNext()) {
Path path = protoFiles.next();
if (log.isDebugEnabled()) {
log.debug(" " + path.toAbsolutePath());
}
byte[] bytes = Files.readAllBytes(path);
// The proto file contents are read explicitly as UTF-8 rather than the platform default encoding
properties.put(InfinispanClientProducer.PROTOBUF_FILE_PREFIX + path.getFileName().toString(), new String(bytes, StandardCharsets.UTF_8));
}
}
}
}
InfinispanClientProducer.handleProtoStreamRequirements(properties);
Collection<ClassInfo> initializerClasses = index.getAllKnownImplementors(DotName.createSimple(SerializationContextInitializer.class.getName()));
initializerClasses.addAll(index.getAllKnownImplementors(DotName.createSimple(GeneratedSchema.class.getName())));
Set<SerializationContextInitializer> initializers = new HashSet<>(initializerClasses.size());
for (ClassInfo ci : initializerClasses) {
Class<?> initializerClass = Thread.currentThread().getContextClassLoader().loadClass(ci.toString());
try {
SerializationContextInitializer sci = (SerializationContextInitializer) initializerClass.getDeclaredConstructor().newInstance();
initializers.add(sci);
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
// This should never happen, as the annotation processor generates a no-arg constructor
throw new RuntimeException(e);
}
}
if (!initializers.isEmpty()) {
properties.put(InfinispanClientProducer.PROTOBUF_INITIALIZERS, initializers);
}
}
// Add any user project listeners to allow reflection in native code
Collection<AnnotationInstance> listenerInstances = index.getAnnotations(DotName.createSimple(ClientListener.class.getName()));
for (AnnotationInstance instance : listenerInstances) {
AnnotationTarget target = instance.target();
if (target.kind() == AnnotationTarget.Kind.CLASS) {
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, target.asClass().name().toString()));
}
}
// This is required for Netty to work properly
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, "io.netty.channel.socket.nio.NioSocketChannel"));
// We use reflection to make continuous queries work
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "org.infinispan.client.hotrod.event.impl.ContinuousQueryImpl$ClientEntryListener"));
// We use reflection to allow for near cache invalidations
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "org.infinispan.client.hotrod.near.NearCacheService$InvalidatedNearCacheListener"));
// This is required when a cache is clustered, to tell us the topology
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, "org.infinispan.client.hotrod.impl.consistenthash.SegmentConsistentHash"));
return new InfinispanPropertiesBuildItem(properties);
}
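The SerializationContextInitializer lookup above targets implementations that are normally generated by the ProtoStream annotation processor. A hedged sketch of the kind of user code the Jandex scan would discover (Book is a hypothetical entity class):

import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;

@AutoProtoSchemaBuilder(includeClasses = Book.class, schemaPackageName = "book_sample")
interface BookContextInitializer extends SerializationContextInitializer {
    // The ProtoStream annotation processor generates the implementation, including
    // the no-arg constructor that the build step instantiates reflectively.
}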
Use of io.quarkus.deployment.builditem.SystemPropertyBuildItem in project quarkus by quarkusio.
The class NettyProcessor, method setNettyMachineId.
@BuildStep
public SystemPropertyBuildItem setNettyMachineId() {
// We set the io.netty.machineId system property to prevent potential slowness when
// generating/inferring the default machine id in the io.netty.channel.DefaultChannelId
// implementation, which iterates over the NetworkInterfaces to determine the "best" machine id.
// The length is borrowed from io.netty.util.internal.MacAddressUtil.EUI64_MAC_ADDRESS_LENGTH:
final int EUI64_MAC_ADDRESS_LENGTH = 8;
final byte[] machineIdBytes = new byte[EUI64_MAC_ADDRESS_LENGTH];
new Random().nextBytes(machineIdBytes);
final String nettyMachineId = io.netty.util.internal.MacAddressUtil.formatAddress(machineIdBytes);
return new SystemPropertyBuildItem("io.netty.machineId", nettyMachineId);
}
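Returning the build item is equivalent to producing it: a Quarkus build step may either return a SystemPropertyBuildItem directly, as above, or accept a BuildProducer parameter, which is convenient when a step emits several items. A sketch of the same step rewritten in producer style:

@BuildStep
public void setNettyMachineId(BuildProducer<SystemPropertyBuildItem> systemProperties) {
    // Same logic as the return-value variant above, just emitted through a producer.
    byte[] machineIdBytes = new byte[8]; // EUI-64 MAC address length
    new Random().nextBytes(machineIdBytes);
    systemProperties.produce(new SystemPropertyBuildItem("io.netty.machineId",
            io.netty.util.internal.MacAddressUtil.formatAddress(machineIdBytes)));
}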
Use of io.quarkus.deployment.builditem.SystemPropertyBuildItem in project infinispan-quarkus by infinispan.
The class InfinispanServerProcessor, method setSystemProperties.
@BuildStep
void setSystemProperties(BuildProducer<NativeImageSystemPropertyBuildItem> buildSystemProperties, BuildProducer<SystemPropertyBuildItem> systemProperties) {
// We disable the replacement of JdkSslContext in the NettyExtensions - this shouldn't be needed once we move to Java 11
buildSystemProperties.produce(new NativeImageSystemPropertyBuildItem("substratevm.replacement.jdksslcontext", "false"));
// Make sure to disable the logging endpoint in JVM mode, as it won't work because Quarkus replaces the log4j classes
systemProperties.produce(new SystemPropertyBuildItem("infinispan.server.resource.logging", "false"));
}
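The two producers differ in when the property takes effect: NativeImageSystemPropertyBuildItem passes the property to the native-image build process itself, while SystemPropertyBuildItem sets it when the application starts, in JVM or native mode. An illustrative sketch with made-up property keys:

@BuildStep
void contrastPropertyScopes(BuildProducer<NativeImageSystemPropertyBuildItem> buildTime,
        BuildProducer<SystemPropertyBuildItem> runtime) {
    // Applied as -D on the native-image invocation (hypothetical key):
    buildTime.produce(new NativeImageSystemPropertyBuildItem("example.build.flag", "true"));
    // Applied via System.setProperty() at application startup (hypothetical key):
    runtime.produce(new SystemPropertyBuildItem("example.runtime.flag", "true"));
}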
Use of io.quarkus.deployment.builditem.SystemPropertyBuildItem in project kogito-runtimes by kiegroup.
The class KogitoDevServicesProcessor, method startDataIndexDevService.
@BuildStep(onlyIf = { GlobalDevServicesConfig.Enabled.class, IsDevelopment.class })
public void startDataIndexDevService(BuildProducer<AdditionalBeanBuildItem> additionalBean, BuildProducer<SystemPropertyBuildItem> systemProperties, BuildProducer<KogitoDataIndexServiceAvailableBuildItem> dataIndexServiceAvailableBuildItemBuildProducer, LaunchModeBuildItem launchMode, KogitoBuildTimeConfig buildTimeConfig, List<DevServicesSharedNetworkBuildItem> devServicesSharedNetwork, Optional<ConsoleInstalledBuildItem> consoleInstalled, CuratedApplicationShutdownBuildItem applicationShutdown, LoggingSetupBuildItem loggingSetup) {
DataIndexDevServiceConfig configuration = getConfiguration(buildTimeConfig);
if (configuration.devServicesEnabled && isDockerWorking.getAsBoolean()) {
additionalBean.produce(AdditionalBeanBuildItem.builder().addBeanClass(DataIndexEventPublisher.class).build());
}
LOGGER.info("Dev Services for Kogito Data Index using image {}", configuration.imageName);
if (closeable != null) {
boolean shouldShutdown = !configuration.equals(cfg);
if (!shouldShutdown) {
// Signal that the service is available: dev services may have restarted, but the service itself kept running
dataIndexServiceAvailableBuildItemBuildProducer.produce(new KogitoDataIndexServiceAvailableBuildItem());
return;
}
shutdownDataIndex();
cfg = null;
}
StartupLogCompressor compressor = new StartupLogCompressor((launchMode.isTest() ? "(test) " : "") + "Kogito Data Index Dev Service starting:", consoleInstalled, loggingSetup);
DataIndexInstance dataIndex;
try {
dataIndex = startDataIndex(configuration, launchMode, !devServicesSharedNetwork.isEmpty());
if (dataIndex != null) {
// Signal the service is available
dataIndexServiceAvailableBuildItemBuildProducer.produce(new KogitoDataIndexServiceAvailableBuildItem());
closeable = dataIndex.getCloseable();
systemProperties.produce(new SystemPropertyBuildItem(KOGITO_DATA_INDEX, dataIndex.getUrl()));
}
compressor.close();
} catch (Throwable t) {
compressor.closeAndDumpCaptured();
throw new RuntimeException("Failed to start Kogito Data Index Dev Services", t);
}
// Configure the watch dog
if (first) {
first = false;
Runnable closeTask = () -> {
if (closeable != null) {
shutdownDataIndex();
}
first = true;
closeable = null;
cfg = null;
};
applicationShutdown.addCloseTask(closeTask, true);
}
cfg = configuration;
if (dataIndex != null && dataIndex.isOwner()) {
LOGGER.info("Dev Services for Kogito Data Index started at {}", dataIndex.getUrl());
}
}
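The KOGITO_DATA_INDEX constant produced above makes the Data Index URL visible to the rest of the application as an ordinary system property. A hedged consumer sketch, assuming the constant resolves to a key such as "kogito.data-index.url" (the exact key is defined by the Kogito extension):

// Hypothetical runtime lookup of the URL published by the dev service:
String dataIndexUrl = System.getProperty("kogito.data-index.url");
if (dataIndexUrl != null) {
    // point clients at the auto-started Data Index container
}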