Use of org.apache.nifi.annotation.lifecycle.OnEnabled in project nifi by apache.
The class StandardSSLContextService, method onConfigured.
@OnEnabled
public void onConfigured(final ConfigurationContext context) throws InitializationException {
    configContext = context;
    final Collection<ValidationResult> results = new ArrayList<>();
    results.addAll(validateStore(context.getProperties(), KeystoreValidationGroup.KEYSTORE));
    results.addAll(validateStore(context.getProperties(), KeystoreValidationGroup.TRUSTSTORE));
    if (!results.isEmpty()) {
        final StringBuilder sb = new StringBuilder(this + " is not valid due to:");
        for (final ValidationResult result : results) {
            sb.append("\n").append(result.toString());
        }
        throw new InitializationException(sb.toString());
    }
    if (countNulls(context.getProperty(KEYSTORE).getValue(), context.getProperty(KEYSTORE_PASSWORD).getValue(), context.getProperty(KEYSTORE_TYPE).getValue(),
            context.getProperty(TRUSTSTORE).getValue(), context.getProperty(TRUSTSTORE_PASSWORD).getValue(), context.getProperty(TRUSTSTORE_TYPE).getValue()) >= 4) {
        throw new InitializationException(this + " does not have the KeyStore or the TrustStore populated");
    }
    // verify that the filename, password, and type match
    createSSLContext(ClientAuth.REQUIRED);
}
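The countNulls helper called above is not part of this excerpt. A minimal sketch of what such a varargs null-counter could look like, assuming only what the call site shows (the body is a guess, not the project's code):

// Hypothetical reconstruction of the helper used above: with six keystore/truststore
// values passed in, four or more nulls means neither store is fully populated.
private static int countNulls(final Object... values) {
    int count = 0;
    for (final Object value : values) {
        if (value == null) {
            count++;
        }
    }
    return count;
}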
Use of org.apache.nifi.annotation.lifecycle.OnEnabled in project nifi by apache.
The class HiveConnectionPool, method onConfigured.
/**
* Configures connection pool by creating an instance of the
* {@link BasicDataSource} based on configuration provided with
* {@link ConfigurationContext}.
* <p>
* This operation makes no guarantee that the actual connection can be
* made, since the underlying system may still go offline during normal
* operation of the connection pool.
* <p/>
* As of Apache NiFi 1.5.0, due to changes made to
* {@link SecurityUtil#loginKerberos(Configuration, String, String)}, which is used by this class invoking
* {@link HiveConfigurator#authenticate(Configuration, String, String)}
* to authenticate a principal with Kerberos, Hive controller services no longer
* attempt relogins explicitly. For more information, please read the documentation for
* {@link SecurityUtil#loginKerberos(Configuration, String, String)}.
* <p/>
* In previous versions of NiFi, a {@link org.apache.nifi.hadoop.KerberosTicketRenewer} was started by
* {@link HiveConfigurator#authenticate(Configuration, String, String, long)} when the Hive
* controller service was enabled. The use of a separate thread to explicitly relogin could cause race conditions
* with the implicit relogin attempts made by hadoop/Hive code on a thread that references the same
* {@link UserGroupInformation} instance. One of these threads could leave the
* {@link javax.security.auth.Subject} in {@link UserGroupInformation} to be cleared or in an unexpected state
* while the other thread is attempting to use the {@link javax.security.auth.Subject}, resulting in failed
* authentication attempts that would leave the Hive controller service in an unrecoverable state.
*
* @see SecurityUtil#loginKerberos(Configuration, String, String)
* @see HiveConfigurator#authenticate(Configuration, String, String)
* @see HiveConfigurator#authenticate(Configuration, String, String, long)
* @param context the configuration context
* @throws InitializationException if unable to create a database connection
*/
@OnEnabled
public void onConfigured(final ConfigurationContext context) throws InitializationException {
    ComponentLog log = getLogger();
    final String configFiles = context.getProperty(HIVE_CONFIGURATION_RESOURCES).evaluateAttributeExpressions().getValue();
    final Configuration hiveConfig = hiveConfigurator.getConfigurationFromFiles(configFiles);
    final String validationQuery = context.getProperty(VALIDATION_QUERY).evaluateAttributeExpressions().getValue();
    // add any dynamic properties to the Hive configuration
    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        final PropertyDescriptor descriptor = entry.getKey();
        if (descriptor.isDynamic()) {
            hiveConfig.set(descriptor.getName(), context.getProperty(descriptor).evaluateAttributeExpressions().getValue());
        }
    }
    final String drv = HiveDriver.class.getName();
    if (SecurityUtil.isSecurityEnabled(hiveConfig)) {
        final String explicitPrincipal = context.getProperty(kerberosProperties.getKerberosPrincipal()).evaluateAttributeExpressions().getValue();
        final String explicitKeytab = context.getProperty(kerberosProperties.getKerberosKeytab()).evaluateAttributeExpressions().getValue();
        final KerberosCredentialsService credentialsService = context.getProperty(KERBEROS_CREDENTIALS_SERVICE).asControllerService(KerberosCredentialsService.class);
        final String resolvedPrincipal;
        final String resolvedKeytab;
        if (credentialsService == null) {
            resolvedPrincipal = explicitPrincipal;
            resolvedKeytab = explicitKeytab;
        } else {
            resolvedPrincipal = credentialsService.getPrincipal();
            resolvedKeytab = credentialsService.getKeytab();
        }
        log.info("Hive Security Enabled, logging in as principal {} with keytab {}", new Object[] { resolvedPrincipal, resolvedKeytab });
        try {
            ugi = hiveConfigurator.authenticate(hiveConfig, resolvedPrincipal, resolvedKeytab);
        } catch (AuthenticationFailedException ae) {
            // Fail the enable attempt rather than continuing with a null UGI and
            // logging a misleading success message below.
            log.error(ae.getMessage(), ae);
            throw new InitializationException(ae);
        }
        getLogger().info("Successfully logged in as principal {} with keytab {}", new Object[] { resolvedPrincipal, resolvedKeytab });
    }
    final String user = context.getProperty(DB_USER).evaluateAttributeExpressions().getValue();
    final String passw = context.getProperty(DB_PASSWORD).evaluateAttributeExpressions().getValue();
    final Long maxWaitMillis = context.getProperty(MAX_WAIT_TIME).evaluateAttributeExpressions().asTimePeriod(TimeUnit.MILLISECONDS);
    final Integer maxTotal = context.getProperty(MAX_TOTAL_CONNECTIONS).evaluateAttributeExpressions().asInteger();
    dataSource = new BasicDataSource();
    dataSource.setDriverClassName(drv);
    connectionUrl = context.getProperty(DATABASE_URL).evaluateAttributeExpressions().getValue();
    dataSource.setMaxWait(maxWaitMillis);
    dataSource.setMaxActive(maxTotal);
    if (validationQuery != null && !validationQuery.isEmpty()) {
        dataSource.setValidationQuery(validationQuery);
        dataSource.setTestOnBorrow(true);
    }
    dataSource.setUrl(connectionUrl);
    dataSource.setUsername(user);
    dataSource.setPassword(passw);
}
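The snippet stores the authenticated identity in the ugi field but does not show where it is used. A hedged sketch of how a pool like this typically borrows connections under that identity (the method body is an assumption drawn from the fields above, not necessarily the project's exact implementation; it assumes java.sql.Connection, java.security.PrivilegedExceptionAction and NiFi's ProcessException are imported):

@Override
public Connection getConnection() throws ProcessException {
    try {
        if (ugi != null) {
            // When Kerberos is enabled, borrow from the pool inside doAs() so the
            // Hive JDBC driver sees the logged-in Subject.
            return ugi.doAs((PrivilegedExceptionAction<Connection>) () -> dataSource.getConnection());
        }
        // No Kerberos: borrow from the pool directly.
        return dataSource.getConnection();
    } catch (final Exception e) {
        throw new ProcessException(e);
    }
}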
Use of org.apache.nifi.annotation.lifecycle.OnEnabled in project kylo by Teradata.
The class MetadataProviderSelectorService, method onConfigured.
@OnEnabled
public void onConfigured(final ConfigurationContext context) {
    PropertyValue impl = context.getProperty(IMPLEMENTATION);
    if (impl.getValue().equalsIgnoreCase("REMOTE")) {
        URI uri = URI.create(context.getProperty(CLIENT_URL).getValue());
        String user = context.getProperty(CLIENT_USERNAME).getValue();
        String password = context.getProperty(CLIENT_PASSWORD).getValue();
        MetadataClient client;
        SSLContext sslContext = null;
        if (context.getProperty(SSL_CONTEXT_SERVICE) != null && context.getProperty(SSL_CONTEXT_SERVICE).isSet()) {
            this.sslContextService = context.getProperty(SSL_CONTEXT_SERVICE).asControllerService(SSLContextService.class);
            sslContext = this.sslContextService.createSSLContext(SSLContextService.ClientAuth.REQUIRED);
        }
        if (StringUtils.isEmpty(user)) {
            client = new MetadataClient(uri, sslContext);
        } else {
            client = new MetadataClient(uri, user, password, sslContext);
        }
        this.provider = new MetadataClientProvider(client);
        this.recorder = new MetadataClientRecorder(client);
        this.kyloProvenanceClientProvider = new KyloProvenanceClientProvider(client);
        getSpringContextService(context).ifPresent(springService -> {
            CancelActiveWaterMarkEventConsumer waterMarkConsumer = springService.getBean(CancelActiveWaterMarkEventConsumer.class);
            waterMarkConsumer.addMetadataRecorder(this.recorder);
            FeedInitializationChangeEventConsumer initChangeConsumer = springService.getBean(FeedInitializationChangeEventConsumer.class);
            initChangeConsumer.addMetadataRecorder(this.recorder);
        });
    } else {
        throw new UnsupportedOperationException("Provider implementations not currently supported: " + impl.getValue());
    }
}
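The SSL_CONTEXT_SERVICE property checked above would normally be declared as an optional controller-service reference. A sketch of such a declaration using NiFi's standard PropertyDescriptor builder (the name and description strings are assumptions; only the builder calls themselves are standard API):

// Hypothetical declaration of the optional SSL context service property used above.
public static final PropertyDescriptor SSL_CONTEXT_SERVICE = new PropertyDescriptor.Builder()
        .name("SSL Context Service")
        .description("Optional SSLContextService used when the remote metadata server requires TLS")
        .required(false)
        .identifiesControllerService(SSLContextService.class)
        .build();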
Use of org.apache.nifi.annotation.lifecycle.OnEnabled in project nifi by apache.
The class StandardControllerServiceNode, method enable.
/**
 * Will atomically enable this service by invoking its @OnEnabled operation.
 * It uses a CAS operation on {@link #stateRef} to transition this service
 * from DISABLED to ENABLING state. If that transition succeeds, the service
 * will be marked as 'active' (see {@link ControllerServiceNode#isActive()}).
 * If the transition does not succeed, no enabling logic will be
 * performed and the method will exit. In other words, it is safe to invoke
 * this operation multiple times and from multiple threads.
 * <br>
 * This operation will also retry service enabling in the event that a previous
 * invocation of @OnEnabled threw an exception.
 * <br>
 * Upon successful invocation of @OnEnabled, this service will be transitioned to
 * ENABLED state.
 * <br>
 * In the event that enabling takes longer than the user expects and the user
 * initiates a disable operation, this service will be automatically disabled as soon
 * as it reaches ENABLED state.
 */
@Override
public CompletableFuture<Void> enable(final ScheduledExecutorService scheduler, final long administrativeYieldMillis) {
    final CompletableFuture<Void> future = new CompletableFuture<>();
    if (this.stateTransition.transitionToEnabling(ControllerServiceState.DISABLED, future)) {
        synchronized (active) {
            this.active.set(true);
        }
        final StandardControllerServiceNode service = this;
        final ConfigurationContext configContext = new StandardConfigurationContext(this, this.serviceProvider, null, getVariableRegistry());
        scheduler.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    try (final NarCloseable nc = NarCloseable.withComponentNarLoader(getControllerServiceImplementation().getClass(), getIdentifier())) {
                        ReflectionUtils.invokeMethodsWithAnnotation(OnEnabled.class, getControllerServiceImplementation(), configContext);
                    }
                    boolean shouldEnable;
                    synchronized (active) {
                        shouldEnable = active.get() && stateTransition.enable();
                    }
                    if (!shouldEnable) {
                        LOG.debug("Disabling service {} after it has been enabled due to disable action being initiated.", service);
                        // Can only happen if the user initiated a DISABLE operation before the service finished enabling.
                        // Its state will be set to DISABLING (see the disable() operation).
                        invokeDisable(configContext);
                        stateTransition.disable();
                    } else {
                        LOG.debug("Successfully enabled {}", service);
                    }
                } catch (Exception e) {
                    future.completeExceptionally(e);
                    final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e;
                    final ComponentLog componentLog = new SimpleProcessLogger(getIdentifier(), StandardControllerServiceNode.this);
                    componentLog.error("Failed to invoke @OnEnabled method due to {}", cause);
                    LOG.error("Failed to invoke @OnEnabled method of {} due to {}", getControllerServiceImplementation(), cause.toString());
                    invokeDisable(configContext);
                    if (isActive()) {
                        scheduler.schedule(this, administrativeYieldMillis, TimeUnit.MILLISECONDS);
                    } else {
                        try (final NarCloseable nc = NarCloseable.withComponentNarLoader(getControllerServiceImplementation().getClass(), getIdentifier())) {
                            ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnDisabled.class, getControllerServiceImplementation(), configContext);
                        }
                        stateTransition.disable();
                    }
                }
            }
        });
    } else {
        future.complete(null);
    }
    return future;
}
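A minimal caller-side sketch of how this method might be driven, assuming a StandardControllerServiceNode reference named serviceNode and assuming the returned future completes once the service reaches ENABLED (the scheduler and timeout values below are illustrative only):

// Hypothetical caller: request enabling and wait for @OnEnabled to finish.
final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
final long administrativeYieldMillis = 30_000L; // retry delay after a failed @OnEnabled
try {
    serviceNode.enable(scheduler, administrativeYieldMillis).get(30, TimeUnit.SECONDS);
} catch (final ExecutionException e) {
    // @OnEnabled threw; the node schedules a retry or disables itself as described above.
} catch (final InterruptedException | TimeoutException e) {
    // Interrupted, or the service was still ENABLING when the timeout elapsed.
}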
Use of org.apache.nifi.annotation.lifecycle.OnEnabled in project nifi by apache.
The class DruidTranquilityController, method onConfigured.
@OnEnabled
public void onConfigured(final ConfigurationContext context) {
    ComponentLog log = getLogger();
    log.info("Starting Druid Tranquility Controller Service...");
    final String dataSource = context.getProperty(DATASOURCE).evaluateAttributeExpressions().getValue();
    final String zkConnectString = context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue();
    zkBaseSleepMillis = context.getProperty(ZOOKEEPER_RETRY_BASE_SLEEP_TIME).evaluateAttributeExpressions().asInteger();
    // Note: the max-retries value is read from the base-sleep-time property here, which looks like a
    // copy-paste slip in the source; a dedicated max-retries property would normally be expected.
    zkMaxRetries = context.getProperty(ZOOKEEPER_RETRY_BASE_SLEEP_TIME).evaluateAttributeExpressions().asInteger();
    zkSleepMillis = context.getProperty(ZOOKEEPER_RETRY_SLEEP_TIME).evaluateAttributeExpressions().asInteger();
    final String indexService = context.getProperty(DRUID_INDEX_SERVICE_PATH).evaluateAttributeExpressions().getValue();
    final String discoveryPath = context.getProperty(DRUID_DISCOVERY_PATH).evaluateAttributeExpressions().getValue();
    final int clusterPartitions = context.getProperty(CLUSTER_PARTITIONS).evaluateAttributeExpressions().asInteger();
    final int clusterReplication = context.getProperty(CLUSTER_REPLICATION).evaluateAttributeExpressions().asInteger();
    final String timestampField = context.getProperty(TIMESTAMP_FIELD).evaluateAttributeExpressions().getValue();
    final String segmentGranularity = context.getProperty(SEGMENT_GRANULARITY).getValue();
    final String queryGranularity = context.getProperty(QUERY_GRANULARITY).getValue();
    final String windowPeriod = context.getProperty(WINDOW_PERIOD).getValue();
    final String indexRetryPeriod = context.getProperty(INDEX_RETRY_PERIOD).getValue();
    final String aggregatorJSON = context.getProperty(AGGREGATOR_JSON).evaluateAttributeExpressions().getValue();
    final String dimensionsStringList = context.getProperty(DIMENSIONS_LIST).getValue();
    final int maxBatchSize = context.getProperty(MAX_BATCH_SIZE).evaluateAttributeExpressions().asInteger();
    final int maxPendingBatches = context.getProperty(MAX_PENDING_BATCHES).evaluateAttributeExpressions().asInteger();
    final int lingerMillis = context.getProperty(LINGER_MILLIS).evaluateAttributeExpressions().asInteger();
    transitUri = String.format(FIREHOSE_PATTERN, dataSource) + ";indexServicePath=" + indexService;
    final List<String> dimensions = getDimensions(dimensionsStringList);
    final List<AggregatorFactory> aggregator = getAggregatorList(aggregatorJSON);
    final Timestamper<Map<String, Object>> timestamper = new Timestamper<Map<String, Object>>() {
        private static final long serialVersionUID = 1L;

        @Override
        public DateTime timestamp(Map<String, Object> theMap) {
            return new DateTime(theMap.get(timestampField));
        }
    };
    Iterator<AggregatorFactory> aggIterator = aggregator.iterator();
    AggregatorFactory currFactory;
    log.debug("Number of Aggregations Defined: {}", new Object[] { aggregator.size() });
    while (aggIterator.hasNext()) {
        currFactory = aggIterator.next();
        log.debug("Verifying Aggregator Definition\n\tAggregator Name: {}\n\tAggregator Type: {}\n\tAggregator Req Fields: {}",
                new Object[] { currFactory.getName(), currFactory.getTypeName(), currFactory.requiredFields() });
    }
    // Tranquility uses ZooKeeper (through Curator) for coordination.
    curator = getCurator(zkConnectString);
    curator.start();
    // The JSON serialization of your object must have a timestamp field in a format that Druid understands. By default,
    // Druid expects the field to be called "timestamp" and to be an ISO8601 timestamp.
    final TimestampSpec timestampSpec = new TimestampSpec(timestampField, "auto", null);
    final Beam<Map<String, Object>> beam = buildBeam(dataSource, indexService, discoveryPath, clusterPartitions, clusterReplication,
            segmentGranularity, queryGranularity, windowPeriod, indexRetryPeriod, dimensions, aggregator, timestamper, timestampSpec);
    tranquilizer = buildTranquilizer(maxBatchSize, maxPendingBatches, lingerMillis, beam);
    tranquilizer.start();
}
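getCurator is not included in the excerpt. A sketch of how the ZooKeeper retry settings read above are commonly wired into a Curator client (this is an assumption about the helper, using standard Curator API, not necessarily the project's exact code):

// Hypothetical reconstruction: build a Curator client whose exponential-backoff retry
// policy is driven by the zkBaseSleepMillis / zkMaxRetries / zkSleepMillis fields
// populated in onConfigured (zkSleepMillis taken here as the maximum sleep per retry).
private CuratorFramework getCurator(final String zkConnectString) {
    final RetryPolicy retryPolicy = new ExponentialBackoffRetry(zkBaseSleepMillis, zkMaxRetries, zkSleepMillis);
    return CuratorFrameworkFactory.builder()
            .connectString(zkConnectString)
            .retryPolicy(retryPolicy)
            .build();
}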