Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
The class FlowController, method reload().
@Override
public void reload(final ReportingTaskNode existingNode, final String newType, final BundleCoordinate bundleCoordinate, final Set<URL> additionalUrls) throws ReportingTaskInstantiationException {
    if (existingNode == null) {
        throw new IllegalStateException("Existing ReportingTaskNode cannot be null");
    }
    final String id = existingNode.getReportingTask().getIdentifier();

    // ghost components will have a null logger
    if (existingNode.getLogger() != null) {
        existingNode.getLogger().debug("Reloading component {} to type {} from bundle {}", new Object[] { id, newType, bundleCoordinate });
    }

    // createReportingTask will create a new instance class loader for the same id, so
    // save the existing instance class loader to use it for calling OnRemoved on the existing reporting task
    final ClassLoader existingInstanceClassLoader = ExtensionManager.getInstanceClassLoader(id);

    // set firstTimeAdded to true so lifecycle annotations get fired, but don't register this node;
    // attempt the creation to make sure it works before firing the OnRemoved methods below
    final ReportingTaskNode newNode = createReportingTask(newType, id, bundleCoordinate, additionalUrls, true, false);

    // call OnRemoved for the existing reporting task using the previous instance class loader
    try (final NarCloseable x = NarCloseable.withComponentNarLoader(existingInstanceClassLoader)) {
        ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnRemoved.class, existingNode.getReportingTask(), existingNode.getConfigurationContext());
    } finally {
        ExtensionManager.closeURLClassLoader(id, existingInstanceClassLoader);
    }

    // set the new reporting task into the existing node
    final ComponentLog componentLogger = new SimpleProcessLogger(id, existingNode.getReportingTask());
    final TerminationAwareLogger terminationAwareLogger = new TerminationAwareLogger(componentLogger);
    LogRepositoryFactory.getRepository(id).setLogger(terminationAwareLogger);
    final LoggableComponent<ReportingTask> newReportingTask = new LoggableComponent<>(newNode.getReportingTask(), newNode.getBundleCoordinate(), terminationAwareLogger);
    existingNode.setReportingTask(newReportingTask);
    existingNode.setExtensionMissing(newNode.isExtensionMissing());

    // need to refresh the properties in case we are changing from a ghost component to a real component
    existingNode.refreshProperties();
}
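The reload above is where the framework wires a ComponentLog into a component: a SimpleProcessLogger is wrapped in a TerminationAwareLogger and registered with the log repository for the component's id. Inside a processor, that logger is normally reached through getLogger(). A minimal sketch, assuming the standard nifi-api classes (the processor class and relationship below are hypothetical, not from the snippet above):

import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical processor used only to illustrate ComponentLog usage.
public class LogOnlyProcessor extends AbstractProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .description("Flow files are routed here after being logged")
            .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
        FlowFile flowFile = session.get();
        if (flowFile == null) {
            return;
        }
        // getLogger() returns the ComponentLog the framework wired up for this component,
        // which is the logger the reload code above replaces via LogRepositoryFactory.
        final ComponentLog logger = getLogger();
        logger.debug("Processing flow file {}", new Object[] { flowFile });
        session.transfer(flowFile, REL_SUCCESS);
    }
}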
Use of org.apache.nifi.logging.ComponentLog in project nifi-minifi by apache.
The class ProcessorInitializer, method teardown().
@Override
public void teardown(ConfigurableComponent component) {
    Processor processor = (Processor) component;
    try (NarCloseable narCloseable = NarCloseable.withComponentNarLoader(component.getClass(), component.getIdentifier())) {
        final ComponentLog logger = new MockComponentLogger();
        final MockProcessContext context = new MockProcessContext();
        ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, processor, logger, context);
    } finally {
        ExtensionManager.removeInstanceClassLoader(component.getIdentifier());
    }
}
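The reflection call above fires any method on the processor annotated with @OnShutdown, offering the MockComponentLogger and MockProcessContext as candidate arguments. A sketch of the kind of processor method that would be invoked (the class below is hypothetical, not taken from nifi-minifi):

import org.apache.nifi.annotation.lifecycle.OnShutdown;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical processor demonstrating an @OnShutdown hook that the teardown above would invoke.
public class ShutdownAwareProcessor extends AbstractProcessor {

    @OnShutdown
    public void onShutdown() {
        // invoked via ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, ...)
        final ComponentLog logger = getLogger();
        logger.info("Releasing resources for {}", new Object[] { getIdentifier() });
    }

    @Override
    public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
        // no-op for this sketch
    }
}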
Use of org.apache.nifi.logging.ComponentLog in project nifi-minifi by apache.
The class ControllerServiceInitializer, method teardown().
@Override
public void teardown(ConfigurableComponent component) {
    try (NarCloseable narCloseable = NarCloseable.withComponentNarLoader(component.getClass(), component.getIdentifier())) {
        ControllerService controllerService = (ControllerService) component;
        final ComponentLog logger = new MockComponentLogger();
        final MockConfigurationContext context = new MockConfigurationContext();
        ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, controllerService, logger, context);
    } finally {
        ExtensionManager.removeInstanceClassLoader(component.getIdentifier());
    }
}
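The controller-service teardown follows the same shape, invoking @OnShutdown methods with a MockConfigurationContext instead of a process context. A brief hypothetical sketch of a controller service with such a hook:

import org.apache.nifi.annotation.lifecycle.OnShutdown;
import org.apache.nifi.controller.AbstractControllerService;
import org.apache.nifi.logging.ComponentLog;

// Hypothetical controller service with an @OnShutdown hook matching the teardown above.
public class CleanupAwareService extends AbstractControllerService {

    @OnShutdown
    public void onShutdown() {
        final ComponentLog logger = getLogger();  // ComponentLog provided by the framework
        logger.info("Shutting down controller service {}", new Object[] { getIdentifier() });
    }
}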
Use of org.apache.nifi.logging.ComponentLog in project kylo by Teradata.
The class AbstractMergeTable, method onTrigger().
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLog();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final String blockingValue = context.getProperty(BLOCKING_KEY).evaluateAttributeExpressions(flowFile).getValue();
    String flowFileId = flowFile.getAttribute(CoreAttributes.UUID.key());
    boolean block = false;
    if (blocking && blockingCache.putIfAbsent(blockingValue, flowFileId) != null) {
        if (StringUtils.isBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
            flowFile = session.putAttribute(flowFile, BLOCKED_START_TIME, String.valueOf(System.currentTimeMillis()));
            getLogger().info("Transferring Flow file {} to blocked relationship", new Object[] { flowFile });
        }
        // penalize the flow file and transfer to BLOCKED
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_BLOCKED);
        return;
    }
    // Add the blocking time to the flow file if this was a blocked flow file.
    if (blocking && StringUtils.isNotBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
        String blockedStartTime = flowFile.getAttribute(BLOCKED_START_TIME);
        try {
            Long l = Long.parseLong(blockedStartTime);
            Long blockTime = System.currentTimeMillis() - l;
            getLogger().info("Processing Blocked flow file {}. This was blocked for {} ms", new Object[] { flowFile, blockTime });
            flowFile = session.putAttribute(flowFile, BLOCKED_TIME, String.valueOf(blockTime) + " ms");
        } catch (NumberFormatException e) {
            // ignore a malformed start time; the blocked-time attribute is informational only
        }
    }
    String PROVENANCE_EXECUTION_STATUS_KEY = context.getName() + " Execution Status";
    String partitionSpecString = context.getProperty(PARTITION_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue();
    String sourceSchema = context.getProperty(SOURCE_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String sourceTable = context.getProperty(SOURCE_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String targetSchema = context.getProperty(TARGET_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String targetTable = context.getProperty(TARGET_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String feedPartitionValue = context.getProperty(FEED_PARTITION).evaluateAttributeExpressions(flowFile).getValue();
    String mergeStrategyValue = context.getProperty(MERGE_STRATEGY).evaluateAttributeExpressions(flowFile).getValue();
    String hiveConfigurations = context.getProperty(HIVE_CONFIGURATIONS).evaluateAttributeExpressions(flowFile).getValue();
    boolean resetHive = context.getProperty(RESET_HIVE).asBoolean();
    final ColumnSpec[] columnSpecs = Optional.ofNullable(context.getProperty(FIELD_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue())
        .filter(StringUtils::isNotEmpty)
        .map(ColumnSpec::createFromString)
        .orElse(new ColumnSpec[0]);
    if (STRATEGY_PK_MERGE.equals(mergeStrategyValue) && (columnSpecs == null || columnSpecs.length == 0)) {
        getLog().error("Missing required field specification for PK merge feature");
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: Missing required field specification for PK merge feature");
        release(blockingValue);
        session.transfer(flowFile, IngestProperties.REL_FAILURE);
        return;
    }
    // Maintain default for backward compatibility
    if (StringUtils.isEmpty(mergeStrategyValue)) {
        mergeStrategyValue = STRATEGY_DEDUPE_MERGE;
    }
    logger.info("Merge strategy: " + mergeStrategyValue + " Using Source: " + sourceTable + " Target: " + targetTable + " feed partition:" + feedPartitionValue + " partSpec: " + partitionSpecString);
    final StopWatch stopWatch = new StopWatch(true);
    try (final Connection conn = getConnection(context)) {
        TableMergeSyncSupport mergeSupport = new TableMergeSyncSupport(conn);
        if (resetHive) {
            mergeSupport.resetHiveConf();
        }
        mergeSupport.enableDynamicPartitions();
        if (StringUtils.isNotEmpty(hiveConfigurations)) {
            mergeSupport.setHiveConf(hiveConfigurations.split("\\|"));
        }
        PartitionSpec partitionSpec = new PartitionSpec(partitionSpecString);
        if (STRATEGY_DEDUPE_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, true);
        } else if (STRATEGY_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, false);
        } else if (STRATEGY_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_ROLLING_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doRollingSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_PK_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doPKMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, columnSpecs);
        } else {
            throw new UnsupportedOperationException("Failed to resolve the merge strategy");
        }
        session.getProvenanceReporter().modifyContent(flowFile, "Execution completed", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Successful");
        release(blockingValue);
        logger.info("Execution completed: " + stopWatch.getElapsed(TimeUnit.MILLISECONDS) + " Merge strategy: " + mergeStrategyValue + " Using Source: " + sourceTable + " Target: " + targetTable + " feed partition:" + feedPartitionValue + " partSpec: " + partitionSpecString);
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Exception e) {
        logger.error("Unable to execute merge doMerge for {} due to {}; routing to failure", new Object[] { flowFile, e }, e);
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: " + e.getMessage());
        release(blockingValue);
        session.transfer(flowFile, REL_FAILURE);
    }
}
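The success path above combines three pieces around the ComponentLog: a StopWatch for timing, the provenance reporter to record the elapsed time, and the logger for the human-readable message. A minimal sketch of that pattern extracted into a helper (the class, method, and Callable parameter are hypothetical, used only to keep the sketch self-contained):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.util.StopWatch;

// Hypothetical helper illustrating the timing-and-provenance pattern; not part of AbstractMergeTable.
final class TimedTransfer {

    static void runTimed(ProcessSession session, FlowFile flowFile, ComponentLog logger,
                         Callable<?> work, Relationship success, Relationship failure) {
        final StopWatch stopWatch = new StopWatch(true);  // starts timing immediately
        try {
            work.call();
            // record the elapsed time on the provenance event, then log it and route to success
            session.getProvenanceReporter().modifyContent(flowFile, "Execution completed", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
            logger.info("Execution completed in {} ms", new Object[] { stopWatch.getElapsed(TimeUnit.MILLISECONDS) });
            session.transfer(flowFile, success);
        } catch (final Exception e) {
            logger.error("Unable to process {} due to {}; routing to failure", new Object[] { flowFile, e }, e);
            session.transfer(flowFile, failure);
        }
    }
}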
Use of org.apache.nifi.logging.ComponentLog in project kylo by Teradata.
The class GetFeedsHistoryReindex, method onTrigger().
@Override
public void onTrigger(ProcessContext context, ProcessSession session) {
    final ComponentLog logger = getLog();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        flowFile = session.create();
    }
    logger.debug("Checking for feeds requiring reindexing historical data");
    try {
        MetadataProviderService metadataProviderService = getMetadataService(context);
        if ((metadataProviderService != null) && (metadataProviderService.getProvider() != null)) {
            String dateTimeOfCheck = String.valueOf(DateTime.now(DateTimeZone.UTC));
            FeedsForDataHistoryReindex feedsForHistoryReindexing = getMetadataService(context).getProvider().getFeedsForHistoryReindexing();
            if (feedsForHistoryReindexing != null) {
                logger.info("Found {} feeds requiring reindexing historical data", new Object[] { feedsForHistoryReindexing.getFeeds().size() });
                if (feedsForHistoryReindexing.getFeedCount() > 0) {
                    for (Feed feedForHistoryReindexing : feedsForHistoryReindexing.getFeeds()) {
                        Map<String, String> attributes = new HashMap<>();
                        attributes.put(FEED_ID_FOR_HISTORY_REINDEX_KEY, feedForHistoryReindexing.getId());
                        attributes.put(FEED_SYSTEM_NAME_FOR_HISTORY_REINDEX_KEY, feedForHistoryReindexing.getSystemName());
                        attributes.put(FEED_CATEGORY_SYSTEM_NAME_FOR_HISTORY_REINDEX_KEY, feedForHistoryReindexing.getCategory().getSystemName());
                        attributes.put(FEED_STATUS_FOR_HISTORY_REINDEX_KEY, feedForHistoryReindexing.getCurrentHistoryReindexingStatus().getHistoryReindexingState().toString());
                        attributes.put(FEED_LAST_MODIFIED_UTC_FOR_HISTORY_REINDEX_KEY, feedForHistoryReindexing.getCurrentHistoryReindexingStatus().getLastModifiedTimestamp().toString());
                        attributes.put(FEEDS_TOTAL_COUNT_FOR_HISTORY_REINDEX_KEY, String.valueOf(feedsForHistoryReindexing.getFeedCount()));
                        attributes.put(FEEDS_TOTAL_IDS_FOR_HISTORY_REINDEX_KEY, feedsForHistoryReindexing.getFeedIds().toString());
                        attributes.put(FEEDS_CHECK_TIME_UTC_FOR_HISTORY_REINDEX_KEY, dateTimeOfCheck);
                        // all attributes from parent flow file copied except uuid, creates a FORK event
                        FlowFile feedFlowFile = session.create(flowFile);
                        feedFlowFile = session.putAllAttributes(feedFlowFile, attributes);
                        session.transfer(feedFlowFile, REL_FOUND);
                        logger.info("Flow file created for reindexing feed's historical data: feed id {}, category name {}, feed name {}", new Object[] { FEED_ID_FOR_HISTORY_REINDEX_KEY, FEED_CATEGORY_SYSTEM_NAME_FOR_HISTORY_REINDEX_KEY, FEED_SYSTEM_NAME_FOR_HISTORY_REINDEX_KEY });
                    }
                    flowFile = session.putAttribute(flowFile, FEEDS_TOTAL_COUNT_FOR_HISTORY_REINDEX_KEY, String.valueOf(feedsForHistoryReindexing.getFeedCount()));
                    flowFile = session.putAttribute(flowFile, FEEDS_CHECK_TIME_UTC_FOR_HISTORY_REINDEX_KEY, dateTimeOfCheck);
                    // only for found case
                    session.transfer(flowFile, REL_ORIGINAL);
                } else {
                    // this will always be 0 here
                    flowFile = session.putAttribute(flowFile, FEEDS_TOTAL_COUNT_FOR_HISTORY_REINDEX_KEY, String.valueOf(feedsForHistoryReindexing.getFeedCount()));
                    // this will always be empty list here
                    flowFile = session.putAttribute(flowFile, FEEDS_TOTAL_IDS_FOR_HISTORY_REINDEX_KEY, feedsForHistoryReindexing.getFeedIds().toString());
                    flowFile = session.putAttribute(flowFile, FEEDS_CHECK_TIME_UTC_FOR_HISTORY_REINDEX_KEY, dateTimeOfCheck);
                    session.transfer(flowFile, REL_NOT_FOUND);
                }
            }
        } else {
            logger.error("Error checking for feeds requiring reindexing historical data. Check if Kylo services is running, and accessible from NiFi.");
            session.transfer(flowFile, REL_FAILURE);
        }
    } catch (Exception e) {
        logger.error("An exception was thrown during check for feeds requiring reindexing historical data: {}", new Object[] { e });
        session.transfer(flowFile, REL_FAILURE);
    }
}
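The per-feed loop above relies on session.create(parent) to fork a child flow file that inherits the parent's attributes (except uuid) and records a FORK provenance event, then stamps the child with its own attributes. A minimal sketch of that pattern (the helper class and attribute key are hypothetical):

import java.util.HashMap;
import java.util.Map;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;

// Hypothetical helper illustrating the fork-per-item pattern used above.
final class FlowFileForker {

    static void forkChild(ProcessSession session, FlowFile parent, Relationship found) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put("item.id", "example-id");  // illustrative attribute only

        // create(parent) copies the parent's attributes (except uuid) and records a FORK provenance event
        FlowFile child = session.create(parent);
        child = session.putAllAttributes(child, attributes);
        session.transfer(child, found);
    }
}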