Use of org.apache.nifi.controller.serialization.FlowSynchronizationException in project nifi by apache.
The class StandardFlowService, method handleConnectionFailure:
private void handleConnectionFailure(final Exception ex) {
    // map the failure type to the disconnection code reported to the cluster coordinator
    DisconnectionCode disconnectionCode;
    if (ex instanceof UninheritableFlowException) {
        disconnectionCode = DisconnectionCode.MISMATCHED_FLOWS;
    } else if (ex instanceof MissingBundleException) {
        disconnectionCode = DisconnectionCode.MISSING_BUNDLE;
    } else if (ex instanceof FlowSynchronizationException) {
        disconnectionCode = DisconnectionCode.MISMATCHED_FLOWS;
    } else {
        disconnectionCode = DisconnectionCode.STARTUP_FAILURE;
    }
    // request disconnection, then mark this node as no longer clustered
    clusterCoordinator.disconnectionRequestedByNode(getNodeId(), disconnectionCode, ex.toString());
    controller.setClustered(false, null);
    clusterCoordinator.setConnected(false);
}
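For reference, the dispatch can be exercised in isolation. The following is a minimal, self-contained sketch of the same exception-to-DisconnectionCode mapping; the NiFi package names other than FlowSynchronizationException's are our assumptions, and the helper class is hypothetical, not part of NiFi:

import org.apache.nifi.cluster.coordination.node.DisconnectionCode;
import org.apache.nifi.controller.MissingBundleException;
import org.apache.nifi.controller.UninheritableFlowException;
import org.apache.nifi.controller.serialization.FlowSynchronizationException;

public final class DisconnectionCodes {

    // the same dispatch as handleConnectionFailure, extracted as a pure function
    static DisconnectionCode forFailure(final Exception ex) {
        if (ex instanceof UninheritableFlowException) {
            return DisconnectionCode.MISMATCHED_FLOWS;
        } else if (ex instanceof MissingBundleException) {
            return DisconnectionCode.MISSING_BUNDLE;
        } else if (ex instanceof FlowSynchronizationException) {
            // a partially synchronized flow is treated as a flow mismatch as well
            return DisconnectionCode.MISMATCHED_FLOWS;
        }
        return DisconnectionCode.STARTUP_FAILURE;
    }

    public static void main(final String[] args) {
        System.out.println(forFailure(new FlowSynchronizationException("out of sync"))); // maps to MISMATCHED_FLOWS
        System.out.println(forFailure(new RuntimeException("boom")));                    // maps to STARTUP_FAILURE
    }
}

Note that both an uninheritable flow and a partially synchronized flow are reported as MISMATCHED_FLOWS; only the exception message distinguishes them.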
Use of org.apache.nifi.controller.serialization.FlowSynchronizationException in project nifi by apache.
The class JettyServer, method start:
@Override
public void start() {
    try {
        ExtensionManager.discoverExtensions(systemBundle, bundles);
        ExtensionManager.logClassLoaderMapping();
        DocGenerator.generate(props, extensionMapping);
        // start the server
        server.start();
        // ensure everything started successfully
        for (Handler handler : server.getChildHandlers()) {
            // see if the handler is a web app
            if (handler instanceof WebAppContext) {
                WebAppContext context = (WebAppContext) handler;
                // see if this webapp had any exceptions that would cause it to be unavailable
                if (context.getUnavailableException() != null) {
                    startUpFailure(context.getUnavailableException());
                }
            }
        }
        // this must be done after starting the server (and ensuring there were no start up failures)
        if (webApiContext != null) {
            // give the web api the component ui extensions
            final ServletContext webApiServletContext = webApiContext.getServletHandler().getServletContext();
            webApiServletContext.setAttribute("nifi-ui-extensions", componentUiExtensions);
            // get the application context
            final WebApplicationContext webApplicationContext = WebApplicationContextUtils.getRequiredWebApplicationContext(webApiServletContext);
            // component ui extensions
            if (CollectionUtils.isNotEmpty(componentUiExtensionWebContexts)) {
                final NiFiWebConfigurationContext configurationContext = webApplicationContext.getBean("nifiWebConfigurationContext", NiFiWebConfigurationContext.class);
                for (final WebAppContext customUiContext : componentUiExtensionWebContexts) {
                    // set the NiFi context in each custom ui servlet context
                    final ServletContext customUiServletContext = customUiContext.getServletHandler().getServletContext();
                    customUiServletContext.setAttribute("nifi-web-configuration-context", configurationContext);
                    // add the security filter to any UI extension wars
                    final FilterHolder securityFilter = webApiContext.getServletHandler().getFilter("springSecurityFilterChain");
                    if (securityFilter != null) {
                        customUiContext.addFilter(securityFilter, "/*", EnumSet.allOf(DispatcherType.class));
                    }
                }
            }
            // content viewer extensions
            if (CollectionUtils.isNotEmpty(contentViewerWebContexts)) {
                for (final WebAppContext contentViewerContext : contentViewerWebContexts) {
                    // add the security filter to any content viewer wars
                    final FilterHolder securityFilter = webApiContext.getServletHandler().getFilter("springSecurityFilterChain");
                    if (securityFilter != null) {
                        contentViewerContext.addFilter(securityFilter, "/*", EnumSet.allOf(DispatcherType.class));
                    }
                }
            }
            // content viewer controller
            if (webContentViewerContext != null) {
                final ContentAccess contentAccess = webApplicationContext.getBean("contentAccess", ContentAccess.class);
                // add the content access
                final ServletContext webContentViewerServletContext = webContentViewerContext.getServletHandler().getServletContext();
                webContentViewerServletContext.setAttribute("nifi-content-access", contentAccess);
                final FilterHolder securityFilter = webApiContext.getServletHandler().getFilter("springSecurityFilterChain");
                if (securityFilter != null) {
                    webContentViewerContext.addFilter(securityFilter, "/*", EnumSet.allOf(DispatcherType.class));
                }
            }
        }
        // ensure the web document war was loaded and provide the extension mapping
        if (webDocsContext != null) {
            final ServletContext webDocsServletContext = webDocsContext.getServletHandler().getServletContext();
            webDocsServletContext.setAttribute("nifi-extension-mapping", extensionMapping);
        }
        // if running as a cluster node, start the flow service and load the flow
        if (props.isNode()) {
            FlowService flowService = null;
            try {
                logger.info("Loading Flow...");
                ApplicationContext ctx = WebApplicationContextUtils.getWebApplicationContext(webApiContext.getServletContext());
                flowService = ctx.getBean("flowService", FlowService.class);
                // start and load the flow
                flowService.start();
                flowService.load(null);
                logger.info("Flow loaded successfully.");
            } catch (BeansException | LifeCycleStartException | IOException | FlowSerializationException | FlowSynchronizationException | UninheritableFlowException e) {
                // ensure the flow service is terminated
                if (flowService != null && flowService.isRunning()) {
                    flowService.stop(false);
                }
                logger.error("Unable to load flow due to: " + e, e);
                // cannot wrap the exception because these exception types are not defined in a classloader accessible to the caller
                throw new Exception("Unable to load flow due to: " + e);
            }
        }
        // dump the application URLs after confirming everything started successfully
        dumpUrls();
    } catch (Exception ex) {
        startUpFailure(ex);
    }
}
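The final catch block above logs the cause but rethrows a plain java.lang.Exception carrying only the message: as the comment notes, the NiFi-specific exception classes are not defined in a classloader accessible to the caller, so chaining them as a cause would leak classes the caller cannot load. A self-contained sketch of that pattern, using a stand-in type rather than NiFi's:

public class RethrowDemo {

    // stands in for an exception type (such as FlowSynchronizationException)
    // that only the framework's classloader can see
    static class PluginOnlyException extends Exception {
        PluginOnlyException(final String msg) {
            super(msg);
        }
    }

    static void loadFlow() throws PluginOnlyException {
        throw new PluginOnlyException("flow out of sync");
    }

    public static void main(final String[] args) throws Exception {
        try {
            loadFlow();
        } catch (final PluginOnlyException e) {
            // log locally with the full stack trace, then rethrow a JDK type
            // that carries the message only -- no cause is chained
            e.printStackTrace();
            throw new Exception("Unable to load flow due to: " + e);
        }
    }
}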
Use of org.apache.nifi.controller.serialization.FlowSynchronizationException in project nifi by apache.
The class StandardFlowService, method loadFromConnectionResponse:
private void loadFromConnectionResponse(final ConnectionResponse response) throws ConnectionException {
    writeLock.lock();
    try {
        if (response.getNodeConnectionStatuses() != null) {
            clusterCoordinator.resetNodeStatuses(response.getNodeConnectionStatuses().stream()
                .collect(Collectors.toMap(status -> status.getNodeIdentifier(), status -> status)));
        }
        // get the dataflow from the response
        final DataFlow dataFlow = response.getDataFlow();
        if (logger.isTraceEnabled()) {
            logger.trace("ResponseFlow = " + new String(dataFlow.getFlow(), StandardCharsets.UTF_8));
        }
        // load new controller state
        loadFromBytes(dataFlow, true);
        // set node ID on controller before we start heartbeating because heartbeat needs node ID
        nodeId = response.getNodeIdentifier();
        logger.info("Setting Flow Controller's Node ID: " + nodeId);
        controller.setNodeId(nodeId);
        clusterCoordinator.setLocalNodeIdentifier(nodeId);
        clusterCoordinator.setConnected(true);
        revisionManager.reset(response.getComponentRevisions().stream().map(rev -> rev.toRevision()).collect(Collectors.toList()));
        // mark the node as clustered
        controller.setClustered(true, response.getInstanceId());
        controller.setConnectionStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTED));
        // Initialize the controller after the flow is loaded so we don't take any actions on repos until everything is good
        initializeController();
        // start the processors as indicated by the dataflow
        controller.onFlowInitialized(autoResumeState);
        loadSnippets(dataFlow.getSnippets());
        controller.startHeartbeating();
    } catch (final UninheritableFlowException ufe) {
        throw new UninheritableFlowException(CONNECTION_EXCEPTION_MSG_PREFIX + "local flow is different than cluster flow.", ufe);
    } catch (final MissingBundleException mbe) {
        throw new MissingBundleException(CONNECTION_EXCEPTION_MSG_PREFIX + "cluster flow contains bundles that do not exist on the current node", mbe);
    } catch (final FlowSerializationException fse) {
        throw new ConnectionException(CONNECTION_EXCEPTION_MSG_PREFIX + "local or cluster flow is malformed.", fse);
    } catch (final FlowSynchronizationException fse) {
        throw new FlowSynchronizationException(CONNECTION_EXCEPTION_MSG_PREFIX + "local flow controller partially updated. Administrator should disconnect node and review flow for corruption.", fse);
    } catch (final Exception ex) {
        throw new ConnectionException("Failed to connect node to cluster due to: " + ex, ex);
    } finally {
        writeLock.unlock();
    }
}
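Note the rethrow policy in the catch blocks above: UninheritableFlowException, MissingBundleException, and FlowSynchronizationException are rethrown with their original types (the last signaling a partially applied flow update that an administrator should review), while serialization problems and unexpected failures are collapsed into ConnectionException.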
Use of org.apache.nifi.controller.serialization.FlowSynchronizationException in project nifi by apache.
The class StandardFlowService, method load:
@Override
public void load(final DataFlow dataFlow) throws IOException, FlowSerializationException, FlowSynchronizationException, UninheritableFlowException, MissingBundleException {
    if (configuredForClustering) {
        // Create the initial flow from disk if it exists, or from serializing the empty root group in flow controller
        final DataFlow initialFlow = (dataFlow == null) ? createDataFlow() : dataFlow;
        if (logger.isTraceEnabled()) {
            logger.trace("InitialFlow = " + new String(initialFlow.getFlow(), StandardCharsets.UTF_8));
        }
        // Sync the initial flow into the flow controller so that if the flow came from disk we loaded the
        // whole flow into the flow controller and applied any bundle upgrades
        writeLock.lock();
        try {
            loadFromBytes(initialFlow, true);
        } finally {
            writeLock.unlock();
        }
        // Get the proposed flow by serializing the flow controller which now has the synced version from above
        final DataFlow proposedFlow = createDataFlowFromController();
        if (logger.isTraceEnabled()) {
            logger.trace("ProposedFlow = " + new String(proposedFlow.getFlow(), StandardCharsets.UTF_8));
        }
        /*
         * Attempt to connect to the cluster. If the manager is able to
         * provide a data flow, then the manager will send a connection
         * response. If the manager was unable to be located, then
         * the response will be null and we should load the local dataflow
         * and heartbeat until a manager is located.
         */
        final boolean localFlowEmpty = StandardFlowSynchronizer.isEmpty(proposedFlow);
        final ConnectionResponse response = connect(true, localFlowEmpty, proposedFlow);
        // obtain write lock while we are updating the controller. We need to ensure that we don't
        // obtain the lock before calling connect(), though, or we will end up getting a deadlock
        // because the node that is receiving the connection request won't be able to get the current
        // flow, as that requires a read lock.
        writeLock.lock();
        try {
            if (response == null || response.shouldTryLater()) {
                logger.info("Flow controller will load local dataflow and suspend connection handshake until a cluster connection response is received.");
                // set node ID on controller before we start heartbeating because heartbeat needs node ID
                controller.setNodeId(nodeId);
                clusterCoordinator.setLocalNodeIdentifier(nodeId);
                // set node as clustered, since it is trying to connect to a cluster
                controller.setClustered(true, null);
                clusterCoordinator.setConnected(false);
                controller.setConnectionStatus(new NodeConnectionStatus(nodeId, DisconnectionCode.NOT_YET_CONNECTED));
                /*
                 * Start heartbeating. Heartbeats will fail because we can't reach
                 * the manager, but when we locate the manager, the node will
                 * reconnect and establish a connection to the cluster. The
                 * heartbeat is the trigger that will cause the manager to
                 * issue a reconnect request.
                 */
                controller.startHeartbeating();
                // Initialize the controller after the flow is loaded so we don't take any actions on repos until everything is good
                initializeController();
                // notify controller that flow is initialized
                try {
                    controller.onFlowInitialized(autoResumeState);
                } catch (final Exception ex) {
                    logger.warn("Unable to start all processors due to invalid flow configuration.");
                    if (logger.isDebugEnabled()) {
                        logger.warn(StringUtils.EMPTY, ex);
                    }
                }
            } else {
                try {
                    loadFromConnectionResponse(response);
                } catch (final Exception e) {
                    logger.error("Failed to load flow from cluster due to: " + e, e);
                    handleConnectionFailure(e);
                    throw new IOException(e);
                }
            }
            // save the flow in the controller so we write out the latest flow with any updated bundles to disk
            dao.save(controller, true);
        } finally {
            writeLock.unlock();
        }
    } else {
        writeLock.lock();
        try {
            // operating in standalone mode, so load proposed flow and initialize the controller
            loadFromBytes(dataFlow, true);
            initializeController();
            dao.save(controller, true);
        } finally {
            writeLock.unlock();
        }
    }
}
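A caller of load(...) sees the full set of checked exceptions declared in the signature above. The following is a minimal caller sketch; the handling policy is illustrative rather than NiFi's own, and the package names other than FlowSynchronizationException's are our assumptions:

import java.io.IOException;

import org.apache.nifi.controller.MissingBundleException;
import org.apache.nifi.controller.UninheritableFlowException;
import org.apache.nifi.controller.serialization.FlowSerializationException;
import org.apache.nifi.controller.serialization.FlowSynchronizationException;
import org.apache.nifi.services.FlowService;

public final class FlowLoader {

    // Start the service, then load the flow; passing null lets the service
    // resolve the initial flow itself (from disk, or from the cluster).
    public static void startAndLoad(final FlowService flowService) throws Exception {
        flowService.start();
        try {
            flowService.load(null);
        } catch (final FlowSynchronizationException e) {
            // the flow may have been applied only partially; stop the service
            // rather than run a potentially corrupt flow
            if (flowService.isRunning()) {
                flowService.stop(false);
            }
            throw e;
        } catch (final UninheritableFlowException | MissingBundleException | FlowSerializationException | IOException e) {
            // nothing was applied; surface the failure to the embedding application
            throw e;
        }
    }
}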
Use of org.apache.nifi.controller.serialization.FlowSynchronizationException in project nifi by apache.
The class StandardFlowService, method loadFromBytes:
// write lock must already be acquired
private void loadFromBytes(final DataFlow proposedFlow, final boolean allowEmptyFlow) throws IOException, FlowSerializationException, FlowSynchronizationException, UninheritableFlowException, MissingBundleException {
    logger.trace("Loading flow from bytes");
    // resolve the given flow (null means load flow from disk)
    final DataFlow actualProposedFlow;
    final byte[] flowBytes;
    final byte[] authorizerFingerprint;
    final Set<String> missingComponents;
    if (proposedFlow == null) {
        final ByteArrayOutputStream flowOnDisk = new ByteArrayOutputStream();
        copyCurrentFlow(flowOnDisk);
        flowBytes = flowOnDisk.toByteArray();
        authorizerFingerprint = getAuthorizerFingerprint();
        missingComponents = new HashSet<>();
        logger.debug("Loaded Flow from bytes");
    } else {
        flowBytes = proposedFlow.getFlow();
        authorizerFingerprint = proposedFlow.getAuthorizerFingerprint();
        missingComponents = proposedFlow.getMissingComponents();
        logger.debug("Loaded flow from proposed flow");
    }
    actualProposedFlow = new StandardDataFlow(flowBytes, null, authorizerFingerprint, missingComponents);
    // load the flow
    logger.debug("Loading proposed flow into FlowController");
    dao.load(controller, actualProposedFlow);
    final ProcessGroup rootGroup = controller.getGroup(controller.getRootGroupId());
    if (rootGroup.isEmpty() && !allowEmptyFlow) {
        throw new FlowSynchronizationException("Failed to load flow because unable to connect to cluster and local flow is empty");
    }
    final List<Template> templates = loadTemplates();
    for (final Template template : templates) {
        final Template existing = rootGroup.getTemplate(template.getIdentifier());
        if (existing == null) {
            logger.info("Imported Template '{}' to Root Group", template.getDetails().getName());
            rootGroup.addTemplate(template);
        } else {
            logger.info("Template '{}' was already present in Root Group so will not import from file", template.getDetails().getName());
        }
    }
}
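The StandardDataFlow constructor used above takes the flow bytes, optional snippet bytes, the authorizer fingerprint, and the set of missing component ids. A minimal construction sketch (assuming StandardDataFlow and DataFlow live in org.apache.nifi.cluster.protocol; the flow XML is a placeholder):

import java.nio.charset.StandardCharsets;
import java.util.Collections;

import org.apache.nifi.cluster.protocol.DataFlow;
import org.apache.nifi.cluster.protocol.StandardDataFlow;

public final class ProposedFlowDemo {
    public static void main(final String[] args) {
        final byte[] flowBytes = "<flowController/>".getBytes(StandardCharsets.UTF_8);
        // (flow, snippetBytes, authorizerFingerprint, missingComponents) -- the same shape as in loadFromBytes
        final DataFlow proposed = new StandardDataFlow(flowBytes, null, null, Collections.emptySet());
        System.out.println(proposed.getMissingComponents().isEmpty()); // true
    }
}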