Use of org.apache.nifi.controller.serialization.FlowSerializationException in project nifi by apache.
The class StandardXMLFlowConfigurationDAO, method save.
@Override
public synchronized void save(final FlowController controller, final boolean archive) throws IOException {
    if (null == controller) {
        throw new NullPointerException();
    }
    final Path configFile = flowXmlPath;
    final Path tempFile = configFile.getParent().resolve(configFile.toFile().getName() + ".new.xml.gz");
    // serialize to a temp file first, then swap it into place, so a failed
    // write never leaves a corrupt flow configuration behind
    try (final OutputStream fileOut = Files.newOutputStream(tempFile);
         final OutputStream outStream = new GZIPOutputStream(fileOut)) {
        final StandardFlowSerializer xmlTransformer = new StandardFlowSerializer(encryptor);
        controller.serialize(xmlTransformer, outStream);
        Files.deleteIfExists(configFile);
        FileUtils.renameFile(tempFile.toFile(), configFile.toFile(), 5, true);
    } catch (final FlowSerializationException fse) {
        // surface serialization problems to callers as an IOException
        throw new IOException(fse);
    } finally {
        Files.deleteIfExists(tempFile);
    }
    if (archive) {
        try {
            archiveManager.archive();
        } catch (final Exception ex) {
            LOG.error("Unable to archive flow configuration as requested due to " + ex);
            if (LOG.isDebugEnabled()) {
                LOG.error("", ex);
            }
        }
    }
}
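Note that save never overwrites flow.xml.gz in place: the serialized flow goes to a temp file and is only swapped in once fully written. Below is a minimal, self-contained sketch of the same write-then-swap pattern using only java.nio; the file name and the serialized bytes are placeholders, and Files.move is a simplified stand-in for the retrying FileUtils.renameFile above.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.zip.GZIPOutputStream;

public class TempFileSwapSketch {
    public static void main(String[] args) throws IOException {
        // hypothetical stand-in for the bytes produced by controller.serialize(...)
        final byte[] serializedFlow = "<flowController/>".getBytes(StandardCharsets.UTF_8);
        final Path configFile = Paths.get("flow.xml.gz"); // hypothetical target path
        final Path tempFile = configFile.resolveSibling(configFile.getFileName() + ".new.xml.gz");
        // write the new flow to a temp file first, so a crash mid-write never
        // leaves a truncated flow.xml.gz behind
        try (OutputStream out = new GZIPOutputStream(Files.newOutputStream(tempFile))) {
            out.write(serializedFlow);
        }
        // only after the write completes, swap the temp file into place
        Files.move(tempFile, configFile, StandardCopyOption.REPLACE_EXISTING);
    }
}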
Use of org.apache.nifi.controller.serialization.FlowSerializationException in project nifi by apache.
The class TemplateSerializer, method serialize.
/**
 * This method, when called, assumes the Framework Nar ClassLoader is in the
 * classloader hierarchy of the current context class loader.
 * @param dto the template dto to serialize
 * @return serialized representation of the DTO
 */
public static byte[] serialize(final TemplateDTO dto) {
    try {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final BufferedOutputStream bos = new BufferedOutputStream(baos);
        final JAXBContext context = JAXBContext.newInstance(TemplateDTO.class);
        final Marshaller marshaller = context.createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        marshaller.marshal(dto, bos);
        bos.flush();
        // Note: for really large templates this could use a lot of heap space
        return baos.toByteArray();
    } catch (final IOException | JAXBException e) {
        throw new FlowSerializationException(e);
    }
}
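TemplateSerializer leans entirely on JAXB: a JAXBContext for TemplateDTO, a Marshaller with formatted output, and an in-memory byte stream. Here is a stripped-down sketch of the same marshalling steps against a hypothetical annotated class (TemplateDTO itself needs the NiFi classpath); it runs on Java 8, where javax.xml.bind ships with the JDK.

import java.io.ByteArrayOutputStream;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlRootElement;

public class JaxbMarshalSketch {

    // hypothetical stand-in for TemplateDTO
    @XmlRootElement
    public static class Template {
        public String name = "demo-template";
    }

    public static byte[] serialize(final Template template) {
        try {
            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            final Marshaller marshaller = JAXBContext.newInstance(Template.class).createMarshaller();
            // pretty-print, mirroring JAXB_FORMATTED_OUTPUT above
            marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
            marshaller.marshal(template, baos);
            // the whole document is buffered in memory, hence the heap caveat above
            return baos.toByteArray();
        } catch (final JAXBException e) {
            throw new IllegalStateException("Template serialization failed", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(new String(serialize(new Template())));
    }
}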
Use of org.apache.nifi.controller.serialization.FlowSerializationException in project nifi by apache.
The class JettyServer, method start.
@Override
public void start() {
    try {
        ExtensionManager.discoverExtensions(systemBundle, bundles);
        ExtensionManager.logClassLoaderMapping();
        DocGenerator.generate(props, extensionMapping);
        // start the server
        server.start();
        // ensure everything started successfully
        for (Handler handler : server.getChildHandlers()) {
            // see if the handler is a web app
            if (handler instanceof WebAppContext) {
                WebAppContext context = (WebAppContext) handler;
                // see if this web app had any deployment exceptions that would
                // cause it to be unavailable
                if (context.getUnavailableException() != null) {
                    startUpFailure(context.getUnavailableException());
                }
            }
        }
        // this must be done after starting the server (and ensuring there were no start up failures)
        if (webApiContext != null) {
            // give the web api the component ui extensions
            final ServletContext webApiServletContext = webApiContext.getServletHandler().getServletContext();
            webApiServletContext.setAttribute("nifi-ui-extensions", componentUiExtensions);
            // get the application context
            final WebApplicationContext webApplicationContext = WebApplicationContextUtils.getRequiredWebApplicationContext(webApiServletContext);
            // component ui extensions
            if (CollectionUtils.isNotEmpty(componentUiExtensionWebContexts)) {
                final NiFiWebConfigurationContext configurationContext = webApplicationContext.getBean("nifiWebConfigurationContext", NiFiWebConfigurationContext.class);
                for (final WebAppContext customUiContext : componentUiExtensionWebContexts) {
                    // set the NiFi context in each custom ui servlet context
                    final ServletContext customUiServletContext = customUiContext.getServletHandler().getServletContext();
                    customUiServletContext.setAttribute("nifi-web-configuration-context", configurationContext);
                    // add the security filter to any ui extension wars
                    final FilterHolder securityFilter = webApiContext.getServletHandler().getFilter("springSecurityFilterChain");
                    if (securityFilter != null) {
                        customUiContext.addFilter(securityFilter, "/*", EnumSet.allOf(DispatcherType.class));
                    }
                }
            }
            // content viewer extensions
            if (CollectionUtils.isNotEmpty(contentViewerWebContexts)) {
                for (final WebAppContext contentViewerContext : contentViewerWebContexts) {
                    // add the security filter to any content viewer wars
                    final FilterHolder securityFilter = webApiContext.getServletHandler().getFilter("springSecurityFilterChain");
                    if (securityFilter != null) {
                        contentViewerContext.addFilter(securityFilter, "/*", EnumSet.allOf(DispatcherType.class));
                    }
                }
            }
            // content viewer controller
            if (webContentViewerContext != null) {
                final ContentAccess contentAccess = webApplicationContext.getBean("contentAccess", ContentAccess.class);
                // add the content access
                final ServletContext webContentViewerServletContext = webContentViewerContext.getServletHandler().getServletContext();
                webContentViewerServletContext.setAttribute("nifi-content-access", contentAccess);
                final FilterHolder securityFilter = webApiContext.getServletHandler().getFilter("springSecurityFilterChain");
                if (securityFilter != null) {
                    webContentViewerContext.addFilter(securityFilter, "/*", EnumSet.allOf(DispatcherType.class));
                }
            }
        }
        // ensure the web documentation war was loaded and provide the extension mapping
        if (webDocsContext != null) {
            final ServletContext webDocsServletContext = webDocsContext.getServletHandler().getServletContext();
            webDocsServletContext.setAttribute("nifi-extension-mapping", extensionMapping);
        }
        // if running as a node in a cluster, start and load the flow service
        if (props.isNode()) {
            FlowService flowService = null;
            try {
                logger.info("Loading Flow...");
                ApplicationContext ctx = WebApplicationContextUtils.getWebApplicationContext(webApiContext.getServletContext());
                flowService = ctx.getBean("flowService", FlowService.class);
                // start and load the flow
                flowService.start();
                flowService.load(null);
                logger.info("Flow loaded successfully.");
            } catch (BeansException | LifeCycleStartException | IOException | FlowSerializationException | FlowSynchronizationException | UninheritableFlowException e) {
                // ensure the flow service is terminated
                if (flowService != null && flowService.isRunning()) {
                    flowService.stop(false);
                }
                logger.error("Unable to load flow due to: " + e, e);
                // cannot wrap the exception as they are not defined in a classloader accessible to the caller
                throw new Exception("Unable to load flow due to: " + e);
            }
        }
        // dump the application url after confirming everything started successfully
        dumpUrls();
    } catch (Exception ex) {
        startUpFailure(ex);
    }
}
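One detail worth noting: a web app that fails to deploy does not make server.start() throw; Jetty records the failure on the WebAppContext, which is why the loop above interrogates getUnavailableException(). A minimal sketch of that fail-fast check, assuming a Server already configured with one or more WebAppContexts (Jetty 9 API, as used by NiFi at the time):

import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.webapp.WebAppContext;

public class DeploymentCheckSketch {

    // start the server, then fail fast if any web app recorded a deployment error
    static void startAndVerify(final Server server) throws Exception {
        server.start();
        for (final Handler handler : server.getChildHandlers()) {
            if (handler instanceof WebAppContext) {
                final WebAppContext context = (WebAppContext) handler;
                // a bad WAR surfaces here, not as an exception from start()
                if (context.getUnavailableException() != null) {
                    server.stop();
                    throw new IllegalStateException(
                        "Web application failed to deploy: " + context.getContextPath(),
                        context.getUnavailableException());
                }
            }
        }
    }
}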
Use of org.apache.nifi.controller.serialization.FlowSerializationException in project nifi by apache.
The class StandardFlowService, method loadFromConnectionResponse.
private void loadFromConnectionResponse(final ConnectionResponse response) throws ConnectionException {
    writeLock.lock();
    try {
        if (response.getNodeConnectionStatuses() != null) {
            clusterCoordinator.resetNodeStatuses(response.getNodeConnectionStatuses().stream()
                .collect(Collectors.toMap(status -> status.getNodeIdentifier(), status -> status)));
        }
        // get the dataflow from the response
        final DataFlow dataFlow = response.getDataFlow();
        if (logger.isTraceEnabled()) {
            logger.trace("ResponseFlow = " + new String(dataFlow.getFlow(), StandardCharsets.UTF_8));
        }
        // load new controller state
        loadFromBytes(dataFlow, true);
        // set node ID on controller before we start heartbeating because heartbeat needs node ID
        nodeId = response.getNodeIdentifier();
        logger.info("Setting Flow Controller's Node ID: " + nodeId);
        controller.setNodeId(nodeId);
        clusterCoordinator.setLocalNodeIdentifier(nodeId);
        clusterCoordinator.setConnected(true);
        revisionManager.reset(response.getComponentRevisions().stream().map(rev -> rev.toRevision()).collect(Collectors.toList()));
        // mark the node as clustered
        controller.setClustered(true, response.getInstanceId());
        controller.setConnectionStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTED));
        // initialize the controller after the flow is loaded so we don't take any actions on repos until everything is good
        initializeController();
        // start the processors as indicated by the dataflow
        controller.onFlowInitialized(autoResumeState);
        loadSnippets(dataFlow.getSnippets());
        controller.startHeartbeating();
    } catch (final UninheritableFlowException ufe) {
        throw new UninheritableFlowException(CONNECTION_EXCEPTION_MSG_PREFIX + "local flow is different than cluster flow.", ufe);
    } catch (final MissingBundleException mbe) {
        throw new MissingBundleException(CONNECTION_EXCEPTION_MSG_PREFIX + "cluster flow contains bundles that do not exist on the current node", mbe);
    } catch (final FlowSerializationException fse) {
        throw new ConnectionException(CONNECTION_EXCEPTION_MSG_PREFIX + "local or cluster flow is malformed.", fse);
    } catch (final FlowSynchronizationException fse) {
        throw new FlowSynchronizationException(CONNECTION_EXCEPTION_MSG_PREFIX + "local flow controller partially updated. Administrator should disconnect node and review flow for corruption.", fse);
    } catch (final Exception ex) {
        throw new ConnectionException("Failed to connect node to cluster due to: " + ex, ex);
    } finally {
        writeLock.unlock();
    }
}
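The first statement in the method uses a small stream idiom: collecting the per-node statuses into a map keyed by node identifier before handing them to the cluster coordinator. A self-contained sketch of that idiom, with a hypothetical stand-in for NodeConnectionStatus:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class StatusMapSketch {

    // hypothetical stand-in for NodeConnectionStatus
    static final class NodeStatus {
        private final String nodeId;
        NodeStatus(final String nodeId) { this.nodeId = nodeId; }
        String getNodeIdentifier() { return nodeId; }
    }

    public static void main(String[] args) {
        final List<NodeStatus> statuses = Arrays.asList(new NodeStatus("node-1"), new NodeStatus("node-2"));
        // key each status by its node identifier, as resetNodeStatuses expects;
        // Collectors.toMap throws on duplicate keys, so identifiers must be unique
        final Map<String, NodeStatus> byId = statuses.stream()
            .collect(Collectors.toMap(NodeStatus::getNodeIdentifier, Function.identity()));
        System.out.println(byId.keySet());
    }
}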
Use of org.apache.nifi.controller.serialization.FlowSerializationException in project nifi by apache.
The class StandardFlowService, method load.
@Override
public void load(final DataFlow dataFlow) throws IOException, FlowSerializationException, FlowSynchronizationException, UninheritableFlowException, MissingBundleException {
    if (configuredForClustering) {
        // create the initial flow from disk if it exists, or from serializing the empty root group in the flow controller
        final DataFlow initialFlow = (dataFlow == null) ? createDataFlow() : dataFlow;
        if (logger.isTraceEnabled()) {
            logger.trace("InitialFlow = " + new String(initialFlow.getFlow(), StandardCharsets.UTF_8));
        }
        // sync the initial flow into the flow controller so that if the flow came from disk we load the
        // whole flow into the flow controller and apply any bundle upgrades
        writeLock.lock();
        try {
            loadFromBytes(initialFlow, true);
        } finally {
            writeLock.unlock();
        }
        // get the proposed flow by serializing the flow controller, which now holds the synced version from above
        final DataFlow proposedFlow = createDataFlowFromController();
        if (logger.isTraceEnabled()) {
            logger.trace("ProposedFlow = " + new String(proposedFlow.getFlow(), StandardCharsets.UTF_8));
        }
        /*
         * Attempt to connect to the cluster. If the manager is able to
         * provide a data flow, then the manager will send a connection
         * response. If the manager was unable to be located, then
         * the response will be null and we should load the local dataflow
         * and heartbeat until a manager is located.
         */
        final boolean localFlowEmpty = StandardFlowSynchronizer.isEmpty(proposedFlow);
        final ConnectionResponse response = connect(true, localFlowEmpty, proposedFlow);
        // obtain the write lock while we are updating the controller. We need to ensure that we don't
        // obtain the lock before calling connect(), though, or we will end up with a deadlock:
        // the node that is receiving the connection request won't be able to get the current
        // flow, as that requires a read lock.
        writeLock.lock();
        try {
            if (response == null || response.shouldTryLater()) {
                logger.info("Flow controller will load local dataflow and suspend connection handshake until a cluster connection response is received.");
                // set node ID on controller before we start heartbeating because heartbeat needs node ID
                controller.setNodeId(nodeId);
                clusterCoordinator.setLocalNodeIdentifier(nodeId);
                // set node as clustered, since it is trying to connect to a cluster
                controller.setClustered(true, null);
                clusterCoordinator.setConnected(false);
                controller.setConnectionStatus(new NodeConnectionStatus(nodeId, DisconnectionCode.NOT_YET_CONNECTED));
                /*
                 * Start heartbeating. Heartbeats will fail because we can't reach
                 * the manager, but when we locate the manager, the node will
                 * reconnect and establish a connection to the cluster. The
                 * heartbeat is the trigger that will cause the manager to
                 * issue a reconnect request.
                 */
                controller.startHeartbeating();
                // initialize the controller after the flow is loaded so we don't take any actions on repos until everything is good
                initializeController();
                // notify controller that flow is initialized
                try {
                    controller.onFlowInitialized(autoResumeState);
                } catch (final Exception ex) {
                    logger.warn("Unable to start all processors due to invalid flow configuration.");
                    if (logger.isDebugEnabled()) {
                        logger.warn(StringUtils.EMPTY, ex);
                    }
                }
            } else {
                try {
                    loadFromConnectionResponse(response);
                } catch (final Exception e) {
                    logger.error("Failed to load flow from cluster due to: " + e, e);
                    handleConnectionFailure(e);
                    throw new IOException(e);
                }
            }
            // save the flow in the controller so we write out the latest flow with any updated bundles to disk
            dao.save(controller, true);
        } finally {
            writeLock.unlock();
        }
    } else {
        writeLock.lock();
        try {
            // operating in standalone mode, so load the proposed flow and initialize the controller
            loadFromBytes(dataFlow, true);
            initializeController();
            dao.save(controller, true);
        } finally {
            writeLock.unlock();
        }
    }
}
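The comment before the second writeLock.lock() captures the key ordering constraint: connect() must run without the write lock held, because the node answering the connection request needs a read lock on its own flow. A minimal sketch of that lock discipline, with hypothetical stand-ins for the load, connect, and apply steps:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockOrderingSketch {

    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

    public void loadAndConnect() {
        // phase 1: sync the initial flow under the write lock
        rwLock.writeLock().lock();
        try {
            loadInitialFlow();
        } finally {
            rwLock.writeLock().unlock();
        }
        // phase 2: connect WITHOUT the write lock held; the responder needs a
        // read lock to serve its current flow, so holding it here risks deadlock
        final Object response = connect();
        // phase 3: apply the response under the write lock again
        rwLock.writeLock().lock();
        try {
            applyResponse(response);
        } finally {
            rwLock.writeLock().unlock();
        }
    }

    // hypothetical stand-ins for the real flow-service operations
    private void loadInitialFlow() { }
    private Object connect() { return new Object(); }
    private void applyResponse(final Object response) { }
}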