Use of org.apache.nifi.cluster.protocol.ConnectionResponse in project nifi by apache.
The class StandardFlowService, method handleReconnectionRequest:
private void handleReconnectionRequest(final ReconnectionRequestMessage request) {
    try {
        logger.info("Processing reconnection request from manager.");

        // reconnect
        ConnectionResponse connectionResponse = new ConnectionResponse(getNodeId(), request.getDataFlow(),
            request.getInstanceId(), request.getNodeConnectionStatuses(), request.getComponentRevisions());

        if (connectionResponse.getDataFlow() == null) {
            logger.info("Received a Reconnection Request that contained no DataFlow. Will attempt to connect to cluster using local flow.");
            connectionResponse = connect(false, false, createDataFlowFromController());
        }

        loadFromConnectionResponse(connectionResponse);

        clusterCoordinator.resetNodeStatuses(connectionResponse.getNodeConnectionStatuses().stream()
            .collect(Collectors.toMap(status -> status.getNodeIdentifier(), status -> status)));

        // reconnected, this node needs to explicitly write the inherited flow to disk, and resume heartbeats
        saveFlowChanges();
        controller.resumeHeartbeats();

        logger.info("Node reconnected.");
    } catch (final Exception ex) {
        // disconnect controller
        if (controller.isClustered()) {
            disconnect("Failed to properly handle Reconnection request due to " + ex.toString());
        }

        logger.error("Handling reconnection request failed due to: " + ex, ex);
        handleConnectionFailure(ex);
    }
}
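The resetNodeStatuses call above packs the map construction into a single line. The sketch below pulls it out into a standalone helper for readability; it is a minimal sketch rather than NiFi source, and the helper name and import paths (assumed to follow the NiFi 1.x package layout) are illustrative assumptions.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus;
import org.apache.nifi.cluster.protocol.NodeIdentifier;

// Hypothetical helper (not NiFi source): key each NodeConnectionStatus by its NodeIdentifier,
// producing the map that resetNodeStatuses expects so the coordinator can replace its view
// of every node in one call.
static Map<NodeIdentifier, NodeConnectionStatus> toStatusMap(final List<NodeConnectionStatus> statuses) {
    return statuses.stream()
        .collect(Collectors.toMap(NodeConnectionStatus::getNodeIdentifier, status -> status));
}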
Use of org.apache.nifi.cluster.protocol.ConnectionResponse in project nifi by apache.
The class StandardFlowService, method load:
@Override
public void load(final DataFlow dataFlow) throws IOException, FlowSerializationException, FlowSynchronizationException, UninheritableFlowException, MissingBundleException {
    if (configuredForClustering) {
        // Create the initial flow from disk if it exists, or from serializing the empty root group in flow controller
        final DataFlow initialFlow = (dataFlow == null) ? createDataFlow() : dataFlow;
        if (logger.isTraceEnabled()) {
            logger.trace("InitialFlow = " + new String(initialFlow.getFlow(), StandardCharsets.UTF_8));
        }

        // Sync the initial flow into the flow controller so that if the flow came from disk we loaded the
        // whole flow into the flow controller and applied any bundle upgrades
        writeLock.lock();
        try {
            loadFromBytes(initialFlow, true);
        } finally {
            writeLock.unlock();
        }

        // Get the proposed flow by serializing the flow controller which now has the synced version from above
        final DataFlow proposedFlow = createDataFlowFromController();
        if (logger.isTraceEnabled()) {
            logger.trace("ProposedFlow = " + new String(proposedFlow.getFlow(), StandardCharsets.UTF_8));
        }

        /*
         * Attempt to connect to the cluster. If the manager is able to
         * provide a data flow, then the manager will send a connection
         * response. If the manager was unable to be located, then
         * the response will be null and we should load the local dataflow
         * and heartbeat until a manager is located.
         */
        final boolean localFlowEmpty = StandardFlowSynchronizer.isEmpty(proposedFlow);
        final ConnectionResponse response = connect(true, localFlowEmpty, proposedFlow);

        // obtain write lock while we are updating the controller. We need to ensure that we don't
        // obtain the lock before calling connect(), though, or we will end up getting a deadlock
        // because the node that is receiving the connection request won't be able to get the current
        // flow, as that requires a read lock.
        writeLock.lock();
        try {
            if (response == null || response.shouldTryLater()) {
                logger.info("Flow controller will load local dataflow and suspend connection handshake until a cluster connection response is received.");

                // set node ID on controller before we start heartbeating because heartbeat needs node ID
                controller.setNodeId(nodeId);
                clusterCoordinator.setLocalNodeIdentifier(nodeId);

                // set node as clustered, since it is trying to connect to a cluster
                controller.setClustered(true, null);
                clusterCoordinator.setConnected(false);

                controller.setConnectionStatus(new NodeConnectionStatus(nodeId, DisconnectionCode.NOT_YET_CONNECTED));

                /*
                 * Start heartbeating. Heartbeats will fail because we can't reach
                 * the manager, but when we locate the manager, the node will
                 * reconnect and establish a connection to the cluster. The
                 * heartbeat is the trigger that will cause the manager to
                 * issue a reconnect request.
                 */
                controller.startHeartbeating();

                // Initialize the controller after the flow is loaded so we don't take any actions on repos until everything is good
                initializeController();

                // notify controller that flow is initialized
                try {
                    controller.onFlowInitialized(autoResumeState);
                } catch (final Exception ex) {
                    logger.warn("Unable to start all processors due to invalid flow configuration.");
                    if (logger.isDebugEnabled()) {
                        logger.warn(StringUtils.EMPTY, ex);
                    }
                }
            } else {
                try {
                    loadFromConnectionResponse(response);
                } catch (final Exception e) {
                    logger.error("Failed to load flow from cluster due to: " + e, e);
                    handleConnectionFailure(e);
                    throw new IOException(e);
                }
            }

            // save the flow in the controller so we write out the latest flow with any updated bundles to disk
            dao.save(controller, true);
        } finally {
            writeLock.unlock();
        }
    } else {
        writeLock.lock();
        try {
            // operating in standalone mode, so load proposed flow and initialize the controller
            loadFromBytes(dataFlow, true);
            initializeController();
            dao.save(controller, true);
        } finally {
            writeLock.unlock();
        }
    }
}
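The branch on response == null || response.shouldTryLater() is what lets a node start before its cluster coordinator is reachable: it keeps the local flow, marks itself NOT_YET_CONNECTED, and heartbeats until a reconnection request arrives. A minimal sketch of that decision in isolation follows; only the ConnectionResponse getters are real API, while the enum and helper names are assumptions made for illustration.

// Hypothetical helper (not NiFi source): mirrors the branch in load() above.
enum LoadAction { RUN_LOCAL_FLOW_AND_HEARTBEAT, LOAD_CLUSTER_FLOW }

static LoadAction chooseLoadAction(final ConnectionResponse response) {
    // null: the coordinator could not be located; shouldTryLater(): it answered but is not
    // ready to hand out a flow yet. Either way, run the local flow and keep heartbeating.
    if (response == null || response.shouldTryLater()) {
        return LoadAction.RUN_LOCAL_FLOW_AND_HEARTBEAT;
    }
    // Otherwise the response carries the cluster flow, node statuses, and component revisions.
    return LoadAction.LOAD_CLUSTER_FLOW;
}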
Use of org.apache.nifi.cluster.protocol.ConnectionResponse in project nifi by apache.
The class TestJaxbProtocolUtils, method testRoundTripConnectionResponse:
@Test
public void testRoundTripConnectionResponse() throws JAXBException {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();

    final ConnectionResponseMessage msg = new ConnectionResponseMessage();
    final NodeIdentifier nodeId = new NodeIdentifier("id", "localhost", 8000, "localhost", 8001, "localhost", 8002, 8003, true);
    final DataFlow dataFlow = new StandardDataFlow(new byte[0], new byte[0], new byte[0], new HashSet<>());
    final List<NodeConnectionStatus> nodeStatuses = Collections.singletonList(new NodeConnectionStatus(nodeId, DisconnectionCode.NOT_YET_CONNECTED));
    final List<ComponentRevision> componentRevisions = Collections.singletonList(ComponentRevision.fromRevision(new Revision(8L, "client-1", "component-1")));
    msg.setConnectionResponse(new ConnectionResponse(nodeId, dataFlow, "instance-1", nodeStatuses, componentRevisions));

    JaxbProtocolUtils.JAXB_CONTEXT.createMarshaller().marshal(msg, baos);
    final Object unmarshalled = JaxbProtocolUtils.JAXB_CONTEXT.createUnmarshaller().unmarshal(new ByteArrayInputStream(baos.toByteArray()));
    assertTrue(unmarshalled instanceof ConnectionResponseMessage);

    final ConnectionResponseMessage unmarshalledMsg = (ConnectionResponseMessage) unmarshalled;
    final List<ComponentRevision> revisions = msg.getConnectionResponse().getComponentRevisions();
    assertEquals(1, revisions.size());
    assertEquals(8L, revisions.get(0).getVersion().longValue());
    assertEquals("client-1", revisions.get(0).getClientId());
    assertEquals("component-1", revisions.get(0).getComponentId());

    assertEquals(revisions, unmarshalledMsg.getConnectionResponse().getComponentRevisions());
}
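The assertions above cover only the component revisions. A natural extension, sketched below under the assumption that the unmarshalled response exposes its node statuses through the same getNodeConnectionStatuses() getter used in StandardFlowService above, would be to check that the node statuses also survive the round trip; these lines would go at the end of the same test.

// Sketch of additional assertions for the same test (reuses nodeId and unmarshalledMsg from above).
final List<NodeConnectionStatus> roundTrippedStatuses = unmarshalledMsg.getConnectionResponse().getNodeConnectionStatuses();
assertEquals(1, roundTrippedStatuses.size());
assertEquals(nodeId.getId(), roundTrippedStatuses.get(0).getNodeIdentifier().getId());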