Use of com.datatorrent.stram.ComponentContextPair in project apex-core by apache.
The class StreamingContainer, method deployInputStreams.
@SuppressWarnings("unchecked")
private void deployInputStreams(List<OperatorDeployInfo> operatorList, HashMap<String, ComponentContextPair<Stream, StreamContext>> newStreams) throws UnknownHostException {
  /*
   * Collect any input operators along with their smallest window id;
   * these are subsequently used to set up the window generator.
   */
  ArrayList<OperatorDeployInfo> inputNodes = new ArrayList<>();
  long smallestCheckpointedWindowId = Long.MAX_VALUE;
  // a simple map which maps the OiO node to the node which owns the thread
  Map<Integer, Integer> oioNodes = new ConcurrentHashMap<>();
  /*
   * Hook up all the downstream ports. There are two places where we deal with more than one
   * downstream port. The first one follows immediately for the WindowGenerator. The second
   * case is when the source for the input port of some node in this container is in another
   * container, so we need to create the stream. We need to track this stream along with the
   * other streams, and since many such streams may exist, we hash them against the buffer
   * server info as we did for the outputs, but throw the sink id into the mix as well.
   */
  for (OperatorDeployInfo ndi : operatorList) {
    if (ndi.inputs == null || ndi.inputs.isEmpty()) {
      /*
       * This has to be an InputNode, so let's hook the WindowGenerator to it.
       * A node which does not take any input cannot exist in the DAG since it would be completely
       * unaware of the windows, so for that reason AbstractInputNode allows the Component.INPUT port.
       */
      inputNodes.add(ndi);
      /*
       * When we activate the WindowGenerator, we plan to activate it only from the required windowId.
       */
      ndi.checkpoint = getFinishedCheckpoint(ndi);
      if (ndi.checkpoint.windowId < smallestCheckpointedWindowId) {
        smallestCheckpointedWindowId = ndi.checkpoint.windowId;
      }
    } else {
      Node<?> node = nodes.get(ndi.id);
      for (OperatorDeployInfo.InputDeployInfo nidi : ndi.inputs) {
        if (nidi.streamCodecs.size() != 1) {
          throw new IllegalStateException("Only one input codec configuration should be present");
        }
        Map.Entry<Integer, StreamCodec<?>> entry = nidi.streamCodecs.entrySet().iterator().next();
        Integer streamCodecIdentifier = entry.getKey();
        StreamCodec<?> streamCodec = entry.getValue();
        String sourceIdentifier = Integer.toString(nidi.sourceNodeId).concat(Component.CONCAT_SEPARATOR).concat(nidi.sourcePortName);
        String sinkIdentifier = Integer.toString(ndi.id).concat(Component.CONCAT_SEPARATOR).concat(nidi.portName);
        int queueCapacity = getValue(PortContext.QUEUE_CAPACITY, nidi, ndi);
        Checkpoint checkpoint = getFinishedCheckpoint(ndi);
        ComponentContextPair<Stream, StreamContext> pair = streams.get(sourceIdentifier);
        if (pair == null) {
          pair = newStreams.get(sourceIdentifier);
        }
        if (pair == null) {
          /*
           * We connect to the buffer server for the input on this port.
           * We have already placed all the output streams for all the operators in this container,
           * yet there is no stream which can source this port, so it has to come from the buffer
           * server; let's make a connection to it.
           */
          assert (nidi.locality != Locality.CONTAINER_LOCAL && nidi.locality != Locality.THREAD_LOCAL);
          StreamContext context = new StreamContext(nidi.declaredStreamId);
          context.setBufferServerAddress(InetSocketAddress.createUnresolved(nidi.bufferServerHost, nidi.bufferServerPort));
          InetAddress inetAddress = context.getBufferServerAddress().getAddress();
          if (inetAddress != null && NetUtils.isLocalAddress(inetAddress)) {
            context.setBufferServerAddress(new InetSocketAddress(InetAddress.getByName(null), nidi.bufferServerPort));
          }
          context.put(StreamContext.BUFFER_SERVER_TOKEN, nidi.bufferServerToken);
          String connIdentifier = sourceIdentifier + Component.CONCAT_SEPARATOR + streamCodecIdentifier;
          context.setPortId(nidi.portName);
          context.put(StreamContext.CODEC, streamCodec);
          context.put(StreamContext.EVENT_LOOP, eventloop);
          context.setPartitions(nidi.partitionMask, nidi.partitionKeys);
          //context.setSourceId(sourceIdentifier);
          context.setSourceId(connIdentifier);
          context.setSinkId(sinkIdentifier);
          context.setFinishedWindowId(checkpoint.windowId);
          BufferServerSubscriber subscriber = fastPublisherSubscriber
              ? new FastSubscriber("tcp://".concat(nidi.bufferServerHost).concat(":").concat(String.valueOf(nidi.bufferServerPort)).concat("/").concat(connIdentifier), queueCapacity)
              : new BufferServerSubscriber("tcp://".concat(nidi.bufferServerHost).concat(":").concat(String.valueOf(nidi.bufferServerPort)).concat("/").concat(connIdentifier), queueCapacity);
          if (streamCodec instanceof StreamCodecWrapperForPersistance) {
            subscriber.acquireReservoirForPersistStream(sinkIdentifier, queueCapacity, streamCodec);
          }
          SweepableReservoir reservoir = subscriber.acquireReservoir(sinkIdentifier, queueCapacity);
          if (checkpoint.windowId >= 0) {
            node.connectInputPort(nidi.portName, new WindowIdActivatedReservoir(sinkIdentifier, reservoir, checkpoint.windowId));
          }
          node.connectInputPort(nidi.portName, reservoir);
          newStreams.put(sinkIdentifier, new ComponentContextPair<Stream, StreamContext>(subscriber, context));
          logger.debug("put input stream {} against key {}", subscriber, sinkIdentifier);
        } else {
          assert (nidi.locality == Locality.CONTAINER_LOCAL || nidi.locality == Locality.THREAD_LOCAL);
          /* we are still dealing with the MuxStream originating at the output of the source port */
          StreamContext inlineContext = new StreamContext(nidi.declaredStreamId);
          inlineContext.setSourceId(sourceIdentifier);
          inlineContext.setSinkId(sinkIdentifier);
          Stream stream;
          SweepableReservoir reservoir;
          switch (nidi.locality) {
            case CONTAINER_LOCAL:
              int outputQueueCapacity = getOutputQueueCapacity(operatorList, nidi.sourceNodeId, nidi.sourcePortName);
              if (outputQueueCapacity > queueCapacity) {
                queueCapacity = outputQueueCapacity;
              }
              stream = new InlineStream(queueCapacity);
              reservoir = ((InlineStream) stream).getReservoir();
              if (checkpoint.windowId >= 0) {
                node.connectInputPort(nidi.portName, new WindowIdActivatedReservoir(sinkIdentifier, reservoir, checkpoint.windowId));
              }
              break;
            case THREAD_LOCAL:
              stream = new OiOStream();
              reservoir = ((OiOStream) stream).getReservoir();
              ((OiOStream.OiOReservoir) reservoir).setControlSink(((OiONode) node).getControlSink(reservoir));
              oioNodes.put(ndi.id, nidi.sourceNodeId);
              break;
            default:
              throw new IllegalStateException("Locality can be either ContainerLocal or ThreadLocal");
          }
          node.connectInputPort(nidi.portName, reservoir);
          newStreams.put(sinkIdentifier, new ComponentContextPair<>(stream, inlineContext));
          if (!(pair.component instanceof Stream.MultiSinkCapableStream)) {
            String originalSinkId = pair.context.getSinkId();
            /* we come here only if we are trying to augment the dag */
            StreamContext muxContext = new StreamContext(nidi.declaredStreamId);
            muxContext.setSourceId(sourceIdentifier);
            muxContext.setFinishedWindowId(checkpoint.windowId);
            muxContext.setSinkId(originalSinkId);
            MuxStream muxStream = new MuxStream();
            muxStream.setSink(originalSinkId, pair.component);
            streams.put(originalSinkId, pair);
            Node<?> sourceNode = nodes.get(nidi.sourceNodeId);
            sourceNode.connectOutputPort(nidi.sourcePortName, muxStream);
            newStreams.put(sourceIdentifier, pair = new ComponentContextPair<Stream, StreamContext>(muxStream, muxContext));
          }
          /* here everything should be multisink capable */
          if (streamCodec instanceof StreamCodecWrapperForPersistance) {
            PartitionAwareSinkForPersistence pas;
            if (nidi.partitionKeys == null) {
              pas = new PartitionAwareSinkForPersistence((StreamCodecWrapperForPersistance<Object>) streamCodec, nidi.partitionMask, stream);
            } else {
              pas = new PartitionAwareSinkForPersistence((StreamCodecWrapperForPersistance<Object>) streamCodec, nidi.partitionKeys, nidi.partitionMask, stream);
            }
            ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, pas);
          } else if (nidi.partitionKeys == null || nidi.partitionKeys.isEmpty()) {
            ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, stream);
          } else {
            /*
             * Generally speaking we do not have partitions on the inline streams, so control should not
             * come here; but if it does, we are ready to handle it using the partition aware streams.
             */
            PartitionAwareSink<Object> pas = new PartitionAwareSink<>(streamCodec == null ? nonSerializingStreamCodec : (StreamCodec<Object>) streamCodec, nidi.partitionKeys, nidi.partitionMask, stream);
            ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, pas);
          }
          String streamSinkId = pair.context.getSinkId();
          if (streamSinkId == null) {
            pair.context.setSinkId(sinkIdentifier);
          } else {
            pair.context.setSinkId(streamSinkId.concat(", ").concat(sinkIdentifier));
          }
        }
      }
    }
  }
  setupOiOGroups(oioNodes);
  if (!inputNodes.isEmpty()) {
    WindowGenerator windowGenerator = setupWindowGenerator(smallestCheckpointedWindowId);
    for (OperatorDeployInfo ndi : inputNodes) {
      generators.put(ndi.id, windowGenerator);
      Node<?> node = nodes.get(ndi.id);
      SweepableReservoir reservoir = windowGenerator.acquireReservoir(String.valueOf(ndi.id), 1024);
      if (ndi.checkpoint.windowId >= 0) {
        node.connectInputPort(Node.INPUT, new WindowIdActivatedReservoir(Integer.toString(ndi.id), reservoir, ndi.checkpoint.windowId));
      }
      node.connectInputPort(Node.INPUT, reservoir);
    }
  }
}
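The wiring above keys every stream by an "operatorId" + Component.CONCAT_SEPARATOR + "portName" string and stores it as a ComponentContextPair coupling the Stream with the StreamContext it will later be set up with. The following is a minimal standalone sketch of that convention, not part of StreamingContainer; the node ids and port names are illustrative, and the package locations of Stream, StreamContext and MuxStream are assumed from the usual apex-core layout.

import java.util.HashMap;

import com.datatorrent.api.Component;
import com.datatorrent.stram.ComponentContextPair;
import com.datatorrent.stram.engine.Stream;
import com.datatorrent.stram.engine.StreamContext;
import com.datatorrent.stram.stream.MuxStream;

public class StreamRegistrySketch
{
  public static void main(String[] args)
  {
    // Hypothetical ids standing in for OperatorDeployInfo fields.
    int sourceNodeId = 1;
    String sourcePortName = "output";
    int sinkNodeId = 2;
    String sinkPortName = "input";

    // Identifiers follow the "nodeId" + CONCAT_SEPARATOR + "portName" convention used above.
    String sourceIdentifier = Integer.toString(sourceNodeId).concat(Component.CONCAT_SEPARATOR).concat(sourcePortName);
    String sinkIdentifier = Integer.toString(sinkNodeId).concat(Component.CONCAT_SEPARATOR).concat(sinkPortName);

    // Pair a stream with the context it will later be set up with, the way
    // deployInputStreams does before handing newStreams back to deploy().
    StreamContext context = new StreamContext("exampleStream");
    context.setSourceId(sourceIdentifier);
    context.setSinkId(sinkIdentifier);

    HashMap<String, ComponentContextPair<Stream, StreamContext>> newStreams = new HashMap<>();
    newStreams.put(sinkIdentifier, new ComponentContextPair<Stream, StreamContext>(new MuxStream(), context));

    System.out.println(newStreams.keySet()); // e.g. [2.input], depending on the separator
  }
}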
Use of com.datatorrent.stram.ComponentContextPair in project apex-core by apache.
The class StreamingContainer, method deploy.
private synchronized void deploy(List<OperatorDeployInfo> nodeList) throws Exception {
  /*
   * A little bit of up front sanity check would reduce the percentage of deploy failures later.
   */
  for (OperatorDeployInfo ndi : nodeList) {
    if (nodes.containsKey(ndi.id)) {
      throw new IllegalStateException("Node with id: " + ndi.id + " already present in container " + containerId + "!");
    }
  }
  deployNodes(nodeList);
  HashMap<String, ArrayList<String>> groupedInputStreams = new HashMap<>();
  for (OperatorDeployInfo ndi : nodeList) {
    groupInputStreams(groupedInputStreams, ndi);
  }
  HashMap<String, ComponentContextPair<Stream, StreamContext>> newStreams = deployOutputStreams(nodeList, groupedInputStreams);
  deployInputStreams(nodeList, newStreams);
  for (ComponentContextPair<Stream, StreamContext> pair : newStreams.values()) {
    pair.component.setup(pair.context);
  }
  streams.putAll(newStreams);
  HashMap<Integer, OperatorDeployInfo> operatorMap = new HashMap<>(nodeList.size());
  for (OperatorDeployInfo o : nodeList) {
    operatorMap.put(o.id, o);
  }
  activate(operatorMap, newStreams);
}
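deploy() shows where ComponentContextPair sits in the stream lifecycle: deployOutputStreams() and deployInputStreams() only collect the pairs, then each Stream is set up with its paired StreamContext before the pairs are merged into the container-wide streams map. Below is a condensed sketch of that setup-then-register step; the class, method and parameter names are illustrative, and the package locations are assumed from the usual apex-core layout.

import java.util.Map;

import com.datatorrent.stram.ComponentContextPair;
import com.datatorrent.stram.engine.Stream;
import com.datatorrent.stram.engine.StreamContext;

public class StreamLifecycleSketch
{
  /**
   * Mirrors the loop in deploy(): initialize every new Stream with the StreamContext it was
   * paired with, then fold the pairs into the container-wide collection. The two maps are
   * hypothetical stand-ins for the container's newStreams and streams fields.
   */
  public static void setupAndRegister(Map<String, ComponentContextPair<Stream, StreamContext>> newStreams,
      Map<String, ComponentContextPair<Stream, StreamContext>> streams)
  {
    for (ComponentContextPair<Stream, StreamContext> pair : newStreams.values()) {
      pair.component.setup(pair.context);
    }
    streams.putAll(newStreams);
  }
}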
Use of com.datatorrent.stram.ComponentContextPair in project apex-core by apache.
The class StreamingContainer, method deployOutputStreams.
private HashMap<String, ComponentContextPair<Stream, StreamContext>> deployOutputStreams(List<OperatorDeployInfo> nodeList, HashMap<String, ArrayList<String>> groupedInputStreams) throws Exception {
  HashMap<String, ComponentContextPair<Stream, StreamContext>> newStreams = new HashMap<>();
  /*
   * We proceed to deploy all the output streams. At the end of this block, our streams collection
   * will contain all the streams which originate at the output port of the operators. The streams
   * are generally mapped against the "nodename.portname" string. But the BufferServerPublishers which
   * share the output port with other inline streams are mapped against the Buffer Server port to
   * avoid collision and at the same time keep track of these buffer streams.
   */
  for (OperatorDeployInfo ndi : nodeList) {
    Node<?> node = nodes.get(ndi.id);
    long checkpointWindowId = ndi.checkpoint.windowId;
    for (OperatorDeployInfo.OutputDeployInfo nodi : ndi.outputs) {
      String sourceIdentifier = Integer.toString(ndi.id).concat(Component.CONCAT_SEPARATOR).concat(nodi.portName);
      int queueCapacity = getValue(PortContext.QUEUE_CAPACITY, nodi, ndi);
      logger.debug("for stream {} the queue capacity is {}", sourceIdentifier, queueCapacity);
      ArrayList<String> collection = groupedInputStreams.get(sourceIdentifier);
      Map<Integer, StreamCodec<?>> streamCodecs = nodi.streamCodecs;
      if ((collection == null) && (streamCodecs.size() == 1)) {
        assert (nodi.bufferServerHost != null) : "resulting stream cannot be inline: " + nodi;
        /*
         * Let's create a stream to carry the data to the Buffer Server.
         * Nobody in this container is interested in the output placed on this stream, but
         * this stream exists. That means someone outside of this container must be interested.
         */
        Map.Entry<Integer, StreamCodec<?>> entry = streamCodecs.entrySet().iterator().next();
        StreamCodec<?> streamCodec = entry.getValue();
        Integer streamCodecIdentifier = entry.getKey();
        String connIdentifier = sourceIdentifier + Component.CONCAT_SEPARATOR + streamCodecIdentifier;
        SimpleEntry<String, ComponentContextPair<Stream, StreamContext>> deployBufferServerPublisher =
            deployBufferServerPublisher(connIdentifier, streamCodec, checkpointWindowId, queueCapacity, nodi);
        newStreams.put(sourceIdentifier, deployBufferServerPublisher.getValue());
        node.connectOutputPort(nodi.portName, deployBufferServerPublisher.getValue().component);
      } else {
        /*
         * In this case we have two possibilities: either we have one inline stream or multiple streams.
         * Since we cannot tell at this point, we assume that we will have multiple streams and
         * plan accordingly. We will possibly come to this code block multiple times, so we create
         * the MuxStream only the first time and use it for subsequent calls of this block.
         *
         * There is also the possibility that we have a stream with multiple sinks having distinct codecs.
         */
        ComponentContextPair<Stream, StreamContext> pair = newStreams.get(sourceIdentifier);
        if (pair == null) {
          /**
           * Let's multiplex the output placed on this stream.
           * This container itself contains more than one interested party.
           */
          StreamContext context = new StreamContext(nodi.declaredStreamId);
          context.setSourceId(sourceIdentifier);
          context.setFinishedWindowId(checkpointWindowId);
          Stream stream = new MuxStream();
          newStreams.put(sourceIdentifier, pair = new ComponentContextPair<>(stream, context));
          node.connectOutputPort(nodi.portName, stream);
        }
        if (nodi.bufferServerHost != null) {
          /*
           * Although there is a node in this container interested in the output placed on this stream,
           * there seems to be at least one more interested party placed in a container other than this one.
           */
          for (Map.Entry<Integer, StreamCodec<?>> entry : streamCodecs.entrySet()) {
            Integer streamCodecIdentifier = entry.getKey();
            StreamCodec<?> streamCodec = entry.getValue();
            String connIdentifier = sourceIdentifier + Component.CONCAT_SEPARATOR + streamCodecIdentifier;
            SimpleEntry<String, ComponentContextPair<Stream, StreamContext>> deployBufferServerPublisher =
                deployBufferServerPublisher(connIdentifier, streamCodec, checkpointWindowId, queueCapacity, nodi);
            newStreams.put(deployBufferServerPublisher.getKey(), deployBufferServerPublisher.getValue());
            String sinkIdentifier = pair.context.getSinkId();
            if (sinkIdentifier == null) {
              pair.context.setSinkId(deployBufferServerPublisher.getKey());
            } else {
              pair.context.setSinkId(sinkIdentifier.concat(", ").concat(deployBufferServerPublisher.getKey()));
            }
            ((Stream.MultiSinkCapableStream) pair.component).setSink(deployBufferServerPublisher.getKey(), deployBufferServerPublisher.getValue().component);
          }
        }
      }
    }
  }
  return newStreams;
}
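When more than one party consumes the same output port, deployOutputStreams pairs a MuxStream with the port's StreamContext, attaches each downstream sink by its sink identifier, and accumulates the sink ids on the paired context. The following is a minimal sketch of that fan-out pattern, not StreamingContainer code: the identifiers are illustrative literals, the package locations are assumed from the usual apex-core layout, and extra MuxStream instances stand in for the inline streams or buffer-server publishers that the real method would attach.

import com.datatorrent.stram.ComponentContextPair;
import com.datatorrent.stram.engine.Stream;
import com.datatorrent.stram.engine.StreamContext;
import com.datatorrent.stram.stream.MuxStream;

public class MuxFanOutSketch
{
  public static void main(String[] args)
  {
    // Multiplex the output of a single port; "1.output" is an illustrative source identifier.
    StreamContext context = new StreamContext("fanOutStream");
    context.setSourceId("1.output");

    ComponentContextPair<Stream, StreamContext> pair =
        new ComponentContextPair<Stream, StreamContext>(new MuxStream(), context);

    // Attach two downstream sinks by their sink identifiers; a MuxStream is used here only as a
    // placeholder sink, since any Stream can be passed to setSink as shown in the method above.
    for (String sinkIdentifier : new String[]{"2.input", "3.input"}) {
      ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, new MuxStream());

      // Accumulate the sink ids on the paired context, the same way deployOutputStreams does.
      String sinkId = pair.context.getSinkId();
      pair.context.setSinkId(sinkId == null ? sinkIdentifier : sinkId.concat(", ").concat(sinkIdentifier));
    }
  }
}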