Use of com.datatorrent.api.Operator.InputPort in project apex-core by apache.
The class TupleRecorderCollection, method startRecording:
private void startRecording(String id, final Node<?> node, int operatorId, final String portName, long numWindows) {
  PortMappingDescriptor descriptor = node.getPortMappingDescriptor();
  OperatorIdPortNamePair operatorIdPortNamePair = new OperatorIdPortNamePair(operatorId, portName);
  // check any recording conflict
  boolean conflict = false;
  if (containsKey(new OperatorIdPortNamePair(operatorId, null))) {
    conflict = true;
  } else if (portName == null) {
    for (Map.Entry<String, PortContextPair<InputPort<?>>> entry : descriptor.inputPorts.entrySet()) {
      if (containsKey(new OperatorIdPortNamePair(operatorId, entry.getKey()))) {
        conflict = true;
        break;
      }
    }
    for (Map.Entry<String, PortContextPair<OutputPort<?>>> entry : descriptor.outputPorts.entrySet()) {
      if (containsKey(new OperatorIdPortNamePair(operatorId, entry.getKey()))) {
        conflict = true;
        break;
      }
    }
  } else {
    if (containsKey(operatorIdPortNamePair)) {
      conflict = true;
    }
  }
  if (!conflict) {
    logger.debug("Executing start recording request for {}", operatorIdPortNamePair);
    if (gatewayAddress != null && wsClient == null) {
      synchronized (this) {
        if (wsClient == null) {
          try {
            wsClient = new SharedPubSubWebSocketClient((gatewayUseSsl ? "wss://" : "ws://") + gatewayAddress + "/pubsub", 500);
            if (gatewayUserName != null && gatewayPassword != null) {
              wsClient.setLoginUrl((gatewayUseSsl ? "https://" : "http://") + gatewayAddress + StreamingContainerManager.GATEWAY_LOGIN_URL_PATH);
              wsClient.setUserName(gatewayUserName);
              wsClient.setPassword(gatewayPassword);
            }
            wsClient.setup();
          } catch (Exception ex) {
            logger.warn("Error initializing websocket", ex);
          }
        }
      }
    }
    TupleRecorder tupleRecorder = new TupleRecorder(id, appId);
    tupleRecorder.setWebSocketClient(wsClient);
    HashMap<String, Sink<Object>> sinkMap = new HashMap<>();
    for (Map.Entry<String, PortContextPair<InputPort<?>>> entry : descriptor.inputPorts.entrySet()) {
      String streamId = getDeclaredStreamId(operatorId, entry.getKey());
      if (streamId == null) {
        streamId = portName + "_implicit_stream";
      }
      if (entry.getValue().context != null && (portName == null || entry.getKey().equals(portName))) {
        logger.debug("Adding recorder sink to input port {}, stream {}", entry.getKey(), streamId);
        tupleRecorder.addInputPortInfo(entry.getKey(), streamId);
        sinkMap.put(entry.getKey(), tupleRecorder.newSink(entry.getKey()));
      }
    }
    for (Map.Entry<String, PortContextPair<OutputPort<?>>> entry : descriptor.outputPorts.entrySet()) {
      String streamId = getDeclaredStreamId(operatorId, entry.getKey());
      if (streamId == null) {
        streamId = portName + "_implicit_stream";
      }
      if (portName == null || entry.getKey().equals(portName)) {
        logger.debug("Adding recorder sink to output port {}, stream {}", entry.getKey(), streamId);
        tupleRecorder.addOutputPortInfo(entry.getKey(), streamId);
        sinkMap.put(entry.getKey(), tupleRecorder.newSink(entry.getKey()));
      }
    }
    if (!sinkMap.isEmpty()) {
      logger.debug("Started recording on {} through {}", operatorIdPortNamePair, System.identityHashCode(this));
      String basePath = appPath + "/recordings/" + operatorId + "/" + tupleRecorder.getId();
      tupleRecorder.getStorage().setBasePath(basePath);
      tupleRecorder.getStorage().setBytesPerPartFile(tupleRecordingPartFileSize);
      tupleRecorder.getStorage().setMillisPerPartFile(tupleRecordingPartFileTimeMillis);
      node.addSinks(sinkMap);
      tupleRecorder.setup(node.getOperator(), codecs);
      put(operatorIdPortNamePair, tupleRecorder);
      if (numWindows > 0) {
        tupleRecorder.setNumWindows(numWindows, new Runnable() {
          @Override
          public void run() {
            node.context.request(new OperatorRequest() {
              @Override
              public StatsListener.OperatorResponse execute(Operator operator, int operatorId, long windowId) throws IOException {
                stopRecording(node, operatorId, portName);
                return null;
              }
            });
          }
        });
      }
    } else {
      logger.warn("Tuple recording request ignored because operator is not connected on the specified port.");
    }
  } else {
    logger.error("Operator id {} is already being recorded.", operatorId);
  }
}
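Each entry registered in sinkMap above implements com.datatorrent.api.Sink, which the engine invokes for every tuple passing through the port. As a rough sketch of that contract (not the actual TupleRecorder sink; the class name CountingSink is illustrative), a minimal sink could look like this:

import com.datatorrent.api.Sink;

public class CountingSink implements Sink<Object>
{
  private int count;

  @Override
  public void put(Object tuple)
  {
    // a real recorder sink would serialize the tuple to a part file here
    count++;
  }

  @Override
  public int getCount(boolean reset)
  {
    int c = count;
    if (reset) {
      count = 0;
    }
    return c;
  }
}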
Use of com.datatorrent.api.Operator.InputPort in project apex-core by apache.
The class StreamingContainer, method setupNode:
private void setupNode(OperatorDeployInfo ndi) {
  failedNodes.remove(ndi.id);
  final Node<?> node = nodes.get(ndi.id);
  node.setup(node.context);
  /* setup context for all the input ports */
  LinkedHashMap<String, PortContextPair<InputPort<?>>> inputPorts = node.getPortMappingDescriptor().inputPorts;
  LinkedHashMap<String, PortContextPair<InputPort<?>>> newInputPorts = new LinkedHashMap<>(inputPorts.size());
  for (OperatorDeployInfo.InputDeployInfo idi : ndi.inputs) {
    InputPort<?> port = inputPorts.get(idi.portName).component;
    PortContext context = new PortContext(idi.contextAttributes, node.context);
    newInputPorts.put(idi.portName, new PortContextPair<InputPort<?>>(port, context));
    port.setup(context);
  }
  inputPorts.putAll(newInputPorts);
  /* setup context for all the output ports */
  LinkedHashMap<String, PortContextPair<OutputPort<?>>> outputPorts = node.getPortMappingDescriptor().outputPorts;
  LinkedHashMap<String, PortContextPair<OutputPort<?>>> newOutputPorts = new LinkedHashMap<>(outputPorts.size());
  for (OperatorDeployInfo.OutputDeployInfo odi : ndi.outputs) {
    OutputPort<?> port = outputPorts.get(odi.portName).component;
    PortContext context = new PortContext(odi.contextAttributes, node.context);
    newOutputPorts.put(odi.portName, new PortContextPair<OutputPort<?>>(port, context));
    port.setup(context);
  }
  outputPorts.putAll(newOutputPorts);
  logger.debug("activating {} in container {}", node, containerId);
  /* This introduces need for synchronization on processNodeRequest which was solved by adding deleted field in StramToNodeRequest */
  processNodeRequests(false);
  node.activate();
  eventBus.publish(new NodeActivationEvent(node));
}
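The port.setup(context) calls above hand each port its deploy-time attributes. On the operator side, a port can read those attributes by overriding setup on DefaultInputPort. A minimal sketch, assuming the BaseOperator convenience base class and an illustrative operator name (QUEUE_CAPACITY is a real Context.PortContext attribute):

import com.datatorrent.api.Context.PortContext;
import com.datatorrent.api.DefaultInputPort;
import com.datatorrent.common.util.BaseOperator;

public class CapacityAwareOperator extends BaseOperator
{
  private transient int queueCapacity;

  public final transient DefaultInputPort<String> input = new DefaultInputPort<String>()
  {
    @Override
    public void setup(PortContext context)
    {
      // read a deploy-time attribute delivered by the engine, as in setupNode above
      queueCapacity = context.getValue(PortContext.QUEUE_CAPACITY);
    }

    @Override
    public void process(String tuple)
    {
      // per-tuple application logic goes here
    }
  };
}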
Use of com.datatorrent.api.Operator.InputPort in project beam by apache.
The class TranslationContext, method populateDAG:
public void populateDAG(DAG dag) {
  for (Map.Entry<String, Operator> nameAndOperator : this.operators.entrySet()) {
    dag.addOperator(nameAndOperator.getKey(), nameAndOperator.getValue());
  }
  int streamIndex = 0;
  for (Map.Entry<PCollection, Pair<OutputPortInfo, List<InputPortInfo>>> streamEntry : this.streams.entrySet()) {
    List<InputPortInfo> destInfo = streamEntry.getValue().getRight();
    InputPort[] sinks = new InputPort[destInfo.size()];
    for (int i = 0; i < sinks.length; i++) {
      sinks[i] = destInfo.get(i).port;
    }
    if (sinks.length > 0) {
      DAG.StreamMeta streamMeta = dag.addStream("stream" + streamIndex++, streamEntry.getValue().getLeft().port, sinks);
      if (pipelineOptions.isParDoFusionEnabled()) {
        optimizeStreams(streamMeta, streamEntry);
      }
      for (InputPort port : sinks) {
        PCollection pc = streamEntry.getKey();
        Coder coder = pc.getCoder();
        if (pc.getWindowingStrategy() != null) {
          coder = FullWindowedValueCoder.of(pc.getCoder(), pc.getWindowingStrategy().getWindowFn().windowCoder());
        }
        Coder<Object> wrapperCoder = ApexStreamTuple.ApexStreamTupleCoder.of(coder);
        CoderAdapterStreamCodec streamCodec = new CoderAdapterStreamCodec(wrapperCoder);
        dag.setInputPortAttribute(port, PortContext.STREAM_CODEC, streamCodec);
      }
    }
  }
}
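The Beam translator automates what a hand-written Apex application does in its own populateDAG: add operators, then wire output ports to input ports with addStream. A minimal sketch of that plain form, where LineReader and WordCounter are hypothetical operators standing in for any pair with compatible output/input ports:

import org.apache.hadoop.conf.Configuration;
import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;

public class WordCountApp implements StreamingApplication
{
  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    // LineReader and WordCounter are hypothetical operator classes
    LineReader reader = dag.addOperator("reader", new LineReader());
    WordCounter counter = dag.addOperator("counter", new WordCounter());
    // connect the reader's OutputPort to the counter's InputPort
    dag.addStream("lines", reader.output, counter.input);
  }
}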
Use of com.datatorrent.api.Operator.InputPort in project apex-core by apache.
The class PhysicalPlanTest, method testDefaultPartitioning:
@Test
public void testDefaultPartitioning() {
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.addStream("node1.outport1", node1.outport1, node2.inport2, node2.inport1);
  int initialPartitionCount = 5;
  OperatorMeta node2Decl = dag.getMeta(node2);
  node2Decl.getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(initialPartitionCount));
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  List<PTOperator> n2Instances = plan.getOperators(node2Decl);
  Assert.assertEquals("partition instances " + n2Instances, initialPartitionCount, n2Instances.size());
  List<Integer> assignedPartitionKeys = Lists.newArrayList();
  for (int i = 0; i < n2Instances.size(); i++) {
    PTOperator n2Partition = n2Instances.get(i);
    Assert.assertNotNull("partition keys null: " + n2Partition, n2Partition.getPartitionKeys());
    Map<InputPort<?>, PartitionKeys> pkeys = n2Partition.getPartitionKeys();
    // one port partitioned
    Assert.assertEquals("partition keys size: " + pkeys, 1, pkeys.size());
    InputPort<?> expectedPort = node2.inport2;
    Assert.assertEquals("partition port: " + pkeys, expectedPort, pkeys.keySet().iterator().next());
    Assert.assertEquals("partition mask: " + pkeys, "111", Integer.toBinaryString(pkeys.get(expectedPort).mask));
    Set<Integer> pks = pkeys.get(expectedPort).partitions;
    Assert.assertTrue("number partition keys: " + pkeys, pks.size() == 1 || pks.size() == 2);
    assignedPartitionKeys.addAll(pks);
  }
  int expectedMask = Integer.parseInt("111", 2);
  Assert.assertEquals("assigned partitions ", expectedMask + 1, assignedPartitionKeys.size());
  for (int i = 0; i <= expectedMask; i++) {
    Assert.assertTrue("" + assignedPartitionKeys, assignedPartitionKeys.contains(i));
  }
}
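The mask and key assertions follow from how tuples are routed to partitions: the low bits of the tuple's hash code, selected by the mask, pick a partition key slot. A sketch of that arithmetic for the numbers in this test:

// mask 0b111 = 7 selects 8 key slots (0..7), as the test asserts
int mask = Integer.parseInt("111", 2);
Object tuple = "some-tuple";
int slot = tuple.hashCode() & mask;
// With 5 partitions sharing 8 slots, StatelessPartitioner gives some
// partitions one slot and others two, hence the 1-or-2 assertion above.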
Use of com.datatorrent.api.Operator.InputPort in project apex-core by apache.
The class LogicalPlan, method addDAGToCurrentDAG:
@SuppressWarnings({"unchecked", "rawtypes"})
private void addDAGToCurrentDAG(ModuleMeta moduleMeta) {
  LogicalPlan subDag = moduleMeta.getDag();
  String subDAGName = moduleMeta.getName();
  String name;
  for (OperatorMeta operatorMeta : subDag.getAllOperators()) {
    name = subDAGName + MODULE_NAMESPACE_SEPARATOR + operatorMeta.getName();
    Operator op = this.addOperator(name, operatorMeta.getOperator());
    OperatorMeta operatorMetaNew = this.getMeta(op);
    operatorMetaNew.copyAttributesFrom(operatorMeta);
    operatorMetaNew.setModuleName(operatorMeta.getModuleName() == null ? subDAGName : subDAGName + MODULE_NAMESPACE_SEPARATOR + operatorMeta.getModuleName());
  }
  for (StreamMeta streamMeta : subDag.getAllStreams()) {
    OutputPortMeta sourceMeta = streamMeta.getSource();
    List<InputPort<?>> ports = new LinkedList<>();
    for (InputPortMeta inputPortMeta : streamMeta.getSinks()) {
      ports.add(inputPortMeta.getPort());
    }
    InputPort[] inputPorts = ports.toArray(new InputPort[]{});
    name = subDAGName + MODULE_NAMESPACE_SEPARATOR + streamMeta.getName();
    StreamMeta streamMetaNew = this.addStream(name, sourceMeta.getPort(), inputPorts);
    streamMetaNew.setLocality(streamMeta.getLocality());
  }
}
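This flattening runs for each module added to the parent DAG. The module side of the contract populates its own sub-DAG and binds its proxy ports to concrete ports so the streams copied above can resolve them. A sketch, assuming a single hypothetical ParserOperator with matching input and output ports:

import org.apache.hadoop.conf.Configuration;
import com.datatorrent.api.DAG;
import com.datatorrent.api.Module;

public class EnrichModule implements Module
{
  // proxy ports are placeholders that the parent DAG wires streams to
  public final transient ProxyInputPort<String> input = new ProxyInputPort<>();
  public final transient ProxyOutputPort<String> output = new ProxyOutputPort<>();

  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    ParserOperator parser = dag.addOperator("parser", new ParserOperator()); // hypothetical operator
    // bind the proxies to concrete ports inside the sub-DAG
    input.set(parser.input);
    output.set(parser.output);
  }
}

The parent application would then add the module with dag.addModule("enrich", new EnrichModule()) and connect streams to input and output as if they were ordinary operator ports.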