Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
The class ActivityClusterGraphRewriter, method rewriteIntraActivityCluster.
/**
 * Rewrites an activity cluster internally.
 *
 * @param ac
 *            the activity cluster to be rewritten
 */
private void rewriteIntraActivityCluster(ActivityCluster ac,
        Map<IActivity, SuperActivity> invertedActivitySuperActivityMap) {
    Map<ActivityId, IActivity> activities = ac.getActivityMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityInputMap = ac.getActivityInputMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityOutputMap = ac.getActivityOutputMap();
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> connectorActivityMap =
            ac.getConnectorActivityMap();
    ActivityClusterGraph acg = ac.getActivityClusterGraph();
    Map<ActivityId, IActivity> startActivities = new HashMap<ActivityId, IActivity>();
    Map<ActivityId, SuperActivity> superActivities = new HashMap<ActivityId, SuperActivity>();
    Map<ActivityId, Queue<IActivity>> toBeExpendedMap = new HashMap<ActivityId, Queue<IActivity>>();
    /**
     * Build the initial super activities from the start activities (those with no input).
     */
    for (Entry<ActivityId, IActivity> entry : activities.entrySet()) {
        ActivityId activityId = entry.getKey();
        IActivity activity = entry.getValue();
        if (activityInputMap.get(activityId) == null) {
            startActivities.put(activityId, activity);
            /**
             * Use the start activity's id as the id of the super activity.
             */
            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap,
                    activityId, activity);
        }
    }
    /**
     * Expand one-to-one-connected activities in BFS order. After the
     * while loop, the original activities are partitioned into
     * equivalence classes, one per super activity.
     */
    Map<ActivityId, SuperActivity> clonedSuperActivities = new HashMap<ActivityId, SuperActivity>();
    while (toBeExpendedMap.size() > 0) {
        clonedSuperActivities.clear();
        clonedSuperActivities.putAll(superActivities);
        for (Entry<ActivityId, SuperActivity> entry : clonedSuperActivities.entrySet()) {
            ActivityId superActivityId = entry.getKey();
            SuperActivity superActivity = entry.getValue();
            /**
             * Skip the super activity if it has already been swallowed.
             */
            if (superActivities.get(superActivityId) == null) {
                continue;
            }
            /**
             * Expand the super activity.
             */
            Queue<IActivity> toBeExpended = toBeExpendedMap.get(superActivityId);
            if (toBeExpended == null) {
                /**
                 * Nothing to expand.
                 */
                continue;
            }
            IActivity expendingActivity = toBeExpended.poll();
            List<IConnectorDescriptor> outputConnectors = activityOutputMap.get(expendingActivity.getActivityId());
            if (outputConnectors != null) {
                for (IConnectorDescriptor outputConn : outputConnectors) {
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints =
                            connectorActivityMap.get(outputConn.getConnectorId());
                    IActivity newActivity = endPoints.getRight().getLeft();
                    SuperActivity existingSuperActivity = invertedActivitySuperActivityMap.get(newActivity);
                    if (outputConn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
                        /**
                         * Expand the super activity along a one-to-one outbound connection.
                         */
                        if (existingSuperActivity == null) {
                            superActivity.addActivity(newActivity);
                            toBeExpended.add(newActivity);
                            invertedActivitySuperActivityMap.put(newActivity, superActivity);
                        } else {
                            /**
                             * The two activities are already in the same super activity.
                             */
                            if (existingSuperActivity == superActivity) {
                                continue;
                            }
                            /**
                             * Swallow an existing super activity.
                             */
                            swallowExistingSuperActivity(superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, superActivity, superActivityId,
                                    existingSuperActivity);
                        }
                    } else {
                        if (existingSuperActivity == null) {
                            /**
                             * Create a new super activity.
                             */
                            createNewSuperActivity(ac, superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, newActivity.getActivityId(), newActivity);
                        }
                    }
                }
            }
            /**
             * Remove the to-be-expanded queue if it is empty.
             */
            if (toBeExpended.size() == 0) {
                toBeExpendedMap.remove(superActivityId);
            }
        }
    }
    Map<ConnectorDescriptorId, IConnectorDescriptor> connMap = ac.getConnectorMap();
    Map<ConnectorDescriptorId, RecordDescriptor> connRecordDesc = ac.getConnectorRecordDescriptorMap();
    Map<SuperActivity, Integer> superActivityProducerPort = new HashMap<SuperActivity, Integer>();
    Map<SuperActivity, Integer> superActivityConsumerPort = new HashMap<SuperActivity, Integer>();
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        superActivityProducerPort.put(entry.getValue(), 0);
        superActivityConsumerPort.put(entry.getValue(), 0);
    }
    /**
     * Create a new activity cluster to replace the old one.
     */
    ActivityCluster newActivityCluster = new ActivityCluster(acg, ac.getId());
    newActivityCluster.setConnectorPolicyAssignmentPolicy(ac.getConnectorPolicyAssignmentPolicy());
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        newActivityCluster.addActivity(entry.getValue());
        acg.getActivityMap().put(entry.getKey(), newActivityCluster);
    }
    /**
     * Set up connectors: either inside a super activity or between super activities.
     */
    for (Entry<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> entry : connectorActivityMap
            .entrySet()) {
        ConnectorDescriptorId connectorId = entry.getKey();
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = entry.getValue();
        IActivity producerActivity = endPoints.getLeft().getLeft();
        IActivity consumerActivity = endPoints.getRight().getLeft();
        int producerPort = endPoints.getLeft().getRight();
        int consumerPort = endPoints.getRight().getRight();
        RecordDescriptor recordDescriptor = connRecordDesc.get(connectorId);
        IConnectorDescriptor conn = connMap.get(connectorId);
        if (conn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
            /**
             * Connection edge between inner activities of one super activity.
             */
            SuperActivity residingSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            residingSuperActivity.connect(conn, producerActivity, producerPort, consumerActivity, consumerPort,
                    recordDescriptor);
        } else {
            /**
             * Connection edge between super activities.
             */
            SuperActivity producerSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            SuperActivity consumerSuperActivity = invertedActivitySuperActivityMap.get(consumerActivity);
            int producerSAPort = superActivityProducerPort.get(producerSuperActivity);
            int consumerSAPort = superActivityConsumerPort.get(consumerSuperActivity);
            newActivityCluster.addConnector(conn);
            newActivityCluster.connect(conn, producerSuperActivity, producerSAPort, consumerSuperActivity,
                    consumerSAPort, recordDescriptor);
            /**
             * Bridge the ports: map the super activity's cluster-level port to the inner activity's port.
             */
            producerSuperActivity.setClusterOutputIndex(producerSAPort, producerActivity.getActivityId(),
                    producerPort);
            consumerSuperActivity.setClusterInputIndex(consumerSAPort, consumerActivity.getActivityId(),
                    consumerPort);
            acg.getConnectorMap().put(connectorId, newActivityCluster);
            /**
             * Advance the port numbers for the producer and consumer.
             */
            superActivityProducerPort.put(producerSuperActivity, ++producerSAPort);
            superActivityConsumerPort.put(consumerSuperActivity, ++consumerSAPort);
        }
    }
    /**
     * Set up the roots of the new activity cluster: super activities with no output connector.
     */
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        List<IConnectorDescriptor> connIds = newActivityCluster.getActivityOutputMap().get(entry.getKey());
        if (connIds == null || connIds.size() == 0) {
            newActivityCluster.addRoot(entry.getValue());
        }
    }
    /**
     * Set up the blocked2Blocker mapping, which will be updated in the rewriteInterActivityCluster call.
     */
    newActivityCluster.getBlocked2BlockerMap().putAll(ac.getBlocked2BlockerMap());
    /**
     * Replace the old activity cluster with the new one.
     */
    acg.getActivityClusterMap().put(ac.getId(), newActivityCluster);
}
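
The heart of the rewrite above is the BFS expansion: activities joined by one-to-one connectors land in the same super activity, and everything else starts a fresh one. Below is a minimal, self-contained sketch of that grouping idea, using toy node/edge types rather than the Hyracks API; all names are illustrative, and it computes plain connected components over one-to-one edges instead of replicating the swallow logic.

import java.util.*;

// Toy version of the grouping step: nodes linked by ONE_TO_ONE edges
// end up in the same group, mirroring how activities are merged into
// super activities; other edge kinds act as group boundaries.
public class OneToOneGrouping {
    enum EdgeKind { ONE_TO_ONE, M_TO_N }
    record Edge(String from, String to, EdgeKind kind) {}

    static Map<String, Integer> group(List<String> nodes, List<Edge> edges) {
        Map<String, Integer> groupOf = new LinkedHashMap<>();
        int nextGroup = 0;
        for (String start : nodes) {
            if (groupOf.containsKey(start)) {
                continue;
            }
            int g = nextGroup++;
            Deque<String> queue = new ArrayDeque<>(List.of(start));
            while (!queue.isEmpty()) {
                String cur = queue.poll();
                if (groupOf.putIfAbsent(cur, g) != null) {
                    continue; // already grouped (the "already swallowed" case)
                }
                for (Edge e : edges) {
                    if (e.kind() != EdgeKind.ONE_TO_ONE) {
                        continue; // non-one-to-one edges do not merge groups
                    }
                    if (e.from().equals(cur) && !groupOf.containsKey(e.to())) {
                        queue.add(e.to());
                    }
                    if (e.to().equals(cur) && !groupOf.containsKey(e.from())) {
                        queue.add(e.from());
                    }
                }
            }
        }
        return groupOf;
    }

    public static void main(String[] args) {
        List<Edge> edges = List.of(
                new Edge("scan", "assign", EdgeKind.ONE_TO_ONE), // merged
                new Edge("assign", "join", EdgeKind.M_TO_N),     // boundary
                new Edge("join", "write", EdgeKind.ONE_TO_ONE)); // merged
        // prints {scan=0, assign=0, join=1, write=1}
        System.out.println(group(List.of("scan", "assign", "join", "write"), edges));
    }
}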
Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
The class JobActivityGraphBuilder, method finish.
public void finish() {
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> caMap =
            jag.getConnectorActivityMap();
    for (Map.Entry<ConnectorDescriptorId, Pair<IActivity, Integer>> e : connectorProducerMap.entrySet()) {
        ConnectorDescriptorId cdId = e.getKey();
        Pair<IActivity, Integer> producer = e.getValue();
        Pair<IActivity, Integer> consumer = connectorConsumerMap.get(cdId);
        caMap.put(cdId, Pair.of(producer, consumer));
    }
}
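
finish() is the join step of a two-phase pattern: producer and consumer endpoints are recorded separately while the job graph is wired up, then zipped into a single connector-to-endpoints map at the end. Here is a simplified, generic sketch of that pattern; the class and method names are made up for illustration, and only org.apache.commons.lang3.tuple.Pair matches the snippet above.

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.tuple.Pair;

// Hypothetical two-phase endpoint recorder: record*() is called as each
// edge is visited, join() mirrors finish() by pairing the two halves.
class EndpointJoiner<C, A> {
    private final Map<C, Pair<A, Integer>> producers = new HashMap<>();
    private final Map<C, Pair<A, Integer>> consumers = new HashMap<>();

    void recordProducer(C connId, A activity, int port) {
        producers.put(connId, Pair.of(activity, port));
    }

    void recordConsumer(C connId, A activity, int port) {
        consumers.put(connId, Pair.of(activity, port));
    }

    // For every producer, look up the consumer recorded under the same
    // connector id and emit the combined entry.
    Map<C, Pair<Pair<A, Integer>, Pair<A, Integer>>> join() {
        Map<C, Pair<Pair<A, Integer>, Pair<A, Integer>>> joined = new HashMap<>();
        for (Map.Entry<C, Pair<A, Integer>> e : producers.entrySet()) {
            joined.put(e.getKey(), Pair.of(e.getValue(), consumers.get(e.getKey())));
        }
        return joined;
    }
}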
Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
The class JobSpecification, method createConnectorDescriptor.
@Override
public ConnectorDescriptorId createConnectorDescriptor(IConnectorDescriptor conn) {
    ConnectorDescriptorId cdId = new ConnectorDescriptorId(connectorIdCounter++);
    conn.setConnectorId(cdId);
    connMap.put(cdId, conn);
    return cdId;
}
Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
The class CCNCFunctions, method readPartitionId.
private static PartitionId readPartitionId(DataInputStream dis) throws IOException {
    long jobId = dis.readLong();
    int cdid = dis.readInt();
    int senderIndex = dis.readInt();
    int receiverIndex = dis.readInt();
    PartitionId pid = new PartitionId(new JobId(jobId), new ConnectorDescriptorId(cdid), senderIndex, receiverIndex);
    return pid;
}
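
The read side implies the wire format: a long job id followed by three ints. Assuming that symmetry, the matching write side would look roughly like the sketch below; the getter names are inferred from the constructor arguments above and should be treated as assumptions.

// Hypothetical write-side counterpart, mirroring readPartitionId's order.
private static void writePartitionId(DataOutputStream dos, PartitionId pid) throws IOException {
    dos.writeLong(pid.getJobId().getId());                // long jobId
    dos.writeInt(pid.getConnectorDescriptorId().getId()); // int cdid
    dos.writeInt(pid.getSenderIndex());                   // int senderIndex
    dos.writeInt(pid.getReceiverIndex());                 // int receiverIndex
}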
Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
The class FeedOperations, method combineIntakeCollectJobs.
private static JobSpecification combineIntakeCollectJobs(MetadataProvider metadataProvider, Feed feed,
        JobSpecification intakeJob, List<JobSpecification> jobsList, List<FeedConnection> feedConnections,
        String[] intakeLocations) throws AlgebricksException, HyracksDataException {
    JobSpecification jobSpec = new JobSpecification(intakeJob.getFrameSize());
    // copy the ingestor
    FeedIntakeOperatorDescriptor firstOp =
            (FeedIntakeOperatorDescriptor) intakeJob.getOperatorMap().get(new OperatorDescriptorId(0));
    FeedIntakeOperatorDescriptor ingestionOp;
    if (firstOp.getAdaptorFactory() == null) {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorLibraryName(),
                firstOp.getAdaptorFactoryClassName(), firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(),
                firstOp.getOutputRecordDescriptors()[0]);
    } else {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorFactory(),
                firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(),
                firstOp.getOutputRecordDescriptors()[0]);
    }
    // create the replicator
    ReplicateOperatorDescriptor replicateOp =
            new ReplicateOperatorDescriptor(jobSpec, ingestionOp.getOutputRecordDescriptors()[0], jobsList.size());
    jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), ingestionOp, 0, replicateOp, 0);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, ingestionOp, intakeLocations);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, replicateOp, intakeLocations);
    // loop over the sub-jobs to copy operators and connections
    Map<OperatorDescriptorId, OperatorDescriptorId> operatorIdMapping = new HashMap<>();
    Map<ConnectorDescriptorId, ConnectorDescriptorId> connectorIdMapping = new HashMap<>();
    Map<OperatorDescriptorId, List<LocationConstraint>> operatorLocations = new HashMap<>();
    Map<OperatorDescriptorId, Integer> operatorCounts = new HashMap<>();
    List<JobId> jobIds = new ArrayList<>();
    FeedMetaOperatorDescriptor metaOp;
    for (int iter1 = 0; iter1 < jobsList.size(); iter1++) {
        FeedConnection curFeedConnection = feedConnections.get(iter1);
        JobSpecification subJob = jobsList.get(iter1);
        operatorIdMapping.clear();
        Map<OperatorDescriptorId, IOperatorDescriptor> operatorsMap = subJob.getOperatorMap();
        String datasetName = feedConnections.get(iter1).getDatasetName();
        FeedConnectionId feedConnectionId = new FeedConnectionId(ingestionOp.getEntityId(), datasetName);
        FeedPolicyEntity feedPolicyEntity =
                FeedMetadataUtil.validateIfPolicyExists(curFeedConnection.getDataverseName(),
                        curFeedConnection.getPolicyName(), metadataProvider.getMetadataTxnContext());
        for (Map.Entry<OperatorDescriptorId, IOperatorDescriptor> entry : operatorsMap.entrySet()) {
            IOperatorDescriptor opDesc = entry.getValue();
            OperatorDescriptorId oldId = opDesc.getOperatorId();
            OperatorDescriptorId opId = null;
            if (opDesc instanceof LSMTreeInsertDeleteOperatorDescriptor
                    && ((LSMTreeInsertDeleteOperatorDescriptor) opDesc).isPrimary()) {
                metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc,
                        feedPolicyEntity.getProperties(), FeedRuntimeType.STORE);
                opId = metaOp.getOperatorId();
                opDesc.setOperatorId(opId);
            } else {
                if (opDesc instanceof AlgebricksMetaOperatorDescriptor) {
                    AlgebricksMetaOperatorDescriptor algOp = (AlgebricksMetaOperatorDescriptor) opDesc;
                    IPushRuntimeFactory[] runtimeFactories = algOp.getPipeline().getRuntimeFactories();
                    // tweak the assign operator to work with messages
                    if (runtimeFactories[0] instanceof AssignRuntimeFactory && runtimeFactories.length > 1) {
                        IConnectorDescriptor connectorDesc =
                                subJob.getOperatorInputMap().get(opDesc.getOperatorId()).get(0);
                        // anything on the network interface needs to be message-compatible
                        if (connectorDesc instanceof MToNPartitioningConnectorDescriptor) {
                            metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc,
                                    feedPolicyEntity.getProperties(), FeedRuntimeType.COMPUTE);
                            opId = metaOp.getOperatorId();
                            opDesc.setOperatorId(opId);
                        }
                    }
                }
                if (opId == null) {
                    opId = jobSpec.createOperatorDescriptorId(opDesc);
                }
            }
            operatorIdMapping.put(oldId, opId);
        }
        // copy connectors
        connectorIdMapping.clear();
        for (Entry<ConnectorDescriptorId, IConnectorDescriptor> entry : subJob.getConnectorMap().entrySet()) {
            IConnectorDescriptor connDesc = entry.getValue();
            ConnectorDescriptorId newConnId;
            if (connDesc instanceof MToNPartitioningConnectorDescriptor) {
                MToNPartitioningConnectorDescriptor m2nConn = (MToNPartitioningConnectorDescriptor) connDesc;
                connDesc = new MToNPartitioningWithMessageConnectorDescriptor(jobSpec,
                        m2nConn.getTuplePartitionComputerFactory());
                newConnId = connDesc.getConnectorId();
            } else {
                newConnId = jobSpec.createConnectorDescriptor(connDesc);
            }
            connectorIdMapping.put(entry.getKey(), newConnId);
        }
        // make connections between operators
        for (Entry<ConnectorDescriptorId, Pair<Pair<IOperatorDescriptor, Integer>, Pair<IOperatorDescriptor, Integer>>> entry : subJob
                .getConnectorOperatorMap().entrySet()) {
            ConnectorDescriptorId newId = connectorIdMapping.get(entry.getKey());
            IConnectorDescriptor connDesc = jobSpec.getConnectorMap().get(newId);
            Pair<IOperatorDescriptor, Integer> leftOp = entry.getValue().getLeft();
            Pair<IOperatorDescriptor, Integer> rightOp = entry.getValue().getRight();
            IOperatorDescriptor leftOpDesc = jobSpec.getOperatorMap().get(leftOp.getLeft().getOperatorId());
            IOperatorDescriptor rightOpDesc = jobSpec.getOperatorMap().get(rightOp.getLeft().getOperatorId());
            if (leftOp.getLeft() instanceof FeedCollectOperatorDescriptor) {
                jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), replicateOp, iter1, leftOpDesc,
                        leftOp.getRight());
            }
            jobSpec.connect(connDesc, leftOpDesc, leftOp.getRight(), rightOpDesc, rightOp.getRight());
        }
        // prepare for setting partition constraints
        operatorLocations.clear();
        operatorCounts.clear();
        for (Constraint constraint : subJob.getUserConstraints()) {
            LValueConstraintExpression lexpr = constraint.getLValue();
            ConstraintExpression cexpr = constraint.getRValue();
            OperatorDescriptorId opId;
            switch (lexpr.getTag()) {
                case PARTITION_COUNT:
                    opId = ((PartitionCountExpression) lexpr).getOperatorDescriptorId();
                    operatorCounts.put(operatorIdMapping.get(opId),
                            (int) ((ConstantExpression) cexpr).getValue());
                    break;
                case PARTITION_LOCATION:
                    opId = ((PartitionLocationExpression) lexpr).getOperatorDescriptorId();
                    IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(operatorIdMapping.get(opId));
                    List<LocationConstraint> locations = operatorLocations.get(opDesc.getOperatorId());
                    if (locations == null) {
                        locations = new ArrayList<>();
                        operatorLocations.put(opDesc.getOperatorId(), locations);
                    }
                    String location = (String) ((ConstantExpression) cexpr).getValue();
                    LocationConstraint lc =
                            new LocationConstraint(location, ((PartitionLocationExpression) lexpr).getPartition());
                    locations.add(lc);
                    break;
                default:
                    break;
            }
        }
        // set absolute location constraints
        for (Entry<OperatorDescriptorId, List<LocationConstraint>> entry : operatorLocations.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            // sort by partition index so that locations[j] below holds the location of partition j
            Collections.sort(entry.getValue(),
                    (LocationConstraint o1, LocationConstraint o2) -> o1.partition - o2.partition);
            String[] locations = new String[entry.getValue().size()];
            for (int j = 0; j < locations.length; ++j) {
                locations[j] = entry.getValue().get(j).location;
            }
            PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, opDesc, locations);
        }
        // set count constraints
        for (Entry<OperatorDescriptorId, Integer> entry : operatorCounts.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            if (!operatorLocations.keySet().contains(entry.getKey())) {
                PartitionConstraintHelper.addPartitionCountConstraint(jobSpec, opDesc, entry.getValue());
            }
        }
        // roots
        for (OperatorDescriptorId root : subJob.getRoots()) {
            jobSpec.addRoot(jobSpec.getOperatorMap().get(operatorIdMapping.get(root)));
        }
        jobIds.add(((JobEventListenerFactory) subJob.getJobletEventListenerFactory()).getJobId());
    }
    // jobEventListenerFactory
    jobSpec.setJobletEventListenerFactory(new MultiTransactionJobletEventListenerFactory(jobIds, true));
    // useConnectorSchedulingPolicy
    jobSpec.setUseConnectorPolicyForScheduling(jobsList.get(0).isUseConnectorPolicyForScheduling());
    // connectorAssignmentPolicy
    jobSpec.setConnectorPolicyAssignmentPolicy(jobsList.get(0).getConnectorPolicyAssignmentPolicy());
    return jobSpec;
}
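
combineIntakeCollectJobs is essentially graph cloning: each sub-job's operators are copied into the combined job while operatorIdMapping and connectorIdMapping record old-id-to-new-id pairs, and edges and constraints are then re-wired through those maps. The same pattern in miniature, with toy integer ids standing in for the descriptor ids:

import java.util.*;

// Clone a tiny operator graph: copy nodes, remember oldId -> newId,
// then rebuild edges by translating both endpoints through the map.
public class GraphClone {
    record Edge(int from, int to) {}

    public static void main(String[] args) {
        Map<Integer, String> oldOps = new LinkedHashMap<>();
        oldOps.put(0, "ingest");
        oldOps.put(1, "compute");
        oldOps.put(2, "store");
        List<Edge> oldEdges = List.of(new Edge(0, 1), new Edge(1, 2));

        Map<Integer, Integer> idMapping = new HashMap<>(); // oldId -> newId
        Map<Integer, String> newOps = new LinkedHashMap<>();
        int nextId = 100; // ids in the combined job need not match the sub-job's
        for (Map.Entry<Integer, String> e : oldOps.entrySet()) {
            int newId = nextId++;
            idMapping.put(e.getKey(), newId);
            newOps.put(newId, e.getValue());
        }
        List<Edge> newEdges = new ArrayList<>();
        for (Edge e : oldEdges) {
            newEdges.add(new Edge(idMapping.get(e.from()), idMapping.get(e.to())));
        }
        // prints {100=ingest, 101=compute, 102=store} [Edge[from=100, to=101], Edge[from=101, to=102]]
        System.out.println(newOps + " " + newEdges);
    }
}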