Example 6 with Pair

Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project azure-tools-for-java by Microsoft.

The class RedisCacheModule, method refreshItems:

@Override
protected void refreshItems() throws AzureCmdException {
    List<Pair<String, String>> failedSubscriptions = new ArrayList<>();
    try {
        AzureManager azureManager = AuthMethodManager.getInstance().getAzureManager();
        // not signed in
        if (azureManager == null) {
            return;
        }
        SubscriptionManager subscriptionManager = azureManager.getSubscriptionManager();
        Set<String> sidList = subscriptionManager.getAccountSidList();
        for (String sid : sidList) {
            try {
                Azure azure = azureManager.getAzure(sid);
                for (RedisCache cache : azure.redisCaches().list()) {
                    addChildNode(new RedisCacheNode(this, sid, cache));
                }
            } catch (Exception ex) {
                failedSubscriptions.add(new ImmutablePair<>(sid, ex.getMessage()));
                continue;
            }
        }
    } catch (Exception ex) {
        DefaultLoader.getUIHelper().logError("An error occurred when trying to load Redis Caches\n\n" + ex.getMessage(), ex);
    }
    if (!failedSubscriptions.isEmpty()) {
        StringBuilder errorMessage = new StringBuilder("An error occurred when trying to load Redis Caches for the subscriptions:\n\n");
        for (Pair<String, String> error : failedSubscriptions) {
            errorMessage.append(error.getKey()).append(": ").append(error.getValue()).append("\n");
        }
        DefaultLoader.getUIHelper().logError("An error occurred when trying to load Redis Caches\n\n" + errorMessage.toString(), null);
    }
}
Also used : Azure(com.microsoft.azure.management.Azure) RedisCache(com.microsoft.azure.management.redis.RedisCache) AzureManager(com.microsoft.azuretools.sdkmanage.AzureManager) ArrayList(java.util.ArrayList) SubscriptionManager(com.microsoft.azuretools.authmanage.SubscriptionManager) AzureCmdException(com.microsoft.azuretools.azurecommons.helpers.AzureCmdException) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) Pair(org.apache.commons.lang3.tuple.Pair)
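This example (and the StorageModule one that follows) shows the same pattern: iterate the subscription IDs, record a (subscriptionId, errorMessage) tuple for each one that fails, and report the failures in a single batch. Below is a minimal, self-contained sketch of that pattern; SubscriptionTask, runForAll and formatReport are hypothetical names for illustration and are not part of azure-tools-for-java.

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class FailureCollectionSketch {

    // Hypothetical stand-in for the per-subscription work (e.g. listing Redis caches).
    interface SubscriptionTask {
        void run(String subscriptionId) throws Exception;
    }

    static List<Pair<String, String>> runForAll(List<String> subscriptionIds, SubscriptionTask task) {
        List<Pair<String, String>> failures = new ArrayList<>();
        for (String sid : subscriptionIds) {
            try {
                task.run(sid);
            } catch (Exception ex) {
                // Record which subscription failed together with the reason.
                failures.add(new ImmutablePair<>(sid, ex.getMessage()));
            }
        }
        return failures;
    }

    static String formatReport(List<Pair<String, String>> failures) {
        StringBuilder report = new StringBuilder("Failed subscriptions:\n");
        for (Pair<String, String> failure : failures) {
            // Pair implements Map.Entry, so getKey/getValue mirror getLeft/getRight.
            report.append(failure.getKey()).append(": ").append(failure.getValue()).append('\n');
        }
        return report.toString();
    }
}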

Example 7 with Pair

Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project azure-tools-for-java by Microsoft.

The class StorageModule, method refreshItems:

@Override
protected void refreshItems() throws AzureCmdException {
    List<Pair<String, String>> failedSubscriptions = new ArrayList<>();
    try {
        AzureManager azureManager = AuthMethodManager.getInstance().getAzureManager();
        // not signed in
        if (azureManager == null) {
            return;
        }
        SubscriptionManager subscriptionManager = azureManager.getSubscriptionManager();
        Set<String> sidList = subscriptionManager.getAccountSidList();
        for (String sid : sidList) {
            try {
                Azure azure = azureManager.getAzure(sid);
                List<com.microsoft.azure.management.storage.StorageAccount> storageAccounts = azure.storageAccounts().list();
                for (StorageAccount sm : storageAccounts) {
                    addChildNode(new StorageNode(this, sid, sm));
                }
            } catch (Exception ex) {
                failedSubscriptions.add(new ImmutablePair<>(sid, ex.getMessage()));
                continue;
            }
        }
    } catch (Exception ex) {
        DefaultLoader.getUIHelper().logError("An error occurred when trying to load Storage Accounts\n\n" + ex.getMessage(), ex);
    }
    // load External Accounts
    for (ClientStorageAccount clientStorageAccount : ExternalStorageHelper.getList(getProject())) {
        ClientStorageAccount storageAccount = StorageClientSDKManager.getManager().getStorageAccount(clientStorageAccount.getConnectionString());
        // addChildNode(new ExternalStorageNode(this, storageAccount));
    }
    if (!failedSubscriptions.isEmpty()) {
        StringBuilder errorMessage = new StringBuilder("An error occurred when trying to load Storage Accounts for the subscriptions:\n\n");
        for (Pair<String, String> error : failedSubscriptions) {
            errorMessage.append(error.getKey()).append(": ").append(error.getValue()).append("\n");
        }
        DefaultLoader.getUIHelper().logError("An error occurred when trying to load Storage Accounts\n\n" + errorMessage.toString(), null);
    }
}
Also used : Azure(com.microsoft.azure.management.Azure) AzureManager(com.microsoft.azuretools.sdkmanage.AzureManager) ArrayList(java.util.ArrayList) ClientStorageAccount(com.microsoft.tooling.msservices.model.storage.ClientStorageAccount) SubscriptionManager(com.microsoft.azuretools.authmanage.SubscriptionManager) AzureCmdException(com.microsoft.azuretools.azurecommons.helpers.AzureCmdException) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) StorageAccount(com.microsoft.azure.management.storage.StorageAccount) Pair(org.apache.commons.lang3.tuple.Pair)

Example 8 with Pair

Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project asterixdb by apache.

The class FeedOperations, method combineIntakeCollectJobs:

private static JobSpecification combineIntakeCollectJobs(MetadataProvider metadataProvider, Feed feed, JobSpecification intakeJob, List<JobSpecification> jobsList, List<FeedConnection> feedConnections, String[] intakeLocations) throws AlgebricksException, HyracksDataException {
    JobSpecification jobSpec = new JobSpecification(intakeJob.getFrameSize());
    // copy ingestor
    FeedIntakeOperatorDescriptor firstOp = (FeedIntakeOperatorDescriptor) intakeJob.getOperatorMap().get(new OperatorDescriptorId(0));
    FeedIntakeOperatorDescriptor ingestionOp;
    if (firstOp.getAdaptorFactory() == null) {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorLibraryName(), firstOp.getAdaptorFactoryClassName(), firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(), firstOp.getOutputRecordDescriptors()[0]);
    } else {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorFactory(), firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(), firstOp.getOutputRecordDescriptors()[0]);
    }
    // create replicator
    ReplicateOperatorDescriptor replicateOp = new ReplicateOperatorDescriptor(jobSpec, ingestionOp.getOutputRecordDescriptors()[0], jobsList.size());
    jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), ingestionOp, 0, replicateOp, 0);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, ingestionOp, intakeLocations);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, replicateOp, intakeLocations);
    // Loop over the jobs to copy operators and connections
    Map<OperatorDescriptorId, OperatorDescriptorId> operatorIdMapping = new HashMap<>();
    Map<ConnectorDescriptorId, ConnectorDescriptorId> connectorIdMapping = new HashMap<>();
    Map<OperatorDescriptorId, List<LocationConstraint>> operatorLocations = new HashMap<>();
    Map<OperatorDescriptorId, Integer> operatorCounts = new HashMap<>();
    List<JobId> jobIds = new ArrayList<>();
    FeedMetaOperatorDescriptor metaOp;
    for (int iter1 = 0; iter1 < jobsList.size(); iter1++) {
        FeedConnection curFeedConnection = feedConnections.get(iter1);
        JobSpecification subJob = jobsList.get(iter1);
        operatorIdMapping.clear();
        Map<OperatorDescriptorId, IOperatorDescriptor> operatorsMap = subJob.getOperatorMap();
        String datasetName = feedConnections.get(iter1).getDatasetName();
        FeedConnectionId feedConnectionId = new FeedConnectionId(ingestionOp.getEntityId(), datasetName);
        FeedPolicyEntity feedPolicyEntity = FeedMetadataUtil.validateIfPolicyExists(curFeedConnection.getDataverseName(), curFeedConnection.getPolicyName(), metadataProvider.getMetadataTxnContext());
        for (Map.Entry<OperatorDescriptorId, IOperatorDescriptor> entry : operatorsMap.entrySet()) {
            IOperatorDescriptor opDesc = entry.getValue();
            OperatorDescriptorId oldId = opDesc.getOperatorId();
            OperatorDescriptorId opId = null;
            if (opDesc instanceof LSMTreeInsertDeleteOperatorDescriptor && ((LSMTreeInsertDeleteOperatorDescriptor) opDesc).isPrimary()) {
                metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc, feedPolicyEntity.getProperties(), FeedRuntimeType.STORE);
                opId = metaOp.getOperatorId();
                opDesc.setOperatorId(opId);
            } else {
                if (opDesc instanceof AlgebricksMetaOperatorDescriptor) {
                    AlgebricksMetaOperatorDescriptor algOp = (AlgebricksMetaOperatorDescriptor) opDesc;
                    IPushRuntimeFactory[] runtimeFactories = algOp.getPipeline().getRuntimeFactories();
                    // Tweak AssignOp to work with messages
                    if (runtimeFactories[0] instanceof AssignRuntimeFactory && runtimeFactories.length > 1) {
                        IConnectorDescriptor connectorDesc = subJob.getOperatorInputMap().get(opDesc.getOperatorId()).get(0);
                        // anything on the network interface needs to be message compatible
                        if (connectorDesc instanceof MToNPartitioningConnectorDescriptor) {
                            metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc, feedPolicyEntity.getProperties(), FeedRuntimeType.COMPUTE);
                            opId = metaOp.getOperatorId();
                            opDesc.setOperatorId(opId);
                        }
                    }
                }
                if (opId == null) {
                    opId = jobSpec.createOperatorDescriptorId(opDesc);
                }
            }
            operatorIdMapping.put(oldId, opId);
        }
        // copy connectors
        connectorIdMapping.clear();
        for (Entry<ConnectorDescriptorId, IConnectorDescriptor> entry : subJob.getConnectorMap().entrySet()) {
            IConnectorDescriptor connDesc = entry.getValue();
            ConnectorDescriptorId newConnId;
            if (connDesc instanceof MToNPartitioningConnectorDescriptor) {
                MToNPartitioningConnectorDescriptor m2nConn = (MToNPartitioningConnectorDescriptor) connDesc;
                connDesc = new MToNPartitioningWithMessageConnectorDescriptor(jobSpec, m2nConn.getTuplePartitionComputerFactory());
                newConnId = connDesc.getConnectorId();
            } else {
                newConnId = jobSpec.createConnectorDescriptor(connDesc);
            }
            connectorIdMapping.put(entry.getKey(), newConnId);
        }
        // make connections between operators
        for (Entry<ConnectorDescriptorId, Pair<Pair<IOperatorDescriptor, Integer>, Pair<IOperatorDescriptor, Integer>>> entry : subJob.getConnectorOperatorMap().entrySet()) {
            ConnectorDescriptorId newId = connectorIdMapping.get(entry.getKey());
            IConnectorDescriptor connDesc = jobSpec.getConnectorMap().get(newId);
            Pair<IOperatorDescriptor, Integer> leftOp = entry.getValue().getLeft();
            Pair<IOperatorDescriptor, Integer> rightOp = entry.getValue().getRight();
            IOperatorDescriptor leftOpDesc = jobSpec.getOperatorMap().get(leftOp.getLeft().getOperatorId());
            IOperatorDescriptor rightOpDesc = jobSpec.getOperatorMap().get(rightOp.getLeft().getOperatorId());
            if (leftOp.getLeft() instanceof FeedCollectOperatorDescriptor) {
                jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), replicateOp, iter1, leftOpDesc, leftOp.getRight());
            }
            jobSpec.connect(connDesc, leftOpDesc, leftOp.getRight(), rightOpDesc, rightOp.getRight());
        }
        // prepare for setting partition constraints
        operatorLocations.clear();
        operatorCounts.clear();
        for (Constraint constraint : subJob.getUserConstraints()) {
            LValueConstraintExpression lexpr = constraint.getLValue();
            ConstraintExpression cexpr = constraint.getRValue();
            OperatorDescriptorId opId;
            switch(lexpr.getTag()) {
                case PARTITION_COUNT:
                    opId = ((PartitionCountExpression) lexpr).getOperatorDescriptorId();
                    operatorCounts.put(operatorIdMapping.get(opId), (int) ((ConstantExpression) cexpr).getValue());
                    break;
                case PARTITION_LOCATION:
                    opId = ((PartitionLocationExpression) lexpr).getOperatorDescriptorId();
                    IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(operatorIdMapping.get(opId));
                    List<LocationConstraint> locations = operatorLocations.get(opDesc.getOperatorId());
                    if (locations == null) {
                        locations = new ArrayList<>();
                        operatorLocations.put(opDesc.getOperatorId(), locations);
                    }
                    String location = (String) ((ConstantExpression) cexpr).getValue();
                    LocationConstraint lc = new LocationConstraint(location, ((PartitionLocationExpression) lexpr).getPartition());
                    locations.add(lc);
                    break;
                default:
                    break;
            }
        }
        // set absolute location constraints
        for (Entry<OperatorDescriptorId, List<LocationConstraint>> entry : operatorLocations.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            // why do we need to sort?
            Collections.sort(entry.getValue(), (LocationConstraint o1, LocationConstraint o2) -> {
                return o1.partition - o2.partition;
            });
            String[] locations = new String[entry.getValue().size()];
            for (int j = 0; j < locations.length; ++j) {
                locations[j] = entry.getValue().get(j).location;
            }
            PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, opDesc, locations);
        }
        // set count constraints
        for (Entry<OperatorDescriptorId, Integer> entry : operatorCounts.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            if (!operatorLocations.keySet().contains(entry.getKey())) {
                PartitionConstraintHelper.addPartitionCountConstraint(jobSpec, opDesc, entry.getValue());
            }
        }
        // roots
        for (OperatorDescriptorId root : subJob.getRoots()) {
            jobSpec.addRoot(jobSpec.getOperatorMap().get(operatorIdMapping.get(root)));
        }
        jobIds.add(((JobEventListenerFactory) subJob.getJobletEventListenerFactory()).getJobId());
    }
    // jobEventListenerFactory
    jobSpec.setJobletEventListenerFactory(new MultiTransactionJobletEventListenerFactory(jobIds, true));
    // useConnectorSchedulingPolicy
    jobSpec.setUseConnectorPolicyForScheduling(jobsList.get(0).isUseConnectorPolicyForScheduling());
    // connectorAssignmentPolicy
    jobSpec.setConnectorPolicyAssignmentPolicy(jobsList.get(0).getConnectorPolicyAssignmentPolicy());
    return jobSpec;
}
Also used : HashMap(java.util.HashMap) AlgebricksPartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) Constraint(org.apache.hyracks.api.constraints.Constraint) LocationConstraint(org.apache.asterix.metadata.feeds.LocationConstraint) ConstantExpression(org.apache.hyracks.api.constraints.expressions.ConstantExpression) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) ArrayList(java.util.ArrayList) OneToOneConnectorDescriptor(org.apache.hyracks.dataflow.std.connectors.OneToOneConnectorDescriptor) List(java.util.List) AlgebricksMetaOperatorDescriptor(org.apache.hyracks.algebricks.runtime.operators.meta.AlgebricksMetaOperatorDescriptor) LValueConstraintExpression(org.apache.hyracks.api.constraints.expressions.LValueConstraintExpression) ReplicateOperatorDescriptor(org.apache.hyracks.dataflow.std.misc.ReplicateOperatorDescriptor) Map(java.util.Map) FeedCollectOperatorDescriptor(org.apache.asterix.external.operators.FeedCollectOperatorDescriptor) FeedPolicyEntity(org.apache.asterix.metadata.entities.FeedPolicyEntity) JobSpecification(org.apache.hyracks.api.job.JobSpecification) FeedIntakeOperatorDescriptor(org.apache.asterix.external.operators.FeedIntakeOperatorDescriptor) JobId(org.apache.asterix.common.transactions.JobId) Pair(org.apache.commons.lang3.tuple.Pair) IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) MToNPartitioningWithMessageConnectorDescriptor(org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor) OperatorDescriptorId(org.apache.hyracks.api.dataflow.OperatorDescriptorId) ConstraintExpression(org.apache.hyracks.api.constraints.expressions.ConstraintExpression) FeedConnection(org.apache.asterix.metadata.entities.FeedConnection) MultiTransactionJobletEventListenerFactory(org.apache.asterix.runtime.job.listener.MultiTransactionJobletEventListenerFactory) MToNPartitioningConnectorDescriptor(org.apache.hyracks.dataflow.std.connectors.MToNPartitioningConnectorDescriptor) AssignRuntimeFactory(org.apache.hyracks.algebricks.runtime.operators.std.AssignRuntimeFactory) IPushRuntimeFactory(org.apache.hyracks.algebricks.runtime.base.IPushRuntimeFactory) FeedMetaOperatorDescriptor(org.apache.asterix.external.operators.FeedMetaOperatorDescriptor) IOperatorDescriptor(org.apache.hyracks.api.dataflow.IOperatorDescriptor) FeedConnectionId(org.apache.asterix.external.feed.management.FeedConnectionId) LSMTreeInsertDeleteOperatorDescriptor(org.apache.asterix.common.dataflow.LSMTreeInsertDeleteOperatorDescriptor)
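The most Pair-heavy part of this example is the connector-operator map, where each connector maps to a Pair of Pairs: the left Pair carries the source operator and its output port, the right Pair the target operator and its input port. A minimal sketch of that edge modeling follows, using plain Strings in place of Hyracks' IOperatorDescriptor; the map keys and operator names are illustrative only.

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class ConnectorEdgeSketch {
    public static void main(String[] args) {
        // connectorId -> ((sourceOp, sourcePort), (targetOp, targetPort))
        Map<String, Pair<Pair<String, Integer>, Pair<String, Integer>>> edges = new HashMap<>();
        edges.put("conn-1", Pair.of(Pair.of("ingest", 0), Pair.of("assign", 0)));
        edges.put("conn-2", Pair.of(Pair.of("assign", 0), Pair.of("insert", 0)));

        for (Map.Entry<String, Pair<Pair<String, Integer>, Pair<String, Integer>>> e : edges.entrySet()) {
            Pair<String, Integer> source = e.getValue().getLeft();
            Pair<String, Integer> target = e.getValue().getRight();
            // Rebuild the connection from the nested pairs, mirroring jobSpec.connect(...).
            System.out.printf("%s: %s[%d] -> %s[%d]%n",
                    e.getKey(), source.getLeft(), source.getRight(), target.getLeft(), target.getRight());
        }
    }
}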

Example 9 with Pair

Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project asterixdb by apache.

The class RegisterPartitionAvailibilityWork, method run:

@Override
public void run() {
    final PartitionId pid = partitionDescriptor.getPartitionId();
    IJobManager jobManager = ccs.getJobManager();
    JobRun run = jobManager.get(pid.getJobId());
    if (run == null) {
        return;
    }
    PartitionMatchMaker pmm = run.getPartitionMatchMaker();
    List<Pair<PartitionDescriptor, PartitionRequest>> matches = pmm.registerPartitionDescriptor(partitionDescriptor);
    for (Pair<PartitionDescriptor, PartitionRequest> match : matches) {
        try {
            PartitionUtils.reportPartitionMatch(ccs, pid, match);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Also used : PartitionRequest(org.apache.hyracks.control.common.job.PartitionRequest) PartitionDescriptor(org.apache.hyracks.control.common.job.PartitionDescriptor) IJobManager(org.apache.hyracks.control.cc.job.IJobManager) PartitionMatchMaker(org.apache.hyracks.control.cc.partitions.PartitionMatchMaker) PartitionId(org.apache.hyracks.api.partitions.PartitionId) JobRun(org.apache.hyracks.control.cc.job.JobRun) Pair(org.apache.commons.lang3.tuple.Pair)

Example 10 with Pair

Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project asterixdb by apache.

The class PartitionMatchMaker, method registerPartitionDescriptor:

public List<Pair<PartitionDescriptor, PartitionRequest>> registerPartitionDescriptor(PartitionDescriptor partitionDescriptor) {
    List<Pair<PartitionDescriptor, PartitionRequest>> matches = new ArrayList<Pair<PartitionDescriptor, PartitionRequest>>();
    PartitionId pid = partitionDescriptor.getPartitionId();
    boolean matched = false;
    List<PartitionRequest> requests = partitionRequests.get(pid);
    if (requests != null) {
        Iterator<PartitionRequest> i = requests.iterator();
        while (i.hasNext()) {
            PartitionRequest req = i.next();
            if (partitionDescriptor.getState().isAtLeast(req.getMinimumState())) {
                matches.add(Pair.<PartitionDescriptor, PartitionRequest>of(partitionDescriptor, req));
                i.remove();
                matched = true;
                if (!partitionDescriptor.isReusable()) {
                    break;
                }
            }
        }
        if (requests.isEmpty()) {
            partitionRequests.remove(pid);
        }
    }
    if (!matched) {
        List<PartitionDescriptor> descriptors = partitionDescriptors.get(pid);
        if (descriptors == null) {
            descriptors = new ArrayList<PartitionDescriptor>();
            partitionDescriptors.put(pid, descriptors);
        }
        descriptors.add(partitionDescriptor);
    }
    return matches;
}
Also used : ArrayList(java.util.ArrayList) PartitionRequest(org.apache.hyracks.control.common.job.PartitionRequest) PartitionDescriptor(org.apache.hyracks.control.common.job.PartitionDescriptor) PartitionId(org.apache.hyracks.api.partitions.PartitionId) Pair(org.apache.commons.lang3.tuple.Pair)

Aggregations

Pair (org.apache.commons.lang3.tuple.Pair): 685
ArrayList (java.util.ArrayList): 209
List (java.util.List): 154
Test (org.junit.Test): 150
ImmutablePair (org.apache.commons.lang3.tuple.ImmutablePair): 142
HashMap (java.util.HashMap): 123
Collectors (java.util.stream.Collectors): 123
Map (java.util.Map): 112
Message (com.microsoft.azure.sdk.iot.device.Message): 71
IOException (java.io.IOException): 70
MutablePair (org.apache.commons.lang3.tuple.MutablePair): 64
java.util (java.util): 55
IotHubTransportMessage (com.microsoft.azure.sdk.iot.device.transport.IotHubTransportMessage): 52
Set (java.util.Set): 49
StringUtils (org.apache.commons.lang3.StringUtils): 48
File (java.io.File): 46
Optional (java.util.Optional): 45
Arrays (java.util.Arrays): 44
HashSet (java.util.HashSet): 40
Test (org.junit.jupiter.api.Test): 39
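The aggregation shows Pair, ImmutablePair and MutablePair from org.apache.commons.lang3.tuple dominating these examples. For reference, a minimal, self-contained sketch of that tuple API; the values used are illustrative only.

import java.util.Map;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.MutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class PairApiSketch {
    public static void main(String[] args) {
        // Pair.of(...) creates an immutable pair; getLeft/getRight read the two halves.
        Pair<String, Integer> counted = Pair.of("subscriptions", 3);
        System.out.println(counted.getLeft() + " = " + counted.getRight());

        // Pair implements Map.Entry, so getKey/getValue also work (as in the examples above).
        Map.Entry<String, Integer> asEntry = counted;
        System.out.println(asEntry.getKey() + " = " + asEntry.getValue());

        // ImmutablePair can also be constructed directly.
        Pair<String, String> failure = new ImmutablePair<>("sid-123", "timeout");
        System.out.println(failure.getKey() + ": " + failure.getValue());

        // MutablePair allows either half to be replaced after construction.
        MutablePair<String, Integer> mutable = MutablePair.of("retries", 0);
        mutable.setRight(mutable.getRight() + 1);
        System.out.println(mutable);
    }
}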