
Example 11 with Table

use of com.google.common.collect.Table in project cdap by caskdata.

the class FlowQueuePendingCorrector method run.

public void run(final FlowId flowId, FlowSpecification flow) throws Exception {
    /* Temporary check. Can be removed when FlowId class is ready */
    Preconditions.checkArgument(ProgramType.FLOW == flowId.getType(), "Unexpected program type %s, FlowQueuePendingCorrector only runs on flows", flowId.getType());
    System.out.println("Running queue.pending correction on flow " + flowId);
    SimpleQueueSpecificationGenerator queueSpecGenerator = new SimpleQueueSpecificationGenerator(flowId.getParent());
    Table<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> table = queueSpecGenerator.create(flow);
    for (Table.Cell<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> cell : table.cellSet()) {
        if (cell.getRowKey().getType() == FlowletConnection.Type.FLOWLET) {
            String producerFlowlet = cell.getRowKey().getName();
            String consumerFlowlet = cell.getColumnKey();
            for (QueueSpecification queue : cell.getValue()) {
                run(flowId, producerFlowlet, consumerFlowlet, queue.getQueueName().getSimpleName());
            }
        }
    }
}
Also used : Set(java.util.Set) Table(com.google.common.collect.Table) QueueSpecification(co.cask.cdap.app.queue.QueueSpecification) SimpleQueueSpecificationGenerator(co.cask.cdap.internal.app.queue.SimpleQueueSpecificationGenerator)
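
The pattern to take away here is the cellSet() iteration: each Table.Cell carries the row key (producer), the column key (consumer), and the value (the queue specs between them). A minimal standalone sketch of that idiom, with hypothetical flowlet names and plain String queue names in place of CDAP's real types:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import java.util.Set;

public class CellSetDemo {
    public static void main(String[] args) {
        // Row = producer flowlet, column = consumer flowlet, value = queue names.
        Table<String, String, Set<String>> queues = HashBasedTable.create();
        queues.put("reader", "parser", Set.of("queue://reader/out"));
        queues.put("parser", "writer", Set.of("queue://parser/out"));
        // Walk every (row, column, value) triple, as the corrector's loop does.
        for (Table.Cell<String, String, Set<String>> cell : queues.cellSet()) {
            for (String queueName : cell.getValue()) {
                System.out.printf("%s -> %s via %s%n",
                        cell.getRowKey(), cell.getColumnKey(), queueName);
            }
        }
    }
}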

Example 12 with Table

use of com.google.common.collect.Table in project sonarqube by SonarSource.

the class PrMeasureFix method createReplacementMeasures.

static void createReplacementMeasures(List<MetricDto> metrics, Table<String, MetricDto, ComponentTreeData.Measure> measuresByComponentUuidAndMetric, Set<String> requestedMetricKeys) {
    Map<String, MetricDto> metricByKey = Maps.uniqueIndex(metrics, MetricDto::getKey);
    for (MetricDto metric : measuresByComponentUuidAndMetric.columnKeySet()) {
        Map<String, ComponentTreeData.Measure> newEntries = new HashMap<>();
        String originalKey = METRICS.inverse().get(metric.getKey());
        if (originalKey != null && requestedMetricKeys.contains(originalKey)) {
            for (Map.Entry<String, ComponentTreeData.Measure> e : measuresByComponentUuidAndMetric.column(metric).entrySet()) {
                newEntries.put(e.getKey(), copyMeasureToVariation(e.getValue()));
            }
            MetricDto originalMetric = metricByKey.get(originalKey);
            newEntries.forEach((k, v) -> measuresByComponentUuidAndMetric.put(k, originalMetric, v));
        }
    }
    List<MetricDto> toRemove = measuresByComponentUuidAndMetric.columnKeySet().stream().filter(m -> !requestedMetricKeys.contains(m.getKey())).collect(Collectors.toList());
    measuresByComponentUuidAndMetric.columnKeySet().removeAll(toRemove);
}
Also used : CRITICAL_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.CRITICAL_VIOLATIONS_KEY) NEW_SECURITY_REMEDIATION_EFFORT_KEY(org.sonar.api.measures.CoreMetrics.NEW_SECURITY_REMEDIATION_EFFORT_KEY) MINOR_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.MINOR_VIOLATIONS_KEY) NEW_RELIABILITY_REMEDIATION_EFFORT_KEY(org.sonar.api.measures.CoreMetrics.NEW_RELIABILITY_REMEDIATION_EFFORT_KEY) INFO_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.INFO_VIOLATIONS_KEY) VULNERABILITIES_KEY(org.sonar.api.measures.CoreMetrics.VULNERABILITIES_KEY) VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.VIOLATIONS_KEY) NEW_CRITICAL_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.NEW_CRITICAL_VIOLATIONS_KEY) HashMap(java.util.HashMap) BLOCKER_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.BLOCKER_VIOLATIONS_KEY) NEW_SECURITY_RATING_KEY(org.sonar.api.measures.CoreMetrics.NEW_SECURITY_RATING_KEY) NEW_MINOR_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.NEW_MINOR_VIOLATIONS_KEY) NEW_TECHNICAL_DEBT_KEY(org.sonar.api.measures.CoreMetrics.NEW_TECHNICAL_DEBT_KEY) Map(java.util.Map) RELIABILITY_RATING_KEY(org.sonar.api.measures.CoreMetrics.RELIABILITY_RATING_KEY) NEW_BLOCKER_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.NEW_BLOCKER_VIOLATIONS_KEY) TECHNICAL_DEBT_KEY(org.sonar.api.measures.CoreMetrics.TECHNICAL_DEBT_KEY) BiMap(com.google.common.collect.BiMap) SECURITY_RATING_KEY(org.sonar.api.measures.CoreMetrics.SECURITY_RATING_KEY) RELIABILITY_REMEDIATION_EFFORT_KEY(org.sonar.api.measures.CoreMetrics.RELIABILITY_REMEDIATION_EFFORT_KEY) CODE_SMELLS_KEY(org.sonar.api.measures.CoreMetrics.CODE_SMELLS_KEY) Collection(java.util.Collection) NEW_BUGS_KEY(org.sonar.api.measures.CoreMetrics.NEW_BUGS_KEY) Set(java.util.Set) NEW_MAJOR_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.NEW_MAJOR_VIOLATIONS_KEY) NEW_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.NEW_VIOLATIONS_KEY) NEW_INFO_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.NEW_INFO_VIOLATIONS_KEY) SECURITY_REMEDIATION_EFFORT_KEY(org.sonar.api.measures.CoreMetrics.SECURITY_REMEDIATION_EFFORT_KEY) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) NEW_CODE_SMELLS_KEY(org.sonar.api.measures.CoreMetrics.NEW_CODE_SMELLS_KEY) NEW_VULNERABILITIES_KEY(org.sonar.api.measures.CoreMetrics.NEW_VULNERABILITIES_KEY) NEW_RELIABILITY_RATING_KEY(org.sonar.api.measures.CoreMetrics.NEW_RELIABILITY_RATING_KEY) HashBiMap(com.google.common.collect.HashBiMap) List(java.util.List) MetricDto(org.sonar.db.metric.MetricDto) LiveMeasureDto(org.sonar.db.measure.LiveMeasureDto) BUGS_KEY(org.sonar.api.measures.CoreMetrics.BUGS_KEY) MAJOR_VIOLATIONS_KEY(org.sonar.api.measures.CoreMetrics.MAJOR_VIOLATIONS_KEY) Table(com.google.common.collect.Table)
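
The subtlety in this example is that column(metric) and columnKeySet() are live views backed by the table, which is why the method collects newEntries into a separate map before writing and collects toRemove before calling removeAll. A minimal sketch of both moves, assuming plain String metric keys and Double values instead of SonarQube's MetricDto and Measure types:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ColumnViewDemo {
    public static void main(String[] args) {
        // Row = component UUID, column = metric key, value = measure value.
        Table<String, String, Double> measures = HashBasedTable.create();
        measures.put("comp-1", "new_bugs", 3.0);
        measures.put("comp-2", "new_bugs", 1.0);
        measures.put("comp-1", "lines", 120.0);
        // Copy the column first: writing to the table while iterating one of
        // its live views risks ConcurrentModificationException, which is why
        // the example above stages its writes in newEntries.
        Map<String, Double> newEntries = new HashMap<>(measures.column("new_bugs"));
        newEntries.forEach((componentUuid, value) -> measures.put(componentUuid, "bugs", value));
        // columnKeySet() is also a live view: removeAll drops whole columns.
        List<String> toRemove = measures.columnKeySet().stream()
                .filter(key -> key.startsWith("new_"))
                .collect(Collectors.toList());
        measures.columnKeySet().removeAll(toRemove);
        System.out.println(measures);
    }
}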

Example 13 with Table

use of com.google.common.collect.Table in project toolkit by googleapis.

the class SampleConfig method createSampleConfigTable.

public static ImmutableTable<String, String, ImmutableList<SampleConfig>> createSampleConfigTable(@Nullable SampleConfigProto sampleConfigProto, final Map<String, InterfaceConfig> interfaceConfigMap) {
    if (sampleConfigProto == null) {
        sampleConfigProto = SampleConfigProto.getDefaultInstance();
    }
    // First, apply region tag as IDs if IDs are not given
    List<SampleSpecProto> sampleSpecs = new ArrayList<>();
    for (SampleSpecProto spec : sampleConfigProto.getSamplesList()) {
        if (spec.getId().isEmpty()) {
            spec = spec.toBuilder().setId(spec.getRegionTag()).build();
        }
        sampleSpecs.add(spec);
    }
    // Then, check user specified sample IDs do not clash
    Set<String> distinctIds = new HashSet<>();
    Set<String> duplicateIds = sampleSpecs.stream().map(s -> s.getId()).filter(id -> !id.isEmpty()).filter(s -> !distinctIds.add(s)).collect(Collectors.toSet());
    Preconditions.checkArgument(duplicateIds.isEmpty(), "Found duplicate IDs: %s", duplicateIds.stream().collect(Collectors.joining(", ")));
    // Next, flatten the calling pattern list so we have one per sample
    // Note these are not the final calling pattern values, because the
    // regexes specified in the config need to be matched against
    // language-specific calling pattern definitions.
    List<SampleSpecProto> flattenedSampleSpecs = new ArrayList<>();
    for (SampleSpecProto spec : sampleSpecs) {
        if (spec.getCallingPatternsList().isEmpty()) {
            flattenedSampleSpecs.add(spec.toBuilder().addCallingPatterns(DEFAULT_CALLING_PATTERN).build());
        }
        for (String pattern : spec.getCallingPatternsList()) {
            flattenedSampleSpecs.add(spec.toBuilder().clearCallingPatterns().addCallingPatterns(pattern).build());
        }
    }
    // Construct the table.
    HashBasedTable<String, String, ArrayList<SampleConfig>> table = HashBasedTable.create();
    for (SampleSpecProto sampleSpec : flattenedSampleSpecs) {
        SampleConfig config = createOneSampleConfig(sampleSpec, interfaceConfigMap);
        if (!table.contains(sampleSpec.getService(), sampleSpec.getRpc())) {
            table.put(sampleSpec.getService(), sampleSpec.getRpc(), new ArrayList<>());
        }
        table.get(sampleSpec.getService(), sampleSpec.getRpc()).add(config);
    }
    // Make an immutable copy.
    return table.cellSet().stream().collect(ImmutableTable.toImmutableTable(Table.Cell::getRowKey, Table.Cell::getColumnKey, v -> ImmutableList.copyOf(v.getValue())));
}
Also used : SampleValueSet(com.google.api.codegen.SampleValueSet) CallingForm(com.google.api.codegen.viewmodel.CallingForm) RequestFieldProto(com.google.api.codegen.samplegen.v1p2.RequestFieldProto) ResponseStatementProto(com.google.api.codegen.samplegen.v1p2.ResponseStatementProto) Set(java.util.Set) HashBasedTable(com.google.common.collect.HashBasedTable) SampleConfigProto(com.google.api.codegen.samplegen.v1p2.SampleConfigProto) Collectors(java.util.stream.Collectors) SampleSpecProto(com.google.api.codegen.samplegen.v1p2.SampleSpecProto) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) ImmutableTable(com.google.common.collect.ImmutableTable) Map(java.util.Map) AutoValue(com.google.auto.value.AutoValue) Preconditions(com.google.common.base.Preconditions) Table(com.google.common.collect.Table) Nullable(javax.annotation.Nullable)
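
Two reusable idioms close this example: accumulating lists in a mutable HashBasedTable via contains/put/get, and freezing the result with the ImmutableTable.toImmutableTable collector. A minimal sketch of both, using hypothetical service, RPC, and sample-id strings:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableTable;
import com.google.common.collect.Table;
import java.util.ArrayList;
import java.util.List;

public class ImmutableCopyDemo {
    public static void main(String[] args) {
        // Row = service, column = RPC, value = list of sample ids.
        Table<String, String, List<String>> table = HashBasedTable.create();
        add(table, "Library", "GetBook", "sample-1");
        add(table, "Library", "GetBook", "sample-2");
        add(table, "Library", "ListShelves", "sample-3");
        // Freeze each cell's list while copying the table, as the method above does.
        ImmutableTable<String, String, ImmutableList<String>> frozen =
                table.cellSet().stream()
                        .collect(ImmutableTable.toImmutableTable(
                                Table.Cell::getRowKey,
                                Table.Cell::getColumnKey,
                                cell -> ImmutableList.copyOf(cell.getValue())));
        System.out.println(frozen);
    }

    private static void add(Table<String, String, List<String>> table,
                            String service, String rpc, String id) {
        // contains/put/get mirrors the accumulation loop in the example above.
        if (!table.contains(service, rpc)) {
            table.put(service, rpc, new ArrayList<>());
        }
        table.get(service, rpc).add(id);
    }
}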

Example 14 with Table

use of com.google.common.collect.Table in project SpinalTap by airbnb.

the class MysqlSchemaTracker method processDDLStatement.

public void processDDLStatement(@NotNull final QueryEvent event) {
    BinlogFilePos binlogFilePos = event.getBinlogFilePos();
    String ddl = event.getSql();
    if (schemaStore.get(binlogFilePos) != null) {
        log.info(String.format("DDL Statement (%s) has already been processed. (BinlogFilePos: %s)", ddl, binlogFilePos));
        return;
    }
    // It could be a new database which has not been created in schema store database, so don't
    // switch to any database before applying database DDL.
    schemaDatabase.applyDDLStatement(DATABASE_DDL_SQL_PATTERN.matcher(ddl).find() ? "" : event.getDatabase(), ddl);
    // Get schemas for active tables in schema store
    Table<String, String, MysqlTableSchema> activeTableSchemasInStore = Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);
    schemaStore.getAll().values().stream().map(treeMap -> treeMap.lastEntry().getValue()).filter(schema -> !schema.getColumnInfo().isEmpty()).forEach(schema -> activeTableSchemasInStore.put(schema.getDatabase(), schema.getTable(), schema));
    Set<String> activeDatabasesInStore = activeTableSchemasInStore.rowKeySet();
    Set<String> databasesInSchemaDatabase = schemaDatabase.listDatabases();
    // Handle new databases
    Sets.difference(databasesInSchemaDatabase, activeDatabasesInStore).forEach(newDatabase -> updateSchemaStore(newDatabase, event, Maps.newHashMap(), schemaDatabase.fetchTableSchema(newDatabase)));
    // Handle existing databases
    activeDatabasesInStore.forEach(database -> updateSchemaStore(database, event, activeTableSchemasInStore.row(database), schemaDatabase.fetchTableSchema(database)));
}
Also used : QueryEvent(com.airbnb.spinaltap.mysql.event.QueryEvent) BinlogFilePos(com.airbnb.spinaltap.mysql.BinlogFilePos) RequiredArgsConstructor(lombok.RequiredArgsConstructor) Set(java.util.Set) NotNull(javax.validation.constraints.NotNull) Maps(com.google.common.collect.Maps) Sets(com.google.common.collect.Sets) Tables(com.google.common.collect.Tables) Slf4j(lombok.extern.slf4j.Slf4j) Lists(com.google.common.collect.Lists) Map(java.util.Map) Pattern(java.util.regex.Pattern) Table(com.google.common.collect.Table)
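
Tables.newCustomTable is what lets the tracker choose the backing map implementations, and rowKeySet()/row(database) then give the database-level and per-database views used for the diff against the schema store. A minimal sketch, with plain strings standing in for MysqlTableSchema:

import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.common.collect.Tables;

public class CustomTableDemo {
    public static void main(String[] args) {
        // Row = database, column = table name, value = schema (a string here).
        Table<String, String, String> schemas =
                Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);
        schemas.put("shop", "orders", "(id BIGINT, total DECIMAL)");
        schemas.put("shop", "users", "(id BIGINT, name VARCHAR)");
        schemas.put("audit", "events", "(id BIGINT, payload TEXT)");
        // rowKeySet() lists databases; row(db) is a map of table -> schema.
        for (String database : schemas.rowKeySet()) {
            System.out.println(database + " -> " + schemas.row(database));
        }
    }
}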

Example 15 with Table

use of com.google.common.collect.Table in project coprhd-controller by CoprHD.

the class VPlexBlockServiceApiImpl method migrateVolumesInReplicationGroup.

/**
 * Group all volumes in RGs and create a WF to migrate those volumes together.
 *
 * @param volumes All volumes being considered for migration
 * @param vpool The vpool to migrate to
 * @param volumesNotInRG A container to store all volumes NOT in an RG
 * @param volumesInRG A container to store all the volumes in an RG
 * @param controllerOperationValues values from the controller call, used to determine
 *            whether we need to suspend on commit or deletion of source volumes
 * @return taskList Tasks generated for RG migrations
 */
protected TaskList migrateVolumesInReplicationGroup(List<Volume> volumes, VirtualPool vpool, List<Volume> volumesNotInRG, List<Volume> volumesInRG, ControllerOperationValuesWrapper controllerOperationValues) {
    TaskList taskList = new TaskList();
    // Group all volumes in the request by RG. If there are no volumes in the request
    // that are in an RG then the table will be empty.
    Table<URI, String, List<Volume>> groupVolumes = VPlexUtil.groupVPlexVolumesByRG(volumes, volumesNotInRG, volumesInRG, _dbClient);
    for (Table.Cell<URI, String, List<Volume>> cell : groupVolumes.cellSet()) {
        // Get all the volumes in the request that have been grouped by RG
        List<Volume> volumesInRGRequest = cell.getValue();
        // Grab the first volume
        Volume firstVolume = volumesInRGRequest.get(0);
        // Get all the volumes from the RG
        List<Volume> rgVolumes = VPlexUtil.getVolumesInSameReplicationGroup(cell.getColumnKey(), cell.getRowKey(), firstVolume.getPersonality(), _dbClient);
        // We need to migrate all the volumes from the RG together.
        if (volumesInRGRequest.size() != rgVolumes.size()) {
            throw APIException.badRequests.cantChangeVpoolNotAllCGVolumes();
        }
        BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, firstVolume.getConsistencyGroup());
        URI systemURI = firstVolume.getStorageController();
        // Make sure the CG does not contain more volumes than the maximum allowed for a vpool change.
        if (volumesInRGRequest.size() > _maxCgVolumesForMigration) {
            throw APIException.badRequests.cgContainsTooManyVolumesForVPoolChange(cg.getLabel(), volumes.size(), _maxCgVolumesForMigration);
        }
        // For a multi-volume request, verify that the volumes can all be placed in a CG on the target storage system.
        if (volumesInRGRequest.size() > 1) {
            s_logger.info("Multiple volume request, verifying target storage systems");
            verifyTargetSystemsForCGDataMigration(volumesInRGRequest, vpool, cg.getVirtualArray());
        }
        // Create a unique task id.
        String taskId = UUID.randomUUID().toString();
        // Get all volume descriptors for all volumes to be migrated.
        StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, systemURI);
        List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
        for (Volume volume : volumesInRGRequest) {
            descriptors.addAll(createChangeVirtualPoolDescriptors(storageSystem, volume, vpool, taskId, null, null, controllerOperationValues, true));
        }
        // Create a task object associated with the CG
        taskList.getTaskList().add(createTaskForRG(vpool, rgVolumes, taskId));
        // Orchestrate the vpool changes of all volumes as a single request.
        orchestrateVPoolChanges(volumesInRGRequest, descriptors, taskId);
    }
    return taskList;
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) Table(com.google.common.collect.Table) TaskList(com.emc.storageos.model.TaskList) ArrayList(java.util.ArrayList) FCTN_STRING_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_STRING_TO_URI) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) FCTN_VPLEX_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_VPLEX_MIRROR_TO_URI) BlockConsistencyGroup(com.emc.storageos.db.client.model.BlockConsistencyGroup) Volume(com.emc.storageos.db.client.model.Volume) ApplicationAddVolumeList(com.emc.storageos.volumecontroller.ApplicationAddVolumeList) VolumeGroupVolumeList(com.emc.storageos.model.application.VolumeGroupUpdateParam.VolumeGroupVolumeList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) StorageSystemConnectivityList(com.emc.storageos.model.systems.StorageSystemConnectivityList) List(java.util.List) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
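
Underneath the storage-specific logic, the idiom is a two-key group-by: rows are storage systems, columns are replication groups, and each cell is the batch of volumes migrated together. A minimal sketch with a hypothetical Item record (requires Java 16+) standing in for Volume:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import java.util.ArrayList;
import java.util.List;

public class GroupByTwoKeysDemo {
    // A stand-in for Volume: grouped by storage system and replication group.
    record Item(String system, String group, String name) {}

    public static void main(String[] args) {
        List<Item> items = List.of(
                new Item("system-a", "rg-1", "vol-1"),
                new Item("system-a", "rg-1", "vol-2"),
                new Item("system-b", "rg-2", "vol-3"));
        // Row = system, column = replication group, value = items in that group.
        Table<String, String, List<Item>> grouped = HashBasedTable.create();
        for (Item item : items) {
            List<Item> bucket = grouped.get(item.system(), item.group());
            if (bucket == null) {
                bucket = new ArrayList<>();
                grouped.put(item.system(), item.group(), bucket);
            }
            bucket.add(item);
        }
        // Each cell is one batch, mirroring the per-RG migration loop above.
        for (Table.Cell<String, String, List<Item>> cell : grouped.cellSet()) {
            System.out.printf("system=%s group=%s -> %s%n",
                    cell.getRowKey(), cell.getColumnKey(), cell.getValue());
        }
    }
}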

Aggregations

Table (com.google.common.collect.Table): 15
HashBasedTable (com.google.common.collect.HashBasedTable): 7
List (java.util.List): 6
Set (java.util.Set): 6
Map (java.util.Map): 5
ArrayList (java.util.ArrayList): 4
ImmutableTable (com.google.common.collect.ImmutableTable): 3
QueueSpecification (co.cask.cdap.app.queue.QueueSpecification): 2
SimpleQueueSpecificationGenerator (co.cask.cdap.internal.app.queue.SimpleQueueSpecificationGenerator): 2
ProgramId (co.cask.cdap.proto.id.ProgramId): 2
Maps (com.google.common.collect.Maps): 2
File (java.io.File): 2
IOException (java.io.IOException): 2
Collectors (java.util.stream.Collectors): 2
ApplicationSpecification (co.cask.cdap.api.app.ApplicationSpecification): 1
FlowSpecification (co.cask.cdap.api.flow.FlowSpecification): 1
MetricsCollectionService (co.cask.cdap.api.metrics.MetricsCollectionService): 1
MetricsContext (co.cask.cdap.api.metrics.MetricsContext): 1
AppFabricServiceRuntimeModule (co.cask.cdap.app.guice.AppFabricServiceRuntimeModule): 1
ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor): 1