
Example 6 with MapDifference

use of com.google.common.collect.MapDifference in project buck by facebook.

the class DaemonicParserState method invalidateIfBuckConfigOrEnvHasChanged.

private synchronized void invalidateIfBuckConfigOrEnvHasChanged(Cell cell, Path buildFile) {
    try (AutoCloseableLock readLock = cellStateLock.readLock()) {
        DaemonicCellState state = cellPathToDaemonicState.get(cell.getRoot());
        if (state == null) {
            return;
        }
        // Invalidates and also keeps the cell state up-to-date
        state.invalidateIfBuckConfigHasChanged(cell, buildFile);
        Optional<MapDifference<String, String>> envDiff = state.invalidateIfEnvHasChanged(cell, buildFile);
        if (envDiff.isPresent()) {
            MapDifference<String, String> diff = envDiff.get();
            LOG.warn("Invalidating cache on environment change (%s)", diff);
            Set<String> environmentChanges = new HashSet<>();
            environmentChanges.addAll(diff.entriesOnlyOnLeft().keySet());
            environmentChanges.addAll(diff.entriesOnlyOnRight().keySet());
            environmentChanges.addAll(diff.entriesDiffering().keySet());
            cacheInvalidatedByEnvironmentVariableChangeCounter.addAll(environmentChanges);
            broadcastEventListener.broadcast(ParsingEvent.environmentalChange(environmentChanges.toString()));
        }
    }
}
Also used : MapDifference(com.google.common.collect.MapDifference) AutoCloseableLock(com.facebook.buck.util.concurrent.AutoCloseableLock) HashSet(java.util.HashSet)
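The Buck snippet reduces the diff to a flat set of affected keys by unioning MapDifference's three buckets: entriesOnlyOnLeft (removed), entriesOnlyOnRight (added), and entriesDiffering (changed). A minimal standalone sketch of that idiom (hypothetical class name; plain Guava plus Java 9+ Map.of):

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class EnvDiffSketch {
    public static void main(String[] args) {
        Map<String, String> oldEnv = Map.of("PATH", "/usr/bin", "LANG", "C");
        Map<String, String> newEnv = Map.of("PATH", "/usr/local/bin", "TERM", "xterm");
        MapDifference<String, String> diff = Maps.difference(oldEnv, newEnv);
        Set<String> changedKeys = new HashSet<>();
        changedKeys.addAll(diff.entriesOnlyOnLeft().keySet());   // removed: LANG
        changedKeys.addAll(diff.entriesOnlyOnRight().keySet());  // added: TERM
        changedKeys.addAll(diff.entriesDiffering().keySet());    // changed: PATH
        System.out.println(changedKeys);                         // e.g. [PATH, LANG, TERM]
    }
}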

Example 7 with MapDifference

use of com.google.common.collect.MapDifference in project cassandra by apache.

the class Schema method reload.

private void reload(KeyspaceMetadata previous, KeyspaceMetadata updated) {
    Keyspace keyspace = getKeyspaceInstance(updated.name);
    if (keyspace != null)
        keyspace.setMetadata(updated);
    MapDifference<TableId, TableMetadata> tablesDiff = previous.tables.diff(updated.tables);
    MapDifference<TableId, ViewMetadata> viewsDiff = previous.views.diff(updated.views);
    MapDifference<String, TableMetadata> indexesDiff = previous.tables.indexesDiff(updated.tables);
    // clean up after removed entries
    tablesDiff.entriesOnlyOnLeft().values().forEach(table -> metadataRefs.remove(table.id));
    viewsDiff.entriesOnlyOnLeft().values().forEach(view -> metadataRefs.remove(view.metadata.id));
    indexesDiff.entriesOnlyOnLeft().values().forEach(indexTable -> indexMetadataRefs.remove(Pair.create(indexTable.keyspace, indexTable.indexName().get())));
    // load up new entries
    tablesDiff.entriesOnlyOnRight().values().forEach(table -> metadataRefs.put(table.id, new TableMetadataRef(table)));
    viewsDiff.entriesOnlyOnRight().values().forEach(view -> metadataRefs.put(view.metadata.id, new TableMetadataRef(view.metadata)));
    indexesDiff.entriesOnlyOnRight().values().forEach(indexTable -> indexMetadataRefs.put(Pair.create(indexTable.keyspace, indexTable.indexName().get()), new TableMetadataRef(indexTable)));
    // refresh refs to updated ones
    tablesDiff.entriesDiffering().values().forEach(diff -> metadataRefs.get(diff.rightValue().id).set(diff.rightValue()));
    viewsDiff.entriesDiffering().values().forEach(diff -> metadataRefs.get(diff.rightValue().metadata.id).set(diff.rightValue().metadata));
    indexesDiff.entriesDiffering().values().stream().map(MapDifference.ValueDifference::rightValue).forEach(indexTable -> indexMetadataRefs.get(Pair.create(indexTable.keyspace, indexTable.indexName().get())).set(indexTable));
}
Also used : MapDifference(com.google.common.collect.MapDifference) SystemKeyspace(org.apache.cassandra.db.SystemKeyspace) Keyspace(org.apache.cassandra.db.Keyspace)
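Cassandra's reload is diff-driven reconciliation in three passes: entries only on the left are torn down, entries only on the right are created, and differing entries are refreshed to their rightValue(). A generic sketch of the same pattern against a plain mutable map (hypothetical class and method names, not Cassandra code):

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.Map;

public class ReconcileSketch {
    // Applies the previous -> updated transition to a mutable registry,
    // mirroring the remove / add / refresh ordering used above.
    static <K, V> void reconcile(Map<K, V> registry, Map<K, V> previous, Map<K, V> updated) {
        MapDifference<K, V> diff = Maps.difference(previous, updated);
        // clean up after removed entries
        diff.entriesOnlyOnLeft().keySet().forEach(registry::remove);
        // load up new entries
        registry.putAll(diff.entriesOnlyOnRight());
        // refresh to the updated (right-hand) values
        diff.entriesDiffering().forEach((key, vd) -> registry.put(key, vd.rightValue()));
    }

    public static void main(String[] args) {
        Map<String, String> registry = Maps.newHashMap(Map.of("a", "1", "b", "2"));
        reconcile(registry, Map.of("a", "1", "b", "2"), Map.of("b", "3", "c", "4"));
        System.out.println(registry); // {b=3, c=4}
    }
}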

Example 8 with MapDifference

use of com.google.common.collect.MapDifference in project metacat by Netflix.

the class DirectSqlDatabase method update.

/**
 * Updates the database object.
 * @param databaseInfo database object
 */
public void update(final DatabaseInfo databaseInfo) {
    log.debug("Start: Database update using direct sql for {}", databaseInfo.getName());
    final long start = registry.clock().wallTime();
    try {
        final Long databaseId = getDatabaseId(databaseInfo.getName());
        final DatabaseInfo existingDatabaseInfo = getDatabaseById(databaseId, databaseInfo.getName());
        final Map<String, String> newMetadata = databaseInfo.getMetadata() == null ? Maps.newHashMap() : databaseInfo.getMetadata();
        final MapDifference<String, String> diff = Maps.difference(existingDatabaseInfo.getMetadata(), newMetadata);
        insertDatabaseParams(databaseId, diff.entriesOnlyOnRight());
        final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
        updateDatabaseParams(databaseId, updateParams);
        final String uri = Strings.isNullOrEmpty(databaseInfo.getUri()) ? existingDatabaseInfo.getUri() : databaseInfo.getUri();
        final String newOwner = getOwner(databaseInfo.getAudit());
        final String owner = Strings.isNullOrEmpty(newOwner) ? existingDatabaseInfo.getAudit().getCreatedBy() : newOwner;
        jdbcTemplate.update(SQL.UPDATE_DATABASE, new SqlParameterValue(Types.VARCHAR, uri), new SqlParameterValue(Types.VARCHAR, owner), new SqlParameterValue(Types.BIGINT, databaseId));
    } finally {
        this.fastServiceMetric.recordTimer(HiveMetrics.TagAlterDatabase.getMetricName(), registry.clock().wallTime() - start);
        log.debug("End: Database update using direct sql for {}", databaseInfo.getName());
    }
}
Also used : HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) QualifiedName(com.netflix.metacat.common.QualifiedName) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) DatabaseNotFoundException(com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) MapDifference(com.google.common.collect.MapDifference) DatabaseInfo(com.netflix.metacat.common.server.connectors.model.DatabaseInfo) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Registry(com.netflix.spectator.api.Registry) Map(java.util.Map) SqlRowSet(org.springframework.jdbc.support.rowset.SqlRowSet) VisibleForTesting(com.google.common.annotations.VisibleForTesting) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) Nullable(javax.annotation.Nullable) EmptyResultDataAccessException(org.springframework.dao.EmptyResultDataAccessException) Transactional(org.springframework.transaction.annotation.Transactional) Types(java.sql.Types)
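The updateParams line above is a reusable idiom: project entriesDiffering() down to a key-to-new-value map with Collectors.toMap, so only changed keys are written back. Isolated as a sketch (hypothetical class name; nothing assumed beyond Guava and the JDK):

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.Map;
import java.util.stream.Collectors;

public class DiffToUpdateParams {
    public static void main(String[] args) {
        MapDifference<String, String> diff = Maps.difference(
                Map.of("uri", "s3://old", "owner", "a"),
                Map.of("uri", "s3://new", "owner", "a"));
        // keep only the keys whose values changed, taking the new (right-hand) value
        Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().rightValue()));
        System.out.println(updateParams); // {uri=s3://new}
    }
}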

Example 9 with MapDifference

use of com.google.common.collect.MapDifference in project metacat by Netflix.

the class DirectSqlTable method updateIcebergTable.

/**
 * Locks and updates the iceberg table so that no other request can modify it at the same time.
 * 1. Gets the table parameters and locks the requested records. If the lock cannot be attained,
 * the request to update fails.
 * 2. Validates the metadata location.
 * 3. If validated, updates the table parameters.
 * @param tableInfo table info
 */
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void updateIcebergTable(final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    final Map<String, String> newTableMetadata = tableInfo.getMetadata();
    // 
    if (newTableMetadata == null || newTableMetadata.isEmpty()) {
        final String message = String.format("No parameters defined for iceberg table %s", tableName);
        log.warn(message);
        throw new InvalidMetaException(tableName, message, null);
    }
    // 
    // If the previous metadata location is not empty, check if it is valid.
    // 
    final String previousMetadataLocation = newTableMetadata.get(PARAM_PREVIOUS_METADATA_LOCATION);
    if (config.isIcebergPreviousMetadataLocationCheckEnabled() && !StringUtils.isBlank(previousMetadataLocation)) {
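        // doesPathExists defaults to true: a filesystem failure in the check below is logged
        // and counted, but only a confirmed missing path fails validation.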
        boolean doesPathExists = true;
        try {
            final Path previousMetadataPath = new Path(previousMetadataLocation);
            doesPathExists = warehouse.getFs(previousMetadataPath).exists(previousMetadataPath);
        } catch (Exception ignored) {
            log.warn(String.format("Failed getting the filesystem for %s", previousMetadataLocation));
            registry.counter(HiveMetrics.CounterFileSystemReadFailure.name()).increment();
        }
        if (!doesPathExists) {
            throw new InvalidMetaException(tableName, String.format("Invalid metadata for %s. Location %s does not exist", tableName, previousMetadataLocation), null);
        }
    }
    final Long tableId = getTableId(tableName);
    Map<String, String> existingTableMetadata = null;
    log.debug("Lock Iceberg table {}", tableName);
    try {
        existingTableMetadata = jdbcTemplate.query(SQL.TABLE_PARAMS_LOCK, new SqlParameterValue[] { new SqlParameterValue(Types.BIGINT, tableId) }, rs -> {
            final Map<String, String> result = Maps.newHashMap();
            while (rs.next()) {
                result.put(rs.getString(COL_PARAM_KEY), rs.getString(COL_PARAM_VALUE));
            }
            return result;
        });
    } catch (EmptyResultDataAccessException ex) {
        log.info(String.format("No parameters defined for iceberg table %s", tableName));
    } catch (Exception ex) {
        final String message = String.format("Failed getting a lock on iceberg table %s", tableName);
        log.warn(message, ex);
        throw new InvalidMetaException(tableName, message, null);
    }
    if (existingTableMetadata == null) {
        existingTableMetadata = Maps.newHashMap();
    }
    final boolean needUpdate = validateIcebergUpdate(tableName, existingTableMetadata, newTableMetadata);
    final String existingMetadataLocation = existingTableMetadata.get(PARAM_METADATA_LOCATION);
    final String newMetadataLocation = newTableMetadata.get(PARAM_METADATA_LOCATION);
    log.info("Servicing Iceberg commit request with tableId: {}, needUpdate: {}, " + "previousLocation: {}, existingLocation: {}, newLocation: {}", tableId, needUpdate, previousMetadataLocation, existingMetadataLocation, newMetadataLocation);
    if (needUpdate) {
        final MapDifference<String, String> diff = Maps.difference(existingTableMetadata, newTableMetadata);
        insertTableParams(tableId, diff.entriesOnlyOnRight());
        final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
        updateTableParams(tableId, updateParams);
        // 
        // In addition to updating the table params, the table location in HMS needs to be updated for usage by
        // external tools that access HMS directly
        // 
        updateTableLocation(tableId, tableInfo);
        log.info("Finished updating Iceberg table with tableId: {}", tableId);
    }
    log.debug("Unlocked Iceberg table {}", tableName);
}
Also used : Path(org.apache.hadoop.fs.Path) StringUtils(org.apache.commons.lang.StringUtils) DataAccessException(org.springframework.dao.DataAccessException) HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) TablePreconditionFailedException(com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException) Lists(com.google.common.collect.Lists) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) Propagation(org.springframework.transaction.annotation.Propagation) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) Config(com.netflix.metacat.common.server.properties.Config) EmptyResultDataAccessException(org.springframework.dao.EmptyResultDataAccessException) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) QualifiedName(com.netflix.metacat.common.QualifiedName) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) Objects(java.util.Objects) MapDifference(com.google.common.collect.MapDifference) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) HiveTableUtil(com.netflix.metacat.connector.hive.util.HiveTableUtil) Registry(com.netflix.spectator.api.Registry) VisibleForTesting(com.google.common.annotations.VisibleForTesting) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) Transactional(org.springframework.transaction.annotation.Transactional) Types(java.sql.Types)
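The lock in step 1 of the javadoc comes from reading the parameter rows with SELECT ... FOR UPDATE inside the REQUIRES_NEW transaction: concurrent commits serialize on those row locks until commit or rollback. A minimal sketch of that read, assuming a hypothetical TABLE_PARAMS(tbl_id, param_key, param_value) schema and the Spring 5.3+ JdbcTemplate.query(sql, extractor, args) overload:

import org.springframework.jdbc.core.JdbcTemplate;
import java.util.HashMap;
import java.util.Map;

public class ParamLockSketch {
    private final JdbcTemplate jdbcTemplate;

    public ParamLockSketch(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    // Reads and row-locks one table's params; the locks are held until the surrounding transaction ends.
    Map<String, String> lockAndReadParams(long tableId) {
        return jdbcTemplate.query(
                "SELECT param_key, param_value FROM TABLE_PARAMS WHERE tbl_id = ? FOR UPDATE",
                rs -> {
                    Map<String, String> result = new HashMap<>();
                    while (rs.next()) {
                        result.put(rs.getString("param_key"), rs.getString("param_value"));
                    }
                    return result;
                },
                tableId);
    }
}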

Example 10 with MapDifference

use of com.google.common.collect.MapDifference in project syndesis by syndesisio.

the class AbstractResourceUpdateHandler method computePropertiesDiffMessages.

// *********************
// Simple Bulletin
// *********************
protected List<LeveledMessage> computePropertiesDiffMessages(Supplier<LeveledMessage.Builder> supplier, Map<String, ConfigurationProperty> left, Map<String, ConfigurationProperty> right) {
    final List<LeveledMessage> messages = new ArrayList<>();
    final MapDifference<String, ConfigurationProperty> diff = Maps.difference(left, right);
    for (Map.Entry<String, MapDifference.ValueDifference<ConfigurationProperty>> entry : diff.entriesDiffering().entrySet()) {
        final MapDifference.ValueDifference<ConfigurationProperty> value = entry.getValue();
        final ConfigurationProperty leftValue = value.leftValue();
        final ConfigurationProperty rightValue = value.rightValue();
        // Special handling because of dynamic metadata
        if (!equals(leftValue, rightValue)) {
            messages.add(supplier.get().level(LeveledMessage.Level.INFO).code(LeveledMessage.Code.SYNDESIS001).build());
            break;
        }
    }
    if (!diff.entriesOnlyOnLeft().isEmpty() || !diff.entriesOnlyOnRight().isEmpty()) {
        messages.add(supplier.get().level(LeveledMessage.Level.WARN).code(LeveledMessage.Code.SYNDESIS002).build());
    }
    return messages;
}
Also used : ConfigurationProperty(io.syndesis.common.model.connection.ConfigurationProperty) MapDifference(com.google.common.collect.MapDifference) ArrayList(java.util.ArrayList) Map(java.util.Map) LeveledMessage(io.syndesis.common.model.bulletin.LeveledMessage)
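The manual equals() check above compensates for entriesDiffering() relying on plain value equality. Guava can also take the comparison as input: the three-argument Maps.difference(left, right, valueEquivalence) accepts a custom Equivalence, moving that logic into the diff itself. A sketch using case-insensitive string comparison as a stand-in for the ConfigurationProperty comparison:

import com.google.common.base.Equivalence;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.Locale;
import java.util.Map;

public class CustomEquivalenceDiff {
    public static void main(String[] args) {
        // Treat values that differ only in case as equivalent.
        Equivalence<String> ignoreCase = new Equivalence<String>() {
            @Override
            protected boolean doEquivalent(String a, String b) {
                return a.equalsIgnoreCase(b);
            }

            @Override
            protected int doHash(String s) {
                return s.toLowerCase(Locale.ROOT).hashCode();
            }
        };
        MapDifference<String, String> diff = Maps.difference(
                Map.of("type", "STRING", "scope", "global"),
                Map.of("type", "string", "scope", "local"),
                ignoreCase);
        System.out.println(diff.entriesDiffering().keySet()); // [scope]; "type" counts as unchanged
    }
}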

Aggregations

MapDifference (com.google.common.collect.MapDifference) 10
Map (java.util.Map) 7
Maps (com.google.common.collect.Maps) 5
List (java.util.List) 4
Collectors (java.util.stream.Collectors) 4
Lists (com.google.common.collect.Lists) 3
AutoCloseableLock (com.facebook.buck.util.concurrent.AutoCloseableLock) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 2
Strings (com.google.common.base.Strings) 2
ImmutableList (com.google.common.collect.ImmutableList) 2
Queues (com.google.common.collect.Queues) 2
Config (com.mcmoddev.orespawn.data.Config) 2
Constants (com.mcmoddev.orespawn.data.Constants) 2
QualifiedName (com.netflix.metacat.common.QualifiedName) 2
ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext) 2
HiveMetrics (com.netflix.metacat.connector.hive.monitoring.HiveMetrics) 2
HiveConnectorFastServiceMetric (com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) 2
Registry (com.netflix.spectator.api.Registry) 2
Types (java.sql.Types) 2
Arrays (java.util.Arrays) 2