Use of org.apache.flink.shaded.guava30.com.google.common.collect.MapDifference in project buck by facebook.
The class DaemonicParserState, method invalidateIfBuckConfigOrEnvHasChanged.
private synchronized void invalidateIfBuckConfigOrEnvHasChanged(Cell cell, Path buildFile) {
    try (AutoCloseableLock readLock = cellStateLock.readLock()) {
        DaemonicCellState state = cellPathToDaemonicState.get(cell.getRoot());
        if (state == null) {
            return;
        }
        // Invalidates and also keeps the state cell up-to-date
        state.invalidateIfBuckConfigHasChanged(cell, buildFile);
        Optional<MapDifference<String, String>> envDiff =
            state.invalidateIfEnvHasChanged(cell, buildFile);
        if (envDiff.isPresent()) {
            MapDifference<String, String> diff = envDiff.get();
            LOG.warn("Invalidating cache on environment change (%s)", diff);
            Set<String> environmentChanges = new HashSet<>();
            environmentChanges.addAll(diff.entriesOnlyOnLeft().keySet());
            environmentChanges.addAll(diff.entriesOnlyOnRight().keySet());
            environmentChanges.addAll(diff.entriesDiffering().keySet());
            cacheInvalidatedByEnvironmentVariableChangeCounter.addAll(environmentChanges);
            broadcastEventListener.broadcast(
                ParsingEvent.environmentalChange(environmentChanges.toString()));
        }
    }
}
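For reference, the three views used above are the whole of the MapDifference contract: entriesOnlyOnLeft() holds keys present only in the left map (removed), entriesOnlyOnRight() holds keys present only in the right map (added), and entriesDiffering() holds keys present in both with different values. A minimal, self-contained sketch of that split, using plain Guava imports and made-up environment values rather than Buck's code:

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class EnvDiffSketch {
    public static void main(String[] args) {
        // Hypothetical "old" and "new" environments.
        Map<String, String> oldEnv = Map.of("PATH", "/usr/bin", "LANG", "C", "TMPDIR", "/tmp");
        Map<String, String> newEnv = Map.of("PATH", "/usr/bin", "LANG", "en_US.UTF-8", "JAVA_HOME", "/opt/jdk");

        MapDifference<String, String> diff = Maps.difference(oldEnv, newEnv);

        // Collect every key that was removed, added, or changed.
        Set<String> changes = new HashSet<>();
        changes.addAll(diff.entriesOnlyOnLeft().keySet());   // TMPDIR (removed)
        changes.addAll(diff.entriesOnlyOnRight().keySet());  // JAVA_HOME (added)
        changes.addAll(diff.entriesDiffering().keySet());    // LANG (changed)
        System.out.println(changes);
    }
}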
Use of org.apache.flink.shaded.guava30.com.google.common.collect.MapDifference in project cassandra by apache.
The class Schema, method reload.
private void reload(KeyspaceMetadata previous, KeyspaceMetadata updated) {
    Keyspace keyspace = getKeyspaceInstance(updated.name);
    if (keyspace != null)
        keyspace.setMetadata(updated);

    MapDifference<TableId, TableMetadata> tablesDiff = previous.tables.diff(updated.tables);
    MapDifference<TableId, ViewMetadata> viewsDiff = previous.views.diff(updated.views);
    MapDifference<String, TableMetadata> indexesDiff = previous.tables.indexesDiff(updated.tables);

    // clean up after removed entries
    tablesDiff.entriesOnlyOnLeft().values().forEach(table -> metadataRefs.remove(table.id));
    viewsDiff.entriesOnlyOnLeft().values().forEach(view -> metadataRefs.remove(view.metadata.id));
    indexesDiff.entriesOnlyOnLeft().values().forEach(indexTable ->
        indexMetadataRefs.remove(Pair.create(indexTable.keyspace, indexTable.indexName().get())));

    // load up new entries
    tablesDiff.entriesOnlyOnRight().values().forEach(table ->
        metadataRefs.put(table.id, new TableMetadataRef(table)));
    viewsDiff.entriesOnlyOnRight().values().forEach(view ->
        metadataRefs.put(view.metadata.id, new TableMetadataRef(view.metadata)));
    indexesDiff.entriesOnlyOnRight().values().forEach(indexTable ->
        indexMetadataRefs.put(Pair.create(indexTable.keyspace, indexTable.indexName().get()),
            new TableMetadataRef(indexTable)));

    // refresh refs to updated ones
    tablesDiff.entriesDiffering().values().forEach(diff ->
        metadataRefs.get(diff.rightValue().id).set(diff.rightValue()));
    viewsDiff.entriesDiffering().values().forEach(diff ->
        metadataRefs.get(diff.rightValue().metadata.id).set(diff.rightValue().metadata));
    indexesDiff.entriesDiffering().values().stream()
        .map(MapDifference.ValueDifference::rightValue)
        .forEach(indexTable -> indexMetadataRefs.get(
            Pair.create(indexTable.keyspace, indexTable.indexName().get())).set(indexTable));
}
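The reload above is a three-pass pattern: drop refs that only the old schema had, register refs that only the new schema has, and refresh refs whose metadata changed, always keeping the right-hand (updated) value. A simplified sketch of the same pattern over plain String maps; the types and values here are illustrative, not Cassandra's:

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.HashMap;
import java.util.Map;

public class ReloadSketch {
    public static void main(String[] args) {
        Map<String, String> previous = Map.of("t1", "v1", "t2", "v2");
        Map<String, String> updated = Map.of("t2", "v2'", "t3", "v3");
        Map<String, String> refs = new HashMap<>(previous);

        MapDifference<String, String> diff = Maps.difference(previous, updated);
        // 1. clean up entries that disappeared
        diff.entriesOnlyOnLeft().keySet().forEach(refs::remove);
        // 2. load up entries that are new
        refs.putAll(diff.entriesOnlyOnRight());
        // 3. refresh entries whose value changed, keeping the updated (right) side
        diff.entriesDiffering().forEach((key, valueDiff) -> refs.put(key, valueDiff.rightValue()));

        System.out.println(refs); // t1 removed, t3 added, t2 refreshed to v2'
    }
}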
Use of org.apache.flink.shaded.guava30.com.google.common.collect.MapDifference in project metacat by Netflix.
The class DirectSqlDatabase, method update.
/**
 * Updates the database object.
 * @param databaseInfo database object
 */
public void update(final DatabaseInfo databaseInfo) {
    log.debug("Start: Database update using direct sql for {}", databaseInfo.getName());
    final long start = registry.clock().wallTime();
    try {
        final Long databaseId = getDatabaseId(databaseInfo.getName());
        final DatabaseInfo existingDatabaseInfo = getDatabaseById(databaseId, databaseInfo.getName());
        final Map<String, String> newMetadata =
            databaseInfo.getMetadata() == null ? Maps.newHashMap() : databaseInfo.getMetadata();
        final MapDifference<String, String> diff =
            Maps.difference(existingDatabaseInfo.getMetadata(), newMetadata);
        insertDatabaseParams(databaseId, diff.entriesOnlyOnRight());
        final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
        updateDatabaseParams(databaseId, updateParams);
        final String uri = Strings.isNullOrEmpty(databaseInfo.getUri())
            ? existingDatabaseInfo.getUri() : databaseInfo.getUri();
        final String newOwner = getOwner(databaseInfo.getAudit());
        // Fall back to the existing owner only when the incoming audit carries no owner.
        final String owner = Strings.isNullOrEmpty(newOwner)
            ? existingDatabaseInfo.getAudit().getCreatedBy() : newOwner;
        jdbcTemplate.update(SQL.UPDATE_DATABASE,
            new SqlParameterValue(Types.VARCHAR, uri),
            new SqlParameterValue(Types.VARCHAR, owner),
            new SqlParameterValue(Types.BIGINT, databaseId));
    } finally {
        this.fastServiceMetric.recordTimer(
            HiveMetrics.TagAlterDatabase.getMetricName(), registry.clock().wallTime() - start);
        log.debug("End: Database update using direct sql for {}", databaseInfo.getName());
    }
}
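The diff drives two different SQL operations here: entriesOnlyOnRight() yields brand-new parameters that need an insert, while entriesDiffering() yields existing keys whose value changed and therefore need an update; keys present only on the left (the existing metadata) are left untouched. A standalone sketch of that split with made-up values (insertDatabaseParams, updateDatabaseParams and the actual SQL are not shown):

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.Map;
import java.util.stream.Collectors;

public class ParamDiffSketch {
    public static void main(String[] args) {
        Map<String, String> existing = Map.of("owner", "alice", "comment", "old comment");
        Map<String, String> incoming = Map.of("owner", "bob", "retention", "30d");

        MapDifference<String, String> diff = Maps.difference(existing, incoming);

        // New keys -> rows to insert.
        Map<String, String> toInsert = diff.entriesOnlyOnRight();          // {retention=30d}
        // Changed keys -> rows to update, keeping the incoming (right) value.
        Map<String, String> toUpdate = diff.entriesDiffering().entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().rightValue())); // {owner=bob}
        // Keys only in "existing" ("comment") are not touched by this kind of update.

        System.out.println("insert=" + toInsert + " update=" + toUpdate);
    }
}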
Use of org.apache.flink.shaded.guava30.com.google.common.collect.MapDifference in project metacat by Netflix.
The class DirectSqlTable, method updateIcebergTable.
/**
 * Locks and updates the iceberg table so that no other request can modify it at the same time.
 * 1. Gets the table parameters and locks the requested records; if the lock cannot be acquired,
 *    the update request fails.
 * 2. Validates the metadata location.
 * 3. If validated, updates the table parameters.
 * @param tableInfo table info
 */
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void updateIcebergTable(final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    final Map<String, String> newTableMetadata = tableInfo.getMetadata();
    //
    if (newTableMetadata == null || newTableMetadata.isEmpty()) {
        final String message = String.format("No parameters defined for iceberg table %s", tableName);
        log.warn(message);
        throw new InvalidMetaException(tableName, message, null);
    }
    //
    // If the previous metadata location is not empty, check if it is valid.
    //
    final String previousMetadataLocation = newTableMetadata.get(PARAM_PREVIOUS_METADATA_LOCATION);
    if (config.isIcebergPreviousMetadataLocationCheckEnabled()
            && !StringUtils.isBlank(previousMetadataLocation)) {
        boolean doesPathExists = true;
        try {
            final Path previousMetadataPath = new Path(previousMetadataLocation);
            doesPathExists = warehouse.getFs(previousMetadataPath).exists(previousMetadataPath);
        } catch (Exception ignored) {
            log.warn(String.format("Failed getting the filesystem for %s", previousMetadataLocation));
            registry.counter(HiveMetrics.CounterFileSystemReadFailure.name()).increment();
        }
        if (!doesPathExists) {
            throw new InvalidMetaException(tableName,
                String.format("Invalid metadata for %s..Location %s does not exist",
                    tableName, previousMetadataLocation), null);
        }
    }
    final Long tableId = getTableId(tableName);
    Map<String, String> existingTableMetadata = null;
    log.debug("Lock Iceberg table {}", tableName);
    try {
        existingTableMetadata = jdbcTemplate.query(SQL.TABLE_PARAMS_LOCK,
            new SqlParameterValue[] { new SqlParameterValue(Types.BIGINT, tableId) }, rs -> {
                final Map<String, String> result = Maps.newHashMap();
                while (rs.next()) {
                    result.put(rs.getString(COL_PARAM_KEY), rs.getString(COL_PARAM_VALUE));
                }
                return result;
            });
    } catch (EmptyResultDataAccessException ex) {
        log.info(String.format("No parameters defined for iceberg table %s", tableName));
    } catch (Exception ex) {
        final String message = String.format("Failed getting a lock on iceberg table %s", tableName);
        log.warn(message, ex);
        throw new InvalidMetaException(tableName, message, null);
    }
    if (existingTableMetadata == null) {
        existingTableMetadata = Maps.newHashMap();
    }
    final boolean needUpdate = validateIcebergUpdate(tableName, existingTableMetadata, newTableMetadata);
    final String existingMetadataLocation = existingTableMetadata.get(PARAM_METADATA_LOCATION);
    final String newMetadataLocation = newTableMetadata.get(PARAM_METADATA_LOCATION);
    log.info("Servicing Iceberg commit request with tableId: {}, needUpdate: {}, "
            + "previousLocation: {}, existingLocation: {}, newLocation: {}",
        tableId, needUpdate, previousMetadataLocation, existingMetadataLocation, newMetadataLocation);
    if (needUpdate) {
        final MapDifference<String, String> diff = Maps.difference(existingTableMetadata, newTableMetadata);
        insertTableParams(tableId, diff.entriesOnlyOnRight());
        final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
        updateTableParams(tableId, updateParams);
        //
        // In addition to updating the table params, the table location in HMS needs to be updated for usage by
        // external tools, that access HMS directly
        //
        updateTableLocation(tableId, tableInfo);
        log.info("Finished updating Iceberg table with tableId: {}", tableId);
    }
    log.debug("Unlocked Iceberg table {}", tableName);
}
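The method above delegates the actual commit decision to validateIcebergUpdate, which is not shown. The usual optimistic-concurrency idea behind committing an Iceberg metadata pointer through a metastore is that the caller's recorded previous metadata location must still match what is currently stored, otherwise another writer has committed in between. The following is only a hypothetical illustration of that check over plain maps, not Metacat's actual validation code, and the parameter-key constants are stand-ins:

import java.util.Map;
import java.util.Objects;

public class CommitCheckSketch {
    // Hypothetical stand-ins for the PARAM_METADATA_LOCATION /
    // PARAM_PREVIOUS_METADATA_LOCATION keys referenced in the snippet above.
    static final String METADATA_LOCATION = "metadata_location";
    static final String PREVIOUS_METADATA_LOCATION = "previous_metadata_location";

    // Allow the commit only if nothing else has moved the table's metadata pointer
    // since the caller read it (or if the table has no stored pointer yet).
    static boolean canCommit(Map<String, String> existing, Map<String, String> incoming) {
        String currentLocation = existing.get(METADATA_LOCATION);
        String expectedPrevious = incoming.get(PREVIOUS_METADATA_LOCATION);
        return currentLocation == null || Objects.equals(currentLocation, expectedPrevious);
    }

    public static void main(String[] args) {
        Map<String, String> stored = Map.of(METADATA_LOCATION, "s3://bucket/meta/v2.json");
        Map<String, String> commit = Map.of(
            PREVIOUS_METADATA_LOCATION, "s3://bucket/meta/v2.json",
            METADATA_LOCATION, "s3://bucket/meta/v3.json");
        System.out.println(canCommit(stored, commit)); // true: the pointer has not moved since it was read
    }
}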
Use of org.apache.flink.shaded.guava30.com.google.common.collect.MapDifference in project syndesis by syndesisio.
The class AbstractResourceUpdateHandler, method computePropertiesDiffMessages.
// *********************
// Simple Bulletin
// *********************
protected List<LeveledMessage> computePropertiesDiffMessages(
        Supplier<LeveledMessage.Builder> supplier,
        Map<String, ConfigurationProperty> left,
        Map<String, ConfigurationProperty> right) {
    final List<LeveledMessage> messages = new ArrayList<>();
    final MapDifference<String, ConfigurationProperty> diff = Maps.difference(left, right);
    for (Map.Entry<String, MapDifference.ValueDifference<ConfigurationProperty>> entry
            : diff.entriesDiffering().entrySet()) {
        final MapDifference.ValueDifference<ConfigurationProperty> value = entry.getValue();
        final ConfigurationProperty leftValue = value.leftValue();
        final ConfigurationProperty rightValue = value.rightValue();
        // Special handling because of dynamic metadata
        if (!equals(leftValue, rightValue)) {
            messages.add(supplier.get().level(LeveledMessage.Level.INFO).code(LeveledMessage.Code.SYNDESIS001).build());
            break;
        }
    }
    if (!diff.entriesOnlyOnLeft().isEmpty() || !diff.entriesOnlyOnRight().isEmpty()) {
        messages.add(supplier.get().level(LeveledMessage.Level.WARN).code(LeveledMessage.Code.SYNDESIS002).build());
    }
    return messages;
}
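The explicit equals(leftValue, rightValue) re-check is needed because entriesDiffering() compares values with Object.equals, which would also flag properties whose only differences come from dynamic metadata. Guava additionally offers a Maps.difference overload that takes an Equivalence, letting a caller plug a custom comparison into the diff itself. A small hypothetical sketch over plain strings, not Syndesis code:

import com.google.common.base.Equivalence;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.Map;

public class EquivalenceDiffSketch {
    public static void main(String[] args) {
        // Compare values case-insensitively instead of with equals().
        Equivalence<String> ignoreCase = new Equivalence<String>() {
            @Override
            protected boolean doEquivalent(String a, String b) {
                return a.equalsIgnoreCase(b);
            }
            @Override
            protected int doHash(String s) {
                return s.toLowerCase().hashCode();
            }
        };
        Map<String, String> left = Map.of("label", "Name", "kind", "string");
        Map<String, String> right = Map.of("label", "NAME", "kind", "number");

        MapDifference<String, String> diff = Maps.difference(left, right, ignoreCase);
        // Only "kind" still differs once case is ignored.
        System.out.println(diff.entriesDiffering().keySet()); // [kind]
    }
}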