Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project cas by apereo:
class WSFederationClaimsReleasePolicy, method getAttributesInternal.
/**
 * Maps the service's allowed attributes onto known WS-Federation claims and
 * returns the attributes to release, keyed by claim URI.
 *
 * @param context the release-policy evaluation context
 * @param attrs   the resolved principal attributes available for release
 * @return map of claim URI to attribute values to release
 */
@Override
public Map<String, List<Object>> getAttributesInternal(final RegisteredServiceAttributeReleasePolicyContext context, final Map<String, List<Object>> attrs) {
    // Case-insensitive copy so lookups are not sensitive to the casing
    // produced by upstream attribute repositories.
    val resolvedAttributes = new TreeMap<String, List<Object>>(String.CASE_INSENSITIVE_ORDER);
    resolvedAttributes.putAll(attrs);
    val attributesToRelease = Maps.<String, List<Object>>newHashMapWithExpectedSize(resolvedAttributes.size());
    // Only entries whose key names a recognized WS-Federation claim are considered.
    getAllowedAttributes().entrySet().stream().filter(entry -> WSFederationClaims.contains(entry.getKey().toUpperCase())).forEach(entry -> {
        val claimName = entry.getKey();
        val claim = WSFederationClaims.valueOf(claimName.toUpperCase());
        if (resolvedAttributes.containsKey(claim.getUri())) {
            // The claim URI itself is already present as an attribute; release it directly.
            attributesToRelease.put(claim.getUri(), resolvedAttributes.get(claim.getUri()));
        } else {
            // Look up the mapped attribute value only on the branch that uses it,
            // avoiding a wasted map lookup when the claim URI is present directly.
            val attributeValue = resolvedAttributes.get(entry.getValue());
            LOGGER.trace("Evaluating claim [{}] mapped to attribute value [{}]", claim.getUri(), attributeValue);
            mapSingleAttributeDefinition(claim.getUri(), entry.getValue(), attributeValue, resolvedAttributes, attributesToRelease);
        }
    });
    return attributesToRelease;
}
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project cas by apereo:
class LdapAuthenticationHandler, method collectAttributesForLdapEntry.
/**
 * Collect attributes for ldap entry.
 *
 * @param ldapEntry the ldap entry
 * @param username  the username
 * @return map of mapped attribute names to their collected values
 */
protected Map<String, List<Object>> collectAttributesForLdapEntry(final LdapEntry ldapEntry, final String username) {
    val attributeMap = Maps.<String, List<Object>>newHashMapWithExpectedSize(this.principalAttributeMap.size());
    // Bug fix: log the requested attribute names, not the keys of the freshly
    // created (and therefore empty) result map.
    LOGGER.debug("The following attributes are requested to be retrieved and mapped: [{}]", principalAttributeMap.keySet());
    principalAttributeMap.forEach((key, names) -> {
        val attributeNames = CollectionUtils.toCollection(names, ArrayList.class);
        // A single mapped name ending in ";" selects all LDAP attribute options,
        // i.e. every entry attribute whose name starts with "key;".
        if (attributeNames.size() == 1 && attributeNames.stream().allMatch(s -> s.toString().endsWith(";"))) {
            val attrs = ldapEntry.getAttributes().stream().filter(attr -> attr.getName().startsWith(key.concat(";"))).collect(Collectors.toList());
            attrs.forEach(attr -> attributeMap.putAll(collectAttributeValueForEntry(ldapEntry, attr.getName(), List.of())));
        } else {
            attributeMap.putAll(collectAttributeValueForEntry(ldapEntry, key, attributeNames));
        }
    });
    if (this.collectDnAttribute) {
        // Optionally expose the entry's DN as a principal attribute.
        LOGGER.debug("Recording principal DN attribute as [{}]", this.principalDnAttributeName);
        attributeMap.put(this.principalDnAttributeName, CollectionUtils.wrapList(ldapEntry.getDn()));
    }
    return attributeMap;
}
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project SpinalTap by airbnb:
class MysqlSchemaStore, method getAll.
/**
 * Loads every stored schema for this source and arranges it as a
 * database -> table -> (version -> schema) table.
 *
 * @return all known table schemas, grouped by database and table, versioned
 */
@Override
public Table<String, String, TreeMap<Integer, MysqlTableSchema>> getAll() {
    Table<String, String, TreeMap<Integer, MysqlTableSchema>> schemasByDatabaseAndTable =
        Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);
    List<String> serializedSchemas;
    try (Handle handle = jdbi.open()) {
        // Retry the query through the shared retryer; rows come back as raw strings.
        String query = String.format(GET_ALL_SCHEMA_QUERY, source);
        serializedSchemas = MysqlSchemaUtil.LIST_STRING_RETRYER.call(
            () -> handle.createQuery(query).map(StringColumnMapper.INSTANCE).list());
    } catch (Exception ex) {
        log.error(String.format("Failed to get all schema for source: %s", source), ex);
        Throwables.throwIfUnchecked(ex);
        throw new RuntimeException(ex);
    }
    // Deserialize each row and slot it into the per-table version map.
    for (String serialized : serializedSchemas) {
        MysqlTableSchema schemaInfo = deserializeSchemaInfo(serialized);
        String database = schemaInfo.getDatabase();
        String table = schemaInfo.getTable();
        TreeMap<Integer, MysqlTableSchema> versions = schemasByDatabaseAndTable.get(database, table);
        if (versions == null) {
            versions = Maps.newTreeMap();
            schemasByDatabaseAndTable.put(database, table, versions);
        }
        versions.put(schemaInfo.getVersion(), schemaInfo);
    }
    return schemasByDatabaseAndTable;
}
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project SpinalTap by airbnb:
class MysqlSchemaTracker, method processDDLStatement.
/**
 * Applies a DDL statement to the shadow schema database and reconciles the
 * schema store against it, skipping statements already recorded for this
 * binlog position.
 *
 * @param event the binlog query event carrying the DDL statement
 */
public void processDDLStatement(@NotNull final QueryEvent event) {
    final BinlogFilePos position = event.getBinlogFilePos();
    final String sql = event.getSql();
    // Idempotency guard: a schema entry at this binlog position means the
    // statement was already applied.
    if (schemaStore.get(position) != null) {
        log.info(String.format("DDL Statement (%s) has already been processed. (BinlogFilePos: %s)", sql, position));
        return;
    }
    // It could be a new database which has not been created in schema store database, so don't
    // switch to any database before applying database DDL.
    final String databaseContext = DATABASE_DDL_SQL_PATTERN.matcher(sql).find() ? "" : event.getDatabase();
    schemaDatabase.applyDDLStatement(databaseContext, sql);
    // Get schemas for active tables in schema store: latest version per table,
    // excluding tables whose latest schema has no columns (i.e. dropped tables).
    final Table<String, String, MysqlTableSchema> activeTableSchemasInStore =
        Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);
    for (final TreeMap<Integer, MysqlTableSchema> versions : schemaStore.getAll().values()) {
        final MysqlTableSchema latest = versions.lastEntry().getValue();
        if (!latest.getColumnInfo().isEmpty()) {
            activeTableSchemasInStore.put(latest.getDatabase(), latest.getTable(), latest);
        }
    }
    final Set<String> activeDatabasesInStore = activeTableSchemasInStore.rowKeySet();
    final Set<String> databasesInSchemaDatabase = schemaDatabase.listDatabases();
    // Handle new databases: present in the schema database but unknown to the store.
    for (final String newDatabase : Sets.difference(databasesInSchemaDatabase, activeDatabasesInStore)) {
        updateSchemaStore(newDatabase, event, Maps.newHashMap(), schemaDatabase.fetchTableSchema(newDatabase));
    }
    // Handle existing databases: diff store state against the schema database.
    for (final String database : activeDatabasesInStore) {
        updateSchemaStore(database, event, activeTableSchemasInStore.row(database), schemaDatabase.fetchTableSchema(database));
    }
}
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project SpinalTap by airbnb:
class MysqlSchemaTrackerTest, method testDropDatabase.
/**
 * Verifies that when a database is dropped, the tracker records an empty
 * (column-less) schema version for each of that database's tables.
 */
@Test
public void testDropDatabase() throws Exception {
    // Store initially knows DATABASE_NAME (two tables) and DATABASE2_NAME (one table).
    Table<String, String, TreeMap<Integer, MysqlTableSchema>> allTableSchemaInStore =
        Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);
    // Plain TreeMap construction instead of double-brace initialization, which
    // creates anonymous subclasses that capture a reference to the test instance.
    allTableSchemaInStore.put(DATABASE_NAME, "table1", new TreeMap<>(ImmutableMap.of(1, TABLE1_SCHEMA)));
    allTableSchemaInStore.put(DATABASE_NAME, "table2", new TreeMap<>(ImmutableMap.of(1, TABLE2_SCHEMA)));
    allTableSchemaInStore.put(DATABASE2_NAME, "table1", new TreeMap<>(ImmutableMap.of(1, DATABASE2_TABLE1_SCHEMA)));
    // After the DROP, the schema database only still contains DATABASE2_NAME.
    when(schemaDatabase.listDatabases()).thenReturn(Sets.newHashSet(DATABASE2_NAME));
    when(schemaStore.getAll()).thenReturn(allTableSchemaInStore);
    when(schemaDatabase.fetchTableSchema(DATABASE_NAME)).thenReturn(ImmutableMap.of());
    when(schemaDatabase.fetchTableSchema(DATABASE2_NAME)).thenReturn(ImmutableMap.of("table1", DATABASE2_TABLE1_SCHEMA));
    QueryEvent queryEvent = new QueryEvent(0, 0, binlogFilePos, DATABASE2_NAME, "DROP DATABASE `database1`");
    SchemaTracker schemaTracker = new MysqlSchemaTracker(schemaStore, schemaDatabase);
    schemaTracker.processDDLStatement(queryEvent);
    // Each table of the dropped database gets a new version with an empty column list.
    verify(schemaStore).put(DATABASE_NAME, "table1", queryEvent.getBinlogFilePos(), queryEvent.getTimestamp(), queryEvent.getSql(), Lists.newArrayList());
    verify(schemaStore).put(DATABASE_NAME, "table2", queryEvent.getBinlogFilePos(), queryEvent.getTimestamp(), queryEvent.getSql(), Lists.newArrayList());
}
Aggregations