Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project graylog2-server by Graylog2.
The class V20191125144500_MigrateDashboardsToViews, method migrateDashboard.
private Map.Entry<View, Search> migrateDashboard(Dashboard dashboard, Consumer<String> recordMigratedDashboardIds, Consumer<Map<String, Set<String>>> recordMigratedWidgetMap) {
    final Map<String, Set<String>> migratedWidgetIds = new HashMap<>(dashboard.widgets().size());
    final BiConsumer<String, String> recordMigratedWidgetIds = (String before, String after) -> migratedWidgetIds.merge(before, Collections.singleton(after), Sets::union);
    final Map<String, String> newWidgetTitles = new HashMap<>(dashboard.widgets().size());
    final BiConsumer<String, String> recordWidgetTitle = newWidgetTitles::put;
    final Set<ViewWidget> newViewWidgets = dashboard.widgets().stream()
            .sorted(Comparator.comparing(Widget::id))
            .flatMap(widget -> migrateWidget(widget, recordMigratedWidgetIds, recordWidgetTitle).stream())
            .collect(Collectors.toCollection(LinkedHashSet::new));
    final Map<String, ViewWidgetPosition> newViewWidgetPositions = migrateWidgetPositions(dashboard, Collections.unmodifiableMap(migratedWidgetIds), Collections.unmodifiableSet(newViewWidgets));
    final Map<String, Set<String>> newWidgetMapping = new HashMap<>(newViewWidgets.size());
    final BiConsumer<String, String> recordWidgetMapping = (String viewWidgetId, String searchTypeId) -> newWidgetMapping.merge(viewWidgetId, Collections.singleton(searchTypeId), Sets::union);
    final DateTime createdAt = dashboard.createdAt();
    final Set<SearchType> newSearchTypes = newViewWidgets.stream()
            .flatMap(viewWidget -> createSearchType(viewWidget, recordWidgetMapping).stream())
            .collect(Collectors.toSet());
    final Query newQuery = Query.create(randomUUIDProvider.get(), RelativeRange.create(300), "", newSearchTypes);
    final Set<Query> newQueries = Collections.singleton(newQuery);
    final Search newSearch = Search.create(randomObjectIdProvider.get(), newQueries, dashboard.creatorUserId(), createdAt);
    final ViewState newViewState = ViewState.create(Titles.ofWidgetTitles(newWidgetTitles).withQueryTitle(dashboard.title()), newViewWidgets, newWidgetMapping, newViewWidgetPositions);
    final View newView = View.create(dashboard.id(), View.Type.DASHBOARD, dashboard.title(),
            "This dashboard was migrated automatically.", dashboard.description(), newSearch.id(),
            Collections.singletonMap(newQuery.id(), newViewState), Optional.ofNullable(dashboard.creatorUserId()), createdAt);
    recordMigratedDashboardIds.accept(dashboard.id());
    recordMigratedWidgetMap.accept(migratedWidgetIds);
    return new AbstractMap.SimpleEntry<>(newView, newSearch);
}
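The Guava idiom to note here is Sets::union used as the remapping function of Map.merge: both recordMigratedWidgetIds and recordWidgetMapping accumulate a growing set of IDs per key without any explicit null or containment checks. Below is a minimal standalone sketch of that accumulation pattern; it uses the plain Guava coordinates rather than the Flink-shaded package name, and the class and key/value names are illustrative, not taken from the Graylog code.

import com.google.common.collect.Sets;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class MergeUnionSketch {
    public static void main(String[] args) {
        Map<String, Set<String>> migratedWidgetIds = new HashMap<>();
        // merge() unions the new singleton with whatever set is already mapped,
        // so repeated calls accumulate IDs instead of overwriting them.
        migratedWidgetIds.merge("old-widget-1", Collections.singleton("new-a"), Sets::union);
        migratedWidgetIds.merge("old-widget-1", Collections.singleton("new-b"), Sets::union);
        migratedWidgetIds.merge("old-widget-2", Collections.singleton("new-c"), Sets::union);
        System.out.println(migratedWidgetIds); // e.g. {old-widget-1=[new-a, new-b], old-widget-2=[new-c]}
    }
}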
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project pravega by pravega.
The class StreamSegmentContainerTests, method testAttributeCleanup.
/**
* Tests the ability to clean up Extended Attributes from Segment Metadatas that have not been used recently.
* This test does the following:
* 1. Sets up a custom SegmentContainer with a hook into the metadataCleanup task
* 2. Creates a segment and appends something to it, each time updating attributes (and verifies they were updated correctly).
* 3. Waits for the segment's attributes to be forgotten.
* 4. Verifies that the forgotten attributes can be fetched from the Attribute Index and re-cached in memory.
*/
@Test
public void testAttributeCleanup() throws Exception {
    final String segmentName = "segment";
    final AttributeId[] attributes = new AttributeId[] { Attributes.EVENT_COUNT, AttributeId.uuid(0, 1), AttributeId.uuid(0, 2), AttributeId.uuid(0, 3) };
    Map<AttributeId, Long> allAttributes = new HashMap<>();
    final TestContainerConfig containerConfig = new TestContainerConfig();
    containerConfig.setSegmentMetadataExpiration(Duration.ofMillis(250));
    containerConfig.setMaxCachedExtendedAttributeCount(1);
    @Cleanup TestContext context = createContext(containerConfig);
    OperationLogFactory localDurableLogFactory = new DurableLogFactory(FREQUENT_TRUNCATIONS_DURABLE_LOG_CONFIG, context.dataLogFactory, executorService());
    @Cleanup MetadataCleanupContainer localContainer = new MetadataCleanupContainer(CONTAINER_ID, containerConfig, localDurableLogFactory,
            context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory,
            context.getDefaultExtensions(), executorService());
    localContainer.startAsync().awaitRunning();
    // Create segment with initial attributes and verify they were set correctly.
    localContainer.createStreamSegment(segmentName, getSegmentType(segmentName), null, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Add one append with some attribute changes and verify they were set correctly.
    val appendAttributes = createAttributeUpdates(attributes);
    applyAttributes(appendAttributes, allAttributes);
    for (val au : appendAttributes) {
        localContainer.updateAttributes(segmentName, AttributeUpdateCollection.from(au), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }
    SegmentProperties sp = localContainer.getStreamSegmentInfo(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after initial updateAttributes() call.", allAttributes, sp, AUTO_ATTRIBUTES);
    // Wait until the attributes are forgotten
    localContainer.triggerAttributeCleanup(segmentName, containerConfig.getMaxCachedExtendedAttributeCount()).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Now get attributes again and verify them.
    sp = localContainer.getStreamSegmentInfo(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // During attribute eviction, we expect all core attributes to be preserved, and only 1 extended attribute (as
    // defined in the config) to be preserved. This extended attribute should be the last one we updated.
    val expectedAttributes = new HashMap<>(Attributes.getCoreNonNullAttributes(allAttributes));
    val lastExtAttribute = appendAttributes.stream().filter(au -> !Attributes.isCoreAttribute(au.getAttributeId())).reduce((a, b) -> b).get();
    expectedAttributes.put(lastExtAttribute.getAttributeId(), lastExtAttribute.getValue());
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after eviction.", expectedAttributes, sp, AUTO_ATTRIBUTES);
    val fetchedAttributes = localContainer.getAttributes(segmentName, allAttributes.keySet(), true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    AssertExtensions.assertMapEquals("Unexpected attributes after eviction & reload.", allAttributes, fetchedAttributes);
    sp = localContainer.getStreamSegmentInfo(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after eviction & reload+getInfo.", allAttributes, sp, AUTO_ATTRIBUTES);
}
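Guava's Sets is not visible in this excerpt; the stream idiom that carries the test's key assertion is reduce((a, b) -> b), which keeps only the last element surviving the filter, i.e. the most recently applied extended-attribute update. A small self-contained sketch of that idiom follows, with made-up values purely for illustration.

import java.util.List;
import java.util.Optional;

public class LastExtendedUpdateSketch {
    public static void main(String[] args) {
        List<String> updates = List.of("core:event_count", "ext:attr-1", "ext:attr-2");
        // reduce((a, b) -> b) discards everything but the last element that passed
        // the filter, i.e. the most recently applied extended-attribute update.
        Optional<String> lastExtended = updates.stream()
                .filter(u -> u.startsWith("ext:"))
                .reduce((a, b) -> b);
        System.out.println(lastExtended.orElse("none")); // ext:attr-2
    }
}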
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project xtext-core by eclipse.
The class ContentAssistFragment2, method getFQFeatureNamesToExclude.
public Set<String> getFQFeatureNamesToExclude(final Grammar g) {
    Set<String> _xifexpression = null;
    Grammar _nonTerminalsSuperGrammar = GrammarUtil2.getNonTerminalsSuperGrammar(g);
    boolean _tripleNotEquals = (_nonTerminalsSuperGrammar != null);
    if (_tripleNotEquals) {
        Sets.SetView<String> _xblockexpression = null;
        {
            final Set<String> thisGrammarFqFeatureNames = IterableExtensions.<String>toSet(this.computeFQFeatureNames(g));
            final Function1<Grammar, Iterable<String>> _function = (Grammar it) -> {
                return this.computeFQFeatureNames(it);
            };
            final Set<String> superGrammarsFqFeatureNames = IterableExtensions.<String>toSet(Iterables.<String>concat(ListExtensions.<Grammar, Iterable<String>>map(GrammarUtil.allUsedGrammars(g), _function)));
            _xblockexpression = Sets.<String>intersection(thisGrammarFqFeatureNames, superGrammarsFqFeatureNames);
        }
        _xifexpression = _xblockexpression;
    } else {
        _xifexpression = CollectionLiterals.<String>emptySet();
    }
    return _xifexpression;
}
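The Xtend-generated code above boils down to a single Guava call: Sets.intersection, which returns a Sets.SetView, an unmodifiable live view backed by both input sets. A minimal sketch is shown below; the feature names are illustrative, and calling immutableCopy() on the view is only needed if the backing sets may later be mutated.

import com.google.common.collect.Sets;

import java.util.Set;

public class FeatureNameIntersectionSketch {
    public static void main(String[] args) {
        Set<String> thisGrammar = Set.of("Model.greetings", "Greeting.name");
        Set<String> superGrammars = Set.of("Model.greetings", "Import.importURI");
        // The returned SetView is an unmodifiable live view backed by both sets;
        // call immutableCopy() if the inputs may change afterwards.
        Sets.SetView<String> toExclude = Sets.intersection(thisGrammar, superGrammars);
        System.out.println(toExclude); // [Model.greetings]
    }
}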
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project kylo by Teradata.
The class FeedImporter, method validateUserDataSets.
/**
* Validates that user data sets can be imported with provided properties.
*
* @return {@code true} if the feed can be imported, or {@code false} otherwise
*/
private boolean validateUserDataSets() {
    List<com.thinkbiganalytics.kylo.catalog.rest.model.DataSet> sourceDataSets = importFeed.getDataSetsToImport();
    if (sourceDataSets != null && !sourceDataSets.isEmpty()) {
        final UploadProgressMessage statusMessage = uploadProgressService.addUploadStatus(importFeed.getImportOptions().getUploadKey(), "Validating data sets.");
        final ImportComponentOption componentOption = importFeedOptions.findImportComponentOption(ImportComponent.USER_DATA_SETS);
        // Map the original datasets by their id
        Map<String, com.thinkbiganalytics.kylo.catalog.rest.model.DataSet> origDataSetMap = sourceDataSets.stream().collect(Collectors.toMap(ds -> ds.getId(), ds -> ds));
        // create a copy with the map so it can be modified from the user properties
        Map<String, com.thinkbiganalytics.kylo.catalog.rest.model.DataSet> modifiedDataSetMap = sourceDataSets.stream().collect(Collectors.toMap(ds -> ds.getId(), ds -> new com.thinkbiganalytics.kylo.catalog.rest.model.DataSet(ds)));
        // look at the properties supplied by the user and apply those first
        List<ImportProperty> properties = componentOption.getProperties();
        properties.stream().forEach(importProperty -> {
            if (StringUtils.isNotBlank(importProperty.getPropertyValue())) {
                com.thinkbiganalytics.kylo.catalog.rest.model.DataSet matchingDataSet = modifiedDataSetMap.get(importProperty.getComponentId());
                if (matchingDataSet != null) {
                    matchingDataSet.setId(importProperty.getPropertyValue());
                    log.info("Remap dataset old id: {}, new id: {}, details: {} ", importProperty.getComponentId(), importProperty.getPropertyValue(), importProperty);
                }
            }
        });
        FeedMetadata metadata = importFeed.getFeedToImport();
        // find the data sets that need importing
        Map<String, Map<String, String>> datasetAdditionalProperties = new HashMap<>();
        // find schemas associated with data set for data transform feeds
        if (metadata.getDataTransformation() != null && StringUtils.isNotBlank(metadata.getDataTransformation().getDataTransformScript())) {
            List<Map<String, Object>> nodes = (List<Map<String, Object>>) metadata.getDataTransformation().getChartViewModel().get("nodes");
            if (nodes != null) {
                nodes.stream().forEach((nodeMap) -> {
                    Map<String, Object> nodeDataSetMap = (Map<String, Object>) nodeMap.get("dataset");
                    if (nodeDataSetMap != null) {
                        String dataSetId = (String) nodeDataSetMap.get("id");
                        List<Map<String, String>> schema = (List<Map<String, String>>) nodeDataSetMap.get("schema");
                        if (schema != null) {
                            String schemaString = schema.stream().map(field -> {
                                Map<String, String> fieldMap = (Map<String, String>) field;
                                String name = fieldMap.get("name");
                                String dataType = fieldMap.get("dataType");
                                return name + " " + dataType;
                            }).collect(Collectors.joining(","));
                            // find the property associated with this dataset and add the schema as an additional property
                            datasetAdditionalProperties.computeIfAbsent(dataSetId, dsId -> new HashMap<String, String>()).put("schema", schemaString);
                        }
                    }
                });
            }
        }
        // create a map of the zip file datasets and the matching system datasets
        Map<com.thinkbiganalytics.kylo.catalog.rest.model.DataSet, com.thinkbiganalytics.kylo.catalog.rest.model.DataSet> importDataSetIdMap = new HashMap<>();
        // attempt to find the dataset and associate it with the incoming one
        sourceDataSets.stream().forEach(dataSet -> {
            com.thinkbiganalytics.kylo.catalog.rest.model.DataSet modifiedDataSet = modifiedDataSetMap.get(dataSet.getId());
            importDataSetIdMap.put(dataSet, findMatchingDataSet(modifiedDataSet));
        });
        // the list of properties to be returned to the user to reassign datasets
        List<ImportProperty> dataSetProperties = new ArrayList<>();
        boolean valid = true;
        // for all the values that are null they need to be created, otherwise we have what we need
        // if the value in the map is null, we need to ask the user to supply a dataset. Create the ImportProperty and mark as invalid
        importDataSetIdMap.entrySet().stream().forEach(entry -> {
            com.thinkbiganalytics.kylo.catalog.rest.model.DataSet incomingDataSet = entry.getKey();
            com.thinkbiganalytics.kylo.catalog.rest.model.DataSet matchingDataSet = entry.getValue();
            String datasetPathTitle = incomingDataSet.getPaths().stream().collect(Collectors.joining(","));
            String title = incomingDataSet.getDataSource().getTitle();
            ImportProperty property = ImportPropertyBuilder.anImportProperty()
                    .withComponentName(title)
                    .withDisplayName(datasetPathTitle)
                    .withPropertyKey("dataset_" + UUID.randomUUID().toString().replaceAll("-", "_"))
                    .withDescription(datasetPathTitle)
                    .withComponentId(incomingDataSet.getId())
                    .withImportComponent(ImportComponent.USER_DATA_SETS)
                    .asValid(matchingDataSet != null)
                    .withAdditionalProperties(datasetAdditionalProperties.get(incomingDataSet.getId()))
                    .putAdditionalProperty("dataset", "true")
                    .build();
            dataSetProperties.add(property);
            componentOption.setValidForImport(property.isValid());
        });
        componentOption.setProperties(dataSetProperties);
        // mark the component as valid only if the dataset properties are all valid
        componentOption.setValidForImport(dataSetProperties.stream().allMatch(ImportProperty::isValid));
        if (componentOption.isValidForImport()) {
            // replace the source datasets with the found ones
            metadata.setSourceDataSets(new ArrayList<>(importDataSetIdMap.values()));
            Set<String> datasourceIds = new HashSet<>();
            Map<String, String> chartModelReplacements = new HashMap<>();
            // replace the Data Transformation dataset references with the new one
            if (metadata.getDataTransformation() != null && StringUtils.isNotBlank(metadata.getDataTransformation().getDataTransformScript())) {
                String script = metadata.getDataTransformation().getDataTransformScript();
                for (Map.Entry<com.thinkbiganalytics.kylo.catalog.rest.model.DataSet, com.thinkbiganalytics.kylo.catalog.rest.model.DataSet> entry : importDataSetIdMap.entrySet()) {
                    com.thinkbiganalytics.kylo.catalog.rest.model.DataSet incomingDataSet = entry.getKey();
                    com.thinkbiganalytics.kylo.catalog.rest.model.DataSet matchingDataSet = entry.getValue();
                    if (!incomingDataSet.getId().equalsIgnoreCase(matchingDataSet.getId())) {
                        script = script.replaceAll(incomingDataSet.getId(), matchingDataSet.getId());
                        chartModelReplacements.put(incomingDataSet.getId(), matchingDataSet.getId());
                        chartModelReplacements.put(incomingDataSet.getDataSource().getId(), matchingDataSet.getDataSource().getId());
                    }
                    datasourceIds.add(matchingDataSet.getDataSource().getId());
                    metadata.getDataTransformation().setDatasourceIds(new ArrayList<>(datasourceIds));
                }
                metadata.getDataTransformation().setDataTransformScript(script);
                FeedImportDatasourceUtil.replaceChartModelReferences(metadata, chartModelReplacements);
            }
            statusMessage.update("Validated data sets.", true);
        } else {
            statusMessage.update("Validation Error. Additional properties are needed before uploading the feed.", false);
            importFeed.setValid(false);
        }
        uploadProgressService.completeSection(importFeed.getImportOptions(), ImportSection.Section.VALIDATE_USER_DATASOURCES);
        return componentOption.isValidForImport();
    }
    return true;
}
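Two small patterns in this method are easy to lift out: collapsing a node's schema fields into a single "name dataType" string with Collectors.joining, and filing that string per dataset id via computeIfAbsent. The sketch below reproduces just that step under simplified, invented inputs; the "name"/"dataType" keys mirror the chart-view-model fields, but the dataset id and values are made up.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class DatasetSchemaPropertySketch {
    public static void main(String[] args) {
        List<Map<String, String>> schema = List.of(
                Map.of("name", "id", "dataType", "bigint"),
                Map.of("name", "created_at", "dataType", "timestamp"));
        // Collapse each field to "name dataType" and join with commas, then file the
        // result under the dataset id as an additional import property.
        String schemaString = schema.stream()
                .map(field -> field.get("name") + " " + field.get("dataType"))
                .collect(Collectors.joining(","));
        Map<String, Map<String, String>> datasetAdditionalProperties = new HashMap<>();
        datasetAdditionalProperties.computeIfAbsent("dataset-1", id -> new HashMap<>()).put("schema", schemaString);
        System.out.println(datasetAdditionalProperties); // {dataset-1={schema=id bigint,created_at timestamp}}
    }
}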
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project grakn by graknlabs.
The class PostProcessingTest, method whenCreatingDuplicateResources_EnsureTheyAreMergedInPost.
@Test
public void whenCreatingDuplicateResources_EnsureTheyAreMergedInPost() throws InvalidKBException, InterruptedException, JsonProcessingException {
    String value = "1";
    String sample = "Sample";
    // Create GraknTx With Duplicate Resources
    EmbeddedGraknTx<?> tx = session.open(GraknTxType.WRITE);
    AttributeType<String> attributeType = tx.putAttributeType(sample, AttributeType.DataType.STRING);
    Attribute<String> attribute = attributeType.putAttribute(value);
    tx.commitSubmitNoLogs();
    tx = session.open(GraknTxType.WRITE);
    assertEquals(1, attributeType.instances().count());
    // Check duplicates have been created
    Set<Vertex> resource1 = createDuplicateResource(tx, attributeType, attribute);
    Set<Vertex> resource2 = createDuplicateResource(tx, attributeType, attribute);
    Set<Vertex> resource3 = createDuplicateResource(tx, attributeType, attribute);
    Set<Vertex> resource4 = createDuplicateResource(tx, attributeType, attribute);
    assertEquals(5, attributeType.instances().count());
    // Attribute vertex index
    String resourceIndex = resource1.iterator().next().value(INDEX.name()).toString();
    // Merge the attribute sets
    Set<Vertex> merged = Sets.newHashSet();
    merged.addAll(resource1);
    merged.addAll(resource2);
    merged.addAll(resource3);
    merged.addAll(resource4);
    tx.close();
    // Casting sets as ConceptIds
    Set<ConceptId> resourceConcepts = merged.stream().map(c -> ConceptId.of(Schema.PREFIX_VERTEX + c.id().toString())).collect(toSet());
    // Create Commit Log
    CommitLog commitLog = CommitLog.createDefault(tx.keyspace());
    commitLog.attributes().put(resourceIndex, resourceConcepts);
    // Submit it
    postProcessor.submit(commitLog);
    // Force running the PP job
    engine.server().backgroundTaskRunner().tasks().forEach(BackgroundTask::run);
    Thread.sleep(2000);
    tx = session.open(GraknTxType.READ);
    // Check it's fixed
    assertEquals(1, tx.getAttributeType(sample).instances().count());
    tx.close();
}
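Here Sets.newHashSet() simply provides a mutable set into which the four duplicate vertex sets are folded with addAll before being mapped to ConceptIds. A trivial sketch of that merge step follows, with made-up vertex ids and plain Guava coordinates instead of the Flink-shaded package.

import com.google.common.collect.Sets;

import java.util.Set;

public class MergeDuplicateVerticesSketch {
    public static void main(String[] args) {
        Set<String> resource1 = Set.of("v1", "v2");
        Set<String> resource2 = Set.of("v2", "v3");
        // Sets.newHashSet() gives a mutable set, so the duplicate vertex sets can be
        // folded together with addAll before the merged ids are put on the commit log.
        Set<String> merged = Sets.newHashSet();
        merged.addAll(resource1);
        merged.addAll(resource2);
        System.out.println(merged.size()); // 3 (duplicates collapse in the merged set)
    }
}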