
Example 21 with NonNull

Use of lombok.NonNull in project pravega by pravega.

From the class HealthEndpointImpl, method getHealth:

/**
 * Validates that the {@link HealthContributor} exists and requests its {@link Health}.
 *
 * @param id The id/name of the {@link HealthContributor} to check; a null id resolves to the root contributor.
 * @return The {@link Health} result of the {@link HealthContributor} with name 'id'; never null.
 * @throws ContributorNotFoundException (status 404) if 'id' does not map to a {@link HealthContributor}.
 */
@NonNull
@Override
public Health getHealth(String id) {
    Health health;
    if (id == null || id.equals(root.getName())) {
        health = updater.getLatestHealth();
    } else {
        List<String> path = Arrays.asList(id.split(DELIMITER));
        health = search(path, updater.getLatestHealth());
    }
    if (health == null) {
        throw new ContributorNotFoundException(String.format("No HealthContributor found with name '%s'", id), 404);
    }
    return health;
}
Also used: Health(io.pravega.shared.health.Health) ContributorNotFoundException(io.pravega.shared.health.ContributorNotFoundException) NonNull(lombok.NonNull)
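
A note on this usage: Lombok only generates runtime null checks for @NonNull on parameters and fields. On a method, as here, the annotation documents the non-null return contract for callers and static-analysis tools, and the method upholds it by throwing instead of returning null. A minimal caller sketch under that contract (the healthEndpoint wiring, the contributor id, and the getStatus() accessor are assumptions for illustration, not Pravega code):

try {
    // Contributor ids are path-like and split on a delimiter; this id is hypothetical.
    Health health = healthEndpoint.getHealth("segmentstore/cache");
    // No null check needed: getHealth is @NonNull and throws rather than returning null.
    System.out.println("Health status: " + health.getStatus());
} catch (ContributorNotFoundException e) {
    // Raised with HTTP-style status 404 when the id does not map to a contributor.
    System.err.println(e.getMessage());
}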

Example 22 with NonNull

Use of lombok.NonNull in project open-kilda by telstra.

From the class YFlowReadService, method getYFlowPaths:

/**
 * Gets y-flow paths.
 */
public YFlowPathsResponse getYFlowPaths(@NonNull String yFlowId) throws FlowNotFoundException {
    return transactionManager.doInTransaction(getReadOperationRetryPolicy(), () -> {
        Set<YSubFlow> subFlows = yFlowRepository.findById(yFlowId)
                .orElseThrow(() -> new FlowNotFoundException(yFlowId))
                .getSubFlows();
        Set<FlowPath> mainPaths = new HashSet<>();
        Set<FlowPath> protectedPaths = new HashSet<>();
        for (YSubFlow subFlow : subFlows) {
            Flow flow = subFlow.getFlow();
            mainPaths.add(flow.getForwardPath());
            if (flow.isAllocateProtectedPath() && flow.getProtectedForwardPath() != null) {
                protectedPaths.add(flow.getProtectedForwardPath());
            }
        }
        List<PathSegment> sharedPathSegments = IntersectionComputer.calculatePathIntersectionFromSource(mainPaths);
        PathInfoData sharedPath = FlowPathMapper.INSTANCE.map(sharedPathSegments);
        PathInfoData sharedProtectedPath;
        // At least 2 paths required to calculate Y-point.
        if (protectedPaths.size() < 2) {
            sharedProtectedPath = new PathInfoData();
        } else {
            List<PathSegment> pathSegments = IntersectionComputer.calculatePathIntersectionFromSource(protectedPaths);
            sharedProtectedPath = FlowPathMapper.INSTANCE.map(pathSegments);
        }
        List<SubFlowPathDto> subFlowPathDtos = mainPaths.stream()
                .map(flowPath -> new SubFlowPathDto(flowPath.getFlowId(), FlowPathMapper.INSTANCE.map(flowPath)))
                .collect(Collectors.toList());
        List<SubFlowPathDto> subFlowProtectedPathDtos = protectedPaths.stream()
                .map(flowPath -> new SubFlowPathDto(flowPath.getFlowId(), FlowPathMapper.INSTANCE.map(flowPath)))
                .collect(Collectors.toList());
        return new YFlowPathsResponse(sharedPath, subFlowPathDtos, sharedProtectedPath, subFlowProtectedPathDtos);
    });
}
Also used: TransactionManager(org.openkilda.persistence.tx.TransactionManager) PathSegment(org.openkilda.model.PathSegment) FlowPath(org.openkilda.model.FlowPath) SubFlowDto(org.openkilda.messaging.command.yflow.SubFlowDto) YFlowResponse(org.openkilda.messaging.command.yflow.YFlowResponse) FlowPathMapper(org.openkilda.wfm.share.mappers.FlowPathMapper) YFlowDto(org.openkilda.messaging.command.yflow.YFlowDto) PathInfoData(org.openkilda.messaging.info.event.PathInfoData) HashSet(java.util.HashSet) Flow(org.openkilda.model.Flow) IntersectionComputer(org.openkilda.wfm.share.service.IntersectionComputer) Duration(java.time.Duration) SubFlowsResponse(org.openkilda.messaging.command.yflow.SubFlowsResponse) YFlow(org.openkilda.model.YFlow) FlowRepository(org.openkilda.persistence.repositories.FlowRepository) PersistenceManager(org.openkilda.persistence.PersistenceManager) YFlowPathsResponse(org.openkilda.messaging.command.yflow.YFlowPathsResponse) FlowNotFoundException(org.openkilda.wfm.error.FlowNotFoundException) YSubFlow(org.openkilda.model.YSubFlow) ErrorType(org.openkilda.messaging.error.ErrorType) NonNull(lombok.NonNull) Collection(java.util.Collection) Set(java.util.Set) RetryPolicy(net.jodah.failsafe.RetryPolicy) PersistenceException(org.openkilda.persistence.exceptions.PersistenceException) Collectors(java.util.stream.Collectors) YFlowRepository(org.openkilda.persistence.repositories.YFlowRepository) YFlowMapper(org.openkilda.wfm.topology.flowhs.mapper.YFlowMapper) Objects(java.util.Objects) FlowProcessingException(org.openkilda.wfm.topology.flowhs.exception.FlowProcessingException) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) SubFlowPathDto(org.openkilda.messaging.command.yflow.SubFlowPathDto) Collections(java.util.Collections)
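
Here @NonNull sits on a parameter, which Lombok does desugar: the compiled method begins with a generated guard roughly equivalent to the sketch below (the exact exception message differs between Lombok versions):

public YFlowPathsResponse getYFlowPaths(@NonNull String yFlowId) throws FlowNotFoundException {
    // Inserted by Lombok before any user code runs:
    if (yFlowId == null) {
        throw new NullPointerException("yFlowId is marked non-null but is null");
    }
    // ... method body as above ...
}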

Example 23 with NonNull

Use of lombok.NonNull in project metacat by Netflix.

From the class HiveConnectorPartitionService, method savePartitions:

/**
 * {@inheritDoc}
 */
@Override
public PartitionsSaveResponse savePartitions(@Nonnull @NonNull final ConnectorContext requestContext,
                                             @Nonnull @NonNull final QualifiedName tableName,
                                             @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    // New partitions
    final List<Partition> hivePartitions = Lists.newArrayList();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions
        final List<Partition> existingHivePartitions = Lists.newArrayList();
        // Existing partition map
        Map<String, Partition> existingPartitionMap = Collections.emptyMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            final List<String> partitionNames = partitionInfos.stream().map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
                return partitionName;
            }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table, partitionNames);
        }
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        for (PartitionInfo partitionInfo : partitionInfos) {
            final String partitionName = partitionInfo.getName().getPartitionName();
            final Partition hivePartition = existingPartitionMap.get(partitionName);
            if (hivePartition == null) {
                addedPartitionIds.add(partitionName);
                hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
            } else {
                // Update the existing partition in place only when alterIfExists is set.
                if (partitionsSaveRequest.getAlterIfExists()) {
                    final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
                    existingPartitionIds.add(partitionName);
                    existingPartition.setParameters(hivePartition.getParameters());
                    existingPartition.setCreateTime(hivePartition.getCreateTime());
                    existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
                    existingHivePartitions.add(existingPartition);
                }
            }
        }
        final Set<String> deletePartitionIds = Sets.newHashSet();
        if (!partitionsSaveRequest.getAlterIfExists()) {
            deletePartitionIds.addAll(existingPartitionIds);
        }
        if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
            deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
        }
        if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
            copyTableSdToPartitionSd(existingHivePartitions, table);
            metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
        }
        copyTableSdToPartitionSd(hivePartitions, table);
        metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions, Lists.newArrayList(deletePartitionIds));
        final PartitionsSaveResponse result = new PartitionsSaveResponse();
        result.setAdded(addedPartitionIds);
        result.setUpdated(existingPartitionIds);
        return result;
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableName, "", exception);
        } else {
            throw new TableNotFoundException(tableName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        final List<String> ids = getFakePartitionName(hivePartitions);
        throw new PartitionAlreadyExistsException(tableName, ids, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
Also used: MetaException(org.apache.hadoop.hive.metastore.api.MetaException) SortOrder(com.netflix.metacat.common.dto.SortOrder) HashMap(java.util.HashMap) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) Partition(org.apache.hadoop.hive.metastore.api.Partition) Function(java.util.function.Function) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) ArrayList(java.util.ArrayList) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) Inject(javax.inject.Inject) LinkedHashMap(java.util.LinkedHashMap) Strings(com.google.common.base.Strings) ConnectorPartitionService(com.netflix.metacat.common.server.connectors.ConnectorPartitionService) InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException) Lists(com.google.common.collect.Lists) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) PartitionUtil(com.netflix.metacat.common.server.partition.util.PartitionUtil) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) NonNull(lombok.NonNull) Pageable(com.netflix.metacat.common.dto.Pageable) TException(org.apache.thrift.TException) Set(java.util.Set) QualifiedName(com.netflix.metacat.common.QualifiedName) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionsSaveResponse(com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) List(java.util.List) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) PartitionAlreadyExistsException(com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException) PartitionsSaveRequest(com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) ConnectorUtils(com.netflix.metacat.common.server.connectors.ConnectorUtils) PartitionNotFoundException(com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException) Collections(java.util.Collections) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) Sort(com.netflix.metacat.common.dto.Sort)
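
The checkIfExists and alterIfExists flags drive the three outcomes above: add (no existing partition found), alter in place (alterIfExists set), or drop-and-re-add (alterIfExists unset, so existing ids land in deletePartitionIds). A hedged caller sketch, assuming PartitionsSaveRequest and PartitionsSaveResponse are plain POJOs whose getters and setters mirror the accessors used in the method:

// Illustrative wiring only; field names are inferred from the getters called above.
final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setPartitions(partitionInfos);   // partitions to add or update
request.setCheckIfExists(true);          // resolve existing partitions up front
request.setAlterIfExists(true);          // update in place instead of drop-and-re-add
final PartitionsSaveResponse response =
        partitionService.savePartitions(requestContext, tableName, request);
// response.getAdded() / response.getUpdated() report which ids took which path.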

Example 24 with NonNull

Use of lombok.NonNull in project metacat by Netflix.

From the class HiveConnectorDatabaseService, method list:

/**
 * {@inheritDoc}
 */
@Override
public List<DatabaseInfo> list(@Nonnull @NonNull final ConnectorContext requestContext,
                               @Nonnull @NonNull final QualifiedName name,
                               @Nullable final QualifiedName prefix,
                               @Nullable final Sort sort,
                               @Nullable final Pageable pageable) {
    try {
        final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
        for (String databaseName : metacatHiveClient.getAllDatabases()) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
            // prefix is declared @Nullable, so guard against an NPE before filtering on it.
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
        }
        // Supporting sort by name only.
        if (sort != null) {
            ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
        }
        return ConnectorUtils.paginate(databaseInfos, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive database %s", name), exception);
    }
}
Also used: MetaException(org.apache.hadoop.hive.metastore.api.MetaException) DatabaseAlreadyExistsException(com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException) DatabaseNotFoundException(com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) Inject(javax.inject.Inject) InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException) DatabaseInfo(com.netflix.metacat.common.server.connectors.model.DatabaseInfo) Lists(com.google.common.collect.Lists) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) MetacatNotSupportedException(com.netflix.metacat.common.exception.MetacatNotSupportedException) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) ConnectorDatabaseService(com.netflix.metacat.common.server.connectors.ConnectorDatabaseService) NonNull(lombok.NonNull) Pageable(com.netflix.metacat.common.dto.Pageable) TException(org.apache.thrift.TException) QualifiedName(com.netflix.metacat.common.QualifiedName) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) List(java.util.List) ConnectorUtils(com.netflix.metacat.common.server.connectors.ConnectorUtils) Comparator(java.util.Comparator) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) Sort(com.netflix.metacat.common.dto.Sort)
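
Structurally this method is a filter -> sort -> paginate pipeline over database names. A self-contained, JDK-only sketch of the same shape (the sample names and page size are made up; ConnectorUtils is replaced by plain streams):

List<String> all = Arrays.asList("cat/analytics", "cat/db2", "cat/db1");
String prefix = "cat/db";
List<String> page = all.stream()
        .filter(n -> prefix == null || n.startsWith(prefix)) // null-safe prefix filter
        .sorted(Comparator.naturalOrder())                   // sort by name only
        .limit(25)                                           // first page of at most 25 entries
        .collect(Collectors.toList());
// page -> [cat/db1, cat/db2]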

Example 25 with NonNull

Use of lombok.NonNull in project metacat by Netflix.

From the class HiveConnectorFastPartitionService, method getPartitionKeys:

/**
 * {@inheritDoc}
 */
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", "getPartitionKeys");
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        final FilterPartition filter = new FilterPartition();
        // Whether the filter expression references the batch id field.
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                final String name = rs.getString("name");
                final String uri = rs.getString("uri");
                final long createdDate = rs.getLong(FIELD_DATE_CREATED);
                Map<String, String> values = null;
                if (hasDateCreated) {
                    values = Maps.newHashMap();
                    values.put(FIELD_DATE_CREATED, String.valueOf(createdDate));
                }
                if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                    names.add(name);
                }
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), filterExpression,
                partitionNames, SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
    } else {
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null,
                partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
    }
    final long duration = registry.clock().monotonicTime() - start;
    log.debug("### Time taken to complete getPartitionKeys is {} ms", duration);
    this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    return result;
}
Also used: Connection(java.sql.Connection) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) QueryRunner(org.apache.commons.dbutils.QueryRunner) NonNull(lombok.NonNull) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) ResultSetHandler(org.apache.commons.dbutils.ResultSetHandler) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) Id(com.netflix.spectator.api.Id) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) DataSource(javax.sql.DataSource) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) PartitionDetail(com.netflix.metacat.connector.hive.util.PartitionDetail) Functions(com.google.common.base.Functions) DataSourceManager(com.netflix.metacat.common.server.util.DataSourceManager) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest)
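
The ResultSetHandler lambdas above come from Apache Commons DbUtils, driven by the QueryRunner also visible in the imports. A minimal standalone sketch of that pattern (the SQL, table, and column names here are hypothetical, not Metacat's actual queries):

ResultSetHandler<List<String>> namesHandler = rs -> {
    final List<String> names = new ArrayList<>();
    while (rs.next()) {
        names.add(rs.getString("name"));
    }
    return names;
};
// QueryRunner.query(sql, handler, params) borrows a connection from the DataSource,
// executes the statement, hands the ResultSet to the handler, and closes everything.
List<String> names = new QueryRunner(dataSource)
        .query("SELECT name FROM partitions WHERE table_id = ?", namesHandler, tableId);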

Aggregations

NonNull (lombok.NonNull): 52
List (java.util.List): 31
Collectors (java.util.stream.Collectors): 22
Map (java.util.Map): 20
Slf4j (lombok.extern.slf4j.Slf4j): 18
ArrayList (java.util.ArrayList): 17
Collection (java.util.Collection): 17
HashMap (java.util.HashMap): 16
lombok.val (lombok.val): 14
Collections (java.util.Collections): 13
Duration (java.time.Duration): 11
Nullable (javax.annotation.Nullable): 11
TimeoutTimer (io.pravega.common.TimeoutTimer): 10
Set (java.util.Set): 10
CompletableFuture (java.util.concurrent.CompletableFuture): 10
Function (java.util.function.Function): 10
Futures (io.pravega.common.concurrent.Futures): 9
Getter (lombok.Getter): 9
Preconditions (com.google.common.base.Preconditions): 8
Strings (com.google.common.base.Strings): 8