
Example 56 with Nonnull

Use of javax.annotation.Nonnull in project metacat by Netflix.

From class HiveConnectorFastPartitionService, method getPartitionCount.

/**
 * Number of partitions for the given table.
 *
 * @param requestContext the connector request context
 * @param tableName      the qualified table name
 * @return the number of partitions
 */
@Override
public int getPartitionCount(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName tableName) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<>();
    tags.put("request", HiveMetrics.getPartitionCount.name());
    final Integer result;
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    try (Connection conn = dataSource.getConnection()) {
        // Handler for reading the result set
        final ResultSetHandler<Integer> handler = rs -> {
            int count = 0;
            while (rs.next()) {
                count = rs.getInt("count");
            }
            return count;
        };
        result = new QueryRunner().query(conn, SQL_GET_PARTITION_COUNT, handler, tableName.getDatabaseName(), tableName.getTableName());
    } catch (SQLException e) {
        throw new ConnectorException("getPartitionCount", e);
    } finally {
        // monotonicTime() is in nanoseconds; convert so the log message and the
        // millisecond timer below agree on units
        final long duration = TimeUnit.NANOSECONDS.toMillis(registry.clock().monotonicTime() - start);
        log.debug("### Time taken to complete getPartitionCount is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
Also used : Connection(java.sql.Connection) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) QueryRunner(org.apache.commons.dbutils.QueryRunner) NonNull(lombok.NonNull) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) ResultSetHandler(org.apache.commons.dbutils.ResultSetHandler) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) Id(com.netflix.spectator.api.Id) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) DataSource(javax.sql.DataSource) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) PartitionDetail(com.netflix.metacat.connector.hive.util.PartitionDetail) Functions(com.google.common.base.Functions) DataSourceManager(com.netflix.metacat.common.server.util.DataSourceManager) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest)
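Note the doubled annotation on every parameter: javax.annotation.Nonnull is JSR-305 metadata aimed at static-analysis tools (SpotBugs, IntelliJ, Error Prone), while Lombok's lombok.NonNull makes the compiler weave an explicit runtime null check into the method body. A minimal sketch of the pairing, assuming Lombok is wired into the build; the class and method names here are invented for illustration:

import javax.annotation.Nonnull;
import lombok.NonNull;

public class NullContractDemo {

    // @Nonnull documents the contract for static analyzers; @NonNull makes the
    // compiled method begin with an equivalent of:
    //     if (name == null) throw new NullPointerException(...);
    public int length(@Nonnull @NonNull final String name) {
        return name.length();
    }

    public static void main(final String[] args) {
        final NullContractDemo demo = new NullContractDemo();
        System.out.println(demo.length("partitions")); // prints 10
        // demo.length(null); // would fail fast with a NullPointerException
    }
}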

Example 57 with Nonnull

Use of javax.annotation.Nonnull in project metacat by Netflix.

From class HiveConnectorFastPartitionService, method getpartitions.

private List<PartitionInfo> getpartitions(@Nonnull @NonNull final String databaseName, @Nonnull @NonNull final String tableName, @Nullable final List<String> partitionIds, final String filterExpression, final Sort sort, final Pageable pageable, final boolean includePartitionDetails) {
    final FilterPartition filter = new FilterPartition();
    // does the filter expression reference the batch-id field?
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetHandler<List<PartitionDetail>> handler = rs -> {
        final List<PartitionDetail> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionDetail(id, sdId, serdeId, PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
            }
        }
        return result;
    };
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    final List<PartitionDetail> partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, SQL_GET_PARTITIONS, handler, sort, pageable);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionDetail partitionDetail : partitions) {
            partIds.add(partitionDetail.getId());
            sdIds.add(partitionDetail.getSdId());
            serdeIds.add(partitionDetail.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(serdeIds, SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
        // wait (bounded at one hour) for all pending parameter look-ups to complete
        Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1, TimeUnit.HOURS);
    } catch (Exception e) {
        throw Throwables.propagate(e);
        }
        for (PartitionDetail partitionDetail : partitions) {
            partitionDetail.getPartitionInfo().setMetadata(partitionParams.get(partitionDetail.getId()));
            partitionDetail.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionDetail.getSdId()));
            partitionDetail.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionDetail.getSerdeId()));
        }
    }
    for (PartitionDetail partitionDetail : partitions) {
        partitionInfos.add(partitionDetail.getPartitionInfo());
    }
    return partitionInfos;
}
Also used : Connection(java.sql.Connection) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) QueryRunner(org.apache.commons.dbutils.QueryRunner) NonNull(lombok.NonNull) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) ResultSetHandler(org.apache.commons.dbutils.ResultSetHandler) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) Id(com.netflix.spectator.api.Id) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) DataSource(javax.sql.DataSource) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) PartitionDetail(com.netflix.metacat.connector.hive.util.PartitionDetail) Functions(com.google.common.base.Functions) DataSourceManager(com.netflix.metacat.common.server.util.DataSourceManager) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest)
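Both metacat snippets lean on the Apache Commons DbUtils pattern: QueryRunner owns the JDBC boilerplate (parameter binding, statement execution, resource cleanup) and delegates row mapping to a ResultSetHandler callback. A stripped-down sketch of the same pattern; the table and SQL here are invented, and the real SQL_GET_PARTITIONS query is considerably more involved:

import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.ResultSetHandler;

public final class DbUtilsSketch {

    // Hypothetical query standing in for SQL_GET_PARTITIONS.
    private static final String SQL = "SELECT name FROM PARTITIONS WHERE db = ? AND tbl = ?";

    private DbUtilsSketch() {
    }

    public static List<String> partitionNames(final Connection conn, final String db, final String tbl)
            throws SQLException {
        // The handler receives the open ResultSet once and maps it to a value.
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = new ArrayList<>();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        // query() binds the parameters, executes, invokes the handler, and closes everything.
        return new QueryRunner().query(conn, SQL, handler, db, tbl);
    }
}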

Example 58 with Nonnull

Use of javax.annotation.Nonnull in project metacat by Netflix.

From class HiveConnectorTableService, method list.

/**
 * {@inheritDoc}
 */
@Override
public List<TableInfo> list(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    try {
        final List<TableInfo> tableInfos = Lists.newArrayList();
        for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), tableName);
            // prefix is @Nullable, so guard before dereferencing it
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), tableName);
            tableInfos.add(hiveMetacatConverters.toTableInfo(name, table));
        }
        // sorting is supported by table name only
        if (sort != null) {
            ConnectorUtils.sort(tableInfos, sort, Comparator.comparing(p -> p.getName().getTableName()));
        }
        return ConnectorUtils.paginate(tableInfos, pageable);
    } catch (MetaException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive table %s", name), exception);
    }
}
Also used : MetaException(org.apache.hadoop.hive.metastore.api.MetaException) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) DatabaseNotFoundException(com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) ConnectorTableService(com.netflix.metacat.common.server.connectors.ConnectorTableService) FieldInfo(com.netflix.metacat.common.server.connectors.model.FieldInfo) InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) ImmutableMap(com.google.common.collect.ImmutableMap) NonNull(lombok.NonNull) Pageable(com.netflix.metacat.common.dto.Pageable) TException(org.apache.thrift.TException) QualifiedName(com.netflix.metacat.common.QualifiedName) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) Maps(com.google.common.collect.Maps) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) List(java.util.List) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) TableAlreadyExistsException(com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException) TableType(org.apache.hadoop.hive.metastore.TableType) ConnectorUtils(com.netflix.metacat.common.server.connectors.ConnectorUtils) Comparator(java.util.Comparator) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) Sort(com.netflix.metacat.common.dto.Sort)
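ConnectorUtils.sort and ConnectorUtils.paginate are metacat helpers applied to the fully materialized list. Below is a rough sketch of what in-memory sort-then-paginate amounts to; the plain offset/limit arguments are a simplification standing in for metacat's Sort and Pageable types, not their actual API:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public final class PaginateSketch {

    private PaginateSketch() {
    }

    // Sort the whole list, then slice out one page.
    public static <T> List<T> sortAndPaginate(final List<T> items, final Comparator<T> comparator,
            final long offset, final long limit) {
        return items.stream()
                .sorted(comparator)
                .skip(offset)
                .limit(limit)
                .collect(Collectors.toList());
    }
}

Mirroring the snippet's name-only sort, a caller would pass a comparator built with Comparator.comparing on getName().getTableName(), together with the page bounds.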

Example 59 with Nonnull

Use of javax.annotation.Nonnull in project jackrabbit-oak by Apache.

From class ExternalLoginModule, method createAuthInfo.

@Nonnull
private AuthInfo createAuthInfo(@Nonnull String userId, @Nonnull Set<? extends Principal> principals) {
    Credentials creds;
    if (credentials instanceof ImpersonationCredentials) {
        creds = ((ImpersonationCredentials) credentials).getBaseCredentials();
    } else {
        creds = credentials;
    }
    Map<String, Object> attributes = new HashMap<>();
    Object shared = sharedState.get(SHARED_KEY_ATTRIBUTES);
    if (shared instanceof Map) {
        for (Map.Entry<?, ?> entry : ((Map<?, ?>) shared).entrySet()) {
            attributes.put(entry.getKey().toString(), entry.getValue());
        }
    } else if (creds != null) {
        attributes.putAll(credentialsSupport.getAttributes(creds));
    }
    return new AuthInfoImpl(userId, attributes, principals);
}
Also used : AuthInfoImpl(org.apache.jackrabbit.oak.spi.security.authentication.AuthInfoImpl) ImpersonationCredentials(org.apache.jackrabbit.oak.spi.security.authentication.ImpersonationCredentials) HashMap(java.util.HashMap) Map(java.util.Map) Credentials(javax.jcr.Credentials) Nonnull(javax.annotation.Nonnull)
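The instanceof check at the top of createAuthInfo is the usual decorator unwrap: an impersonating login hands the module a wrapper, and the attributes belong to the base credentials inside it. A generic sketch of the pattern; the WrappingCredentials type is invented here as a stand-in for Oak's ImpersonationCredentials:

import javax.jcr.Credentials;

public final class UnwrapSketch {

    // Invented stand-in for a credentials decorator such as ImpersonationCredentials.
    interface WrappingCredentials extends Credentials {
        Credentials getBaseCredentials();
    }

    private UnwrapSketch() {
    }

    // Peel back decorators until we reach the credentials that actually carry the attributes.
    static Credentials unwrap(Credentials creds) {
        while (creds instanceof WrappingCredentials) {
            creds = ((WrappingCredentials) creds).getBaseCredentials();
        }
        return creds;
    }
}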

Example 60 with Nonnull

Use of javax.annotation.Nonnull in project jackrabbit-oak by Apache.

From class TokenLoginModuleTest, method testCreateTokenFailed.

@Test
public void testCreateTokenFailed() throws Exception {
    TokenProvider tp = new TokenProvider() {

        @Override
        public boolean doCreateToken(@Nonnull Credentials credentials) {
            return true;
        }

        @CheckForNull
        @Override
        public TokenInfo createToken(@Nonnull Credentials credentials) {
            return null;
        }

        @CheckForNull
        @Override
        public TokenInfo createToken(@Nonnull String userId, @Nonnull Map<String, ?> attributes) {
            return null;
        }

        @CheckForNull
        @Override
        public TokenInfo getTokenInfo(@Nonnull String token) {
            return null;
        }
    };
    TokenLoginModule lm = new TokenLoginModule();
    lm.initialize(new Subject(), new TestCallbackHandler(tp),
            ImmutableMap.<String, Object>of(AbstractLoginModule.SHARED_KEY_CREDENTIALS, new Credentials() { }),
            ImmutableMap.<String, Object>of());
    lm.login();
    try {
        lm.commit();
        fail("LoginException expected");
    } catch (LoginException e) {
        // expected: commit() must fail because createToken returned null
    }
}
Also used : TokenProvider(org.apache.jackrabbit.oak.spi.security.authentication.token.TokenProvider) Nonnull(javax.annotation.Nonnull) LoginException(javax.security.auth.login.LoginException) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) GuestCredentials(javax.jcr.GuestCredentials) TokenCredentials(org.apache.jackrabbit.api.security.authentication.token.TokenCredentials) SimpleCredentials(javax.jcr.SimpleCredentials) Credentials(javax.jcr.Credentials) Subject(javax.security.auth.Subject) AbstractSecurityTest(org.apache.jackrabbit.oak.AbstractSecurityTest) Test(org.junit.Test)
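The try/fail/catch idiom at the end of the test predates assertThrows. On JUnit 4.13 or later the same expectation can be written in one line; a sketch, assuming that JUnit version is available to the project (which the source does not state):

import static org.junit.Assert.assertThrows;

import javax.security.auth.login.LoginException;

// Drop-in replacement for the try/fail/catch block above:
// commit() must throw because the TokenProvider returned null from createToken.
assertThrows(LoginException.class, lm::commit);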

Aggregations

Usage counts across indexed projects:

Nonnull (javax.annotation.Nonnull): 2624
Nullable (javax.annotation.Nullable): 338
ArrayList (java.util.ArrayList): 336
ItemStack (net.minecraft.item.ItemStack): 327
List (java.util.List): 305
Map (java.util.Map): 229
Layer (com.simiacryptus.mindseye.lang.Layer): 188
Tensor (com.simiacryptus.mindseye.lang.Tensor): 185
Arrays (java.util.Arrays): 182
Collectors (java.util.stream.Collectors): 169
IOException (java.io.IOException): 165
JsonObject (com.google.gson.JsonObject): 156
HashMap (java.util.HashMap): 145
IntStream (java.util.stream.IntStream): 145
Test (org.junit.Test): 143
LoggerFactory (org.slf4j.LoggerFactory): 138
Logger (org.slf4j.Logger): 137
Result (com.simiacryptus.mindseye.lang.Result): 130
TensorList (com.simiacryptus.mindseye.lang.TensorList): 123
DeltaSet (com.simiacryptus.mindseye.lang.DeltaSet): 111