
Example 56 with Objects

Use of java.util.Objects in project wildfly by wildfly.

The class HibernateSecondLevelCache, method addSecondLevelCacheDependencies:

public static void addSecondLevelCacheDependencies(Properties mutableProperties, String scopedPersistenceUnitName) {
    if (mutableProperties.getProperty(AvailableSettings.CACHE_REGION_PREFIX) == null) {
        if (scopedPersistenceUnitName != null) {
            mutableProperties.setProperty(AvailableSettings.CACHE_REGION_PREFIX, scopedPersistenceUnitName);
        }
    }
    String regionFactory = mutableProperties.getProperty(AvailableSettings.CACHE_REGION_FACTORY);
    if (regionFactory == null) {
        regionFactory = DEFAULT_REGION_FACTORY;
        mutableProperties.setProperty(AvailableSettings.CACHE_REGION_FACTORY, regionFactory);
    }
    if (regionFactory.equals(DEFAULT_REGION_FACTORY)) {
        // Set infinispan defaults
        String container = mutableProperties.getProperty(CACHE_CONTAINER);
        if (container == null) {
            container = DEFAULT_CACHE_CONTAINER;
            mutableProperties.setProperty(CACHE_CONTAINER, container);
        }
        /**
         * AS will need the ServiceBuilder<?> builder that used to be passed to
         * PersistenceProviderAdaptor.addProviderDependencies
         */
        Properties cacheSettings = new Properties();
        cacheSettings.put(CONTAINER, container);
        cacheSettings.put(ENTITY, mutableProperties.getProperty(ENTITY_CACHE_RESOURCE_PROP, DEF_ENTITY_RESOURCE));
        cacheSettings.put(COLLECTION, mutableProperties.getProperty(COLLECTION_CACHE_RESOURCE_PROP, DEF_ENTITY_RESOURCE));
        cacheSettings.put(NATURAL_ID, mutableProperties.getProperty(NATURAL_ID_CACHE_RESOURCE_PROP, DEF_ENTITY_RESOURCE));
        if (Boolean.parseBoolean(mutableProperties.getProperty(AvailableSettings.USE_QUERY_CACHE))) {
            cacheSettings.put(QUERY, mutableProperties.getProperty(QUERY_CACHE_RESOURCE_PROP, DEF_QUERY_RESOURCE));
            cacheSettings.put(TIMESTAMPS, mutableProperties.getProperty(TIMESTAMPS_CACHE_RESOURCE_PROP, DEF_QUERY_RESOURCE));
        }
        // Collect distinct cache configurations for standard regions
        Set<String> standardRegionConfigs = Stream.of(ENTITY, COLLECTION, NATURAL_ID, QUERY, TIMESTAMPS)
                .map(region -> cacheSettings.getProperty(region))
                .filter(Objects::nonNull)
                .collect(Collectors.toSet());
        int length = INFINISPAN_CONFIG_RESOURCE_PROP.length();
        String customRegionPrefix = INFINISPAN_CONFIG_RESOURCE_PROP.substring(0, length - 3) + mutableProperties.getProperty(AvailableSettings.CACHE_REGION_PREFIX, "");
        String customRegionSuffix = INFINISPAN_CONFIG_RESOURCE_PROP.substring(length - 4, length);
        // Collect distinct cache configurations for custom regions
        Set<String> customRegionConfigs = mutableProperties.stringPropertyNames().stream()
                .filter(name -> name.startsWith(customRegionPrefix) && name.endsWith(customRegionSuffix))
                .map(name -> mutableProperties.getProperty(name))
                .filter(config -> !standardRegionConfigs.contains(config))
                .collect(Collectors.toSet());
        if (!customRegionConfigs.isEmpty()) {
            cacheSettings.setProperty(CUSTOM, String.join(" ", customRegionConfigs));
        }
        Notification.addCacheDependencies(Classification.INFINISPAN, cacheSettings);
    }
}
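
The java.util.Objects usage in this snippet is the Objects::nonNull method reference, applied as a stream filter when collecting the distinct region configurations. A minimal, self-contained sketch of that pattern (the property names and values below are made up, not the WildFly settings):

import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class NonNullCacheConfigs {
    public static void main(String[] args) {
        Properties cacheSettings = new Properties();
        cacheSettings.setProperty("entity", "entity-cache");
        cacheSettings.setProperty("query", "query-cache");
        // "collection" is never set, so getProperty returns null for it.
        Set<String> configs = Stream.of("entity", "collection", "query")
                .map(cacheSettings::getProperty)
                .filter(Objects::nonNull)   // drop regions with no configured value
                .collect(Collectors.toSet());
        System.out.println(configs);        // prints the two configured values
    }
}

Filtering with Objects::nonNull keeps the resulting set free of nulls when some regions have no configuration.
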
Also used : NATURAL_ID_CACHE_RESOURCE_PROP(org.hibernate.cache.infinispan.InfinispanRegionFactory.NATURAL_ID_CACHE_RESOURCE_PROP) COLLECTION_CACHE_RESOURCE_PROP(org.hibernate.cache.infinispan.InfinispanRegionFactory.COLLECTION_CACHE_RESOURCE_PROP) DEF_QUERY_RESOURCE(org.hibernate.cache.infinispan.InfinispanRegionFactory.DEF_QUERY_RESOURCE) INFINISPAN_CONFIG_RESOURCE_PROP(org.hibernate.cache.infinispan.InfinispanRegionFactory.INFINISPAN_CONFIG_RESOURCE_PROP) Properties(java.util.Properties) Notification(org.jipijapa.event.impl.internal.Notification) DEF_ENTITY_RESOURCE(org.hibernate.cache.infinispan.InfinispanRegionFactory.DEF_ENTITY_RESOURCE) AvailableSettings(org.hibernate.cfg.AvailableSettings) Set(java.util.Set) DEFAULT_CACHE_CONTAINER(org.jboss.as.jpa.hibernate4.infinispan.InfinispanRegionFactory.DEFAULT_CACHE_CONTAINER) CACHE_CONTAINER(org.jboss.as.jpa.hibernate4.infinispan.InfinispanRegionFactory.CACHE_CONTAINER) Collectors(java.util.stream.Collectors) SharedInfinispanRegionFactory(org.jboss.as.jpa.hibernate4.infinispan.SharedInfinispanRegionFactory) Objects(java.util.Objects) Stream(java.util.stream.Stream) QUERY_CACHE_RESOURCE_PROP(org.hibernate.cache.infinispan.InfinispanRegionFactory.QUERY_CACHE_RESOURCE_PROP) TIMESTAMPS_CACHE_RESOURCE_PROP(org.hibernate.cache.infinispan.InfinispanRegionFactory.TIMESTAMPS_CACHE_RESOURCE_PROP) ENTITY_CACHE_RESOURCE_PROP(org.hibernate.cache.infinispan.InfinispanRegionFactory.ENTITY_CACHE_RESOURCE_PROP) Classification(org.jipijapa.cache.spi.Classification)

Example 57 with Objects

Use of java.util.Objects in project presto by prestodb.

The class ParquetHiveRecordCursor, method createParquetRecordReader:

private ParquetRecordReader<FakeParquetRecord> createParquetRecordReader(HdfsEnvironment hdfsEnvironment, String sessionUser, Configuration configuration, Path path, long start, long length, List<HiveColumnHandle> columns, boolean useParquetColumnNames, TypeManager typeManager, boolean predicatePushdownEnabled, TupleDomain<HiveColumnHandle> effectivePredicate) {
    ParquetDataSource dataSource = null;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
        dataSource = buildHdfsParquetDataSource(fileSystem, path, start, length);
        ParquetMetadata parquetMetadata = hdfsEnvironment.doAs(sessionUser, () -> ParquetFileReader.readFooter(configuration, path, NO_FILTER));
        List<BlockMetaData> blocks = parquetMetadata.getBlocks();
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        PrestoReadSupport readSupport = new PrestoReadSupport(useParquetColumnNames, columns, fileSchema);
        List<parquet.schema.Type> fields = columns.stream()
                .filter(column -> column.getColumnType() == REGULAR)
                .map(column -> getParquetType(column, fileSchema, useParquetColumnNames))
                .filter(Objects::nonNull)
                .collect(toList());
        MessageType requestedSchema = new MessageType(fileSchema.getName(), fields);
        LongArrayList offsets = new LongArrayList(blocks.size());
        for (BlockMetaData block : blocks) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                if (predicatePushdownEnabled) {
                    ParquetPredicate parquetPredicate = buildParquetPredicate(columns, effectivePredicate, fileMetaData.getSchema(), typeManager);
                    if (predicateMatches(parquetPredicate, block, dataSource, requestedSchema, effectivePredicate)) {
                        offsets.add(block.getStartingPos());
                    }
                } else {
                    offsets.add(block.getStartingPos());
                }
            }
        }
        ParquetInputSplit split = new ParquetInputSplit(path, start, start + length, length, null, offsets.toLongArray());
        TaskAttemptContext taskContext = ContextUtil.newTaskAttemptContext(configuration, new TaskAttemptID());
        return hdfsEnvironment.doAs(sessionUser, () -> {
            ParquetRecordReader<FakeParquetRecord> realReader = new PrestoParquetRecordReader(readSupport);
            realReader.initialize(split, taskContext);
            return realReader;
        });
    } catch (Exception e) {
        Throwables.propagateIfInstanceOf(e, PrestoException.class);
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e.getClass().getSimpleName().equals("BlockMissingException")) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    } finally {
        if (dataSource != null) {
            try {
                dataSource.close();
            } catch (IOException ignored) {
            }
        }
    }
}
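
Here Objects::nonNull drops the columns for which getParquetType returned null, i.e. requested columns that have no counterpart in the Parquet file schema. A reduced sketch of the same idea, with a plain Map standing in for the file schema (all names below are hypothetical, not Presto code):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

public class RequestedFields {
    public static void main(String[] args) {
        // A toy "file schema": column name -> physical type.
        Map<String, String> fileSchema = Map.of("id", "INT64", "name", "BINARY");
        List<String> requestedColumns = Arrays.asList("id", "name", "missing_col");
        // Columns absent from the file map to null and are dropped, as in the cursor code above.
        List<String> fields = requestedColumns.stream()
                .map(fileSchema::get)
                .filter(Objects::nonNull)
                .collect(Collectors.toList());
        System.out.println(fields);   // [INT64, BINARY]
    }
}
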
Also used : HdfsEnvironment(com.facebook.presto.hive.HdfsEnvironment) Arrays(java.util.Arrays) Block(com.facebook.presto.spi.block.Block) TypeManager(com.facebook.presto.spi.type.TypeManager) FileSystem(org.apache.hadoop.fs.FileSystem) HIVE_CURSOR_ERROR(com.facebook.presto.hive.HiveErrorCode.HIVE_CURSOR_ERROR) LongArrayList(it.unimi.dsi.fastutil.longs.LongArrayList) Slices.wrappedBuffer(io.airlift.slice.Slices.wrappedBuffer) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) DecimalType(com.facebook.presto.spi.type.DecimalType) DecimalMetadata(parquet.schema.DecimalMetadata) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) BigInteger(java.math.BigInteger) PrimitiveType(parquet.schema.PrimitiveType) MAP_KEY_VALUE(parquet.schema.OriginalType.MAP_KEY_VALUE) Decimals(com.facebook.presto.spi.type.Decimals) ReadSupport(parquet.hadoop.api.ReadSupport) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) BlockBuilder(com.facebook.presto.spi.block.BlockBuilder) Math.min(java.lang.Math.min) Chars.trimSpacesAndTruncateToLength(com.facebook.presto.spi.type.Chars.trimSpacesAndTruncateToLength) Binary(parquet.io.api.Binary) String.format(java.lang.String.format) Preconditions.checkState(com.google.common.base.Preconditions.checkState) Objects(java.util.Objects) TupleDomain(com.facebook.presto.spi.predicate.TupleDomain) ROW(com.facebook.presto.spi.type.StandardTypes.ROW) RecordCursor(com.facebook.presto.spi.RecordCursor) List(java.util.List) ParquetPredicateUtils.buildParquetPredicate(com.facebook.presto.hive.parquet.predicate.ParquetPredicateUtils.buildParquetPredicate) DecimalType.createDecimalType(com.facebook.presto.spi.type.DecimalType.createDecimalType) NO_FILTER(parquet.format.converter.ParquetMetadataConverter.NO_FILTER) Optional(java.util.Optional) Math.max(java.lang.Math.max) Varchars.truncateToLength(com.facebook.presto.spi.type.Varchars.truncateToLength) HiveColumnHandle(com.facebook.presto.hive.HiveColumnHandle) RecordMaterializer(parquet.io.api.RecordMaterializer) Converter(parquet.io.api.Converter) Varchars.isVarcharType(com.facebook.presto.spi.type.Varchars.isVarcharType) HdfsParquetDataSource.buildHdfsParquetDataSource(com.facebook.presto.hive.parquet.HdfsParquetDataSource.buildHdfsParquetDataSource) GroupConverter(parquet.io.api.GroupConverter) ParquetTypeUtils.getParquetType(com.facebook.presto.hive.parquet.ParquetTypeUtils.getParquetType) Slice(io.airlift.slice.Slice) ParquetFileReader(parquet.hadoop.ParquetFileReader) REGULAR(com.facebook.presto.hive.HiveColumnHandle.ColumnType.REGULAR) ParquetRecordReader(parquet.hadoop.ParquetRecordReader) PrestoException(com.facebook.presto.spi.PrestoException) PrimitiveConverter(parquet.io.api.PrimitiveConverter) HIVE_CANNOT_OPEN_SPLIT(com.facebook.presto.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT) HIVE_MISSING_DATA(com.facebook.presto.hive.HiveErrorCode.HIVE_MISSING_DATA) MAP(com.facebook.presto.spi.type.StandardTypes.MAP) ParquetPredicate(com.facebook.presto.hive.parquet.predicate.ParquetPredicate) DecimalUtils(com.facebook.presto.hive.util.DecimalUtils) ARRAY(com.facebook.presto.spi.type.StandardTypes.ARRAY) Float.floatToRawIntBits(java.lang.Float.floatToRawIntBits) ImmutableList(com.google.common.collect.ImmutableList) HiveUtil.closeWithSuppression(com.facebook.presto.hive.HiveUtil.closeWithSuppression) Type(com.facebook.presto.spi.type.Type) ParquetMetadata(parquet.hadoop.metadata.ParquetMetadata) Objects.requireNonNull(java.util.Objects.requireNonNull) 
DECIMAL(parquet.schema.OriginalType.DECIMAL) BlockBuilderStatus(com.facebook.presto.spi.block.BlockBuilderStatus) Dictionary(parquet.column.Dictionary) TIMESTAMP(com.facebook.presto.spi.type.TimestampType.TIMESTAMP) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) MessageType(parquet.schema.MessageType) Properties(java.util.Properties) ParquetPredicateUtils.predicateMatches(com.facebook.presto.hive.parquet.predicate.ParquetPredicateUtils.predicateMatches) HiveUtil.getDecimalType(com.facebook.presto.hive.HiveUtil.getDecimalType) ContextUtil(parquet.hadoop.util.ContextUtil) Throwables(com.google.common.base.Throwables) IOException(java.io.IOException) FileMetaData(parquet.hadoop.metadata.FileMetaData) BlockMetaData(parquet.hadoop.metadata.BlockMetaData) Collectors.toList(java.util.stream.Collectors.toList) GroupType(parquet.schema.GroupType) Chars.isCharType(com.facebook.presto.spi.type.Chars.isCharType) ParquetInputSplit(parquet.hadoop.ParquetInputSplit)

Example 58 with Objects

Use of java.util.Objects in project presto by prestodb.

The class ParquetPageSourceFactory, method createParquetPageSource:

public static ParquetPageSource createParquetPageSource(HdfsEnvironment hdfsEnvironment, String user, Configuration configuration, Path path, long start, long length, Properties schema, List<HiveColumnHandle> columns, boolean useParquetColumnNames, TypeManager typeManager, boolean predicatePushdownEnabled, TupleDomain<HiveColumnHandle> effectivePredicate) {
    AggregatedMemoryContext systemMemoryContext = new AggregatedMemoryContext();
    ParquetDataSource dataSource = null;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(user, path, configuration);
        dataSource = buildHdfsParquetDataSource(fileSystem, path, start, length);
        ParquetMetadata parquetMetadata = ParquetMetadataReader.readFooter(fileSystem, path);
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        List<parquet.schema.Type> fields = columns.stream()
                .filter(column -> column.getColumnType() == REGULAR)
                .map(column -> getParquetType(column, fileSchema, useParquetColumnNames))
                .filter(Objects::nonNull)
                .collect(toList());
        MessageType requestedSchema = new MessageType(fileSchema.getName(), fields);
        List<BlockMetaData> blocks = new ArrayList<>();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                blocks.add(block);
            }
        }
        if (predicatePushdownEnabled) {
            ParquetPredicate parquetPredicate = buildParquetPredicate(columns, effectivePredicate, fileMetaData.getSchema(), typeManager);
            final ParquetDataSource finalDataSource = dataSource;
            blocks = blocks.stream()
                    .filter(block -> predicateMatches(parquetPredicate, block, finalDataSource, requestedSchema, effectivePredicate))
                    .collect(toList());
        }
        ParquetReader parquetReader = new ParquetReader(fileSchema, requestedSchema, blocks, dataSource, typeManager, systemMemoryContext);
        return new ParquetPageSource(parquetReader, dataSource, fileSchema, requestedSchema, length, schema, columns, effectivePredicate, typeManager, useParquetColumnNames, systemMemoryContext);
    } catch (Exception e) {
        try {
            if (dataSource != null) {
                dataSource.close();
            }
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e.getClass().getSimpleName().equals("BlockMissingException")) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
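
Besides the Objects::nonNull filter, the import list for this class also shows Objects.requireNonNull, the other common java.util.Objects idiom: validating constructor or method arguments up front. A generic, hedged sketch of that idiom (the class below is purely illustrative, not Presto code):

import java.util.Objects;

public class PageSourceConfig {
    private final String path;
    private final long length;

    public PageSourceConfig(String path, long length) {
        // Fail fast with a clear message instead of a NullPointerException later on.
        this.path = Objects.requireNonNull(path, "path is null");
        this.length = length;
    }

    @Override
    public String toString() {
        return path + " (" + length + " bytes)";
    }
}
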
Also used : HdfsParquetDataSource.buildHdfsParquetDataSource(com.facebook.presto.hive.parquet.HdfsParquetDataSource.buildHdfsParquetDataSource) HdfsEnvironment(com.facebook.presto.hive.HdfsEnvironment) DateTimeZone(org.joda.time.DateTimeZone) ParquetTypeUtils.getParquetType(com.facebook.presto.hive.parquet.ParquetTypeUtils.getParquetType) TypeManager(com.facebook.presto.spi.type.TypeManager) FileSystem(org.apache.hadoop.fs.FileSystem) REGULAR(com.facebook.presto.hive.HiveColumnHandle.ColumnType.REGULAR) PrestoException(com.facebook.presto.spi.PrestoException) HIVE_CANNOT_OPEN_SPLIT(com.facebook.presto.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT) HIVE_MISSING_DATA(com.facebook.presto.hive.HiveErrorCode.HIVE_MISSING_DATA) HiveSessionProperties.isParquetOptimizedReaderEnabled(com.facebook.presto.hive.HiveSessionProperties.isParquetOptimizedReaderEnabled) ArrayList(java.util.ArrayList) ParquetReader(com.facebook.presto.hive.parquet.reader.ParquetReader) Inject(javax.inject.Inject) ParquetPredicate(com.facebook.presto.hive.parquet.predicate.ParquetPredicate) Configuration(org.apache.hadoop.conf.Configuration) ParquetMetadata(parquet.hadoop.metadata.ParquetMetadata) Objects.requireNonNull(java.util.Objects.requireNonNull) Path(org.apache.hadoop.fs.Path) ParquetMetadataReader(com.facebook.presto.hive.parquet.reader.ParquetMetadataReader) HiveClientConfig(com.facebook.presto.hive.HiveClientConfig) HiveSessionProperties.isParquetPredicatePushdownEnabled(com.facebook.presto.hive.HiveSessionProperties.isParquetPredicatePushdownEnabled) ImmutableSet(com.google.common.collect.ImmutableSet) MessageType(parquet.schema.MessageType) Properties(java.util.Properties) ParquetPredicateUtils.predicateMatches(com.facebook.presto.hive.parquet.predicate.ParquetPredicateUtils.predicateMatches) Set(java.util.Set) IOException(java.io.IOException) AggregatedMemoryContext(com.facebook.presto.hive.parquet.memory.AggregatedMemoryContext) FileMetaData(parquet.hadoop.metadata.FileMetaData) BlockMetaData(parquet.hadoop.metadata.BlockMetaData) HivePageSourceFactory(com.facebook.presto.hive.HivePageSourceFactory) String.format(java.lang.String.format) ConnectorSession(com.facebook.presto.spi.ConnectorSession) Objects(java.util.Objects) TupleDomain(com.facebook.presto.spi.predicate.TupleDomain) List(java.util.List) Collectors.toList(java.util.stream.Collectors.toList) ConnectorPageSource(com.facebook.presto.spi.ConnectorPageSource) ParquetPredicateUtils.buildParquetPredicate(com.facebook.presto.hive.parquet.predicate.ParquetPredicateUtils.buildParquetPredicate) Optional(java.util.Optional) HiveColumnHandle(com.facebook.presto.hive.HiveColumnHandle) HiveUtil.getDeserializerClassName(com.facebook.presto.hive.HiveUtil.getDeserializerClassName) HdfsParquetDataSource.buildHdfsParquetDataSource(com.facebook.presto.hive.parquet.HdfsParquetDataSource.buildHdfsParquetDataSource) BlockMetaData(parquet.hadoop.metadata.BlockMetaData) ParquetMetadata(parquet.hadoop.metadata.ParquetMetadata) ArrayList(java.util.ArrayList) ParquetReader(com.facebook.presto.hive.parquet.reader.ParquetReader) PrestoException(com.facebook.presto.spi.PrestoException) IOException(java.io.IOException) AggregatedMemoryContext(com.facebook.presto.hive.parquet.memory.AggregatedMemoryContext) PrestoException(com.facebook.presto.spi.PrestoException) IOException(java.io.IOException) ParquetTypeUtils.getParquetType(com.facebook.presto.hive.parquet.ParquetTypeUtils.getParquetType) MessageType(parquet.schema.MessageType) FileSystem(org.apache.hadoop.fs.FileSystem) 

Example 59 with Objects

Use of java.util.Objects in project geode by apache.

The class ClusterConfigurationLoader, method deployJarsReceivedFromClusterConfiguration:

/**
   * Deploys the jars received from the shared configuration and undeploys any other jars that
   * are not part of it.
   * 
   * @param cache Cache of this member
   * @param response {@link ConfigurationResponse} received from the locators
   */
public static void deployJarsReceivedFromClusterConfiguration(Cache cache, ConfigurationResponse response) throws IOException, ClassNotFoundException {
    logger.info("Requesting cluster configuration");
    if (response == null) {
        return;
    }
    String[] jarFileNames = response.getJarNames();
    byte[][] jarBytes = response.getJars();
    logger.info("Got response with jars: {}", Stream.of(jarFileNames).collect(joining(",")));
    if (jarFileNames != null && jarBytes != null) {
        JarDeployer jarDeployer = ClassPathLoader.getLatest().getJarDeployer();
        jarDeployer.suspendAll();
        try {
            List<String> extraJarsOnServer = jarDeployer.findDeployedJars().stream()
                    .map(DeployedJar::getJarName)
                    .filter(jarName -> !ArrayUtils.contains(jarFileNames, jarName))
                    .collect(toList());
            for (String extraJar : extraJarsOnServer) {
                logger.info("Removing jar not present in cluster configuration: {}", extraJar);
                jarDeployer.deleteAllVersionsOfJar(extraJar);
            }
            List<DeployedJar> deployedJars = jarDeployer.deploy(jarFileNames, jarBytes);
            deployedJars.stream().filter(Objects::nonNull).forEach((jar) -> logger.info("Deployed: {}", jar.getFile().getAbsolutePath()));
        } finally {
            jarDeployer.resumeAll();
        }
    }
}
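
The Objects::nonNull filter here guards the logging step; its presence suggests deploy(...) may return null entries (for example, for jars that did not need to be redeployed), and those are skipped before logging. A minimal standalone version of that null-skipping guard, with hypothetical jar paths:

import java.util.Arrays;
import java.util.List;
import java.util.Objects;

public class DeployLogging {
    public static void main(String[] args) {
        // A null entry stands in for a jar that did not need to be (re)deployed.
        List<String> deployed = Arrays.asList("/deploy/app.v1.jar", null, "/deploy/util.v2.jar");
        deployed.stream()
                .filter(Objects::nonNull)   // skip the null placeholders
                .forEach(jar -> System.out.println("Deployed: " + jar));
    }
}
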
Also used : ClassPathLoader(org.apache.geode.internal.ClassPathLoader) StringUtils(org.apache.commons.lang.StringUtils) Arrays(java.util.Arrays) ClusterConfigurationService(org.apache.geode.distributed.internal.ClusterConfigurationService) DeployedJar(org.apache.geode.internal.DeployedJar) JarDeployer(org.apache.geode.internal.JarDeployer) LocalizedStrings(org.apache.geode.internal.i18n.LocalizedStrings) DistributionLocatorId(org.apache.geode.internal.admin.remote.DistributionLocatorId) ArrayList(java.util.ArrayList) InetAddress(java.net.InetAddress) Cache(org.apache.geode.cache.Cache) ConfigSource(org.apache.geode.internal.ConfigSource) ConfigurationRequest(org.apache.geode.management.internal.configuration.messages.ConfigurationRequest) ByteArrayInputStream(java.io.ByteArrayInputStream) UnmodifiableException(org.apache.geode.UnmodifiableException) LogService(org.apache.geode.internal.logging.LogService) Map(java.util.Map) LinkedList(java.util.LinkedList) ClusterConfigurationNotAvailableException(org.apache.geode.internal.process.ClusterConfigurationNotAvailableException) Properties(java.util.Properties) TcpClient(org.apache.geode.distributed.internal.tcpserver.TcpClient) Set(java.util.Set) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) Collectors.joining(java.util.stream.Collectors.joining) Objects(java.util.Objects) Collectors.toList(java.util.stream.Collectors.toList) List(java.util.List) DistributionConfig(org.apache.geode.distributed.internal.DistributionConfig) Logger(org.apache.logging.log4j.Logger) Stream(java.util.stream.Stream) Configuration(org.apache.geode.management.internal.configuration.domain.Configuration) ConfigurationResponse(org.apache.geode.management.internal.configuration.messages.ConfigurationResponse) ArrayUtils(org.apache.commons.lang.ArrayUtils) InputStream(java.io.InputStream) DeployedJar(org.apache.geode.internal.DeployedJar) JarDeployer(org.apache.geode.internal.JarDeployer)

Example 60 with Objects

Use of java.util.Objects in project geode by apache.

The class JarDeployer, method findLatestValidDeployedJarFromDisk:

public DeployedJar findLatestValidDeployedJarFromDisk(String unversionedJarName) throws IOException {
    final File[] jarFiles = findSortedOldVersionsOfJar(unversionedJarName);
    Optional<File> latestValidDeployedJarOptional = Arrays.stream(jarFiles).filter(Objects::nonNull).filter(jarFile -> {
        try {
            return DeployedJar.hasValidJarContent(FileUtils.readFileToByteArray(jarFile));
        } catch (IOException e) {
            return false;
        }
    }).findFirst();
    if (!latestValidDeployedJarOptional.isPresent()) {
        // No valid version of this jar
        return null;
    }
    File latestValidDeployedJar = latestValidDeployedJarOptional.get();
    return new DeployedJar(latestValidDeployedJar, unversionedJarName);
}
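
This method combines Arrays.stream with Objects::nonNull to pick the first non-null (and then content-validated) jar version from a possibly sparse array. The essential pattern, reduced to a standalone sketch with made-up file names:

import java.util.Arrays;
import java.util.Objects;
import java.util.Optional;

public class FirstNonNull {
    public static void main(String[] args) {
        String[] jarVersions = {null, null, "lib.v3.jar", "lib.v2.jar"};
        Optional<String> latestValid = Arrays.stream(jarVersions)
                .filter(Objects::nonNull)   // ignore slots with no jar
                .findFirst();               // array order decides which entry wins
        System.out.println(latestValid.orElse("none"));   // lib.v3.jar
    }
}
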
Also used : Arrays(java.util.Arrays) BufferedInputStream(java.io.BufferedInputStream) URL(java.net.URL) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Matcher(java.util.regex.Matcher) LogService(org.apache.geode.internal.logging.LogService) Map(java.util.Map) Collectors.toSet(java.util.stream.Collectors.toSet) OutputStream(java.io.OutputStream) ReentrantLock(java.util.concurrent.locks.ReentrantLock) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) FileOutputStream(java.io.FileOutputStream) Set(java.util.Set) FileUtils(org.apache.commons.io.FileUtils) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) Collectors.joining(java.util.stream.Collectors.joining) File(java.io.File) Serializable(java.io.Serializable) Objects(java.util.Objects) Collectors.toList(java.util.stream.Collectors.toList) List(java.util.List) Lock(java.util.concurrent.locks.Lock) Logger(org.apache.logging.log4j.Logger) Stream(java.util.stream.Stream) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Collections(java.util.Collections) Objects(java.util.Objects) IOException(java.io.IOException) File(java.io.File)

Aggregations

Objects (java.util.Objects): 83
List (java.util.List): 45
Map (java.util.Map): 37
Collectors (java.util.stream.Collectors): 32
ArrayList (java.util.ArrayList): 30
Set (java.util.Set): 30
IOException (java.io.IOException): 22
HashMap (java.util.HashMap): 21
Optional (java.util.Optional): 19
Collections (java.util.Collections): 18
LoggerFactory (org.slf4j.LoggerFactory): 17
Stream (java.util.stream.Stream): 16
Logger (org.slf4j.Logger): 16
HashSet (java.util.HashSet): 14
Collection (java.util.Collection): 13
InputStream (java.io.InputStream): 12
ImmutableSet (com.google.common.collect.ImmutableSet): 10
Result (ddf.catalog.data.Result): 9
TimeUnit (java.util.concurrent.TimeUnit): 9
Metacard (ddf.catalog.data.Metacard): 8