Example 31 with Supplier

Use of com.google.common.base.Supplier in project GeoGig by boundlessgeo.

In class HttpRemoteRepo, method sendPackedObjects:

private void sendPackedObjects(final List<ObjectId> toSend, final Set<ObjectId> roots, Deduplicator deduplicator, final ProgressListener progress) {
    Set<ObjectId> sent = new HashSet<ObjectId>();
    while (!toSend.isEmpty()) {
        try {
            BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {

                @Override
                public void callback(Supplier<RevObject> supplier) {
                    RevObject object = supplier.get();
                    progress.setProgress(progress.getProgress() + 1);
                    if (object instanceof RevCommit) {
                        RevCommit commit = (RevCommit) object;
                        toSend.remove(commit.getId());
                        roots.removeAll(commit.getParentIds());
                        roots.add(commit.getId());
                    }
                }
            };
            ObjectDatabase database = localRepository.objectDatabase();
            BinaryPackedObjects packer = new BinaryPackedObjects(database);
            ImmutableList<ObjectId> have = ImmutableList.copyOf(roots);
            final boolean traverseCommits = false;
            Stopwatch sw = Stopwatch.createStarted();
            ObjectSerializingFactory serializer = DataStreamSerializationFactoryV1.INSTANCE;
            SendObjectsConnectionFactory outFactory;
            ObjectFunnel objectFunnel;
            outFactory = new SendObjectsConnectionFactory(repositoryURL);
            int pushBytesLimit = parsePushLimit();
            objectFunnel = ObjectFunnels.newFunnel(outFactory, serializer, pushBytesLimit);
            final long writtenObjectsCount = packer.write(objectFunnel, toSend, have, sent, callback, traverseCommits, deduplicator);
            objectFunnel.close();
            sw.stop();
            long compressedSize = outFactory.compressedSize;
            long uncompressedSize = outFactory.uncompressedSize;
            LOGGER.info(String.format("HttpRemoteRepo: Written %,d objects." + " Time to process: %s." + " Compressed size: %,d bytes. Uncompressed size: %,d bytes.", writtenObjectsCount, sw, compressedSize, uncompressedSize));
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
    }
}
Also used : ObjectSerializingFactory(org.locationtech.geogig.storage.ObjectSerializingFactory) ObjectId(org.locationtech.geogig.api.ObjectId) RevObject(org.locationtech.geogig.api.RevObject) Stopwatch(com.google.common.base.Stopwatch) IOException(java.io.IOException) ObjectDatabase(org.locationtech.geogig.storage.ObjectDatabase) Supplier(com.google.common.base.Supplier) HashSet(java.util.HashSet) RevCommit(org.locationtech.geogig.api.RevCommit)
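
The Supplier here defers deserialization: BinaryPackedObjects only materializes a RevObject when the callback actually calls get(). A minimal, self-contained sketch of that idea (not GeoGig code; the Callback interface and the String value type below are stand-ins for the real types):

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

public class LazyCallbackSketch {

    /** Callback that receives a lazily computed value. */
    interface Callback<T> {
        void callback(Supplier<T> supplier);
    }

    public static void main(String[] args) {
        // The producer wraps an expensive computation (here a stand-in for
        // deserializing a RevObject) in a memoizing Supplier, so the work
        // runs at most once, and only if a callback asks for the value.
        Supplier<String> lazyObject = Suppliers.memoize(new Supplier<String>() {

            @Override
            public String get() {
                System.out.println("deserializing...");
                return "RevObject-stand-in";
            }
        });
        Callback<String> consumer = new Callback<String>() {

            @Override
            public void callback(Supplier<String> supplier) {
                // Only callbacks that need the object pay for get().
                System.out.println("got " + supplier.get());
            }
        };
        consumer.callback(lazyObject);
    }
}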

Example 32 with Supplier

Use of com.google.common.base.Supplier in project android by JetBrains.

In class AdtModuleImporter, method findModules:

@Override
@NotNull
public Set<ModuleToImport> findModules(@NotNull VirtualFile importSource) throws IOException {
    final AdtImportBuilder builder = (AdtImportBuilder) myContext.getProjectBuilder();
    assert builder != null;
    builder.setSelectedProject(virtualToIoFile(importSource));
    final GradleImport gradleImport = getGradleImport();
    gradleImport.importProjects(Collections.singletonList(virtualToIoFile(importSource)));
    Map<String, File> adtProjects = gradleImport.getDetectedModuleLocations();
    Set<ModuleToImport> modules = Sets.newHashSet();
    for (final Map.Entry<String, File> entry : adtProjects.entrySet()) {
        VirtualFile location = findFileByIoFile(entry.getValue(), false);
        modules.add(new ModuleToImport(entry.getKey(), location, new Supplier<Iterable<String>>() {

            @Override
            public Iterable<String> get() {
                return gradleImport.getProjectDependencies(entry.getKey());
            }
        }));
    }
    return modules;
}
Also used : VirtualFile(com.intellij.openapi.vfs.VirtualFile) AdtImportBuilder(com.android.tools.idea.gradle.eclipse.AdtImportBuilder) GradleImport(com.android.tools.idea.gradle.eclipse.GradleImport) Supplier(com.google.common.base.Supplier) VfsUtilCore.virtualToIoFile(com.intellij.openapi.vfs.VfsUtilCore.virtualToIoFile) VfsUtil.findFileByIoFile(com.intellij.openapi.vfs.VfsUtil.findFileByIoFile) File(java.io.File) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) NotNull(org.jetbrains.annotations.NotNull)
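
Wrapping the dependency lookup in a Supplier<Iterable<String>> means gradleImport.getProjectDependencies is only invoked if the import wizard actually asks for a module's dependencies. A minimal sketch of the same lazy-lookup idea, using Guava's Suppliers.memoize to also cache the result (the module name and dependency list below are hypothetical):

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import java.util.Arrays;

public class LazyDependenciesSketch {

    public static void main(String[] args) {
        final String moduleName = "app"; // hypothetical module
        // Dependencies are resolved only when a caller asks for them;
        // memoize() caches the result after the first call.
        Supplier<Iterable<String>> deps = Suppliers.memoize(new Supplier<Iterable<String>>() {

            @Override
            public Iterable<String> get() {
                System.out.println("resolving dependencies for " + moduleName);
                return Arrays.asList("lib-core", "lib-ui"); // hypothetical result
            }
        });
        System.out.println(deps.get()); // triggers resolution
        System.out.println(deps.get()); // served from the cache
    }
}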

Example 33 with Supplier

Use of com.google.common.base.Supplier in project cdap by caskdata.

In class HBaseQueueTest, method getConsumerConfigCache:

private ConsumerConfigCache getConsumerConfigCache(QueueName queueName) throws Exception {
    String tableName = HBaseQueueAdmin.getConfigTableName();
    TableId hTableId = tableUtil.createHTableId(new NamespaceId(queueName.getFirstComponent()), tableName);
    try (HTable hTable = tableUtil.createHTable(hConf, hTableId)) {
        HTableDescriptor htd = hTable.getTableDescriptor();
        final TableName configTableName = htd.getTableName();
        String prefix = htd.getValue(Constants.Dataset.TABLE_PREFIX);
        CConfigurationReader cConfReader = new CConfigurationReader(hConf, HTableNameConverter.getSysConfigTablePrefix(prefix));
        return TableNameAwareCacheSupplier.getSupplier(configTableName, cConfReader, new Supplier<TransactionVisibilityState>() {

            @Override
            public TransactionVisibilityState get() {
                try {
                    return getTransactionManager().getSnapshot();
                } catch (IOException e) {
                    throw Throwables.propagate(e);
                }
            }
        }, new InputSupplier<HTableInterface>() {

            @Override
            public HTableInterface getInput() throws IOException {
                return new HTable(hConf, configTableName);
            }
        }).get();
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) TableName(org.apache.hadoop.hbase.TableName) CConfigurationReader(co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader) InputSupplier(com.google.common.io.InputSupplier) Supplier(com.google.common.base.Supplier) TableNameAwareCacheSupplier(co.cask.cdap.data2.transaction.queue.hbase.coprocessor.TableNameAwareCacheSupplier) NamespaceId(co.cask.cdap.proto.id.NamespaceId) IOException(java.io.IOException) HTable(org.apache.hadoop.hbase.client.HTable) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
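
Since Supplier.get() cannot declare checked exceptions, the snapshot Supplier above wraps the IOException from getSnapshot() in an unchecked exception via Throwables.propagate. A minimal sketch of that idiom, assuming a stand-in loadSnapshot() method in place of the transaction manager call:

import com.google.common.base.Supplier;
import com.google.common.base.Throwables;
import java.io.IOException;

public class CheckedSupplierSketch {

    // Stand-in for a call that declares a checked exception.
    static String loadSnapshot() throws IOException {
        return "snapshot@" + System.nanoTime();
    }

    public static void main(String[] args) {
        // get() has no throws clause, so the common idiom is to catch
        // the checked exception and rethrow it unchecked.
        Supplier<String> snapshot = new Supplier<String>() {

            @Override
            public String get() {
                try {
                    return loadSnapshot();
                } catch (IOException e) {
                    throw Throwables.propagate(e);
                }
            }
        };
        System.out.println(snapshot.get());
    }
}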

Example 34 with Supplier

Use of com.google.common.base.Supplier in project cdap by caskdata.

In class DistributedLogFramework, method createService:

@Override
protected Service createService(Set<Integer> partitions) {
    Map<String, LogPipelineSpecification<AppenderContext>> specs = new LogPipelineLoader(cConf).load(contextProvider);
    int pipelineCount = specs.size();
    // Create one KafkaLogProcessorPipeline per spec
    final List<Service> pipelines = new ArrayList<>();
    for (final LogPipelineSpecification<AppenderContext> pipelineSpec : specs.values()) {
        final CConfiguration cConf = pipelineSpec.getConf();
        final AppenderContext context = pipelineSpec.getContext();
        long bufferSize = getBufferSize(pipelineCount, cConf, partitions.size());
        final String topic = cConf.get(Constants.Logging.KAFKA_TOPIC);
        final KafkaPipelineConfig config = new KafkaPipelineConfig(topic, partitions, bufferSize, cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS), cConf.getInt(Constants.Logging.PIPELINE_KAFKA_FETCH_SIZE), cConf.getLong(Constants.Logging.PIPELINE_CHECKPOINT_INTERVAL_MS));
        RetryStrategy retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.log.process.");
        pipelines.add(new RetryOnStartFailureService(new Supplier<Service>() {

            @Override
            public Service get() {
                return new KafkaLogProcessorPipeline(new LogProcessorPipelineContext(cConf, context.getName(), context, context.getMetricsContext(), context.getInstanceId()), checkpointManagerFactory.create(topic, pipelineSpec.getCheckpointPrefix()), brokerService, config);
            }
        }, retryStrategy));
    }
    // Returns a Service that starts and stops all the pipelines.
    return new AbstractIdleService() {

        @Override
        protected void startUp() throws Exception {
            // Start all pipelines
            validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {

                @Override
                public ListenableFuture<State> apply(Service service) {
                    return service.start();
                }
            }));
        }

        @Override
        protected void shutDown() throws Exception {
            // Stop all pipelines
            validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {

                @Override
                public ListenableFuture<State> apply(Service service) {
                    return service.stop();
                }
            }));
        }
    };
}
Also used : LogPipelineSpecification(co.cask.cdap.logging.framework.LogPipelineSpecification) ArrayList(java.util.ArrayList) KafkaPipelineConfig(co.cask.cdap.logging.pipeline.kafka.KafkaPipelineConfig) ResourceBalancerService(co.cask.cdap.common.resource.ResourceBalancerService) AbstractIdleService(com.google.common.util.concurrent.AbstractIdleService) RetryOnStartFailureService(co.cask.cdap.common.service.RetryOnStartFailureService) DiscoveryService(org.apache.twill.discovery.DiscoveryService) Service(com.google.common.util.concurrent.Service) BrokerService(org.apache.twill.kafka.client.BrokerService) LogPipelineLoader(co.cask.cdap.logging.framework.LogPipelineLoader) LogProcessorPipelineContext(co.cask.cdap.logging.pipeline.LogProcessorPipelineContext) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Function(com.google.common.base.Function) KafkaLogProcessorPipeline(co.cask.cdap.logging.pipeline.kafka.KafkaLogProcessorPipeline) AppenderContext(co.cask.cdap.api.logging.AppenderContext) Supplier(com.google.common.base.Supplier) RetryStrategy(co.cask.cdap.common.service.RetryStrategy)
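
Passing a Supplier<Service> rather than a Service lets RetryOnStartFailureService construct a fresh pipeline instance for every start attempt, since a Guava Service that failed to start cannot be restarted in place. A simplified sketch of that factory-plus-retry idea (not the CDAP implementation; the local Service interface and the retry loop below are stand-ins):

import com.google.common.base.Supplier;

public class ServiceFactorySketch {

    interface Service {
        void start() throws Exception;
    }

    // Simplified retry-on-start loop; each attempt gets a fresh instance
    // from the factory Supplier instead of reusing a failed one.
    static void startWithRetries(Supplier<Service> factory, int maxAttempts) throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                factory.get().start();
                return;
            } catch (Exception e) {
                if (attempt >= maxAttempts) {
                    throw e;
                }
                System.out.println("start failed, retrying: " + e.getMessage());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        final int[] failures = { 2 }; // fail the first two attempts
        startWithRetries(new Supplier<Service>() {

            @Override
            public Service get() {
                return new Service() {

                    @Override
                    public void start() throws Exception {
                        if (failures[0]-- > 0) {
                            throw new IllegalStateException("broker not ready");
                        }
                        System.out.println("pipeline started");
                    }
                };
            }
        }, 5);
    }
}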

Example 35 with Supplier

Use of com.google.common.base.Supplier in project bazel by bazelbuild.

In class BazelRulesModule, method workspaceInit:

@Override
public void workspaceInit(BlazeDirectories directories, WorkspaceBuilder builder) {
    builder.addSkyFunction(FdoSupportValue.SKYFUNCTION, new FdoSupportFunction());
    builder.addPrecomputedValue(PrecomputedValue.injected(GenQuery.QUERY_OUTPUT_FORMATTERS, new Supplier<ImmutableList<OutputFormatter>>() {

        @Override
        public ImmutableList<OutputFormatter> get() {
            return env.getRuntime().getQueryOutputFormatters();
        }
    }));
}
Also used : FdoSupportFunction(com.google.devtools.build.lib.rules.cpp.FdoSupportFunction) Supplier(com.google.common.base.Supplier) OutputFormatter(com.google.devtools.build.lib.query2.output.OutputFormatter)
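
The injected Supplier defers the call to env.getRuntime() until the precomputed value is actually evaluated, because the runtime is not yet available when the workspace is wired up. A toy sketch of the same defer-until-lookup pattern (the registry, key, and runtimeValue below are hypothetical, not Bazel APIs):

import com.google.common.base.Supplier;
import java.util.HashMap;
import java.util.Map;

public class DeferredInjectionSketch {

    // Toy registry mapping keys to Suppliers; values are resolved only
    // when a consumer looks them up, mirroring how the module defers to
    // a runtime that is not set at registration time.
    static final Map<String, Supplier<?>> REGISTRY = new HashMap<>();

    static String runtimeValue; // populated later, like the Blaze runtime

    public static void main(String[] args) {
        REGISTRY.put("query.output.formatters", new Supplier<String>() {

            @Override
            public String get() {
                return runtimeValue; // read at lookup, not at registration
            }
        });
        runtimeValue = "proto,xml,text"; // the runtime becomes available
        System.out.println(REGISTRY.get("query.output.formatters").get());
    }
}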

Aggregations

Supplier (com.google.common.base.Supplier) 51
IOException (java.io.IOException) 14
Test (org.junit.Test) 11
ImmutableMap (com.google.common.collect.ImmutableMap) 8
Map (java.util.Map) 8
Path (java.nio.file.Path) 5
ArrayList (java.util.ArrayList) 5
List (java.util.List) 5
Set (java.util.Set) 5
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 4
ImmutableList (com.google.common.collect.ImmutableList) 4
ImmutableSet (com.google.common.collect.ImmutableSet) 4
Committer (io.druid.data.input.Committer) 4
ByteBuffer (java.nio.ByteBuffer) 4
Optional (java.util.Optional) 4
SourcePath (com.facebook.buck.rules.SourcePath) 3
InputRow (io.druid.data.input.InputRow) 3
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory) 3
File (java.io.File) 3
HashMap (java.util.HashMap) 3