
Example 26 with Supplier

Use of com.google.common.base.Supplier in project jackrabbit-oak by apache.

The class SegmentCompactionIT, method setUp.

@Before
public void setUp() throws Exception {
    assumeTrue(ENABLED);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    MetricStatisticsProvider statisticsProvider = new MetricStatisticsProvider(mBeanServer, executor);
    SegmentGCOptions gcOptions = defaultGCOptions().setEstimationDisabled(true).setForceTimeout(3600);
    FileStoreBuilder builder = fileStoreBuilder(folder.getRoot());
    fileStore = builder.withMemoryMapping(true).withGCMonitor(gcMonitor).withGCOptions(gcOptions).withIOMonitor(new MetricsIOMonitor(statisticsProvider)).withStatisticsProvider(statisticsProvider).build();
    nodeStore = SegmentNodeStoreBuilders.builder(fileStore).withStatisticsProvider(statisticsProvider).build();
    WriterCacheManager cacheManager = builder.getCacheManager();
    Runnable cancelGC = new Runnable() {

        @Override
        public void run() {
            fileStore.cancelGC();
        }
    };
    Supplier<String> status = new Supplier<String>() {

        @Override
        public String get() {
            return fileStoreGCMonitor.getStatus();
        }
    };
    List<Registration> registrations = newArrayList();
    registrations.add(registerMBean(segmentCompactionMBean, new ObjectName("IT:TYPE=Segment Compaction")));
    registrations.add(registerMBean(new SegmentRevisionGCMBean(fileStore, gcOptions, fileStoreGCMonitor), new ObjectName("IT:TYPE=Segment Revision GC")));
    registrations.add(registerMBean(new RevisionGC(fileStore.getGCRunner(), cancelGC, status, executor), new ObjectName("IT:TYPE=Revision GC")));
    CacheStatsMBean segmentCacheStats = fileStore.getSegmentCacheStats();
    registrations.add(registerMBean(segmentCacheStats, new ObjectName("IT:TYPE=" + segmentCacheStats.getName())));
    CacheStatsMBean stringCacheStats = fileStore.getStringCacheStats();
    registrations.add(registerMBean(stringCacheStats, new ObjectName("IT:TYPE=" + stringCacheStats.getName())));
    CacheStatsMBean templateCacheStats = fileStore.getTemplateCacheStats();
    registrations.add(registerMBean(templateCacheStats, new ObjectName("IT:TYPE=" + templateCacheStats.getName())));
    CacheStatsMBean stringDeduplicationCacheStats = cacheManager.getStringCacheStats();
    assertNotNull(stringDeduplicationCacheStats);
    registrations.add(registerMBean(stringDeduplicationCacheStats, new ObjectName("IT:TYPE=" + stringDeduplicationCacheStats.getName())));
    CacheStatsMBean templateDeduplicationCacheStats = cacheManager.getTemplateCacheStats();
    assertNotNull(templateDeduplicationCacheStats);
    registrations.add(registerMBean(templateDeduplicationCacheStats, new ObjectName("IT:TYPE=" + templateDeduplicationCacheStats.getName())));
    CacheStatsMBean nodeDeduplicationCacheStats = cacheManager.getNodeCacheStats();
    assertNotNull(nodeDeduplicationCacheStats);
    registrations.add(registerMBean(nodeDeduplicationCacheStats, new ObjectName("IT:TYPE=" + nodeDeduplicationCacheStats.getName())));
    registrations.add(registerMBean(nodeStore.getStats(), new ObjectName("IT:TYPE=" + "SegmentNodeStore statistics")));
    mBeanRegistration = new CompositeRegistration(registrations);
}
Also used: ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService), ListeningScheduledExecutorService(com.google.common.util.concurrent.ListeningScheduledExecutorService), SegmentGCOptions(org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions), MetricsIOMonitor(org.apache.jackrabbit.oak.segment.file.MetricsIOMonitor), ObjectName(javax.management.ObjectName), RevisionGC(org.apache.jackrabbit.oak.spi.state.RevisionGC), SegmentRevisionGC(org.apache.jackrabbit.oak.segment.compaction.SegmentRevisionGC), SegmentRevisionGCMBean(org.apache.jackrabbit.oak.segment.compaction.SegmentRevisionGCMBean), FileStoreBuilder(org.apache.jackrabbit.oak.segment.file.FileStoreBuilder), CompositeRegistration(org.apache.jackrabbit.oak.spi.whiteboard.CompositeRegistration), Registration(org.apache.jackrabbit.oak.spi.whiteboard.Registration), CacheStatsMBean(org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean), MetricStatisticsProvider(org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider), Supplier(com.google.common.base.Supplier), Before(org.junit.Before)
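
In this setup the Supplier defers reading the garbage-collection status until the RevisionGC MBean actually asks for it, so the monitor is polled lazily instead of being captured once during setUp. A minimal sketch of the same lazy-status pattern, assuming a Java 8+ language level and Guava on the classpath; StatusSource is a hypothetical stand-in for the fileStoreGCMonitor used above:

import com.google.common.base.Supplier;

public class StatusSupplierSketch {

    // Hypothetical stand-in for FileStoreGCMonitor; only the status accessor matters here.
    interface StatusSource {
        String getStatus();
    }

    // com.google.common.base.Supplier has a single abstract method, so a method
    // reference can replace the anonymous class used in the test above.
    static Supplier<String> statusSupplier(StatusSource monitor) {
        // evaluated anew on every get(), so callers always see the current status
        return monitor::getStatus;
    }

    public static void main(String[] args) {
        Supplier<String> status = statusSupplier(() -> "idle");
        System.out.println(status.get()); // prints "idle"
    }
}

The anonymous class in the original test does the same thing; the lambda and method-reference forms only require a Java 8+ language level.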

Example 27 with Supplier

Use of com.google.common.base.Supplier in project android by JetBrains.

The class AdtModuleImporter, method findModules.

@Override
@NotNull
public Set<ModuleToImport> findModules(@NotNull VirtualFile importSource) throws IOException {
    final AdtImportBuilder builder = (AdtImportBuilder) myContext.getProjectBuilder();
    assert builder != null;
    builder.setSelectedProject(virtualToIoFile(importSource));
    final GradleImport gradleImport = getGradleImport();
    gradleImport.importProjects(Collections.singletonList(virtualToIoFile(importSource)));
    Map<String, File> adtProjects = gradleImport.getDetectedModuleLocations();
    Set<ModuleToImport> modules = Sets.newHashSet();
    for (final Map.Entry<String, File> entry : adtProjects.entrySet()) {
        VirtualFile location = findFileByIoFile(entry.getValue(), false);
        modules.add(new ModuleToImport(entry.getKey(), location, new Supplier<Iterable<String>>() {

            @Override
            public Iterable<String> get() {
                return gradleImport.getProjectDependencies(entry.getKey());
            }
        }));
    }
    return modules;
}
Also used: VirtualFile(com.intellij.openapi.vfs.VirtualFile), AdtImportBuilder(com.android.tools.idea.gradle.eclipse.AdtImportBuilder), GradleImport(com.android.tools.idea.gradle.eclipse.GradleImport), Supplier(com.google.common.base.Supplier), VfsUtilCore.virtualToIoFile(com.intellij.openapi.vfs.VfsUtilCore.virtualToIoFile), VfsUtil.findFileByIoFile(com.intellij.openapi.vfs.VfsUtil.findFileByIoFile), File(java.io.File), Map(java.util.Map), ImmutableMap(com.google.common.collect.ImmutableMap), NotNull(org.jetbrains.annotations.NotNull)
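
The Supplier handed to each ModuleToImport postpones the per-module dependency lookup until the import wizard actually needs it. A minimal sketch of that deferred-lookup pattern, assuming only Guava; LazyModule and resolveDependencies are illustrative names, not the actual Android Studio or GradleImport API:

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

import java.util.Arrays;
import java.util.List;

public class LazyModuleSketch {

    // Illustrative module holder: the dependency list is computed only on demand.
    static class LazyModule {
        final String name;
        final Supplier<List<String>> dependencies;

        LazyModule(String name, Supplier<List<String>> dependencies) {
            this.name = name;
            // memoize so repeated get() calls do not repeat the (possibly expensive) lookup
            this.dependencies = Suppliers.memoize(dependencies);
        }
    }

    // Stand-in for gradleImport.getProjectDependencies(moduleName)
    static List<String> resolveDependencies(String moduleName) {
        return Arrays.asList(moduleName + "-lib");
    }

    public static void main(String[] args) {
        LazyModule m = new LazyModule("app", () -> resolveDependencies("app"));
        System.out.println(m.name + " -> " + m.dependencies.get()); // app -> [app-lib]
    }
}

The original code does not memoize; Suppliers.memoize is shown here only to illustrate how Guava's Suppliers utilities compose with the same interface.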

Example 28 with Supplier

Use of com.google.common.base.Supplier in project GeoGig by boundlessgeo.

The class HttpRemoteRepo, method fetchMoreData.

/**
     * Retrieve objects from the remote repository, and update have/want lists accordingly.
     * Specifically, any retrieved commits are removed from the want list and added to the have
     * list, and any parents of those commits are removed from the have list (it only represents the
     * most recent common commits). Retrieved objects are added to the local repository, and the
     * want/have lists are updated in-place.
     * 
     * @param want a list of ObjectIds that need to be fetched
     * @param have a list of ObjectIds that are in common with the remote repository
     * @param progress the progress listener to update as objects are retrieved
     */
private void fetchMoreData(final List<ObjectId> want, final Set<ObjectId> have, final ProgressListener progress) {
    final JsonObject message = createFetchMessage(want, have);
    final URL resourceURL;
    try {
        resourceURL = new URL(repositoryURL.toString() + "/repo/batchobjects");
    } catch (MalformedURLException e) {
        throw Throwables.propagate(e);
    }
    final HttpURLConnection connection;
    try {
        final Gson gson = new Gson();
        OutputStream out;
        final Writer writer;
        connection = (HttpURLConnection) resourceURL.openConnection();
        connection.setDoOutput(true);
        connection.setDoInput(true);
        connection.addRequestProperty("Accept-Encoding", "gzip");
        out = connection.getOutputStream();
        writer = new OutputStreamWriter(out);
        gson.toJson(message, writer);
        writer.flush();
        out.flush();
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    final HttpUtils.ReportingInputStream in = HttpUtils.getResponseStream(connection);
    BinaryPackedObjects unpacker = new BinaryPackedObjects(localRepository.objectDatabase());
    BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {

        @Override
        public void callback(Supplier<RevObject> supplier) {
            RevObject object = supplier.get();
            progress.setProgress(progress.getProgress() + 1);
            if (object instanceof RevCommit) {
                RevCommit commit = (RevCommit) object;
                want.remove(commit.getId());
                have.removeAll(commit.getParentIds());
                have.add(commit.getId());
            } else if (object instanceof RevTag) {
                RevTag tag = (RevTag) object;
                want.remove(tag.getId());
                have.remove(tag.getCommitId());
                have.add(tag.getId());
            }
        }
    };
    Stopwatch sw = Stopwatch.createStarted();
    IngestResults ingestResults = unpacker.ingest(in, callback);
    sw.stop();
    String msg = String.format("Processed %,d objects. Inserted: %,d. Existing: %,d. Time: %s. Compressed size: %,d bytes. Uncompressed size: %,d bytes.", ingestResults.total(), ingestResults.getInserted(), ingestResults.getExisting(), sw, in.compressedSize(), in.unCompressedSize());
    LOGGER.info(msg);
    progress.setDescription(msg);
}
Also used: MalformedURLException(java.net.MalformedURLException), RevTag(org.locationtech.geogig.api.RevTag), RevObject(org.locationtech.geogig.api.RevObject), ReportingOutputStream(org.locationtech.geogig.remote.HttpUtils.ReportingOutputStream), OutputStream(java.io.OutputStream), FilterOutputStream(java.io.FilterOutputStream), Stopwatch(com.google.common.base.Stopwatch), JsonObject(com.google.gson.JsonObject), Gson(com.google.gson.Gson), IOException(java.io.IOException), IngestResults(org.locationtech.geogig.remote.BinaryPackedObjects.IngestResults), URL(java.net.URL), HttpURLConnection(java.net.HttpURLConnection), OutputStreamWriter(java.io.OutputStreamWriter), Supplier(com.google.common.base.Supplier), Writer(java.io.Writer), RevCommit(org.locationtech.geogig.api.RevCommit)
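
The BinaryPackedObjects callback receives a Supplier<RevObject> rather than the object itself, so a callback that does not need a particular object never pays for materializing it. A minimal sketch of that callback-with-Supplier shape, assuming only Guava; ObjectCallback and parseObject are illustrative names, not GeoGig API:

import com.google.common.base.Supplier;

public class SupplierCallbackSketch {

    // Illustrative callback: the payload is materialized only if the callback calls get().
    interface ObjectCallback<T> {
        void callback(Supplier<T> supplier);
    }

    // Stand-in for deserializing a RevObject from the wire format.
    static String parseObject(byte[] raw) {
        return new String(raw);
    }

    // The producer hands the callback a lazy view of the object it just ingested.
    static void deliver(byte[] raw, ObjectCallback<String> cb) {
        cb.callback(() -> parseObject(raw));
    }

    public static void main(String[] args) {
        deliver("commit".getBytes(), supplier -> System.out.println("got " + supplier.get()));
    }
}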

Example 29 with Supplier

Use of com.google.common.base.Supplier in project GeoGig by boundlessgeo.

The class HttpRemoteRepo, method sendPackedObjects.

private void sendPackedObjects(final List<ObjectId> toSend, final Set<ObjectId> roots, Deduplicator deduplicator, final ProgressListener progress) {
    Set<ObjectId> sent = new HashSet<ObjectId>();
    while (!toSend.isEmpty()) {
        try {
            BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {

                @Override
                public void callback(Supplier<RevObject> supplier) {
                    RevObject object = supplier.get();
                    progress.setProgress(progress.getProgress() + 1);
                    if (object instanceof RevCommit) {
                        RevCommit commit = (RevCommit) object;
                        toSend.remove(commit.getId());
                        roots.removeAll(commit.getParentIds());
                        roots.add(commit.getId());
                    }
                }
            };
            ObjectDatabase database = localRepository.objectDatabase();
            BinaryPackedObjects packer = new BinaryPackedObjects(database);
            ImmutableList<ObjectId> have = ImmutableList.copyOf(roots);
            final boolean traverseCommits = false;
            Stopwatch sw = Stopwatch.createStarted();
            ObjectSerializingFactory serializer = DataStreamSerializationFactoryV1.INSTANCE;
            SendObjectsConnectionFactory outFactory;
            ObjectFunnel objectFunnel;
            outFactory = new SendObjectsConnectionFactory(repositoryURL);
            int pushBytesLimit = parsePushLimit();
            objectFunnel = ObjectFunnels.newFunnel(outFactory, serializer, pushBytesLimit);
            final long writtenObjectsCount = packer.write(objectFunnel, toSend, have, sent, callback, traverseCommits, deduplicator);
            objectFunnel.close();
            sw.stop();
            long compressedSize = outFactory.compressedSize;
            long uncompressedSize = outFactory.uncompressedSize;
            LOGGER.info(String.format("HttpRemoteRepo: Written %,d objects." + " Time to process: %s." + " Compressed size: %,d bytes. Uncompressed size: %,d bytes.", writtenObjectsCount, sw, compressedSize, uncompressedSize));
        } catch (IOException e) {
            Throwables.propagate(e);
        }
    }
}
Also used: ObjectSerializingFactory(org.locationtech.geogig.storage.ObjectSerializingFactory), ObjectId(org.locationtech.geogig.api.ObjectId), RevObject(org.locationtech.geogig.api.RevObject), Stopwatch(com.google.common.base.Stopwatch), IOException(java.io.IOException), ObjectDatabase(org.locationtech.geogig.storage.ObjectDatabase), Supplier(com.google.common.base.Supplier), HashSet(java.util.HashSet), RevCommit(org.locationtech.geogig.api.RevCommit)
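
sendPackedObjects uses the same callback shape on the push side: every time an object is written, the callback updates the toSend and roots frontiers. A minimal sketch of exercising that kind of bookkeeping in isolation, assuming only Guava; Suppliers.ofInstance wraps an already-materialized value so it can be fed to a Supplier-based callback, and java.util.function.Consumer stands in for the BinaryPackedObjects.Callback interface:

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

public class CallbackBookkeepingSketch {

    public static void main(String[] args) {
        Set<String> toSend = new HashSet<>();
        toSend.add("c1");
        Set<String> roots = new HashSet<>();

        // Same shape as the callback above: consume a Supplier of the object just sent.
        Consumer<Supplier<String>> callback = supplier -> {
            String id = supplier.get();
            toSend.remove(id); // no longer pending
            roots.add(id);     // now part of the known frontier
        };

        // Pretend commit "c1" was just written to the funnel.
        callback.accept(Suppliers.ofInstance("c1"));
        System.out.println(toSend + " " + roots); // prints [] [c1]
    }
}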

Example 30 with Supplier

Use of com.google.common.base.Supplier in project cdap by caskdata.

The class WorkflowProgramRunner, method run.

@Override
public ProgramController run(final Program program, final ProgramOptions options) {
    // Extract and verify options
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");
    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.WORKFLOW, "Only WORKFLOW process type is supported.");
    WorkflowSpecification workflowSpec = appSpec.getWorkflows().get(program.getName());
    Preconditions.checkNotNull(workflowSpec, "Missing WorkflowSpecification for %s", program.getName());
    final RunId runId = ProgramRunners.getRunId(options);
    // Setup dataset framework context, if required
    if (datasetFramework instanceof ProgramContextAware) {
        ProgramId programId = program.getId();
        ((ProgramContextAware) datasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
    }
    // List of all Closeable resources that needs to be cleanup
    final List<Closeable> closeables = new ArrayList<>();
    try {
        PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
        if (pluginInstantiator != null) {
            closeables.add(pluginInstantiator);
        }
        WorkflowDriver driver = new WorkflowDriver(program, options, hostname, workflowSpec, programRunnerFactory, metricsCollectionService, datasetFramework, discoveryServiceClient, txClient, runtimeStore, cConf, pluginInstantiator, secureStore, secureStoreManager, messagingService);
        // Controller needs to be created before starting the driver so that the state change of the driver
        // service can be fully captured by the controller.
        final ProgramController controller = new WorkflowProgramController(program, driver, serviceAnnouncer, runId);
        final String twillRunId = options.getArguments().getOption(ProgramOptionConstants.TWILL_RUN_ID);
        controller.addListener(new AbstractListener() {

            @Override
            public void init(ProgramController.State state, @Nullable Throwable cause) {
                // Get start time from RunId
                long startTimeInSeconds = RunIds.getTime(controller.getRunId(), TimeUnit.SECONDS);
                if (startTimeInSeconds == -1) {
                    // If RunId is not time-based, use current time as start time
                    startTimeInSeconds = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
                }
                final long finalStartTimeInSeconds = startTimeInSeconds;
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        runtimeStore.setStart(program.getId(), runId.getId(), finalStartTimeInSeconds, twillRunId, options.getUserArguments().asMap(), options.getArguments().asMap());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
                // This can happen if there is a delay in calling the init listener
                if (state == ProgramController.State.COMPLETED) {
                    completed();
                }
                // This can happen if there is a delay in calling the init listener
                if (state == ProgramController.State.ERROR) {
                    error(controller.getFailureCause());
                }
            }

            @Override
            public void completed() {
                LOG.debug("Program {} with run id {} completed successfully.", program.getId(), runId.getId());
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        runtimeStore.setStop(program.getId(), runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.COMPLETED.getRunStatus());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void killed() {
                LOG.debug("Program {} with run id {} killed.", program.getId(), runId.getId());
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        runtimeStore.setStop(program.getId(), runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.KILLED.getRunStatus());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void suspended() {
                LOG.debug("Suspending Program {} with run id {}.", program.getId(), runId.getId());
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        runtimeStore.setSuspend(program.getId(), runId.getId());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void resuming() {
                LOG.debug("Resuming Program {} {}.", program.getId(), runId.getId());
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        runtimeStore.setResume(program.getId(), runId.getId());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void error(final Throwable cause) {
                LOG.info("Program {} with run id {} stopped because of error {}.", program.getId(), runId.getId(), cause);
                closeAllQuietly(closeables);
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        runtimeStore.setStop(program.getId(), runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.ERROR.getRunStatus(), new BasicThrowable(cause));
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }
        }, Threads.SAME_THREAD_EXECUTOR);
        driver.start();
        return controller;
    } catch (Exception e) {
        closeAllQuietly(closeables);
        throw Throwables.propagate(e);
    }
}
Also used: ApplicationSpecification(co.cask.cdap.api.app.ApplicationSpecification), ProgramController(co.cask.cdap.app.runtime.ProgramController), Closeable(java.io.Closeable), ArrayList(java.util.ArrayList), ProgramId(co.cask.cdap.proto.id.ProgramId), BasicProgramContext(co.cask.cdap.internal.app.runtime.BasicProgramContext), WorkflowSpecification(co.cask.cdap.api.workflow.WorkflowSpecification), BasicThrowable(co.cask.cdap.proto.BasicThrowable), PluginInstantiator(co.cask.cdap.internal.app.runtime.plugin.PluginInstantiator), AbstractListener(co.cask.cdap.internal.app.runtime.AbstractListener), Supplier(com.google.common.base.Supplier), ProgramType(co.cask.cdap.proto.ProgramType), RunId(org.apache.twill.api.RunId), ProgramContextAware(co.cask.cdap.data.ProgramContextAware)
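
Each listener callback above wraps a runtime-store update in a Supplier<Void> so that Retries.supplyWithRetries can re-run it on failure with a fixed delay. A minimal sketch of that retry-a-Supplier pattern, assuming only Guava; retryWithFixedDelay is a hypothetical helper, not the CDAP Retries API:

import com.google.common.base.Supplier;

public class RetrySupplierSketch {

    // Hypothetical fixed-delay retry: re-invokes the Supplier until it succeeds or attempts run out.
    static <T> T retryWithFixedDelay(Supplier<T> action, int maxAttempts, long delayMillis) {
        RuntimeException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return action.get();
            } catch (RuntimeException e) {
                last = e;
                if (attempt < maxAttempts) {
                    try {
                        Thread.sleep(delayMillis);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(ie);
                    }
                }
            }
        }
        throw last;
    }

    public static void main(String[] args) {
        // A side-effecting update fits Supplier<Void> by returning null, exactly as the
        // run-record updates in the listener above do.
        Void unused = retryWithFixedDelay(() -> {
            System.out.println("updating run record");
            return null;
        }, 3, 100);
    }
}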

Aggregations

Supplier (com.google.common.base.Supplier): 51 usages
IOException (java.io.IOException): 14 usages
Test (org.junit.Test): 11 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 8 usages
Map (java.util.Map): 8 usages
Path (java.nio.file.Path): 5 usages
ArrayList (java.util.ArrayList): 5 usages
List (java.util.List): 5 usages
Set (java.util.Set): 5 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 4 usages
ImmutableList (com.google.common.collect.ImmutableList): 4 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 4 usages
Committer (io.druid.data.input.Committer): 4 usages
ByteBuffer (java.nio.ByteBuffer): 4 usages
Optional (java.util.Optional): 4 usages
SourcePath (com.facebook.buck.rules.SourcePath): 3 usages
InputRow (io.druid.data.input.InputRow): 3 usages
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 3 usages
File (java.io.File): 3 usages
HashMap (java.util.HashMap): 3 usages