Use of com.google.common.base.Supplier in project jackrabbit-oak by apache.
The class SegmentCompactionIT, method setUp:
@Before
public void setUp() throws Exception {
assumeTrue(ENABLED);
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
MetricStatisticsProvider statisticsProvider = new MetricStatisticsProvider(mBeanServer, executor);
SegmentGCOptions gcOptions = defaultGCOptions().setEstimationDisabled(true).setForceTimeout(3600);
FileStoreBuilder builder = fileStoreBuilder(folder.getRoot());
fileStore = builder.withMemoryMapping(true)
    .withGCMonitor(gcMonitor)
    .withGCOptions(gcOptions)
    .withIOMonitor(new MetricsIOMonitor(statisticsProvider))
    .withStatisticsProvider(statisticsProvider)
    .build();
nodeStore = SegmentNodeStoreBuilders.builder(fileStore).withStatisticsProvider(statisticsProvider).build();
WriterCacheManager cacheManager = builder.getCacheManager();
Runnable cancelGC = new Runnable() {
@Override
public void run() {
fileStore.cancelGC();
}
};
Supplier<String> status = new Supplier<String>() {
@Override
public String get() {
return fileStoreGCMonitor.getStatus();
}
};
List<Registration> registrations = newArrayList();
registrations.add(registerMBean(segmentCompactionMBean, new ObjectName("IT:TYPE=Segment Compaction")));
registrations.add(registerMBean(new SegmentRevisionGCMBean(fileStore, gcOptions, fileStoreGCMonitor), new ObjectName("IT:TYPE=Segment Revision GC")));
registrations.add(registerMBean(new RevisionGC(fileStore.getGCRunner(), cancelGC, status, executor), new ObjectName("IT:TYPE=Revision GC")));
CacheStatsMBean segmentCacheStats = fileStore.getSegmentCacheStats();
registrations.add(registerMBean(segmentCacheStats, new ObjectName("IT:TYPE=" + segmentCacheStats.getName())));
CacheStatsMBean stringCacheStats = fileStore.getStringCacheStats();
registrations.add(registerMBean(stringCacheStats, new ObjectName("IT:TYPE=" + stringCacheStats.getName())));
CacheStatsMBean templateCacheStats = fileStore.getTemplateCacheStats();
registrations.add(registerMBean(templateCacheStats, new ObjectName("IT:TYPE=" + templateCacheStats.getName())));
CacheStatsMBean stringDeduplicationCacheStats = cacheManager.getStringCacheStats();
assertNotNull(stringDeduplicationCacheStats);
registrations.add(registerMBean(stringDeduplicationCacheStats, new ObjectName("IT:TYPE=" + stringDeduplicationCacheStats.getName())));
CacheStatsMBean templateDeduplicationCacheStats = cacheManager.getTemplateCacheStats();
assertNotNull(templateDeduplicationCacheStats);
registrations.add(registerMBean(templateDeduplicationCacheStats, new ObjectName("IT:TYPE=" + templateDeduplicationCacheStats.getName())));
CacheStatsMBean nodeDeduplicationCacheStats = cacheManager.getNodeCacheStats();
assertNotNull(nodeDeduplicationCacheStats);
registrations.add(registerMBean(nodeDeduplicationCacheStats, new ObjectName("IT:TYPE=" + nodeDeduplicationCacheStats.getName())));
registrations.add(registerMBean(nodeStore.getStats(), new ObjectName("IT:TYPE=SegmentNodeStore statistics")));
mBeanRegistration = new CompositeRegistration(registrations);
}
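The status Supplier above is the usual Guava idiom for handing a lazily evaluated value to a monitoring bean: the value is computed when get() is called, not when the bean is registered. A minimal, self-contained sketch of the same pattern (StatusSource and the other names here are hypothetical placeholders, not Oak API):

import com.google.common.base.Supplier;

public class StatusSupplierSketch {

    // Hypothetical stand-in for a monitor such as FileStoreGCMonitor.
    static class StatusSource {
        String getStatus() {
            return "idle";
        }
    }

    // The consumer only calls get() when it needs the value, so the status
    // string is computed at read time rather than at registration time.
    static void printStatus(Supplier<String> status) {
        System.out.println(status.get());
    }

    public static void main(String[] args) {
        final StatusSource monitor = new StatusSource();
        Supplier<String> status = new Supplier<String>() {
            @Override
            public String get() {
                return monitor.getStatus();
            }
        };
        printStatus(status);
    }
}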
Use of com.google.common.base.Supplier in project android by JetBrains.
The class AdtModuleImporter, method findModules:
@Override
@NotNull
public Set<ModuleToImport> findModules(@NotNull VirtualFile importSource) throws IOException {
final AdtImportBuilder builder = (AdtImportBuilder) myContext.getProjectBuilder();
assert builder != null;
builder.setSelectedProject(virtualToIoFile(importSource));
final GradleImport gradleImport = getGradleImport();
gradleImport.importProjects(Collections.singletonList(virtualToIoFile(importSource)));
Map<String, File> adtProjects = gradleImport.getDetectedModuleLocations();
Set<ModuleToImport> modules = Sets.newHashSet();
for (final Map.Entry<String, File> entry : adtProjects.entrySet()) {
VirtualFile location = findFileByIoFile(entry.getValue(), false);
modules.add(new ModuleToImport(entry.getKey(), location, new Supplier<Iterable<String>>() {
@Override
public Iterable<String> get() {
return gradleImport.getProjectDependencies(entry.getKey());
}
}));
}
return modules;
}
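In findModules the Supplier defers the potentially expensive dependency lookup until a module's dependencies are actually requested. If the result should also be computed at most once, Guava's Suppliers.memoize can wrap the same anonymous class. A hedged sketch with made-up names (lookupDependencies is not part of the JetBrains importer API):

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

import java.util.Arrays;

public class LazyDependenciesSketch {

    // Hypothetical replacement for gradleImport.getProjectDependencies(name).
    static Iterable<String> lookupDependencies(String moduleName) {
        System.out.println("resolving dependencies of " + moduleName);
        return Arrays.asList(":lib", ":core");
    }

    public static void main(String[] args) {
        final String moduleName = "app";
        // memoize() caches the first result, so repeated get() calls
        // do not repeat the lookup.
        Supplier<Iterable<String>> deps = Suppliers.memoize(new Supplier<Iterable<String>>() {
            @Override
            public Iterable<String> get() {
                return lookupDependencies(moduleName);
            }
        });
        System.out.println(deps.get());
        // The second call returns the cached Iterable without resolving again.
        System.out.println(deps.get());
    }
}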
Use of com.google.common.base.Supplier in project GeoGig by boundlessgeo.
The class HttpRemoteRepo, method fetchMoreData:
/**
* Retrieves objects from the remote repository and updates the want/have lists accordingly.
* Specifically, any retrieved commits are removed from the want list and added to the have
* list, and any parents of those commits are removed from the have list (it only represents
* the most recent common commits). Retrieved objects are added to the local repository, and
* the want/have lists are updated in place.
*
* @param want a list of ObjectIds that need to be fetched
* @param have a set of ObjectIds that are in common with the remote repository
* @param progress the progress listener used to report ingest progress
*/
private void fetchMoreData(final List<ObjectId> want, final Set<ObjectId> have, final ProgressListener progress) {
final JsonObject message = createFetchMessage(want, have);
final URL resourceURL;
try {
resourceURL = new URL(repositoryURL.toString() + "/repo/batchobjects");
} catch (MalformedURLException e) {
throw Throwables.propagate(e);
}
final HttpURLConnection connection;
try {
final Gson gson = new Gson();
OutputStream out;
final Writer writer;
connection = (HttpURLConnection) resourceURL.openConnection();
connection.setDoOutput(true);
connection.setDoInput(true);
connection.addRequestProperty("Accept-Encoding", "gzip");
out = connection.getOutputStream();
writer = new OutputStreamWriter(out);
gson.toJson(message, writer);
writer.flush();
out.flush();
} catch (IOException e) {
throw Throwables.propagate(e);
}
final HttpUtils.ReportingInputStream in = HttpUtils.getResponseStream(connection);
BinaryPackedObjects unpacker = new BinaryPackedObjects(localRepository.objectDatabase());
BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {
@Override
public void callback(Supplier<RevObject> supplier) {
RevObject object = supplier.get();
progress.setProgress(progress.getProgress() + 1);
if (object instanceof RevCommit) {
RevCommit commit = (RevCommit) object;
want.remove(commit.getId());
have.removeAll(commit.getParentIds());
have.add(commit.getId());
} else if (object instanceof RevTag) {
RevTag tag = (RevTag) object;
want.remove(tag.getId());
have.remove(tag.getCommitId());
have.add(tag.getId());
}
}
};
Stopwatch sw = Stopwatch.createStarted();
IngestResults ingestResults = unpacker.ingest(in, callback);
sw.stop();
String msg = String.format("Processed %,d objects. Inserted: %,d. Existing: %,d. Time: %s. Compressed size: %,d bytes. Uncompressed size: %,d bytes.", ingestResults.total(), ingestResults.getInserted(), ingestResults.getExisting(), sw, in.compressedSize(), in.unCompressedSize());
LOGGER.info(msg);
progress.setDescription(msg);
}
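The BinaryPackedObjects.Callback receives each streamed object wrapped in a Supplier, so the callback decides if and when the object is materialized, and it then updates the want/have frontier. That bookkeeping can be isolated as below; Commit, onCommitFetched and the String ids are simplified stand-ins, not GeoGig types:

import com.google.common.base.Supplier;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class WantHaveSketch {

    // Simplified stand-in for GeoGig's RevCommit/ObjectId model.
    static class Commit {
        final String id;
        final Set<String> parentIds;

        Commit(String id, Set<String> parentIds) {
            this.id = id;
            this.parentIds = parentIds;
        }
    }

    // Mirrors the bookkeeping in the callback above: a fetched commit is no longer
    // wanted, its parents drop out of the "have" frontier, and the commit itself
    // becomes the new most recent common point.
    static void onCommitFetched(Supplier<Commit> supplier, Set<String> want, Set<String> have) {
        Commit commit = supplier.get(); // materialization is deferred until this call
        want.remove(commit.id);
        have.removeAll(commit.parentIds);
        have.add(commit.id);
    }

    public static void main(String[] args) {
        Set<String> want = new HashSet<String>(Arrays.asList("c2"));
        Set<String> have = new HashSet<String>(Arrays.asList("c1"));
        final Commit c2 = new Commit("c2", new HashSet<String>(Arrays.asList("c1")));
        onCommitFetched(new Supplier<Commit>() {
            @Override
            public Commit get() {
                return c2;
            }
        }, want, have);
        System.out.println("want=" + want + " have=" + have); // want=[] have=[c2]
    }
}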
Use of com.google.common.base.Supplier in project GeoGig by boundlessgeo.
The class HttpRemoteRepo, method sendPackedObjects:
private void sendPackedObjects(final List<ObjectId> toSend, final Set<ObjectId> roots, Deduplicator deduplicator, final ProgressListener progress) {
Set<ObjectId> sent = new HashSet<ObjectId>();
while (!toSend.isEmpty()) {
try {
BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {
@Override
public void callback(Supplier<RevObject> supplier) {
RevObject object = supplier.get();
progress.setProgress(progress.getProgress() + 1);
if (object instanceof RevCommit) {
RevCommit commit = (RevCommit) object;
toSend.remove(commit.getId());
roots.removeAll(commit.getParentIds());
roots.add(commit.getId());
}
}
};
ObjectDatabase database = localRepository.objectDatabase();
BinaryPackedObjects packer = new BinaryPackedObjects(database);
ImmutableList<ObjectId> have = ImmutableList.copyOf(roots);
final boolean traverseCommits = false;
Stopwatch sw = Stopwatch.createStarted();
ObjectSerializingFactory serializer = DataStreamSerializationFactoryV1.INSTANCE;
SendObjectsConnectionFactory outFactory;
ObjectFunnel objectFunnel;
outFactory = new SendObjectsConnectionFactory(repositoryURL);
int pushBytesLimit = parsePushLimit();
objectFunnel = ObjectFunnels.newFunnel(outFactory, serializer, pushBytesLimit);
final long writtenObjectsCount = packer.write(objectFunnel, toSend, have, sent, callback, traverseCommits, deduplicator);
objectFunnel.close();
sw.stop();
long compressedSize = outFactory.compressedSize;
long uncompressedSize = outFactory.uncompressedSize;
LOGGER.info(String.format("HttpRemoteRepo: Written %,d objects." + " Time to process: %s." + " Compressed size: %,d bytes. Uncompressed size: %,d bytes.", writtenObjectsCount, sw, compressedSize, uncompressedSize));
} catch (IOException e) {
throw Throwables.propagate(e);
}
}
}
Use of com.google.common.base.Supplier in project cdap by caskdata.
The class WorkflowProgramRunner, method run:
@Override
public ProgramController run(final Program program, final ProgramOptions options) {
// Extract and verify options
ApplicationSpecification appSpec = program.getApplicationSpecification();
Preconditions.checkNotNull(appSpec, "Missing application specification.");
ProgramType processorType = program.getType();
Preconditions.checkNotNull(processorType, "Missing processor type.");
Preconditions.checkArgument(processorType == ProgramType.WORKFLOW, "Only WORKFLOW process type is supported.");
WorkflowSpecification workflowSpec = appSpec.getWorkflows().get(program.getName());
Preconditions.checkNotNull(workflowSpec, "Missing WorkflowSpecification for %s", program.getName());
final RunId runId = ProgramRunners.getRunId(options);
// Setup dataset framework context, if required
if (datasetFramework instanceof ProgramContextAware) {
ProgramId programId = program.getId();
((ProgramContextAware) datasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
}
// List of all Closeable resources that needs to be cleanup
final List<Closeable> closeables = new ArrayList<>();
try {
PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
if (pluginInstantiator != null) {
closeables.add(pluginInstantiator);
}
WorkflowDriver driver = new WorkflowDriver(program, options, hostname, workflowSpec, programRunnerFactory, metricsCollectionService, datasetFramework, discoveryServiceClient, txClient, runtimeStore, cConf, pluginInstantiator, secureStore, secureStoreManager, messagingService);
// Controller needs to be created before starting the driver so that the state change of the driver
// service can be fully captured by the controller.
final ProgramController controller = new WorkflowProgramController(program, driver, serviceAnnouncer, runId);
final String twillRunId = options.getArguments().getOption(ProgramOptionConstants.TWILL_RUN_ID);
controller.addListener(new AbstractListener() {
@Override
public void init(ProgramController.State state, @Nullable Throwable cause) {
// Get start time from RunId
long startTimeInSeconds = RunIds.getTime(controller.getRunId(), TimeUnit.SECONDS);
if (startTimeInSeconds == -1) {
// If RunId is not time-based, use current time as start time
startTimeInSeconds = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
}
final long finalStartTimeInSeconds = startTimeInSeconds;
Retries.supplyWithRetries(new Supplier<Void>() {
@Override
public Void get() {
runtimeStore.setStart(program.getId(), runId.getId(), finalStartTimeInSeconds, twillRunId, options.getUserArguments().asMap(), options.getArguments().asMap());
return null;
}
}, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
// This can happen if there is a delay in calling the init listener
if (state == ProgramController.State.COMPLETED) {
completed();
}
// This can happen if there is a delay in calling the init listener
if (state == ProgramController.State.ERROR) {
error(controller.getFailureCause());
}
}
@Override
public void completed() {
LOG.debug("Program {} with run id {} completed successfully.", program.getId(), runId.getId());
Retries.supplyWithRetries(new Supplier<Void>() {
@Override
public Void get() {
runtimeStore.setStop(program.getId(), runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.COMPLETED.getRunStatus());
return null;
}
}, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
}
@Override
public void killed() {
LOG.debug("Program {} with run id {} killed.", program.getId(), runId.getId());
Retries.supplyWithRetries(new Supplier<Void>() {
@Override
public Void get() {
runtimeStore.setStop(program.getId(), runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.KILLED.getRunStatus());
return null;
}
}, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
}
@Override
public void suspended() {
LOG.debug("Suspending Program {} with run id {}.", program.getId(), runId.getId());
Retries.supplyWithRetries(new Supplier<Void>() {
@Override
public Void get() {
runtimeStore.setSuspend(program.getId(), runId.getId());
return null;
}
}, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
}
@Override
public void resuming() {
LOG.debug("Resuming Program {} {}.", program.getId(), runId.getId());
Retries.supplyWithRetries(new Supplier<Void>() {
@Override
public Void get() {
runtimeStore.setResume(program.getId(), runId.getId());
return null;
}
}, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
}
@Override
public void error(final Throwable cause) {
LOG.info("Program {} with run id {} stopped because of error {}.", program.getId(), runId.getId(), cause);
closeAllQuietly(closeables);
Retries.supplyWithRetries(new Supplier<Void>() {
@Override
public Void get() {
runtimeStore.setStop(program.getId(), runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.ERROR.getRunStatus(), new BasicThrowable(cause));
return null;
}
}, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
}
}, Threads.SAME_THREAD_EXECUTOR);
driver.start();
return controller;
} catch (Exception e) {
closeAllQuietly(closeables);
throw Throwables.propagate(e);
}
}
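Throughout this runner, every run-record update is wrapped in a Supplier<Void> and passed to Retries.supplyWithRetries so the store write survives transient failures. The general shape of that pattern can be sketched without CDAP; retryWithFixedDelay below is a hypothetical helper, not CDAP's Retries API:

import com.google.common.base.Supplier;

import java.util.concurrent.TimeUnit;

public class RetrySupplierSketch {

    // Hypothetical helper: retries the supplier a fixed number of times with a
    // fixed delay between attempts. CDAP's Retries/RetryStrategies are richer.
    static <T> T retryWithFixedDelay(Supplier<T> task, int maxAttempts, long delaySeconds) {
        RuntimeException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return task.get();
            } catch (RuntimeException e) {
                last = e;
                try {
                    TimeUnit.SECONDS.sleep(delaySeconds);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(ie);
                }
            }
        }
        throw last;
    }

    public static void main(String[] args) {
        // A Void-returning Supplier mirrors the runtimeStore.setStart/setStop calls above:
        // the work is done for its side effect and null is returned.
        retryWithFixedDelay(new Supplier<Void>() {
            @Override
            public Void get() {
                System.out.println("recording run state");
                return null;
            }
        }, 3, 1);
    }
}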