Use of com.google.common.base.Supplier in the Hystrix project by Netflix.
The following example is taken from the class FallbackMethod, method equals.
// Type#equals cannot be used directly to compare parametrized types and type variables:
// it compares generic declarations (see java.lang.reflect.GenericDeclaration). When a
// generic declaration is an instance of java.lang.reflect.Method, the command and fallback
// return types carry different generic declarations and would never compare equal even if
// structurally identical. Therefore only the relevant type properties are compared here:
// bounds for type variables, and raw types plus type arguments for parametrized types.
private static Result equals(Type commandType, Type fallbackType) {
    if (isParametrizedType(commandType) && isParametrizedType(fallbackType)) {
        final ParameterizedType commandParamType = (ParameterizedType) commandType;
        final ParameterizedType fallbackParamType = (ParameterizedType) fallbackType;
        // Raw types must match before the type arguments are inspected.
        return regularEquals(commandParamType.getRawType(), fallbackParamType.getRawType()).andThen(new Supplier<Result>() {
            @Override
            public Result get() {
                return FallbackMethod.equals(commandParamType.getActualTypeArguments(), fallbackParamType.getActualTypeArguments());
            }
        });
    } else if (isTypeVariable(commandType) && isTypeVariable(fallbackType)) {
        final TypeVariable commandTypeVar = (TypeVariable) commandType;
        final TypeVariable fallbackTypeVar = (TypeVariable) fallbackType;
        boolean declaredOnMethods = commandTypeVar.getGenericDeclaration() instanceof Method
                && fallbackTypeVar.getGenericDeclaration() instanceof Method;
        if (!declaredOnMethods) {
            // Non-method declarations can be compared with the regular equality.
            return regularEquals(commandTypeVar, fallbackTypeVar);
        }
        // Method-level type variables: compare only their bounds.
        return equals(commandTypeVar.getBounds(), fallbackTypeVar.getBounds()).append(new Supplier<List<Error>>() {
            @Override
            public List<Error> get() {
                return Collections.singletonList(boundsError(commandTypeVar, commandTypeVar.getBounds(), "", fallbackTypeVar, fallbackTypeVar.getBounds()));
            }
        });
    } else if (isWildcardType(commandType) && isWildcardType(fallbackType)) {
        final WildcardType commandWildcard = (WildcardType) commandType;
        final WildcardType fallbackWildcard = (WildcardType) fallbackType;
        // Lower bounds are checked first; a mismatch there short-circuits the comparison.
        Result lowerBoundsResult = equals(commandWildcard.getLowerBounds(), fallbackWildcard.getLowerBounds()).append(new Supplier<List<Error>>() {
            @Override
            public List<Error> get() {
                return Collections.singletonList(boundsError(commandWildcard, commandWildcard.getLowerBounds(), "lower", fallbackWildcard, fallbackWildcard.getLowerBounds()));
            }
        });
        if (lowerBoundsResult.isFailure()) {
            return lowerBoundsResult;
        }
        return equals(commandWildcard.getUpperBounds(), fallbackWildcard.getUpperBounds()).append(new Supplier<List<Error>>() {
            @Override
            public List<Error> get() {
                return Collections.singletonList(boundsError(commandWildcard, commandWildcard.getUpperBounds(), "upper", fallbackWildcard, fallbackWildcard.getUpperBounds()));
            }
        });
    } else {
        return regularEquals(commandType, fallbackType);
    }
}
Use of com.google.common.base.Supplier in the torodb project by torodb.
The following example is taken from the class DefaultOplogApplier, method createBatcherFlow.
/**
 * Creates a flow that batches and analyzes an input of {@link AnalyzedOplogBatch remote jobs}.
 *
 * This flow tries to accumulate several remote jobs into a bigger one and does not emit until:
 * <ul>
 * <li>a maximum number of operations has been batched,</li>
 * <li>or a maximum time has elapsed since the last emit,</li>
 * <li>or the received job is not {@link AnalyzedOplogBatch#isReadyForMore()}.</li>
 * </ul>
 */
private Flow<OplogBatch, AnalyzedStreamElement, NotUsed> createBatcherFlow(ApplierContext context) {
    // A batch is closed as soon as its source signals it cannot deliver more right away.
    Predicate<OplogBatch> finishBatchPredicate = (OplogBatch rawBatch) -> !rawBatch.isReadyForMore();
    // The "cost" of a batch is the number of operations it carries.
    ToIntFunction<OplogBatch> costFunction = OplogBatch::count;
    Supplier<RawStreamElement> zeroFun = () -> RawStreamElement.INITIAL_ELEMENT;
    BiFunction<RawStreamElement, OplogBatch, RawStreamElement> acumFun = RawStreamElement::concat;
    BatchAnalyzer batchAnalyzer = batchAnalyzerFactory.createBatchAnalyzer(context);
    return Flow.of(OplogBatch.class)
        .via(new BatchFlow<>(batchLimits.maxSize, batchLimits.maxPeriod, finishBatchPredicate, costFunction, zeroFun, acumFun))
        // Drop accumulated elements that carry no operations.
        .filter(rawElem -> rawElem.rawBatch != null && !rawElem.rawBatch.isEmpty())
        .map(rawElem -> {
            List<AnalyzedOplogBatch> analyzed = batchAnalyzer.apply(rawElem.rawBatch.getOps());
            return new AnalyzedStreamElement(rawElem, analyzed);
        });
}
Use of com.google.common.base.Supplier in the helios project by Spotify.
The following example is taken from the class JobCreateCommandTest, method setUp.
@Before
public void setUp() {
    // Use a real, dummy Subparser implementation to avoid having to mock out every single call.
    final Subparser createSubparser = ArgumentParsers.newArgumentParser("test")
        .addSubparsers()
        .addParser("create");
    // Supplier that snapshots the current test environment variables on each invocation.
    final Supplier<Map<String, String>> envVarSupplier = new Supplier<Map<String, String>>() {
        @Override
        public Map<String, String> get() {
            return ImmutableMap.copyOf(envVars);
        }
    };
    command = new JobCreateCommand(createSubparser, envVarSupplier);
    // Stub the client so every job creation matching JOB_NAME succeeds immediately.
    final CreateJobResponse okResponse = new CreateJobResponse(CreateJobResponse.Status.OK, Collections.<String>emptyList(), "12345");
    when(client.createJob(argThat(matchesName(JOB_NAME)))).thenReturn(immediateFuture(okResponse));
}
Use of com.google.common.base.Supplier in the jackrabbit-oak project by Apache.
The following example is taken from the class Registrations, method registerSegmentStore.
/**
 * Configures and registers a new SegmentNodeStore instance together with
 * all required components. Anything that must be disposed of (like
 * registered services or MBeans) will be registered via the
 * {@code registration} parameter.
 *
 * @param context An instance of {@link ComponentContext}.
 * @param blobStore An instance of {@link BlobStore}. It can be
 * {@code null}.
 * @param statisticsProvider An instance of {@link StatisticsProvider}.
 * @param closer An instance of {@link Closer}. It will be used
 * to track every registered service or
 * component.
 * @param whiteboard An instance of {@link Whiteboard}. It will be
 * used to register services in the OSGi
 * framework.
 * @param role The role of this component. It can be {@code
 * null}.
 * @param descriptors Determines if repository descriptors related to
 * discovery services should be registered.
 * @return A configured {@link SegmentNodeStore}, or {@code null} if the
 * setup failed.
 * @throws IOException In case an unrecoverable error occurs.
 */
static SegmentNodeStore registerSegmentStore(@Nonnull ComponentContext context, @Nullable BlobStore blobStore, @Nonnull StatisticsProvider statisticsProvider, @Nonnull Closer closer, @Nonnull Whiteboard whiteboard, @Nullable String role, boolean descriptors) throws IOException {
    Configuration configuration = new Configuration(context, role);
    // Everything added to this tracker is disposed of via the supplied Closer.
    Closeables closeables = new Closeables(closer);
    Registrations registrations = new Registrations(whiteboard, role);
    // Listen for GCMonitor services
    GCMonitor gcMonitor = GCMonitor.EMPTY;
    if (configuration.isPrimarySegmentStore()) {
        GCMonitorTracker tracker = new GCMonitorTracker();
        tracker.start(whiteboard);
        closeables.add(tracker);
        gcMonitor = tracker;
    }
    // Create the gc options
    if (configuration.getCompactionGainThreshold() != null) {
        log.warn("Detected deprecated flag 'compaction.gainThreshold'. " + "Please use 'compaction.sizeDeltaEstimation' instead and " + "'compaction.disableEstimation' to disable estimation.");
    }
    SegmentGCOptions gcOptions = new SegmentGCOptions(configuration.getPauseCompaction(), configuration.getRetryCount(), configuration.getForceCompactionTimeout()).setRetainedGenerations(configuration.getRetainedGenerations()).setGcSizeDeltaEstimation(configuration.getSizeDeltaEstimation()).setMemoryThreshold(configuration.getMemoryThreshold()).setEstimationDisabled(configuration.getDisableEstimation()).withGCNodeWriteMonitor(configuration.getGCProcessLog());
    // Build the FileStore
    FileStoreBuilder builder = fileStoreBuilder(configuration.getSegmentDirectory()).withSegmentCacheSize(configuration.getSegmentCacheSize()).withStringCacheSize(configuration.getStringCacheSize()).withTemplateCacheSize(configuration.getTemplateCacheSize()).withStringDeduplicationCacheSize(configuration.getStringDeduplicationCacheSize()).withTemplateDeduplicationCacheSize(configuration.getTemplateDeduplicationCacheSize()).withNodeDeduplicationCacheSize(configuration.getNodeDeduplicationCacheSize()).withMaxFileSize(configuration.getMaxFileSize()).withMemoryMapping(configuration.getMemoryMapping()).withGCMonitor(gcMonitor).withIOMonitor(new MetricsIOMonitor(statisticsProvider)).withStatisticsProvider(statisticsProvider).withGCOptions(gcOptions);
    if (configuration.hasCustomBlobStore() && blobStore != null) {
        log.info("Initializing SegmentNodeStore with BlobStore [{}]", blobStore);
        builder.withBlobStore(blobStore);
    }
    if (configuration.isStandbyInstance()) {
        // Standby instances install a listener for segment-not-found exceptions.
        builder.withSnfeListener(IGNORE_SNFE);
    }
    final FileStore store;
    try {
        store = builder.build();
    } catch (InvalidFileStoreVersionException e) {
        // Setup failure is signalled by returning null, as documented above.
        log.error("The storage format is not compatible with this version of Oak Segment Tar", e);
        return null;
    }
    // store should be closed last
    closeables.add(store);
    // Listen for Executor services on the whiteboard
    WhiteboardExecutor executor = new WhiteboardExecutor();
    executor.start(whiteboard);
    closeables.add(executor);
    // Expose stats about the segment cache
    CacheStatsMBean segmentCacheStats = store.getSegmentCacheStats();
    closeables.add(registrations.registerMBean(CacheStatsMBean.class, segmentCacheStats, CacheStats.TYPE, segmentCacheStats.getName()));
    // Expose stats about the string and template caches
    CacheStatsMBean stringCacheStats = store.getStringCacheStats();
    closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringCacheStats, CacheStats.TYPE, stringCacheStats.getName()));
    CacheStatsMBean templateCacheStats = store.getTemplateCacheStats();
    closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateCacheStats, CacheStats.TYPE, templateCacheStats.getName()));
    // Deduplication cache stats are only available when the corresponding cache exists.
    WriterCacheManager cacheManager = builder.getCacheManager();
    CacheStatsMBean stringDeduplicationCacheStats = cacheManager.getStringCacheStats();
    if (stringDeduplicationCacheStats != null) {
        closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringDeduplicationCacheStats, CacheStats.TYPE, stringDeduplicationCacheStats.getName()));
    }
    CacheStatsMBean templateDeduplicationCacheStats = cacheManager.getTemplateCacheStats();
    if (templateDeduplicationCacheStats != null) {
        closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateDeduplicationCacheStats, CacheStats.TYPE, templateDeduplicationCacheStats.getName()));
    }
    CacheStatsMBean nodeDeduplicationCacheStats = cacheManager.getNodeCacheStats();
    if (nodeDeduplicationCacheStats != null) {
        closeables.add(registrations.registerMBean(CacheStatsMBean.class, nodeDeduplicationCacheStats, CacheStats.TYPE, nodeDeduplicationCacheStats.getName()));
    }
    // Only the primary segment store manages revision garbage collection.
    if (configuration.isPrimarySegmentStore()) {
        final FileStoreGCMonitor monitor = new FileStoreGCMonitor(Clock.SIMPLE);
        closeables.add(registrations.register(GCMonitor.class, monitor));
        if (!configuration.isStandbyInstance()) {
            closeables.add(registrations.registerMBean(SegmentRevisionGC.class, new SegmentRevisionGCMBean(store, gcOptions, monitor), SegmentRevisionGC.TYPE, "Segment node store revision garbage collection"));
        }
        Runnable cancelGC = new Runnable() {
            @Override
            public void run() {
                store.cancelGC();
            }
        };
        // Lazily read the GC status so the MBean always reports the current value.
        Supplier<String> statusMessage = new Supplier<String>() {
            @Override
            public String get() {
                return monitor.getStatus();
            }
        };
        closeables.add(registrations.registerMBean(RevisionGCMBean.class, new RevisionGC(store.getGCRunner(), cancelGC, statusMessage, executor), RevisionGCMBean.TYPE, "Revision garbage collection"));
    }
    // Expose statistics about the FileStore
    closeables.add(registrations.registerMBean(FileStoreStatsMBean.class, store.getStats(), FileStoreStatsMBean.TYPE, "FileStore statistics"));
    // register segment node store
    SegmentNodeStore.SegmentNodeStoreBuilder segmentNodeStoreBuilder = SegmentNodeStoreBuilders.builder(store).withStatisticsProvider(statisticsProvider);
    if (configuration.isStandbyInstance() || !configuration.isPrimarySegmentStore()) {
        // Non-primary and standby stores do not dispatch observation events.
        segmentNodeStoreBuilder.dispatchChanges(false);
    }
    SegmentNodeStore segmentNodeStore = segmentNodeStoreBuilder.build();
    if (configuration.isPrimarySegmentStore()) {
        ObserverTracker observerTracker = new ObserverTracker(segmentNodeStore);
        observerTracker.start(context.getBundleContext());
        closeables.add(observerTracker);
    }
    if (configuration.isPrimarySegmentStore()) {
        closeables.add(registrations.registerMBean(CheckpointMBean.class, new SegmentCheckpointMBean(segmentNodeStore), CheckpointMBean.TYPE, "Segment node store checkpoint management"));
    }
    if (descriptors) {
        // ensure a clusterId is initialized
        // and expose it as 'oak.clusterid' repository descriptor
        GenericDescriptors clusterIdDesc = new GenericDescriptors();
        clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, new SimpleValueFactory().createValue(getOrCreateId(segmentNodeStore)), true, false);
        closeables.add(registrations.register(Descriptors.class, clusterIdDesc));
        // Register "discovery lite" descriptors
        closeables.add(registrations.register(Descriptors.class, new SegmentDiscoveryLiteDescriptors(segmentNodeStore)));
    }
    // If a shared data store register the repo id in the data store
    if (configuration.isPrimarySegmentStore() && isShared(blobStore)) {
        SharedDataStore sharedDataStore = (SharedDataStore) blobStore;
        try {
            sharedDataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedStoreRecordType.REPOSITORY.getNameFromId(getOrCreateId(segmentNodeStore)));
        } catch (Exception e) {
            throw new IOException("Could not register a unique repositoryId", e);
        }
        if (blobStore instanceof BlobTrackingStore) {
            BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
            // Replace any previously installed tracker before adding the new one.
            if (trackingStore.getTracker() != null) {
                trackingStore.getTracker().close();
            }
            trackingStore.addTracker(new BlobIdTracker(configuration.getRepositoryHome(), getOrCreateId(segmentNodeStore), configuration.getBlobSnapshotInterval(), sharedDataStore));
        }
    }
    if (configuration.isPrimarySegmentStore() && blobStore instanceof GarbageCollectableBlobStore) {
        BlobGarbageCollector gc = new MarkSweepGarbageCollector(new SegmentBlobReferenceRetriever(store), (GarbageCollectableBlobStore) blobStore, executor, TimeUnit.SECONDS.toMillis(configuration.getBlobGcMaxAge()), getOrCreateId(segmentNodeStore));
        closeables.add(registrations.registerMBean(BlobGCMBean.class, new BlobGC(gc, executor), BlobGCMBean.TYPE, "Segment node store blob garbage collection"));
    }
    // Expose an MBean for backup/restore operations
    closeables.add(registrations.registerMBean(FileStoreBackupRestoreMBean.class, new FileStoreBackupRestoreImpl(segmentNodeStore, store.getRevisions(), store.getReader(), configuration.getBackupDirectory(), executor), FileStoreBackupRestoreMBean.TYPE, "Segment node store backup/restore"));
    // Expose statistics about the SegmentNodeStore
    closeables.add(registrations.registerMBean(SegmentNodeStoreStatsMBean.class, segmentNodeStore.getStats(), SegmentNodeStoreStatsMBean.TYPE, "SegmentNodeStore statistics"));
    if (configuration.isPrimarySegmentStore()) {
        log.info("Primary SegmentNodeStore initialized");
    } else {
        log.info("Secondary SegmentNodeStore initialized, role={}", role);
    }
    // Register a factory service to expose the FileStore
    closeables.add(registrations.register(SegmentStoreProvider.class, new DefaultSegmentStoreProvider(store)));
    if (configuration.isStandbyInstance()) {
        // Standby instances do not expose the NodeStore service in OSGi.
        return segmentNodeStore;
    }
    if (configuration.isPrimarySegmentStore()) {
        Map<String, Object> props = new HashMap<String, Object>();
        props.put(Constants.SERVICE_PID, SegmentNodeStore.class.getName());
        props.put("oak.nodestore.description", new String[] { "nodeStoreType=segment" });
        closeables.add(registrations.register(NodeStore.class, segmentNodeStore, props));
    }
    return segmentNodeStore;
}
Use of com.google.common.base.Supplier in the jackrabbit-oak project by Apache.
The following example is taken from the class FileStore, method initialNode.
@Nonnull
private Supplier<RecordId> initialNode() {
    // Lazily writes the initial repository skeleton (an empty "root" child node)
    // the first time the supplier is invoked.
    return new Supplier<RecordId>() {
        @Override
        public RecordId get() {
            try {
                SegmentWriter initWriter = segmentWriterBuilder("init").build(FileStore.this);
                NodeBuilder rootBuilder = EMPTY_NODE.builder();
                rootBuilder.setChildNode("root", EMPTY_NODE);
                SegmentNodeState initialState = initWriter.writeNode(rootBuilder.getNodeState());
                // Flush so the record is persisted before its id is handed out.
                initWriter.flush();
                return initialState.getRecordId();
            } catch (IOException e) {
                String msg = "Failed to write initial node";
                log.error(msg, e);
                throw new IllegalStateException(msg, e);
            }
        }
    };
}
Aggregations