use of com.google.common.base.Stopwatch in project kylo by Teradata.
the class DefaultFeedManagerFeedService method saveFeed.
@Deprecated
private void saveFeed(final FeedMetadata feed) {
    Feed.ID feedId = metadataAccess.commit(() -> {
        Stopwatch stopwatch = Stopwatch.createStarted();
        List<? extends HadoopSecurityGroup> previousSavedSecurityGroups = null;
        // Store the old security groups before saving because we need to compare afterward
        if (!feed.isNew()) {
            Feed previousStateBeforeSaving = feedProvider.findById(feedProvider.resolveId(feed.getId()));
            Map<String, String> userProperties = previousStateBeforeSaving.getUserProperties();
            previousSavedSecurityGroups = previousStateBeforeSaving.getSecurityGroups();
        }
        // If this is the first time saving this feed, create a new one
        Feed domainFeed = feedModelTransform.feedToDomain(feed);
        Feed.ID domainId = domainFeed.getId();
        if (domainFeed.getState() == null) {
            domainFeed.setState(Feed.State.ENABLED);
        }
        stopwatch.stop();
        log.debug("Time to transform the feed to a domain object for saving: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        // Initially save the feed
        if (feed.isNew()) {
            stopwatch.start();
            domainFeed = feedProvider.update(domainFeed);
            stopwatch.stop();
            log.debug("Time to save the new feed: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }
        final String feedName = FeedNameUtil.fullName(domainFeed.getCategory().getSystemName(), domainFeed.getName());
        // Build preconditions
        stopwatch.start();
        assignFeedDependencies(feed, domainFeed);
        stopwatch.stop();
        log.debug("Time to assignFeedDependencies: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        // Assign the datasources
        stopwatch.start();
        assignFeedDatasources(feed, domainFeed);
        stopwatch.stop();
        log.debug("Time to assignFeedDatasources: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        stopwatch.start();
        boolean isStream = feed.getRegisteredTemplate() != null && feed.getRegisteredTemplate().isStream();
        Long timeBetweenBatchJobs = feed.getRegisteredTemplate() != null ? feed.getRegisteredTemplate().getTimeBetweenStartingBatchJobs() : 0L;
        // Sync the feed information to ops manager
        metadataAccess.commit(() -> opsManagerFeedProvider.save(opsManagerFeedProvider.resolveId(domainId.toString()), feedName, isStream, timeBetweenBatchJobs));
        stopwatch.stop();
        log.debug("Time to sync feed data with Operations Manager: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        // Update Hadoop security group policies if the groups changed
        if (!feed.isNew() && !ListUtils.isEqualList(previousSavedSecurityGroups, domainFeed.getSecurityGroups())) {
            stopwatch.start();
            List<? extends HadoopSecurityGroup> securityGroups = domainFeed.getSecurityGroups();
            List<String> groupsAsCommaList = securityGroups.stream().map(group -> group.getName()).collect(Collectors.toList());
            hadoopAuthorizationService.updateSecurityGroupsForAllPolicies(feed.getSystemCategoryName(), feed.getSystemFeedName(), groupsAsCommaList, domainFeed.getProperties());
            stopwatch.stop();
            log.debug("Time to update Hadoop security groups: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }
        // Update Hive metastore
        if (hiveTargetSyncColumnDescriptions) {
            stopwatch.start();
            final boolean hasHiveDestination = domainFeed.getDestinations().stream()
                .map(FeedDestination::getDatasource)
                .filter(DerivedDatasource.class::isInstance)
                .map(DerivedDatasource.class::cast)
                .anyMatch(datasource -> "HiveDatasource".equals(datasource.getDatasourceType()));
            if (hasHiveDestination) {
                try {
                    feedHiveTableService.updateColumnDescriptions(feed);
                } catch (final DataAccessException e) {
                    log.warn("Failed to update column descriptions for feed: {}", feed.getCategoryAndFeedDisplayName(), e);
                }
            }
            stopwatch.stop();
            log.debug("Time to update Hive metastore: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }
        // Update Kylo metastore
        stopwatch.start();
        domainFeed = feedProvider.update(domainFeed);
        stopwatch.stop();
        log.debug("Time to call feedProvider.update: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        return domainFeed.getId();
    }, (e) -> {
        if (feed.isNew() && StringUtils.isNotBlank(feed.getId())) {
            // Roll back the ops manager insert if the feed was newly created
            metadataAccess.commit(() -> {
                opsManagerFeedProvider.delete(opsManagerFeedProvider.resolveId(feed.getId()));
            });
        }
    });
    if (feedId != null) {
        // Set deployed as a service account since it needs access to the versionable node
        metadataAccess.commit(() -> {
            // TODO TEMPORARY
            com.thinkbiganalytics.metadata.api.versioning.EntityVersion<Feed.ID, Feed> version = feedProvider.createVersion(feedId, null, false);
            feedProvider.setDeployed(feedId, version.getId());
            // TODO TEMPORARY
        }, MetadataAccess.SERVICE);
    }
}
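Note that despite being grouped with the Commons Lang examples below, this snippet times each phase with Guava's com.google.common.base.Stopwatch (Stopwatch.createStarted() and elapsed(TimeUnit) are Guava APIs, not Commons Lang ones). The two classes are close but not interchangeable. A minimal sketch of the same start/stop/read/reset cycle in both libraries; the class name StopwatchComparison is illustrative:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;
import org.apache.commons.lang3.time.StopWatch;

public class StopwatchComparison {

    public static void main(String[] args) throws InterruptedException {
        // Guava: created via factory methods, read while running or stopped
        Stopwatch guava = Stopwatch.createStarted();
        Thread.sleep(50);
        guava.stop();
        System.out.println("Guava: " + guava.elapsed(TimeUnit.MILLISECONDS) + " ms");
        guava.reset(); // back to the unstarted state, elapsed time cleared

        // Commons Lang 3: constructed, then driven through start/stop/reset
        StopWatch commons = new StopWatch();
        commons.start();
        Thread.sleep(50);
        commons.stop();
        System.out.println("Commons: " + commons.getTime() + " ms");
        commons.reset(); // required before the next start(); restarting without it throws IllegalStateException
    }
}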
use of org.apache.commons.lang3.time.StopWatch in project thymeleaf-tests by thymeleaf.
the class ExpressionBenchmark method main.
public static void main(String[] args) throws Exception {
    final Map<String, String> expressionsMap = ExpressionBenchmarkDefinitions.createExpressionsMap();
    final Configuration configuration = new Configuration();
    final IProcessingContext processingContext = new ProcessingContext(new Context());
    final IStandardExpressionParser parser = new StandardExpressionParser();
    for (final Map.Entry<String, String> expressionEntry : expressionsMap.entrySet()) {
        final String expression = expressionEntry.getKey();
        final String expectedParsingResult = expressionEntry.getValue();
        final IStandardExpression parsedExpression = parser.parseExpression(configuration, processingContext, expression);
        Assert.assertNotNull(parsedExpression);
        final String exp = parsedExpression.getStringRepresentation();
        Assert.assertEquals(expectedParsingResult, exp);
    }
    final StopWatch sw = new StopWatch();
    sw.start();
    for (int x = 0; x < 1000; x++) {
        for (final String expression : expressionsMap.keySet()) {
            parser.parseExpression(configuration, processingContext, expression);
        }
    }
    sw.stop();
    System.out.println("First pass: " + sw.toString());
    sw.reset();
    sw.start();
    for (int x = 0; x < 1000; x++) {
        for (final String expression : expressionsMap.keySet()) {
            parser.parseExpression(configuration, processingContext, expression);
        }
    }
    sw.stop();
    System.out.println("Second pass: " + sw.toString());
}
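The two timed passes follow a common micro-benchmarking pattern: the first pass pays for JIT compilation and any internal caches, so the second pass is usually the more representative number. A reusable sketch of the same reset-and-reuse cycle with the workload stubbed out as a Runnable; the PassTimer, timePasses, and sink names are illustrative, not from the benchmark above:

import org.apache.commons.lang3.time.StopWatch;

public class PassTimer {

    static volatile long sink;

    // Runs the task for the given number of passes, timing each pass with one reused StopWatch.
    static void timePasses(int passes, Runnable task) {
        final StopWatch sw = new StopWatch();
        for (int pass = 1; pass <= passes; pass++) {
            sw.start();
            task.run();
            sw.stop();
            System.out.println("Pass " + pass + ": " + sw); // toString() formats the elapsed time
            sw.reset(); // required: start() on a stopped-but-unreset StopWatch throws IllegalStateException
        }
    }

    public static void main(String[] args) {
        timePasses(2, () -> {
            long acc = 0;
            for (int i = 0; i < 10_000_000; i++) {
                acc += i;
            }
            sink = acc; // publish the result so the loop is not optimized away
        });
    }
}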
use of org.apache.commons.lang3.time.StopWatch in project hippo by NHS-digital-website.
the class BlockingPooledS3Connector method upload.
/**
 * See {@linkplain PooledS3Connector#upload}.
 */
@Override
public S3ObjectMetadata upload(final Supplier<InputStream> inputStreamSupplier, final String fileName, final String mimeType) {
    final StopWatch stopWatch = logger.reportUploadScheduling(fileName);
    final S3ObjectMetadata uploadedS3FileMetadata;
    try {
        uploadedS3FileMetadata = waitFor(uploadExecutorService, () -> {
            logger.reportUploadStarting(fileName);
            return s3Connector.uploadFile(inputStreamSupplier.get(), fileName, mimeType);
        });
        logger.reportUploadStopped(stopWatch, uploadedS3FileMetadata.getReference(), uploadedS3FileMetadata.getSize());
        return uploadedS3FileMetadata;
    } catch (final RuntimeException re) {
        logger.reportUploadFailed(fileName, re);
        throw re;
    }
}
use of org.apache.commons.lang3.time.StopWatch in project hippo by NHS-digital-website.
the class BlockingPooledS3Connector method download.
/**
 * See {@linkplain PooledS3Connector#download}.
 */
@Override
public void download(final String s3FileReference, final Consumer<S3File> downloadConsumer) {
    final StopWatch stopWatch = logger.reportDownloadScheduling(s3FileReference);
    final S3File downloadedFileMetadata;
    try {
        downloadedFileMetadata = waitFor(downloadExecutorService, () -> {
            logger.reportDownloadStarting(s3FileReference);
            final S3File s3File = s3Connector.downloadFile(s3FileReference);
            downloadConsumer.accept(s3File);
            return s3File;
        });
        logger.reportDownloadStopped(stopWatch, s3FileReference, downloadedFileMetadata.getLength());
    } catch (final RuntimeException re) {
        logger.reportDownloadFailed(s3FileReference, re);
        throw re;
    }
}
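In both the upload and download methods the StopWatch returned by the scheduling report presumably starts ticking before the task is queued, so the time passed to the stopped report covers queue wait plus the S3 transfer itself. A generic sketch of that shape, assuming a plain ExecutorService; the TimedPool class and the waitFor helper are hypothetical stand-ins for the project's pooled variant:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.commons.lang3.time.StopWatch;

public class TimedPool {

    // Hypothetical stand-in for the connector's waitFor: blocks until the pooled task finishes.
    static <T> T waitFor(ExecutorService pool, Callable<T> task) {
        try {
            return pool.submit(task).get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause());
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        StopWatch stopWatch = new StopWatch();
        stopWatch.start(); // starts before the task is even queued
        String reference = waitFor(pool, () -> {
            Thread.sleep(100); // stand-in for the S3 transfer
            return "s3://bucket/key";
        });
        stopWatch.stop();
        System.out.println("Transferred " + reference + " in " + stopWatch.getTime() + " ms"); // includes queue wait
        pool.shutdown();
    }
}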
use of org.apache.commons.lang3.time.StopWatch in project TechReborn by TechReborn.
the class Core method init.
@Mod.EventHandler
public void init(FMLInitializationEvent event) throws IllegalAccessException, InstantiationException {
    // World gen
    VeinWorldGenerator.registerTRVeins();
    if (ConfigTechReborn.veinOres) {
        GameRegistry.registerWorldGenerator(VeinWorldGenerator.INSTANCE, 0);
    }
    // Register chest loot
    ModLoot.init();
    // Multiparts
    ModParts.init();
    // Sounds
    ModSounds.init();
    // Compat
    for (ICompatModule compatModule : CompatManager.INSTANCE.compatModules) {
        compatModule.init(event);
    }
    MinecraftForge.EVENT_BUS.register(new StackWIPHandler());
    // Ore dictionary
    OreDict.init();
    // Recipes
    StopWatch watch = new StopWatch();
    watch.start();
    ModRecipes.init();
    watch.stop();
    logHelper.all(watch + " : main recipes");
    // Client-only init, needs to be done before the parts system
    proxy.init(event);
    // World gen
    worldGen.load();
    if (!ConfigTechReborn.veinOres) {
        GameRegistry.registerWorldGenerator(worldGen, 0);
    }
    // DungeonLoot.init();
    // Register GUI handler
    NetworkRegistry.INSTANCE.registerGuiHandler(INSTANCE, new GuiHandler());
    // Achievements
    TRAchievements.init();
    // Multiblock events
    MinecraftForge.EVENT_BUS.register(new MultiblockEventHandler());
    // IDSU manager
    IDSUManager.INSTANCE = new IDSUManager();
    // Event buses
    MinecraftForge.EVENT_BUS.register(IDSUManager.INSTANCE);
    MinecraftForge.EVENT_BUS.register(new MultiblockServerTickHandler());
    MinecraftForge.EVENT_BUS.register(new TRTickHandler());
    MinecraftForge.EVENT_BUS.register(new OreUnifier());
    // Scrapbox
    if (config.ScrapboxDispenser) {
        BlockDispenser.DISPENSE_BEHAVIOR_REGISTRY.putObject(ModItems.scrapBox, new BehaviorDispenseScrapbox());
    }
    logHelper.info("Initialization Complete");
}
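The recipe timing above can be written more compactly on Commons Lang 3.5 or later (an assumption about the version on the mod's classpath), which adds StopWatch.createStarted() and a TimeUnit overload of getTime(). A minimal sketch; the RecipeTiming class and the sleep stand-in are illustrative:

import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.time.StopWatch;

public class RecipeTiming {

    public static void main(String[] args) throws InterruptedException {
        // createStarted() and getTime(TimeUnit) require Commons Lang 3.5+ (assumption about the classpath)
        StopWatch watch = StopWatch.createStarted();
        Thread.sleep(25); // stand-in for ModRecipes.init()
        watch.stop(); // stop before logging so the reading is fixed
        System.out.println(watch.getTime(TimeUnit.MILLISECONDS) + " ms : main recipes");
    }
}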