use of org.apache.commons.lang3.time.StopWatch in project RebornCore by TechReborn.
the class RebornExplosion method explode.
public void explode() {
    StopWatch watch = new StopWatch();
    watch.start();
    for (int tx = -radius; tx < radius + 1; tx++) {
        for (int ty = -radius; ty < radius + 1; ty++) {
            for (int tz = -radius; tz < radius + 1; tz++) {
                if (Math.sqrt(Math.pow(tx, 2) + Math.pow(ty, 2) + Math.pow(tz, 2)) <= radius - 2) {
                    BlockPos pos = center.add(tx, ty, tz);
                    IBlockState state = world.getBlockState(pos);
                    Block block = state.getBlock();
                    if (block != Blocks.BEDROCK && block != Blocks.AIR) {
                        block.onBlockDestroyedByExplosion(world, pos, this);
                        world.setBlockState(pos, Blocks.AIR.getDefaultState(), 3);
                    }
                }
            }
        }
    }
    RebornCore.logHelper.info("The explosion took " + watch + " to explode");
}
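The pattern above is plain commons-lang3 timing: start the watch, do the work, then report the elapsed time (StopWatch.toString() gives a formatted duration, getTime() the milliseconds). A minimal, self-contained sketch of just that pattern; the class name and the Thread.sleep stand-in for the block-removal loop are illustrative placeholders, not RebornCore code:

import org.apache.commons.lang3.time.StopWatch;

public class StopWatchToStringDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();
        watch.start();
        // stand-in for the block-removal loop timed above
        Thread.sleep(250);
        watch.stop();
        // toString() renders the elapsed time as hours:minutes:seconds.millis
        System.out.println("The explosion took " + watch + " to explode (" + watch.getTime() + " ms)");
    }
}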
use of org.apache.commons.lang3.time.StopWatch in project nd4j by deeplearning4j.
the class AeronNDArraySerdeTest method timeOldVsNew.
@Test
public void timeOldVsNew() throws Exception {
    int numTrials = 1000;
    long oldTotal = 0;
    long newTotal = 0;
    INDArray arr = Nd4j.create(100000);
    Nd4j.getCompressor().compressi(arr, "GZIP");
    for (int i = 0; i < numTrials; i++) {
        StopWatch oldStopWatch = new StopWatch();
        BufferedOutputStream bos = new BufferedOutputStream(new ByteArrayOutputStream(arr.length()));
        DataOutputStream dos = new DataOutputStream(bos);
        oldStopWatch.start();
        Nd4j.write(arr, dos);
        oldStopWatch.stop();
        // System.out.println("Old " + oldStopWatch.getNanoTime());
        oldTotal += oldStopWatch.getNanoTime();
        StopWatch newStopWatch = new StopWatch();
        newStopWatch.start();
        AeronNDArraySerde.toBuffer(arr);
        newStopWatch.stop();
        // System.out.println("New " + newStopWatch.getNanoTime());
        newTotal += newStopWatch.getNanoTime();
    }
    oldTotal /= numTrials;
    newTotal /= numTrials;
    System.out.println("Old avg " + oldTotal + " New avg " + newTotal);
}
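The test averages nanosecond-resolution timings over many trials with StopWatch.getNanoTime(). A stripped-down sketch of that micro-benchmark loop, with the ND4J serialization calls replaced by a placeholder doWork() method:

import org.apache.commons.lang3.time.StopWatch;

public class NanoTimingDemo {
    public static void main(String[] args) {
        int numTrials = 1000;
        long total = 0;
        for (int i = 0; i < numTrials; i++) {
            StopWatch watch = new StopWatch();
            watch.start();
            // placeholder for the serialization call being measured
            doWork();
            watch.stop();
            // nanosecond-resolution elapsed time for this trial
            total += watch.getNanoTime();
        }
        System.out.println("avg " + (total / numTrials) + " ns");
    }

    private static double doWork() {
        return Math.sqrt(Math.random());
    }
}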
use of org.apache.commons.lang3.time.StopWatch in project alf.io by alfio-event.
the class SpecialPriceTokenGenerator method generatePendingCodes.
public void generatePendingCodes() {
    StopWatch stopWatch = new StopWatch();
    log.trace("start pending codes generation");
    stopWatch.start();
    specialPriceRepository.findWaitingElements().forEach(this::generateCode);
    stopWatch.stop();
    log.trace("end. Took {} ms", stopWatch.getTime());
}
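Since commons-lang3 3.5 the new-then-start sequence can also be written with the StopWatch.createStarted() factory. A minimal sketch of the same wrap-and-log-milliseconds pattern, with the repository call replaced by a placeholder:

import org.apache.commons.lang3.time.StopWatch;

public class GenerateCodesTimingDemo {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() is available since commons-lang3 3.5
        StopWatch stopWatch = StopWatch.createStarted();
        // stand-in for specialPriceRepository.findWaitingElements().forEach(...)
        Thread.sleep(100);
        stopWatch.stop();
        System.out.println("end. Took " + stopWatch.getTime() + " ms");
    }
}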
use of org.apache.commons.lang3.time.StopWatch in project kylo by Teradata.
the class AbstractMergeTable method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLog();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final String blockingValue = context.getProperty(BLOCKING_KEY).evaluateAttributeExpressions(flowFile).getValue();
    String flowFileId = flowFile.getAttribute(CoreAttributes.UUID.key());
    boolean block = false;
    if (blocking && blockingCache.putIfAbsent(blockingValue, flowFileId) != null) {
        if (StringUtils.isBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
            flowFile = session.putAttribute(flowFile, BLOCKED_START_TIME, String.valueOf(System.currentTimeMillis()));
            getLogger().info("Transferring Flow file {} to blocked relationship", new Object[] { flowFile });
        }
        // penalize the flow file and transfer to BLOCKED
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_BLOCKED);
        return;
    }
    // Add Blocking time to flow file if this was a blocked flowfile.
    if (blocking && StringUtils.isNotBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
        String blockedStartTime = flowFile.getAttribute(BLOCKED_START_TIME);
        try {
            Long l = Long.parseLong(blockedStartTime);
            Long blockTime = System.currentTimeMillis() - l;
            getLogger().info("Processing Blocked flow file {}. This was blocked for {} ms", new Object[] { flowFile, blockTime });
            flowFile = session.putAttribute(flowFile, BLOCKED_TIME, String.valueOf(blockTime) + " ms");
        } catch (NumberFormatException e) {
            // ignore a malformed start-time attribute; the blocked time is simply not recorded
        }
    }
    String PROVENANCE_EXECUTION_STATUS_KEY = context.getName() + " Execution Status";
    String partitionSpecString = context.getProperty(PARTITION_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue();
    String sourceSchema = context.getProperty(SOURCE_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String sourceTable = context.getProperty(SOURCE_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String targetSchema = context.getProperty(TARGET_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String targetTable = context.getProperty(TARGET_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String feedPartitionValue = context.getProperty(FEED_PARTITION).evaluateAttributeExpressions(flowFile).getValue();
    String mergeStrategyValue = context.getProperty(MERGE_STRATEGY).evaluateAttributeExpressions(flowFile).getValue();
    String hiveConfigurations = context.getProperty(HIVE_CONFIGURATIONS).evaluateAttributeExpressions(flowFile).getValue();
    boolean resetHive = context.getProperty(RESET_HIVE).asBoolean();
    final ColumnSpec[] columnSpecs = Optional.ofNullable(context.getProperty(FIELD_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue()).filter(StringUtils::isNotEmpty).map(ColumnSpec::createFromString).orElse(new ColumnSpec[0]);
    if (STRATEGY_PK_MERGE.equals(mergeStrategyValue) && (columnSpecs == null || columnSpecs.length == 0)) {
        getLog().error("Missing required field specification for PK merge feature");
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: Missing required field specification for PK merge feature");
        release(blockingValue);
        session.transfer(flowFile, IngestProperties.REL_FAILURE);
        return;
    }
    // Maintain default for backward compatibility
    if (StringUtils.isEmpty(mergeStrategyValue)) {
        mergeStrategyValue = STRATEGY_DEDUPE_MERGE;
    }
    logger.info("Merge strategy: " + mergeStrategyValue + " Using Source: " + sourceTable + " Target: " + targetTable + " feed partition:" + feedPartitionValue + " partSpec: " + partitionSpecString);
    final StopWatch stopWatch = new StopWatch(true);
    try (final Connection conn = getConnection(context)) {
        TableMergeSyncSupport mergeSupport = new TableMergeSyncSupport(conn);
        if (resetHive) {
            mergeSupport.resetHiveConf();
        }
        mergeSupport.enableDynamicPartitions();
        if (StringUtils.isNotEmpty(hiveConfigurations)) {
            mergeSupport.setHiveConf(hiveConfigurations.split("\\|"));
        }
        PartitionSpec partitionSpec = new PartitionSpec(partitionSpecString);
        if (STRATEGY_DEDUPE_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, true);
        } else if (STRATEGY_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, false);
        } else if (STRATEGY_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_ROLLING_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doRollingSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_PK_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doPKMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, columnSpecs);
        } else {
            throw new UnsupportedOperationException("Failed to resolve the merge strategy");
        }
        session.getProvenanceReporter().modifyContent(flowFile, "Execution completed", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Successful");
        release(blockingValue);
        logger.info("Execution completed: " + stopWatch.getElapsed(TimeUnit.MILLISECONDS) + " Merge strategy: " + mergeStrategyValue + " Using Source: " + sourceTable + " Target: " + targetTable + " feed partition:" + feedPartitionValue + " partSpec: " + partitionSpecString);
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Exception e) {
        logger.error("Unable to execute merge doMerge for {} due to {}; routing to failure", new Object[] { flowFile, e }, e);
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: " + e.getMessage());
        release(blockingValue);
        session.transfer(flowFile, REL_FAILURE);
    }
}
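Note that the StopWatch used here is constructed with new StopWatch(true) and read with getElapsed(TimeUnit.MILLISECONDS); that signature matches NiFi's org.apache.nifi.util.StopWatch (auto-start constructor) rather than the commons-lang3 class named in the heading. For comparison, a rough commons-lang3 equivalent of the create-started-then-read-milliseconds pattern, where the Thread.sleep is a placeholder for the merge/sync work:

import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.time.StopWatch;

public class ElapsedMillisDemo {
    public static void main(String[] args) throws InterruptedException {
        // commons-lang3 analogue of new StopWatch(true): create the watch already running
        StopWatch stopWatch = StopWatch.createStarted();
        // placeholder for the merge/sync execution timed above
        Thread.sleep(100);
        long elapsedMs = stopWatch.getTime(TimeUnit.MILLISECONDS);
        System.out.println("Execution completed: " + elapsedMs + " ms");
    }
}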
use of org.apache.commons.lang3.time.StopWatch in project kylo by Teradata.
the class DefaultFeedManagerFeedService method deployFeed.
private NifiFeed deployFeed(final FeedMetadata feedMetadata, com.thinkbiganalytics.metadata.api.versioning.EntityVersion<Feed.ID, Feed> version) throws DeployFeedException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    boolean enabled = false;
    if (feedMetadata.isActive()) {
        feedMetadata.setState(Feed.State.ENABLED.name());
        enabled = true;
    } else {
        feedMetadata.setState(Feed.State.DISABLED.name());
    }
    // Store ref to the originalFeedProperties before resolving and merging with the template
    List<NifiProperty> originalFeedProperties = feedMetadata.getProperties();
    // Get all the properties for the metadata
    RegisteredTemplate registeredTemplate = registeredTemplateService.findRegisteredTemplate(new RegisteredTemplateRequest.Builder().templateId(feedMetadata.getTemplateId()).templateName(feedMetadata.getTemplateName()).isFeedEdit(true).includeSensitiveProperties(true).build());
    // Copy the registered template properties into a new list so it doesn't get updated
    List<NifiProperty> templateProperties = registeredTemplate.getProperties().stream().map(nifiProperty -> new NifiProperty(nifiProperty)).collect(Collectors.toList());
    // Update the template properties with the feedMetadata properties
    NifiPropertyUtil.matchAndSetPropertyByProcessorName(templateProperties, feedMetadata.getProperties(), NifiPropertyUtil.PropertyUpdateMode.UPDATE_ALL_PROPERTIES);
    registeredTemplate.setProperties(templateProperties);
    feedMetadata.setProperties(registeredTemplate.getProperties());
    feedMetadata.setRegisteredTemplate(registeredTemplate);
    // Skip any properties that the user supplied which are not ${ values
    List<NifiProperty> propertiesToSkip = originalFeedProperties.stream().filter(property -> !propertyExpressionResolver.containsVariablesPatterns(property.getValue())).collect(Collectors.toList());
    List<NifiProperty> templatePropertiesToSkip = registeredTemplate.getProperties().stream().filter(property -> property.isSelected() && !propertyExpressionResolver.containsVariablesPatterns(property.getValue())).collect(Collectors.toList());
    if (templatePropertiesToSkip != null && !templatePropertiesToSkip.isEmpty()) {
        propertiesToSkip.addAll(templatePropertiesToSkip);
    }
    // Resolve any ${metadata.} properties
    propertyExpressionResolver.resolvePropertyExpressions(feedMetadata, propertiesToSkip);
    // decrypt the metadata
    feedModelTransform.decryptSensitivePropertyValues(feedMetadata);
    // if this is the very first version we need to enable it later (after the data has been sync'd with ops manager)
    boolean enableLater = false;
    if (enabled && version.isFirstVersion()) {
        enabled = false;
        enableLater = true;
        feedMetadata.setState(FeedMetadata.STATE.DISABLED.name());
    }
    CreateFeedBuilder feedBuilder = CreateFeedBuilder.newFeed(nifiRestClient, nifiFlowCache, feedMetadata, registeredTemplate.getNifiTemplateId(), propertyExpressionResolver, propertyDescriptorTransform, niFiObjectCache, templateConnectionUtil).enabled(enabled).setOriginalFeedProperties(originalFeedProperties).removeInactiveVersionedProcessGroup(removeInactiveNifiVersionedFeedFlows).autoAlign(nifiAutoFeedsAlignAfterSave).withNiFiTemplateCache(niFiTemplateCache);
    if (registeredTemplate.isReusableTemplate()) {
        feedBuilder.setReusableTemplate(true);
        feedMetadata.setIsReusableFeed(true);
    } else {
        feedBuilder.inputProcessorType(feedMetadata.getInputProcessorType()).feedSchedule(feedMetadata.getSchedule()).properties(feedMetadata.getProperties());
        if (registeredTemplate.usesReusableTemplate()) {
            for (ReusableTemplateConnectionInfo connection : registeredTemplate.getReusableTemplateConnections()) {
                feedBuilder.addInputOutputPort(new InputOutputPort(connection.getReusableTemplateInputPortName(), connection.getFeedOutputPortName()));
            }
        }
    }
    stopwatch.stop();
    log.debug("Time to prepare data for saving feed in NiFi: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();
    stopwatch.start();
    NifiProcessGroup entity = feedBuilder.build();
    stopwatch.stop();
    log.debug("Time to save feed in NiFi: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();
    NifiFeed feed = new NifiFeed(feedMetadata, entity);
    // Set the original feedProperties back to the feed
    feedMetadata.setProperties(originalFeedProperties);
    // Encrypt the metadata properties
    feedModelTransform.encryptSensitivePropertyValues(feedMetadata);
    if (entity.isSuccess()) {
        feedMetadata.setNifiProcessGroupId(entity.getProcessGroupEntity().getId());
        try {
            stopwatch.start();
            saveDeployedFeed(feedMetadata, version);
            // tell NiFi if this is a streaming feed or not
            if (feedMetadata.getRegisteredTemplate().isStream()) {
                streamingFeedJmsNotificationService.updateNiFiStatusJMSTopic(entity, feedMetadata);
            }
            feed.setSuccess(true);
            feed.setEnableAfterSave(enableLater);
            stopwatch.stop();
            log.debug("Time to saveFeed in Kylo: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
            stopwatch.start();
            feedBuilder.checkAndRemoveVersionedProcessGroup();
        } catch (Exception e) {
            feed.setSuccess(false);
            feed.addErrorMessage(e);
        }
    } else {
        feed.setSuccess(false);
    }
    if (!feed.isSuccess()) {
        if (!entity.isRolledBack()) {
            try {
                feedBuilder.rollback();
            } catch (FeedRollbackException rollbackException) {
                log.error("Error rolling back feed {}. {} ", feedMetadata.getCategoryAndFeedName(), rollbackException.getMessage());
                feed.addErrorMessage("Error occurred in rolling back the Feed.");
            }
            entity.setRolledBack(true);
        }
        throw new DeployFeedException(feed);
    }
    // Move to isSuccess block??
    feedHistoryDataReindexingService.updateHistoryDataReindexingFeedsAvailableCache(feedMetadata);
    return feed;
}
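The deployFeed method appears to use Guava's com.google.common.base.Stopwatch (createStarted, elapsed, reset) to time successive phases of the deployment. The same stop-read-reset-restart phase timing expressed with the commons-lang3 StopWatch would look roughly like this sketch, where the Thread.sleep calls are placeholders for the prepare and save phases:

import org.apache.commons.lang3.time.StopWatch;

public class PhaseTimingDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopwatch = StopWatch.createStarted();
        // phase 1: prepare (placeholder workload)
        Thread.sleep(50);
        stopwatch.stop();
        System.out.println("Time to prepare: " + stopwatch.getTime() + " ms");
        // reset() returns the watch to its unstarted state so it can time the next phase
        stopwatch.reset();
        stopwatch.start();
        // phase 2: save (placeholder workload)
        Thread.sleep(75);
        stopwatch.stop();
        System.out.println("Time to save: " + stopwatch.getTime() + " ms");
    }
}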