use of org.apache.commons.lang.time.StopWatch in project archiva by apache.
the class SecuritySynchronization method executeEnvironmentChecks.
private void executeEnvironmentChecks() throws ArchivaException {
    if ((checkers == null) || CollectionUtils.isEmpty(checkers.values())) {
        throw new ArchivaException("Unable to initialize the Redback Security Environment, " + "no Environment Check components found.");
    }
    StopWatch stopWatch = new StopWatch();
    stopWatch.reset();
    stopWatch.start();
    List<String> violations = new ArrayList<>();
    for (Entry<String, EnvironmentCheck> entry : checkers.entrySet()) {
        EnvironmentCheck check = entry.getValue();
        List<String> v = new ArrayList<>();
        check.validateEnvironment(v);
        log.info("Environment Check: {} -> {} violation(s)", entry.getKey(), v.size());
        for (String s : v) {
            violations.add("[" + entry.getKey() + "] " + s);
        }
    }
    if (CollectionUtils.isNotEmpty(violations)) {
        StringBuilder msg = new StringBuilder();
        msg.append("EnvironmentCheck Failure.\n");
        msg.append("======================================================================\n");
        msg.append(" ENVIRONMENT FAILURE !! \n");
        msg.append("\n");
        for (String violation : violations) {
            msg.append(violation).append("\n");
        }
        msg.append("\n");
        msg.append("======================================================================");
        log.error(msg.toString());
        throw new ArchivaException("Unable to initialize Redback Security Environment, [" + violations.size() + "] violation(s) encountered, See log for details.");
    }
    stopWatch.stop();
    log.info("time to execute all EnvironmentCheck: {} ms", stopWatch.getTime());
}
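All five snippets on this page follow the same commons-lang timing pattern: construct a StopWatch, start() it, do the work, stop() it, and read the elapsed milliseconds with getTime(). A minimal, self-contained sketch of that API (note that the reset() call in the snippet above is harmless but redundant on a freshly constructed watch; reset() is only needed before re-starting a watch that has already run):

import org.apache.commons.lang.time.StopWatch;

public class StopWatchDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();                // begin timing
        Thread.sleep(50);                 // stand-in for the work being measured
        stopWatch.stop();
        System.out.println("elapsed: " + stopWatch.getTime() + " ms"); // getTime() returns milliseconds
        stopWatch.reset();                // return to the unstarted state; start() may then be called again
    }
}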
use of org.apache.commons.lang.time.StopWatch in project archiva by apache.
the class DownloadRemoteIndexTask method run.
@Override
public void run() {
    // short-lived lock; not sure we need it
    synchronized (this.runningRemoteDownloadIds) {
        if (this.runningRemoteDownloadIds.contains(this.remoteRepository.getId())) {
            // skip it, as it's already running
            log.info("skip download index remote for repo {} it's already running", this.remoteRepository.getId());
            return;
        }
        this.runningRemoteDownloadIds.add(this.remoteRepository.getId());
    }
    Path tempIndexDirectory = null;
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    try {
        log.info("start download remote index for remote repository {}", this.remoteRepository.getId());
        if (this.remoteRepository.getIndexingContext() == null) {
            throw new IndexNotFoundException("No index context set for repository " + remoteRepository.getId());
        }
        if (this.remoteRepository.getType() != RepositoryType.MAVEN) {
            throw new RepositoryException("Bad repository type");
        }
        if (!this.remoteRepository.supportsFeature(RemoteIndexFeature.class)) {
            throw new RepositoryException("Repository does not support RemoteIndexFeature " + remoteRepository.getId());
        }
        RemoteIndexFeature rif = this.remoteRepository.getFeature(RemoteIndexFeature.class).get();
        IndexingContext indexingContext = this.remoteRepository.getIndexingContext().getBaseContext(IndexingContext.class);
        // create a temp directory to download files into
        tempIndexDirectory = Paths.get(indexingContext.getIndexDirectoryFile().getParent(), ".tmpIndex");
        Path indexCacheDirectory = Paths.get(indexingContext.getIndexDirectoryFile().getParent(), ".indexCache");
        Files.createDirectories(indexCacheDirectory);
        if (Files.exists(tempIndexDirectory)) {
            org.apache.archiva.common.utils.FileUtils.deleteDirectory(tempIndexDirectory);
        }
        Files.createDirectories(tempIndexDirectory);
        tempIndexDirectory.toFile().deleteOnExit();
        String baseIndexUrl = indexingContext.getIndexUpdateUrl();
        String wagonProtocol = this.remoteRepository.getLocation().getScheme();
        final StreamWagon wagon = (StreamWagon) wagonFactory.getWagon(new WagonFactoryRequest(wagonProtocol, this.remoteRepository.getExtraHeaders()).networkProxy(this.networkProxy));
        // FIXME olamy: two separate config values for the timeout
        wagon.setReadTimeout((int) rif.getDownloadTimeout().toMillis());
        wagon.setTimeout((int) remoteRepository.getTimeout().toMillis());
        if (wagon instanceof AbstractHttpClientWagon) {
            HttpConfiguration httpConfiguration = new HttpConfiguration();
            HttpMethodConfiguration httpMethodConfiguration = new HttpMethodConfiguration();
            httpMethodConfiguration.setUsePreemptive(true);
            httpMethodConfiguration.setReadTimeout((int) rif.getDownloadTimeout().toMillis());
            httpConfiguration.setGet(httpMethodConfiguration);
            AbstractHttpClientWagon.class.cast(wagon).setHttpConfiguration(httpConfiguration);
        }
        wagon.addTransferListener(new DownloadListener());
        ProxyInfo proxyInfo = null;
        if (this.networkProxy != null) {
            proxyInfo = new ProxyInfo();
            proxyInfo.setType(this.networkProxy.getProtocol());
            proxyInfo.setHost(this.networkProxy.getHost());
            proxyInfo.setPort(this.networkProxy.getPort());
            proxyInfo.setUserName(this.networkProxy.getUsername());
            proxyInfo.setPassword(this.networkProxy.getPassword());
        }
        AuthenticationInfo authenticationInfo = null;
        if (this.remoteRepository.getLoginCredentials() != null && this.remoteRepository.getLoginCredentials() instanceof PasswordCredentials) {
            PasswordCredentials creds = (PasswordCredentials) this.remoteRepository.getLoginCredentials();
            authenticationInfo = new AuthenticationInfo();
            authenticationInfo.setUserName(creds.getUsername());
            authenticationInfo.setPassword(new String(creds.getPassword()));
        }
        log.debug("Connection to {}, authInfo={}", this.remoteRepository.getId(), authenticationInfo);
        wagon.connect(new Repository(this.remoteRepository.getId(), baseIndexUrl), authenticationInfo, proxyInfo);
        Path indexDirectory = indexingContext.getIndexDirectoryFile().toPath();
        if (!Files.exists(indexDirectory)) {
            Files.createDirectories(indexDirectory);
        }
        log.debug("Downloading index file to {}", indexDirectory);
        log.debug("Index cache dir {}", indexCacheDirectory);
        ResourceFetcher resourceFetcher = new WagonResourceFetcher(log, tempIndexDirectory, wagon, remoteRepository);
        IndexUpdateRequest request = new IndexUpdateRequest(indexingContext, resourceFetcher);
        request.setForceFullUpdate(this.fullDownload);
        request.setLocalIndexCacheDir(indexCacheDirectory.toFile());
        IndexUpdateResult result = this.indexUpdater.fetchAndUpdateIndex(request);
        log.debug("Update result success: {}", result.isSuccessful());
        stopWatch.stop();
        log.info("time update index from remote for repository {}: {}ms", this.remoteRepository.getId(), stopWatch.getTime());
        // index packing is optional?
        // IndexPackingRequest indexPackingRequest =
        //     new IndexPackingRequest( indexingContext, indexingContext.getIndexDirectoryFile() );
        // indexPacker.packIndex( indexPackingRequest );
        indexingContext.updateTimestamp(true);
    } catch (Exception e) {
        log.error(e.getMessage(), e);
        throw new RuntimeException(e.getMessage(), e);
    } finally {
        deleteDirectoryQuiet(tempIndexDirectory);
        this.runningRemoteDownloadIds.remove(this.remoteRepository.getId());
    }
    log.info("end download remote index for remote repository {}", this.remoteRepository.getId());
}
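Besides the timing, run() uses a check-then-add inside a single synchronized block so that only one download per repository id is ever in flight, and releases the id in the finally block. A minimal sketch of that single-flight guard, with hypothetical names (runningIds, taskId, work):

import java.util.HashSet;
import java.util.Set;

public class SingleFlightGuard {
    private final Set<String> runningIds = new HashSet<>();

    public void runOnce(String taskId, Runnable work) {
        synchronized (runningIds) {
            if (!runningIds.add(taskId)) { // add() returns false if the id was already present
                return;                    // the same task is already running elsewhere
            }
        }
        try {
            work.run();
        } finally {
            synchronized (runningIds) {
                runningIds.remove(taskId); // always release the id, even when work throws
            }
        }
    }
}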
use of org.apache.commons.lang.time.StopWatch in project archiva by apache.
the class DefaultRepositoryArchivaTaskScheduler method startup.
@PostConstruct
public void startup() throws ArchivaException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    archivaConfiguration.addListener(this);
    List<ManagedRepositoryConfiguration> repositories = archivaConfiguration.getConfiguration().getManagedRepositories();
    RepositorySession repositorySession = repositorySessionFactory.createSession();
    try {
        MetadataRepository metadataRepository = repositorySession.getRepository();
        for (ManagedRepositoryConfiguration repoConfig : repositories) {
            if (repoConfig.isScanned()) {
                try {
                    scheduleRepositoryJobs(repoConfig);
                } catch (SchedulerException e) {
                    throw new ArchivaException("Unable to start scheduler: " + e.getMessage(), e);
                }
                try {
                    if (!isPreviouslyScanned(repoConfig, metadataRepository)) {
                        queueInitialRepoScan(repoConfig);
                    }
                } catch (MetadataRepositoryException e) {
                    log.warn("Unable to determine if a repository is already scanned, skipping initial scan: {}", e.getMessage(), e);
                }
            }
        }
    } finally {
        repositorySession.close();
    }
    stopWatch.stop();
    log.info("Time to initialize DefaultRepositoryArchivaTaskScheduler: {} ms", stopWatch.getTime());
}
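The snippets on this page only ever call start() and stop(), but the same StopWatch class can also pause a measurement with suspend()/resume(), which would let an initialization method like this one exclude a sub-step from the reported time. A small sketch of that capability:

import org.apache.commons.lang.time.StopWatch;

public class SuspendDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        Thread.sleep(30);       // measured
        stopWatch.suspend();
        Thread.sleep(100);      // not measured while the watch is suspended
        stopWatch.resume();
        Thread.sleep(30);       // measured again
        stopWatch.stop();
        System.out.println(stopWatch.getTime() + " ms"); // roughly 60, not 160
    }
}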
use of org.apache.commons.lang.time.StopWatch in project cdap by caskdata.
the class DefaultAuthorizationEnforcer method isVisible.
@Override
public Set<? extends EntityId> isVisible(Set<? extends EntityId> entityIds, Principal principal) throws Exception {
    if (!isSecurityAuthorizationEnabled()) {
        return entityIds;
    }
    Set<EntityId> visibleEntities = new HashSet<>();
    // entities in the system namespace accessed as the master user, and entities that are the
    // principal itself, are visible without consulting the authorizer
    for (EntityId entityId : entityIds) {
        if (isAccessingSystemNSAsMasterUser(entityId, principal) || isEnforcingOnSamePrincipalId(entityId, principal)) {
            visibleEntities.add(entityId);
        }
    }
    Set<? extends EntityId> difference = Sets.difference(entityIds, visibleEntities);
    LOG.trace("Checking visibility of {} for principal {}.", difference, principal);
    // Create a new StopWatch instance every time this is called: DefaultAuthorizationEnforcer is
    // bound as a singleton, and the watch instance must not be re-used across multiple calls.
    StopWatch watch = new StopWatch();
    watch.start();
    Set<? extends EntityId> moreVisibleEntities;
    try {
        moreVisibleEntities = authorizerInstantiator.get().isVisible(difference, principal);
    } finally {
        watch.stop();
        long timeTaken = watch.getTime();
        String logLine = "Checked visibility of {} for principal {}. Time spent in visibility check was {} ms.";
        if (timeTaken > logTimeTakenAsWarn) {
            LOG.warn(logLine, difference, principal, timeTaken);
        } else {
            LOG.trace(logLine, difference, principal, timeTaken);
        }
    }
    visibleEntities.addAll(moreVisibleEntities);
    LOG.trace("Getting {} as visible entities", visibleEntities);
    return Collections.unmodifiableSet(visibleEntities);
}
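The comment about creating a new watch per call is the key detail here: commons-lang StopWatch is stateful and not thread-safe, so a singleton service must keep the watch local to the method, and stopping it in finally preserves the timing log even when the delegate throws. A condensed sketch of that shape, with a hypothetical threshold (WARN_THRESHOLD_MS standing in for logTimeTakenAsWarn):

import org.apache.commons.lang.time.StopWatch;

public class TimedCall {
    private static final long WARN_THRESHOLD_MS = 1000; // hypothetical threshold

    public void timed(Runnable check) {
        StopWatch watch = new StopWatch(); // local instance: concurrent callers never share timer state
        watch.start();
        try {
            check.run();
        } finally {
            watch.stop();                  // runs even when check throws, so the timing is always recorded
            long timeTaken = watch.getTime();
            // escalate the log level only when the call was slow
            System.out.println((timeTaken > WARN_THRESHOLD_MS ? "WARN" : "TRACE") + ": took " + timeTaken + " ms");
        }
    }
}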
use of org.apache.commons.lang.time.StopWatch in project jspwiki by apache.
the class SpamFilter method checkAkismet.
/**
 * Checks the change against the Akismet spam detection service.
 *
 * @param context the wiki context of the edit
 * @param change the content change being checked
 * @throws RedirectException if the spam handling strategy rejects the change
 */
private void checkAkismet(WikiContext context, Change change) throws RedirectException {
    if (m_akismetAPIKey != null) {
        if (m_akismet == null) {
            log.info("Initializing Akismet spam protection.");
            m_akismet = new Akismet(m_akismetAPIKey, context.getEngine().getBaseURL());
            if (!m_akismet.verifyAPIKey()) {
                log.error("Akismet API key cannot be verified. Please check your config.");
                m_akismetAPIKey = null;
                m_akismet = null;
            }
        }
        HttpServletRequest req = context.getHttpRequest();
        // a change that only removes content is never checked as spam
        if (change.m_adds == 0 && change.m_removals > 0) {
            return;
        }
        if (req != null && m_akismet != null) {
            log.debug("Calling Akismet to check for spam...");
            StopWatch sw = new StopWatch();
            sw.start();
            String ipAddress = HttpUtil.getRemoteAddress(req);
            String userAgent = req.getHeader("User-Agent");
            String referrer = req.getHeader("Referer");
            String permalink = context.getViewURL(context.getPage().getName());
            String commentType = context.getRequestContext().equals(WikiContext.COMMENT) ? "comment" : "edit";
            String commentAuthor = context.getCurrentUser().getName();
            String commentAuthorEmail = null;
            String commentAuthorURL = null;
            boolean isSpam = m_akismet.commentCheck(ipAddress, userAgent, referrer, permalink, commentType, commentAuthor, commentAuthorEmail, commentAuthorURL, change.toString(), null);
            sw.stop();
            log.debug("Akismet request done in: " + sw);
            if (isSpam) {
                // Host host = new Host( ipAddress, null );
                // m_temporaryBanList.add( host );
                String uid = log(context, REJECT, REASON_AKISMET, change.toString());
                log.info("SPAM:Akismet (" + uid + "). Akismet thinks this change is spam; added host to temporary ban list.");
                checkStrategy(context, REASON_AKISMET, "Akismet tells Herb you're a spammer, Herb trusts Akismet, and I trust Herb! (Incident code " + uid + ")");
            }
        }
    }
}
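Unlike the other examples, checkAkismet logs the watch object itself ("Akismet request done in: " + sw); StopWatch.toString() renders the elapsed time as a formatted duration rather than an object id. A quick check of both forms:

import org.apache.commons.lang.time.StopWatch;

public class ToStringDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch sw = new StopWatch();
        sw.start();
        Thread.sleep(123);
        sw.stop();
        System.out.println(sw);           // formatted duration, e.g. 0:00:00.123
        System.out.println(sw.getTime()); // raw milliseconds, e.g. 123
    }
}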