Use of org.apache.archiva.common.utils.BaseFile in the Apache Archiva project: the testConsumption method of the AbstractArtifactConsumerTest class.
@SuppressWarnings("deprecation")
@Test
public void testConsumption() {
    // Repository metadata is not an artifact, so an artifact consumer must not want it.
    Path metadataFile = repoLocation.resolve("org/apache/maven/plugins/maven-plugin-plugin/2.4.1/maven-metadata.xml");
    ConsumerWantsFilePredicate wantsFile = new ConsumerWantsFilePredicate();
    wantsFile.setBasefile(new BaseFile(repoLocation.toFile(), metadataFile.toFile()));
    assertFalse(wantsFile.evaluate(consumer));
}
Use of org.apache.archiva.common.utils.BaseFile in the Apache Archiva project: the executeConsumers method of the RepositoryContentConsumers class.
/**
 * A convenience method to execute all of the active selected consumers for a
 * particular arbitrary file.
 * NOTE: Make sure that there is no repository scanning task executing before invoking this so as to prevent
 * the index writer/reader of the current index-content consumer executing from getting closed. For an example,
 * see ArchivaDavResource#executeConsumers( File ).
 *
 * @param repository the repository configuration to use.
 * @param localFile the local file to execute the consumers against.
 * @param updateRelatedArtifacts when false, consumers that (re)generate checksums or metadata are skipped,
 *        because those files are uploaded by the client during a webdav deployment.
 * @throws RepositoryAdminException if the selected consumers cannot be resolved.
 */
public void executeConsumers(ManagedRepository repository, Path localFile, boolean updateRelatedArtifacts) throws RepositoryAdminException {
    List<KnownRepositoryContentConsumer> selectedKnownConsumers = null;
    // Run the repository consumers
    try {
        Closure<RepositoryContentConsumer> triggerBeginScan = new TriggerBeginScanClosure(repository, getStartTime(), false);
        selectedKnownConsumers = getSelectedKnownConsumers();
        // - do not create missing/fix invalid checksums and update metadata when deploying from webdav since these are uploaded by maven
        if (!updateRelatedArtifacts) {
            // removeIf replaces the original copy-then-iterate removal; constant-first equals also tolerates a null id
            selectedKnownConsumers.removeIf(consumer ->
                "create-missing-checksums".equals(consumer.getId()) || "metadata-updater".equals(consumer.getId()));
        }
        List<InvalidRepositoryContentConsumer> selectedInvalidConsumers = getSelectedInvalidConsumers();
        // Notify every consumer that a (single-file) scan is starting.
        IterableUtils.forEach(selectedKnownConsumers, triggerBeginScan);
        IterableUtils.forEach(selectedInvalidConsumers, triggerBeginScan);
        // "process the file if the consumer has it in the includes list, and not in the excludes list"
        Path repoPath = PathUtil.getPathFromUri(repository.getLocation());
        BaseFile baseFile = new BaseFile(repoPath.toString(), localFile.toFile());
        ConsumerWantsFilePredicate predicate = new ConsumerWantsFilePredicate(repository);
        predicate.setBasefile(baseFile);
        predicate.setCaseSensitive(false);
        ConsumerProcessFileClosure closure = new ConsumerProcessFileClosure();
        closure.setBasefile(baseFile);
        closure.setExecuteOnEntireRepo(false);
        Closure<RepositoryContentConsumer> processIfWanted = IfClosure.ifClosure(predicate, closure);
        IterableUtils.forEach(selectedKnownConsumers, processIfWanted);
        if (predicate.getWantedFileCount() <= 0) {
            // Nothing known processed this file. It is invalid!
            IterableUtils.forEach(selectedInvalidConsumers, closure);
        }
        // Notify every known consumer that the (single-file) scan is complete.
        TriggerScanCompletedClosure scanCompletedClosure = new TriggerScanCompletedClosure(repository, false);
        IterableUtils.forEach(selectedKnownConsumers, scanCompletedClosure);
    } finally {
        /* TODO: This is never called by the repository scanner instance, so not calling here either - but it probably should be?
        IterableUtils.forEach( availableKnownConsumers, triggerCompleteScan );
        IterableUtils.forEach( availableInvalidConsumers, triggerCompleteScan );
        */
        releaseSelectedKnownConsumers(selectedKnownConsumers);
    }
}
Use of org.apache.archiva.common.utils.BaseFile in the Apache Archiva project: the visitFile method of the RepositoryScannerInstance class.
/**
 * Visits a single file during the repository walk, dispatching it to the
 * known consumers when it matches the include/exclude patterns, or to the
 * invalid-content consumers when no known consumer wants it.
 */
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
    if (excludeMatcher.stream().noneMatch(m -> m.matches(file)) && includeMatcher.stream().allMatch(m -> m.matches(file))) {
        // Fixed: the original format string had two {} placeholders but only one argument.
        log.debug("Walk Step: {}", file);
        stats.increaseFileCount();
        // consume files regardless - the predicate will check the timestamp
        Path repoPath = PathUtil.getPathFromUri(repository.getLocation());
        BaseFile basefile = new BaseFile(repoPath.toString(), file.toFile());
        // Timestamp finished points to the last successful scan, not this current one.
        if (Files.getLastModifiedTime(file).toMillis() >= changesSince) {
            stats.increaseNewFileCount();
        }
        consumerProcessFile.setBasefile(basefile);
        consumerWantsFile.setBasefile(basefile);
        Closure<RepositoryContentConsumer> processIfWanted = IfClosure.ifClosure(consumerWantsFile, consumerProcessFile);
        IterableUtils.forEach(this.knownConsumers, processIfWanted);
        if (consumerWantsFile.getWantedFileCount() <= 0) {
            // Nothing known processed this file. It is invalid!
            IterableUtils.forEach(this.invalidConsumers, consumerProcessFile);
        }
    }
    return FileVisitResult.CONTINUE;
}
Use of org.apache.archiva.common.utils.BaseFile in the Apache Archiva project: the testConsumptionOfOtherMetadata method of the AbstractArtifactConsumerTest class.
@SuppressWarnings("deprecation")
@Test
public void testConsumptionOfOtherMetadata() {
    // Repository-level metadata mirrored from central is also not an artifact,
    // so the consumer must reject it as well.
    Path metadataFile = repoLocation.resolve("org/apache/maven/plugins/maven-plugin-plugin/2.4.1/maven-metadata-central.xml");
    ConsumerWantsFilePredicate wantsFile = new ConsumerWantsFilePredicate();
    wantsFile.setBasefile(new BaseFile(repoLocation.toFile(), metadataFile.toFile()));
    assertFalse(wantsFile.evaluate(consumer));
}
Use of org.apache.archiva.common.utils.BaseFile in the Apache Archiva project: the assertNotConsumed method of the RepositoryPurgeConsumerTest class.
/**
 * Asserts that the repository-purge consumer does not want the file at the
 * given repository-relative path, even after "**&#47;*.xml" is added to the
 * artifacts file-type pattern.
 */
@SuppressWarnings("deprecation")
private void assertNotConsumed(String path) throws Exception {
    ArchivaConfiguration archivaConfiguration = applicationContext.getBean("archivaConfiguration#default", ArchivaConfiguration.class);
    FileType fileType = archivaConfiguration.getConfiguration().getRepositoryScanning().getFileTypes().get(0);
    assertEquals(FileTypes.ARTIFACTS, fileType.getId());
    fileType.addPattern("**/*.xml");
    // Propagate the pattern change to every FileTypes bean in the context.
    for (FileTypes fileTypes : applicationContext.getBeansOfType(FileTypes.class).values()) {
        fileTypes.afterConfigurationChange(null, "repositoryScanning.fileTypes", null);
    }
    KnownRepositoryContentConsumer repoPurgeConsumer = applicationContext.getBean("knownRepositoryContentConsumer#repository-purge", KnownRepositoryContentConsumer.class);
    Path repoLocation = Paths.get("target/test-" + getName() + "/test-repo");
    ConsumerWantsFilePredicate wantsFile = new ConsumerWantsFilePredicate();
    wantsFile.setBasefile(new BaseFile(repoLocation.toFile(), repoLocation.resolve(path).toFile()));
    assertFalse(wantsFile.evaluate(repoPurgeConsumer));
}
Aggregations