Use of org.apache.archiva.checksum.ChecksummedFile in project archiva by apache.
The class AbstractTransactionEvent, method createChecksums.
/**
 * Create checksums for the given file using all digesters defined at construction time.
 *
 * @param file the file to create checksum files for
 * @param force whether existing checksums should be overwritten or not
 * @throws IOException if the file cannot be read or a checksum file cannot be written
 */
protected void createChecksums(Path file, boolean force) throws IOException {
    for (ChecksumAlgorithm checksumAlgorithm : getChecksumAlgorithms()) {
        Path checksumFile = Paths.get(file.toAbsolutePath() + "." + getChecksumFileExtension(checksumAlgorithm));
        if (Files.exists(checksumFile)) {
            if (!force) {
                continue;
            }
            // existing checksum file and force requested: back it up before it is rewritten
            createBackup(checksumFile);
        } else {
            // no checksum file yet: record it as created by this event
            createdFiles.add(checksumFile);
        }
    }
    ChecksummedFile csFile = new ChecksummedFile(file);
    csFile.fixChecksums(getChecksumAlgorithms());
}
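Reduced to its essentials, the snippet above builds a ChecksummedFile for an artifact and lets fixChecksums write (or repair) the companion checksum files. A minimal standalone sketch of that idea follows; the class name and file path are illustrative assumptions, not part of the Archiva source, and it assumes fixChecksums accepts a list of algorithms, as the call to getChecksumAlgorithms() above suggests.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;

import org.apache.archiva.checksum.ChecksumAlgorithm;
import org.apache.archiva.checksum.ChecksummedFile;

// Illustrative helper, not part of Archiva: writes a .sha1 file next to an artifact.
public class ChecksumWriterExample {

    public static void main(String[] args) throws IOException {
        // Hypothetical artifact path used only for this sketch.
        Path artifact = Paths.get("/tmp/repo/example-1.0.jar");

        // The algorithms to produce; createChecksums above obtains these from getChecksumAlgorithms().
        List<ChecksumAlgorithm> algorithms = Collections.singletonList(ChecksumAlgorithm.SHA1);

        // fixChecksums creates or corrects the companion checksum files for the artifact,
        // mirroring the call made at the end of createChecksums above.
        ChecksummedFile csFile = new ChecksummedFile(artifact);
        csFile.fixChecksums(algorithms);
    }
}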
Use of org.apache.archiva.checksum.ChecksummedFile in project archiva by apache.
The class DuplicateArtifactsConsumer, method processFile.
@Override
public void processFile(String path) throws ConsumerException {
    Path artifactFile = this.repositoryDir.resolve(path);
    // TODO: would be quicker to somehow make sure it ran after the update database consumer, or as a part of that
    // perhaps could use an artifact context that is retained for all consumers? First in can set the SHA-1
    // alternatively this could come straight from the storage resolver, which could populate the artifact metadata
    // in the later parse call with the desired checksum and use that
    String checksumSha1;
    ChecksummedFile checksummedFile = new ChecksummedFile(artifactFile);
    try {
        checksumSha1 = checksummedFile.calculateChecksum(ChecksumAlgorithm.SHA1);
    } catch (IOException e) {
        throw new ConsumerException(e.getMessage(), e);
    }
    MetadataRepository metadataRepository = repositorySession.getRepository();
    Collection<ArtifactMetadata> results;
    try {
        results = metadataRepository.getArtifactsByChecksum(repoId, checksumSha1);
    } catch (MetadataRepositoryException e) {
        repositorySession.close();
        throw new ConsumerException(e.getMessage(), e);
    }
    if (CollectionUtils.isNotEmpty(results)) {
        ArtifactMetadata originalArtifact;
        try {
            originalArtifact = pathTranslator.getArtifactForPath(repoId, path);
        } catch (Exception e) {
            log.warn("Not reporting problem for invalid artifact in checksum check: {}", e.getMessage());
            return;
        }
        for (ArtifactMetadata dupArtifact : results) {
            String id = path.substring(path.lastIndexOf('/') + 1);
            if (dupArtifact.getId().equals(id)
                    && dupArtifact.getNamespace().equals(originalArtifact.getNamespace())
                    && dupArtifact.getProject().equals(originalArtifact.getProject())
                    && dupArtifact.getVersion().equals(originalArtifact.getVersion())) {
                // Skip reference to itself.
                log.debug("Not counting duplicate for artifact {} for path {}", dupArtifact, path);
                continue;
            }
            RepositoryProblemFacet problem = new RepositoryProblemFacet();
            problem.setRepositoryId(repoId);
            problem.setNamespace(originalArtifact.getNamespace());
            problem.setProject(originalArtifact.getProject());
            problem.setVersion(originalArtifact.getVersion());
            problem.setId(id);
            // FIXME: need to get the right storage resolver for the repository the dupe artifact is in, it might be
            // a different type
            // FIXME: we need the project version here, not the artifact version
            problem.setMessage("Duplicate Artifact Detected: " + path + " <--> "
                    + pathTranslator.toPath(dupArtifact.getNamespace(), dupArtifact.getProject(),
                            dupArtifact.getVersion(), dupArtifact.getId()));
            problem.setProblem("duplicate-artifact");
            try {
                metadataRepository.addMetadataFacet(repoId, problem);
            } catch (MetadataRepositoryException e) {
                throw new ConsumerException(e.getMessage(), e);
            }
        }
    }
}
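The ChecksummedFile call that matters here is calculateChecksum, which returns the digest as a hex string that the consumer then looks up in the metadata repository. A stripped-down sketch of the same comparison outside Archiva's consumer and metadata machinery follows; the standalone class and file paths are assumptions for illustration only.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.archiva.checksum.ChecksumAlgorithm;
import org.apache.archiva.checksum.ChecksummedFile;

// Illustrative sketch, not part of Archiva: compare two files by SHA-1,
// the same kind of check DuplicateArtifactsConsumer performs against the metadata repository.
public class DuplicateCheckExample {

    public static void main(String[] args) throws IOException {
        // Hypothetical paths used only for this example.
        Path first = Paths.get("/tmp/repo/example-1.0.jar");
        Path second = Paths.get("/tmp/repo/example-copy-1.0.jar");

        // calculateChecksum returns the hex digest as a String, as in processFile above.
        String firstSha1 = new ChecksummedFile(first).calculateChecksum(ChecksumAlgorithm.SHA1);
        String secondSha1 = new ChecksummedFile(second).calculateChecksum(ChecksumAlgorithm.SHA1);

        if (firstSha1.equals(secondSha1)) {
            System.out.println("Duplicate content detected: " + first + " <--> " + second);
        } else {
            System.out.println("Files differ.");
        }
    }
}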