use of org.apache.archiva.repository.content.LayoutException in project archiva by apache.
the class MetadataUpdaterConsumer method processFile.
@Override
public void processFile(String path) throws ConsumerException {
    // Ignore paths like .index etc.
    if (!path.startsWith(".")) {
        try {
            BaseRepositoryContentLayout layout = repository.getLayout(BaseRepositoryContentLayout.class);
            Artifact artifact = layout.getArtifact(path);
            updateVersionMetadata(artifact, path);
            updateProjectMetadata(artifact, path);
        } catch (LayoutException e) {
            log.info("Not processing path that is not an artifact: {} ({})", path, e.getMessage());
        }
    }
}
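The consumer above treats LayoutException as a signal that a path is simply not an artifact and skips it. The same idiom can be lifted into a small predicate, sketched below under the assumption that repository is a ManagedRepositoryContent as in the consumer; ArtifactPathClassifier and isArtifactPath are hypothetical names, and imports of the Archiva content API types are omitted.
    // Minimal sketch (assumed API, mirrors the consumer above): classify repository paths,
    // skipping everything the default layout cannot parse as an artifact.
    public class ArtifactPathClassifier {

        private final ManagedRepositoryContent repository;

        public ArtifactPathClassifier(ManagedRepositoryContent repository) {
            this.repository = repository;
        }

        // Returns true if the relative path resolves to an artifact in the repository layout.
        public boolean isArtifactPath(String path) {
            // Hidden paths such as ".index" are never artifacts.
            if (path.startsWith(".")) {
                return false;
            }
            try {
                BaseRepositoryContentLayout layout = repository.getLayout(BaseRepositoryContentLayout.class);
                layout.getArtifact(path);
                return true;
            } catch (LayoutException e) {
                // The layout rejected the path, e.g. a checksum, metadata or index file.
                return false;
            }
        }
    }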
use of org.apache.archiva.repository.content.LayoutException in project archiva by apache.
the class ManagedDefaultRepositoryContent method newArtifactStream.
/**
 * Returns all related artifacts that match the given artifact. That means all artifacts that have
 * the same filename plus an additional extension, e.g. ${fileName}.sha2
 *
 * @param item the artifact
 * @return the stream of artifacts
 * @throws ContentAccessException if access to the underlying storage failed
 */
public Stream<? extends Artifact> newArtifactStream(Artifact item) throws ContentAccessException {
    final Version v = item.getVersion();
    final String fileName = item.getFileName();
    final Predicate<StorageAsset> filter = (StorageAsset a) -> a.getName().startsWith(fileName + ".");
    return v.getAsset().list().stream().filter(filter).map(a -> {
        try {
            return getArtifactFromPath(a);
        } catch (LayoutException e) {
            log.error("Not a valid artifact path " + a.getPath(), e);
            return null;
        }
    }).filter(Objects::nonNull);
}
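As a usage sketch, a caller could consume this stream to list an artifact together with its companion files (checksums, signatures). The helper below is hypothetical: content is assumed to be a ManagedDefaultRepositoryContent instance, and the java.util and Archiva imports are omitted.
    // Minimal sketch (assumed API): collect the file names of an artifact and of all
    // related assets that share its file name plus an extra extension (.sha1, .asc, ...).
    public List<String> listRelatedFileNames(ManagedDefaultRepositoryContent content, Artifact artifact)
            throws ContentAccessException {
        return content.newArtifactStream(artifact)
                .map(Artifact::getFileName)
                .collect(Collectors.toList());
    }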
use of org.apache.archiva.repository.content.LayoutException in project archiva by apache.
the class DefaultRepositoriesService method removeProjectVersion.
@Override
public ActionStatus removeProjectVersion(String repositoryId, String namespace, String projectId, String version) throws ArchivaRestServiceException {
    // if not a generic snapshot, we can use the standard way to delete the artifact
    if (!VersionUtil.isGenericSnapshot(version)) {
        Artifact artifact = new Artifact(namespace, projectId, version);
        artifact.setRepositoryId(repositoryId);
        artifact.setContext(repositoryId);
        return deleteArtifact(artifact);
    }
    if (StringUtils.isEmpty(repositoryId)) {
        throw new ArchivaRestServiceException("repositoryId cannot be null", 400, null);
    }
    if (!getPermissionStatus(repositoryId).isAuthorizedToDeleteArtifacts()) {
        throw new ArchivaRestServiceException("not authorized to delete artifacts", 403, null);
    }
    if (StringUtils.isEmpty(namespace)) {
        throw new ArchivaRestServiceException("groupId cannot be null", 400, null);
    }
    if (StringUtils.isEmpty(projectId)) {
        throw new ArchivaRestServiceException("artifactId cannot be null", 400, null);
    }
    if (StringUtils.isEmpty(version)) {
        throw new ArchivaRestServiceException("version cannot be null", 400, null);
    }
    RepositorySession repositorySession = null;
    try {
        repositorySession = repositorySessionFactory.createSession();
    } catch (MetadataRepositoryException e) {
        e.printStackTrace();
    }
    try {
        ManagedRepositoryContent repository = getManagedRepositoryContent(repositoryId);
        BaseRepositoryContentLayout layout = repository.getLayout(BaseRepositoryContentLayout.class);
        ArchivaItemSelector selector = ArchivaItemSelector.builder().withNamespace(namespace).withProjectId(projectId).withVersion(version).build();
        Version versionItem = layout.getVersion(selector);
        if (versionItem != null && versionItem.exists()) {
            repository.deleteItem(versionItem);
        }
        MetadataRepository metadataRepository = repositorySession.getRepository();
        Collection<ArtifactMetadata> artifacts = metadataRepository.getArtifacts(repositorySession, repositoryId, namespace, projectId, version);
        for (ArtifactMetadata artifactMetadata : artifacts) {
            metadataRepository.removeTimestampedArtifact(repositorySession, artifactMetadata, version);
        }
        metadataRepository.removeProjectVersion(repositorySession, repositoryId, namespace, projectId, version);
    } catch (MetadataRepositoryException | MetadataResolutionException | RepositoryException | ItemNotFoundException | LayoutException e) {
        throw new ArchivaRestServiceException("Repository exception: " + e.getMessage(), 500, e);
    } finally {
        try {
            repositorySession.save();
        } catch (MetadataSessionException e) {
            log.error("Session save failed {}", e.getMessage());
        }
        repositorySession.close();
    }
    return ActionStatus.SUCCESS;
}
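The storage half of this method, i.e. resolving the version through the layout and deleting it, follows a pattern that can be sketched on its own. The helper below is hypothetical; it assumes a ManagedRepositoryContent for the target repository and a log field, and omits the metadata-repository cleanup shown above.
    // Minimal sketch (assumed API): delete a project version from repository storage.
    // Returns true if the version existed and was deleted, false if the layout could not
    // resolve the selector or the version was not present.
    boolean deleteVersionFromStorage(ManagedRepositoryContent repository, String namespace, String projectId, String version)
            throws ItemNotFoundException, ContentAccessException {
        try {
            BaseRepositoryContentLayout layout = repository.getLayout(BaseRepositoryContentLayout.class);
            ArchivaItemSelector selector = ArchivaItemSelector.builder()
                    .withNamespace(namespace)
                    .withProjectId(projectId)
                    .withVersion(version)
                    .build();
            Version versionItem = layout.getVersion(selector);
            if (versionItem != null && versionItem.exists()) {
                // Removes the version directory and everything below it.
                repository.deleteItem(versionItem);
                return true;
            }
            return false;
        } catch (LayoutException e) {
            // The repository layout cannot resolve namespace/project/version selectors.
            log.warn("Incompatible layout for repository {}: {}", repository.getId(), e.getMessage());
            return false;
        }
    }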
use of org.apache.archiva.repository.content.LayoutException in project archiva by apache.
the class ArchivaDavResourceFactory method createResource.
@Override
public DavResource createResource(final DavResourceLocator locator, final DavServletRequest request, final DavServletResponse response) throws DavException {
    final ArchivaDavResourceLocator archivaLocator = checkLocatorIsInstanceOfRepositoryLocator(locator);
    final String sRepoId = archivaLocator.getRepositoryId();
    RepositoryGroup repoGroup = repositoryRegistry.getRepositoryGroup(sRepoId);
    final boolean isGroupRepo = repoGroup != null;
    String activePrincipal = getActivePrincipal(request);
    List<String> resourcesInAbsolutePath = new ArrayList<>();
    boolean readMethod = WebdavMethodUtil.isReadMethod(request.getMethod());
    RepositoryRequestInfo repositoryRequestInfo = null;
    DavResource resource;
    if (isGroupRepo) {
        if (!readMethod) {
            throw new DavException(HttpServletResponse.SC_METHOD_NOT_ALLOWED, "Write method not allowed for repository groups.");
        }
        log.debug("Repository group '{}' accessed by '{}'", repoGroup.getId(), activePrincipal);
        // handle browse requests for virtual repos
        if (getLogicalResource(archivaLocator, null, true).endsWith("/")) {
            DavResource davResource = getResourceFromGroup(request, archivaLocator, repoGroup);
            setHeaders(response, locator, davResource, true);
            return davResource;
        } else {
            // make a copy to avoid potential concurrent modifications (e.g. by configuration)
            // TODO: ultimately, locking might be more efficient than copying in this fashion since updates are infrequent
            resource = processRepositoryGroup(request, archivaLocator, activePrincipal, resourcesInAbsolutePath, repoGroup);
            for (ManagedRepository repo : repoGroup.getRepositories()) {
                if (repo != null) {
                    repositoryRequestInfo = repo.getRequestInfo();
                    break;
                }
            }
        }
    } else {
        // We do not provide folders for remote repositories
        ManagedRepository repo = repositoryRegistry.getManagedRepository(sRepoId);
        if (repo == null) {
            throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Invalid repository: " + archivaLocator.getRepositoryId());
        }
        ManagedRepositoryContent managedRepositoryContent = repo.getContent();
        if (managedRepositoryContent == null) {
            log.error("Inconsistency detected. Repository content not found for '{}'", archivaLocator.getRepositoryId());
            throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Invalid repository: " + archivaLocator.getRepositoryId());
        }
        log.debug("Managed repository '{}' accessed by '{}'", managedRepositoryContent.getId(), activePrincipal);
        resource = processRepository(request, archivaLocator, activePrincipal, managedRepositoryContent, repo);
        repositoryRequestInfo = repo.getRequestInfo();
        String logicalResource = getLogicalResource(archivaLocator, null, false);
        resourcesInAbsolutePath.add(managedRepositoryContent.getRepository().getRoot().getFilePath().resolve(logicalResource).toAbsolutePath().toString());
    }
    String requestedResource = request.getRequestURI();
    // merge metadata only when requested via the repo group
    if ((repositoryRequestInfo.isMetadata(requestedResource) || repositoryRequestInfo.isMetadataSupportFile(requestedResource)) && isGroupRepo) {
        // this should only be at the project level, not the version level!
        if (isProjectReference(requestedResource)) {
            ArchivaDavResource res = (ArchivaDavResource) resource;
            String newPath;
            if (res.getAsset().hasParent()) {
                newPath = res.getAsset().getParent().getPath() + "/maven-metadata-" + sRepoId + ".xml";
            } else {
                newPath = StringUtils.substringBeforeLast(res.getAsset().getPath(), "/") + "/maven-metadata-" + sRepoId + ".xml";
            }
            // for MRM-872 handle checksums of the merged metadata files
            if (repositoryRequestInfo.isSupportFile(requestedResource)) {
                String metadataChecksumPath = newPath + "." + StringUtils.substringAfterLast(requestedResource, ".");
                StorageAsset metadataChecksum = repoGroup.getAsset(metadataChecksumPath);
                if (metadataChecksum.exists()) {
                    LogicalResource logicalResource = new LogicalResource(getLogicalResource(archivaLocator, null, false));
                    try {
                        resource = new ArchivaDavResource(metadataChecksum, logicalResource.getPath(), repoGroup, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
                    } catch (LayoutException e) {
                        log.error("Incompatible layout: {}", e.getMessage(), e);
                        throw new DavException(500, e);
                    }
                }
            } else {
                if (resourcesInAbsolutePath != null && resourcesInAbsolutePath.size() > 1) {
                    // merge the metadata of all repos under the group
                    ArchivaRepositoryMetadata mergedMetadata = new ArchivaRepositoryMetadata();
                    for (String resourceAbsPath : resourcesInAbsolutePath) {
                        try {
                            Path metadataFile = Paths.get(resourceAbsPath);
                            FilesystemStorage storage = new FilesystemStorage(metadataFile.getParent(), new DefaultFileLockManager());
                            ArchivaRepositoryMetadata repoMetadata = repositoryRegistry.getMetadataReader(repoGroup.getType()).read(storage.getAsset(metadataFile.getFileName().toString()));
                            mergedMetadata = RepositoryMetadataMerge.merge(mergedMetadata, repoMetadata);
                        } catch (RepositoryMetadataException r) {
                            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while merging metadata file.");
                        } catch (IOException e) {
                            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while merging metadata file.");
                        }
                    }
                    try {
                        StorageAsset resourceFile = writeMergedMetadataToFile(repoGroup, mergedMetadata, newPath);
                        LogicalResource logicalResource = new LogicalResource(getLogicalResource(archivaLocator, null, false));
                        resource = new ArchivaDavResource(resourceFile, logicalResource.getPath(), repoGroup, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
                    } catch (RepositoryMetadataException r) {
                        throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while writing metadata file.");
                    } catch (IOException ie) {
                        throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while generating checksum files.");
                    } catch (LayoutException e) {
                        log.error("Incompatible layout: {}", e.getMessage(), e);
                        throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Incompatible layout for repository " + repoGroup.getId());
                    }
                }
            }
        }
    }
    setHeaders(response, locator, resource, false);
    // compatibility with MRM-440 to ensure browsing the repository works ok
    if (resource.isCollection() && !request.getRequestURI().endsWith("/")) {
        throw new BrowserRedirectException(resource.getHref());
    }
    resource.addLockManager(lockManager);
    return resource;
}
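The try/catch around the ArchivaDavResource constructor appears several times in this method. One way to keep the factory readable is a small private helper like the hypothetical sketch below; it mirrors the constructor call used above and assumes the factory's own mimeTypes, auditListeners, scheduler and log fields.
    // Minimal sketch (hypothetical helper): build an ArchivaDavResource and translate
    // LayoutException into a 500 DavException, as createResource() does inline above.
    private DavResource buildDavResource(StorageAsset asset, String logicalPath, ManagedRepository repository, DavServletRequest request, String activePrincipal, ArchivaDavResourceLocator locator) throws DavException {
        try {
            return new ArchivaDavResource(asset, logicalPath, repository, request.getRemoteAddr(), activePrincipal, request.getDavSession(), locator, this, mimeTypes, auditListeners, scheduler);
        } catch (LayoutException e) {
            log.error("Incompatible layout: {}", e.getMessage(), e);
            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
        }
    }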
use of org.apache.archiva.repository.content.LayoutException in project archiva by apache.
the class ArchivaDavResourceFactory method processRepository.
private DavResource processRepository(final DavServletRequest request, ArchivaDavResourceLocator archivaLocator, String activePrincipal, ManagedRepositoryContent managedRepositoryContent, org.apache.archiva.repository.ManagedRepository managedRepository) throws DavException {
    DavResource resource = null;
    if (isAuthorized(request, managedRepositoryContent.getId())) {
        boolean readMethod = WebdavMethodUtil.isReadMethod(request.getMethod());
        // Maven-centric part: evaluate the path if it refers to a -SNAPSHOT version
        // MRM-1846: test for read method to prevent an issue with maven 2.2.1 and uniqueVersion=false
        String path = readMethod ? evaluatePathWithVersion(archivaLocator, managedRepositoryContent, request.getContextPath()) : getLogicalResource(archivaLocator, managedRepository, false);
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        LogicalResource logicalResource = new LogicalResource(path);
        StorageAsset repoAsset = managedRepository.getAsset(path);
        // Path resourceFile = Paths.get( managedRepositoryContent.getRepoRoot(), path );
        try {
            resource = new ArchivaDavResource(repoAsset, path, managedRepository, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
        } catch (LayoutException e) {
            log.error("Incompatible layout: {}", e.getMessage(), e);
            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
        }
        if (WebdavMethodUtil.isReadMethod(request.getMethod())) {
            if (archivaLocator.getHref(false).endsWith("/") && !repoAsset.isContainer()) {
                // force a resource not found
                throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Resource does not exist");
            } else {
                if (!resource.isCollection()) {
                    boolean previouslyExisted = repoAsset.exists();
                    boolean fromProxy = fetchContentFromProxies(managedRepository, request, logicalResource);
                    StorageAsset resourceAsset = null;
                    // legacy layout format.
                    try {
                        // Perform an adjustment of the resource to the managed repository expected path.
                        // String localResourcePath = managedRepository.getRequestInfo().toNativePath( logicalResource.getPath() );
                        resourceAsset = managedRepository.getAsset(logicalResource.getPath());
                        resource = new ArchivaDavResource(resourceAsset, logicalResource.getPath(), managedRepository, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
                    } catch (LayoutException e) {
                        if (resourceAsset == null || !resourceAsset.exists()) {
                            throw new DavException(HttpServletResponse.SC_NOT_FOUND, e);
                        }
                    }
                    if (fromProxy) {
                        String action = (previouslyExisted ? AuditEvent.MODIFY_FILE : AuditEvent.CREATE_FILE) + PROXIED_SUFFIX;
                        log.debug("Proxied artifact '{}' in repository '{}' (current user '{}')", resourceAsset.getName(), managedRepositoryContent.getId(), activePrincipal);
                        triggerAuditEvent(request.getRemoteAddr(), archivaLocator.getRepositoryId(), logicalResource.getPath(), action, activePrincipal);
                    }
                    if (!resourceAsset.exists()) {
                        throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Resource does not exist");
                    }
                }
            }
        }
        if (request.getMethod().equals(HTTP_PUT_METHOD)) {
            String resourcePath = logicalResource.getPath();
            RepositoryRequestInfo repositoryRequestInfo = managedRepository.getRequestInfo();
            // we assume that release artifacts can only be deployed to repositories that have the release scheme enabled
            if (managedRepositoryContent.getRepository().getActiveReleaseSchemes().contains(ReleaseScheme.RELEASE) && !repositoryRequestInfo.isMetadata(resourcePath) && !repositoryRequestInfo.isSupportFile(resourcePath)) {
                // ArtifactReference artifact = null;
                Artifact artifact = null;
                try {
                    BaseRepositoryContentLayout layout = managedRepositoryContent.getLayout(BaseRepositoryContentLayout.class);
                    ContentItem artifactItem = managedRepositoryContent.toItem(resourcePath);
                    artifact = layout.adaptItem(Artifact.class, artifactItem);
                    if (!VersionUtil.isSnapshot(artifact.getVersion().getId())) {
                        // check if the artifact already exists and whether re-deployment to the repository is allowed
                        if (artifactItem.exists() && managedRepositoryContent.getRepository().blocksRedeployments()) {
                            log.warn("Overwriting released artifacts in repository '{}' is not allowed.", managedRepositoryContent.getId());
                            throw new DavException(HttpServletResponse.SC_CONFLICT, "Overwriting released artifacts is not allowed.");
                        }
                    }
                } catch (LayoutException e) {
                    log.warn("Artifact path '{}' is invalid.", resourcePath);
                } catch (ContentAccessException e) {
                    e.printStackTrace();
                }
            }
            /*
             * Create parent directories that don't exist when writing a file. This actually makes this
             * implementation non-compliant with the WebDAV RFC - but we have enough knowledge about how the
             * collection is being used to do this reasonably, and some versions of Maven's WebDAV don't correctly
             * create the collections themselves.
             */
            StorageAsset rootDirectory = managedRepositoryContent.getRepository().getRoot();
            StorageAsset destDir = rootDirectory.resolve(logicalResource.getPath()).getParent();
            if (!destDir.exists()) {
                try {
                    destDir.create();
                } catch (IOException e) {
                    log.error("Could not create directory {}: {}", destDir, e.getMessage(), e);
                    throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create directory " + destDir);
                }
                String relPath = PathUtil.getRelative(rootDirectory.getPath(), destDir.getPath());
                log.debug("Creating destination directory '{}' (current user '{}')", destDir.getName(), activePrincipal);
                triggerAuditEvent(request.getRemoteAddr(), managedRepositoryContent.getId(), relPath, AuditEvent.CREATE_DIR, activePrincipal);
            }
        }
    }
    return resource;
}
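The release re-deployment guard inside the PUT branch is the part of this method that actually consumes LayoutException. Here it is as a standalone sketch; checkReleaseRedeployment is a hypothetical helper that assumes the same log field and Archiva content API as the method above.
    // Minimal sketch (assumed API): refuse to overwrite an already deployed release artifact.
    // Snapshots and paths the layout cannot parse are ignored.
    private void checkReleaseRedeployment(ManagedRepositoryContent content, String resourcePath) throws DavException {
        try {
            BaseRepositoryContentLayout layout = content.getLayout(BaseRepositoryContentLayout.class);
            ContentItem artifactItem = content.toItem(resourcePath);
            Artifact artifact = layout.adaptItem(Artifact.class, artifactItem);
            if (!VersionUtil.isSnapshot(artifact.getVersion().getId())
                    && artifactItem.exists()
                    && content.getRepository().blocksRedeployments()) {
                log.warn("Overwriting released artifacts in repository '{}' is not allowed.", content.getId());
                throw new DavException(HttpServletResponse.SC_CONFLICT, "Overwriting released artifacts is not allowed.");
            }
        } catch (LayoutException e) {
            // Not an artifact path (e.g. checksum or maven-metadata.xml); nothing to check.
            log.warn("Artifact path '{}' is invalid.", resourcePath);
        } catch (ContentAccessException e) {
            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
        }
    }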