Use of org.apache.archiva.repository.RepositoryRequestInfo in project archiva by apache.
The class ArchivaDavResourceFactory, method createResource.
@Override
public DavResource createResource(final DavResourceLocator locator, final DavServletRequest request, final DavServletResponse response) throws DavException {
final ArchivaDavResourceLocator archivaLocator = checkLocatorIsInstanceOfRepositoryLocator(locator);
final String sRepoId = archivaLocator.getRepositoryId();
RepositoryGroup repoGroup = repositoryRegistry.getRepositoryGroup(sRepoId);
final boolean isGroupRepo = repoGroup != null;
String activePrincipal = getActivePrincipal(request);
List<String> resourcesInAbsolutePath = new ArrayList<>();
boolean readMethod = WebdavMethodUtil.isReadMethod(request.getMethod());
RepositoryRequestInfo repositoryRequestInfo = null;
DavResource resource;
if (isGroupRepo) {
if (!readMethod) {
throw new DavException(HttpServletResponse.SC_METHOD_NOT_ALLOWED, "Write method not allowed for repository groups.");
}
log.debug("Repository group '{}' accessed by '{}", repoGroup.getId(), activePrincipal);
// handle browse requests for virtual repos
if (getLogicalResource(archivaLocator, null, true).endsWith("/")) {
DavResource davResource = getResourceFromGroup(request, archivaLocator, repoGroup);
setHeaders(response, locator, davResource, true);
return davResource;
} else {
// make a copy to avoid potential concurrent modifications (e.g. by configuration)
// TODO: ultimately, locking might be more efficient than copying in this fashion since updates are
// infrequent
resource = processRepositoryGroup(request, archivaLocator, activePrincipal, resourcesInAbsolutePath, repoGroup);
for (ManagedRepository repo : repoGroup.getRepositories()) {
if (repo != null) {
repositoryRequestInfo = repo.getRequestInfo();
break;
}
}
}
} else {
// We do not provide folders for remote repositories
ManagedRepository repo = repositoryRegistry.getManagedRepository(sRepoId);
if (repo == null) {
throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Invalid repository: " + archivaLocator.getRepositoryId());
}
ManagedRepositoryContent managedRepositoryContent = repo.getContent();
if (managedRepositoryContent == null) {
log.error("Inconsistency detected. Repository content not found for '{}'", archivaLocator.getRepositoryId());
throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Invalid repository: " + archivaLocator.getRepositoryId());
}
log.debug("Managed repository '{}' accessed by '{}'", managedRepositoryContent.getId(), activePrincipal);
resource = processRepository(request, archivaLocator, activePrincipal, managedRepositoryContent, repo);
repositoryRequestInfo = repo.getRequestInfo();
String logicalResource = getLogicalResource(archivaLocator, null, false);
resourcesInAbsolutePath.add(managedRepositoryContent.getRepository().getRoot().getFilePath().resolve(logicalResource).toAbsolutePath().toString());
}
String requestedResource = request.getRequestURI();
// merge metadata only when requested via the repo group
if ((repositoryRequestInfo.isMetadata(requestedResource) || repositoryRequestInfo.isMetadataSupportFile(requestedResource)) && isGroupRepo) {
// this should only happen at the project level, not the version level!
if (isProjectReference(requestedResource)) {
ArchivaDavResource res = (ArchivaDavResource) resource;
String newPath;
if (res.getAsset().hasParent()) {
newPath = res.getAsset().getParent().getPath() + "/maven-metadata-" + sRepoId + ".xml";
} else {
newPath = StringUtils.substringBeforeLast(res.getAsset().getPath(), "/") + "/maven-metadata-" + sRepoId + ".xml";
}
// for MRM-872 handle checksums of the merged metadata files
if (repositoryRequestInfo.isSupportFile(requestedResource)) {
String metadataChecksumPath = newPath + "." + StringUtils.substringAfterLast(requestedResource, ".");
StorageAsset metadataChecksum = repoGroup.getAsset(metadataChecksumPath);
if (metadataChecksum.exists()) {
LogicalResource logicalResource = new LogicalResource(getLogicalResource(archivaLocator, null, false));
try {
resource = new ArchivaDavResource(metadataChecksum, logicalResource.getPath(), repoGroup, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
} catch (LayoutException e) {
log.error("Incompatible layout: {}", e.getMessage(), e);
throw new DavException(500, e);
}
}
} else {
if (resourcesInAbsolutePath != null && resourcesInAbsolutePath.size() > 1) {
// merge the metadata of all repos under group
ArchivaRepositoryMetadata mergedMetadata = new ArchivaRepositoryMetadata();
for (String resourceAbsPath : resourcesInAbsolutePath) {
try {
Path metadataFile = Paths.get(resourceAbsPath);
FilesystemStorage storage = new FilesystemStorage(metadataFile.getParent(), new DefaultFileLockManager());
ArchivaRepositoryMetadata repoMetadata = repositoryRegistry.getMetadataReader(repoGroup.getType()).read(storage.getAsset(metadataFile.getFileName().toString()));
mergedMetadata = RepositoryMetadataMerge.merge(mergedMetadata, repoMetadata);
} catch (RepositoryMetadataException | IOException e) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while merging metadata file.");
}
}
try {
StorageAsset resourceFile = writeMergedMetadataToFile(repoGroup, mergedMetadata, newPath);
LogicalResource logicalResource = new LogicalResource(getLogicalResource(archivaLocator, null, false));
resource = new ArchivaDavResource(resourceFile, logicalResource.getPath(), repoGroup, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
} catch (RepositoryMetadataException r) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while writing metadata file.");
} catch (IOException ie) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while generating checksum files.");
} catch (LayoutException e) {
log.error("Incompatible layout: {}", e.getMessage(), e);
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Incompatible layout for repository " + repoGroup.getId());
}
}
}
}
}
setHeaders(response, locator, resource, false);
// compatibility with MRM-440 to ensure browsing the repository works ok
if (resource.isCollection() && !request.getRequestURI().endsWith("/")) {
throw new BrowserRedirectException(resource.getHref());
}
resource.addLockManager(lockManager);
return resource;
}
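The group-repository branch above only merges metadata when RepositoryRequestInfo classifies the requested path as a maven-metadata.xml file or one of its support files. A minimal sketch of that classification, using only the RepositoryRequestInfo methods called above (the helper class name is hypothetical):

import org.apache.archiva.repository.RepositoryRequestInfo;

// Hypothetical helper; mirrors the merge-eligibility check in createResource.
public class GroupMetadataRequestCheck {

    private final RepositoryRequestInfo requestInfo;

    public GroupMetadataRequestCheck(RepositoryRequestInfo requestInfo) {
        this.requestInfo = requestInfo;
    }

    // A request served through a repository group triggers metadata merging only if the
    // path is a maven-metadata.xml file or one of its support files (checksums).
    public boolean isMergeCandidate(boolean isGroupRepo, String requestedResource) {
        return isGroupRepo
            && (requestInfo.isMetadata(requestedResource)
                || requestInfo.isMetadataSupportFile(requestedResource));
    }
}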
Use of org.apache.archiva.repository.RepositoryRequestInfo in project archiva by apache.
The class ArchivaDavResourceFactory, method processRepository.
private DavResource processRepository(final DavServletRequest request, ArchivaDavResourceLocator archivaLocator, String activePrincipal, ManagedRepositoryContent managedRepositoryContent, org.apache.archiva.repository.ManagedRepository managedRepository) throws DavException {
DavResource resource = null;
if (isAuthorized(request, managedRepositoryContent.getId())) {
boolean readMethod = WebdavMethodUtil.isReadMethod(request.getMethod());
// Maven-centric part: evaluate the path with version handling if it refers to a -SNAPSHOT
// MRM-1846: only do this for read methods, to prevent an issue with Maven 2.2.1 and uniqueVersion=false
String path = readMethod ? evaluatePathWithVersion(archivaLocator, managedRepositoryContent, request.getContextPath()) : getLogicalResource(archivaLocator, managedRepository, false);
if (path.startsWith("/")) {
path = path.substring(1);
}
LogicalResource logicalResource = new LogicalResource(path);
StorageAsset repoAsset = managedRepository.getAsset(path);
// Path resourceFile = Paths.get( managedRepositoryContent.getRepoRoot(), path );
try {
resource = new ArchivaDavResource(repoAsset, path, managedRepository, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
} catch (LayoutException e) {
log.error("Incompatible layout: {}", e.getMessage(), e);
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
}
if (WebdavMethodUtil.isReadMethod(request.getMethod())) {
if (archivaLocator.getHref(false).endsWith("/") && !repoAsset.isContainer()) {
// force a resource not found
throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Resource does not exist");
} else {
if (!resource.isCollection()) {
boolean previouslyExisted = repoAsset.exists();
boolean fromProxy = fetchContentFromProxies(managedRepository, request, logicalResource);
StorageAsset resourceAsset = null;
// legacy layout format.
try {
// Perform an adjustment of the resource to the managed
// repository expected path.
// String localResourcePath = managedRepository.getRequestInfo().toNativePath( logicalResource.getPath() );
resourceAsset = managedRepository.getAsset(logicalResource.getPath());
resource = new ArchivaDavResource(resourceAsset, logicalResource.getPath(), managedRepository, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
} catch (LayoutException e) {
if (resourceAsset == null || !resourceAsset.exists()) {
throw new DavException(HttpServletResponse.SC_NOT_FOUND, e);
}
}
if (fromProxy) {
String action = (previouslyExisted ? AuditEvent.MODIFY_FILE : AuditEvent.CREATE_FILE) + PROXIED_SUFFIX;
log.debug("Proxied artifact '{}' in repository '{}' (current user '{}')", resourceAsset.getName(), managedRepositoryContent.getId(), activePrincipal);
triggerAuditEvent(request.getRemoteAddr(), archivaLocator.getRepositoryId(), logicalResource.getPath(), action, activePrincipal);
}
if (!resourceAsset.exists()) {
throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Resource does not exist");
}
}
}
}
if (request.getMethod().equals(HTTP_PUT_METHOD)) {
String resourcePath = logicalResource.getPath();
RepositoryRequestInfo repositoryRequestInfo = managedRepository.getRequestInfo();
// we assume that release artifacts can only be deployed to repositories with releases enabled
if (managedRepositoryContent.getRepository().getActiveReleaseSchemes().contains(ReleaseScheme.RELEASE) && !repositoryRequestInfo.isMetadata(resourcePath) && !repositoryRequestInfo.isSupportFile(resourcePath)) {
// ArtifactReference artifact = null;
Artifact artifact = null;
try {
BaseRepositoryContentLayout layout = managedRepositoryContent.getLayout(BaseRepositoryContentLayout.class);
ContentItem artifactItem = managedRepositoryContent.toItem(resourcePath);
artifact = layout.adaptItem(Artifact.class, artifactItem);
if (!VersionUtil.isSnapshot(artifact.getVersion().getId())) {
// check if artifact already exists and if artifact re-deployment to the repository is allowed
if (artifactItem.exists() && managedRepositoryContent.getRepository().blocksRedeployments()) {
log.warn("Overwriting released artifacts in repository '{}' is not allowed.", managedRepositoryContent.getId());
throw new DavException(HttpServletResponse.SC_CONFLICT, "Overwriting released artifacts is not allowed.");
}
}
} catch (LayoutException e) {
log.warn("Artifact path '{}' is invalid.", resourcePath);
} catch (ContentAccessException e) {
log.error("Could not access content item for path '{}': {}", resourcePath, e.getMessage(), e);
}
}
/*
* Create parent directories that don't exist when writing a file. This actually makes this
* implementation not compliant with the WebDAV RFC - but we have enough knowledge about how the
* collection is being used to do this reasonably, and some versions of Maven's WebDAV client don't
* correctly create the collections themselves.
*/
StorageAsset rootDirectory = managedRepositoryContent.getRepository().getRoot();
StorageAsset destDir = rootDirectory.resolve(logicalResource.getPath()).getParent();
if (!destDir.exists()) {
try {
destDir.create();
} catch (IOException e) {
log.error("Could not create directory {}: {}", destDir, e.getMessage(), e);
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create directory " + destDir);
}
String relPath = PathUtil.getRelative(rootDirectory.getPath(), destDir.getPath());
log.debug("Creating destination directory '{}' (current user '{}')", destDir.getName(), activePrincipal);
triggerAuditEvent(request.getRemoteAddr(), managedRepositoryContent.getId(), relPath, AuditEvent.CREATE_DIR, activePrincipal);
}
}
}
return resource;
}
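In the PUT branch above, RepositoryRequestInfo decides whether the release-redeployment policy applies at all: metadata and support files (checksums, signatures) bypass the check. A short sketch of that guard, assuming only the methods used above (class and method names are hypothetical):

import org.apache.archiva.repository.RepositoryRequestInfo;

// Hypothetical guard; mirrors the condition around the blocksRedeployments() check in processRepository.
public final class ReleaseDeploymentGuard {

    private ReleaseDeploymentGuard() {
    }

    // Only plain artifact uploads to a release-enabled repository need the
    // "is this a redeployment of an existing release?" check.
    public static boolean requiresRedeploymentCheck(RepositoryRequestInfo requestInfo, String resourcePath, boolean releasesEnabled) {
        return releasesEnabled
            && !requestInfo.isMetadata(resourcePath)
            && !requestInfo.isSupportFile(resourcePath);
    }
}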
Use of org.apache.archiva.repository.RepositoryRequestInfo in project archiva by apache.
The class ArchivaDavResourceFactory, method fetchContentFromProxies.
private boolean fetchContentFromProxies(ManagedRepository managedRepository, DavServletRequest request, LogicalResource resource) throws DavException {
String path = resource.getPath();
if (!proxyRegistry.hasHandler(managedRepository.getType())) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "No proxy handler found for repository type " + managedRepository.getType());
}
RepositoryRequestInfo repositoryRequestInfo = managedRepository.getRequestInfo();
RepositoryProxyHandler proxyHandler = proxyRegistry.getHandler(managedRepository.getType()).get(0);
if (repositoryRequestInfo.isSupportFile(path)) {
StorageAsset proxiedFile = proxyHandler.fetchFromProxies(managedRepository, path);
return (proxiedFile != null);
}
// Is it a Metadata resource?
if ("default".equals(repositoryRequestInfo.getLayout(path)) && repositoryRequestInfo.isMetadata(path)) {
return proxyHandler.fetchMetadataFromProxies(managedRepository, path).isModified();
}
// Is it an Archetype Catalog?
if (repositoryRequestInfo.isArchetypeCatalog(path)) {
// FIXME: we should merge the archetype catalogs fetched from the remote servers.
StorageAsset proxiedFile = proxyHandler.fetchFromProxies(managedRepository, path);
return (proxiedFile != null);
}
// Not any of the above? Then it's gotta be an artifact reference.
try {
// Get the artifact reference in a layout neutral way.
// ArtifactReference artifact = repositoryRequestInfo.toArtifactReference( path );
ItemSelector selector = repositoryRequestInfo.toItemSelector(path);
if (selector != null) {
String repositoryLayout = managedRepository.getLayout();
RepositoryStorage repositoryStorage = this.applicationContext.getBean("repositoryStorage#" + repositoryLayout, RepositoryStorage.class);
selector = repositoryStorage.applyServerSideRelocation(managedRepository, selector);
StorageAsset proxiedFile = proxyHandler.fetchFromProxies(managedRepository, selector);
resource.setPath(managedRepository.getContent().toPath(selector));
log.debug("Proxied artifact '{}:{}:{}:{}'", selector.getNamespace(), selector.getArtifactId(), selector.getVersion(), selector.getArtifactVersion());
return (proxiedFile != null);
}
} catch (LayoutException e) {
/* eat it */
} catch (ProxyDownloadException e) {
log.error(e.getMessage(), e);
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Unable to fetch artifact resource.");
}
return false;
}
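fetchContentFromProxies dispatches on the request path in a fixed order: support files first, then metadata (default layout only), then the archetype catalog; everything else is treated as an artifact reference. A hypothetical classifier summarizing that order, using only the RepositoryRequestInfo methods called above:

import org.apache.archiva.repository.RepositoryRequestInfo;

// Hypothetical classification mirroring the dispatch order in fetchContentFromProxies.
public final class ProxyRequestClassifier {

    public enum Kind { SUPPORT_FILE, METADATA, ARCHETYPE_CATALOG, ARTIFACT }

    private ProxyRequestClassifier() {
    }

    public static Kind classify(RepositoryRequestInfo requestInfo, String path) {
        if (requestInfo.isSupportFile(path)) {
            return Kind.SUPPORT_FILE;
        }
        if ("default".equals(requestInfo.getLayout(path)) && requestInfo.isMetadata(path)) {
            return Kind.METADATA;
        }
        if (requestInfo.isArchetypeCatalog(path)) {
            return Kind.ARCHETYPE_CATALOG;
        }
        return Kind.ARTIFACT;
    }
}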