Example usage of org.apache.archiva.common.filelock.DefaultFileLockManager in the Apache Archiva project: class ArchivaDavResourceFactory, method createResource.
@Override
public DavResource createResource(final DavResourceLocator locator, final DavServletRequest request, final DavServletResponse response) throws DavException {
final ArchivaDavResourceLocator archivaLocator = checkLocatorIsInstanceOfRepositoryLocator(locator);
final String sRepoId = archivaLocator.getRepositoryId();
RepositoryGroup repoGroup = repositoryRegistry.getRepositoryGroup(sRepoId);
final boolean isGroupRepo = repoGroup != null;
String activePrincipal = getActivePrincipal(request);
List<String> resourcesInAbsolutePath = new ArrayList<>();
boolean readMethod = WebdavMethodUtil.isReadMethod(request.getMethod());
RepositoryRequestInfo repositoryRequestInfo = null;
DavResource resource;
if (isGroupRepo) {
if (!readMethod) {
throw new DavException(HttpServletResponse.SC_METHOD_NOT_ALLOWED, "Write method not allowed for repository groups.");
}
log.debug("Repository group '{}' accessed by '{}", repoGroup.getId(), activePrincipal);
// handle browse requests for virtual repos
if (getLogicalResource(archivaLocator, null, true).endsWith("/")) {
DavResource davResource = getResourceFromGroup(request, archivaLocator, repoGroup);
setHeaders(response, locator, davResource, true);
return davResource;
} else {
// make a copy to avoid potential concurrent modifications (eg. by configuration)
// TODO: ultimately, locking might be more efficient than copying in this fashion since updates are
// infrequent
resource = processRepositoryGroup(request, archivaLocator, activePrincipal, resourcesInAbsolutePath, repoGroup);
for (ManagedRepository repo : repoGroup.getRepositories()) {
if (repo != null) {
repositoryRequestInfo = repo.getRequestInfo();
break;
}
}
}
} else {
// We do not provide folders for remote repositories
ManagedRepository repo = repositoryRegistry.getManagedRepository(sRepoId);
if (repo == null) {
throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Invalid repository: " + archivaLocator.getRepositoryId());
}
ManagedRepositoryContent managedRepositoryContent = repo.getContent();
if (managedRepositoryContent == null) {
log.error("Inconsistency detected. Repository content not found for '{}'", archivaLocator.getRepositoryId());
throw new DavException(HttpServletResponse.SC_NOT_FOUND, "Invalid repository: " + archivaLocator.getRepositoryId());
}
log.debug("Managed repository '{}' accessed by '{}'", managedRepositoryContent.getId(), activePrincipal);
resource = processRepository(request, archivaLocator, activePrincipal, managedRepositoryContent, repo);
repositoryRequestInfo = repo.getRequestInfo();
String logicalResource = getLogicalResource(archivaLocator, null, false);
resourcesInAbsolutePath.add(managedRepositoryContent.getRepository().getRoot().getFilePath().resolve(logicalResource).toAbsolutePath().toString());
}
String requestedResource = request.getRequestURI();
// merge metadata only when requested via the repo group
if ((repositoryRequestInfo.isMetadata(requestedResource) || repositoryRequestInfo.isMetadataSupportFile(requestedResource)) && isGroupRepo) {
// this should only be at the project level not version level!
if (isProjectReference(requestedResource)) {
ArchivaDavResource res = (ArchivaDavResource) resource;
String newPath;
if (res.getAsset().hasParent()) {
newPath = res.getAsset().getParent().getPath() + "/maven-metadata-" + sRepoId + ".xml";
} else {
newPath = StringUtils.substringBeforeLast(res.getAsset().getPath(), "/") + "/maven-metadata-" + sRepoId + ".xml";
;
}
// for MRM-872 handle checksums of the merged metadata files
if (repositoryRequestInfo.isSupportFile(requestedResource)) {
String metadataChecksumPath = newPath + "." + StringUtils.substringAfterLast(requestedResource, ".");
StorageAsset metadataChecksum = repoGroup.getAsset(metadataChecksumPath);
if (repoGroup.getAsset(metadataChecksumPath).exists()) {
LogicalResource logicalResource = new LogicalResource(getLogicalResource(archivaLocator, null, false));
try {
resource = new ArchivaDavResource(metadataChecksum, logicalResource.getPath(), repoGroup, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
} catch (LayoutException e) {
log.error("Incompatible layout: {}", e.getMessage(), e);
throw new DavException(500, e);
}
}
} else {
if (resourcesInAbsolutePath != null && resourcesInAbsolutePath.size() > 1) {
// merge the metadata of all repos under group
ArchivaRepositoryMetadata mergedMetadata = new ArchivaRepositoryMetadata();
for (String resourceAbsPath : resourcesInAbsolutePath) {
try {
Path metadataFile = Paths.get(resourceAbsPath);
FilesystemStorage storage = new FilesystemStorage(metadataFile.getParent(), new DefaultFileLockManager());
ArchivaRepositoryMetadata repoMetadata = repositoryRegistry.getMetadataReader(repoGroup.getType()).read(storage.getAsset(metadataFile.getFileName().toString()));
mergedMetadata = RepositoryMetadataMerge.merge(mergedMetadata, repoMetadata);
} catch (RepositoryMetadataException r) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while merging metadata file.");
} catch (IOException e) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while merging metadata file.");
}
}
try {
StorageAsset resourceFile = writeMergedMetadataToFile(repoGroup, mergedMetadata, newPath);
LogicalResource logicalResource = new LogicalResource(getLogicalResource(archivaLocator, null, false));
resource = new ArchivaDavResource(resourceFile, logicalResource.getPath(), repoGroup, request.getRemoteAddr(), activePrincipal, request.getDavSession(), archivaLocator, this, mimeTypes, auditListeners, scheduler);
} catch (RepositoryMetadataException r) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while writing metadata file.");
} catch (IOException ie) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Error occurred while generating checksum files.");
} catch (LayoutException e) {
log.error("Incompatible layout: {}", e.getMessage(), e);
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Incompatible layout for repository " + repoGroup.getId());
}
}
}
}
}
setHeaders(response, locator, resource, false);
// compatibility with MRM-440 to ensure browsing the repository works ok
if (resource.isCollection() && !request.getRequestURI().endsWith("/")) {
throw new BrowserRedirectException(resource.getHref());
}
resource.addLockManager(lockManager);
return resource;
}
Example usage of org.apache.archiva.common.filelock.DefaultFileLockManager in the Apache Archiva project: class ArchivaDavResourceFactory, method getResourceFromGroup.
private DavResource getResourceFromGroup(DavServletRequest request, ArchivaDavResourceLocator locator, RepositoryGroup repositoryGroup) throws DavException {
final String id = repositoryGroup.getId();
final List<ManagedRepository> repositories = repositoryGroup.getRepositories();
if (repositories == null || repositories.isEmpty()) {
try {
return new ArchivaDavResource(repositoryGroup.getAsset("/"), "groups/" + id, null, request.getDavSession(), locator, this, mimeTypes, auditListeners, scheduler);
} catch (LayoutException e) {
log.error("Bad repository layout: {}", e.getMessage(), e);
throw new DavException(500, e);
}
}
List<StorageAsset> mergedRepositoryContents = new ArrayList<>();
ManagedRepository firstRepo = repositories.get(0);
String path = getLogicalResource(locator, firstRepo, false);
if (path.startsWith("/")) {
path = path.substring(1);
}
LogicalResource logicalResource = new LogicalResource(path);
// flow:
// if the current user logged in has permission to any of the repositories, allow user to
// browse the repo group but displaying only the repositories which the user has permission to access.
// otherwise, prompt for authentication.
String activePrincipal = getActivePrincipal(request);
boolean allow = isAllowedToContinue(request, repositories, activePrincipal);
// remove last /
String pathInfo = StringUtils.removeEnd(request.getPathInfo(), "/");
String mergedIndexPath = "/";
if (repositoryGroup.supportsFeature(IndexCreationFeature.class)) {
IndexCreationFeature indexCreationFeature = repositoryGroup.getFeature(IndexCreationFeature.class);
mergedIndexPath = indexCreationFeature.getIndexPath().getPath();
}
if (allow) {
if (StringUtils.endsWith(pathInfo, mergedIndexPath)) {
StorageAsset mergedRepoDirPath = buildMergedIndexDirectory(activePrincipal, request, repositoryGroup);
mergedRepositoryContents.add(mergedRepoDirPath);
} else {
if (StringUtils.equalsIgnoreCase(pathInfo, "/" + id)) {
Path tmpDirectory = Paths.get(SystemUtils.getJavaIoTmpDir().toString(), id, mergedIndexPath);
if (!Files.exists(tmpDirectory)) {
synchronized (tmpDirectory.toAbsolutePath().toString()) {
if (!Files.exists(tmpDirectory)) {
try {
Files.createDirectories(tmpDirectory);
} catch (IOException e) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create direcotory " + tmpDirectory);
}
}
}
}
try {
FilesystemStorage storage = new FilesystemStorage(tmpDirectory.getParent(), new DefaultFileLockManager());
mergedRepositoryContents.add(storage.getRoot());
} catch (IOException e) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create storage for " + tmpDirectory);
}
}
for (ManagedRepository repo : repositories) {
ManagedRepositoryContent managedRepository = null;
if (repo == null) {
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Invalid managed repository <" + repo.getId() + ">");
}
managedRepository = repo.getContent();
if (managedRepository == null) {
log.error("Inconsistency detected. Repository content not found for '{}'", repo.getId());
throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Invalid managed repository <" + repo.getId() + ">");
}
// Path resourceFile = Paths.get( managedRepository.getRepoRoot(), logicalResource.getPath() );
StorageAsset resourceFile = repo.getAsset(logicalResource.getPath());
if (resourceFile.exists() && managedRepository.getRepository().supportsFeature(IndexCreationFeature.class)) {
// in case of group displaying index directory doesn't have sense !!
IndexCreationFeature idf = managedRepository.getRepository().getFeature(IndexCreationFeature.class);
StorageAsset repoIndexDirectory = idf.getLocalIndexPath();
if (!StringUtils.equals(FilenameUtils.normalize(repoIndexDirectory.getPath()), FilenameUtils.normalize(logicalResource.getPath()))) {
// for prompted authentication
if (httpAuth.getSecuritySession(request.getSession(true)) != null) {
try {
if (isAuthorized(request, repo.getId())) {
mergedRepositoryContents.add(resourceFile);
log.debug("Repository '{}' accessed by '{}'", repo.getId(), activePrincipal);
}
} catch (DavException e) {
// TODO: review exception handling
log.debug("Skipping repository '{}' for user '{}': {}", managedRepository, activePrincipal, e.getMessage());
}
} else {
// for the current user logged in
try {
if (servletAuth.isAuthorized(activePrincipal, repo.getId(), WebdavMethodUtil.getMethodPermission(request.getMethod()))) {
mergedRepositoryContents.add(resourceFile);
log.debug("Repository '{}' accessed by '{}'", repo.getId(), activePrincipal);
}
} catch (UnauthorizedException e) {
// TODO: review exception handling
log.debug("Skipping repository '{}' for user '{}': {}", managedRepository, activePrincipal, e.getMessage());
}
}
}
}
}
}
} else {
throw new UnauthorizedDavException(locator.getRepositoryId(), "User not authorized.");
}
ArchivaVirtualDavResource resource = new ArchivaVirtualDavResource(mergedRepositoryContents, logicalResource.getPath(), mimeTypes, locator, this);
// compatibility with MRM-440 to ensure browsing the repository group works ok
if (resource.isCollection() && !request.getRequestURI().endsWith("/")) {
throw new BrowserRedirectException(resource.getHref());
}
return resource;
}
Example usage of org.apache.archiva.common.filelock.DefaultFileLockManager in the Apache Archiva project: class RssFeedServletTest, method setUp.
@Before
@Override
public void setUp() throws Exception {
final MockServletContext mockServletContext = new MockServletContext();
WebApplicationContext webApplicationContext = new TestWebapplicationContext(applicationContext, mockServletContext);
mockServletContext.setAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE, webApplicationContext);
MockServletConfig mockServletConfig = new MockServletConfig() {
@Override
public ServletContext getServletContext() {
return mockServletContext;
}
};
repositoryRegistry.reload();
repositoryRegistry.putRepository(new BasicManagedRepository("internal", "internal", new FilesystemStorage(Paths.get("target/appserver-base/repositories/internal"), new DefaultFileLockManager())));
rssFeedServlet.init(mockServletConfig);
}
Example usage of org.apache.archiva.common.filelock.DefaultFileLockManager in the Apache Archiva project: class BasicManagedRepository, method newFilesystemInstance.
/**
 * Creates a filesystem based repository instance whose content is stored directly under
 * the given repository path, guarded by a default file lock manager.
 *
 * NOTE(review): the previous Javadoc claimed the path is built as basePath/repository-id,
 * but the code uses {@code repositoryPath} as-is (unlike BasicRemoteRepository's
 * newFilesystemInstance, which resolves the id under the base path). Callers must pass
 * the final repository directory.
 *
 * @param id The repository id
 * @param name The name of the repository
 * @param repositoryPath The path to the repository storage root (used unchanged)
 * @return The repository instance
 * @throws IOException if the filesystem storage cannot be initialized at the given path
 */
public static BasicManagedRepository newFilesystemInstance(String id, String name, Path repositoryPath) throws IOException {
FileLockManager lockManager = new DefaultFileLockManager();
FilesystemStorage storage = new FilesystemStorage(repositoryPath, lockManager);
return new BasicManagedRepository(id, name, storage);
}
Example usage of org.apache.archiva.common.filelock.DefaultFileLockManager in the Apache Archiva project: class BasicRemoteRepository, method newFilesystemInstance.
/**
 * Creates a filesystem based remote repository instance. The storage root is resolved as
 * {@code basePath/id}, guarded by a default file lock manager.
 *
 * @param id The repository id (also used as the directory name under basePath)
 * @param name The display name of the repository
 * @param basePath The parent directory under which the repository directory is created
 * @return The remote repository instance
 * @throws IOException if the filesystem storage cannot be initialized
 */
public static BasicRemoteRepository newFilesystemInstance(String id, String name, Path basePath) throws IOException {
    final FilesystemStorage repositoryStorage =
        new FilesystemStorage( basePath.resolve( id ), new DefaultFileLockManager() );
    return new BasicRemoteRepository( id, name, repositoryStorage );
}
Aggregations