Use of org.apache.archiva.repository.storage.fs.FilesystemStorage in the Apache Archiva project.
From the class ArchivaDavResourceFactory, method getResourceFromGroup.
/**
 * Builds the WebDAV resource for a request against a repository group by merging the
 * contents of the member repositories that the current user is authorized to read.
 *
 * @param request the incoming WebDAV request
 * @param locator the locator describing the requested repository path
 * @param repositoryGroup the repository group being browsed
 * @return the DAV resource for the request (a virtual merged resource for non-empty groups)
 * @throws DavException if the user is not authorized, the group layout is invalid, or the
 *         merged-index working directory cannot be prepared
 */
private DavResource getResourceFromGroup(DavServletRequest request, ArchivaDavResourceLocator locator, RepositoryGroup repositoryGroup) throws DavException {
    final String id = repositoryGroup.getId();
    final List<ManagedRepository> repositories = repositoryGroup.getRepositories();
    if (repositories == null || repositories.isEmpty()) {
        // Empty group: expose the group's own root asset directly.
        try {
            return new ArchivaDavResource(repositoryGroup.getAsset("/"), "groups/" + id, null, request.getDavSession(), locator, this, mimeTypes, auditListeners, scheduler);
        } catch (LayoutException e) {
            log.error("Bad repository layout: {}", e.getMessage(), e);
            throw new DavException(500, e);
        }
    }
    List<StorageAsset> mergedRepositoryContents = new ArrayList<>();
    ManagedRepository firstRepo = repositories.get(0);
    String path = getLogicalResource(locator, firstRepo, false);
    if (path.startsWith("/")) {
        path = path.substring(1);
    }
    LogicalResource logicalResource = new LogicalResource(path);
    // flow:
    // if the current user logged in has permission to any of the repositories, allow user to
    // browse the repo group but displaying only the repositories which the user has permission to access.
    // otherwise, prompt for authentication.
    String activePrincipal = getActivePrincipal(request);
    boolean allow = isAllowedToContinue(request, repositories, activePrincipal);
    // remove last /
    String pathInfo = StringUtils.removeEnd(request.getPathInfo(), "/");
    String mergedIndexPath = "/";
    if (repositoryGroup.supportsFeature(IndexCreationFeature.class)) {
        IndexCreationFeature indexCreationFeature = repositoryGroup.getFeature(IndexCreationFeature.class);
        mergedIndexPath = indexCreationFeature.getIndexPath().getPath();
    }
    if (allow) {
        if (StringUtils.endsWith(pathInfo, mergedIndexPath)) {
            // Request targets the merged index itself: build it and serve that directory.
            StorageAsset mergedRepoDirPath = buildMergedIndexDirectory(activePrincipal, request, repositoryGroup);
            mergedRepositoryContents.add(mergedRepoDirPath);
        } else {
            if (StringUtils.equalsIgnoreCase(pathInfo, "/" + id)) {
                Path tmpDirectory = Paths.get(SystemUtils.getJavaIoTmpDir().toString(), id, mergedIndexPath);
                if (!Files.exists(tmpDirectory)) {
                    // intern() is required so every thread locks the same monitor for this path.
                    // Without it each call synchronizes on a freshly-built String, making the
                    // double-checked directory creation below a no-op.
                    synchronized (tmpDirectory.toAbsolutePath().toString().intern()) {
                        if (!Files.exists(tmpDirectory)) {
                            try {
                                Files.createDirectories(tmpDirectory);
                            } catch (IOException e) {
                                throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create directory " + tmpDirectory);
                            }
                        }
                    }
                }
                try {
                    FilesystemStorage storage = new FilesystemStorage(tmpDirectory.getParent(), new DefaultFileLockManager());
                    mergedRepositoryContents.add(storage.getRoot());
                } catch (IOException e) {
                    throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create storage for " + tmpDirectory);
                }
            }
            for (ManagedRepository repo : repositories) {
                if (repo == null) {
                    // Must not call repo.getId() here: repo is null, and doing so would raise
                    // an NPE instead of the intended clean 500 response.
                    throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Invalid managed repository <null>");
                }
                ManagedRepositoryContent managedRepository = repo.getContent();
                if (managedRepository == null) {
                    log.error("Inconsistency detected. Repository content not found for '{}'", repo.getId());
                    throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Invalid managed repository <" + repo.getId() + ">");
                }
                // Path resourceFile = Paths.get( managedRepository.getRepoRoot(), logicalResource.getPath() );
                StorageAsset resourceFile = repo.getAsset(logicalResource.getPath());
                if (resourceFile.exists() && managedRepository.getRepository().supportsFeature(IndexCreationFeature.class)) {
                    // in case of group displaying index directory doesn't have sense !!
                    IndexCreationFeature idf = managedRepository.getRepository().getFeature(IndexCreationFeature.class);
                    StorageAsset repoIndexDirectory = idf.getLocalIndexPath();
                    if (!StringUtils.equals(FilenameUtils.normalize(repoIndexDirectory.getPath()), FilenameUtils.normalize(logicalResource.getPath()))) {
                        // for prompted authentication
                        if (httpAuth.getSecuritySession(request.getSession(true)) != null) {
                            try {
                                if (isAuthorized(request, repo.getId())) {
                                    mergedRepositoryContents.add(resourceFile);
                                    log.debug("Repository '{}' accessed by '{}'", repo.getId(), activePrincipal);
                                }
                            } catch (DavException e) {
                                // TODO: review exception handling
                                log.debug("Skipping repository '{}' for user '{}': {}", managedRepository, activePrincipal, e.getMessage());
                            }
                        } else {
                            // for the current user logged in
                            try {
                                if (servletAuth.isAuthorized(activePrincipal, repo.getId(), WebdavMethodUtil.getMethodPermission(request.getMethod()))) {
                                    mergedRepositoryContents.add(resourceFile);
                                    log.debug("Repository '{}' accessed by '{}'", repo.getId(), activePrincipal);
                                }
                            } catch (UnauthorizedException e) {
                                // TODO: review exception handling
                                log.debug("Skipping repository '{}' for user '{}': {}", managedRepository, activePrincipal, e.getMessage());
                            }
                        }
                    }
                }
            }
        }
    } else {
        throw new UnauthorizedDavException(locator.getRepositoryId(), "User not authorized.");
    }
    ArchivaVirtualDavResource resource = new ArchivaVirtualDavResource(mergedRepositoryContents, logicalResource.getPath(), mimeTypes, locator, this);
    // compatibility with MRM-440 to ensure browsing the repository group works ok
    if (resource.isCollection() && !request.getRequestURI().endsWith("/")) {
        throw new BrowserRedirectException(resource.getHref());
    }
    return resource;
}
Use of org.apache.archiva.repository.storage.fs.FilesystemStorage in the Apache Archiva project.
From the class RssFeedServletTest, method setUp.
@Before
@Override
public void setUp() throws Exception {
    // Wire a mock servlet environment around the Spring application context so the
    // servlet under test can resolve its root web application context.
    final MockServletContext servletContext = new MockServletContext();
    final WebApplicationContext webAppContext = new TestWebapplicationContext(applicationContext, servletContext);
    servletContext.setAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE, webAppContext);
    // Servlet config that hands back our mock context instead of the container's.
    final MockServletConfig servletConfig = new MockServletConfig() {
        @Override
        public ServletContext getServletContext() {
            return servletContext;
        }
    };
    // Register a fresh filesystem-backed "internal" repository for the feed servlet to read.
    repositoryRegistry.reload();
    repositoryRegistry.putRepository(new BasicManagedRepository("internal", "internal", new FilesystemStorage(Paths.get("target/appserver-base/repositories/internal"), new DefaultFileLockManager())));
    rssFeedServlet.init(servletConfig);
}
Use of org.apache.archiva.repository.storage.fs.FilesystemStorage in the Apache Archiva project.
From the class DefaultRepositoryProxyHandler, method transferFile.
/**
* Perform the transfer of the file.
*
* @param connector the connector configuration to use.
* @param remoteRepository the remote repository get the resource from.
* @param remotePath the path in the remote repository to the resource to get.
* @param repository the managed repository that will hold the file
* @param resource the path relative to the repository storage where the file should be downloaded to
* @param requestProperties the request properties to utilize for policy handling.
* @param executeConsumers whether to execute the consumers after proxying
* @return the local file that was downloaded, or null if not downloaded.
* @throws NotFoundException if the file was not found on the remote repository.
* @throws NotModifiedException if the localFile was present, and the resource was present on remote repository, but
* the remote resource is not newer than the local File.
* @throws ProxyException if transfer was unsuccessful.
*/
protected StorageAsset transferFile(ProxyConnector connector, RemoteRepository remoteRepository, String remotePath, ManagedRepository repository, StorageAsset resource, Properties requestProperties, boolean executeConsumers) throws ProxyException, NotModifiedException {
    String url = null;
    try {
        url = remoteRepository.getLocation().toURL().toString();
    } catch (MalformedURLException e) {
        throw new ProxyException(e.getMessage(), e);
    }
    if (!url.endsWith("/")) {
        url = url + "/";
    }
    if (remotePath.startsWith("/")) {
        url = url + remotePath.substring(1);
    } else {
        url = url + remotePath;
    }
    requestProperties.setProperty("url", url);
    // Is a whitelist defined?
    if (CollectionUtils.isNotEmpty(connector.getWhitelist())) {
        // Path must belong to whitelist.
        if (!matchesPattern(remotePath, connector.getWhitelist())) {
            log.debug("Path [{}] is not part of defined whitelist (skipping transfer from repository [{}]).", remotePath, remoteRepository.getId());
            return null;
        }
    }
    // Is target path part of blacklist?
    if (matchesPattern(remotePath, connector.getBlacklist())) {
        log.debug("Path [{}] is part of blacklist (skipping transfer from repository [{}]).", remotePath, remoteRepository.getId());
        return null;
    }
    // Handle pre-download policy: a violation is not fatal if we already hold a local copy.
    try {
        validatePolicies(this.preDownloadPolicies, connector.getPolicies(), requestProperties, resource);
    } catch (PolicyViolationException e) {
        String emsg = "Transfer not attempted on " + url + " : " + e.getMessage();
        if (resource.exists()) {
            log.debug("{} : using already present local file.", emsg);
            return resource;
        }
        log.debug(emsg);
        return null;
    }
    // Download into a temporary working directory first, so a failed or invalidated
    // transfer never leaves a partial file at the target location.
    Path workingDirectory = createWorkingDirectory(repository);
    FilesystemStorage tmpStorage = null;
    try {
        tmpStorage = new FilesystemStorage(workingDirectory, fileLockManager);
    } catch (IOException e) {
        // Preserve the cause so the original I/O failure is not lost to callers/logs.
        throw new ProxyException("Could not create tmp storage", e);
    }
    StorageAsset tmpResource = tmpStorage.getAsset(resource.getName());
    StorageAsset[] tmpChecksumFiles = new StorageAsset[checksumAlgorithms.size()];
    for (int i = 0; i < checksumAlgorithms.size(); i++) {
        ChecksumAlgorithm alg = checksumAlgorithms.get(i);
        tmpChecksumFiles[i] = tmpStorage.getAsset(resource.getName() + "." + alg.getDefaultExtension());
    }
    try {
        transferResources(connector, remoteRepository, tmpResource, tmpChecksumFiles, url, remotePath, resource, workingDirectory, repository);
        // Handle post-download policies.
        try {
            validatePolicies(this.postDownloadPolicies, connector.getPolicies(), requestProperties, tmpResource);
        } catch (PolicyViolationException e) {
            log.warn("Transfer invalidated from {} : {}", url, e.getMessage());
            executeConsumers = false;
            if (!fileExists(tmpResource)) {
                resource = null;
            }
        }
        if (resource != null) {
            // Lock on the interned path so concurrent transfers of the same resource
            // do not interleave their moves into the target directory.
            synchronized (resource.getPath().intern()) {
                StorageAsset directory = resource.getParent();
                for (int i = 0; i < tmpChecksumFiles.length; i++) {
                    moveFileIfExists(tmpChecksumFiles[i], directory);
                }
                moveFileIfExists(tmpResource, directory);
            }
        }
    } finally {
        org.apache.archiva.common.utils.FileUtils.deleteQuietly(workingDirectory);
    }
    if (executeConsumers) {
        // Just-in-time update of the index and database by executing the consumers for this artifact
        // consumers.executeConsumers( connector.getSourceRepository().getRepository(), resource );
        queueRepositoryTask(connector.getSourceRepository().getId(), resource);
    }
    return resource;
}
Use of org.apache.archiva.repository.storage.fs.FilesystemStorage in the Apache Archiva project.
From the class BasicManagedRepository, method newFilesystemInstance.
/**
 * Creates a managed repository instance backed by the local filesystem. The given
 * path is used directly as the repository's storage root.
 *
 * @param id The repository id
 * @param name The name of the repository
 * @param repositoryPath The storage root of the repository
 * @return The repository instance
 * @throws IOException if the filesystem storage cannot be initialized
 */
public static BasicManagedRepository newFilesystemInstance(String id, String name, Path repositoryPath) throws IOException {
    final FilesystemStorage storage = new FilesystemStorage(repositoryPath, new DefaultFileLockManager());
    return new BasicManagedRepository(id, name, storage);
}
Use of org.apache.archiva.repository.storage.fs.FilesystemStorage in the Apache Archiva project.
From the class BasicRemoteRepository, method newFilesystemInstance.
/**
 * Creates a remote repository instance backed by the local filesystem. The storage
 * root is resolved as {@code basePath/id}.
 *
 * @param id The repository id
 * @param name The name of the repository
 * @param basePath The parent directory under which the repository directory is created
 * @return The repository instance
 * @throws IOException if the filesystem storage cannot be initialized
 */
public static BasicRemoteRepository newFilesystemInstance(String id, String name, Path basePath) throws IOException {
    final FilesystemStorage storage = new FilesystemStorage(basePath.resolve(id), new DefaultFileLockManager());
    return new BasicRemoteRepository(id, name, storage);
}
Aggregations