Use of org.apache.archiva.repository.storage.StorageAsset in the Apache Archiva project.
From the class AbstractStorageUtilTest, method testDelete:
@Test
void testDelete() {
    StorageAsset root = createTree();
    RepositoryStorage storage = createStorage(root);
    StorageUtil.deleteRecursively(root);
    // Total assets in the tree: level-3 leaves + level-2 dirs + level-1 dirs + the root itself
    int expected = LEVEL1 * LEVEL2 * LEVEL3 + LEVEL1 * LEVEL2 + LEVEL1 + 1;
    testDeletionStatus(expected, storage);
}
Use of org.apache.archiva.repository.storage.StorageAsset in the Apache Archiva project.
From the class AbstractStorageUtilTest, method testWalkFromRoot:
@Test
void testWalkFromRoot() {
    StorageAsset root = createTree();
    ConsumeVisitStatus status = new ConsumeVisitStatus();
    StorageUtil.walk(root, status);
    // Total assets in the tree: level-3 leaves + level-2 dirs + level-1 dirs + the root itself
    int expected = LEVEL1 * LEVEL2 * LEVEL3 + LEVEL1 * LEVEL2 + LEVEL1 + 1;
    Assertions.assertEquals(expected, status.size());
    // Children are visited before their parents: the deepest first child comes first, the root last
    StorageAsset first = root.list().get(0).list().get(0).list().get(0);
    Assertions.assertEquals(first, status.getFirst());
    Assertions.assertEquals(root, status.getLast());
}
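The assertions above show the traversal order: children are visited before their parents, so the deepest first child is reported first and the root last. A minimal sketch of the same walk with a plain lambda, assuming StorageUtil.walk accepts any Consumer<StorageAsset> (as the ConsumeVisitStatus usage above suggests):

    // Collect the visited paths; with this traversal the last collected path belongs to the root asset.
    List<String> visitedPaths = new ArrayList<>();
    StorageUtil.walk(root, asset -> visitedPaths.add(asset.getPath()));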
Use of org.apache.archiva.repository.storage.StorageAsset in the Apache Archiva project.
From the class ArchivaDavResourceFactory, method buildMergedIndexDirectory:
protected StorageAsset buildMergedIndexDirectory(String activePrincipal, DavServletRequest request, RepositoryGroup repositoryGroup) throws DavException {
    try {
        final List<ManagedRepository> repositories = repositoryGroup.getRepositories();
        HttpSession session = request.getSession();
        @SuppressWarnings("unchecked")
        Map<String, TemporaryGroupIndex> temporaryGroupIndexMap = (Map<String, TemporaryGroupIndex>) session.getAttribute(TemporaryGroupIndexSessionCleaner.TEMPORARY_INDEX_SESSION_KEY);
        if (temporaryGroupIndexMap == null) {
            temporaryGroupIndexMap = new HashMap<>();
        }
        final String id = repositoryGroup.getId();
        // Reuse a cached merged index for this group while it is younger than the configured TTL (minutes)
        TemporaryGroupIndex tmp = temporaryGroupIndexMap.get(id);
        if (tmp != null && tmp.getDirectory() != null && tmp.getDirectory().exists()) {
            if (System.currentTimeMillis() - tmp.getCreationTime() > (repositoryGroup.getMergedIndexTTL() * 60 * 1000)) {
                log.debug(MarkerFactory.getMarker("group.merged.index"), "tmp group index '{}' is too old so delete it", id);
                indexMerger.cleanTemporaryGroupIndex(tmp);
            } else {
                log.debug(MarkerFactory.getMarker("group.merged.index"), "merged index for group '{}' found in cache", id);
                return tmp.getDirectory();
            }
        }
        // Collect the repositories of the group that the current principal is authorized to access
        Set<String> authzRepos = new HashSet<String>();
        String permission = WebdavMethodUtil.getMethodPermission(request.getMethod());
        for (ManagedRepository repository : repositories) {
            try {
                if (servletAuth.isAuthorized(activePrincipal, repository.getId(), permission)) {
                    authzRepos.add(repository.getId());
                    authzRepos.addAll(this.repositorySearch.getRemoteIndexingContextIds(repository.getId()));
                }
            } catch (UnauthorizedException e) {
                // TODO: review exception handling
                log.debug("Skipping repository '{}' for user '{}': {}", repository, activePrincipal, e.getMessage());
            }
        }
        log.info("generate temporary merged index for repository group '{}' for repositories '{}'", id, authzRepos);
        IndexCreationFeature indexCreationFeature = repositoryGroup.getFeature(IndexCreationFeature.class);
        Path indexPath = indexCreationFeature.getLocalIndexPath().getFilePath();
        if (indexPath != null) {
            // Merge the authorized indexes into a throw-away filesystem storage and cache the result in the session
            Path tempRepoFile = Files.createTempDirectory("temp");
            tempRepoFile.toFile().deleteOnExit();
            FilesystemStorage storage = new FilesystemStorage(tempRepoFile, new DefaultFileLockManager());
            StorageAsset tmpAsset = storage.getRoot();
            IndexMergerRequest indexMergerRequest = new IndexMergerRequest(authzRepos, true, id, indexPath.toString(), repositoryGroup.getMergedIndexTTL())
                    .mergedIndexDirectory(tmpAsset)
                    .temporary(true);
            MergedRemoteIndexesTaskRequest taskRequest = new MergedRemoteIndexesTaskRequest(indexMergerRequest, indexMerger);
            MergedRemoteIndexesTask job = new MergedRemoteIndexesTask(taskRequest);
            ArchivaIndexingContext indexingContext = job.execute().getIndexingContext();
            StorageAsset mergedRepoDir = indexingContext.getPath();
            TemporaryGroupIndex temporaryGroupIndex = new TemporaryGroupIndex(mergedRepoDir, indexingContext.getId(), id, repositoryGroup.getMergedIndexTTL()).setCreationTime(new Date().getTime());
            temporaryGroupIndexMap.put(id, temporaryGroupIndex);
            session.setAttribute(TemporaryGroupIndexSessionCleaner.TEMPORARY_INDEX_SESSION_KEY, temporaryGroupIndexMap);
            return mergedRepoDir;
        } else {
            log.error("Local index path for repository group {} does not exist.", repositoryGroup.getId());
            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    } catch (RepositorySearchException e) {
        throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
    } catch (IndexMergerException e) {
        throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
    } catch (IOException e) {
        throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
    }
}
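The cache branch at the top of this method reuses a temporary group index only while it is younger than the group's merged-index TTL, which is configured in minutes. The same check isolated into a helper, as a sketch (the helper name is illustrative; the arithmetic mirrors the code above):

    private boolean mergedIndexExpired(TemporaryGroupIndex tmp, RepositoryGroup repositoryGroup) {
        long ageMillis = System.currentTimeMillis() - tmp.getCreationTime();
        // getMergedIndexTTL() is in minutes; convert to milliseconds before comparing
        return ageMillis > repositoryGroup.getMergedIndexTTL() * 60L * 1000L;
    }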
Use of org.apache.archiva.repository.storage.StorageAsset in the Apache Archiva project.
From the class ArchivaDavResourceFactory, method writeMergedMetadataToFile:
private StorageAsset writeMergedMetadataToFile(RepositoryGroup repoGroup, ArchivaRepositoryMetadata mergedMetadata, String outputFilename) throws RepositoryMetadataException, IOException {
    StorageAsset asset = repoGroup.addAsset(outputFilename, false);
    OutputStream stream = asset.getWriteStream(true);
    OutputStreamWriter sw = new OutputStreamWriter(stream, "UTF-8");
    RepositoryMetadataWriter.write(mergedMetadata, sw);
    createChecksumFiles(repoGroup, outputFilename);
    return asset;
}
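The writer above is never explicitly flushed or closed before the method returns. A variant using try-with-resources, shown here only as a sketch and not as the project's actual code, makes sure the merged metadata reaches the underlying asset before the checksum files are created:

    private StorageAsset writeMergedMetadataToFile(RepositoryGroup repoGroup, ArchivaRepositoryMetadata mergedMetadata, String outputFilename) throws RepositoryMetadataException, IOException {
        StorageAsset asset = repoGroup.addAsset(outputFilename, false);
        // Closing the writer flushes the merged metadata into the asset's write stream
        try (OutputStream stream = asset.getWriteStream(true);
             OutputStreamWriter sw = new OutputStreamWriter(stream, "UTF-8")) {
            RepositoryMetadataWriter.write(mergedMetadata, sw);
        }
        createChecksumFiles(repoGroup, outputFilename);
        return asset;
    }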
Use of org.apache.archiva.repository.storage.StorageAsset in the Apache Archiva project.
From the class ArchivaDavResource, method addMember:
@Override
public void addMember(DavResource resource, InputContext inputContext) throws DavException {
    // Path localFile = localResource.resolve( resource.getDisplayName() );
    boolean exists = asset.exists();
    final String newPath = asset.getPath() + "/" + resource.getDisplayName();
    if (isCollection() && inputContext.hasStream()) { // New file
        Path tempFile = null;
        try {
            tempFile = Files.createTempFile("archiva_upload", "dat");
            try (OutputStream os = Files.newOutputStream(tempFile, StandardOpenOption.CREATE)) {
                IOUtils.copy(inputContext.getInputStream(), os);
            }
            long expectedContentLength = inputContext.getContentLength();
            long actualContentLength = 0;
            try {
                actualContentLength = Files.size(tempFile);
            } catch (IOException e) {
                log.error("Could not get length of file {}: {}", tempFile, e.getMessage(), e);
            }
            // length of -1 is given for a chunked request or unknown length, in which case we accept what was uploaded
            if (expectedContentLength >= 0 && expectedContentLength != actualContentLength) {
                String msg = "Content Header length was " + expectedContentLength + " but was " + actualContentLength;
                log.debug("Upload failed: {}", msg);
                throw new DavException(HttpServletResponse.SC_BAD_REQUEST, msg);
            }
            StorageAsset member = repositoryStorage.addAsset(newPath, false);
            member.create();
            member.replaceDataFromFile(tempFile);
        } catch (IOException e) {
            throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e);
        } finally {
            if (tempFile != null) {
                try {
                    Files.deleteIfExists(tempFile);
                } catch (IOException e) {
                    log.error("Could not delete temporary file {}", tempFile);
                }
            }
        }
        // queueRepositoryTask( asset );
        log.debug("File '{}{}(current user '{}')", resource.getDisplayName(), (exists ? "' modified " : "' created "), this.principal);
        // triggerAuditEvent( resource, exists ? AuditEvent.MODIFY_FILE : AuditEvent.CREATE_FILE );
    } else if (!inputContext.hasStream() && isCollection()) { // New directory
        try {
            StorageAsset member = repositoryStorage.addAsset(newPath, true);
            member.create();
        } catch (IOException e) {
            log.error("Could not create directory {}: {}", newPath, e.getMessage(), e);
        }
        log.debug("Directory '{}' (current user '{}')", resource.getDisplayName(), this.principal);
        triggerAuditEvent(resource, AuditEvent.CREATE_DIR);
    } else {
        String msg = "Could not write member " + resource.getResourcePath() + " at " + getResourcePath() + " as this is not a DAV collection";
        log.debug(msg);
        throw new DavException(HttpServletResponse.SC_BAD_REQUEST, msg);
    }
}
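addMember stages an upload in a temporary file, verifies the declared Content-Length against what actually arrived, and only then moves the data into the repository asset via replaceDataFromFile. The same pattern reduced to a helper, as a sketch (the helper name and parameters are illustrative; the calls mirror the code above):

    private void storeUpload(InputStream in, long expectedContentLength, StorageAsset member) throws IOException, DavException {
        Path tempFile = Files.createTempFile("archiva_upload", "dat");
        try {
            // Stage the upload on local disk first
            try (OutputStream os = Files.newOutputStream(tempFile, StandardOpenOption.CREATE)) {
                IOUtils.copy(in, os);
            }
            long actualContentLength = Files.size(tempFile);
            // A length of -1 means a chunked request or unknown length; accept whatever was uploaded
            if (expectedContentLength >= 0 && expectedContentLength != actualContentLength) {
                throw new DavException(HttpServletResponse.SC_BAD_REQUEST, "Content Header length was " + expectedContentLength + " but was " + actualContentLength);
            }
            // Only after the size check copy the staged data into the repository asset
            member.create();
            member.replaceDataFromFile(tempFile);
        } finally {
            Files.deleteIfExists(tempFile);
        }
    }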