Use of jetbrains.buildServer.artifacts.s3.FileUploadInfo in the project teamcity-s3-artifact-storage-plugin by JetBrains.
Class S3ArtifactsPublisherTest, method testMultipleArtifactPublishesIncludedInArtifactList:
@Test
public void testMultipleArtifactPublishesIncludedInArtifactList() throws IOException {
  // Verifies that consecutive publishFiles() calls accumulate uploads: the artifact
  // list published after the second run must contain the artifacts of both runs.

  // First publish run uploads a single file "artifact1".
  HashMap<File, String> artifacts1 = new HashMap<>();
  File file1 = new File("artifact1");
  artifacts1.put(file1, "");
  List<FileUploadInfo> uploadInfos1 = Collections.singletonList(new FileUploadInfo(file1.getName(), file1.getAbsolutePath(), file1.length(), null));

  // Second publish run uploads a different file "artifact2".
  HashMap<File, String> artifacts2 = new HashMap<>();
  File file2 = new File("artifact2");
  artifacts2.put(file2, "");
  List<FileUploadInfo> uploadInfos2 = Collections.singletonList(new FileUploadInfo(file2.getName(), file2.getAbsolutePath(), file2.length(), null));

  AgentArtifactHelper helper = Mockito.mock(AgentArtifactHelper.class);
  when(helper.isEnabled(any(), any())).thenReturn(true);

  // Minimal S3 storage settings so the publisher considers the storage configured.
  AgentRunningBuild build = Mockito.mock(AgentRunningBuild.class);
  HashMap<String, String> storageSettings = new HashMap<>();
  storageSettings.put("aws.region.name", "test");
  storageSettings.put("secure:aws.secret.access.key", "test");
  storageSettings.put("aws.access.key.id", "test");
  storageSettings.put("aws.credentials.type", "aws.access.keys");
  storageSettings.put("storage.s3.bucket.name", "BUCKET_NAME");
  storageSettings.put("aws.environment", "custom");
  storageSettings.put("aws.service.endpoint", "http://localhost");
  when(build.getSharedConfigParameters()).thenReturn(Collections.emptyMap());
  when(build.getArtifactStorageSettings()).thenReturn(storageSettings);
  when(build.getBuildLogger()).thenReturn(Mockito.mock(BuildProgressLogger.class, RETURNS_DEEP_STUBS));

  CurrentBuildTracker tracker = Mockito.mock(CurrentBuildTracker.class);
  when(tracker.getCurrentBuild()).thenReturn(build);
  BuildAgentConfiguration config = Mockito.mock(BuildAgentConfiguration.class);
  when(config.getAgentHomeDirectory()).thenReturn(new File(""));
  PresignedUrlsProviderClientFactory clientFactory = Mockito.mock(PresignedUrlsProviderClientFactory.class);
  ExtensionHolder holder = Mockito.mock(ExtensionHolder.class);
  EventDispatcher<AgentLifeCycleListener> dispatcher = EventDispatcher.create(AgentLifeCycleListener.class);
  S3FileUploaderFactory uploaderFactory = Mockito.mock(S3FileUploaderFactory.class);
  final S3Configuration s3Configuration = Mockito.mock(S3Configuration.class);
  final S3UploadLogger s3UploadLogger = Mockito.mock(S3UploadLogger.class);

  // Stub uploader: instead of talking to S3, feed the prepared upload infos to the consumer.
  S3FileUploader uploader = new S3FileUploader(s3Configuration, s3UploadLogger) {
    @Override
    public Collection<UploadStatistics> upload(@NotNull Map<File, String> filesToUpload, @NotNull Supplier<String> interrupter, Consumer<FileUploadInfo> uploadInfoConsumer) throws InvalidSettingsException {
      uploadInfos1.forEach(uploadInfoConsumer::accept);
      return null;
    }
  };
  when(uploaderFactory.create(any(), any(), any())).thenReturn(uploader);

  S3ArtifactsPublisher publisher = new S3ArtifactsPublisher(helper, dispatcher, tracker, config, clientFactory, uploaderFactory, holder);
  publisher.publishFiles(artifacts1);

  // ArgumentCaptor.forClass cannot express List<ArtifactDataInstance>; the cast is safe here.
  @SuppressWarnings("unchecked")
  ArgumentCaptor<List<ArtifactDataInstance>> argumentCaptor = ArgumentCaptor.forClass(List.class);
  Mockito.verify(helper, times(1)).publishArtifactList(argumentCaptor.capture(), any());
  List<ArtifactDataInstance> value1 = argumentCaptor.getValue();
  // JUnit assertEquals takes (message, expected, actual) — expected value goes first.
  assertEquals("First publishing run should have 1 artifact in the list", 1, value1.size());

  // Second stub uploader delivers the second run's upload infos.
  S3FileUploader uploader2 = new S3FileUploader(s3Configuration, s3UploadLogger) {
    @Override
    public Collection<UploadStatistics> upload(@NotNull Map<File, String> filesToUpload, @NotNull Supplier<String> interrupter, Consumer<FileUploadInfo> uploadInfoConsumer) throws InvalidSettingsException {
      uploadInfos2.forEach(uploadInfoConsumer::accept);
      return null;
    }
  };
  when(uploaderFactory.create(any(), any(), any())).thenReturn(uploader2);
  publisher.publishFiles(artifacts2);

  Mockito.verify(helper, times(2)).publishArtifactList(argumentCaptor.capture(), any());
  List<ArtifactDataInstance> value2 = argumentCaptor.getValue();
  // The second published list must contain artifacts from both runs.
  assertEquals("Second publishing run should have 2 artifacts in the list", 2, value2.size());
}
Use of jetbrains.buildServer.artifacts.s3.FileUploadInfo in the project teamcity-s3-artifact-storage-plugin by JetBrains.
Class S3RegularFileUploader, method createRequest:
/**
 * Builds a {@link PutObjectRequest} for one artifact file and reports the pending
 * upload to the given consumer.
 *
 * @param pathPrefix     key prefix prepended to the normalized artifact path
 * @param bucketName     target S3 bucket
 * @param uploadConsumer receives a {@link FileUploadInfo} for every file that will be uploaded
 * @param fileWithPath   pair of (artifact path, file on disk)
 * @return the request to execute, or {@code null} when the file is missing on disk
 */
@Nullable
private PutObjectRequest createRequest(@NotNull final String pathPrefix, @NotNull final String bucketName, @NotNull final Consumer<FileUploadInfo> uploadConsumer, @NotNull final Pair<String, File> fileWithPath) {
  final File source = fileWithPath.getSecond();
  if (!source.exists()) {
    // Missing files are skipped: warn and let the caller filter out the null request.
    myLogger.warn("Artifact \"" + source.getAbsolutePath() + "\" does not exist and will not be published to the server");
    return null;
  }

  final String normalizedPath = S3Util.normalizeArtifactPath(fileWithPath.getFirst(), source);

  // Announce the upcoming upload before constructing the request (no digest yet, hence null).
  uploadConsumer.accept(new FileUploadInfo(normalizedPath, source.getAbsolutePath(), source.length(), null));

  final ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentType(S3Util.getContentType(source));
  return new PutObjectRequest(bucketName, pathPrefix + normalizedPath, source)
    .withCannedAcl(myS3Configuration.getAcl())
    .withMetadata(objectMetadata);
}
Use of jetbrains.buildServer.artifacts.s3.FileUploadInfo in the project teamcity-s3-artifact-storage-plugin by JetBrains.
Class S3RegularFileUploader, method upload:
/**
 * Uploads all given files to the configured S3 bucket via the AWS TransferManager.
 * Each file is turned into a PutObjectRequest by createRequest (which also reports the
 * file to {@code uploadInfoConsumer}); missing files yield null requests and are skipped.
 *
 * @param filesToUpload      map of file on disk -> artifact path
 * @param interrupter        interruption supplier (declared by the interface; not consulted here)
 * @param uploadInfoConsumer notified about every file that will be uploaded
 * @return always {@code null} — this uploader does not collect per-file statistics
 * @throws InvalidSettingsException declared by the interface; presumably raised while
 *                                  preparing/validating the destination — TODO confirm
 */
@Override
@Nullable
public Collection<UploadStatistics> upload(@NotNull final Map<File, String> filesToUpload, @NotNull final Supplier<String> interrupter, Consumer<FileUploadInfo> uploadInfoConsumer) throws InvalidSettingsException {
final String bucketName = myS3Configuration.getBucketName();
try {
// NOTE(review): defined elsewhere — presumably ensures the bucket is reachable/ready; confirm.
prepareDestination(bucketName, myS3Configuration.getSettingsMap());
LOG.debug(() -> "Publishing artifacts using S3 configuration " + myS3Configuration);
// Build one request per file (nulls for missing files are filtered out), start all
// uploads through the transfer manager, then wait for each transfer to complete.
S3Util.withTransferManagerCorrectingRegion(myS3Configuration.getSettingsMap(), transferManager -> filesToUpload.entrySet().stream().map(entry -> createRequest(myS3Configuration.getPathPrefix(), bucketName, uploadInfoConsumer, new Pair<>(entry.getValue(), entry.getKey()))).filter(Objects::nonNull).map(request -> doUpload(transferManager, request)).collect(Collectors.toList()), myS3Configuration.getAdvancedConfiguration()).forEach(upload -> {
try {
upload.waitForCompletion();
} catch (Exception e) {
// A failed wait is logged but does not fail the publish as a whole.
LOG.infoAndDebugDetails("Got exception while waiting for upload completion", e);
myLogger.info("Got error while waiting for async artifact upload " + e.getMessage());
}
});
} catch (Throwable t) {
// Normalize any failure through AWSException to get a readable message + details.
final AWSException awsException = new AWSException(t);
final String details = awsException.getDetails();
if (StringUtil.isNotEmpty(details)) {
final String message = awsException.getMessage() + details;
LOG.warn(message);
myLogger.error(message);
}
// NOTE(review): the 'false' flag presumably marks the failure as non-recoverable — confirm
// against FileUploadFailedException's constructor contract.
throw new FileUploadFailedException(awsException.getMessage(), false, awsException);
}
// This implementation never reports statistics; the return type is @Nullable for that reason.
return null;
}
Use of jetbrains.buildServer.artifacts.s3.FileUploadInfo in the project teamcity-s3-artifact-storage-plugin by JetBrains.
Class S3PresignedUpload, method call:
/**
 * Performs the presigned-URL upload of {@code myFile} and returns its upload descriptor.
 *
 * @return a {@link FileUploadInfo} holding the artifact path, absolute file path, size and digest
 * @throws FileUploadFailedException when the upload fails with an HTTP error or an I/O error
 *                                   (a missing file surfaces as a wrapped FileNotFoundException,
 *                                   which is an IOException)
 */
@Override
public FileUploadInfo call() {
  // Reset any etags collected by a previous attempt of this task.
  myEtags = null;
  try {
    if (!myFile.exists()) {
      // FileNotFoundException is an IOException, so this lands in the IOException handler below.
      throw new FileNotFoundException(myFile.getAbsolutePath());
    }
    myRemainingBytes.set(myFile.length());
    final String checksum = upload();
    return new FileUploadInfo(myArtifactPath, myFile.getAbsolutePath(), myFile.length(), checksum);
  } catch (HttpClientUtil.HttpErrorCodeException httpError) {
    final String errorMessage = "Failed to upload artifact " + myArtifactPath + ": " + httpError.getMessage();
    LOGGER.infoAndDebugDetails(errorMessage, httpError);
    throw new FileUploadFailedException(errorMessage, httpError);
  } catch (IOException ioError) {
    LOGGER.infoAndDebugDetails("Got exception while trying to upload file: " + ioError.getMessage(), ioError);
    throw new FileUploadFailedException(ioError.getMessage(), false, ioError);
  }
}
Use of jetbrains.buildServer.artifacts.s3.FileUploadInfo in the project teamcity-s3-artifact-storage-plugin by JetBrains.
Class S3SignedUrlFileUploader, method upload:
/**
 * Uploads all given files to S3 through presigned URLs, in parallel on a fork-join pool,
 * with retries, and feeds every successful upload's FileUploadInfo to the consumer.
 *
 * @param filesToUpload      map of file on disk -> artifact path
 * @param interrupter        consulted via the per-upload progress listener to abort publishing
 * @param uploadInfoConsumer receives one FileUploadInfo per successfully uploaded file;
 *                           consumer failures are logged and do not fail the publish
 * @return the statistics records collected so far (possibly partial when interrupted)
 */
@Override
public Collection<UploadStatistics> upload(@NotNull Map<File, String> filesToUpload, @NotNull Supplier<String> interrupter, Consumer<FileUploadInfo> uploadInfoConsumer) {
LOGGER.debug(() -> "Publishing artifacts using S3 configuration " + myS3Configuration);
// Normalize every artifact path into an object key; when two different files map to the
// same key, warn and keep only the last one (HashMap.put overwrites).
final Map<String, FileWithArtifactPath> normalizedObjectPaths = new HashMap<>();
for (Map.Entry<File, String> entry : filesToUpload.entrySet()) {
final File file = entry.getKey();
final String artifactPath = S3Util.normalizeArtifactPath(entry.getValue(), file);
final String objectKey = myS3Configuration.getPathPrefix() + artifactPath;
final FileWithArtifactPath existingMapping = normalizedObjectPaths.get(objectKey);
if (existingMapping != null && !existingMapping.getFile().equals(file)) {
myLogger.warn("Found clashing artifacts path: " + artifactPath + " leading to different files [" + existingMapping.getFile().getPath() + "," + file.getPath() + "].\n" + "Only the last file will be uploaded to the specified artifact path.");
}
normalizedObjectPaths.put(objectKey, FileWithArtifactPath.create(artifactPath, file));
}
final StatisticsLogger statisticsLogger = new StatisticsLogger();
// Each individual upload is retried according to the advanced configuration.
final Retrier retrier = defaultRetrier(myS3Configuration.getAdvancedConfiguration().getRetriesNum(), myS3Configuration.getAdvancedConfiguration().getRetryDelay(), LOGGER);
// Pool, low-level client and presigned-URL manager are all closed by try-with-resources.
try (final CloseableForkJoinPoolAdapter forkJoinPool = new CloseableForkJoinPoolAdapter(myS3Configuration.getAdvancedConfiguration().getNThreads());
final LowLevelS3Client lowLevelS3Client = createAwsClient(myS3Configuration);
final S3SignedUploadManager uploadManager = new S3SignedUploadManager(myPresignedUrlsProviderClient.get(), myS3Configuration.getAdvancedConfiguration(), normalizedObjectPaths.keySet())) {
LOGGER.debug("Publishing [" + filesToUpload.keySet().stream().map(f -> f.getName()).collect(Collectors.joining(",")) + "] to S3");
// Submit one retried upload task per object key; a task rejected because the pool is
// already shutting down is expected (interruption path) and only debug-logged.
normalizedObjectPaths.entrySet().stream().map(objectKeyToFileWithArtifactPath -> {
try {
return forkJoinPool.submit(() -> retrier.execute(S3PresignedUpload.create(objectKeyToFileWithArtifactPath.getValue().getArtifactPath(), objectKeyToFileWithArtifactPath.getKey(), objectKeyToFileWithArtifactPath.getValue().getFile(), myS3Configuration.getAdvancedConfiguration(), uploadManager, lowLevelS3Client, new PresignedUploadProgressListenerImpl(myLogger, uploadManager, interrupter, statisticsLogger))));
} catch (RejectedExecutionException e) {
if (isPoolTerminating(forkJoinPool)) {
LOGGER.debug("Artifact publishing rejected by pool shutdown");
} else {
LOGGER.warnAndDebugDetails("Artifact publishing rejected by pool", e);
}
return null;
}
// Wait for each submitted task; on an interruption-type failure shut the pool down
// (cancelling the remaining uploads), otherwise rethrow to the outer catch.
}).filter(Objects::nonNull).map((ForkJoinTask<FileUploadInfo> future) -> waitForCompletion(future, e -> {
logPublishingError(e);
if (isPublishingInterruptedException(e)) {
shutdownPool(forkJoinPool);
} else {
ExceptionUtil.rethrowAsRuntimeException(e);
}
// Deliver each completed upload to the consumer; consumer errors must not fail the build.
})).filter(Objects::nonNull).forEach(uploadInfo -> {
try {
uploadInfoConsumer.accept(uploadInfo);
} catch (Throwable t) {
LOGGER.warnAndDebugDetails("Failed to send artifact upload information to consumer", t);
}
});
} catch (Throwable th) {
// Interruption is an orderly stop, not an error; known upload failures are rethrown as-is,
// everything else is wrapped in FileUploadFailedException.
if (isPublishingInterruptedException(th)) {
LOGGER.info("Publishing is interrupted " + th.getMessage(), th);
} else {
if (th instanceof FileUploadFailedException) {
throw th;
}
LOGGER.warnAndDebugDetails("Got error while uploading artifacts " + th.getMessage(), th);
throw new FileUploadFailedException(th.getMessage(), false, th);
}
}
// Statistics recorded up to this point are returned even after interruption.
return statisticsLogger.getAllRecords();
}
Aggregations