Use of org.springframework.beans.factory.annotation.Value in project tephra by heisedebaise.
The class HttpImpl, method upload:
@Override
public String upload(String url, Map<String, String> requestHeaders, Map<String, String> parameters, Map<String, File> files, String charset) {
    if (validator.isEmpty(files))
        return post(url, requestHeaders, parameters, charset);
    MultipartEntityBuilder entity = MultipartEntityBuilder.create();
    ContentType contentType = ContentType.create("text/plain", context.getCharset(charset));
    if (!validator.isEmpty(parameters))
        parameters.forEach((key, value) -> entity.addTextBody(key, value, contentType));
    files.forEach(entity::addBinaryBody);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    postByEntity(url, requestHeaders, entity.build(), null, outputStream);
    return outputStream.toString();
}
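A minimal caller sketch for the method above; the injected field name "http", the URL, the parameter names and the file path are assumptions, not taken from the tephra source.

Map<String, String> parameters = new HashMap<>();
parameters.put("title", "demo");
Map<String, File> files = new HashMap<>();
files.put("attachment", new File("/tmp/report.pdf"));
// Assumes an HttpImpl instance is injected as "http"; an empty header map is passed.
String responseBody = http.upload("https://example.com/upload", new HashMap<>(), parameters, files, "UTF-8");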
Use of org.springframework.beans.factory.annotation.Value in project nakadi by zalando.
The class EventStreamController, method streamEvents:
@RequestMapping(value = "/event-types/{name}/events", method = RequestMethod.GET)
public StreamingResponseBody streamEvents(
        @PathVariable("name") final String eventTypeName,
        @Nullable @RequestParam(value = "batch_limit", required = false) final Integer batchLimit,
        @Nullable @RequestParam(value = "stream_limit", required = false) final Integer streamLimit,
        @Nullable @RequestParam(value = "batch_flush_timeout", required = false) final Integer batchTimeout,
        @Nullable @RequestParam(value = "stream_timeout", required = false) final Integer streamTimeout,
        @Nullable @RequestParam(value = "stream_keep_alive_limit", required = false) final Integer streamKeepAliveLimit,
        @Nullable @RequestHeader(name = "X-nakadi-cursors", required = false) final String cursorsStr,
        final HttpServletRequest request,
        final HttpServletResponse response,
        final Client client) {
    final String flowId = FlowIdUtils.peek();
    return outputStream -> {
        FlowIdUtils.push(flowId);
        if (blacklistService.isConsumptionBlocked(eventTypeName, client.getClientId())) {
            writeProblemResponse(response, outputStream,
                    Problem.valueOf(Response.Status.FORBIDDEN, "Application or event type is blocked"));
            return;
        }
        final AtomicBoolean connectionReady = closedConnectionsCrutch.listenForConnectionClose(request);
        Counter consumerCounter = null;
        EventStream eventStream = null;
        List<ConnectionSlot> connectionSlots = ImmutableList.of();
        final AtomicBoolean needCheckAuthorization = new AtomicBoolean(false);
        LOG.info("[X-NAKADI-CURSORS] \"{}\" {}", eventTypeName, Optional.ofNullable(cursorsStr).orElse("-"));
        try (Closeable ignore = eventTypeChangeListener.registerListener(
                et -> needCheckAuthorization.set(true), Collections.singletonList(eventTypeName))) {
            final EventType eventType = eventTypeRepository.findByName(eventTypeName);
            authorizeStreamRead(eventTypeName);
            // validate parameters
            final EventStreamConfig streamConfig = EventStreamConfig.builder()
                    .withBatchLimit(batchLimit)
                    .withStreamLimit(streamLimit)
                    .withBatchTimeout(batchTimeout)
                    .withStreamTimeout(streamTimeout)
                    .withStreamKeepAliveLimit(streamKeepAliveLimit)
                    .withEtName(eventTypeName)
                    .withConsumingClient(client)
                    .withCursors(getStreamingStart(eventType, cursorsStr))
                    .withMaxMemoryUsageBytes(maxMemoryUsageBytes)
                    .build();
            // acquire connection slots to limit the number of simultaneous connections from one client
            if (featureToggleService.isFeatureEnabled(LIMIT_CONSUMERS_NUMBER)) {
                final List<String> partitions = streamConfig.getCursors().stream()
                        .map(NakadiCursor::getPartition)
                        .collect(Collectors.toList());
                connectionSlots = consumerLimitingService.acquireConnectionSlots(client.getClientId(), eventTypeName, partitions);
            }
            consumerCounter = metricRegistry.counter(metricNameFor(eventTypeName, CONSUMERS_COUNT_METRIC_NAME));
            consumerCounter.inc();
            final String kafkaQuotaClientId = getKafkaQuotaClientId(eventTypeName, client);
            response.setStatus(HttpStatus.OK.value());
            response.setHeader("Warning", "299 - nakadi - the Low-level API is deprecated and will "
                    + "be removed from a future release. Please consider migrating to the Subscriptions API.");
            response.setContentType("application/x-json-stream");
            final EventConsumer eventConsumer = timelineService.createEventConsumer(kafkaQuotaClientId, streamConfig.getCursors());
            final String bytesFlushedMetricName = MetricUtils.metricNameForLoLAStream(client.getClientId(), eventTypeName);
            final Meter bytesFlushedMeter = this.streamMetrics.meter(bytesFlushedMetricName);
            eventStream = eventStreamFactory.createEventStream(outputStream, eventConsumer, streamConfig, bytesFlushedMeter);
            // Flush status code to client
            outputStream.flush();
            eventStream.streamEvents(connectionReady, () -> {
                if (needCheckAuthorization.getAndSet(false)) {
                    authorizeStreamRead(eventTypeName);
                }
            });
        } catch (final UnparseableCursorException e) {
            LOG.debug("Incorrect syntax of X-nakadi-cursors header: {}. Respond with BAD_REQUEST.", e.getCursors(), e);
            writeProblemResponse(response, outputStream, BAD_REQUEST, e.getMessage());
        } catch (final NoSuchEventTypeException e) {
            writeProblemResponse(response, outputStream, NOT_FOUND, "topic not found");
        } catch (final NoConnectionSlotsException e) {
            LOG.debug("Connection creation failed due to exceeding max connection count");
            writeProblemResponse(response, outputStream, e.asProblem());
        } catch (final NakadiException e) {
            LOG.error("Error while trying to stream events.", e);
            writeProblemResponse(response, outputStream, e.asProblem());
        } catch (final InvalidCursorException e) {
            writeProblemResponse(response, outputStream, PRECONDITION_FAILED, e.getMessage());
        } catch (final AccessDeniedException e) {
            writeProblemResponse(response, outputStream, FORBIDDEN, e.explain());
        } catch (final Exception e) {
            LOG.error("Error while trying to stream events. Respond with INTERNAL_SERVER_ERROR.", e);
            writeProblemResponse(response, outputStream, INTERNAL_SERVER_ERROR, e.getMessage());
        } finally {
            connectionReady.set(false);
            consumerLimitingService.releaseConnectionSlots(connectionSlots);
            if (consumerCounter != null) {
                consumerCounter.dec();
            }
            if (eventStream != null) {
                eventStream.close();
            }
            try {
                outputStream.flush();
            } finally {
                outputStream.close();
            }
        }
    };
}
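The excerpt references maxMemoryUsageBytes when building the EventStreamConfig; a field like that is a natural candidate for @Value injection. The sketch below is an assumption; the property key and default value are not copied from the nakadi source.

// Hypothetical binding of the memory limit used by the stream config above;
// the property key "nakadi.stream.maxStreamMemoryBytes" and the default are assumptions.
@Value("${nakadi.stream.maxStreamMemoryBytes:50000000}")
private long maxMemoryUsageBytes;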
Use of org.springframework.beans.factory.annotation.Value in project webcert by sklintyg.
The class FragaSvarServiceImpl, method getFragaSvar:
@Override
@Transactional(value = "jpaTransactionManager", readOnly = true)
public List<FragaSvarView> getFragaSvar(String intygId) {
    List<FragaSvar> fragaSvarList = fragaSvarRepository.findByIntygsReferensIntygsId(intygId);
    WebCertUser user = webCertUserService.getUser();
    validateSekretessmarkering(intygId, fragaSvarList, user);
    List<String> hsaEnhetIds = user.getIdsOfSelectedVardenhet();
    // Filter questions so that the current user only sees questions issued to
    // units with an active employment role.
    fragaSvarList.removeIf(fragaSvar -> fragaSvar.getVardperson() != null
            && !hsaEnhetIds.contains(fragaSvar.getVardperson().getEnhetsId()));
    // Finally, sort by senasteHandelseDatum. We do the sorting in code, since we
    // need to sort on a derived property rather than a directly persisted entity
    // property, in which case we could have used an order by in the query.
    fragaSvarList.sort(SENASTE_HANDELSE_DATUM_COMPARATOR);
    List<ArendeDraft> drafts = arendeDraftService.listAnswerDrafts(intygId);
    List<AnsweredWithIntyg> bmi = AnsweredWithIntygUtil.findAllKomplementForGivenIntyg(intygId, utkastRepository);
    List<FragaSvarView> fragaSvarWithBesvaratMedIntygInfo = fragaSvarList.stream()
            .map(fs -> FragaSvarView.create(
                    fs,
                    fs.getFrageSkickadDatum() == null
                            ? null
                            : AnsweredWithIntygUtil.returnOldestKompltOlderThan(fs.getFrageSkickadDatum(), bmi),
                    drafts.stream()
                            .filter(d -> Long.toString(fs.getInternReferens()).equals(d.getQuestionId()))
                            .findAny()
                            .map(ArendeDraft::getText)
                            .orElse(null)))
            .collect(Collectors.toList());
    return fragaSvarWithBesvaratMedIntygInfo;
}
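A hypothetical caller sketch for getFragaSvar; the controller mapping and the injected field name are assumptions, not taken from webcert.

// Hypothetical REST endpoint delegating to the service method shown above.
@GetMapping("/api/fragasvar/{intygId}")
public List<FragaSvarView> listFragaSvar(@PathVariable("intygId") String intygId) {
    return fragaSvarService.getFragaSvar(intygId);
}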
Use of org.springframework.beans.factory.annotation.Value in project syndesis by syndesisio.
The class DataManager, method fetch:
public <T extends WithId<T>> T fetch(Class<T> model, String id) {
    Kind kind = Kind.from(model);
    Map<String, T> cache = caches.getCache(kind.getModelName());
    T value = cache.get(id);
    if (value == null) {
        value = doWithDataAccessObject(model, d -> d.fetch(id));
        if (value != null) {
            cache.put(id, value);
        }
    }
    return value;
}
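A brief usage sketch of fetch; the Integration model class and the id value are assumptions.

// Looks in the in-memory cache first, then falls back to the data access object.
Integration integration = dataManager.fetch(Integration.class, "my-integration-id");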
Use of org.springframework.beans.factory.annotation.Value in project cloudbreak by hortonworks.
The class AwsResourceConnector, method upscale:
@Override
public List<CloudResourceStatus> upscale(AuthenticatedContext ac, CloudStack stack, List<CloudResource> resources) {
    resumeAutoScaling(ac, stack);
    AmazonAutoScalingClient amazonASClient = awsClient.createAutoScalingClient(
            new AwsCredentialView(ac.getCloudCredential()), ac.getCloudContext().getLocation().getRegion().value());
    AmazonCloudFormationClient cloudFormationClient = awsClient.createCloudFormationClient(
            new AwsCredentialView(ac.getCloudCredential()), ac.getCloudContext().getLocation().getRegion().value());
    AmazonEC2Client amazonEC2Client = awsClient.createAccess(
            new AwsCredentialView(ac.getCloudCredential()), ac.getCloudContext().getLocation().getRegion().value());
    List<Group> scaledGroups = getScaledGroups(stack);
    for (Group group : scaledGroups) {
        String asGroupName = cfStackUtil.getAutoscalingGroupName(ac, cloudFormationClient, group.getName());
        amazonASClient.updateAutoScalingGroup(new UpdateAutoScalingGroupRequest()
                .withAutoScalingGroupName(asGroupName)
                .withMaxSize(group.getInstancesSize())
                .withDesiredCapacity(group.getInstancesSize()));
        LOGGER.info("Updated Auto Scaling group's desiredCapacity: [stack: '{}', to: '{}']",
                ac.getCloudContext().getId(), resources.size());
    }
    scheduleStatusChecks(stack, ac, cloudFormationClient);
    suspendAutoScaling(ac, stack);
    boolean mapPublicIpOnLaunch = isMapPublicOnLaunch(new AwsNetworkView(stack.getNetwork()), amazonEC2Client);
    List<Group> gateways = getGatewayGroups(scaledGroups);
    if (mapPublicIpOnLaunch && !gateways.isEmpty()) {
        String cFStackName = getCloudFormationStackResource(resources).getName();
        Map<String, String> eipAllocationIds = getElasticIpAllocationIds(cFStackName, cloudFormationClient);
        for (Group gateway : gateways) {
            List<String> eips = getEipsForGatewayGroup(eipAllocationIds, gateway);
            List<String> freeEips = getFreeIps(eips, amazonEC2Client);
            List<String> instanceIds = getInstancesForGroup(ac, amazonASClient, cloudFormationClient, gateway);
            List<String> newInstances = instanceIds.stream()
                    .filter(iid -> gateway.getInstances().stream().noneMatch(inst -> iid.equals(inst.getInstanceId())))
                    .collect(Collectors.toList());
            associateElasticIpsToInstances(amazonEC2Client, freeEips, newInstances);
        }
    }
    return singletonList(new CloudResourceStatus(getCloudFormationStackResource(resources), ResourceStatus.UPDATED));
}
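The excerpt itself does not show the @Value injection; below is a minimal sketch of how a configuration value could be bound in a class like this. The property key and default are assumptions, not taken from the cloudbreak source.

// Hypothetical example of @Value injection; the key and default path are assumptions.
@Value("${cb.aws.cf.template.new.path:templates/aws-cf-stack.ftl}")
private String awsCloudformationTemplatePath;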