Example usage related to lombok.Data in project pravega (by pravega):
class BTreeIndex, method applyUpdates.
// endregion

// region Helpers

/**
 * Executes the given updates on the index. Loads up any necessary BTreePage instances in memory but does not persist
 * the changes to the external data source, nor does it reassign offsets to the modified pages, perform splits, etc.
 *
 * @param updates An Iterator of the PageEntry instances to insert, update or remove. The Iterator must return the
 *                updates in sorted order (by key).
 * @param timer   Timer for the operation.
 * @return A CompletableFuture that will contain a PageCollection with all touched pages.
 */
private CompletableFuture<UpdateablePageCollection> applyUpdates(Iterator<PageEntry> updates, TimeoutTimer timer) {
    UpdateablePageCollection pageCollection = new UpdateablePageCollection(this.state.length);
    // Tracks the page targeted by the previous update. Because `updates` is key-sorted, consecutive updates
    // tend to hit the same page, so we batch them and apply them in one call per page.
    // AtomicReference is used here only as a mutable holder visible to the loop's lambdas, not for thread-safety.
    AtomicReference<PageWrapper> lastPage = new AtomicReference<>(null);
    val lastPageUpdates = new ArrayList<PageEntry>();
    return Futures.loop(updates::hasNext, () -> {
        // Locate the page where the update is to be executed. Do not apply it yet as it is more efficient
        // to bulk-apply multiple at once. Collect all updates for each Page, and only apply them once we have
        // "moved on" to another page.
        PageEntry next = updates.next();
        return locatePage(next.getKey(), pageCollection, timer).thenAccept(page -> {
            PageWrapper last = lastPage.get();
            if (page != last) {
                // This key goes to a different page than the one we were looking at.
                if (last != null) {
                    // Commit the outstanding updates to the page we just moved off of.
                    last.setEntryCountDelta(last.getPage().update(lastPageUpdates));
                }

                // Update the pointers: `page` becomes the current batching target.
                lastPage.set(page);
                lastPageUpdates.clear();
            }

            // Record the current update; it will be applied when we move to another page (or at the end).
            lastPageUpdates.add(next);
        });
    }, this.executor).thenApplyAsync(v -> {
        // We must not forget to apply the last batch of updates from the last page.
        PageWrapper last = lastPage.get();
        if (last != null) {
            last.setEntryCountDelta(last.getPage().update(lastPageUpdates));
        }

        return pageCollection;
    }, this.executor);
}
Example usage related to lombok.Data in project alf.io (by alfio-event):
class EventApiController, method downloadSponsorScanExport.
@GetMapping("/events/{eventName}/sponsor-scan/export")
public void downloadSponsorScanExport(@PathVariable("eventName") String eventName, @RequestParam(name = "format", defaultValue = "excel") String format, HttpServletResponse response, Principal principal) throws IOException {
    // Resolve the event for the authenticated principal (also acts as an authorization check).
    var event = eventManager.getSingleEvent(eventName, principal.getName());
    List<TicketFieldConfiguration> additionalFields = ticketFieldRepository.findAdditionalFieldsForEvent(event.getId());

    // Build the export header: fixed leading columns, one column per event-specific additional field,
    // then the fixed trailing columns.
    List<String> header = new ArrayList<>();
    header.add("Username/Api Key");
    header.add("Description");
    header.add("Timestamp");
    header.add("Full name");
    header.add("Email");
    header.addAll(additionalFields.stream().map(TicketFieldConfiguration::getName).collect(toList()));
    header.add("Sponsor notes");
    header.add("Lead Status");

    // One String[] row per sponsor scan, produced lazily: only users with the SPONSOR role contribute rows.
    Stream<String[]> sponsorScans = userManager.findAllEnabledUsers(principal.getName()).stream()
        .map(enabledUser -> Pair.of(enabledUser, userManager.getUserRole(enabledUser)))
        .filter(userAndRole -> userAndRole.getRight() == Role.SPONSOR)
        .flatMap(userAndRole -> sponsorScanRepository.loadSponsorData(event.getId(), userAndRole.getKey().getId(), SponsorScanRepository.DEFAULT_TIMESTAMP)
            .stream()
            .map(scanData -> Pair.of(scanData, ticketFieldRepository.findAllValuesForTicketId(scanData.getTicket().getId()))))
        .map(scanAndValues -> {
            DetailedScanData scanData = scanAndValues.getLeft();
            Map<String, String> valuesByFieldName = scanAndValues.getRight();
            // Resolve additional-field values in header order, defaulting to "" for fields left blank.
            List<String> additionalValues = additionalFields.stream()
                .map(field -> valuesByFieldName.getOrDefault(field.getName(), ""))
                .collect(toList());
            Ticket ticket = scanData.getTicket();
            SponsorScan sponsorScan = scanData.getSponsorScan();
            User scanningUser = userManager.findUser(sponsorScan.getUserId());
            List<String> line = new ArrayList<>();
            line.add(scanningUser.getUsername());
            line.add(scanningUser.getDescription());
            line.add(sponsorScan.getTimestamp().toString());
            line.add(ticket.getFullName());
            line.add(ticket.getEmail());
            line.addAll(additionalValues);
            line.add(sponsorScan.getNotes());
            line.add(sponsorScan.getLeadStatus().name());
            return line.toArray(new String[0]);
        });

    // Default (and any unrecognized value other than "excel") falls through to CSV only when explicitly requested;
    // "excel" is the declared default for the `format` parameter.
    if ("excel".equals(format)) {
        exportSponsorScanExcel(event.getShortName(), header, sponsorScans, response);
    } else {
        exportSponsorScanCSV(event.getShortName(), header, sponsorScans, response);
    }
}
Example usage related to lombok.Data in project lombok (by rzwitserloot):
class HandleData, method handle.
/**
 * Processes an {@code @Data} annotation on an Eclipse AST node by delegating to the individual
 * getter/setter/equals-hashCode/toString/constructor handlers.
 *
 * @param annotation     the resolved {@code @Data} annotation values.
 * @param ast            the raw annotation AST node.
 * @param annotationNode the Eclipse node representing the annotation; errors are reported against it.
 */
@Override
public void handle(AnnotationValues<Data> annotation, Annotation ast, EclipseNode annotationNode) {
    handleFlagUsage(annotationNode, ConfigurationKeys.DATA_FLAG_USAGE, "@Data");

    Data ann = annotation.getInstance();
    EclipseNode typeNode = annotationNode.up();

    // @Data is only meaningful on a plain class: reject interfaces, annotation types and enums.
    TypeDeclaration typeDecl = typeNode.get() instanceof TypeDeclaration ? (TypeDeclaration) typeNode.get() : null;
    boolean isPlainClass = typeDecl != null
        && (typeDecl.modifiers & (ClassFileConstants.AccInterface | ClassFileConstants.AccAnnotation | ClassFileConstants.AccEnum)) == 0;
    if (!isPlainClass) {
        annotationNode.addError("@Data is only supported on a class.");
        return;
    }

    // Careful: Generate the public static constructor (if there is one) LAST, so that any attempt to
    // 'find callers' on the annotation node will find callers of the constructor, which is by far the
    // most useful of the many methods built by @Data. This trick won't work for the non-static constructor,
    // for whatever reason, though you can find callers of that one by focusing on the class name itself
    // and hitting 'find callers'.
    handleGetter.generateGetterForType(typeNode, annotationNode, AccessLevel.PUBLIC, true);
    handleSetter.generateSetterForType(typeNode, annotationNode, AccessLevel.PUBLIC, true);
    handleEqualsAndHashCode.generateEqualsAndHashCodeForType(typeNode, annotationNode);
    handleToString.generateToStringForType(typeNode, annotationNode);
    handleConstructor.generateRequiredArgsConstructor(typeNode, AccessLevel.PUBLIC, ann.staticConstructor(), SkipIfConstructorExists.YES, Collections.<Annotation>emptyList(), annotationNode);
}
Example usage related to lombok.Data in project pravega (by pravega):
class BTreeIndex, method initialize.
/**
 * Initializes the BTreeIndex by fetching metadata from the external data source. This method must be invoked (and
 * completed) prior to executing any other operation on this instance.
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will indicate that the operation completed.
 */
public CompletableFuture<Void> initialize(Duration timeout) {
    // Re-initialization is permitted (the state is simply rebuilt), but worth flagging.
    if (isInitialized()) {
        log.warn("{}: Reinitializing.", this.traceObjectId);
    }

    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.getLength.apply(timer.getRemaining()).thenCompose(indexInfo -> {
        long indexLength = indexInfo.getIndexLength();
        if (indexLength <= FOOTER_LENGTH) {
            // Nothing (beyond a footer's worth of data) has been persisted yet: start from an empty index.
            setState(indexLength, PagePointer.NO_OFFSET, 0);
            this.statistics = this.maintainStatistics ? Statistics.EMPTY : null;
            return CompletableFuture.completedFuture(null);
        }

        // Prefer the explicitly recorded root pointer; fall back to deriving the footer's position
        // from the total index length.
        long footerOffset = indexInfo.getRootPointer() >= 0
            ? indexInfo.getRootPointer()
            : getFooterOffset(indexLength);

        // Read the footer, apply it to our state, then load statistics before reporting success.
        return this.read.apply(footerOffset, FOOTER_LENGTH, false, timer.getRemaining())
            .thenAcceptAsync(footer -> initialize(footer, footerOffset, indexLength), this.executor)
            .thenCompose(v -> loadStatistics(timer.getRemaining()))
            .thenRun(() -> log.info("{}: Initialized. State = {}, Stats = {}.", this.traceObjectId, this.state, this.statistics));
    });
}
Example usage related to lombok.Data in project pravega (by pravega):
class StreamMetadataTasksTest, method sealStreamWithTxnTest.
/**
 * Verifies that sealing a stream with outstanding transactions is retried until all transactions complete:
 * open transactions get aborted by the seal workflow, committing/aborting transactions are left to finish,
 * and the seal only succeeds once no active transactions remain.
 */
@Test(timeout = 30000)
public void sealStreamWithTxnTest() throws Exception {
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String streamWithTxn = "streamWithTxn";

    // region seal a stream with transactions
    long start = System.currentTimeMillis();
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, streamWithTxn, config, start, null, executor).get();
    streamStorePartialMock.setState(SCOPE, streamWithTxn, State.ACTIVE, null, executor).get();

    // create txn: three transactions that will be left in OPEN, COMMITTING and ABORTING states respectively.
    VersionedTransactionData openTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    VersionedTransactionData committingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    VersionedTransactionData abortingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();

    // set transaction to committing
    streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, committingTxn.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();

    // set transaction to aborting
    streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), false, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();

    // Mock getActiveTransactions call such that we return committing txn as OPEN txn.
    // (Non-OPEN/non-ABORTING records are rewritten with TxnStatus.OPEN so the seal workflow sees them as open.)
    Map<UUID, ActiveTxnRecord> activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
    Map<UUID, ActiveTxnRecord> retVal = activeTxns.entrySet().stream().map(tx -> {
        if (!tx.getValue().getTxnStatus().equals(TxnStatus.OPEN) && !tx.getValue().getTxnStatus().equals(TxnStatus.ABORTING)) {
            ActiveTxnRecord txRecord = tx.getValue();
            return new AbstractMap.SimpleEntry<>(tx.getKey(), new ActiveTxnRecord(txRecord.getTxCreationTimestamp(), txRecord.getLeaseExpiryTime(), txRecord.getMaxExecutionExpiryTime(), TxnStatus.OPEN));
        } else {
            return tx;
        }
    }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());

    List<AbortEvent> abortListBefore = abortWriter.getEventList();

    // Kick off the seal; processing its event must fail (and be retried later) while transactions are active.
    streamMetadataTasks.sealStream(SCOPE, streamWithTxn, 0L);
    AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    requestEventWriter.eventQueue.take();

    reset(streamStorePartialMock);

    // verify that the txn status is set to aborting (the seal workflow aborts OPEN transactions).
    VersionedTransactionData txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
    assertEquals(txnData.getStatus(), TxnStatus.ABORTING);
    assertEquals(requestEventWriter.getEventQueue().size(), 1);

    // verify that events are posted for the abort txn.
    List<AbortEvent> abortListAfter = abortWriter.getEventList();
    assertEquals(abortListAfter.size(), abortListBefore.size() + 2);
    assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(openTxn.getId())));
    assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(abortingTxn.getId())));

    // The COMMITTING transaction must be left untouched by the seal workflow.
    txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
    assertEquals(txnData.getStatus(), TxnStatus.COMMITTING);

    // Mock getActiveTransactions call such that we return some non existent transaction id so that DataNotFound is simulated.
    // returning a random transaction with list of active txns such that when its abort is attempted, Data Not Found Exception gets thrown
    retVal = new HashMap<>();
    retVal.put(UUID.randomUUID(), new ActiveTxnRecord(1L, 1L, 1L, TxnStatus.OPEN));
    doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
    AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);

    reset(streamStorePartialMock);

    // Now complete all existing transactions and verify that seal completes
    streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
    streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), null, executor).join();
    ((AbstractStreamMetadataStore) streamStorePartialMock).commitTransaction(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
    activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
    assertTrue(activeTxns.isEmpty());
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // endregion
}
Aggregations