Example usage of tech.pegasys.teku.infrastructure.unsigned.UInt64.ZERO in the Teku project by ConsenSys:
the checkValidatorLiveness method of the TekuNode class.
public void checkValidatorLiveness(final int epoch, final int totalValidatorCount, ValidatorLivenessExpectation... args) throws IOException {
  // Build the complete list of validator indices [0, totalValidatorCount).
  final List<UInt64> validatorIndices = new ArrayList<>();
  for (int index = 0; index < totalValidatorCount; index++) {
    validatorIndices.add(UInt64.valueOf(index));
  }
  // Fetch per-validator liveness for the requested epoch and check every
  // supplied expectation against the returned map.
  final Map<UInt64, Boolean> livenessByValidator = getValidatorLivenessAtEpoch(UInt64.valueOf(epoch), validatorIndices);
  for (ValidatorLivenessExpectation expectation : args) {
    expectation.verify(livenessByValidator);
  }
}
Example usage of tech.pegasys.teku.infrastructure.unsigned.UInt64.ZERO in the Teku project by ConsenSys:
the shouldScheduleAdditionalBatchWhenThereIsOnlyOneBlockRemainingToFetch method of the BatchDataRequesterTest class.
@Test
void shouldScheduleAdditionalBatchWhenThereIsOnlyOneBlockRemainingToFetch() {
  // Seed the chain with one batch that ends a full batch before the target head,
  // leaving exactly one slot unfetched.
  final UInt64 startSlot = targetChain.getChainHead().getSlot().minus(BATCH_SIZE);
  final Batch initialBatch = batchFactory.createBatch(targetChain, startSlot, BATCH_SIZE);
  batchChain.add(initialBatch);

  fillQueue(ZERO);

  // A second batch must have been scheduled to cover the single remaining slot.
  final List<Batch> scheduledBatches = batchChain.stream().collect(toList());
  assertThat(scheduledBatches).hasSize(2);
  final long headSlot = targetChain.getChainHead().getSlot().longValue();
  assertThatBatch(scheduledBatches.get(0)).hasRange(startSlot.intValue(), headSlot - 1);
  assertThatBatch(scheduledBatches.get(1)).hasRange(headSlot, headSlot);
}
Example usage of tech.pegasys.teku.infrastructure.unsigned.UInt64.ZERO in the Teku project by ConsenSys:
the sendAggregateAndProofs_shouldProcessMixOfValidAndInvalidAggregates method of the ValidatorApiHandlerTest class.
@Test
void sendAggregateAndProofs_shouldProcessMixOfValidAndInvalidAggregates() {
  final SignedAggregateAndProof rejectedAggregate = dataStructureUtil.randomSignedAggregateAndProof();
  final SignedAggregateAndProof acceptedAggregate = dataStructureUtil.randomSignedAggregateAndProof();
  // Stub the attestation manager so the first aggregate fails validation and the
  // second succeeds.
  when(attestationManager.onAttestation(ValidateableAttestation.aggregateFromValidator(spec, rejectedAggregate)))
      .thenReturn(completedFuture(AttestationProcessingResult.invalid("Bad juju")));
  when(attestationManager.onAttestation(ValidateableAttestation.aggregateFromValidator(spec, acceptedAggregate)))
      .thenReturn(completedFuture(SUCCESSFUL));

  final SafeFuture<List<SubmitDataError>> result =
      validatorApiHandler.sendAggregateAndProofs(List.of(rejectedAggregate, acceptedAggregate));

  // Only the rejected aggregate (submitted at index 0) yields an error entry.
  assertThat(result).isCompletedWithValue(List.of(new SubmitDataError(ZERO, "Bad juju")));
  // Both aggregates are still forwarded to the attestation manager.
  verify(attestationManager).onAttestation(argThat(validatableAttestation -> validatableAttestation.getSignedAggregateAndProof().equals(rejectedAggregate)));
  verify(attestationManager).onAttestation(argThat(validatableAttestation -> validatableAttestation.getSignedAggregateAndProof().equals(acceptedAggregate)));
}
Example usage of tech.pegasys.teku.infrastructure.unsigned.UInt64.ZERO in the Teku project by ConsenSys:
the onEth1Block method of the Eth1DataCache class.
public void onEth1Block(final Bytes32 blockHash, final UInt64 blockTimestamp) {
  // Carry forward the deposit data of the nearest preceding cached block; when no
  // predecessor exists in the cache, record the block with empty deposit data.
  final Map.Entry<UInt64, Eth1Data> closestPredecessor = eth1ChainCache.floorEntry(blockTimestamp);
  final Eth1Data eth1Data =
      closestPredecessor == null
          ? new Eth1Data(Eth1Data.EMPTY_DEPOSIT_ROOT, UInt64.ZERO, blockHash)
          : closestPredecessor.getValue().withBlockHash(blockHash);
  eth1ChainCache.put(blockTimestamp, eth1Data);
  // Evict entries that have fallen outside the cache window.
  prune(blockTimestamp);
}
Example usage of tech.pegasys.teku.infrastructure.unsigned.UInt64.ZERO in the Teku project by ConsenSys:
the shouldPruneOldBlocksWhenNewerOnesReceived method of the Eth1DataCacheTest class.
@Test
void shouldPruneOldBlocksWhenNewerOnesReceived() {
  final UInt64 firstTimestamp = ZERO;
  final UInt64 secondTimestamp = firstTimestamp.plus(ONE);
  final UInt64 thirdTimestamp = secondTimestamp.plus(CACHE_DURATION).plus(ONE);
  final UInt64 fourthTimestamp = thirdTimestamp.plus(CACHE_DURATION);

  eth1DataCache.onBlockWithDeposit(firstTimestamp, createEth1Data(STATE_DEPOSIT_COUNT));
  eth1DataCache.onBlockWithDeposit(secondTimestamp, createEth1Data(STATE_DEPOSIT_COUNT));
  assertThat(getCacheSize()).isEqualTo(2);

  // The third block pushes both earlier blocks out of the cache period, but only
  // the very first is pruned: at least one entry from before the period must be
  // retained so blocks right at the start of the period still have a predecessor
  // to derive their data from.
  eth1DataCache.onBlockWithDeposit(thirdTimestamp, createEth1Data(STATE_DEPOSIT_COUNT));
  assertThat(getCacheSize()).isEqualTo(2);

  // The fourth block puts the third exactly at the start of the cache period, so
  // the second block can now be dropped; the third and fourth remain.
  eth1DataCache.onBlockWithDeposit(fourthTimestamp, createEth1Data(STATE_DEPOSIT_COUNT));
  assertThat(getCacheSize()).isEqualTo(2);
}
Aggregations