use of com.icodici.universa.node2.Config in project universa by UniversaBlockchain.
the class MainTest method testGetContractAPIforManyActive.
@Test
public void testGetContractAPIforManyActive() throws Exception {
    // init network in permanet mode
    List<Main> mm = new ArrayList<>();
    for (int i = 0; i < 4; i++) mm.add(createMain("node" + (i + 1), "_permanet", false));
    Main main = mm.get(0);
    main.config.setIsFreeRegistrationsAllowedFromYaml(true);
    main.config.getKeysWhiteList().add(TestKeys.publicKey(20));
    Client client = new Client(TestKeys.privateKey(20), main.myInfo, null);
    assertTrue(main.config.isPermanetMode());
    for (int i = 1; i < 4; i++) assertTrue(mm.get(i).config.isPermanetMode());
    // try to cancel permanet mode
    main.config.setPermanetMode(false);
    assertTrue(main.config.isPermanetMode());
    Set<PrivateKey> issuerPrivateKeys = new HashSet<>(asList(TestKeys.privateKey(1)));
    Set<PublicKey> ownerPublicKeys = new HashSet<>(asList(TestKeys.publicKey(2)));
    Set<PrivateKey> issuerPrivateKeys2 = new HashSet<>(asList(TestKeys.privateKey(2)));
    List<Contract> splits = new ArrayList<>();
    Integer amountTokens = 1000;
    Contract tokenContract = ContractsService.createTokenContract(issuerPrivateKeys, ownerPublicKeys, new BigDecimal("1000"));
    tokenContract.check();
    tokenContract.traceErrors();
    HashId origin = tokenContract.getId();
    ItemResult itemResult = client.register(tokenContract.getPackedTransaction(), 5000);
    System.out.println("tokenContract : " + itemResult + " = " + amountTokens.toString());
    assertEquals(ItemState.APPROVED, itemResult.state);
    for (Integer i = 1; i < 41; i++) {
        Contract contractRemainder = ContractsService.createSplit(tokenContract, new BigDecimal(i), "amount", issuerPrivateKeys2, true);
        Contract contractSplit = contractRemainder.getNew().get(0);
        amountTokens -= i;
        splits.add(contractSplit);
        itemResult = client.register(contractRemainder.getPackedTransaction(), 5000);
        System.out.println("contractRemainder : " + itemResult + " = " + amountTokens.toString());
        assertEquals(ItemState.APPROVED, itemResult.state);
        assertEquals(amountTokens.toString(), contractRemainder.getStateData().get("amount").toString());
        assertEquals(i.toString(), contractSplit.getStateData().get("amount").toString());
        assertEquals(ItemState.REVOKED, main.node.waitItem(tokenContract.getId(), 5000).state);
        assertEquals(ItemState.APPROVED, main.node.waitItem(contractRemainder.getId(), 5000).state);
        assertEquals(ItemState.APPROVED, main.node.waitItem(contractSplit.getId(), 5000).state);
        tokenContract = contractRemainder;
    }
    // check getContract (default limit 100)
    Binder result = client.getContract(origin);
    List<byte[]> allKeepingIds = result.getListOrThrow("contractIds");
    assertFalse(result.containsKey("packedContract"));
    assertEquals(allKeepingIds.size(), splits.size() + 1);
    final HashId hash = tokenContract.getId();
    assertTrue(allKeepingIds.stream().anyMatch(id -> Arrays.equals(hash.getDigest(), id)));
    splits.forEach(s -> assertTrue(allKeepingIds.stream().anyMatch(id -> Arrays.equals(s.getId().getDigest(), id))));
    // check getContract with limit
    result = client.getContract(origin, 10);
    List<byte[]> keepingIds = result.getListOrThrow("contractIds");
    assertFalse(result.containsKey("packedContract"));
    assertEquals(keepingIds.size(), 10);
    assertTrue(keepingIds.stream().anyMatch(id -> Arrays.equals(hash.getDigest(), id)));
    for (int i = splits.size() - 1; i > splits.size() - 10; i--) {
        final HashId hashSplit = splits.get(i).getId();
        assertTrue(keepingIds.stream().anyMatch(id -> Arrays.equals(hashSplit.getDigest(), id)));
    }
    // set limit in node config
    mm.forEach(x -> x.config.setQueryContractsLimit(20));
    // check getContract again (node-side limit now caps the result at 20)
    result = client.getContract(origin);
    keepingIds = result.getListOrThrow("contractIds");
    assertFalse(result.containsKey("packedContract"));
    assertEquals(keepingIds.size(), 20);
    assertTrue(keepingIds.stream().anyMatch(id -> Arrays.equals(hash.getDigest(), id)));
    for (int i = splits.size() - 1; i > splits.size() - 20; i--) {
        final HashId hashSplit = splits.get(i).getId();
        assertTrue(keepingIds.stream().anyMatch(id -> Arrays.equals(hashSplit.getDigest(), id)));
    }
    // check getContract with a request limit of 30 (still capped at 20 by the node)
    result = client.getContract(origin, 30);
    keepingIds = result.getListOrThrow("contractIds");
    assertFalse(result.containsKey("packedContract"));
    assertEquals(keepingIds.size(), 20);
    assertTrue(keepingIds.stream().anyMatch(id -> Arrays.equals(hash.getDigest(), id)));
    for (int i = splits.size() - 1; i > splits.size() - 20; i--) {
        final HashId hashSplit = splits.get(i).getId();
        assertTrue(keepingIds.stream().anyMatch(id -> Arrays.equals(hashSplit.getDigest(), id)));
    }
    mm.forEach(x -> x.shutdown());
}
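The ids returned under "contractIds" are raw digests rather than HashId objects. A minimal sketch of how a caller might turn them back into HashId instances, reusing only the API shown above plus the HashId.withDigest(byte[]) factory (an assumption, not confirmed by this excerpt):
    // Sketch only, not part of the test above.
    private static List<HashId> fetchKeepingIds(Client client, HashId origin, int limit) throws Exception {
        Binder response = client.getContract(origin, limit);           // same call as in the test
        List<byte[]> rawIds = response.getListOrThrow("contractIds");  // raw digests
        return rawIds.stream()
                .map(HashId::withDigest)                               // assumed HashId factory
                .collect(Collectors.toList());
    }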
use of the Neo4j driver Config in project ongdb by graphfoundation.
the class BoltCausalClusteringIT method shouldPickANewServerToWriteToOnLeaderSwitch.
@Test
public void shouldPickANewServerToWriteToOnLeaderSwitch() throws Throwable {
    // given
    cluster = clusterRule.withNumberOfReadReplicas(0).startCluster();
    CoreClusterMember leader = cluster.awaitLeader();
    CountDownLatch leaderSwitchLatch = new CountDownLatch(1);
    LeaderSwitcher leaderSwitcher = new LeaderSwitcher(cluster, leaderSwitchLatch);
    Config config = Config.build().withLogging(new JULogging(Level.OFF)).toConfig();
    Set<String> seenAddresses = new HashSet<>();
    try (Driver driver = GraphDatabase.driver(leader.routingURI(), basicAuthToken, config)) {
        boolean success = false;
        long deadline = System.currentTimeMillis() + (30 * 1000);
        while (!success) {
            if (System.currentTimeMillis() > deadline) {
                fail("Failed to write to the new leader in time. Addresses seen: " + seenAddresses);
            }
            try (Session session = driver.session(AccessMode.WRITE)) {
                StatementResult result = session.run("CREATE (p:Person)");
                ServerInfo server = result.summary().server();
                seenAddresses.add(server.address());
                success = seenAddresses.size() >= 2;
            } catch (Exception e) {
                Thread.sleep(100);
            }
            /*
             * Having the latch release here ensures that we've done at least one pass through the loop, which means
             * we've completed a connection before the forced master switch.
             */
            if (!seenAddresses.isEmpty() && !success) {
                leaderSwitcher.start();
                leaderSwitchLatch.await();
            }
        }
    } finally {
        leaderSwitcher.stop();
        assertTrue(leaderSwitcher.hadLeaderSwitch());
        assertThat(seenAddresses.size(), greaterThanOrEqualTo(2));
    }
    cluster.shutdown();
}
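The deadline-and-retry loop above can also be expressed as a small standalone helper. A minimal sketch, assuming only driver 1.x API already used in the test (GraphDatabase.driver, AccessMode.WRITE, StatementResult.consume()); routingUri, authToken and config stand in for the values the test obtains from the cluster:
    // Sketch only: retry a write until it lands on the current leader or the deadline passes.
    static void writeWithRetry(String routingUri, AuthToken authToken, Config config) throws Exception {
        try (Driver driver = GraphDatabase.driver(routingUri, authToken, config)) {
            long deadline = System.currentTimeMillis() + 30_000;
            while (true) {
                try (Session session = driver.session(AccessMode.WRITE)) {
                    session.run("CREATE (p:Person)").consume();  // force the write to complete
                    return;                                      // reached the current leader
                } catch (Exception e) {
                    if (System.currentTimeMillis() > deadline) {
                        throw e;                                 // give up after the deadline
                    }
                    Thread.sleep(100);                           // leader switch in progress; retry
                }
            }
        }
    }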
use of the netconf topology Config in project netconf by opendaylight.
the class MountPointEndToEndTest method setupMaster.
private void setupMaster() throws Exception {
    AbstractConcurrentDataBrokerTest dataBrokerTest = newDataBrokerTest();
    masterDataBroker = dataBrokerTest.getDataBroker();
    deviceDOMDataBroker = dataBrokerTest.getDomBroker();
    bindingToNormalized = dataBrokerTest.getDataBrokerTestCustomizer().getAdapterContext().currentSerializer();
    masterSystem = ActorSystem.create(ACTOR_SYSTEM_NAME, ConfigFactory.load().getConfig("Master"));
    masterClusterSingletonServiceProvider = new DOMClusterSingletonServiceProviderImpl();
    masterClusterSingletonServiceProvider.initializeProvider();
    doReturn(masterSystem).when(mockMasterActorSystemProvider).getActorSystem();
    doReturn(MoreExecutors.newDirectExecutorService()).when(mockThreadPool).getExecutor();
    final SchemaResourcesDTO resources = resourceManager.getSchemaResources(
            new NetconfNodeBuilder().setSchemaCacheDirectory(TEST_DEFAULT_SUBDIR).build(), "test");
    resources.getSchemaRegistry().registerSchemaSource(
            id -> Futures.immediateFuture(YangTextSchemaSource.delegateForByteSource(id, topModuleInfo.getYangTextByteSource())),
            PotentialSchemaSource.create(RevisionSourceIdentifier.create(TOP_MODULE_NAME, topModuleInfo.getName().getRevision()),
                    YangTextSchemaSource.class, 1));
    masterNetconfTopologyManager = new NetconfTopologyManager(BASE_SCHEMAS, masterDataBroker, mockRpcProviderRegistry,
            mockActionProviderRegistry, masterClusterSingletonServiceProvider, mockKeepaliveExecutor, mockThreadPool,
            mockMasterActorSystemProvider, eventExecutor, mockClientDispatcher, TOPOLOGY_ID, config, masterMountPointService,
            mockEncryptionService, mockRpcProviderService, deviceActionFactory, resourceManager) {
        @Override
        protected NetconfTopologyContext newNetconfTopologyContext(final NetconfTopologySetup setup,
                final ServiceGroupIdentifier serviceGroupIdent, final Timeout actorResponseWaitTime,
                final DeviceActionFactory deviceActionFact) {
            NetconfTopologyContext context = super.newNetconfTopologyContext(setup, serviceGroupIdent, actorResponseWaitTime, deviceActionFact);
            NetconfTopologyContext spiedContext = spy(context);
            doAnswer(invocation -> {
                final MasterSalFacade spiedFacade = (MasterSalFacade) spy(invocation.callRealMethod());
                doReturn(deviceDOMDataBroker).when(spiedFacade).newDeviceDataBroker();
                masterSalFacadeFuture.set(spiedFacade);
                return spiedFacade;
            }).when(spiedContext).newMasterSalFacade();
            return spiedContext;
        }
    };
    masterNetconfTopologyManager.init();
    verifyTopologyNodesCreated(masterDataBroker);
}
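masterSalFacadeFuture is completed inside the doAnswer above so that the test thread can later get hold of the spied MasterSalFacade. A hedged sketch of how a subsequent test step might consume it (not taken from the original test; assumes the field is a plain Future with a timed get):
    // Sketch only: wait for the intercepted facade, then confirm the stubbed
    // device data broker was actually requested.
    MasterSalFacade facade = masterSalFacadeFuture.get(5, TimeUnit.SECONDS);
    verify(facade, timeout(5000)).newDeviceDataBroker();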
use of the netconf topology Config in project netconf by opendaylight.
the class NetconfTopologyManagerTest method setUp.
@Before
public void setUp() throws Exception {
    AbstractDataBrokerTest dataBrokerTest = new AbstractDataBrokerTest() {
        @Override
        protected Set<YangModuleInfo> getModuleInfos() throws Exception {
            return ImmutableSet.of(BindingReflections.getModuleInfo(NetworkTopology.class),
                    BindingReflections.getModuleInfo(Topology.class));
        }
    };
    dataBrokerTest.setup();
    dataBroker = spy(dataBrokerTest.getDataBroker());
    final DOMRpcProviderService rpcProviderRegistry = mock(DOMRpcProviderService.class);
    final ScheduledThreadPool keepaliveExecutor = mock(ScheduledThreadPool.class);
    final DOMActionProviderService actionProviderRegistry = mock(DOMActionProviderService.class);
    final ThreadPool processingThreadPool = mock(ThreadPool.class);
    final ExecutorService processingService = mock(ExecutorService.class);
    doReturn(processingService).when(processingThreadPool).getExecutor();
    final ActorSystemProvider actorSystemProvider = mock(ActorSystemProvider.class);
    final EventExecutor eventExecutor = mock(EventExecutor.class);
    final NetconfClientDispatcher clientDispatcher = mock(NetconfClientDispatcher.class);
    final DOMMountPointService mountPointService = mock(DOMMountPointService.class);
    final AAAEncryptionService encryptionService = mock(AAAEncryptionService.class);
    final DeviceActionFactory deviceActionFactory = mock(DeviceActionFactory.class);
    final RpcProviderService rpcProviderService = mock(RpcProviderService.class);
    final Config config = new ConfigBuilder().setWriteTransactionIdleTimeout(Uint16.ZERO).build();
    netconfTopologyManager = new NetconfTopologyManager(BASE_SCHEMAS, dataBroker, rpcProviderRegistry, actionProviderRegistry,
            clusterSingletonServiceProvider, keepaliveExecutor, processingThreadPool, actorSystemProvider, eventExecutor,
            clientDispatcher, TOPOLOGY_ID, config, mountPointService, encryptionService, rpcProviderService, deviceActionFactory,
            new DefaultSchemaResourceManager(new DefaultYangParserFactory())) {
        @Override
        protected NetconfTopologyContext newNetconfTopologyContext(final NetconfTopologySetup setup,
                final ServiceGroupIdentifier serviceGroupIdent, final Timeout actorResponseWaitTime,
                final DeviceActionFactory deviceActionFactory) {
            assertEquals(ACTOR_RESPONSE_WAIT_TIME.toJava(), actorResponseWaitTime.duration().toSeconds());
            return Objects.requireNonNull(mockContextMap.get(setup.getInstanceIdentifier()),
                    "No mock context for " + setup.getInstanceIdentifier()).apply(setup);
        }
    };
    doNothing().when(mockListenerReg).close();
    doReturn(mockListenerReg).when(dataBroker).registerDataTreeChangeListener(any(), any());
}
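The last two stubbings prepare the spied broker so that registering the topology listener returns a closable mock registration. A hedged sketch of how a test built on this setup might check the registration lifecycle (assumes NetconfTopologyManager exposes init() and a close() method, which this excerpt does not confirm):
    // Sketch only: init() should register the listener, close() should release it.
    netconfTopologyManager.init();
    verify(dataBroker).registerDataTreeChangeListener(any(), any());
    netconfTopologyManager.close();
    verify(mockListenerReg).close();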
use of the OpenShift image registry operator Config in project kubernetes-client by fabric8io.
the class ImageRegistryOperatorConfigTest method get.
@Test
void get() throws ParseException {
    // Given
    server.expect().get()
            .withPath("/apis/imageregistry.operator.openshift.io/v1/configs/test-get")
            .andReturn(HttpURLConnection.HTTP_OK, createNewConfig("test-get"))
            .once();
    // When
    Config config = client.imageRegistryOperatorConfigs().withName("test-get").get();
    // Then
    assertThat(config).isNotNull().hasFieldOrPropertyWithValue("metadata.name", "test-get");
}
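createNewConfig is a helper defined elsewhere in the test class and not shown here. A hedged guess at its shape, using the standard generated fabric8 builder for this Config type; the body is an assumption, and the real helper presumably also populates the fields that make the method declare ParseException:
    // Sketch only: build a minimal Config resource carrying just the requested name.
    private Config createNewConfig(String name) {
        return new ConfigBuilder()
                .withNewMetadata().withName(name).endMetadata()
                .build();
    }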