Usage of io.camunda.zeebe.broker.Broker in project zeebe (camunda-cloud): class ReaderCloseTest, method shouldDeleteCompactedSegmentsFilesAfterLeaderChange.
// Regression test for https://github.com/camunda/zeebe/issues/7767
// Verifies that after a forced leader change and a subsequent snapshot, the files of
// compacted journal segments are actually deleted on every broker (i.e. no reader
// keeps a deleted segment alive).
@Test
public void shouldDeleteCompactedSegmentsFilesAfterLeaderChange() throws IOException {
  // given
  fillSegments();
  final var leaderId = clusteringRule.getLeaderForPartition(1).getNodeId();
  // pick any broker other than the current leader and force it to become the new leader
  final var followerId =
      clusteringRule.getOtherBrokerObjects(leaderId).stream()
          .findAny()
          .orElseThrow()
          .getConfig()
          .getCluster()
          .getNodeId();
  clusteringRule.forceClusterToHaveNewLeader(followerId);
  // because of https://github.com/camunda/zeebe/issues/8329
  // we need to add another record so we can do a snapshot
  // join() makes sure the record is actually written before we trigger the snapshot;
  // the previous fire-and-forget send() could race the snapshot and flake this test
  clientRule
      .getClient()
      .newPublishMessageCommand()
      .messageName("test")
      .correlationKey("test")
      .send()
      .join();
  // when
  clusteringRule.triggerAndWaitForSnapshots();
  // then
  for (final Broker broker : clusteringRule.getBrokers()) {
    assertThatFilesOfDeletedSegmentsDoesNotExist(broker);
  }
  // sanity check: the leader change we forced above actually happened
  assertThat(leaderId).isNotEqualTo(clusteringRule.getLeaderForPartition(1).getNodeId());
}
Usage of io.camunda.zeebe.broker.Broker in project zeebe (camunda-cloud): class EmbeddedBrokerRule, method startBroker.
/**
 * Starts the embedded broker and blocks until every partition has a leader and, if the
 * embedded gateway is enabled, until the gateway reports a complete and healthy topology.
 *
 * @param listeners additional partition listeners registered on the broker, on top of the
 *     internal {@code LeaderPartitionListener} used to detect leadership
 * @throws RuntimeException if the broker configuration stream cannot be read
 */
public void startBroker(final PartitionListener... listeners) {
  if (brokerCfg == null) {
    // lazily load the configuration on first start; fall back to defaults when the
    // supplier provides no stream
    try (final InputStream configStream = configSupplier.get()) {
      if (configStream == null) {
        brokerCfg = new BrokerCfg();
      } else {
        brokerCfg =
            new TestConfigurationFactory()
                .create(null, "zeebe.broker", configStream, BrokerCfg.class);
      }
      configureBroker(brokerCfg);
    } catch (final IOException e) {
      throw new RuntimeException("Unable to open configuration", e);
    }
  }
  systemContext =
      new SystemContext(brokerCfg, newTemporaryFolder.getAbsolutePath(), controlledActorClock);
  systemContext.getScheduler().start();
  final var additionalListeners = new ArrayList<>(Arrays.asList(listeners));
  // one latch count per partition; counted down once a leader is installed for it
  final CountDownLatch latch = new CountDownLatch(brokerCfg.getCluster().getPartitionsCount());
  additionalListeners.add(new LeaderPartitionListener(latch));
  broker = new Broker(systemContext, springBrokerBridge, additionalListeners);
  broker.start().join();
  try {
    // previously the boolean result of await() was silently discarded, so an elapsed
    // timeout went unnoticed; log it explicitly (mirrors the sibling rule that asserts it)
    final boolean allPartitionsHaveLeaders = latch.await(INSTALL_TIMEOUT, INSTALL_TIMEOUT_UNIT);
    if (!allPartitionsHaveLeaders) {
      LOG.info("Broker was not started within {} {}", INSTALL_TIMEOUT, INSTALL_TIMEOUT_UNIT);
    }
  } catch (final InterruptedException e) {
    LOG.info("Broker was not started in 15 seconds", e);
    Thread.currentThread().interrupt();
  }
  if (brokerCfg.getGateway().isEnable()) {
    // with a gateway enabled, also wait until a client sees the full, healthy topology
    try (final var client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress(NetUtil.toSocketAddressString(getGatewayAddress()))
            .usePlaintext()
            .build()) {
      Awaitility.await("until we have a complete topology")
          .untilAsserted(
              () -> {
                final var topology = client.newTopologyRequest().send().join();
                TopologyAssert.assertThat(topology)
                    .isComplete(
                        brokerCfg.getCluster().getClusterSize(),
                        brokerCfg.getCluster().getPartitionsCount())
                    .isHealthy();
              });
    }
  }
  dataDirectory = broker.getSystemContext().getBrokerConfiguration().getData().getDirectory();
}
Usage of io.camunda.zeebe.broker.Broker in project zeebe (camunda-cloud): class HealthMonitoringTest, method shouldReportUnhealthyWhenRaftInactive.
// Verifies that the broker reports itself unhealthy once the Raft server of its
// partition is stopped (the partition becomes inactive).
@Test
public void shouldReportUnhealthyWhenRaftInactive() {
// given
final Broker leader = embeddedBrokerRule.getBroker();
/* timeouts are selected generously as at the time of this implementation there is a
* 1 minute cycle to update the state
*/
// advance the actor clock on every poll so the periodic health update actually fires
await("Broker is healthy").atMost(Duration.ofMinutes(2)).until(() -> {
embeddedBrokerRule.getClock().addTime(Duration.ofMinutes(1));
return isBrokerHealthy();
});
// when
// stop the Raft server of the first partition to make the partition inactive
// NOTE(review): assumes the embedded broker hosts START_PARTITION_ID — confirm with the rule's setup
final var raftPartition = (RaftPartition) leader.getBrokerContext().getPartitionManager().getPartitionGroup().getPartition(PartitionId.from(PartitionManagerImpl.GROUP_NAME, START_PARTITION_ID));
raftPartition.getServer().stop();
// then
/* timeouts are selected generously as at the time of this implementation there is a
* 1 minute cycle to update the state
*/
// keep advancing the clock until the health monitor notices the inactive partition
waitAtMost(Duration.ofMinutes(2)).until(() -> {
embeddedBrokerRule.getClock().addTime(Duration.ofMinutes(1));
return !isBrokerHealthy();
});
}
Usage of io.camunda.zeebe.broker.Broker in project zeebe (zeebe-io): class EmbeddedBrokerRule, method startBroker.
/**
 * Boots the embedded broker and waits until every partition has elected a leader; when an
 * embedded gateway is running, additionally waits until its broker client observes a leader
 * for partition 1.
 */
public void startBroker() {
  systemContext =
      new SystemContext(brokerCfg, newTemporaryFolder.getAbsolutePath(), controlledActorClock);
  systemContext.getScheduler().start();

  // one count per partition; the listener counts down as each leader is installed
  final CountDownLatch leaderLatch =
      new CountDownLatch(brokerCfg.getCluster().getPartitionsCount());
  broker =
      new Broker(
          systemContext,
          springBrokerBridge,
          Collections.singletonList(new LeaderPartitionListener(leaderLatch)));
  broker.start().join();

  try {
    final boolean hasLeaderPartition = leaderLatch.await(timeout.toMillis(), TimeUnit.MILLISECONDS);
    assertThat(hasLeaderPartition)
        .describedAs("Expected the broker to have a leader of the partition within %s", timeout)
        .isTrue();
  } catch (final InterruptedException e) {
    LOG.info("Timeout. Broker was not started within {}", timeout, e);
    Thread.currentThread().interrupt();
  }

  final EmbeddedGatewayService embeddedGatewayService =
      broker.getBrokerContext().getEmbeddedGatewayService();
  if (embeddedGatewayService != null) {
    // embedded gateway present: block until its client's topology shows a leader for partition 1
    final BrokerClient brokerClient = embeddedGatewayService.get().getBrokerClient();
    waitUntil(
        () -> {
          final BrokerClusterState topology = brokerClient.getTopologyManager().getTopology();
          return topology != null && topology.getLeaderForPartition(1) >= 0;
        });
  }
}
Usage of io.camunda.zeebe.broker.Broker in project zeebe (zeebe-io): class ClusteringRule, method stepDown.
/**
 * Makes the given broker step down from leadership of the given partition and blocks until
 * the step-down has completed.
 *
 * @param broker the broker that should relinquish leadership
 * @param partitionId id of the partition to step down from
 */
public void stepDown(final Broker broker, final int partitionId) {
  final var clusterServices = broker.getBrokerContext().getClusterServices();
  final MemberId localMember = clusterServices.getMembershipService().getLocalMember().id();
  final var partitionGroup = broker.getBrokerContext().getPartitionManager().getPartitionGroup();
  // locate the requested partition among those hosted by this broker
  final var raftPartition =
      partitionGroup.getPartitions().stream()
          .filter(p -> p.members().contains(localMember))
          .filter(p -> p.id().id() == partitionId)
          .map(p -> (RaftPartition) p)
          .findFirst()
          .orElseThrow();
  raftPartition.getServer().stepDown().join();
}
Aggregations