Use of kafka.utils.MockTime in project common-docker by confluentinc.
From the class EmbeddedKafkaCluster, method startBroker:
private void startBroker(int brokerId, String zkConnectString) throws IOException {
    if (brokerId < 0) {
        throw new IllegalArgumentException("broker id must not be negative");
    }
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnectString, ENABLE_CONTROLLED_SHUTDOWN, ENABLE_DELETE_TOPIC, 0,
        INTER_BROKER_SECURITY_PROTOCOL, this.brokerTrustStoreFile, this.brokerSaslProperties,
        ENABLE_PLAINTEXT, ENABLE_SASL_PLAINTEXT, SASL_PLAINTEXT_PORT, ENABLE_SSL, SSL_PORT,
        this.enableSASLSSL, 0, Option.<String>empty(), 1, false,
        NUM_PARTITIONS, DEFAULT_REPLICATION_FACTOR);
    KafkaServer broker = TestUtils.createServer(KafkaConfig.fromProps(props), new MockTime());
    brokersById.put(brokerId, broker);
}
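A matching teardown is not shown in the source. A minimal sketch, assuming the brokersById map above and the standard KafkaServer shutdown API:

// Hypothetical counterpart to startBroker (not in the source): stops a broker
// started above and removes it from the brokersById map.
private void stopBroker(int brokerId) {
    KafkaServer broker = brokersById.remove(brokerId);
    if (broker != null) {
        broker.shutdown();
        broker.awaitShutdown();
    }
}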
Use of kafka.utils.MockTime in project hazelcast by hazelcast.
From the class KafkaTestSupport, method createKafkaCluster:
public void createKafkaCluster() throws IOException {
    System.setProperty("zookeeper.preAllocSize", Integer.toString(128));
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZK_HOST + ':' + zkServer.port();
    ZkClient zkClient = new ZkClient(zkConnect, SESSION_TIMEOUT, CONNECTION_TIMEOUT, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKER_HOST + ":0");
    brokerProps.setProperty("offsets.topic.replication.factor", "1");
    brokerProps.setProperty("offsets.topic.num.partitions", "1");
    // needed to avoid an OOME while running tests, see https://issues.apache.org/jira/browse/KAFKA-3872
    brokerProps.setProperty("log.cleaner.dedupe.buffer.size", Long.toString(2 * 1024 * 1024L));
    brokerProps.setProperty("transaction.state.log.replication.factor", "1");
    brokerProps.setProperty("transaction.state.log.num.partitions", "1");
    brokerProps.setProperty("transaction.state.log.min.isr", "1");
    brokerProps.setProperty("transaction.abort.timed.out.transaction.cleanup.interval.ms", "200");
    brokerProps.setProperty("group.initial.rebalance.delay.ms", "0");
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    brokerPort = TestUtils.boundPort(kafkaServer, SecurityProtocol.PLAINTEXT);
    brokerConnectionString = BROKER_HOST + ':' + brokerPort;
}
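Once the cluster is up, tests connect through brokerConnectionString. A minimal producer sketch (not from the source; the topic name "test-topic" is hypothetical):

Properties producerProps = new Properties();
producerProps.setProperty("bootstrap.servers", brokerConnectionString);
producerProps.setProperty("key.serializer", StringSerializer.class.getName());
producerProps.setProperty("value.serializer", StringSerializer.class.getName());
// Send one record and block until the broker acknowledges it
try (KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
    producer.send(new ProducerRecord<>("test-topic", "key", "value")).get();
}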
Use of kafka.utils.MockTime in project apex-malhar by apache.
From the class EmbeddedKafka, method start:
public void start() throws IOException {
    // Find a free port for the broker
    try {
        ServerSocket serverSocket = new ServerSocket(0);
        BROKERPORT = Integer.toString(serverSocket.getLocalPort());
        serverSocket.close();
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    // Set up ZooKeeper
    zkServer = new EmbeddedZookeeper();
    String zkConnect = BROKERHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);
    // Set up the broker
    cleanupDir();
    Properties props = new Properties();
    props.setProperty("zookeeper.connect", zkConnect);
    props.setProperty("broker.id", "0");
    props.setProperty("log.dirs", KAFKA_PATH);
    props.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
}
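The corresponding stop method is not shown; a plausible sketch, assuming the fields initialized above and the standard shutdown APIs:

public void stop() {
    // Tear down in reverse order of creation: broker first, then ZooKeeper
    kafkaServer.shutdown();
    kafkaServer.awaitShutdown();
    zkClient.close();
    zkServer.shutdown();
}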
Use of kafka.utils.MockTime in project metron by apache.
From the class KafkaComponent, method start:
@Override
public void start() {
    // Set up ZooKeeper
    zookeeperConnectString = topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY);
    zkClient = new ZkClient(zookeeperConnectString, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);
    // Set up the broker
    Properties props = TestUtilsWrapper.createBrokerConfig(0, zookeeperConnectString, brokerPort);
    props.setProperty("zookeeper.connection.timeout.ms", Integer.toString(KAFKA_ZOOKEEPER_TIMEOUT_MS));
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    org.apache.log4j.Level oldLevel = UnitTestHelper.getLog4jLevel(KafkaServer.class);
    UnitTestHelper.setLog4jLevel(KafkaServer.class, org.apache.log4j.Level.OFF);
    // Do not proceed until the broker is up
    TestUtilsWrapper.waitUntilBrokerIsRunning(kafkaServer, "Timed out waiting for RunningAsBroker State", 100000);
    for (Topic topic : getTopics()) {
        try {
            createTopic(topic.name, topic.numPartitions, KAFKA_PROPAGATE_TIMEOUT_MS);
        } catch (InterruptedException e) {
            throw new RuntimeException("Unable to create topic", e);
        }
    }
    UnitTestHelper.setLog4jLevel(KafkaServer.class, oldLevel);
    if (postStartCallback != null) {
        postStartCallback.apply(this);
    }
}
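Topic here is Metron's own small holder type, read through public fields above. Its shape is presumably something like this (assumed, not taken from the project):

// Plausible definition of the Topic holder used in getTopics()
public static class Topic {
    public final String name;
    public final int numPartitions;

    public Topic(String name, int numPartitions) {
        this.name = name;
        this.numPartitions = numPartitions;
    }
}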
Use of kafka.utils.MockTime in project kafka by apache.
From the class QueryableStateIntegrationTest, method shouldBeAbleToQueryFilterState:
@Test
public void shouldBeAbleToQueryFilterState() throws Exception {
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass());
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] keys = { "hello", "goodbye", "welcome", "go", "kafka" };
    final Set<KeyValue<String, Long>> batch1 = new HashSet<>(Arrays.asList(
        new KeyValue<>(keys[0], 1L),
        new KeyValue<>(keys[1], 1L),
        new KeyValue<>(keys[2], 3L),
        new KeyValue<>(keys[3], 5L),
        new KeyValue<>(keys[4], 2L)));
    final Set<KeyValue<String, Long>> expectedBatch1 = new HashSet<>(Collections.singleton(new KeyValue<>(keys[4], 2L)));
    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        batch1,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, LongSerializer.class, new Properties()),
        mockTime);
    final Predicate<String, Long> filterPredicate = (key, value) -> key.contains("kafka");
    final KTable<String, Long> t1 = builder.table(streamOne);
    final KTable<String, Long> t2 = t1.filter(filterPredicate, Materialized.as("queryFilter"));
    t1.filterNot(filterPredicate, Materialized.as("queryFilterNot"));
    t2.toStream().to(outputTopic);
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    startKafkaStreamsAndWaitForRunningState(kafkaStreams);
    waitUntilAtLeastNumRecordProcessed(outputTopic, 1);
    final ReadOnlyKeyValueStore<String, Long> myFilterStore =
        IntegrationTestUtils.getStore("queryFilter", kafkaStreams, keyValueStore());
    final ReadOnlyKeyValueStore<String, Long> myFilterNotStore =
        IntegrationTestUtils.getStore("queryFilterNot", kafkaStreams, keyValueStore());
    for (final KeyValue<String, Long> expectedEntry : expectedBatch1) {
        TestUtils.waitForCondition(() -> expectedEntry.value.equals(myFilterStore.get(expectedEntry.key)), "Cannot get expected result");
    }
    for (final KeyValue<String, Long> batchEntry : batch1) {
        if (!expectedBatch1.contains(batchEntry)) {
            TestUtils.waitForCondition(() -> myFilterStore.get(batchEntry.key) == null, "Cannot get null result");
        }
    }
    for (final KeyValue<String, Long> expectedEntry : expectedBatch1) {
        TestUtils.waitForCondition(() -> myFilterNotStore.get(expectedEntry.key) == null, "Cannot get null result");
    }
    for (final KeyValue<String, Long> batchEntry : batch1) {
        if (!expectedBatch1.contains(batchEntry)) {
            TestUtils.waitForCondition(() -> batchEntry.value.equals(myFilterNotStore.get(batchEntry.key)), "Cannot get expected result");
        }
    }
}
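The mockTime passed to produceKeyValuesSynchronously is typically the embedded cluster's kafka.utils.MockTime, used to timestamp the produced records deterministically. waitUntilAtLeastNumRecordProcessed is a private helper of the test class; a sketch of how it might be implemented with the same test utilities (the consumer group id is an assumption):

private void waitUntilAtLeastNumRecordProcessed(final String topic, final int numRecs) throws Exception {
    // Consume from the output topic until at least numRecs values have arrived
    final Properties config = TestUtils.consumerConfig(
        CLUSTER.bootstrapServers(),
        "queryable-state-consumer", // hypothetical group id
        StringDeserializer.class,
        LongDeserializer.class);
    IntegrationTestUtils.waitUntilMinValuesRecordsReceived(config, topic, numRecs);
}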