Use of kafka.utils.MockTime in project phoenix by apache.
Class PhoenixConsumerIT, method setUp:
@Before
public void setUp() throws IOException, SQLException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    String zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    kafkaServer.startup();
    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties());
    pConsumer = new PhoenixConsumer();
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    conn = DriverManager.getConnection(getUrl(), props);
}
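This setUp starts an in-process Zookeeper, a single Kafka broker driven by MockTime, a test topic, and a Phoenix JDBC connection. The matching cleanup is not shown here; a minimal sketch of an @After method, assuming only the fields used above and not taken from the Phoenix source, could release the resources in reverse order:

@After
public void tearDown() throws SQLException {
    // Hypothetical cleanup: close the JDBC connection first, then stop the broker,
    // then the Zookeeper client and server.
    if (conn != null) {
        conn.close();
    }
    kafkaServer.shutdown();
    kafkaServer.awaitShutdown();
    zkClient.close();
    zkServer.shutdown();
}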
Use of kafka.utils.MockTime in project incubator-gobblin by apache.
Class KafkaTestBase, method startServer:
public static void startServer() throws RuntimeException {
    if (serverStarted && serverClosed) {
        throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
    }
    if (!serverStarted) {
        serverStarted = true;
        zkConnect = TestZKUtils.zookeeperConnect();
        zkServer = new EmbeddedZookeeper(zkConnect);
        zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
        kafkaPort = TestUtils.choosePort();
        Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
        KafkaConfig config = new KafkaConfig(props);
        Time mock = new MockTime();
        kafkaServer = TestUtils.createServer(config, mock);
    }
}
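startServer binds the broker to a port chosen at runtime by TestUtils.choosePort(), so client configurations must be derived from the kafkaPort field. As an illustration only (not part of the Gobblin source), a test could produce a record against this embedded broker roughly as follows, inside a method that declares throws Exception:

Properties producerProps = new Properties();
producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + kafkaPort);
producerProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
producerProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
try (KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
    // "test-topic" is a placeholder; the topic must exist or auto-creation must be enabled.
    producer.send(new ProducerRecord<>("test-topic", "key", "value")).get();
}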
Use of kafka.utils.MockTime in project incubator-rya by apache.
Class EmbeddedKafkaInstance, method startup:
/**
 * Starts the Embedded Kafka and Zookeeper Servers.
 * @throws Exception - If an exception occurs during startup.
 */
protected void startup() throws Exception {
    // Setup the embedded zookeeper
    logger.info("Starting up Embedded Zookeeper...");
    zkServer = new EmbeddedZookeeper();
    zookeperConnect = ZKHOST + ":" + zkServer.port();
    logger.info("Embedded Zookeeper started at: {}", zookeperConnect);
    // setup Broker
    logger.info("Starting up Embedded Kafka...");
    brokerPort = Integer.toString(PortUtils.getRandomFreePort());
    final Properties brokerProps = new Properties();
    brokerProps.setProperty(KafkaConfig$.MODULE$.BrokerIdProp(), "0");
    brokerProps.setProperty(KafkaConfig$.MODULE$.HostNameProp(), BROKERHOST);
    brokerProps.setProperty(KafkaConfig$.MODULE$.PortProp(), brokerPort);
    brokerProps.setProperty(KafkaConfig$.MODULE$.ZkConnectProp(), zookeperConnect);
    brokerProps.setProperty(KafkaConfig$.MODULE$.LogDirsProp(), Files.createTempDirectory(getClass().getSimpleName() + "-").toAbsolutePath().toString());
    brokerProps.setProperty(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), "true");
    final KafkaConfig config = new KafkaConfig(brokerProps);
    final Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    logger.info("Embedded Kafka Server started at: {}:{}", BROKERHOST, brokerPort);
}
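Because startup records the broker location in BROKERHOST and brokerPort rather than a fixed address, a mirror-image shutdown is straightforward. The Rya source for it is not shown here; a hedged sketch of such a method, assuming only the fields initialized above:

protected void shutdown() throws Exception {
    // Hypothetical counterpart to startup(): stop the broker before the Zookeeper it depends on.
    logger.info("Shutting down Embedded Kafka...");
    if (kafkaServer != null) {
        kafkaServer.shutdown();
        kafkaServer.awaitShutdown();
    }
    logger.info("Shutting down Embedded Zookeeper...");
    if (zkServer != null) {
        zkServer.shutdown();
    }
}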
Use of kafka.utils.MockTime in project storm by apache.
Class KafkaUnit, method setUp:
public void setUp() throws Exception {
    // setup ZK
    zookeeper = new TestingServer(true);
    // setup Broker
    kafkaDir = new TmpPath(Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zookeeper.getConnectString());
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", kafkaDir.getPath());
    brokerProps.setProperty("listeners", String.format("PLAINTEXT://%s:%d", KAFKA_HOST, KAFKA_PORT));
    brokerProps.setProperty("offsets.topic.replication.factor", "1");
    KafkaConfig config = new KafkaConfig(brokerProps);
    MockTime mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // setup default Producer
    createProducer();
    kafkaAdminClient = AdminClient.create(Collections.singletonMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_HOST + ":" + KAFKA_PORT));
}
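The AdminClient created at the end of setUp gives the test programmatic topic management over the embedded broker. A short sketch (the helper name is hypothetical, not from the Storm source) of creating a single-partition, single-replica topic with it:

public void createTopic(String topicName) throws Exception {
    // One partition, replication factor 1, matching the single embedded broker.
    NewTopic newTopic = new NewTopic(topicName, 1, (short) 1);
    kafkaAdminClient.createTopics(Collections.singletonList(newTopic)).all().get();
}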
Use of kafka.utils.MockTime in project kafka by apache.
Class CheckpointBench, method setup:
@SuppressWarnings("deprecation")
@Setup(Level.Trial)
public void setup() {
    this.scheduler = new KafkaScheduler(1, "scheduler-thread", true);
    this.brokerProperties = KafkaConfig.fromProps(
        TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect(), true, true, 9092,
            Option.empty(), Option.empty(), Option.empty(),
            true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1));
    this.metrics = new Metrics();
    this.time = new MockTime();
    this.failureChannel = new LogDirFailureChannel(brokerProperties.logDirs().size());
    // Build a log manager over the configured log directories.
    final List<File> files = JavaConverters.seqAsJavaList(brokerProperties.logDirs()).stream().map(File::new).collect(Collectors.toList());
    this.logManager = TestUtils.createLogManager(
        JavaConverters.asScalaBuffer(files),
        LogConfig.apply(),
        new MockConfigRepository(),
        CleanerConfig.apply(1, 4 * 1024 * 1024L, 0.9d, 1024 * 1024, 32 * 1024 * 1024, Double.MAX_VALUE, 15 * 1000, true, "MD5"),
        time,
        ApiVersion.latestVersion());
    scheduler.startup();
    final BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    final MetadataCache metadataCache = MetadataCache.zkMetadataCache(this.brokerProperties.brokerId());
    this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, "");
    this.alterIsrManager = TestUtils.createAlterIsrManager();
    // Assemble the ReplicaManager under benchmark from the components above.
    this.replicaManager = new ReplicaManagerBuilder()
        .setConfig(brokerProperties)
        .setMetrics(metrics)
        .setTime(time)
        .setScheduler(scheduler)
        .setLogManager(logManager)
        .setQuotaManagers(quotaManagers)
        .setBrokerTopicStats(brokerTopicStats)
        .setMetadataCache(metadataCache)
        .setLogDirFailureChannel(failureChannel)
        .setAlterIsrManager(alterIsrManager)
        .build();
    replicaManager.startup();
    // Create numTopics * numPartitions partitions, each with an empty log.
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (int topicNum = 0; topicNum < numTopics; topicNum++) {
        final String topicName = this.topicName + "-" + topicNum;
        for (int partitionNum = 0; partitionNum < numPartitions; partitionNum++) {
            topicPartitions.add(new TopicPartition(topicName, partitionNum));
        }
    }
    OffsetCheckpoints checkpoints = (logDir, topicPartition) -> Option.apply(0L);
    for (TopicPartition topicPartition : topicPartitions) {
        final Partition partition = this.replicaManager.createPartition(topicPartition);
        partition.createLogIfNotExists(true, false, checkpoints, Option.apply(Uuid.randomUuid()));
    }
    replicaManager.checkpointHighWatermarks();
}
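This @Setup method only prepares the ReplicaManager and its partitions; the work being benchmarked is the checkpointing itself. A hedged sketch of what the JMH measurement method could look like (the actual method in CheckpointBench may differ):

@Benchmark
public void measureCheckpointHighWatermarks() {
    // Writes the high-watermark checkpoint file in every configured log directory,
    // covering all partitions created during setup().
    this.replicaManager.checkpointHighWatermarks();
}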