Use of org.apache.hive.kafka.SingleNodeKafkaCluster in project hive by apache.
The setup method of the QTestMiniClusters class, which creates, starts, and seeds the single-node Kafka broker alongside the other mini clusters used by the qtest driver.
public void setup(QTestArguments testArgs, HiveConf conf, String scriptsDir, String logDir) throws Exception {
  this.shims = ShimLoader.getHadoopShims();
  this.clusterType = testArgs.getClusterType();
  this.testArgs = testArgs;

  setupFileSystem(testArgs.getFsType(), conf);

  this.setup = testArgs.getQTestSetup();
  setup.preTest(conf);

  String uriString = fs.getUri().toString();

  // Druid profiles: stand up a mini Druid cluster, with deep storage and a
  // staging directory created on the test filesystem.
  if (clusterType == MiniClusterType.DRUID_KAFKA || clusterType == MiniClusterType.DRUID_LOCAL
      || clusterType == MiniClusterType.DRUID) {
    final String tempDir = QTestSystemProperties.getTempDir();
    druidCluster = new MiniDruidCluster(
        clusterType == MiniClusterType.DRUID ? "mini-druid" : "mini-druid-kafka",
        logDir, tempDir, setup.zkPort, Utilities.jarFinderGetJar(MiniDruidCluster.class));
    final Path druidDeepStorage = fs.makeQualified(new Path(druidCluster.getDeepStorageDir()));
    fs.mkdirs(druidDeepStorage);
    final Path scratchDir = fs.makeQualified(new Path(QTestSystemProperties.getTempDir(), "druidStagingDir"));
    fs.mkdirs(scratchDir);
    conf.set("hive.druid.working.directory", scratchDir.toUri().getPath());
    druidCluster.init(conf);
    druidCluster.start();
  }

  // Kafka profiles: start a single-node Kafka broker against the qtest
  // ZooKeeper (port 9093 standalone, 9092 when running alongside Druid)
  // and seed the topics the q-files read from.
  if (clusterType == MiniClusterType.KAFKA || clusterType == MiniClusterType.DRUID_KAFKA) {
    kafkaCluster = new SingleNodeKafkaCluster("kafka",
        QTestSystemProperties.getTempDir() + "/kafka-cluster",
        setup.zkPort,
        clusterType == MiniClusterType.KAFKA ? 9093 : 9092);
    kafkaCluster.init(conf);
    kafkaCluster.start();
    kafkaCluster.createTopicWithData("test-topic", new File(scriptsDir, "kafka_init_data.json"));
    kafkaCluster.createTopicWithData("wiki_kafka_csv", new File(scriptsDir, "kafka_init_data.csv"));
    kafkaCluster.createTopicWithData("wiki_kafka_avro_table", getAvroRows());
  }

  // Tez-based profiles: pick up tez-site.xml when a conf dir is given, start
  // a mini LLAP cluster for the profiles that need it, then either a local
  // or a distributed mini Tez cluster.
  String confDir = testArgs.getConfDir();
  if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
    if (confDir != null && !confDir.isEmpty()) {
      conf.addResource(new URL("file://" + new File(confDir).toURI().getPath() + "/tez-site.xml"));
    }
    int numTrackers = 2;
    if (EnumSet.of(MiniClusterType.LLAP, MiniClusterType.LLAP_LOCAL, MiniClusterType.DRUID_LOCAL,
        MiniClusterType.DRUID_KAFKA, MiniClusterType.DRUID, MiniClusterType.KAFKA).contains(clusterType)) {
      llapCluster = LlapItUtils.startAndGetMiniLlapCluster(conf, setup.zooKeeperCluster, confDir);
    }
    if (EnumSet.of(MiniClusterType.LLAP_LOCAL, MiniClusterType.TEZ_LOCAL, MiniClusterType.DRUID_LOCAL)
        .contains(clusterType)) {
      mr = shims.getLocalMiniTezCluster(conf,
          clusterType == MiniClusterType.LLAP_LOCAL || clusterType == MiniClusterType.DRUID_LOCAL);
    } else {
      mr = shims.getMiniTezCluster(conf, numTrackers, uriString,
          EnumSet.of(MiniClusterType.LLAP, MiniClusterType.LLAP_LOCAL, MiniClusterType.DRUID_KAFKA,
              MiniClusterType.DRUID, MiniClusterType.KAFKA).contains(clusterType));
    }
  } else if (clusterType == MiniClusterType.MINI_SPARK_ON_YARN) {
    mr = shims.getMiniSparkCluster(conf, 2, uriString, 1);
  } else if (clusterType == MiniClusterType.MR) {
    mr = shims.getMiniMrCluster(conf, 2, uriString, 1);
  }

  // No mini cluster at all: optionally initialize LLAP IO in-process.
  if (testArgs.isWithLlapIo() && (clusterType == MiniClusterType.NONE)) {
    LOG.info("initializing llap IO");
    LlapProxy.initializeLlapIo(conf);
  }
}
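
For context, a minimal sketch of driving SingleNodeKafkaCluster outside the qtest harness, using only the constructor and lifecycle calls visible above. The data directory, ZooKeeper port (2181), broker port, and data file path are placeholder assumptions, and the final stop() call assumes the usual Hadoop-style service lifecycle, which the snippet above does not show.

import java.io.File;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.kafka.SingleNodeKafkaCluster;

public class KafkaClusterSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Same constructor shape as in QTestMiniClusters#setup: name, broker data
    // directory, ZooKeeper port, broker port. A ZooKeeper instance is assumed
    // to already be listening on the given port.
    SingleNodeKafkaCluster kafka = new SingleNodeKafkaCluster(
        "kafka", "/tmp/kafka-cluster", 2181, 9093);
    kafka.init(conf);
    kafka.start();
    // Seed a topic from a local JSON file, as setup does for "test-topic".
    kafka.createTopicWithData("test-topic", new File("/tmp/kafka_init_data.json"));
    // Assumed teardown; a stop() call is not shown in the snippet above.
    kafka.stop();
  }
}

Note the broker-port choice in setup: 9093 when Kafka is the cluster under test and 9092 when it runs alongside Druid, presumably so the two qtest profiles do not collide on a port.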