Use of org.apache.curator.test.TestingServer in project hive by apache.
The class TestServiceDiscovery, method setup.
@BeforeClass
public static void setup() throws Exception {
    // Embedded ZooKeeper for the test suite; the no-arg constructor picks a free port.
    server = new TestingServer();
    // Build and start a Curator client pointed at the test server, retrying once on failure.
    client = CuratorFrameworkFactory.builder()
        .connectString(server.getConnectString())
        .retryPolicy(new RetryOneTime(1))
        .build();
    client.start();
}
Use of org.apache.curator.test.TestingServer in project flink by apache.
The class JobClientActorRecoveryITCase, method setup.
@BeforeClass
public static void setup() throws Exception {
// Embedded ZooKeeper shared by all tests in this class; no-arg constructor picks a free port.
zkServer = new TestingServer();
// NOTE(review): Curator's no-arg TestingServer constructor already starts the
// server; this explicit start() looks redundant — confirm against the Curator
// version in use before removing.
zkServer.start();
}
Use of org.apache.curator.test.TestingServer in project hadoop by apache.
The class TestRMStoreCommands, method testRemoveApplicationFromStateStoreCmdForZK.
/**
 * Verifies that {@code ResourceManager.removeApplication} deletes an application
 * znode from the ZK-backed RM state store: an app node is created directly via
 * Curator, the removal command is invoked, and the app root path is checked empty.
 */
@Test
public void testRemoveApplicationFromStateStoreCmdForZK() throws Exception {
StateChangeRequestInfo req = new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// try-with-resources guarantees the embedded ZK server and Curator client close even on failure.
try (TestingServer curatorTestingServer = TestZKRMStateStore.setupCuratorServer();
CuratorFramework curatorFramework = TestZKRMStateStore.setupCuratorFramework(curatorTestingServer)) {
Configuration conf = TestZKRMStateStore.createHARMConf("rm1,rm2", "rm1", 1234, false, curatorTestingServer);
// Start an RM and make it active so the state-store znode hierarchy is created, then stop it.
ResourceManager rm = new MockRM(conf);
rm.start();
rm.getRMContext().getRMAdminService().transitionToActive(req);
rm.close();
// Plant an application node directly in ZK, bypassing the normal submission path.
String appId = ApplicationId.newInstance(System.currentTimeMillis(), 1).toString();
String appRootPath = YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH + "/" + ZKRMStateStore.ROOT_ZNODE_NAME + "/" + RMStateStore.RM_APP_ROOT;
String appIdPath = appRootPath + "/" + appId;
curatorFramework.create().forPath(appIdPath);
// Fixed: message previously read "...<appId>should exist" (missing space).
assertEquals("Application node for " + appId + " should exist", appId, curatorFramework.getChildren().forPath(appRootPath).get(0));
try {
ResourceManager.removeApplication(conf, appId);
} catch (Exception e) {
fail("Exception should not be thrown while removing app from " + "rm state store.");
}
assertTrue("After remove app from store there should be no child nodes" + " in app root path", curatorFramework.getChildren().forPath(appRootPath).isEmpty());
}
}
Use of org.apache.curator.test.TestingServer in project curator by Netflix.
The class ZkPathUtilTest, method testToString.
/**
 * Verifies ZkPathUtil.toString: full-tree rendering, PathFilter-based pruning
 * (hiding a path's children removes descendants but not the path itself), and
 * rendering from a non-root start path.
 */
public void testToString() throws Exception {
// Fixed port so the hard-coded connect string below matches; fails if 4711 is taken.
_zkServer = new TestingServer(4711);
// Fixed: previously close() was only reached on full success, leaking the
// embedded ZooKeeper (and port 4711) when any assertion failed.
try {
_client = ZkTestSystem.createZkClient("localhost:4711");
final String file1 = "/files/file1";
final String file2 = "/files/file2";
final String file3 = "/files/file2/file3";
// createPersistent(..., true) creates missing parent znodes as well.
_client.createPersistent(file1, true);
_client.createPersistent(file2, true);
_client.createPersistent(file3, true);
// Full tree: all three nodes must appear.
String stringRepresentation = ZkPathUtil.toString(_client);
System.out.println(stringRepresentation);
System.out.println("-------------------------");
assertTrue(stringRepresentation.contains("file1"));
assertTrue(stringRepresentation.contains("file2"));
assertTrue(stringRepresentation.contains("file3"));
// Path filtering: hiding file2's children drops file3, but file2 itself still prints.
stringRepresentation = ZkPathUtil.toString(_client, "/", new ZkPathUtil.PathFilter() {
@Override
public boolean showChilds(String path) {
return !file2.equals(path);
}
});
assertTrue(stringRepresentation.contains("file1"));
assertTrue(stringRepresentation.contains("file2"));
assertFalse(stringRepresentation.contains("file3"));
// Start path: rooting the rendering at file2 excludes the sibling file1 subtree.
stringRepresentation = ZkPathUtil.toString(_client, file2, ZkPathUtil.PathFilter.ALL);
assertFalse(stringRepresentation.contains("file1"));
assertTrue(stringRepresentation.contains("file2"));
assertTrue(stringRepresentation.contains("file3"));
} finally {
_zkServer.close();
}
}
Use of org.apache.curator.test.TestingServer in project flink by apache.
The class KafkaTestEnvironmentImpl, method prepare.
/**
 * Boots a single-JVM Kafka test environment: an embedded ZooKeeper (Curator
 * TestingServer), {@code numKafkaServers} Kafka brokers on temp directories,
 * and a standard consumer Properties object pointed at them.
 */
@Override
public void prepare(int numKafkaServers, Properties additionalServerProperties, boolean secureMode) {
//increase the timeout since in Travis ZK connection takes long time for secure connection.
if (secureMode) {
//run only one kafka server to avoid multiple ZK connections from many instances - Travis timeout
numKafkaServers = 1;
zkTimeout = String.valueOf(Integer.parseInt(zkTimeout) * 15);
}
this.additionalServerProperties = additionalServerProperties;
this.secureMode = secureMode;
File tempDir = new File(System.getProperty("java.io.tmpdir"));
tmpZkDir = new File(tempDir, "kafkaITcase-zk-dir-" + (UUID.randomUUID().toString()));
assertTrue("cannot create zookeeper temp dir", tmpZkDir.mkdirs());
// NOTE(review): the "*" here is inconsistent with the zk dir's "-" separator and
// looks like a typo, but it is harmless on POSIX filesystems — confirm before changing.
tmpKafkaParent = new File(tempDir, "kafkaITcase-kafka-dir*" + (UUID.randomUUID().toString()));
assertTrue("cannot create kafka temp dir", tmpKafkaParent.mkdirs());
// One data directory per broker under the shared parent.
tmpKafkaDirs = new ArrayList<>(numKafkaServers);
for (int i = 0; i < numKafkaServers; i++) {
File tmpDir = new File(tmpKafkaParent, "server-" + i);
assertTrue("cannot create kafka temp dir", tmpDir.mkdir());
tmpKafkaDirs.add(tmpDir);
}
zookeeper = null;
brokers = null;
try {
LOG.info("Starting Zookeeper");
// Port -1 lets the TestingServer pick a free port.
zookeeper = new TestingServer(-1, tmpZkDir);
zookeeperConnectionString = zookeeper.getConnectString();
LOG.info("zookeeperConnectionString: {}", zookeeperConnectionString);
LOG.info("Starting KafkaServer");
brokers = new ArrayList<>(numKafkaServers);
for (int i = 0; i < numKafkaServers; i++) {
brokers.add(getKafkaServer(i, tmpKafkaDirs.get(i)));
// Fixed: the local was previously assigned but ignored while
// brokers.get(i).socketServer() was re-evaluated in both branches.
SocketServer socketServer = brokers.get(i).socketServer();
SecurityProtocol protocol = secureMode ? SecurityProtocol.SASL_PLAINTEXT : SecurityProtocol.PLAINTEXT;
brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, socketServer.boundPort(protocol)) + ",";
}
LOG.info("ZK and KafkaServer started.");
} catch (Throwable t) {
t.printStackTrace();
fail("Test setup failed: " + t.getMessage());
}
LOG.info("brokerConnectionString --> {}", brokerConnectionString);
// Standard consumer configuration shared by the Kafka ITCases.
standardProps = new Properties();
standardProps.setProperty("zookeeper.connect", zookeeperConnectionString);
standardProps.setProperty("bootstrap.servers", brokerConnectionString);
standardProps.setProperty("group.id", "flink-tests");
standardProps.setProperty("enable.auto.commit", "false");
standardProps.setProperty("zookeeper.session.timeout.ms", zkTimeout);
standardProps.setProperty("zookeeper.connection.timeout.ms", zkTimeout);
// read from the beginning. (earliest is kafka 0.9 value)
standardProps.setProperty("auto.offset.reset", "earliest");
// make a lot of fetches (MESSAGES MUST BE SMALLER!)
standardProps.setProperty("max.partition.fetch.bytes", "256");
}
Aggregations