Use of org.apache.helix.manager.zk.ZNRecordSerializer in the project helix by apache:
the class TestZKLiveInstanceData, method beforeClass.
@BeforeClass()
public void beforeClass() throws Exception {
  // Wipe any cluster state left over from a previous run before re-creating it.
  ZkClient client = null;
  try {
    client = new ZkClient(ZK_ADDR);
    client.setZkSerializer(new ZNRecordSerializer());
    String clusterPath = "/" + clusterName;
    if (client.exists(clusterPath)) {
      client.deleteRecursively(clusterPath);
    }
  } finally {
    // Release the ZooKeeper connection even if the cleanup above throws.
    if (client != null) {
      client.close();
    }
  }
  // Create the cluster and register two participant nodes through the CLI entry point.
  ClusterSetup.processCommandLineArgs(getArgs("-zkSvr", ZK_ADDR, "-addCluster", clusterName));
  ClusterSetup.processCommandLineArgs(getArgs("-zkSvr", ZK_ADDR, "-addNode", clusterName, "localhost:54321"));
  ClusterSetup.processCommandLineArgs(getArgs("-zkSvr", ZK_ADDR, "-addNode", clusterName, "localhost:54322"));
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in the project databus by linkedin:
the class ClusterCheckpointPersistenceProvider, method createCluster.
/**
 * Creates a Helix cluster if it does not already exist.
 *
 * <p>Note: this method is not thread-safe, as {@code HelixAdmin.addCluster}
 * appears to fail on concurrent execution within threads.
 *
 * @param zkAddr ZooKeeper connect string of the ensemble hosting the cluster
 * @param clusterName name of the cluster to create
 * @return true if the cluster was created, false otherwise (e.g. it likely already exists)
 */
public static boolean createCluster(String zkAddr, String clusterName) {
  boolean created = false;
  ZkClient zkClient = null;
  try {
    zkClient = new ZkClient(zkAddr, ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
    ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
    admin.addCluster(clusterName, false);
    created = true;
  } catch (HelixException e) {
    // addCluster typically throws when the cluster node already exists. Log the
    // exception as the cause instead of discarding it, so that any *other* Helix
    // failure (connectivity, ACLs, ...) remains diagnosable from the logs.
    LOG.warn("Warn! Cluster might already exist! " + clusterName, e);
  } finally {
    // Always release the ZooKeeper connection, whether or not creation succeeded.
    if (zkClient != null) {
      zkClient.close();
    }
  }
  return created;
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in the project ambry by linkedin:
the class HelixTaskWorkflowManagerTool, method main.
/**
* Runs the cluster wide aggregation tool
* @param args arguments specifying config file. For example: --propsFile /path/AggregationToolConfig
* @throws Exception in case of any error.
*/
/**
 * Runs the cluster wide aggregation tool: starts (or deletes) the configured Helix
 * task workflow on every ZooKeeper endpoint of every datacenter in the layout file.
 * @param args arguments specifying config file. For example: --propsFile /path/AggregationToolConfig
 * @throws Exception in case of any error.
 */
public static void main(String[] args) throws Exception {
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  AggregationToolConfig config = new AggregationToolConfig(verifiableProperties);
  Map<String, ClusterMapUtils.DcZkInfo> dataCenterToZKAddress = ClusterMapUtils.parseDcJsonAndPopulateDcInfo(Utils.readStringFromFile(config.zkLayoutFilePath));
  String clusterName = config.clusterName;
  String workflowName = config.workflowName;
  long recurrentIntervalInMinutes = config.recurrentIntervalInMinutes;
  boolean isDelete = config.deleteSpecifiedWorkflow;
  // A finite interval means the workflow should be scheduled recurrently.
  boolean isRecurrentWorkflow = recurrentIntervalInMinutes != Utils.Infinite_Time;
  for (ClusterMapUtils.DcZkInfo zkInfo : dataCenterToZKAddress.values()) {
    // If there are multiple ZK endpoints in same dc, we trigger stats aggregation for each of them.
    for (String zkAddress : zkInfo.getZkConnectStrs()) {
      ZkClient zkClient = new ZkClient(zkAddress, SESSION_TIMEOUT, CONNECTION_TIMEOUT, new ZNRecordSerializer());
      try {
        TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);
        if (isDelete) {
          try {
            taskDriver.waitToStop(workflowName, TIME_OUT_MILLI_SEC);
            taskDriver.delete(workflowName);
            System.out.println(String.format("Successfully deleted the workflow: %s in cluster %s at %s", workflowName, clusterName, zkAddress));
          } catch (Exception | Error e) {
            System.out.println(String.format("Failed to delete %s. Workflow not found in cluster %s at %s", workflowName, clusterName, zkAddress));
            // Surface the actual failure instead of swallowing it silently.
            System.out.println("Cause: " + e);
          }
        } else {
          Workflow.Builder workflowBuilder = new Workflow.Builder(workflowName);
          try {
            switch(config.taskType) {
              case AGGREGATE_TASK:
                buildAggregationTaskWorkflow(workflowBuilder, config, isRecurrentWorkflow);
                break;
              case DEPRECATED_CONTAINER_CLOUD_SYNC_TASK:
                buildDeprecatedContainerCloudSyncTaskWorkflow(workflowBuilder);
                break;
              default:
                throw new IllegalArgumentException("Invalid task type: " + config.taskType);
            }
            if (isRecurrentWorkflow) {
              workflowBuilder.setScheduleConfig(ScheduleConfig.recurringFromNow(TimeUnit.MINUTES, recurrentIntervalInMinutes));
              workflowBuilder.setExpiry(TimeUnit.MINUTES.toMillis(recurrentIntervalInMinutes));
            }
            Workflow workflow = workflowBuilder.build();
            taskDriver.start(workflow);
            System.out.println(String.format("%s started successfully in cluster %s at %s", workflowName, clusterName, zkAddress));
          } catch (Exception | Error e) {
            System.out.println(String.format("Failed to start %s in cluster %s at %s", workflowName, clusterName, zkAddress));
            // Surface the actual failure instead of swallowing it silently.
            System.out.println("Cause: " + e);
          }
        }
      } finally {
        // Release the ZooKeeper connection for this endpoint; previously each
        // iteration leaked a live client connection.
        zkClient.close();
      }
    }
  }
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in the project ambry by linkedin:
the class HelixVcrPopulateToolTest, method beforeTest.
@Before
public void beforeTest() throws Exception {
  // Parse the VCR Helix config from the in-memory config data.
  // NOTE(review): configData.getBytes() uses the platform default charset; if the
  // config can contain non-ASCII text, StandardCharsets.UTF_8 should be passed — confirm.
  try (InputStream input = new ByteArrayInputStream(configData.getBytes())) {
    config = new ObjectMapper().readValue(input, HelixVcrUtil.VcrHelixConfig.class);
  } catch (IOException ex) {
    // Chain the IOException as the cause so the underlying parse error is not lost.
    throw new IllegalStateException("Could not load config from config data: " + configData, ex);
  }
  // Stand up an in-process ZooKeeper for the source DC and point a Helix client at it.
  srcZkInfo = new com.github.ambry.utils.TestUtils.ZkInfo(TestUtils.getTempDir("helixVcr"), "DC1", (byte) 1, SRC_ZK_SERVER_PORT, true);
  SRC_ZK_CONNECT_STRING = SRC_ZK_SERVER_HOSTNAME + ":" + SRC_ZK_SERVER_PORT;
  zkClient = SharedZkClientFactory.getInstance().buildZkClient(new HelixZkClient.ZkConnectionConfig(SRC_ZK_CONNECT_STRING));
  zkClient.setZkSerializer(new ZNRecordSerializer());
  clusterSetup = new ClusterSetup(zkClient);
  clusterSetup.addCluster(SRC_CLUSTER_NAME, true);
  srcHelixAdmin = new HelixAdminFactory().getHelixAdmin(SRC_ZK_CONNECT_STRING);
  // Register resource "1" with 100 partitions named "0".."99" in the source cluster.
  String resourceName = "1";
  Set<String> partitionSet = new HashSet<>();
  for (int i = 0; i < 100; i++) {
    partitionSet.add(Integer.toString(i));
  }
  IdealState idealState = HelixVcrUtil.buildIdealState(resourceName, partitionSet, config.getIdealStateConfigFields());
  srcHelixAdmin.addResource(SRC_CLUSTER_NAME, resourceName, idealState);
}
Aggregations