Use of org.apache.helix.model.InstanceConfig in project pinot by linkedin.
The class ShowClusterInfoCommand, method execute:
@Override
public boolean execute() throws Exception {
  // Build the set of table names to include, stripping any table-type suffix.
  Set<String> includeTableSet = new HashSet<>();
  String[] includeTables = _tables.split(",");
  for (String includeTable : includeTables) {
    String name = stripTypeFromName(includeTable.trim());
    if (name.length() > 0) {
      includeTableSet.add(name);
    }
  }
  // Build the set of tags to include, stripped the same way.
  Set<String> includeTagSet = new HashSet<>();
  String[] includeTags = _tags.split(",");
  for (String includeTag : includeTags) {
    String name = stripTypeFromName(includeTag.trim());
    if (name.length() > 0) {
      includeTagSet.add(name);
    }
  }
  ClusterInfo clusterInfo = new ClusterInfo();
  clusterInfo.clusterName = _clusterName;
  ZKHelixAdmin zkHelixAdmin = new ZKHelixAdmin(_zkAddress);
  if (!zkHelixAdmin.getClusters().contains(_clusterName)) {
    LOGGER.error("Cluster {} not found in {}.", _clusterName, _zkAddress);
    return false;
  }
  List<String> instancesInCluster = zkHelixAdmin.getInstancesInCluster(_clusterName);
  List<String> tables = zkHelixAdmin.getResourcesInCluster(_clusterName);
  ZkClient zkClient = new ZkClient(_zkAddress);
  zkClient.setZkSerializer(new ZNRecordStreamingSerializer());
  LOGGER.info("Connecting to Zookeeper at: {}", _zkAddress);
  zkClient.waitUntilConnected();
  ZkBaseDataAccessor<ZNRecord> baseDataAccessor = new ZkBaseDataAccessor<>(zkClient);
  ZKHelixDataAccessor zkHelixDataAccessor = new ZKHelixDataAccessor(_clusterName, baseDataAccessor);
  PropertyKey property = zkHelixDataAccessor.keyBuilder().liveInstances();
  List<String> liveInstances = zkHelixDataAccessor.getChildNames(property);
  PropertyKey controllerLeaderKey = zkHelixDataAccessor.keyBuilder().controllerLeader();
  LiveInstance controllerLeaderLiveInstance = zkHelixDataAccessor.getProperty(controllerLeaderKey);
  ControllerInfo controllerInfo = new ControllerInfo();
  controllerInfo.leaderName = controllerLeaderLiveInstance.getId();
  clusterInfo.controllerInfo = controllerInfo;
  // Classify each Helix instance as a server or a broker and record its liveness and tags.
  for (String server : instancesInCluster) {
    if (server.startsWith("Server")) {
      ServerInfo serverInfo = new ServerInfo();
      serverInfo.name = server;
      serverInfo.state = liveInstances.contains(server) ? "ONLINE" : "OFFLINE";
      InstanceConfig config = zkHelixAdmin.getInstanceConfig(_clusterName, server);
      serverInfo.tags = config.getRecord().getListField("TAG_LIST");
      clusterInfo.addServerInfo(serverInfo);
    }
    if (server.startsWith("Broker")) {
      BrokerInfo brokerInfo = new BrokerInfo();
      brokerInfo.name = server;
      brokerInfo.state = liveInstances.contains(server) ? "ONLINE" : "OFFLINE";
      InstanceConfig config = zkHelixAdmin.getInstanceConfig(_clusterName, server);
      brokerInfo.tags = config.getRecord().getListField("TAG_LIST");
      clusterInfo.addBrokerInfo(brokerInfo);
    }
  }
  for (String table : tables) {
    // brokerResource is a Helix housekeeping resource, not a Pinot table.
    if ("brokerResource".equalsIgnoreCase(table)) {
      continue;
    }
    TableInfo tableInfo = new TableInfo();
    IdealState idealState = zkHelixAdmin.getResourceIdealState(_clusterName, table);
    ExternalView externalView = zkHelixAdmin.getResourceExternalView(_clusterName, table);
    Set<String> segmentsFromIdealState = idealState.getPartitionSet();
    tableInfo.tableName = table;
    tableInfo.tag = idealState.getRecord().getSimpleField("INSTANCE_GROUP_TAG");
    String rawTableName = stripTypeFromName(tableInfo.tableName);
    String rawTagName = stripTypeFromName(tableInfo.tag);
    if (!includeTableSet.isEmpty() && !includeTableSet.contains(rawTableName)) {
      continue;
    }
    if (!includeTagSet.isEmpty() && !includeTagSet.contains(rawTagName)) {
      continue;
    }
    // For each segment, record the external-view state on every server the ideal state assigns.
    for (String segment : segmentsFromIdealState) {
      SegmentInfo segmentInfo = new SegmentInfo();
      segmentInfo.name = segment;
      Map<String, String> serverStateMapFromIS = idealState.getInstanceStateMap(segment);
      if (serverStateMapFromIS == null) {
        LOGGER.info("Unassigned segment {} in ideal state", segment);
        serverStateMapFromIS = Collections.emptyMap();
      }
      Map<String, String> serverStateMapFromEV = externalView.getStateMap(segment);
      if (serverStateMapFromEV == null) {
        LOGGER.info("Unassigned segment {} in external view", segment);
        serverStateMapFromEV = Collections.emptyMap();
      }
      for (String serverName : serverStateMapFromIS.keySet()) {
        segmentInfo.segmentStateMap.put(serverName, serverStateMapFromEV.get(serverName));
      }
      tableInfo.addSegmentInfo(segmentInfo);
    }
    clusterInfo.addTableInfo(tableInfo);
  }
  // Dump the collected cluster information as YAML.
  Yaml yaml = new Yaml();
  StringWriter sw = new StringWriter();
  yaml.dump(clusterInfo, sw);
  LOGGER.info(sw.toString());
  return true;
}
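
The filtering above depends on a stripTypeFromName helper that this snippet does not show. A minimal sketch of one plausible implementation, assuming the usual Pinot convention of _OFFLINE/_REALTIME table-type suffixes (the real helper's suffix handling may differ):

private String stripTypeFromName(String name) {
  // Hypothetical reconstruction: drop a trailing Pinot table-type suffix, if any.
  if (name == null) {
    return "";
  }
  if (name.endsWith("_OFFLINE")) {
    return name.substring(0, name.length() - "_OFFLINE".length());
  }
  if (name.endsWith("_REALTIME")) {
    return name.substring(0, name.length() - "_REALTIME".length());
  }
  return name;
}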
Use of org.apache.helix.model.InstanceConfig in project ambry by linkedin.
The class HelixBootstrapUpgradeUtil, method addNewDataNodes:
/**
 * Adds nodes that are present in the static cluster map but not yet present in Helix,
 * ignoring nodes that already exist so that upgrades remain smooth.
 *
 * Replica/partition information is not updated by this method; it is updated when
 * replicas and partitions are added.
 *
 * At this time, node removals are not dealt with.
 */
private void addNewDataNodes() {
  for (Datacenter dc : staticClusterMap.hardwareLayout.getDatacenters()) {
    HelixAdmin dcAdmin = adminForDc.get(dc.getName());
    for (DataNode node : dc.getDataNodes()) {
      String instanceName = getInstanceName(node);
      if (!dcAdmin.getInstancesInCluster(clusterName).contains(instanceName)) {
        InstanceConfig instanceConfig = new InstanceConfig(instanceName);
        instanceConfig.setHostName(node.getHostname());
        instanceConfig.setPort(Integer.toString(node.getPort()));
        // Populate mountPath -> disk information.
        Map<String, Map<String, String>> diskInfos = new HashMap<>();
        for (Disk disk : node.getDisks()) {
          Map<String, String> diskInfo = new HashMap<>();
          diskInfo.put(ClusterMapUtils.DISK_CAPACITY_STR, Long.toString(disk.getRawCapacityInBytes()));
          diskInfo.put(ClusterMapUtils.DISK_STATE, ClusterMapUtils.AVAILABLE_STR);
          // Note: an instance config has to contain, for each disk, the information about the
          // replicas it hosts. This is initialized to the empty string and updated whenever a
          // partition is added to the cluster.
          diskInfo.put(ClusterMapUtils.REPLICAS_STR, "");
          diskInfos.put(disk.getMountPath(), diskInfo);
        }
        // Attach all instance configuration.
        instanceConfig.getRecord().setMapFields(diskInfos);
        if (node.hasSSLPort()) {
          instanceConfig.getRecord().setSimpleField(ClusterMapUtils.SSLPORT_STR, Integer.toString(node.getSSLPort()));
        }
        instanceConfig.getRecord().setSimpleField(ClusterMapUtils.DATACENTER_STR, node.getDatacenterName());
        instanceConfig.getRecord().setSimpleField(ClusterMapUtils.RACKID_STR, Long.toString(node.getRackId()));
        instanceConfig.getRecord().setListField(ClusterMapUtils.SEALED_STR, new ArrayList<String>());
        // Finally, add this node to the DC.
        dcAdmin.addInstance(clusterName, instanceConfig);
      }
    }
    System.out.println("Added all new nodes in datacenter " + dc.getName());
  }
}
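
For illustration, a minimal sketch of reading back what this method writes, assuming instanceName names one of the nodes just added (the field layout follows the constants used above):

InstanceConfig instanceConfig = dcAdmin.getInstanceConfig(clusterName, instanceName);
// Simple fields hold scalar node attributes such as the datacenter name.
String datacenter = instanceConfig.getRecord().getSimpleField(ClusterMapUtils.DATACENTER_STR);
// Each map field is keyed by mount path and describes one disk.
for (Map.Entry<String, Map<String, String>> entry : instanceConfig.getRecord().getMapFields().entrySet()) {
  String mountPath = entry.getKey();
  Map<String, String> diskInfo = entry.getValue();
  System.out.println(mountPath + ": capacity=" + diskInfo.get(ClusterMapUtils.DISK_CAPACITY_STR)
      + ", state=" + diskInfo.get(ClusterMapUtils.DISK_STATE));
}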
Use of org.apache.helix.model.InstanceConfig in project ambry by linkedin.
The class HelixBootstrapUpgradeUtil, method updateInstancesAndGetInstanceNames:
/**
 * Updates the instances that host replicas of this partition with the replica information
 * (including the mount points on which these replicas should reside, which is purely
 * instance-level information).
 * @param dcAdmin the admin for the ZK server on which this operation is to be done.
 * @param partitionName the partition name.
 * @param replicaList the list of replicas of this partition.
 * @param sealed whether the given partition state is sealed.
 * @return an array of Strings containing the names of the instances on which the replicas of this partition reside.
 */
private String[] updateInstancesAndGetInstanceNames(HelixAdmin dcAdmin, String partitionName,
    List<ReplicaId> replicaList, boolean sealed) {
  String[] instances = new String[replicaList.size()];
  for (int i = 0; i < replicaList.size(); i++) {
    Replica replica = (Replica) replicaList.get(i);
    DataNodeId node = replica.getDataNodeId();
    String instanceName = getInstanceName(node);
    instances[i] = instanceName;
    InstanceConfig instanceConfig = dcAdmin.getInstanceConfig(clusterName, instanceName);
    // Append this replica's "partitionId<separator>capacity<delimiter>" entry to the
    // replica string of the disk that hosts it.
    Map<String, String> diskInfo = instanceConfig.getRecord().getMapField(replica.getMountPath());
    String replicasStr = diskInfo.get(ClusterMapUtils.REPLICAS_STR);
    replicasStr += replica.getPartition().getId() + ClusterMapUtils.REPLICAS_STR_SEPARATOR
        + replica.getCapacityInBytes() + ClusterMapUtils.REPLICAS_DELIM_STR;
    diskInfo.put(ClusterMapUtils.REPLICAS_STR, replicasStr);
    instanceConfig.getRecord().setMapField(replica.getMountPath(), diskInfo);
    if (sealed) {
      // Record the sealed partition in the instance's sealed list (copy-on-write).
      List<String> currentSealedPartitions = instanceConfig.getRecord().getListField(ClusterMapUtils.SEALED_STR);
      List<String> newSealedPartitionsList = new ArrayList<>(currentSealedPartitions);
      newSealedPartitionsList.add(partitionName);
      instanceConfig.getRecord().setListField(ClusterMapUtils.SEALED_STR, newSealedPartitionsList);
    }
    dcAdmin.setInstanceConfig(clusterName, instanceName, instanceConfig);
  }
  return instances;
}
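
Each replica entry is appended as the partition id, a separator, the capacity, and a trailing delimiter, so the packed string can be split back apart. A hypothetical inverse parser, assuming the same ClusterMapUtils constants and that neither constant is a regex metacharacter (String.split takes a regex):

private static Map<String, Long> parseReplicasStr(String replicasStr) {
  // Hypothetical helper, not part of the class above: recover partitionId -> capacityInBytes.
  Map<String, Long> partitionToCapacity = new HashMap<>();
  for (String entry : replicasStr.split(ClusterMapUtils.REPLICAS_DELIM_STR)) {
    if (entry.isEmpty()) {
      continue;
    }
    String[] parts = entry.split(ClusterMapUtils.REPLICAS_STR_SEPARATOR);
    partitionToCapacity.put(parts[0], Long.parseLong(parts[1]));
  }
  return partitionToCapacity;
}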
Use of org.apache.helix.model.InstanceConfig in project bookkeeper by apache.
The class HelixStorageController, method addNode:
@Override
public void addNode(String clusterName, Endpoint endpoint, Optional<String> endpointNameOptional) {
  String endpointName = endpointNameOptional.orElse(getEndpointName(endpoint));
  // Adding an instance is idempotent: skip if it is already registered in the cluster.
  if (admin.getInstancesInCluster(clusterName).contains(endpointName)) {
    log.info("Instance {} already exists in cluster {}, skip creating the instance", endpointName, clusterName);
    return;
  }
  log.info("Adding a new instance {} ({}) to the cluster {}.", new Object[] { endpointName, endpoint, clusterName });
  InstanceConfig config = new InstanceConfig(endpointName);
  config.setHostName(endpoint.getHostname());
  config.setPort(Integer.toString(endpoint.getPort()));
  admin.addInstance(clusterName, config);
}
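
A minimal usage sketch, assuming Endpoint is the protobuf message the getters above suggest (hostname and port fields), controller is a HelixStorageController, and the host and cluster names are placeholders:

Endpoint endpoint = Endpoint.newBuilder()
    .setHostname("storage-1.example.com")
    .setPort(4181)
    .build();
// Derive the instance name from the endpoint...
controller.addNode("stream-storage-cluster", endpoint, Optional.empty());
// ...or register it under an explicit name.
controller.addNode("stream-storage-cluster", endpoint, Optional.of("storage-1"));

Calling addNode twice with the same name is safe, since the method returns early when the instance already exists.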
Use of org.apache.helix.model.InstanceConfig in project ambry by linkedin.
The class HelixBootstrapUpgradeUtil, method addDataNodeConfigToHelix:
private void addDataNodeConfigToHelix(String dcName, DataNodeConfig dataNodeConfig,
    PropertyStoreToDataNodeConfigAdapter adapter, InstanceConfigToDataNodeConfigAdapter.Converter converter) {
  // If this is a new instance, it should be added to both InstanceConfig and the PropertyStore.
  if (dataNodeConfigSourceType == PROPERTY_STORE) {
    // When the source type is PROPERTY_STORE, the InstanceConfig only needs the minimum
    // required information (i.e. hostname, port, etc.).
    InstanceConfig instanceConfig = new InstanceConfig(dataNodeConfig.getInstanceName());
    instanceConfig.setHostName(dataNodeConfig.getHostName());
    instanceConfig.setPort(Integer.toString(dataNodeConfig.getPort()));
    adminForDc.get(dcName).addInstance(clusterName, instanceConfig);
  } else {
    adminForDc.get(dcName).addInstance(clusterName, converter.convert(dataNodeConfig));
  }
  if (!adapter.set(dataNodeConfig)) {
    logger.error("[{}] Failed to add config for new node {} in the property store.", dcName.toUpperCase(),
        dataNodeConfig.getInstanceName());
  }
}
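
The minimal InstanceConfig built here mirrors the one in addNewDataNodes above. A hypothetical helper that both call sites could share, assuming hostname and port are the only fields Helix needs to register an instance when the full node metadata lives in the property store:

private static InstanceConfig buildMinimalInstanceConfig(String instanceName, String hostName, int port) {
  // Hypothetical extraction of the duplicated construction; not present in the original class.
  InstanceConfig instanceConfig = new InstanceConfig(instanceName);
  instanceConfig.setHostName(hostName);
  instanceConfig.setPort(Integer.toString(port));
  return instanceConfig;
}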