use of org.apache.asterix.event.schema.cluster.Node in project asterixdb by apache.
Example from the class MetadataOnlyReplicationStrategy, method from().
/**
 * Builds a metadata-only replication strategy from the cluster description.
 * Requires the cluster to name a metadata node and at least one replica for it.
 *
 * @param cluster the cluster configuration to read from
 * @return a strategy replicating only the metadata node's data
 * @throws HyracksDataException if the metadata node or its replicas are missing/invalid
 */
@Override
public MetadataOnlyReplicationStrategy from(Cluster cluster) throws HyracksDataException {
    final String metadataNodeId = cluster.getMetadataNode();
    if (metadataNodeId == null) {
        throw new RuntimeDataException(ErrorCode.INVALID_CONFIGURATION, "Metadata node must be specified.");
    }
    final Node metadataNode = ClusterProperties.INSTANCE.getNodeById(metadataNodeId);
    if (metadataNode == null) {
        throw new IllegalStateException("Invalid metadata node specified");
    }
    // The metadata node must have at least one configured replica.
    if (cluster.getHighAvailability().getFaultTolerance().getReplica() == null
            || cluster.getHighAvailability().getFaultTolerance().getReplica().getNodeId() == null
            || cluster.getHighAvailability().getFaultTolerance().getReplica().getNodeId().isEmpty()) {
        throw new RuntimeDataException(ErrorCode.INVALID_CONFIGURATION,
                "One or more replicas must be specified for metadata node.");
    }
    // Resolve every configured replica id to a concrete cluster node.
    final Set<Replica> replicaSet = new HashSet<>();
    for (String replicaId : cluster.getHighAvailability().getFaultTolerance().getReplica().getNodeId()) {
        final Node replicaNode = ClusterProperties.INSTANCE.getNodeById(replicaId);
        if (replicaNode == null) {
            throw new RuntimeDataException(ErrorCode.INVALID_CONFIGURATION,
                    "Invalid replica specified: " + replicaId);
        }
        replicaSet.add(new Replica(replicaNode));
    }
    final MetadataOnlyReplicationStrategy strategy = new MetadataOnlyReplicationStrategy();
    strategy.metadataNodeId = metadataNodeId;
    strategy.metadataPrimaryReplica = new Replica(metadataNode);
    strategy.metadataNodeReplicas = replicaSet;
    return strategy;
}
use of org.apache.asterix.event.schema.cluster.Node in project asterixdb by apache.
Example from the class ClusterWorkExecutor, method run().
/**
 * Worker loop: takes batches of cluster-management work from {@code inbox} forever.
 * ADD_NODE requests are acted on immediately (the largest single request wins, they
 * are not additive); REMOVE_NODE requests are only collected here, not executed.
 */
@Override
public void run() {
    while (true) {
        try {
            Set<IClusterManagementWork> workSet = inbox.take();
            int nodesToAdd = 0;
            Set<String> nodesToRemove = new HashSet<>();
            Set<IClusterManagementWork> nodeAdditionRequests = new HashSet<>();
            Set<IClusterManagementWork> nodeRemovalRequests = new HashSet<>();
            for (IClusterManagementWork w : workSet) {
                switch (w.getClusterManagementWorkType()) {
                    case ADD_NODE:
                        // Keep the maximum of the concurrent requests, not their sum.
                        if (nodesToAdd < ((AddNodeWork) w).getNumberOfNodesRequested()) {
                            nodesToAdd = ((AddNodeWork) w).getNumberOfNodesRequested();
                        }
                        nodeAdditionRequests.add(w);
                        break;
                    case REMOVE_NODE:
                        nodesToRemove.addAll(((RemoveNodeWork) w).getNodesToBeRemoved());
                        nodeRemovalRequests.add(w);
                        break;
                }
            }
            Set<Node> addedNodes = new HashSet<>();
            for (int i = 0; i < nodesToAdd; i++) {
                Node node = ClusterStateManager.INSTANCE.getAvailableSubstitutionNode();
                if (node != null) {
                    try {
                        ClusterManagerProvider.getClusterManager().addNode(appCtx, node);
                        addedNodes.add(node);
                        if (LOGGER.isLoggable(Level.INFO)) {
                            LOGGER.info("Added NC at:" + node.getId());
                        }
                    } catch (AsterixException e) {
                        // Log with the cause attached instead of printStackTrace(), so the
                        // failure reaches the configured log destination with a stack trace.
                        if (LOGGER.isLoggable(Level.WARNING)) {
                            LOGGER.log(Level.WARNING, "Unable to add NC at:" + node.getId(), e);
                        }
                    }
                } else {
                    if (LOGGER.isLoggable(Level.WARNING)) {
                        LOGGER.warning("Unable to add NC: no more available nodes");
                    }
                }
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag before abandoning the worker loop.
            Thread.currentThread().interrupt();
            if (LOGGER.isLoggable(Level.SEVERE)) {
                LOGGER.severe("interrupted" + e.getMessage());
            }
            throw new IllegalStateException(e);
        } catch (Exception e) {
            // Keep the loop alive on unexpected failures, but record the full stack trace.
            if (LOGGER.isLoggable(Level.SEVERE)) {
                LOGGER.log(Level.SEVERE, "Unexpected exception in handling cluster event", e);
            }
        }
    }
}
use of org.apache.asterix.event.schema.cluster.Node in project asterixdb by apache.
Example from the class ValidateConfig, method validateReplicationProperties().
/**
 * Validates the data-replication section of the cluster configuration.
 * Checks that the replication port is defined and that all nodes expose the
 * same number of IO devices (a hard requirement for replication).
 *
 * @param cluster the cluster configuration to validate
 * @return true if the replication settings are valid (or replication is disabled)
 */
private boolean validateReplicationProperties(Cluster cluster) {
    boolean valid = true;
    // If replication is disabled, no need to validate the settings.
    if (cluster.getHighAvailability() != null && cluster.getHighAvailability().getDataReplication() != null) {
        if (cluster.getHighAvailability().getDataReplication().getReplicationPort() == null
                || cluster.getHighAvailability().getDataReplication().getReplicationPort().toString()
                        .length() == 0) {
            valid = false;
            LOGGER.fatal("Replication data port not defined for data replication. " + ERROR);
        }
        // NOTE(review): the length()==0 arm can never be true for String.valueOf(int);
        // only the null check is effective here. A missing timeout is a warning only,
        // since a default of 60 seconds applies.
        if (cluster.getHighAvailability().getDataReplication().getReplicationTimeOut() == null
                || String.valueOf(
                        cluster.getHighAvailability().getDataReplication().getReplicationTimeOut().intValue())
                        .length() == 0) {
            LOGGER.warn("Replication maximum wait time not defined. Using default value (60 seconds) " + WARNING);
        }
        // Validate that all nodes have the same number of IO devices.
        Set<Integer> ioDevicesCount = new HashSet<>();
        for (int i = 0; i < cluster.getNode().size(); i++) {
            Node node = cluster.getNode().get(i);
            // A per-node iodevices setting overrides the cluster-wide one.
            String iodevices = node.getIodevices() != null ? node.getIodevices() : cluster.getIodevices();
            ioDevicesCount.add(commaCount(iodevices));
            if (ioDevicesCount.size() > 1) {
                valid = false;
                LOGGER.fatal("Replication requires all nodes to have the same number of IO devices." + ERROR);
                break;
            }
        }
    }
    return valid;
}

/**
 * Returns the number of commas in {@code iodevices}, i.e. (device count - 1).
 * Only equality of this count across nodes matters, so the off-by-one is harmless.
 */
private static int commaCount(String iodevices) {
    return iodevices.length() - iodevices.replace(",", "").length();
}
use of org.apache.asterix.event.schema.cluster.Node in project asterixdb by apache.
Example from the class ReplicationProperties, method getDataReplicationPort().
/**
 * Returns the data-replication port for the given node.
 * A per-node port overrides the cluster-wide setting; unknown nodes get the default.
 *
 * @param nodeId the id of the node to look up
 * @return the replication port to use for that node
 */
public int getDataReplicationPort(String nodeId) {
    final Node node = ClusterProperties.INSTANCE.getNodeById(nodeId);
    if (node == null) {
        // Node not found in the cluster description: fall back to the built-in default.
        return REPLICATION_DATAPORT_DEFAULT;
    }
    if (node.getReplicationPort() != null) {
        return node.getReplicationPort().intValue();
    }
    final Cluster cluster = ClusterProperties.INSTANCE.getCluster();
    return cluster.getHighAvailability().getDataReplication().getReplicationPort().intValue();
}
use of org.apache.asterix.event.schema.cluster.Node in project asterixdb by apache.
Example from the class VerificationUtil, method getAsterixRuntimeState().
/**
 * Probes the runtime state of an Asterix instance by running the verification
 * script against every node and parsing its colon-separated output lines
 * (expected shape: TYPE:HOST:NAME:PID).
 *
 * @param instance the instance whose CC/NC processes are checked
 * @return the observed runtime state (running processes, failed NCs, CC liveness)
 * @throws Exception if the script cannot be executed
 */
public static AsterixRuntimeState getAsterixRuntimeState(AsterixInstance instance) throws Exception {
    Cluster cluster = instance.getCluster();
    List<String> args = new ArrayList<>();
    args.add(instance.getName());
    args.add(cluster.getMasterNode().getClusterIp());
    for (Node node : cluster.getNode()) {
        args.add(node.getClusterIp());
        args.add(instance.getName() + "_" + node.getId());
    }
    // Give the processes a moment to settle before probing their status.
    Thread.sleep(2000);
    String output = AsterixEventServiceUtil.executeLocalScript(VERIFY_SCRIPT_PATH, args);
    boolean ccRunning = true;
    List<String> failedNCs = new ArrayList<>();
    List<ProcessInfo> processes = new ArrayList<>();
    for (String line : output.split("\n")) {
        String[] infoFields = line.split(":");
        try {
            int pid = Integer.parseInt(infoFields[3]);
            String nodeid;
            if (infoFields[0].equals("NC")) {
                // NC names have the form <instance>_<nodeid>; recover the node id.
                nodeid = infoFields[2].split("_")[1];
            } else {
                nodeid = cluster.getMasterNode().getId();
            }
            processes.add(new ProcessInfo(infoFields[0], infoFields[1], nodeid, pid));
        } catch (Exception e) {
            // A line without a parsable PID marks a dead process. Guard the field
            // accesses so a short/malformed line cannot abort the whole scan
            // (the original could throw ArrayIndexOutOfBounds on infoFields[1]).
            if (infoFields[0].equalsIgnoreCase("CC")) {
                ccRunning = false;
            } else if (infoFields.length > 1) {
                failedNCs.add(infoFields[1]);
            }
        }
    }
    return new AsterixRuntimeState(processes, failedNCs, ccRunning);
}
Aggregations