Use of org.apache.asterix.common.cluster.ClusterPartition in project asterixdb by apache.
The class ClusterStateManager, method resetClusterPartitionConstraint.
private synchronized void resetClusterPartitionConstraint() {
    ArrayList<String> clusterActiveLocations = new ArrayList<>();
    for (ClusterPartition p : clusterPartitions.values()) {
        if (p.isActive()) {
            clusterActiveLocations.add(p.getActiveNodeId());
        }
    }
    clusterPartitionConstraint =
            new AlgebricksAbsolutePartitionConstraint(clusterActiveLocations.toArray(new String[] {}));
}
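For illustration, a minimal standalone sketch of the same idea: collect the active node IDs from a set of ClusterPartition instances and build an absolute location constraint from them. The class and helper names here are hypothetical, not part of AsterixDB; only the ClusterPartition accessors (isActive, getActiveNodeId) and the AlgebricksAbsolutePartitionConstraint constructor used above are assumed.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.asterix.common.cluster.ClusterPartition;
import org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint;

public final class PartitionConstraintSketch {

    private PartitionConstraintSketch() {
    }

    // Hypothetical helper: derive a location constraint from the currently active partitions.
    public static AlgebricksAbsolutePartitionConstraint buildActiveLocationConstraint(
            Collection<ClusterPartition> partitions) {
        List<String> activeLocations = new ArrayList<>();
        for (ClusterPartition p : partitions) {
            if (p.isActive()) {
                // The active node may differ from the original owner after a failover.
                activeLocations.add(p.getActiveNodeId());
            }
        }
        return new AlgebricksAbsolutePartitionConstraint(activeLocations.toArray(new String[0]));
    }
}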
Use of org.apache.asterix.common.cluster.ClusterPartition in project asterixdb by apache.
The class PropertiesAccessor, method configureNc.
private void configureNc(ConfigManager configManager, String ncId, MutableInt uniquePartitionId)
        throws AsterixException {
    // Now we assign the coredump and txnlog directories for this node.
    // QQQ Default values? Should they be specified here? Or should there
    // be a default.ini? Certainly wherever they are, they should be platform-dependent.
    IApplicationConfig nodeCfg = cfg.getNCEffectiveConfig(ncId);
    coredumpConfig.put(ncId, nodeCfg.getString(NodeProperties.Option.CORE_DUMP_DIR));
    transactionLogDirs.put(ncId, nodeCfg.getString(NodeProperties.Option.TXN_LOG_DIR));
    int partitionId = nodeCfg.getInt(NodeProperties.Option.STARTING_PARTITION_ID);
    if (partitionId != -1) {
        uniquePartitionId.setValue(partitionId);
    } else {
        configManager.set(ncId, NodeProperties.Option.STARTING_PARTITION_ID, uniquePartitionId.getValue());
    }
    // Now we create an array of ClusterPartitions for all the partitions
    // on this NC.
    String[] iodevices = nodeCfg.getStringArray(IODEVICES);
    String storageSubdir = nodeCfg.getString(STORAGE_SUBDIR);
    String[] nodeStores = new String[iodevices.length];
    ClusterPartition[] nodePartitions = new ClusterPartition[iodevices.length];
    for (int i = 0; i < nodePartitions.length; i++) {
        // Construct final storage path from iodevice dir + storage subdirs
        nodeStores[i] = iodevices[i] + File.separator + storageSubdir;
        // Create ClusterPartition instances for this NC.
        ClusterPartition partition = new ClusterPartition(uniquePartitionId.getAndIncrement(), ncId, i);
        ClusterPartition orig = clusterPartitions.put(partition.getPartitionId(), partition);
        if (orig != null) {
            throw AsterixException.create(ErrorCode.DUPLICATE_PARTITION_ID, partition.getPartitionId(), ncId,
                    orig.getNodeId());
        }
        nodePartitions[i] = partition;
    }
    stores.put(ncId, nodeStores);
    nodePartitionsMap.put(ncId, nodePartitions);
}
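To show just the partition bookkeeping in isolation, here is a hedged sketch of that ID-assignment loop: one ClusterPartition per iodevice, numbered from a shared MutableInt counter, with duplicate IDs rejected. The class and method names in the sketch are illustrative only; the assumptions are the ClusterPartition(int, String, int) constructor, getPartitionId(), and getNodeId() seen above.

import java.util.Map;

import org.apache.asterix.common.cluster.ClusterPartition;
import org.apache.commons.lang3.mutable.MutableInt;

public final class PartitionAssignmentSketch {

    private PartitionAssignmentSketch() {
    }

    // Hypothetical helper: register one ClusterPartition per iodevice of an NC,
    // drawing partition ids from a cluster-wide counter.
    public static ClusterPartition[] assignPartitions(String ncId, int iodeviceCount, MutableInt nextPartitionId,
            Map<Integer, ClusterPartition> clusterPartitions) {
        ClusterPartition[] nodePartitions = new ClusterPartition[iodeviceCount];
        for (int i = 0; i < iodeviceCount; i++) {
            ClusterPartition partition = new ClusterPartition(nextPartitionId.getAndIncrement(), ncId, i);
            ClusterPartition orig = clusterPartitions.put(partition.getPartitionId(), partition);
            if (orig != null) {
                throw new IllegalStateException("Duplicate partition id " + partition.getPartitionId() + " on "
                        + ncId + ", already owned by " + orig.getNodeId());
            }
            nodePartitions[i] = partition;
        }
        return nodePartitions;
    }
}

With this shape, two NCs configured with the same starting partition ID would trip the duplicate check on the second registration rather than silently overwriting the first node's partitions.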
Use of org.apache.asterix.common.cluster.ClusterPartition in project asterixdb by apache.
The class MetadataNodeFaultToleranceStrategy, method buildParticipantStartupSequence.
private List<INCLifecycleTask> buildParticipantStartupSequence(String nodeId, SystemState state) {
    final List<INCLifecycleTask> tasks = new ArrayList<>();
    switch (state) {
        case PERMANENT_DATA_LOSS:
            // If the metadata node (or replica) failed and lost its data
            // => Metadata Remote Recovery from standby replica
            tasks.add(getMetadataPartitionRecoveryPlan());
            // Checkpoint after remote recovery to move node to HEALTHY state
            tasks.add(new CheckpointTask());
            break;
        case CORRUPTED:
            // If the metadata node (or replica) failed and started again without losing data => Local Recovery
            LocalRecoveryTask rt = new LocalRecoveryTask(Arrays.asList(clusterManager.getNodePartitions(nodeId))
                    .stream().map(ClusterPartition::getPartitionId).collect(Collectors.toSet()));
            tasks.add(rt);
            break;
        case BOOTSTRAPPING:
        case HEALTHY:
        case RECOVERING:
            break;
        default:
            break;
    }
    tasks.add(new StartReplicationServiceTask());
    final boolean isMetadataNode = nodeId.equals(metadataNodeId);
    if (isMetadataNode) {
        tasks.add(new MetadataBootstrapTask());
    }
    tasks.add(new ExternalLibrarySetupTask(isMetadataNode));
    tasks.add(new ReportMaxResourceIdTask());
    tasks.add(new CheckpointTask());
    tasks.add(new StartLifecycleComponentsTask());
    if (isMetadataNode) {
        tasks.add(new BindMetadataNodeTask(true));
    }
    return tasks;
}
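The CORRUPTED branch above, like the two startup-sequence builders that follow, reduces a node's ClusterPartition array to the set of partition IDs that need local recovery. A small hedged sketch of that conversion (the class and helper names are made up; only the getNodePartitions result type and ClusterPartition::getPartitionId from the snippets are assumed):

import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.asterix.common.cluster.ClusterPartition;

public final class RecoveryPartitionsSketch {

    private RecoveryPartitionsSketch() {
    }

    // Hypothetical helper: the set of partition ids a node must recover locally.
    public static Set<Integer> partitionIdsToRecover(ClusterPartition[] nodePartitions) {
        return Arrays.stream(nodePartitions)
                .map(ClusterPartition::getPartitionId)
                .collect(Collectors.toSet());
    }
}

Arrays.stream(...) skips the Arrays.asList(...).stream() detour used in the snippets without changing what ends up in the set.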
Use of org.apache.asterix.common.cluster.ClusterPartition in project asterixdb by apache.
The class MetadataNodeFaultToleranceStrategy, method buildNonParticipantStartupSequence.
private List<INCLifecycleTask> buildNonParticipantStartupSequence(String nodeId, SystemState state) {
    final List<INCLifecycleTask> tasks = new ArrayList<>();
    if (state == SystemState.CORRUPTED) {
        // need to perform local recovery for node partitions
        LocalRecoveryTask rt = new LocalRecoveryTask(Arrays.asList(clusterManager.getNodePartitions(nodeId))
                .stream().map(ClusterPartition::getPartitionId).collect(Collectors.toSet()));
        tasks.add(rt);
    }
    tasks.add(new ExternalLibrarySetupTask(false));
    tasks.add(new ReportMaxResourceIdTask());
    tasks.add(new CheckpointTask());
    tasks.add(new StartLifecycleComponentsTask());
    return tasks;
}
Use of org.apache.asterix.common.cluster.ClusterPartition in project asterixdb by apache.
The class NoFaultToleranceStrategy, method buildNCStartupSequence.
private List<INCLifecycleTask> buildNCStartupSequence(String nodeId, SystemState state) {
    final List<INCLifecycleTask> tasks = new ArrayList<>();
    if (state == SystemState.CORRUPTED) {
        // need to perform local recovery for node partitions
        LocalRecoveryTask rt = new LocalRecoveryTask(Arrays.asList(clusterManager.getNodePartitions(nodeId))
                .stream().map(ClusterPartition::getPartitionId).collect(Collectors.toSet()));
        tasks.add(rt);
    }
    final boolean isMetadataNode = nodeId.equals(metadataNodeId);
    if (isMetadataNode) {
        tasks.add(new MetadataBootstrapTask());
    }
    tasks.add(new ExternalLibrarySetupTask(isMetadataNode));
    tasks.add(new ReportMaxResourceIdTask());
    tasks.add(new CheckpointTask());
    tasks.add(new StartLifecycleComponentsTask());
    if (isMetadataNode) {
        tasks.add(new BindMetadataNodeTask(true));
    }
    return tasks;
}