Usage of org.apache.asterix.transaction.management.resource.PersistentLocalResourceRepository in the Apache asterixdb project: class RemoteRecoveryManager, method takeoverPartitons.
@Override
public void takeoverPartitons(Integer[] partitions) throws IOException, ACIDException {
    /*
     * TODO even though the takeover is always expected to succeed, the CC should
     * be notified if any failure occurs during the takeover.
     */
    // Replay any replica logs for the requested partitions before activating them.
    final Set<Integer> requestedPartitions = new HashSet<>(Arrays.asList(partitions));
    replayReplicaPartitionLogs(requestedPartitions, false);
    // Mark each recovered partition as active on this node.
    final PersistentLocalResourceRepository localResourceRepo =
            (PersistentLocalResourceRepository) runtimeContext.getLocalResourceRepository();
    for (Integer partitionId : partitions) {
        localResourceRepo.addActivePartition(partitionId);
    }
}
Usage of org.apache.asterix.transaction.management.resource.PersistentLocalResourceRepository in the Apache asterixdb project: class RemoteRecoveryManager, method doRemoteRecoveryPlan.
//TODO refactor common code between remote recovery and failback process
/**
 * Executes the given remote recovery plan: wipes local storage, fetches indexes
 * metadata and LSM components from each remote replica in the plan, then restarts
 * the log manager past the maximum LSN seen on those replicas. Retries on
 * {@link IOException} up to the configured maximum number of attempts.
 *
 * @param recoveryPlan map of remote replica id to the set of partitions to recover from it
 * @throws HyracksDataException declared by the interface; unexpected failures surface as
 *         {@link IllegalStateException} once all retry attempts are exhausted
 */
@Override
public void doRemoteRecoveryPlan(Map<String, Set<Integer>> recoveryPlan) throws HyracksDataException {
    int maxRecoveryAttempts = replicationProperties.getMaxRemoteRecoveryAttempts();
    PersistentLocalResourceRepository resourceRepository =
            (PersistentLocalResourceRepository) runtimeContext.getLocalResourceRepository();
    IDatasetLifecycleManager datasetLifeCycleManager = runtimeContext.getDatasetLifecycleManager();
    ILogManager logManager = runtimeContext.getTransactionSubsystem().getLogManager();
    // Track the most recent failure so the exhaustion exception preserves its cause.
    IOException lastFailure = null;
    while (true) {
        //start recovery steps
        try {
            if (maxRecoveryAttempts <= 0) {
                // Avoid an infinite loop in case of unexpected behavior; chain the
                // last failure (if any) so the root cause is not lost.
                throw new IllegalStateException("Failed to perform remote recovery.", lastFailure);
            }
            /*** Prepare for Recovery ***/
            //1. clean any memory data that could've existed from previous failed recovery attempt
            datasetLifeCycleManager.closeAllDatasets();
            //2. remove any existing storage data and initialize storage metadata
            resourceRepository.deleteStorageData(true);
            resourceRepository.initializeNewUniverse(ClusterProperties.INSTANCE.getStorageDirectoryName());
            /*** Start Recovery Per Lost Replica ***/
            //3. request indexes metadata and LSM components from each remote replica in the plan
            for (Entry<String, Set<Integer>> remoteReplica : recoveryPlan.entrySet()) {
                String replicaId = remoteReplica.getKey();
                Set<Integer> partitionsToRecover = remoteReplica.getValue();
                replicationManager.requestReplicaFiles(replicaId, partitionsToRecover, new HashSet<String>());
            }
            //4. get max LSN from selected remote replicas
            long maxRemoteLSN = replicationManager.getMaxRemoteLSN(recoveryPlan.keySet());
            //5. force LogManager to start from a partition > maxLSN in selected remote replicas
            logManager.renewLogFilesAndStartFromLSN(maxRemoteLSN);
            break;
        } catch (IOException e) {
            lastFailure = e;
            if (LOGGER.isLoggable(Level.WARNING)) {
                LOGGER.log(Level.WARNING, "Failed during remote recovery. Attempting again...", e);
            }
            maxRecoveryAttempts--;
        }
    }
}
Aggregations