Use of alluxio.multi.process.MultiProcessCluster in project alluxio by Alluxio.

The class JournalMigrationIntegrationTest, method migration().

@Test
public void migration() throws Exception {
  MultiProcessCluster cluster = MultiProcessCluster.newBuilder(PortCoordination.JOURNAL_MIGRATION)
      .setClusterName("journalMigration")
      .setNumMasters(3)
      .addProperty(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.UFS.toString())
      .addProperty(PropertyKey.ZOOKEEPER_SESSION_TIMEOUT, "1sec")
      .build();
  try {
    cluster.start();
    FileSystem fs = cluster.getFileSystemClient();
    MetaMasterClient metaClient = new RetryHandlingMetaMasterClient(MasterClientContext
        .newBuilder(ClientContext.create(ServerConfiguration.global()))
        .setMasterInquireClient(cluster.getMasterInquireClient())
        .build());
    for (int i = 0; i < NUM_DIRS; i++) {
      fs.createDirectory(new AlluxioURI("/dir" + i));
    }
    // Take a backup and configure the masters to initialize any freshly formatted journal from it.
    File backupsDir = AlluxioTestDirectory.createTemporaryDirectory("backups");
    AlluxioURI zkBackup = metaClient.backup(BackupPRequest.newBuilder()
        .setTargetDirectory(backupsDir.getAbsolutePath())
        .setOptions(BackupPOptions.newBuilder().setLocalFileSystem(false))
        .build()).getBackupUri();
    cluster.updateMasterConf(PropertyKey.MASTER_JOURNAL_INIT_FROM_BACKUP, zkBackup.toString());
    // Migrate to embedded journal HA.
    cluster.stopMasters();
    cluster.formatJournal();
    cluster.updateDeployMode(DeployMode.EMBEDDED);
    cluster.startMasters();
    assertEquals(NUM_DIRS, fs.listStatus(new AlluxioURI("/")).size());
    // Migrate back to Zookeeper HA.
    cluster.stopMasters();
    cluster.formatJournal();
    cluster.updateDeployMode(DeployMode.ZOOKEEPER_HA);
    cluster.startMasters();
    assertEquals(NUM_DIRS, fs.listStatus(new AlluxioURI("/")).size());
    cluster.notifySuccess();
  } finally {
    cluster.destroy();
  }
}
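The snippet uses a NUM_DIRS constant and JUnit assertions whose declarations are not shown on this page. A minimal sketch of the surrounding scaffolding, assuming a plain JUnit 4 test class and an arbitrary directory count (both are assumptions, not copied from the Alluxio source):

import static org.junit.Assert.assertEquals;

import alluxio.AlluxioURI;
import alluxio.client.file.FileSystem;
import alluxio.multi.process.MultiProcessCluster;
import org.junit.Test;

public class JournalMigrationIntegrationTest {
  // Hypothetical value; the real test defines its own directory count.
  private static final int NUM_DIRS = 10;

  // ... migration() test shown above ...
}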
Use of alluxio.multi.process.MultiProcessCluster in project alluxio by Alluxio.

The class JournalShutdownIntegrationTest, method singleMasterJournalStopIntegration().

@Test
public void singleMasterJournalStopIntegration() throws Exception {
  MultiProcessCluster cluster = MultiProcessCluster.newBuilder(PortCoordination.JOURNAL_STOP_SINGLE_MASTER)
      .setClusterName("singleMasterJournalStopIntegration")
      .setNumWorkers(0)
      .setNumMasters(1)
      .build();
  try {
    cluster.start();
    FileSystem fs = cluster.getFileSystemClient();
    runCreateFileThread(fs);
    cluster.waitForAndKillPrimaryMaster(10 * Constants.SECOND_MS);
    awaitClientTermination();
    cluster.startMaster(0);
    int actualFiles = fs.listStatus(new AlluxioURI(TEST_FILE_DIR)).size();
    int successFiles = mCreateFileThread.getSuccessNum();
    assertTrue(String.format("successFiles: %s, actualFiles: %s", successFiles, actualFiles),
        (successFiles == actualFiles) || (successFiles + 1 == actualFiles));
    cluster.notifySuccess();
  } finally {
    cluster.destroy();
  }
}
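The helpers runCreateFileThread and awaitClientTermination, the mCreateFileThread field, and TEST_FILE_DIR live elsewhere in JournalShutdownIntegrationTest and are not reproduced on this page. A rough sketch of what they could look like; the class shape, path constant, and stop-on-error behavior are assumptions rather than the actual Alluxio implementation:

// Hypothetical scaffolding for the helpers referenced above.
private static final String TEST_FILE_DIR = "/files";
private CreateFileThread mCreateFileThread;

private void runCreateFileThread(FileSystem fs) {
  mCreateFileThread = new CreateFileThread(fs);
  mCreateFileThread.start();
}

private void awaitClientTermination() throws Exception {
  mCreateFileThread.shutdown();
  mCreateFileThread.join();
}

/** Creates files in a loop and counts how many creates the client saw complete. */
private static final class CreateFileThread extends Thread {
  private final FileSystem mFs;
  private volatile boolean mStopped = false;
  private volatile int mSuccessNum = 0;

  CreateFileThread(FileSystem fs) {
    mFs = fs;
  }

  @Override
  public void run() {
    while (!mStopped) {
      try {
        mFs.createFile(new AlluxioURI(TEST_FILE_DIR + "/file" + mSuccessNum)).close();
        mSuccessNum++;
      } catch (Exception e) {
        // The create in flight when the primary master is killed may have been journaled even
        // though the client saw a failure; that is why the assertion above also accepts
        // successFiles + 1 == actualFiles.
        break;
      }
    }
  }

  void shutdown() {
    mStopped = true;
  }

  int getSuccessNum() {
    return mSuccessNum;
  }
}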
Use of alluxio.multi.process.MultiProcessCluster in project alluxio by Alluxio.

The class TriggeredCheckpointTest, method ufsJournal().

@Test
public void ufsJournal() throws Exception {
  int numFiles = 100;
  MultiProcessCluster cluster = MultiProcessCluster.newBuilder(PortCoordination.TRIGGERED_UFS_CHECKPOINT)
      .setClusterName("TriggeredUfsCheckpointTest")
      .addProperty(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.UFS.toString())
      .addProperty(PropertyKey.MASTER_JOURNAL_CHECKPOINT_PERIOD_ENTRIES, String.valueOf(numFiles))
      .setNumMasters(1)
      .setNumWorkers(1)
      .build();
  try {
    cluster.start();
    cluster.waitForAllNodesRegistered(20 * Constants.SECOND_MS);
    // Get enough journal entries
    createFiles(cluster, numFiles);
    // Trigger checkpoint
    Assert.assertEquals(cluster.getMasterAddresses().get(0).getHostname(),
        cluster.getMetaMasterClient().checkpoint());
    String journalLocation = cluster.getJournalDir();
    UfsJournal ufsJournal = new UfsJournal(
        URIUtils.appendPathOrDie(new URI(journalLocation), Constants.FILE_SYSTEM_MASTER_NAME),
        new NoopMaster(""), 0, Collections::emptySet);
    Assert.assertEquals(1, UfsJournalSnapshot.getSnapshot(ufsJournal).getCheckpoints().size());
    validateCheckpointInClusterRestart(cluster);
    cluster.notifySuccess();
  } finally {
    cluster.destroy();
  }
}
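createFiles(cluster, numFiles) is a private helper of TriggeredCheckpointTest that is not shown on this page. A plausible sketch; the path prefix and the absence of custom write options are assumptions:

// Hypothetical sketch of the helper used above; file paths and options are assumed.
private void createFiles(MultiProcessCluster cluster, int numFiles) throws Exception {
  FileSystem fs = cluster.getFileSystemClient();
  for (int i = 0; i < numFiles; i++) {
    // Each create produces journal entries, so numFiles creates are enough to cross
    // MASTER_JOURNAL_CHECKPOINT_PERIOD_ENTRIES (or roll the embedded journal log) and
    // make a checkpoint possible.
    fs.createFile(new AlluxioURI("/file" + i)).close();
  }
}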
Use of alluxio.multi.process.MultiProcessCluster in project alluxio by Alluxio.

The class TriggeredCheckpointTest, method embeddedJournal().

@Test
public void embeddedJournal() throws Exception {
  MultiProcessCluster cluster = MultiProcessCluster.newBuilder(PortCoordination.TRIGGERED_EMBEDDED_CHECKPOINT)
      .setClusterName("TriggeredEmbeddedCheckpointTest")
      .addProperty(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.EMBEDDED.toString())
      .addProperty(PropertyKey.MASTER_JOURNAL_LOG_SIZE_BYTES_MAX, String.valueOf(Constants.KB))
      .setNumMasters(1)
      .setNumWorkers(1)
      .build();
  cluster.start();
  try {
    cluster.waitForAllNodesRegistered(20 * Constants.SECOND_MS);
    // Get enough journal entries
    createFiles(cluster, 100);
    // Trigger checkpoint and check if checkpoint exists
    Assert.assertEquals(cluster.getMasterAddresses().get(0).getHostname(),
        cluster.getMetaMasterClient().checkpoint());
    validateCheckpointInClusterRestart(cluster);
    cluster.notifySuccess();
  } finally {
    cluster.destroy();
  }
}
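Both checkpoint tests finish with validateCheckpointInClusterRestart(cluster), also defined elsewhere in TriggeredCheckpointTest. A sketch of what such a validation could do, restarting the masters and confirming the namespace is recovered; the expected file count and the root listing are assumptions:

// Hypothetical sketch: restart the masters and verify the files created earlier are still
// visible, which implies the checkpoint (plus any remaining log entries) replayed correctly.
private void validateCheckpointInClusterRestart(MultiProcessCluster cluster) throws Exception {
  cluster.stopMasters();
  cluster.startMasters();
  FileSystem fs = cluster.getFileSystemClient();
  // 100 matches the number of files created by createFiles in both tests above.
  Assert.assertEquals(100, fs.listStatus(new AlluxioURI("/")).size());
}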
Use of alluxio.multi.process.MultiProcessCluster in project alluxio by Alluxio.

The class BackwardsCompatibilityJournalGenerator, method main().

/**
 * Generates journal files to be used by the backwards compatibility test. The files are named
 * based on the current version defined in ProjectConstants.VERSION. Run this with each release,
 * and commit the created journal and snapshot into the git repository.
 *
 * @param args no args expected
 */
public static void main(String[] args) throws Exception {
  BackwardsCompatibilityJournalGenerator generator = new BackwardsCompatibilityJournalGenerator();
  new JCommander(generator, args);
  if (!ServerUserState.global().getUser().getName().equals("root")) {
    System.err.printf("Journals must be generated as root so that they can be replayed by root%n");
    System.exit(-1);
  }
  File journalDst = new File(generator.getOutputDirectory(),
      String.format("journal-%s", ProjectConstants.VERSION));
  if (journalDst.exists()) {
    System.err.printf("%s already exists, delete it first%n", journalDst.getAbsolutePath());
    System.exit(-1);
  }
  File backupDst = new File(generator.getOutputDirectory(),
      String.format("backup-%s", ProjectConstants.VERSION));
  if (backupDst.exists()) {
    System.err.printf("%s already exists, delete it first%n", backupDst.getAbsolutePath());
    System.exit(-1);
  }
  MultiProcessCluster cluster = MultiProcessCluster.newBuilder(PortCoordination.BACKWARDS_COMPATIBILITY)
      .setClusterName("BackwardsCompatibility")
      .setNumMasters(1)
      .setNumWorkers(1)
      .addProperty(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.UFS.toString())
      .build();
  try {
    cluster.start();
    cluster.notifySuccess();
    cluster.waitForAllNodesRegistered(10 * Constants.SECOND_MS);
    for (TestOp op : OPS) {
      op.apply(cluster.getClients());
    }
    AlluxioURI backup = cluster.getMetaMasterClient().backup(BackupPRequest.newBuilder()
        .setTargetDirectory(new File(generator.getOutputDirectory()).getAbsolutePath())
        .setOptions(BackupPOptions.newBuilder().setLocalFileSystem(true))
        .build()).getBackupUri();
    FileUtils.moveFile(new File(backup.getPath()), backupDst);
    cluster.stopMasters();
    FileUtils.copyDirectory(new File(cluster.getJournalDir()), journalDst);
  } catch (Throwable t) {
    t.printStackTrace();
  } finally {
    cluster.destroy();
  }
  System.out.printf("Artifacts successfully generated at %s and %s%n",
      journalDst.getAbsolutePath(), backupDst.getAbsolutePath());
}
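The loop over OPS applies a list of TestOp instances that populate the cluster with representative metadata before the backup and journal copy are taken; neither OPS nor TestOp is shown on this page. A minimal sketch of what that contract might look like; the interface shape, the Clients accessor, and the sample operation are assumptions rather than the actual Alluxio definitions:

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch of the TestOp contract iterated above.
public interface TestOp {
  /** Applies one metadata-generating operation against the running cluster. */
  void apply(Clients clients) throws Exception;
}

// Example list with a single operation that journals a directory create.
// Clients#getFileSystemClient() is assumed to expose the cluster's FileSystem client.
public static final List<TestOp> OPS = Arrays.asList(
    clients -> clients.getFileSystemClient().createDirectory(new AlluxioURI("/backcompat-dir")));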