Use of com.github.nosan.embedded.cassandra.Cassandra in project embedded-cassandra by nosan.
The class CassandraExamples, method quickStart.
private void quickStart() {
    // tag::quick-start[]
    Cassandra cassandra = new CassandraBuilder().build();
    cassandra.start();
    try {
        Settings settings = cassandra.getSettings();
        try (CqlSession session = CqlSession.builder()
                .addContactPoint(new InetSocketAddress(settings.getAddress(), settings.getPort()))
                .withLocalDatacenter("datacenter1")
                .build()) {
            CqlScript.ofClassPath("schema.cql").forEachStatement(session::execute);
        }
    } finally {
        cassandra.stop();
    }
    // end::quick-start[]
}
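The example above runs with the builder defaults. CassandraBuilder also allows pinning a specific Cassandra version; the following is a minimal sketch of the same quick start with a pinned version, where the version string and the probe query are illustrative values, not taken from the example above.

import java.net.InetSocketAddress;

import com.datastax.oss.driver.api.core.CqlSession;
import com.github.nosan.embedded.cassandra.Cassandra;
import com.github.nosan.embedded.cassandra.CassandraBuilder;
import com.github.nosan.embedded.cassandra.Settings;

public class PinnedVersionQuickStart {

    public static void main(String[] args) {
        // assumption: "4.0.10" is only an example version string
        Cassandra cassandra = new CassandraBuilder().version("4.0.10").build();
        cassandra.start();
        try {
            Settings settings = cassandra.getSettings();
            try (CqlSession session = CqlSession.builder()
                    .addContactPoint(new InetSocketAddress(settings.getAddress(), settings.getPort()))
                    .withLocalDatacenter("datacenter1")
                    .build()) {
                // simple probe instead of loading schema.cql
                System.out.println(session.execute("SELECT release_version FROM system.local")
                        .one().getString("release_version"));
            }
        } finally {
            cassandra.stop();
        }
    }
}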
Use of com.github.nosan.embedded.cassandra.Cassandra in project esop by instaclustr.
The class LocalBackupTest, method testUploadTracker.
@Test
public void testUploadTracker() throws Exception {
    final String snapshotName = UUID.randomUUID().toString();
    final String snapshotName2 = UUID.randomUUID().toString();
    List<Path> dataDirs = Arrays.asList(cassandraDir.toAbsolutePath().resolve("data").resolve("data"),
                                        cassandraDir.toAbsolutePath().resolve("data").resolve("data2"),
                                        cassandraDir.toAbsolutePath().resolve("data").resolve("data3"));
    final BackupOperationRequest backupOperationRequest = getBackupOperationRequestForTracker(snapshotName, "test,test2", dataDirs);
    final BackupOperationRequest backupOperationRequest2 = getBackupOperationRequestForTracker(snapshotName2, "test", dataDirs);
    UploadTracker uploadTracker = null;
    Cassandra cassandra = null;
    try {
        cassandra = getCassandra(cassandraDir, CASSANDRA_VERSION);
        cassandra.start();
        try (CqlSession session = CqlSession.builder().build()) {
            assertEquals(populateDatabase(session).size(), NUMBER_OF_INSERTED_ROWS);
        }
        final AtomicBoolean wait = new AtomicBoolean(true);
        final ListeningExecutorService finisher = new Executors.FixedTasksExecutorSupplier().get(10);
        uploadTracker = new UploadTracker(finisher, operationsService, new HashSpec()) {
            // override for testing purposes
            @Override
            public UploadUnit constructUnitToSubmit(final Backuper backuper,
                                                    final ManifestEntry manifestEntry,
                                                    final AtomicBoolean shouldCancel,
                                                    final String snapshotTag,
                                                    final HashSpec hashSpec) {
                return new TestingUploadUnit(wait, backuper, manifestEntry, shouldCancel, snapshotTag, hashSpec);
            }
        };
        final LocalFileBackuper backuper = new LocalFileBackuper(backupOperationRequest);
        new TakeSnapshotOperation(jmxService,
                                  new TakeSnapshotOperationRequest(backupOperationRequest.entities, backupOperationRequest.snapshotTag),
                                  cassandraVersionProvider).run();
        new TakeSnapshotOperation(jmxService,
                                  new TakeSnapshotOperationRequest(backupOperationRequest2.entities, backupOperationRequest2.snapshotTag),
                                  cassandraVersionProvider).run();
        final Snapshots snapshots = Snapshots.parse(dataDirs);
        final Optional<Snapshot> snapshot = snapshots.get(backupOperationRequest.snapshotTag);
        final Optional<Snapshot> snapshot2 = snapshots.get(backupOperationRequest2.snapshotTag);
        assert snapshot.isPresent();
        assert snapshot2.isPresent();
        Set<String> providers = Stream.of("file").collect(Collectors.toSet());
        final BackupOperation backupOperation = new BackupOperation(operationCoordinator, providers, backupOperationRequest);
        final BackupOperation backupOperation2 = new BackupOperation(operationCoordinator, providers, backupOperationRequest2);
        final List<ManifestEntry> manifestEntries = Manifest.from(snapshot.get()).getManifestEntries();
        final List<ManifestEntry> manifestEntries2 = Manifest.from(snapshot2.get()).getManifestEntries();
        Session<UploadUnit> session = uploadTracker.submit(backuper, backupOperation, manifestEntries,
                                                           backupOperation.request.snapshotTag,
                                                           backupOperation.request.concurrentConnections);
        final int submittedUnits1 = uploadTracker.submittedUnits.intValue();
        Assert.assertEquals(manifestEntries.size(), submittedUnits1);
        final Session<UploadUnit> session2 = uploadTracker.submit(backuper, backupOperation2, manifestEntries2,
                                                                  backupOperation.request.snapshotTag,
                                                                  backupOperation.request.concurrentConnections);
        final int submittedUnits2 = uploadTracker.submittedUnits.intValue();
        // even though we submitted a second session, the number of submitted units does not change,
        // because session2 wants to upload "test", which is already going to be uploaded by session1;
        // we have effectively submitted only what should be submitted, with no duplicates,
        // so it is as if "test" from session2 was not submitted at all
        Assert.assertEquals(submittedUnits1, submittedUnits2);
        Assert.assertEquals(manifestEntries.size(), uploadTracker.submittedUnits.intValue());
        // however, we have submitted two sessions in total
        Assert.assertEquals(2, uploadTracker.submittedSessions.intValue());
        // now let the uploads proceed
        wait.set(false);
        session.waitUntilConsideredFinished();
        session2.waitUntilConsideredFinished();
        Assert.assertTrue(session.isConsideredFinished());
        Assert.assertTrue(session.isSuccessful());
        Assert.assertTrue(session.getFailedUnits().isEmpty());
        Assert.assertEquals(uploadTracker.submittedUnits.intValue(), session.getUnits().size());
        Assert.assertTrue(session2.isConsideredFinished());
        Assert.assertTrue(session2.isSuccessful());
        Assert.assertTrue(session2.getFailedUnits().isEmpty());
        Assert.assertTrue(submittedUnits2 > session2.getUnits().size());
        for (final UploadUnit uploadUnit : session2.getUnits()) {
            Assert.assertTrue(session.getUnits().contains(uploadUnit));
        }
        Assert.assertTrue(uploadTracker.getUnits().isEmpty());
        uploadTracker.removeSession(session);
        uploadTracker.removeSession(session2);
        Assert.assertTrue(session.getUnits().isEmpty());
        Assert.assertTrue(session2.getUnits().isEmpty());
    } catch (final Exception ex) {
        ex.printStackTrace();
        throw ex;
    } finally {
        new ClearSnapshotOperation(jmxService, new ClearSnapshotOperationRequest(backupOperationRequest.snapshotTag)).run();
        if (cassandra != null) {
            cassandra.stop();
        }
        uploadTracker.stopAsync();
        uploadTracker.awaitTerminated(1, MINUTES);
        uploadTracker.stopAsync();
        uploadTracker.awaitTerminated(1, MINUTES);
        FileUtils.deleteDirectory(Paths.get(target(backupOperationRequest.storageLocation.bucket)));
    }
}
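The test above keeps its upload units parked on the shared wait flag until the assertions about submitted units and sessions have run, and only then flips the flag so the uploads can proceed. Below is a minimal, self-contained sketch of that gating pattern in plain Java, using a hypothetical GatedTask rather than esop's actual TestingUploadUnit.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.LockSupport;

// Hypothetical stand-in for TestingUploadUnit: the task spins on a shared flag and performs
// its work only after the test flips the flag, so the test can first assert on the
// "submitted but not yet executed" state.
final class GatedTask implements Runnable {

    private final AtomicBoolean wait;
    private final Runnable delegate;

    GatedTask(final AtomicBoolean wait, final Runnable delegate) {
        this.wait = wait;
        this.delegate = delegate;
    }

    @Override
    public void run() {
        while (wait.get()) {
            // park briefly until the test releases the gate
            LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(10));
        }
        delegate.run();
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean wait = new AtomicBoolean(true);
        ExecutorService executor = Executors.newFixedThreadPool(2);
        executor.submit(new GatedTask(wait, () -> System.out.println("upload 1 ran")));
        executor.submit(new GatedTask(wait, () -> System.out.println("upload 2 ran")));
        // ... assertions about submitted-but-not-started work would go here ...
        wait.set(false); // the equivalent of wait.set(false) in the test: let the work proceed
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}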
Use of com.github.nosan.embedded.cassandra.Cassandra in project esop by instaclustr.
The class AbstractBackupTest, method liveBackupWithRestoreOnDifferentSchema.
public void liveBackupWithRestoreOnDifferentSchema(final String[][] arguments, final String cassandraVersion) throws Exception {
    Cassandra cassandra = getCassandra(cassandraDir, cassandraVersion);
    cassandra.start();
    waitForCql();
    try (CqlSession session = CqlSession.builder().build()) {
        createTable(session, KEYSPACE, TABLE);
        createTable(session, KEYSPACE_2, TABLE_2);
        // after this, table1 and table2 will contain 2 rows each
        insert(2, session, new ArrayList<String[]>() {
            {
                add(new String[] { KEYSPACE, TABLE });
                add(new String[] { KEYSPACE_2, TABLE_2 });
            }
        });
        // first backup
        Esop.mainWithoutExit(arguments[0]);
        String firstSchemaVersion = new CassandraSchemaVersion(new CassandraJMXServiceImpl(new CassandraJMXConnectionInfo())).act();
        // create a third table; this way, the Cassandra schema will change
        createTable(session, KEYSPACE_3, TABLE_3);
        waitUntilSchemaChanged(firstSchemaVersion);
        // after this, table1 and table2 will contain 4 rows each
        // and table3 just 2
        insert(2, session, new ArrayList<String[]>() {
            {
                add(new String[] { KEYSPACE, TABLE });
                add(new String[] { KEYSPACE_2, TABLE_2 });
                add(new String[] { KEYSPACE_3, TABLE_3 });
            }
        });
        // second backup
        Esop.mainWithoutExit(arguments[1]);
        // here we want to restore into the first snapshot even though the schema has changed, because we added a third table
        // (keep in mind that we have not changed the schema of any existing table, only the Cassandra schema as a whole)
        // restore into the first backup
        // download
        Esop.mainWithoutExit(arguments[2]);
        // truncate
        Esop.mainWithoutExit(arguments[3]);
        // after truncating, we see that we have truncated just the two tables which were
        // in the first snapshot, not the third one
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
        // import
        Esop.mainWithoutExit(arguments[4]);
        // cleanup
        Esop.mainWithoutExit(arguments[5]);
        // verify:
        // table1 and table2 contain 2 rows each (as we restored them from the first snapshot) and table3 still contains 2
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
    } finally {
        cassandra.stop();
        FileUtils.deleteDirectory(cassandraDir);
        deleteDirectory(Paths.get(target("backup1")));
    }
}
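dumpTableAndAssertRowCount is the helper this test relies on to verify the restored state, and its implementation is not shown on this page. The following is a hypothetical sketch of such a row-count assertion, assuming the DataStax Java driver 4 and JUnit.

import java.util.List;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;

import static org.junit.Assert.assertEquals;

// Hypothetical helper; the real dumpTableAndAssertRowCount in AbstractBackupTest may differ.
private static void assertRowCount(final CqlSession session, final String keyspace, final String table, final int expected) {
    final List<Row> rows = session.execute(String.format("SELECT * FROM %s.%s", keyspace, table)).all();
    // dump the rows for debugging, then assert on the count
    rows.forEach(row -> System.out.println(row.getFormattedContents()));
    assertEquals(expected, rows.size());
}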
Use of com.github.nosan.embedded.cassandra.Cassandra in project esop by instaclustr.
The class AbstractBackupTest, method liveBackupRestoreTest.
public void liveBackupRestoreTest(final String[][] arguments, final String cassandraVersion, int rounds) throws Exception {
    Cassandra cassandra = getCassandra(cassandraDir, cassandraVersion);
    cassandra.start();
    waitForCql();
    try (CqlSession session = CqlSession.builder().build()) {
        createTable(session, KEYSPACE, TABLE);
        createTable(session, KEYSPACE_2, TABLE_2);
        // stefansnapshot-1
        insertAndCallBackupCLI(2, session, arguments[0]);
        // stefansnapshot-2
        insertAndCallBackupCLI(2, session, arguments[1]);
        for (int i = 1; i < rounds + 1; ++i) {
            // each phase is executed twice here to check that the phases are idempotent / repeatable
            logger.info("Round " + i + " - Executing the first restoration phase - download {}", asList(arguments[2]));
            Esop.mainWithoutExit(arguments[2]);
            logger.info("Round " + i + " - Executing the first restoration phase for the second time - download {}", asList(arguments[2]));
            Esop.mainWithoutExit(arguments[2]);
            logger.info("Round " + i + " - Executing the second restoration phase - truncate {}", asList(arguments[3]));
            Esop.mainWithoutExit(arguments[3]);
            logger.info("Round " + i + " - Executing the second restoration phase for the second time - truncate {}", asList(arguments[3]));
            Esop.mainWithoutExit(arguments[3]);
            logger.info("Round " + i + " - Executing the third restoration phase - import {}", asList(arguments[4]));
            Esop.mainWithoutExit(arguments[4]);
            if (!cassandraVersion.startsWith("4")) {
                // the second import run would not pass on Cassandra 4 because importing deletes the downloaded files
                logger.info("Round " + i + " - Executing the third restoration phase for the second time - import {}", asList(arguments[4]));
                Esop.mainWithoutExit(arguments[4]);
            }
            logger.info("Round " + i + " - Executing the fourth restoration phase - cleanup {}", asList(arguments[5]));
            Esop.mainWithoutExit(arguments[5]);
            logger.info("Round " + i + " - Executing the fourth restoration phase for the second time - cleanup {}", asList(arguments[5]));
            Esop.mainWithoutExit(arguments[5]);
            // we expect 4 records to be there, as 2 were inserted before the first backup and another 2 before the second backup
            dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 4);
            dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 4);
        }
    } finally {
        cassandra.stop();
        FileUtils.deleteDirectory(cassandraDir);
        deleteDirectory(Paths.get(target("backup1")));
    }
}
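waitForCql() blocks until the freshly started node accepts CQL connections; the actual helper is not shown on this page. A hypothetical sketch of what it could look like, simply retrying the default CqlSession connection (localhost:9042) until it succeeds:

import com.datastax.oss.driver.api.core.CqlSession;

// Hypothetical sketch of a waitForCql() helper; the real implementation in AbstractBackupTest may differ.
private static void waitForCql() {
    for (int attempt = 0; attempt < 60; attempt++) {
        try (CqlSession session = CqlSession.builder().build()) {
            return; // the node is accepting CQL connections
        } catch (Exception ex) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new IllegalStateException("Interrupted while waiting for CQL", ie);
            }
        }
    }
    throw new IllegalStateException("Cassandra did not start accepting CQL connections in time");
}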
Use of com.github.nosan.embedded.cassandra.Cassandra in project esop by instaclustr.
The class AbstractBackupTest, method liveBackupWithRestoreOnDifferentTableSchema.
public void liveBackupWithRestoreOnDifferentTableSchema(final String[][] arguments, final String cassandraVersion, final boolean tableAddition) throws Exception {
    Cassandra cassandra = getCassandra(cassandraDir, cassandraVersion);
    cassandra.start();
    waitForCql();
    try (CqlSession session = CqlSession.builder().build()) {
        createTable(session, KEYSPACE, TABLE);
        createTable(session, KEYSPACE_2, TABLE_2);
        // after this, table1 and table2 will contain 2 rows each
        insert(2, session, new ArrayList<String[]>() {
            {
                add(new String[] { KEYSPACE, TABLE });
                add(new String[] { KEYSPACE_2, TABLE_2 });
            }
        });
        // first backup
        Esop.mainWithoutExit(arguments[0]);
        String firstSchemaVersion = new CassandraSchemaVersion(new CassandraJMXServiceImpl(new CassandraJMXConnectionInfo())).act();
        // create a third table; this way, the Cassandra schema will change
        createTable(session, KEYSPACE_3, TABLE_3);
        waitUntilSchemaChanged(firstSchemaVersion);
        // after this, table1 and table2 will contain 4 rows each
        // and table3 just 2
        insert(2, session, new ArrayList<String[]>() {
            {
                add(new String[] { KEYSPACE, TABLE });
                add(new String[] { KEYSPACE_2, TABLE_2 });
                add(new String[] { KEYSPACE_3, TABLE_3 });
            }
        });
        // second backup
        Esop.mainWithoutExit(arguments[1]);
        if (tableAddition) {
            addColumnToTable(session, KEYSPACE, TABLE, "newColumn", TEXT);
        } else {
            removeColumnFromTable(session, KEYSPACE, TABLE, TestEntity.NAME);
        }
        // restore into the first snapshot, taken when table1 did not yet have the newly added column;
        // we are effectively restoring SSTables onto a different schema, so we expect values in the new column to be null
        // download
        Esop.mainWithoutExit(arguments[2]);
        // truncate
        Esop.mainWithoutExit(arguments[3]);
        // after truncating, we see that we have truncated just the two tables which were
        // in the first snapshot, not the third one
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
        // import
        Esop.mainWithoutExit(arguments[4]);
        // cleanup
        Esop.mainWithoutExit(arguments[5]);
        // verify:
        // table1 and table2 contain 2 rows each (as we restored them from the first snapshot) and table3 still contains 2
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
    } finally {
        cassandra.stop();
        FileUtils.deleteDirectory(cassandraDir);
        deleteDirectory(Paths.get(target("backup1")));
    }
}
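addColumnToTable and removeColumnFromTable mutate the live table schema between the backup and the restore; their implementations are not shown on this page. The following hypothetical sketches illustrate what they boil down to, issuing ALTER TABLE statements through the session.

import com.datastax.oss.driver.api.core.CqlSession;

// Hypothetical helpers; the real implementations in AbstractBackupTest may differ.
private static void addColumn(final CqlSession session, final String keyspace, final String table, final String column, final String type) {
    session.execute(String.format("ALTER TABLE %s.%s ADD %s %s", keyspace, table, column, type));
}

private static void dropColumn(final CqlSession session, final String keyspace, final String table, final String column) {
    session.execute(String.format("ALTER TABLE %s.%s DROP %s", keyspace, table, column));
}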