Example usage of jmx.org.apache.cassandra.CassandraJMXConnectionInfo in the esop project by Instaclustr, taken from the class AbstractBackupTest, method liveBackupWithRestoreOnDifferentSchema.
public void liveBackupWithRestoreOnDifferentSchema(final String[][] arguments, final String cassandraVersion) throws Exception {
    // Verifies that a restore into the first snapshot succeeds even though the overall
    // Cassandra schema changed after that backup (a third keyspace/table was created).
    // Note: we never change the schema of any individual table here, only the cluster
    // schema as a whole.
    Cassandra cassandra = getCassandra(cassandraDir, cassandraVersion);
    cassandra.start();
    waitForCql();
    try (CqlSession session = CqlSession.builder().build()) {
        createTable(session, KEYSPACE, TABLE);
        createTable(session, KEYSPACE_2, TABLE_2);
        // after this, table and table2 will contain 2 rows each.
        // Plain ArrayList instead of double-brace initialization: the anonymous subclass
        // created by "new ArrayList<>() {{ ... }}" captures a hidden reference to the
        // enclosing test instance and is a well-known anti-pattern.
        final ArrayList<String[]> firstBatch = new ArrayList<>();
        firstBatch.add(new String[] { KEYSPACE, TABLE });
        firstBatch.add(new String[] { KEYSPACE_2, TABLE_2 });
        insert(2, session, firstBatch);
        // first backup
        Esop.mainWithoutExit(arguments[0]);
        String firstSchemaVersion = new CassandraSchemaVersion(new CassandraJMXServiceImpl(new CassandraJMXConnectionInfo())).act();
        // create third schema, by this way, Cassandra schema will change
        createTable(session, KEYSPACE_3, TABLE_3);
        waitUntilSchemaChanged(firstSchemaVersion);
        // after this, table and table2 will contain 4 rows each and table3 just 2
        final ArrayList<String[]> secondBatch = new ArrayList<>();
        secondBatch.add(new String[] { KEYSPACE, TABLE });
        secondBatch.add(new String[] { KEYSPACE_2, TABLE_2 });
        secondBatch.add(new String[] { KEYSPACE_3, TABLE_3 });
        insert(2, session, secondBatch);
        // second backup
        Esop.mainWithoutExit(arguments[1]);
        // here we want to restore into the first snapshot even though schema has changed,
        // because we added the third table
        // restore into the first backup: download ...
        Esop.mainWithoutExit(arguments[2]);
        // ... then truncate
        Esop.mainWithoutExit(arguments[3]);
        // after truncating, only the two tables which were in snapshot1 are empty,
        // the third one is untouched
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
        // import
        Esop.mainWithoutExit(arguments[4]);
        // cleanup
        Esop.mainWithoutExit(arguments[5]);
        // verify: table1 and table2 contain 2 rows each (restored from the first snapshot)
        // and table3 still contains its 2 rows
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
    } finally {
        cassandra.stop();
        FileUtils.deleteDirectory(cassandraDir);
        deleteDirectory(Paths.get(target("backup1")));
    }
}
Example usage of jmx.org.apache.cassandra.CassandraJMXConnectionInfo in the esop project by Instaclustr, taken from the class AbstractBackupTest, method liveBackupWithRestoreOnDifferentTableSchema.
public void liveBackupWithRestoreOnDifferentTableSchema(final String[][] arguments, final String cassandraVersion, final boolean tableAddition) throws Exception {
    // Verifies a restore into the first snapshot after the schema of an individual table
    // was altered (a column added when tableAddition is true, otherwise a column removed).
    // SSTables are effectively restored onto a different table schema, so values in a
    // newly added column are expected to be "null".
    Cassandra cassandra = getCassandra(cassandraDir, cassandraVersion);
    cassandra.start();
    waitForCql();
    try (CqlSession session = CqlSession.builder().build()) {
        createTable(session, KEYSPACE, TABLE);
        createTable(session, KEYSPACE_2, TABLE_2);
        // after this, table and table2 will contain 2 rows each.
        // Plain ArrayList instead of double-brace initialization: the anonymous subclass
        // created by "new ArrayList<>() {{ ... }}" captures a hidden reference to the
        // enclosing test instance and is a well-known anti-pattern.
        final ArrayList<String[]> firstBatch = new ArrayList<>();
        firstBatch.add(new String[] { KEYSPACE, TABLE });
        firstBatch.add(new String[] { KEYSPACE_2, TABLE_2 });
        insert(2, session, firstBatch);
        // first backup
        Esop.mainWithoutExit(arguments[0]);
        String firstSchemaVersion = new CassandraSchemaVersion(new CassandraJMXServiceImpl(new CassandraJMXConnectionInfo())).act();
        // create third schema, by this way, Cassandra schema will change
        createTable(session, KEYSPACE_3, TABLE_3);
        waitUntilSchemaChanged(firstSchemaVersion);
        // after this, table and table2 will contain 4 rows each and table3 just 2
        final ArrayList<String[]> secondBatch = new ArrayList<>();
        secondBatch.add(new String[] { KEYSPACE, TABLE });
        secondBatch.add(new String[] { KEYSPACE_2, TABLE_2 });
        secondBatch.add(new String[] { KEYSPACE_3, TABLE_3 });
        insert(2, session, secondBatch);
        // second backup
        Esop.mainWithoutExit(arguments[1]);
        // alter the schema of table1 before restoring
        if (tableAddition) {
            addColumnToTable(session, KEYSPACE, TABLE, "newColumn", TEXT);
        } else {
            removeColumnFromTable(session, KEYSPACE, TABLE, TestEntity.NAME);
        }
        // restore into the first snapshot where table1 was without the newly added column:
        // download ...
        Esop.mainWithoutExit(arguments[2]);
        // ... then truncate
        Esop.mainWithoutExit(arguments[3]);
        // after truncating, only the two tables which were in snapshot1 are empty,
        // the third one is untouched
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 0);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
        // import
        Esop.mainWithoutExit(arguments[4]);
        // cleanup
        Esop.mainWithoutExit(arguments[5]);
        // verify: table1 and table2 contain 2 rows each (restored from the first snapshot)
        // and table3 still contains its 2 rows
        dumpTableAndAssertRowCount(session, KEYSPACE, TABLE, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_2, TABLE_2, 2);
        dumpTableAndAssertRowCount(session, KEYSPACE_3, TABLE_3, 2);
    } finally {
        cassandra.stop();
        FileUtils.deleteDirectory(cassandraDir);
        deleteDirectory(Paths.get(target("backup1")));
    }
}
Example usage of jmx.org.apache.cassandra.CassandraJMXConnectionInfo in the esop project by Instaclustr, taken from the class Esop, method init.
static void init(final Runnable command, final CassandraJMXSpec jmxSpec, final HashSpec hashSpec, final OperationRequest operationRequest, final Logger logger, final List<Module> appSpecificModules) {
    // Pick the Cassandra binding up front: a real JMX-backed module when a spec is given,
    // otherwise stub providers that hand out null MBeans.
    final Module cassandraBinding;
    if (jmxSpec != null) {
        cassandraBinding = new CassandraModule(new CassandraJMXConnectionInfo(jmxSpec));
    } else {
        cassandraBinding = new AbstractModule() {
            @Override
            protected void configure() {
                bind(StorageServiceMBean.class).toProvider(() -> null);
                bind(Cassandra4StorageServiceMBean.class).toProvider(() -> null);
            }
        };
    }
    // Assemble the full module list: Cassandra binding, framework modules, then any
    // application-specific additions.
    final List<Module> allModules = new ArrayList<>();
    allModules.add(cassandraBinding);
    allModules.add(new JacksonModule());
    allModules.add(new OperationsModule());
    allModules.add(new StorageModules());
    allModules.add(new ExecutorsModule());
    allModules.add(new UploadingModule());
    allModules.add(new DownloadingModule());
    allModules.add(new HashModule(hashSpec));
    allModules.addAll(appSpecificModules);
    // PRODUCTION stage makes Guice instantiate singletons eagerly.
    final Injector injector = Guice.createInjector(Stage.PRODUCTION, allModules);
    GuiceInjectorHolder.INSTANCE.setInjector(injector);
    injector.injectMembers(command);
    // Validate the request with a validator whose constraint validators are themselves
    // Guice-injected; log every violation and abort on any failure.
    final Validator validator = Validation.byDefaultProvider().configure().constraintValidatorFactory(new GuiceInjectingConstraintValidatorFactory()).buildValidatorFactory().getValidator();
    final Set<ConstraintViolation<OperationRequest>> violations = validator.validate(operationRequest);
    if (!violations.isEmpty()) {
        for (final ConstraintViolation<OperationRequest> violation : violations) {
            logger.error(violation.getMessage());
        }
        throw new ValidationException();
    }
}
Aggregations