Use of org.apache.cassandra.config.ConfigurationException in project brisk by riptano: class CassandraStorage, method getValidatorMap.
/**
 * Builds a map from each declared column name to the validator parsed from
 * that column's validation class. Columns without a validation class are
 * skipped; an unparseable class is surfaced as an IOException.
 *
 * @param cfDef column family definition whose column metadata is scanned
 * @return map of column name to its parsed AbstractType validator
 * @throws IOException if a validation class fails to parse
 */
private Map<ByteBuffer, AbstractType> getValidatorMap(CfDef cfDef) throws IOException {
    Map<ByteBuffer, AbstractType> validators = new HashMap<ByteBuffer, AbstractType>();
    for (ColumnDef columnDef : cfDef.column_metadata) {
        String validationClass = columnDef.getValidation_class();
        // Nothing to parse for columns with no declared validation class.
        if (validationClass == null || validationClass.isEmpty())
            continue;
        try {
            validators.put(columnDef.name, TypeParser.parse(validationClass));
        } catch (ConfigurationException e) {
            // Preserve the parse failure as the cause of the IOException.
            throw new IOException(e);
        }
    }
    return validators;
}
Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd: class BootStrapper, method getBootstrapToken.
/**
 * Chooses the token this node should bootstrap to. If an initial token was
 * explicitly configured, that token is used (after verifying it is not
 * already owned); otherwise a token is picked to assume half the load of
 * the most-loaded node.
 *
 * @param metadata current ring token metadata
 * @param load per-endpoint load figures used for balanced selection
 * @return the token to bootstrap to
 * @throws ConfigurationException if the configured token is already in use
 * @throws IOException on failure while selecting a balanced token
 */
public static Token getBootstrapToken(final TokenMetadata metadata, final Map<InetAddress, Double> load) throws IOException, ConfigurationException {
    String initialToken = DatabaseDescriptor.getInitialToken();
    if (initialToken == null)
        return getBalancedToken(metadata, load);

    logger.debug("token manually specified as " + initialToken);
    Token token = StorageService.getPartitioner().getTokenFactory().fromString(initialToken);
    // Refuse to bootstrap onto a token an existing endpoint already owns.
    if (metadata.getEndpoint(token) != null)
        throw new ConfigurationException("Bootstraping to existing token " + token + " is not allowed (decommission/removetoken the old node first).");
    return token;
}
Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd: class CompressionParameters, method validate.
// chunkLength must be a power of 2 because we assume so when
// computing the chunk number from an uncompressed file offset (see
// CompressedRandomAccessReader.decompresseChunk())
/**
 * Validates the compression parameters: chunkLength, when set, must be a
 * positive power of two, and crcChance must lie in [0.0, 1.0].
 *
 * @throws ConfigurationException if any parameter is out of range
 */
private void validate() throws ConfigurationException {
    // if chunk length was not set (chunkLength == null), this is fine, default will be used
    if (chunkLength != null) {
        if (chunkLength <= 0)
            throw new ConfigurationException("Invalid negative or null " + CHUNK_LENGTH_KB);
        // For a positive int, "power of two" is exactly "one bit set";
        // Integer.bitCount replaces the original hand-rolled bit-scan loop.
        if (Integer.bitCount(chunkLength) != 1)
            throw new ConfigurationException(CHUNK_LENGTH_KB + " must be a power of 2");
    }
    if (crcChance > 1.0d || crcChance < 0.0d)
        throw new ConfigurationException("crc_check_chance should be between 0.0 to 1.0");
}
Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd: class SSTableExport, method main.
/**
 * Given arguments specifying an SSTable, and optionally an output file,
 * export the contents of the SSTable to JSON. Exits with status 1 on
 * bad arguments and status 0 on success.
 *
 * @param args command lines arguments
 *
 * @throws IOException on failure to open/read/write files or output streams
 * @throws ConfigurationException on configuration failure (wrong params given)
 */
public static void main(String[] args) throws IOException, ConfigurationException {
    String usage = String.format("Usage: %s <sstable> [-k key [-k key [...]] -x key [-x key [...]]]%n", SSTableExport.class.getName());
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        System.err.println(usage);
        System.exit(1);
    }

    // Exactly one positional argument: the sstable path.
    if (cmd.getArgs().length != 1) {
        System.err.println("You must supply exactly one sstable");
        System.err.println(usage);
        System.exit(1);
    }

    String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();

    // Schemas must be loaded before the sstable can be interpreted.
    DatabaseDescriptor.loadSchemas();
    if (Schema.instance.getNonSystemTables().size() < 1) {
        String msg = "no non-system tables are defined";
        System.err.println(msg);
        throw new ConfigurationException(msg);
    }

    String[] keys = cmd.getOptionValues(KEY_OPTION);
    String[] excludes = cmd.getOptionValues(EXCLUDEKEY_OPTION);

    if (cmd.hasOption(ENUMERATEKEYS_OPTION)) {
        // Key-enumeration mode: list row keys only.
        enumeratekeys(ssTableFileName, System.out);
    } else if (keys != null && keys.length > 0) {
        // Export only the explicitly requested keys.
        export(ssTableFileName, System.out, Arrays.asList(keys), excludes);
    } else {
        // Export everything except the excluded keys.
        export(ssTableFileName, excludes);
    }
    System.exit(0);
}
Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd: class Migration, method apply.
/**
 * apply changes
 *
 * Applies this schema migration. The sequence is strictly ordered: first the
 * migration is checked to be serial (based on the current schema version and
 * strictly newer by timestamp); then, outside client mode, the schema row
 * mutation is applied, the migration is recorded in the system table, the
 * LAST_MIGRATION pointer is updated, and the schema column families are
 * flushed before in-memory models are updated via applyModels().
 *
 * @throws ConfigurationException if this migration is not based on the
 *         current schema version, or its timestamp is not newer than it
 * @throws IOException on failure to write or flush the migration state
 */
public final void apply() throws IOException, ConfigurationException {
// ensure migration is serial. don't apply unless the previous version matches.
if (!schema.getVersion().equals(lastVersion))
throw new ConfigurationException("Previous version mismatch. cannot apply.");
if (newVersion.timestamp() <= lastVersion.timestamp())
throw new ConfigurationException("New version timestamp is not newer than the current version timestamp.");
// write to schema
assert rm != null;
// Client mode has no local storage; only update in-memory models (below).
if (!StorageService.instance.isClientMode()) {
rm.apply();
// NOTE(review): timestamp comes from LamportClock.getVersion(), so all
// columns written below share one logical timestamp for this migration.
long now = LamportClock.getVersion();
ByteBuffer buf = serialize();
// Record the serialized migration under its version UUID in MIGRATIONS_CF.
RowMutation migration = new RowMutation(Table.SYSTEM_TABLE, MIGRATIONS_KEY);
ColumnFamily cf = ColumnFamily.create(Table.SYSTEM_TABLE, MIGRATIONS_CF);
column = new Column(ByteBuffer.wrap(UUIDGen.decompose(newVersion)), buf, now);
cf.addColumn(column);
migration.add(cf);
migration.apply();
// note that we're storing this in the system table, which is not replicated
logger.info("Applying migration {} {}", newVersion.toString(), toString());
// Advance the LAST_MIGRATION pointer to this migration's version.
migration = new RowMutation(Table.SYSTEM_TABLE, LAST_MIGRATION_KEY);
migration.add(new QueryPath(SCHEMA_CF, null, LAST_MIGRATION_KEY), ByteBuffer.wrap(UUIDGen.decompose(newVersion)), now);
migration.apply();
// if we fail here, there will be schema changes in the CL that will get replayed *AFTER* the schema is loaded.
// CassandraDaemon checks for this condition (the stored version will be greater than the loaded version)
// and calls MigrationManager.applyMigrations(loaded version, stored version).
// flush changes out of memtables so we don't need to rely on the commit log.
ColumnFamilyStore[] schemaStores = new ColumnFamilyStore[] { Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(Migration.MIGRATIONS_CF), Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(Migration.SCHEMA_CF) };
List<Future<?>> flushes = new ArrayList<Future<?>>();
for (ColumnFamilyStore cfs : schemaStores) flushes.add(cfs.forceFlush());
for (Future<?> f : flushes) {
if (f == null)
// applying the migration triggered a flush independently
continue;
try {
f.get();
} catch (ExecutionException e) {
// Wrap flush failures as IOException, preserving the cause.
throw new IOException(e);
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
// Update in-memory schema models last, after durable state is in place.
applyModels();
}
Aggregations