Usage example of org.apache.cassandra.exceptions.ConfigurationException in the Apache Cassandra project:
class PropertyFileSnitch, method reloadConfiguration.
/**
 * Reloads the datacenter/rack topology from {@code cassandra-topology.properties}.
 *
 * Parses every entry into a host -> [dc, rack] map, captures the optional
 * "default" location, validates that this node has a location (or a default),
 * and atomically swaps the snitch's state before notifying StorageService.
 *
 * @param isUpdate true when this is a runtime reload (as opposed to the initial
 *                 load at startup); runtime reloads are additionally vetted by
 *                 {@code livenessCheck} and trigger a topology update
 * @throws ConfigurationException if the properties file cannot be read, a host
 *         name cannot be resolved, or the file defines neither a location for
 *         this node's broadcast address nor a default
 */
public void reloadConfiguration(boolean isUpdate) throws ConfigurationException {
    HashMap<InetAddressAndPort, String[]> reloadedMap = new HashMap<>();
    String[] reloadedDefaultDCRack = null;
    Properties properties = new Properties();
    // getResourceAsStream returns null when the file is missing; Properties.load
    // then throws NPE, which the broad catch below converts to ConfigurationException.
    try (InputStream stream = getClass().getClassLoader().getResourceAsStream(SNITCH_PROPERTIES_FILENAME)) {
        properties.load(stream);
    } catch (Exception e) {
        throw new ConfigurationException("Unable to read " + SNITCH_PROPERTIES_FILENAME, e);
    }
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
        String key = (String) entry.getKey();
        String value = (String) entry.getValue();
        if ("default".equals(key)) {
            // Fallback location for hosts not listed explicitly; "dc:rack" format.
            String[] newDefault = value.split(":");
            if (newDefault.length < 2)
                reloadedDefaultDCRack = new String[] { "default", "default" };
            else
                reloadedDefaultDCRack = new String[] { newDefault[0].trim(), newDefault[1].trim() };
        } else {
            InetAddressAndPort host;
            // Keys may be written with a leading '/' (InetAddress.toString() style).
            String hostString = StringUtils.remove(key, '/');
            try {
                host = InetAddressAndPort.getByName(hostString);
            } catch (UnknownHostException e) {
                throw new ConfigurationException("Unknown host " + hostString, e);
            }
            String[] token = value.split(":");
            if (token.length < 2)
                token = new String[] { "default", "default" };
            else
                token = new String[] { token[0].trim(), token[1].trim() };
            reloadedMap.put(host, token);
        }
    }
    InetAddressAndPort broadcastAddress = FBUtilities.getBroadcastAddressAndPort();
    String[] localInfo = reloadedMap.get(broadcastAddress);
    if (reloadedDefaultDCRack == null && localInfo == null)
        // Fixed grammar in the error message ("does it provides" -> "does it provide").
        throw new ConfigurationException(String.format("Snitch definitions at %s do not define a location for " + "this node's broadcast address %s, nor does it provide a default", SNITCH_PROPERTIES_FILENAME, broadcastAddress));
    // internode messaging code converts our broadcast address to local,
    // make sure we can be found at that as well.
    // NOTE(review): when localInfo is null here (default-only configuration) this
    // stores a null value for localAddress; downstream lookups appear to tolerate
    // that by falling back to the default — confirm before changing.
    InetAddressAndPort localAddress = FBUtilities.getLocalAddressAndPort();
    if (!localAddress.equals(broadcastAddress) && !reloadedMap.containsKey(localAddress))
        reloadedMap.put(localAddress, localInfo);
    // For runtime reloads, abort the swap if the new topology fails the liveness check.
    if (isUpdate && !livenessCheck(reloadedMap, reloadedDefaultDCRack))
        return;
    if (logger.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<InetAddressAndPort, String[]> entry : reloadedMap.entrySet()) sb.append(entry.getKey()).append(':').append(Arrays.toString(entry.getValue())).append(", ");
        logger.trace("Loaded network topology from property file: {}", StringUtils.removeEnd(sb.toString(), ", "));
    }
    // Publish the new state only after all validation has passed.
    defaultDCRack = reloadedDefaultDCRack;
    endpointMap = reloadedMap;
    if (// null check tolerates circular dependency; see CASSANDRA-4145
    StorageService.instance != null) {
        if (isUpdate)
            StorageService.instance.updateTopology();
        else
            StorageService.instance.getTokenMetadata().invalidateCachedRings();
    }
    if (gossipStarted)
        StorageService.instance.gossipSnitchInfo();
}
Usage example of org.apache.cassandra.exceptions.ConfigurationException in the Titan project (thinkaurelius):
class CassandraEmbeddedStoreManager, method ensureColumnFamilyExists.
/**
 * Ensures the given column family exists in the keyspace, creating it when absent.
 *
 * Applies hard-coded caching policies (keys-only for the edge store, rows-only
 * for the vertex index) and this manager's sstable compression settings before
 * announcing the new column family to the cluster.
 *
 * @param keyspaceName     keyspace to create the column family in
 * @param columnfamilyName name of the column family to ensure
 * @param comparator       column comparator type for the new column family
 * @throws StorageException if compression parameters, index-name generation, or
 *                          the schema announcement fail (wrapped ConfigurationException)
 */
private void ensureColumnFamilyExists(String keyspaceName, String columnfamilyName, AbstractType comparator) throws StorageException {
    // Fast path: nothing to do when the CF is already defined.
    if (null != Schema.instance.getCFMetaData(keyspaceName, columnfamilyName))
        return;
    // Column Family not found; create it
    CFMetaData cfm = new CFMetaData(keyspaceName, columnfamilyName, ColumnFamilyType.Standard, comparator, null);
    // Hard-coded caching settings
    if (columnfamilyName.startsWith(Backend.EDGESTORE_NAME)) {
        cfm.caching(Caching.KEYS_ONLY);
    } else if (columnfamilyName.startsWith(Backend.VERTEXINDEX_STORE_NAME)) {
        cfm.caching(Caching.ROWS_ONLY);
    }
    // Configure sstable compression
    final CompressionParameters cp;
    if (compressionEnabled) {
        try {
            cp = new CompressionParameters(compressionClass, compressionChunkSizeKB * 1024, Collections.<String, String>emptyMap());
            // CompressionParameters doesn't override toString(), so be explicit.
            // Pass the arguments as SLF4J varargs directly (no explicit Object[]),
            // consistent with the newer CassandraEmbeddedStoreManager variant.
            log.debug("Creating CF {}: setting {}={} and {}={} on {}", columnfamilyName, CompressionParameters.SSTABLE_COMPRESSION, compressionClass, CompressionParameters.CHUNK_LENGTH_KB, compressionChunkSizeKB, cp);
        } catch (ConfigurationException ce) {
            throw new PermanentStorageException(ce);
        }
    } else {
        cp = new CompressionParameters(null);
        log.debug("Creating CF {}: setting {} to null to disable compression", columnfamilyName, CompressionParameters.SSTABLE_COMPRESSION);
    }
    cfm.compressionParameters(cp);
    try {
        cfm.addDefaultIndexNames();
    } catch (ConfigurationException e) {
        throw new PermanentStorageException("Failed to create column family metadata for " + keyspaceName + ":" + columnfamilyName, e);
    }
    try {
        MigrationManager.announceNewColumnFamily(cfm);
    } catch (ConfigurationException e) {
        throw new PermanentStorageException("Failed to create column family " + keyspaceName + ":" + columnfamilyName, e);
    }
}
Usage example of org.apache.cassandra.exceptions.ConfigurationException in the Apache Cassandra project:
class MigrationManager, method announceNewTable.
/**
 * Validates a new table definition and announces its creation via a schema mutation.
 *
 * @param cfm              metadata of the table to create
 * @param announceLocally  whether the schema change is applied locally only
 * @param throwOnDuplicate when true, an existing table or view with the same
 *                         name raises AlreadyExistsException
 * @throws ConfigurationException if the target keyspace does not exist
 * @throws AlreadyExistsException if a same-named table or view exists and
 *                                throwOnDuplicate is set
 */
private static void announceNewTable(TableMetadata cfm, boolean announceLocally, boolean throwOnDuplicate) {
    cfm.validate();
    KeyspaceMetadata keyspace = Schema.instance.getKeyspaceMetadata(cfm.keyspace);
    // Guard: the enclosing keyspace must already exist.
    if (keyspace == null) {
        throw new ConfigurationException(String.format("Cannot add table '%s' to non existing keyspace '%s'.", cfm.name, cfm.keyspace));
    }
    // Guard: neither a table nor a view may already claim this name.
    if (throwOnDuplicate && keyspace.getTableOrViewNullable(cfm.name) != null) {
        throw new AlreadyExistsException(cfm.keyspace, cfm.name);
    }
    logger.info("Create new table: {}", cfm);
    announce(SchemaKeyspace.makeCreateTableMutation(keyspace, cfm, FBUtilities.timestampMicros()), announceLocally);
}
Usage example of org.apache.cassandra.exceptions.ConfigurationException in the Apache Cassandra project:
class MigrationManager, method announceViewDrop.
/**
 * Announces the drop of an existing materialized view via a schema mutation.
 *
 * @param ksName          keyspace containing the view
 * @param viewName        name of the materialized view to drop
 * @param announceLocally whether the schema change is applied locally only
 * @throws ConfigurationException if no such materialized view exists
 */
public static void announceViewDrop(String ksName, String viewName, boolean announceLocally) throws ConfigurationException {
    ViewMetadata view = Schema.instance.getView(ksName, viewName);
    if (view == null)
        throw new ConfigurationException(String.format("Cannot drop non existing materialized view '%s' in keyspace '%s'.", viewName, ksName));
    // The view exists, so its keyspace metadata is expected to be present too.
    KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(ksName);
    // Fixed log message: this drops a materialized view, not a table.
    logger.info("Drop materialized view '{}/{}'", view.keyspace, view.name);
    announce(SchemaKeyspace.makeDropViewMutation(ksm, view, FBUtilities.timestampMicros()), announceLocally);
}
Usage example of org.apache.cassandra.exceptions.ConfigurationException in the JanusGraph project:
class CassandraEmbeddedStoreManager, method ensureColumnFamilyExists.
/**
 * Ensures the given column family exists in the keyspace, creating it when absent.
 *
 * Creation applies, in order: the configured compaction strategy and options,
 * hard-coded caching policies (keys-only for the edge store, rows-only for the
 * index store), and this manager's sstable compression settings, then announces
 * the new column family to the cluster and performs a verification read.
 *
 * @param keyspaceName     keyspace to create the column family in
 * @param columnFamilyName name of the column family to ensure
 * @throws BackendException if compaction setup, compression parameters,
 *                          index-name generation, or the schema announcement
 *                          fail (wrapped ConfigurationException)
 */
private void ensureColumnFamilyExists(String keyspaceName, String columnFamilyName) throws BackendException {
// Fast path: nothing to do when the CF is already defined in the schema.
if (null != Schema.instance.getCFMetaData(keyspaceName, columnFamilyName))
return;
// Column Family not found; create it
final CFMetaData cfm = new CFMetaData(keyspaceName, columnFamilyName, ColumnFamilyType.Standard, CellNames.fromAbstractType(BytesType.instance, true));
try {
// Apply the optional compaction strategy/options from the store configuration.
if (storageConfig.has(COMPACTION_STRATEGY)) {
cfm.compactionStrategyClass(CFMetaData.createCompactionStrategy(storageConfig.get(COMPACTION_STRATEGY)));
}
if (!compactionOptions.isEmpty()) {
cfm.compactionStrategyOptions(compactionOptions);
}
} catch (ConfigurationException e) {
throw new PermanentBackendException("Failed to create column family metadata for " + keyspaceName + ":" + columnFamilyName, e);
}
// Hard-coded caching settings
if (columnFamilyName.startsWith(Backend.EDGESTORE_NAME)) {
cfm.caching(CachingOptions.KEYS_ONLY);
} else if (columnFamilyName.startsWith(Backend.INDEXSTORE_NAME)) {
cfm.caching(CachingOptions.ROWS_ONLY);
}
// Configure sstable compression
final CompressionParameters cp;
if (compressionEnabled) {
try {
// Chunk length is configured in KB; the constructor expects bytes.
cp = new CompressionParameters(compressionClass, compressionChunkSizeKB * 1024, Collections.emptyMap());
// CompressionParameters doesn't override toString(), so be explicit
log.debug("Creating CF {}: setting {}={} and {}={} on {}", columnFamilyName, CompressionParameters.SSTABLE_COMPRESSION, compressionClass, CompressionParameters.CHUNK_LENGTH_KB, compressionChunkSizeKB, cp);
} catch (ConfigurationException ce) {
throw new PermanentBackendException(ce);
}
} else {
// A null sstable-compressor disables compression for this CF.
cp = new CompressionParameters(null);
log.debug("Creating CF {}: setting {} to null to disable compression", columnFamilyName, CompressionParameters.SSTABLE_COMPRESSION);
}
cfm.compressionParameters(cp);
try {
cfm.addDefaultIndexNames();
} catch (ConfigurationException e) {
throw new PermanentBackendException("Failed to create column family metadata for " + keyspaceName + ":" + columnFamilyName, e);
}
try {
// Propagate the new CF definition to the cluster.
MigrationManager.announceNewColumnFamily(cfm);
log.info("Created CF {} in KS {}", columnFamilyName, keyspaceName);
} catch (ConfigurationException e) {
throw new PermanentBackendException("Failed to create column family " + keyspaceName + ":" + columnFamilyName, e);
}
/*
 * I'm chasing a nondeterministic exception that appears only rarely on my
 * machine when executing the embedded cassandra tests. If these dummy
 * reads ever actually fail and dump a log message, it could help debug
 * the root cause.
 *
 * java.lang.RuntimeException: java.lang.IllegalArgumentException: Unknown table/cf pair (InternalCassandraEmbeddedKeyColumnValueTest.testStore1)
 * at org.apache.cassandra.service.StorageProxy$DroppableRunnable.run(StorageProxy.java:1582)
 * at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
 * at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
 * at java.lang.Thread.run(Thread.java:744)
 * Caused by: java.lang.IllegalArgumentException: Unknown table/cf pair (InternalCassandraEmbeddedKeyColumnValueTest.testStore1)
 * at org.apache.cassandra.db.Table.getColumnFamilyStore(Table.java:166)
 * at org.apache.cassandra.db.Table.getRow(Table.java:354)
 * at org.apache.cassandra.db.SliceFromReadCommand.getRow(SliceFromReadCommand.java:70)
 * at org.apache.cassandra.service.StorageProxy$LocalReadRunnable.runMayThrow(StorageProxy.java:1052)
 * at org.apache.cassandra.service.StorageProxy$DroppableRunnable.run(StorageProxy.java:1578)
 * ... 3 more
 */
retryDummyRead(keyspaceName, columnFamilyName);
}
End of aggregated usage examples.