Example 11 with ConfigurationException

Use of org.apache.cassandra.exceptions.ConfigurationException in project cassandra by apache.

From the class CommitLogDescriptorTest, method testDescriptorInvalidParametersSize.

// migrated from CommitLogTest
@Test
public void testDescriptorInvalidParametersSize() throws IOException {
    Map<String, String> params = new HashMap<>();
    for (int i = 0; i < 65535; ++i) params.put("key" + i, Integer.toString(i, 16));
    try {
        CommitLogDescriptor desc = new CommitLogDescriptor(CommitLogDescriptor.current_version, 21, new ParameterizedClass("LZ4Compressor", params), neverEnabledEncryption);
        ByteBuffer buf = ByteBuffer.allocate(1024000);
        CommitLogDescriptor.writeHeader(buf, desc);
        Assert.fail("Parameter object too long should fail on writing descriptor.");
    } catch (ConfigurationException e) {
    // correct path
    }
}
Also used : HashMap(java.util.HashMap) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) ParameterizedClass(org.apache.cassandra.config.ParameterizedClass) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
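
The test above exercises only the failure path. For contrast, here is a minimal sketch of the corresponding success path, assuming it lives in the same test class (so the neverEnabledEncryption fixture and the imports listed above are available); the method name and the compression parameter are illustrative, not part of the original test.

// Hypothetical companion test: a small parameter map should fit in the header and not throw.
@Test
public void testDescriptorValidParametersSize() throws IOException {
    Map<String, String> params = new HashMap<>();
    params.put("chunk_length_in_kb", "64"); // assumption: any short key/value pair is acceptable here
    CommitLogDescriptor desc = new CommitLogDescriptor(CommitLogDescriptor.current_version, 21,
                                                       new ParameterizedClass("LZ4Compressor", params),
                                                       neverEnabledEncryption);
    ByteBuffer buf = ByteBuffer.allocate(1024000);
    // No ConfigurationException is expected: the serialized parameters stay well under the size limit.
    CommitLogDescriptor.writeHeader(buf, desc);
}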

Example 12 with ConfigurationException

Use of org.apache.cassandra.exceptions.ConfigurationException in project cassandra by apache.

From the class DatabaseDescriptor, method applySimpleConfig.

private static void applySimpleConfig() {
    if (conf.commitlog_sync == null) {
        throw new ConfigurationException("Missing required directive CommitLogSync", false);
    }
    if (conf.commitlog_sync == Config.CommitLogSync.batch) {
        if (Double.isNaN(conf.commitlog_sync_batch_window_in_ms) || conf.commitlog_sync_batch_window_in_ms <= 0d) {
            throw new ConfigurationException("Missing value for commitlog_sync_batch_window_in_ms: positive double value expected.", false);
        } else if (conf.commitlog_sync_period_in_ms != 0) {
            throw new ConfigurationException("Batch sync specified, but commitlog_sync_period_in_ms found. Only specify commitlog_sync_batch_window_in_ms when using batch sync", false);
        }
        logger.debug("Syncing log with a batch window of {}", conf.commitlog_sync_batch_window_in_ms);
    } else {
        if (conf.commitlog_sync_period_in_ms <= 0) {
            throw new ConfigurationException("Missing value for commitlog_sync_period_in_ms: positive integer expected", false);
        } else if (!Double.isNaN(conf.commitlog_sync_batch_window_in_ms)) {
            throw new ConfigurationException("commitlog_sync_period_in_ms specified, but commitlog_sync_batch_window_in_ms found.  Only specify commitlog_sync_period_in_ms when using periodic sync.", false);
        }
        logger.debug("Syncing log with a period of {}", conf.commitlog_sync_period_in_ms);
    }
    /* evaluate the DiskAccessMode Config directive, which also affects indexAccessMode selection */
    if (conf.disk_access_mode == Config.DiskAccessMode.auto) {
        conf.disk_access_mode = hasLargeAddressSpace() ? Config.DiskAccessMode.mmap : Config.DiskAccessMode.standard;
        indexAccessMode = conf.disk_access_mode;
        logger.info("DiskAccessMode 'auto' determined to be {}, indexAccessMode is {}", conf.disk_access_mode, indexAccessMode);
    } else if (conf.disk_access_mode == Config.DiskAccessMode.mmap_index_only) {
        conf.disk_access_mode = Config.DiskAccessMode.standard;
        indexAccessMode = Config.DiskAccessMode.mmap;
        logger.info("DiskAccessMode is {}, indexAccessMode is {}", conf.disk_access_mode, indexAccessMode);
    } else {
        indexAccessMode = conf.disk_access_mode;
        logger.info("DiskAccessMode is {}, indexAccessMode is {}", conf.disk_access_mode, indexAccessMode);
    }
    if (conf.gc_warn_threshold_in_ms < 0) {
        throw new ConfigurationException("gc_warn_threshold_in_ms must be a positive integer");
    }
    /* phi convict threshold for FailureDetector */
    if (conf.phi_convict_threshold < 5 || conf.phi_convict_threshold > 16) {
        throw new ConfigurationException("phi_convict_threshold must be between 5 and 16, but was " + conf.phi_convict_threshold, false);
    }
    /* Thread per pool */
    if (conf.concurrent_reads < 2) {
        throw new ConfigurationException("concurrent_reads must be at least 2, but was " + conf.concurrent_reads, false);
    }
    if (conf.concurrent_writes < 2 && System.getProperty("cassandra.test.fail_mv_locks_count", "").isEmpty()) {
        throw new ConfigurationException("concurrent_writes must be at least 2, but was " + conf.concurrent_writes, false);
    }
    if (conf.concurrent_counter_writes < 2)
        throw new ConfigurationException("concurrent_counter_writes must be at least 2, but was " + conf.concurrent_counter_writes, false);
    if (conf.concurrent_replicates != null)
        logger.warn("concurrent_replicates has been deprecated and should be removed from cassandra.yaml");
    if (conf.file_cache_size_in_mb == null)
        conf.file_cache_size_in_mb = Math.min(512, (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)));
    if (conf.memtable_offheap_space_in_mb == null)
        conf.memtable_offheap_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576));
    if (conf.memtable_offheap_space_in_mb < 0)
        throw new ConfigurationException("memtable_offheap_space_in_mb must be positive, but was " + conf.memtable_offheap_space_in_mb, false);
    // for the moment, we default to twice as much on-heap space as off-heap, as heap overhead is very large
    if (conf.memtable_heap_space_in_mb == null)
        conf.memtable_heap_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576));
    if (conf.memtable_heap_space_in_mb <= 0)
        throw new ConfigurationException("memtable_heap_space_in_mb must be positive, but was " + conf.memtable_heap_space_in_mb, false);
    logger.info("Global memtable on-heap threshold is enabled at {}MB", conf.memtable_heap_space_in_mb);
    if (conf.memtable_offheap_space_in_mb == 0)
        logger.info("Global memtable off-heap threshold is disabled, HeapAllocator will be used instead");
    else
        logger.info("Global memtable off-heap threshold is enabled at {}MB", conf.memtable_offheap_space_in_mb);
    if (conf.native_transport_max_frame_size_in_mb <= 0)
        throw new ConfigurationException("native_transport_max_frame_size_in_mb must be positive, but was " + conf.native_transport_max_frame_size_in_mb, false);
    // use -Dcassandra.storagedir (set in cassandra-env.sh) as the parent dir for data/, commitlog/, and saved_caches/
    if (conf.commitlog_directory == null) {
        conf.commitlog_directory = storagedirFor("commitlog");
    }
    if (conf.hints_directory == null) {
        conf.hints_directory = storagedirFor("hints");
    }
    if (conf.cdc_raw_directory == null) {
        conf.cdc_raw_directory = storagedirFor("cdc_raw");
    }
    if (conf.commitlog_total_space_in_mb == null) {
        int preferredSize = 8192;
        int minSize = 0;
        try {
            // use 1/4 of available space.  See discussion on #10013 and #10199
            minSize = Ints.checkedCast((guessFileStore(conf.commitlog_directory).getTotalSpace() / 1048576) / 4);
        } catch (IOException e) {
            logger.debug("Error checking disk space", e);
            throw new ConfigurationException(String.format("Unable to check disk space available to %s. Perhaps the Cassandra user does not have the necessary permissions", conf.commitlog_directory), e);
        }
        if (minSize < preferredSize) {
            logger.warn("Small commitlog volume detected at {}; setting commitlog_total_space_in_mb to {}.  You can override this in cassandra.yaml", conf.commitlog_directory, minSize);
            conf.commitlog_total_space_in_mb = minSize;
        } else {
            conf.commitlog_total_space_in_mb = preferredSize;
        }
    }
    if (conf.cdc_total_space_in_mb == 0) {
        int preferredSize = 4096;
        int minSize = 0;
        try {
            // use 1/8th of available space.  See discussion on #10013 and #10199 on the CL, taking half that for CDC
            minSize = Ints.checkedCast((guessFileStore(conf.cdc_raw_directory).getTotalSpace() / 1048576) / 8);
        } catch (IOException e) {
            logger.debug("Error checking disk space", e);
            throw new ConfigurationException(String.format("Unable to check disk space available to %s. Perhaps the Cassandra user does not have the necessary permissions", conf.cdc_raw_directory), e);
        }
        if (minSize < preferredSize) {
            logger.warn("Small cdc volume detected at {}; setting cdc_total_space_in_mb to {}.  You can override this in cassandra.yaml", conf.cdc_raw_directory, minSize);
            conf.cdc_total_space_in_mb = minSize;
        } else {
            conf.cdc_total_space_in_mb = preferredSize;
        }
    }
    if (conf.cdc_enabled) {
        logger.info("cdc_enabled is true. Starting casssandra node with Change-Data-Capture enabled.");
    }
    if (conf.saved_caches_directory == null) {
        conf.saved_caches_directory = storagedirFor("saved_caches");
    }
    if (conf.data_file_directories == null || conf.data_file_directories.length == 0) {
        conf.data_file_directories = new String[] { storagedir("data_file_directories") + File.separator + "data" };
    }
    long dataFreeBytes = 0;
    /* data file and commit log directories. they get created later, when they're needed. */
    for (String datadir : conf.data_file_directories) {
        if (datadir.equals(conf.commitlog_directory))
            throw new ConfigurationException("commitlog_directory must not be the same as any data_file_directories", false);
        if (datadir.equals(conf.hints_directory))
            throw new ConfigurationException("hints_directory must not be the same as any data_file_directories", false);
        if (datadir.equals(conf.saved_caches_directory))
            throw new ConfigurationException("saved_caches_directory must not be the same as any data_file_directories", false);
        try {
            dataFreeBytes += guessFileStore(datadir).getUnallocatedSpace();
        } catch (IOException e) {
            logger.debug("Error checking disk space", e);
            throw new ConfigurationException(String.format("Unable to check disk space available to %s. Perhaps the Cassandra user does not have the necessary permissions", datadir), e);
        }
    }
    if (dataFreeBytes < 64L * 1024 * 1048576) // 64 GB
        logger.warn("Only {} free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots", FBUtilities.prettyPrintMemory(dataFreeBytes));
    if (conf.commitlog_directory.equals(conf.saved_caches_directory))
        throw new ConfigurationException("saved_caches_directory must not be the same as the commitlog_directory", false);
    if (conf.commitlog_directory.equals(conf.hints_directory))
        throw new ConfigurationException("hints_directory must not be the same as the commitlog_directory", false);
    if (conf.hints_directory.equals(conf.saved_caches_directory))
        throw new ConfigurationException("saved_caches_directory must not be the same as the hints_directory", false);
    if (conf.memtable_flush_writers == 0) {
        conf.memtable_flush_writers = conf.data_file_directories.length == 1 ? 2 : 1;
    }
    if (conf.memtable_flush_writers < 1)
        throw new ConfigurationException("memtable_flush_writers must be at least 1, but was " + conf.memtable_flush_writers, false);
    if (conf.memtable_cleanup_threshold == null) {
        conf.memtable_cleanup_threshold = (float) (1.0 / (1 + conf.memtable_flush_writers));
    } else {
        logger.warn("memtable_cleanup_threshold has been deprecated and should be removed from cassandra.yaml");
    }
    if (conf.memtable_cleanup_threshold < 0.01f)
        throw new ConfigurationException("memtable_cleanup_threshold must be >= 0.01, but was " + conf.memtable_cleanup_threshold, false);
    if (conf.memtable_cleanup_threshold > 0.99f)
        throw new ConfigurationException("memtable_cleanup_threshold must be <= 0.99, but was " + conf.memtable_cleanup_threshold, false);
    if (conf.memtable_cleanup_threshold < 0.1f)
        logger.warn("memtable_cleanup_threshold is set very low [{}], which may cause performance degradation", conf.memtable_cleanup_threshold);
    if (conf.concurrent_compactors == null)
        conf.concurrent_compactors = Math.min(8, Math.max(2, Math.min(FBUtilities.getAvailableProcessors(), conf.data_file_directories.length)));
    if (conf.concurrent_compactors <= 0)
        throw new ConfigurationException("concurrent_compactors should be strictly greater than 0, but was " + conf.concurrent_compactors, false);
    if (conf.num_tokens > MAX_NUM_TOKENS)
        throw new ConfigurationException(String.format("A maximum number of %d tokens per node is supported", MAX_NUM_TOKENS), false);
    try {
        // if prepared_statements_cache_size_mb option was set to "auto" then size of the cache should be "max(1/256 of Heap (in MB), 10MB)"
        preparedStatementsCacheSizeInMB = (conf.prepared_statements_cache_size_mb == null) ? Math.max(10, (int) (Runtime.getRuntime().maxMemory() / 1024 / 1024 / 256)) : conf.prepared_statements_cache_size_mb;
        if (preparedStatementsCacheSizeInMB <= 0)
            // to avoid duplicating the error message
            throw new NumberFormatException();
    } catch (NumberFormatException e) {
        throw new ConfigurationException("prepared_statements_cache_size_mb option was set incorrectly to '" + conf.prepared_statements_cache_size_mb + "', supported values are <integer> >= 0.", false);
    }
    try {
        // if key_cache_size_in_mb option was set to "auto" then size of the cache should be "min(5% of Heap (in MB), 100MB)"
        keyCacheSizeInMB = (conf.key_cache_size_in_mb == null) ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024)), 100) : conf.key_cache_size_in_mb;
        if (keyCacheSizeInMB < 0)
            // to avoid duplicating the error message
            throw new NumberFormatException();
    } catch (NumberFormatException e) {
        throw new ConfigurationException("key_cache_size_in_mb option was set incorrectly to '" + conf.key_cache_size_in_mb + "', supported values are <integer> >= 0.", false);
    }
    try {
        // if counter_cache_size_in_mb option was set to "auto" then size of the cache should be "min(2.5% of Heap (in MB), 50MB)"
        counterCacheSizeInMB = (conf.counter_cache_size_in_mb == null) ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.025 / 1024 / 1024)), 50) : conf.counter_cache_size_in_mb;
        if (counterCacheSizeInMB < 0)
            // to avoid duplicating the error message
            throw new NumberFormatException();
    } catch (NumberFormatException e) {
        throw new ConfigurationException("counter_cache_size_in_mb option was set incorrectly to '" + conf.counter_cache_size_in_mb + "', supported values are <integer> >= 0.", false);
    }
    // if set to empty/"auto" then use 5% of Heap size
    indexSummaryCapacityInMB = (conf.index_summary_capacity_in_mb == null) ? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024)) : conf.index_summary_capacity_in_mb;
    if (indexSummaryCapacityInMB < 0)
        throw new ConfigurationException("index_summary_capacity_in_mb option was set incorrectly to '" + conf.index_summary_capacity_in_mb + "', it should be a non-negative integer.", false);
    if (conf.encryption_options != null) {
        logger.warn("Please rename encryption_options as server_encryption_options in the yaml");
        //operate under the assumption that server_encryption_options is not set in yaml rather than both
        conf.server_encryption_options = conf.encryption_options;
    }
    if (conf.user_defined_function_fail_timeout < 0)
        throw new ConfigurationException("user_defined_function_fail_timeout must not be negative", false);
    if (conf.user_defined_function_warn_timeout < 0)
        throw new ConfigurationException("user_defined_function_warn_timeout must not be negative", false);
    if (conf.user_defined_function_fail_timeout < conf.user_defined_function_warn_timeout)
        throw new ConfigurationException("user_defined_function_warn_timeout must less than user_defined_function_fail_timeout", false);
    if (conf.max_mutation_size_in_kb == null)
        conf.max_mutation_size_in_kb = conf.commitlog_segment_size_in_mb * 1024 / 2;
    else if (conf.commitlog_segment_size_in_mb * 1024 < 2 * conf.max_mutation_size_in_kb)
        throw new ConfigurationException("commitlog_segment_size_in_mb must be at least twice the size of max_mutation_size_in_kb / 1024", false);
    // native transport encryption options
    if (conf.native_transport_port_ssl != null && conf.native_transport_port_ssl != conf.native_transport_port && !conf.client_encryption_options.enabled) {
        throw new ConfigurationException("Encryption must be enabled in client_encryption_options for native_transport_port_ssl", false);
    }
    if (conf.max_value_size_in_mb <= 0)
        throw new ConfigurationException("max_value_size_in_mb must be positive", false);
    switch(conf.disk_optimization_strategy) {
        case ssd:
            diskOptimizationStrategy = new SsdDiskOptimizationStrategy(conf.disk_optimization_page_cross_chance);
            break;
        case spinning:
            diskOptimizationStrategy = new SpinningDiskOptimizationStrategy();
            break;
    }
    try {
        ParameterizedClass strategy = conf.back_pressure_strategy != null ? conf.back_pressure_strategy : RateBasedBackPressure.withDefaultParams();
        Class<?> clazz = Class.forName(strategy.class_name);
        if (!BackPressureStrategy.class.isAssignableFrom(clazz))
            throw new ConfigurationException(strategy + " is not an instance of " + BackPressureStrategy.class.getCanonicalName(), false);
        Constructor<?> ctor = clazz.getConstructor(Map.class);
        BackPressureStrategy instance = (BackPressureStrategy) ctor.newInstance(strategy.parameters);
        logger.info("Back-pressure is {} with strategy {}.", backPressureEnabled() ? "enabled" : "disabled", conf.back_pressure_strategy);
        backPressureStrategy = instance;
    } catch (ConfigurationException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new ConfigurationException("Error configuring back-pressure strategy: " + conf.back_pressure_strategy, ex);
    }
    if (conf.otc_coalescing_enough_coalesced_messages > 128)
        throw new ConfigurationException("otc_coalescing_enough_coalesced_messages must be smaller than 128", false);
    if (conf.otc_coalescing_enough_coalesced_messages <= 0)
        throw new ConfigurationException("otc_coalescing_enough_coalesced_messages must be positive", false);
}
Also used : SpinningDiskOptimizationStrategy(org.apache.cassandra.io.util.SpinningDiskOptimizationStrategy) IOException(java.io.IOException) NoSuchFileException(java.nio.file.NoSuchFileException) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) SsdDiskOptimizationStrategy(org.apache.cassandra.io.util.SsdDiskOptimizationStrategy) BackPressureStrategy(org.apache.cassandra.net.BackPressureStrategy)
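
Most of the checks above follow one pattern: validate a single yaml field and, on failure, throw ConfigurationException via the two-argument constructor with logStackTrace set to false, so user errors are reported without a stack trace. Below is a minimal, self-contained sketch of that pattern; the ConfigChecks class, the field names, and the requirePositive helper are hypothetical and not part of DatabaseDescriptor.

import org.apache.cassandra.exceptions.ConfigurationException;

public final class ConfigChecks {
    // Hypothetical helper mirroring the validation style of applySimpleConfig().
    static int requirePositive(String name, int value) throws ConfigurationException {
        if (value <= 0)
            // 'false' suppresses the stack trace: this is a user configuration error, not a bug.
            throw new ConfigurationException(name + " must be positive, but was " + value, false);
        return value;
    }

    public static void main(String[] args) throws ConfigurationException {
        requirePositive("concurrent_reads", 32);      // passes
        requirePositive("max_value_size_in_mb", -1);  // throws ConfigurationException
    }
}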

Example 13 with ConfigurationException

Use of org.apache.cassandra.exceptions.ConfigurationException in project cassandra by apache.

From the class DatabaseDescriptor, method getNetworkInterfaceAddress.

private static InetAddress getNetworkInterfaceAddress(String intf, String configName, boolean preferIPv6) throws ConfigurationException {
    try {
        NetworkInterface ni = NetworkInterface.getByName(intf);
        if (ni == null)
            throw new ConfigurationException("Configured " + configName + " \"" + intf + "\" could not be found", false);
        Enumeration<InetAddress> addrs = ni.getInetAddresses();
        if (!addrs.hasMoreElements())
            throw new ConfigurationException("Configured " + configName + " \"" + intf + "\" was found, but had no addresses", false);
        /*
         * Try to return the first address of the preferred type, otherwise return the first address.
         */
        InetAddress retval = null;
        while (addrs.hasMoreElements()) {
            InetAddress temp = addrs.nextElement();
            if (preferIPv6 && temp instanceof Inet6Address)
                return temp;
            if (!preferIPv6 && temp instanceof Inet4Address)
                return temp;
            if (retval == null)
                retval = temp;
        }
        return retval;
    } catch (SocketException e) {
        throw new ConfigurationException("Configured " + configName + " \"" + intf + "\" caused an exception", e);
    }
}
Also used : ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException)
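
The lookup above uses only the standard java.net API; the ConfigurationException wrapping is what turns a misconfigured interface name into a configuration error rather than a raw SocketException. The following standalone sketch isolates the same address-selection loop without the Cassandra wrapper; the interface name "eth0" is a hypothetical example.

import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;

public final class InterfaceAddressDemo {
    public static void main(String[] args) throws SocketException {
        NetworkInterface ni = NetworkInterface.getByName("eth0"); // hypothetical interface name
        if (ni == null)
            throw new IllegalArgumentException("interface not found");
        boolean preferIPv6 = false;
        InetAddress fallback = null;
        for (Enumeration<InetAddress> addrs = ni.getInetAddresses(); addrs.hasMoreElements(); ) {
            InetAddress a = addrs.nextElement();
            // Return the first address of the preferred family; otherwise remember the first address seen.
            if (preferIPv6 ? a instanceof Inet6Address : a instanceof Inet4Address) {
                System.out.println("preferred: " + a);
                return;
            }
            if (fallback == null)
                fallback = a;
        }
        System.out.println("fallback: " + fallback);
    }
}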

Example 14 with ConfigurationException

Use of org.apache.cassandra.exceptions.ConfigurationException in project cassandra by apache.

From the class YamlConfigurationLoader, method getStorageConfigURL.

/**
 * Inspect the classpath to find the storage configuration file.
 */
private static URL getStorageConfigURL() throws ConfigurationException {
    String configUrl = System.getProperty("cassandra.config");
    if (configUrl == null)
        configUrl = DEFAULT_CONFIGURATION;
    URL url;
    try {
        url = new URL(configUrl);
        // catches well-formed but bogus URLs
        url.openStream().close();
    } catch (Exception e) {
        ClassLoader loader = DatabaseDescriptor.class.getClassLoader();
        url = loader.getResource(configUrl);
        if (url == null) {
            String required = "file:" + File.separator + File.separator;
            if (!configUrl.startsWith(required))
                throw new ConfigurationException(String.format("Expecting URI in variable: [cassandra.config]. Found[%s]. Please prefix the file with [%s%s] for local " + "files and [%s<server>%s] for remote files. If you are executing this from an external tool, it needs " + "to set Config.setClientMode(true) to avoid loading configuration.", configUrl, required, File.separator, required, File.separator));
            throw new ConfigurationException("Cannot locate " + configUrl + ".  If this is a local file, please confirm you've provided " + required + File.separator + " as a URI prefix.");
        }
    }
    logger.info("Configuration location: {}", url);
    return url;
}
Also used : ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) URL(java.net.URL) IOException(java.io.IOException) IntrospectionException(java.beans.IntrospectionException) YAMLException(org.yaml.snakeyaml.error.YAMLException)
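
The loader first tries the cassandra.config system property as a URL and only then falls back to a classpath lookup, which is why the error messages mention both the "file:" prefix and the classpath. The standalone sketch below reproduces that resolution order with plain Java only; the class name and the default file name are illustrative, not Cassandra code.

import java.net.URL;

public final class ConfigUrlDemo {
    public static void main(String[] args) throws Exception {
        String configUrl = System.getProperty("cassandra.config", "cassandra.yaml");
        URL url;
        try {
            url = new URL(configUrl);
            url.openStream().close(); // catches well-formed but bogus URLs
        } catch (Exception e) {
            // Fall back to the classpath, as getStorageConfigURL() does.
            url = ConfigUrlDemo.class.getClassLoader().getResource(configUrl);
            if (url == null)
                throw new IllegalStateException("Cannot locate " + configUrl
                        + "; use a file:// URI or put the file on the classpath");
        }
        System.out.println("Configuration location: " + url);
    }
}

Running with -Dcassandra.config=file:///etc/cassandra/cassandra.yaml (an illustrative path) takes the URL branch; an unqualified name falls through to the classpath lookup.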

Example 15 with ConfigurationException

Use of org.apache.cassandra.exceptions.ConfigurationException in project cassandra by apache.

From the class DatabaseDescriptor, method createAllDirectories.

/**
 * Creates all storage-related directories.
 */
public static void createAllDirectories() {
    try {
        if (conf.data_file_directories.length == 0)
            throw new ConfigurationException("At least one DataFileDirectory must be specified", false);
        for (String dataFileDirectory : conf.data_file_directories) FileUtils.createDirectory(dataFileDirectory);
        if (conf.commitlog_directory == null)
            throw new ConfigurationException("commitlog_directory must be specified", false);
        FileUtils.createDirectory(conf.commitlog_directory);
        if (conf.hints_directory == null)
            throw new ConfigurationException("hints_directory must be specified", false);
        FileUtils.createDirectory(conf.hints_directory);
        if (conf.saved_caches_directory == null)
            throw new ConfigurationException("saved_caches_directory must be specified", false);
        FileUtils.createDirectory(conf.saved_caches_directory);
        if (conf.cdc_enabled) {
            if (conf.cdc_raw_directory == null)
                throw new ConfigurationException("cdc_raw_directory must be specified", false);
            FileUtils.createDirectory(conf.cdc_raw_directory);
        }
    } catch (ConfigurationException e) {
        throw new IllegalArgumentException("Bad configuration; unable to start server: " + e.getMessage());
    } catch (FSWriteError e) {
        throw new IllegalStateException(e.getCause().getMessage() + "; unable to start server");
    }
}
Also used : ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) FSWriteError(org.apache.cassandra.io.FSWriteError)
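
Note the translation at the catch sites: a ConfigurationException raised while validating directories is rethrown as IllegalArgumentException so the node aborts with a plain message, while filesystem failures (FSWriteError) surface as IllegalStateException. The sketch below shows the same validate-create-translate pattern using only standard Java; the paths and class name are hypothetical.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public final class CreateDirsDemo {
    public static void main(String[] args) {
        List<Path> required = List.of(Path.of("/var/lib/demo/data"),        // hypothetical paths
                                      Path.of("/var/lib/demo/commitlog"));
        if (required.isEmpty())
            // Configuration-level problem: abort with a plain message, mirroring the ConfigurationException branch.
            throw new IllegalArgumentException("Bad configuration; at least one data directory must be specified");
        try {
            for (Path dir : required)
                Files.createDirectories(dir); // analogous to FileUtils.createDirectory
        } catch (IOException e) {
            // Filesystem-level failure: abort startup, mirroring the FSWriteError branch.
            throw new IllegalStateException(e.getMessage() + "; unable to start server", e);
        }
    }
}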

Aggregations

ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException): 61
IOException (java.io.IOException): 11
URL (java.net.URL): 5
InvocationTargetException (java.lang.reflect.InvocationTargetException): 4
DataInputStream (java.io.DataInputStream): 3
File (java.io.File): 3
HttpURLConnection (java.net.HttpURLConnection): 3
InetAddress (java.net.InetAddress): 3
UnknownHostException (java.net.UnknownHostException): 3
HashMap (java.util.HashMap): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 2
FilterInputStream (java.io.FilterInputStream): 2
InputStream (java.io.InputStream): 2
NoSuchFileException (java.nio.file.NoSuchFileException): 2
Map (java.util.Map): 2
CFMetaData (org.apache.cassandra.config.CFMetaData): 2
KSMetaData (org.apache.cassandra.config.KSMetaData): 2
StartupException (org.apache.cassandra.exceptions.StartupException): 2
CorruptSSTableException (org.apache.cassandra.io.sstable.CorruptSSTableException): 2
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 2