
Example 6 with ConfigurationException

Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd.

In class CassandraStorage, the method getDefaultMarshallers:

// Parse the comparator, default validator, and key validator from the CfDef,
// wrapping any ConfigurationException in an IOException for the caller.
private List<AbstractType> getDefaultMarshallers(CfDef cfDef) throws IOException {
    ArrayList<AbstractType> marshallers = new ArrayList<AbstractType>();
    AbstractType comparator = null;
    AbstractType default_validator = null;
    AbstractType key_validator = null;
    try {
        comparator = TypeParser.parse(cfDef.getComparator_type());
        default_validator = TypeParser.parse(cfDef.getDefault_validation_class());
        key_validator = TypeParser.parse(cfDef.getKey_validation_class());
    } catch (ConfigurationException e) {
        throw new IOException(e);
    }
    marshallers.add(comparator);
    marshallers.add(default_validator);
    marshallers.add(key_validator);
    return marshallers;
}
Also used: ConfigurationException (org.apache.cassandra.config.ConfigurationException), IOException (java.io.IOException)
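
The IOException wrapping exists because TypeParser.parse rejects malformed or unknown type strings with a ConfigurationException. A minimal standalone sketch of that behavior, assuming the eiger-era TypeParser API; the type strings here are illustrative, not from the source:

import org.apache.cassandra.config.ConfigurationException;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.TypeParser;

// Minimal sketch, not from the source: TypeParser.parse turns a schema type
// string into a marshaller and throws ConfigurationException for unknown
// types, which is why getDefaultMarshallers must catch and wrap it.
public class TypeParserSketch {
    public static void main(String[] args) {
        try {
            AbstractType utf8 = TypeParser.parse("org.apache.cassandra.db.marshal.UTF8Type");
            System.out.println("parsed: " + utf8);
            TypeParser.parse("NoSuchType"); // expected to throw
        } catch (ConfigurationException e) {
            System.err.println("bad type string: " + e.getMessage());
        }
    }
}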

Example 7 with ConfigurationException

Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd.

In class AbstractCassandraDaemon, the method setup:

/**
 * This is a hook for concrete daemons to initialize themselves suitably.
 *
 * Subclasses should override this to finish the job (listening on ports, etc.)
 *
 * @throws IOException
 */
protected void setup() throws IOException {
    logger.info("JVM vendor/version: {}/{}", System.getProperty("java.vm.name"), System.getProperty("java.version"));
    logger.info("Heap size: {}/{}", Runtime.getRuntime().totalMemory(), Runtime.getRuntime().maxMemory());
    logger.info("Classpath: {}", System.getProperty("java.class.path"));
    CLibrary.tryMlockall();
    listenPort = DatabaseDescriptor.getRpcPort();
    listenAddr = DatabaseDescriptor.getRpcAddress();
    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {

        public void uncaughtException(Thread t, Throwable e) {
            exceptions.incrementAndGet();
            logger.error("Fatal exception in thread " + t, e);
            for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
                // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                if (e2 instanceof OutOfMemoryError)
                    System.exit(100);
            }
        }
    });
    // check all directories (data, commitlog, saved caches) for existence and permissions
    Iterable<String> dirs = Iterables.concat(
            Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
            Arrays.asList(DatabaseDescriptor.getCommitLogLocation(),
                          DatabaseDescriptor.getSavedCachesLocation()));
    for (String dataDir : dirs) {
        logger.debug("Checking directory {}", dataDir);
        File dir = new File(dataDir);
        if (dir.exists())
            assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute() : String.format("Directory %s is not accessible.", dataDir);
    }
    // Migrate sstables from pre-#2749 to the correct location
    if (Directories.sstablesNeedsMigration())
        Directories.migrateSSTables();
    if (CacheService.instance == null) // should never happen
        throw new RuntimeException("Failed to initialize Cache Service.");
    // scrub the system table first; the list of the remaining tables can't be
    // loaded until the system table is opened.
    for (CFMetaData cfm : Schema.instance.getTableMetaData(Table.SYSTEM_TABLE).values())
        ColumnFamilyStore.scrubDataDirectories(Table.SYSTEM_TABLE, cfm.cfName);
    try {
        SystemTable.checkHealth();
    } catch (ConfigurationException e) {
        logger.error("Fatal exception during initialization", e);
        System.exit(100);
    }
    // load keyspace descriptions.
    try {
        DatabaseDescriptor.loadSchemas();
    } catch (IOException e) {
        logger.error("Fatal exception during initialization", e);
        System.exit(100);
    }
    // clean up debris in the rest of the tables
    for (String table : Schema.instance.getTables()) {
        for (CFMetaData cfm : Schema.instance.getTableMetaData(table).values()) {
            ColumnFamilyStore.scrubDataDirectories(table, cfm.cfName);
        }
    }
    // initialize keyspaces
    for (String table : Schema.instance.getTables()) {
        if (logger.isDebugEnabled())
            logger.debug("opening keyspace " + table);
        Table.open(table);
    }
    if (CacheService.instance.keyCache.size() > 0)
        logger.info("completed pre-loading ({} keys) key cache.", CacheService.instance.keyCache.size());
    if (CacheService.instance.rowCache.size() > 0)
        logger.info("completed pre-loading ({} keys) row cache.", CacheService.instance.rowCache.size());
    try {
        GCInspector.instance.start();
    } catch (Throwable t) {
        logger.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }
    // replay the log if necessary
    CommitLog.instance.recover();
    // check to see if CL.recovery modified the lastMigrationId. if it did, we need to reapply
    // migrations. this isn't the same as merely reloading the schema (which wouldn't perform
    // file deletion after a DROP). the solution is to read those migrations from disk and apply them.
    UUID currentMigration = Schema.instance.getVersion();
    UUID lastMigration = Migration.getLastMigrationId();
    if ((lastMigration != null) && (lastMigration.timestamp() > currentMigration.timestamp())) {
        Gossiper.instance.maybeInitializeLocalState(SystemTable.incrementAndGetGeneration());
        MigrationManager.applyMigrations(currentMigration, lastMigration);
    }
    SystemTable.finishStartup();
    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        logger.error("Fatal configuration error", e);
        System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server.  See log for stacktrace.");
        System.exit(1);
    }
    Mx4jTool.maybeLoad();
}
Also used: ConfigurationException (org.apache.cassandra.config.ConfigurationException), CFMetaData (org.apache.cassandra.config.CFMetaData), IOException (java.io.IOException), UUID (java.util.UUID), File (java.io.File)
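
Note the two distinct exit codes: configuration errors exit with status 1, while health-check failures and out-of-memory conditions exit with 100. A standalone sketch of this fail-fast pattern; the failing initializer is a simulated stand-in, not from the source:

import org.apache.cassandra.config.ConfigurationException;

// Standalone sketch, not from the source: setup() treats startup-time
// configuration errors as unrecoverable, reporting them and exiting rather
// than continuing with a half-initialized server.
public class FailFastSketch {
    public static void main(String[] args) {
        try {
            initServer(); // stand-in for StorageService.instance.initServer()
        } catch (ConfigurationException e) {
            System.err.println(e.getMessage()
                    + "\nFatal configuration error; unable to start server. See log for stacktrace.");
            System.exit(1); // config errors exit 1; health/OOM failures above use 100
        }
    }

    // hypothetical stand-in that simulates a configuration failure
    private static void initServer() throws ConfigurationException {
        throw new ConfigurationException("listen_address is not set to a reachable interface");
    }
}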

Example 8 with ConfigurationException

Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd.

In class MessagingService, the method getServerSocket:

private List<ServerSocket> getServerSocket(InetAddress localEp) throws IOException, ConfigurationException {
    final List<ServerSocket> ss = new ArrayList<ServerSocket>();
    if (DatabaseDescriptor.getEncryptionOptions().internode_encryption != EncryptionOptions.InternodeEncryption.none) {
        ss.add(SSLFactory.getServerSocket(DatabaseDescriptor.getEncryptionOptions(), localEp, DatabaseDescriptor.getSSLStoragePort()));
        // setReuseAddress happens in the factory.
        logger_.info("Starting Encrypted Messaging Service on SSL port {}", DatabaseDescriptor.getSSLStoragePort());
    }
    ServerSocketChannel serverChannel = ServerSocketChannel.open();
    ServerSocket socket = serverChannel.socket();
    socket.setReuseAddress(true);
    InetSocketAddress address = new InetSocketAddress(localEp, DatabaseDescriptor.getStoragePort());
    try {
        socket.bind(address);
    } catch (BindException e) {
        if (e.getMessage().contains("in use"))
            throw new ConfigurationException(address + " is in use by another process.  Change listen_address:storage_port in cassandra.yaml to values that do not conflict with other services");
        else if (e.getMessage().contains("Cannot assign requested address"))
            throw new ConfigurationException("Unable to bind to address " + address + ". Set listen_address in cassandra.yaml to an interface you can bind to, e.g., your private IP address on EC2");
        else
            throw e;
    }
    logger_.info("Starting Messaging Service on port {}", DatabaseDescriptor.getStoragePort());
    ss.add(socket);
    return ss;
}
Also used: ConfigurationException (org.apache.cassandra.config.ConfigurationException), ServerSocketChannel (java.nio.channels.ServerSocketChannel)
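
The BindException-to-ConfigurationException translation keys off the platform's error message. A standalone, JDK-only sketch reproducing the "in use" case by binding the same port twice; the address and port are illustrative:

import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

// Standalone sketch, not from the source: binding a second socket to an
// occupied port raises the "Address already in use" BindException whose
// message getServerSocket matches before rethrowing as ConfigurationException.
public class BindSketch {
    public static void main(String[] args) throws Exception {
        ServerSocket first = new ServerSocket();
        first.setReuseAddress(true);
        first.bind(new InetSocketAddress("127.0.0.1", 7000)); // illustrative port
        ServerSocket second = new ServerSocket();
        try {
            second.bind(new InetSocketAddress("127.0.0.1", 7000));
        } catch (BindException e) {
            System.err.println("bind failed: " + e.getMessage()); // message contains "in use"
        } finally {
            second.close();
            first.close();
        }
    }
}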

Example 9 with ConfigurationException

Use of org.apache.cassandra.config.ConfigurationException in project eiger by wlloyd.

In class SystemTable, the method checkHealth:

/**
 * One of three things will happen if you try to read the system table:
 * 1. files are present and you can read them: great
 * 2. no files are there: great (new node is assumed)
 * 3. files are present but you can't read them: bad
 * @throws ConfigurationException
 */
public static void checkHealth() throws ConfigurationException, IOException {
    Table table = null;
    try {
        table = Table.open(Table.SYSTEM_TABLE);
    } catch (AssertionError err) {
        // this happens when a user switches from OPP (OrderPreservingPartitioner) to RP (RandomPartitioner)
        ConfigurationException ex = new ConfigurationException("Could not read system table!");
        ex.initCause(err);
        throw ex;
    }
    SortedSet<ByteBuffer> cols = new TreeSet<ByteBuffer>(BytesType.instance);
    cols.add(CLUSTERNAME);
    QueryFilter filter = QueryFilter.getNamesFilter(decorate(LOCATION_KEY), new QueryPath(STATUS_CF), cols);
    ColumnFamily cf = table.getColumnFamilyStore(STATUS_CF).getColumnFamily(filter);
    if (cf == null) {
        // this is a brand new node
        ColumnFamilyStore cfs = table.getColumnFamilyStore(STATUS_CF);
        if (!cfs.getSSTables().isEmpty())
            throw new ConfigurationException("Found system table files, but they couldn't be loaded!");
        // no system files.  this is a new node.
        RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
        cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.STATUS_CF);
        cf.addColumn(new Column(CLUSTERNAME, ByteBufferUtil.bytes(DatabaseDescriptor.getClusterName()), LamportClock.getVersion()));
        rm.add(cf);
        rm.apply();
        return;
    }
    IColumn clusterCol = cf.getColumn(CLUSTERNAME);
    assert clusterCol != null;
    String savedClusterName = ByteBufferUtil.string(clusterCol.value());
    if (!DatabaseDescriptor.getClusterName().equals(savedClusterName))
        throw new ConfigurationException("Saved cluster name " + savedClusterName + " != configured name " + DatabaseDescriptor.getClusterName());
}
Also used: ByteBuffer (java.nio.ByteBuffer), QueryPath (org.apache.cassandra.db.filter.QueryPath), QueryFilter (org.apache.cassandra.db.filter.QueryFilter), ConfigurationException (org.apache.cassandra.config.ConfigurationException)
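
Note the initCause() idiom: the cause here is an AssertionError, i.e. an Error rather than an Exception, so it is attached after construction instead of being passed through a constructor. A minimal standalone sketch of the same chaining, with the triggering error simulated:

import org.apache.cassandra.config.ConfigurationException;

// Minimal sketch, not from the source: chain an Error as the cause of a
// ConfigurationException via initCause(), mirroring checkHealth() above.
public class CauseChainSketch {
    public static void main(String[] args) {
        try {
            throw new AssertionError("simulated partitioner mismatch"); // demo only
        } catch (AssertionError err) {
            ConfigurationException ex = new ConfigurationException("Could not read system table!");
            ex.initCause(err);
            ex.printStackTrace(); // prints "Caused by: java.lang.AssertionError: ..."
        }
    }
}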

Example 10 with ConfigurationException

Use of org.apache.cassandra.config.ConfigurationException in project brisk by riptano.

In class SchemaManagerService, the method buildTable:

private Table buildTable(CfDef cfDef) {
    Table table = new Table();
    table.setDbName(cfDef.keyspace);
    table.setTableName(cfDef.name);
    table.setTableType(TableType.EXTERNAL_TABLE.toString());
    table.putToParameters("EXTERNAL", "TRUE");
    table.putToParameters("cassandra.ks.name", cfDef.keyspace);
    table.putToParameters("cassandra.cf.name", cfDef.name);
    table.putToParameters("cassandra.slice.predicate.size", "100");
    table.putToParameters("storage_handler", "org.apache.hadoop.hive.cassandra.CassandraStorageHandler");
    table.setPartitionKeys(new ArrayList<FieldSchema>());
    // cassandra.column.mapping
    StorageDescriptor sd = new StorageDescriptor();
    sd.setInputFormat("org.apache.hadoop.hive.cassandra.input.HiveCassandraStandardColumnInputFormat");
    sd.setOutputFormat("org.apache.hadoop.hive.cassandra.output.HiveCassandraOutputFormat");
    sd.setParameters(new HashMap<String, String>());
    try {
        sd.setLocation(warehouse.getDefaultTablePath(cfDef.keyspace, cfDef.name).toString());
    } catch (MetaException me) {
        log.error("could not build path information correctly", me);
    }
    SerDeInfo serde = new SerDeInfo();
    serde.setSerializationLib("org.apache.hadoop.hive.cassandra.serde.CassandraColumnSerDe");
    serde.putToParameters("serialization.format", "1");
    StringBuilder mapping = new StringBuilder();
    StringBuilder validator = new StringBuilder();
    try {
        CFMetaData cfm = CFMetaData.fromThrift(cfDef);
        AbstractType keyValidator = cfDef.key_validation_class != null ? TypeParser.parse(cfDef.key_validation_class) : BytesType.instance;
        addTypeToStorageDescriptor(sd, ByteBufferUtil.bytes("row_key"), keyValidator, keyValidator);
        mapping.append(":key");
        validator.append(keyValidator.toString());
        for (ColumnDef column : cfDef.getColumn_metadata()) {
            addTypeToStorageDescriptor(sd, column.name, TypeParser.parse(cfDef.comparator_type), TypeParser.parse(column.getValidation_class()));
            try {
                mapping.append(",");
                mapping.append(ByteBufferUtil.string(column.name));
                validator.append(",");
                validator.append(column.getValidation_class());
            } catch (CharacterCodingException e) {
                log.error("could not build column mapping correctly", e);
            }
        }
        serde.putToParameters("cassandra.columns.mapping", mapping.toString());
        serde.putToParameters("cassandra.cf.validatorType", validator.toString());
        sd.setSerdeInfo(serde);
    } catch (ConfigurationException ce) {
        throw new CassandraHiveMetaStoreException("Problem converting comparator type: " + cfDef.comparator_type, ce);
    } catch (InvalidRequestException ire) {
        throw new CassandraHiveMetaStoreException("Problem parsing CfDef: " + cfDef.name, ire);
    }
    table.setSd(sd);
    if (log.isDebugEnabled())
        log.debug("constructed table for CF:{} {}", cfDef.name, table.toString());
    return table;
}
Also used: Table (org.apache.hadoop.hive.metastore.api.Table), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), ColumnDef (org.apache.cassandra.thrift.ColumnDef), CharacterCodingException (java.nio.charset.CharacterCodingException), ConfigurationException (org.apache.cassandra.config.ConfigurationException), AbstractType (org.apache.cassandra.db.marshal.AbstractType), CFMetaData (org.apache.cassandra.config.CFMetaData), InvalidRequestException (org.apache.cassandra.thrift.InvalidRequestException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
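
The two serde parameters are built positionally: the mapping always starts with ":key" and the validator list with the key validator, followed by one entry per declared column. A JDK-only sketch of the strings produced for a hypothetical column family with columns "name" and "age"; the column names and types are illustrative, not from the source:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch, not from the source: reproduces the mapping/validator
// strings buildTable() assembles for a hypothetical CF with two columns.
public class MappingSketch {
    public static void main(String[] args) {
        Map<String, String> columns = new LinkedHashMap<String, String>();
        columns.put("name", "org.apache.cassandra.db.marshal.UTF8Type");
        columns.put("age", "org.apache.cassandra.db.marshal.LongType");

        StringBuilder mapping = new StringBuilder(":key");
        // key validator comes first; BytesType is the fallback buildTable() uses
        StringBuilder validator = new StringBuilder("org.apache.cassandra.db.marshal.BytesType");
        for (Map.Entry<String, String> col : columns.entrySet()) {
            mapping.append(',').append(col.getKey());
            validator.append(',').append(col.getValue());
        }
        System.out.println(mapping);   // :key,name,age
        System.out.println(validator); // BytesType,UTF8Type,LongType (fully qualified)
    }
}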

Aggregations

ConfigurationException (org.apache.cassandra.config.ConfigurationException): 26
IOException (java.io.IOException): 10
ByteBuffer (java.nio.ByteBuffer): 6
HashMap (java.util.HashMap): 4
InvalidRequestException (org.apache.cassandra.thrift.InvalidRequestException): 4
Map (java.util.Map): 3
CFMetaData (org.apache.cassandra.config.CFMetaData): 3
AbstractType (org.apache.cassandra.db.marshal.AbstractType): 3
File (java.io.File): 2
URL (java.net.URL): 2
ArrayList (java.util.ArrayList): 2
UUID (java.util.UUID): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
Future (java.util.concurrent.Future): 2
QueryPath (org.apache.cassandra.db.filter.QueryPath): 2
Migration (org.apache.cassandra.db.migration.Migration): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
DataInputStream (java.io.DataInputStream): 1
IOError (java.io.IOError): 1
InputStream (java.io.InputStream): 1