Search in sources:

Example 1 with ClientContext

Use of org.apache.cassandra.client.ClientContext in project eiger by wlloyd.

From class CassandraServer, method multiget_count:

@Override
public MultigetCountResult multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, long lts) throws InvalidRequestException, UnavailableException, TimedOutException {
    LamportClock.updateTime(lts);
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, CountWithMetadata> results = new HashMap<ByteBuffer, CountWithMetadata>();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet()) {
        //excludes deleted columns from the count; calculates dependencies (including deleted columns), evt, and lvt
        //use a clientContext to simplify calculating deps
        ClientContext countContext = new ClientContext();
        long maxEarliestValidTime = Long.MIN_VALUE;
        long minLatestValidTime = Long.MAX_VALUE;
        for (Iterator<ColumnOrSuperColumn> cosc_it = cf.getValue().iterator(); cosc_it.hasNext(); ) {
            ColumnOrSuperColumn cosc = cosc_it.next();
            EvtAndLvt evtAndLvt = ColumnOrSuperColumnHelper.extractEvtAndLvt(cosc);
            maxEarliestValidTime = Math.max(maxEarliestValidTime, evtAndLvt.getEarliestValidTime());
            minLatestValidTime = Math.min(minLatestValidTime, evtAndLvt.getLatestValidTime());
            try {
                countContext.addDep(cf.getKey(), cosc);
            } catch (NotFoundException nfe) {
                cosc_it.remove();
            }
        }
        results.put(cf.getKey(), new CountWithMetadata(cf.getValue().size(), maxEarliestValidTime, minLatestValidTime, countContext.getDeps()));
    }
    if (logger.isTraceEnabled()) {
        logger.trace("multiget_count({}, {}, {}, {}, {}) = {}", new Object[] { ByteBufferUtil.listBytesToHex(keys), column_parent, predicate, consistency_level, lts, results });
    }
    return new MultigetCountResult(results, LamportClock.sendTimestamp());
}
Also used : EvtAndLvt(org.apache.cassandra.utils.ColumnOrSuperColumnHelper.EvtAndLvt) ClientContext(org.apache.cassandra.client.ClientContext) ByteBuffer(java.nio.ByteBuffer)
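
As a usage note, here is a minimal caller-side sketch (not from the eiger sources; countLiveColumns is a hypothetical name) of the same per-row pattern multiget_count applies: every ColumnOrSuperColumn is fed to addDep, and a NotFoundException marks a deleted column that should not be counted. It assumes the same Thrift types as the method above and that getDeps() returns the dependencies accumulated so far.

//Hypothetical helper mirroring the loop in multiget_count; assumes ClientContext.addDep
//records a dependency and throws NotFoundException when the column is deleted.
private static int countLiveColumns(ByteBuffer key, List<ColumnOrSuperColumn> row, ClientContext context) {
    int live = 0;
    for (ColumnOrSuperColumn cosc : row) {
        try {
            context.addDep(key, cosc);
            //non-deleted column: it counts toward the result
            live++;
        } catch (NotFoundException deleted) {
            //deleted column: excluded from the count, but (per the comment in multiget_count)
            //its dependency has still been recorded by addDep
        }
    }
    return live;
}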

Example 2 with ClientContext

Use of org.apache.cassandra.client.ClientContext in project eiger by wlloyd.

From class CassandraServer, method add:

// counter methods
@Override
public WriteResult add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level, Set<Dep> deps, long lts) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
    LamportClock.updateTime(lts);
    logger.debug("add");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);
    String keyspace = state().getKeyspace();
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateCommutativeForWrite(metadata, consistency_level);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    // SuperColumn field is usually optional, but not when we're adding
    if (metadata.cfType == ColumnFamilyType.Super && column_parent.super_column == null) {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
    Set<Dependency> dependencies = new HashSet<Dependency>();
    for (Dep dep : deps) {
        dependencies.add(new Dependency(dep));
    }
    //add operations also need a dependency on the previous value for this datacenter
    try {
        ColumnOrSuperColumn cosc = this.internal_get(key, new ColumnPath(column_parent.column_family).setSuper_column(column_parent.super_column).setColumn(column.name), ConsistencyLevel.ONE);
        if (cosc.isSetColumn()) {
            //a plain column here means the previous value was deleted; no dependency is needed
            //on that delete because clients have to wait until it reaches all nodes before resurrecting it
            assert cosc.column.isSetDeleted_time();
        } else {
            ClientContext tmpContext = new ClientContext();
            tmpContext.addDep(key, cosc);
            if (tmpContext.getDeps().size() > 0) {
                Dependency newDep = new Dependency(tmpContext.getDeps().iterator().next());
                dependencies.add(newDep);
                logger.debug("Adding a dependency on the previous value from this dc: " + newDep);
            }
        }
    } catch (NotFoundException e1) {
        //this is fine, it's the first add for this datacenter, no dep needed
    }
    RowMutation rm = new RowMutation(keyspace, key, dependencies);
    long timestamp = LamportClock.getVersion();
    try {
        rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, timestamp, timestamp, null);
    } catch (MarshalException e) {
        throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, consistency_level)));
    if (logger.isTraceEnabled()) {
        logger.trace("add({}, {}, {}, {}, {}, {}) = {}", new Object[] { ByteBufferUtil.bytesToHex(key), column_parent, column, consistency_level, deps, lts, timestamp });
    }
    return new WriteResult(timestamp, LamportClock.sendTimestamp());
}
Also used : MarshalException(org.apache.cassandra.db.marshal.MarshalException) ClientContext(org.apache.cassandra.client.ClientContext) QueryPath(org.apache.cassandra.db.filter.QueryPath)
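
For orientation, a hedged sketch of how a caller might supply the deps argument that add consumes. It assumes the generated Cassandra.Client mirrors the server signature above and that ClientContext.getDeps() yields the Set<Dep> it expects; the column family "Counter1" and column "hits" are made-up names, and the keyspace is assumed to be set already.

//Hypothetical caller-side sketch (illustrative names, not from the eiger sources).
static WriteResult incrementWithDeps(Cassandra.Client client, ByteBuffer key, ClientContext context)
        throws InvalidRequestException, UnavailableException, TimedOutException, TException {
    CounterColumn increment = new CounterColumn().setName(ByteBufferUtil.bytes("hits")).setValue(1L);
    //pass the dependencies accumulated from earlier reads; as shown above, the server then
    //adds its own dependency on the previous counter value for this datacenter
    return client.add(key, new ColumnParent("Counter1"), increment,
            ConsistencyLevel.ONE, context.getDeps(), LamportClock.sendTimestamp());
}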

Example 3 with ClientContext

Use of org.apache.cassandra.client.ClientContext in project eiger by wlloyd.

From class CassandraServer, method get_count:

@Override
public GetCountResult get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, long lts) throws InvalidRequestException, UnavailableException, TimedOutException {
    LamportClock.updateTime(lts);
    logger.debug("get_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    Table table = Table.open(state().getKeyspace());
    ColumnFamilyStore cfs = table.getColumnFamilyStore(column_parent.column_family);
    if (predicate.column_names != null) {
        GetSliceResult result = get_slice(key, column_parent, predicate, consistency_level, LamportClock.NO_CLOCK_TICK);
        //we use a ClientContext to simplify determining dependencies
        //filter out deleted columns, but keep dependencies on them
        ClientContext countContext = new ClientContext();
        for (Iterator<ColumnOrSuperColumn> cosc_it = result.value.iterator(); cosc_it.hasNext(); ) {
            ColumnOrSuperColumn cosc = cosc_it.next();
            try {
                countContext.addDep(key, cosc);
            } catch (NotFoundException nfe) {
                cosc_it.remove();
            }
        }
        return new GetCountResult(result.value.size(), countContext.getDeps(), LamportClock.sendTimestamp());
    }
    int pageSize;
    // request by page if this is a large row
    if (cfs.getMeanColumns() > 0) {
        int averageColumnSize = (int) (cfs.getMeanRowSize() / cfs.getMeanColumns());
        pageSize = Math.min(COUNT_PAGE_SIZE, DatabaseDescriptor.getInMemoryCompactionLimit() / averageColumnSize);
        pageSize = Math.max(2, pageSize);
        logger.debug("average row column size is {}; using pageSize of {}", averageColumnSize, pageSize);
    } else {
        pageSize = COUNT_PAGE_SIZE;
    }
    int totalCount = 0;
    List<ColumnOrSuperColumn> columns;
    if (predicate.slice_range == null) {
        predicate.slice_range = new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);
    }
    //we use a ClientContext to simplify determining dependencies
    ClientContext countContext = new ClientContext();
    int requestedCount = predicate.slice_range.count;
    while (true) {
        predicate.slice_range.count = Math.min(pageSize, requestedCount);
        GetSliceResult result = get_slice(key, column_parent, predicate, consistency_level, LamportClock.NO_CLOCK_TICK);
        if (result.value.isEmpty())
            break;
        ColumnOrSuperColumn lastColumn = result.value.get(result.value.size() - 1);
        //filter out deleted columns, but keep dependencies on them
        boolean lastResultDeleted = false;
        for (Iterator<ColumnOrSuperColumn> cosc_it = result.value.iterator(); cosc_it.hasNext(); ) {
            ColumnOrSuperColumn cosc = cosc_it.next();
            try {
                countContext.addDep(key, cosc);
                lastResultDeleted = false;
            } catch (NotFoundException nfe) {
                cosc_it.remove();
                lastResultDeleted = true;
            }
        }
        columns = result.value;
        totalCount += columns.size();
        requestedCount -= columns.size();
        ByteBuffer lastName = lastColumn.isSetSuper_column() ? lastColumn.super_column.name : (lastColumn.isSetColumn() ? lastColumn.column.name : (lastColumn.isSetCounter_column() ? lastColumn.counter_column.name : lastColumn.counter_super_column.name));
        if ((requestedCount == 0) || ((columns.size() <= 1) && (lastName.equals(predicate.slice_range.start)))) {
            break;
        } else {
            predicate.slice_range.start = lastName;
            // remove the count for the column that starts the next slice (unless it's a deleted result)
            if (!lastResultDeleted) {
                totalCount--;
                requestedCount++;
            }
        }
    }
    if (logger.isTraceEnabled()) {
        logger.trace("get_count({}, {}, {}, {}, {}) = {}", new Object[] { ByteBufferUtil.bytesToHex(key), column_parent, predicate, consistency_level, lts, totalCount });
    }
    return new GetCountResult(totalCount, countContext.getDeps(), LamportClock.sendTimestamp());
}
Also used : ClientContext(org.apache.cassandra.client.ClientContext) ByteBuffer(java.nio.ByteBuffer)
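
To make the paging arithmetic concrete, suppose pageSize is 3 and the row holds five live columns a through e. The first slice returns a, b, c, so totalCount becomes 3; c becomes the start of the next slice, and because it was not deleted the count is decremented to 2 so that c is not counted twice. The second slice returns c, d, e (totalCount 5), and the third slice, starting at e, returns only e, so the columns.size() <= 1 check together with lastName equals start ends the loop at 5. When the boundary column was deleted, lastResultDeleted suppresses the decrement, since that column will not reappear as a live result on the following page.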

Example 4 with ClientContext

Use of org.apache.cassandra.client.ClientContext in project eiger by wlloyd.

From class EmbeddedCassandraServiceTest, method testEmbeddedCassandraService:

@Test
public void testEmbeddedCassandraService() throws AuthenticationException, AuthorizationException, InvalidRequestException, UnavailableException, TimedOutException, TException, NotFoundException, CharacterCodingException {
    Cassandra.Client client = getClient();
    ClientContext clientContext = new ClientContext();
    client.set_keyspace("Keyspace1", LamportClock.sendTimestamp());
    ByteBuffer key_user_id = ByteBufferUtil.bytes("1");
    long timestamp = System.currentTimeMillis();
    ColumnPath cp = new ColumnPath("Standard1");
    ColumnParent par = new ColumnParent("Standard1");
    cp.column = ByteBufferUtil.bytes("name");
    // insert
    client.insert(key_user_id, par, new Column(ByteBufferUtil.bytes("name")).setValue(ByteBufferUtil.bytes("Ran")).setTimestamp(timestamp), ConsistencyLevel.ONE, new HashSet<Dep>(), LamportClock.COPS_UNSUPPORTED);
    // read
    ColumnOrSuperColumn got = client.get(key_user_id, cp, ConsistencyLevel.ONE, LamportClock.COPS_UNSUPPORTED).value;
    // assert
    assertNotNull("Got a null ColumnOrSuperColumn", got);
    assertEquals("Ran", ByteBufferUtil.string(got.getColumn().value));
}
Also used : ClientContext(org.apache.cassandra.client.ClientContext) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
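
The test constructs clientContext but never exercises it; a minimal continuation (illustrative only, not part of the test) could feed the read result into it and pass the resulting dependencies to a second insert, assuming getDeps() returns the Set<Dep> that insert expects. The value "Ran2" is made up.

    //Hypothetical continuation of the test above; addDep may throw NotFoundException,
    //which the test method already declares.
    clientContext.addDep(key_user_id, got);
    client.insert(key_user_id, par,
            new Column(ByteBufferUtil.bytes("name")).setValue(ByteBufferUtil.bytes("Ran2")).setTimestamp(timestamp + 1),
            ConsistencyLevel.ONE, clientContext.getDeps(), LamportClock.COPS_UNSUPPORTED);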

Example 5 with ClientContext

Use of org.apache.cassandra.client.ClientContext in project eiger by wlloyd.

From class Stress, method main:

public static void main(String[] arguments) throws Exception {
    try {
        session = new Session(arguments);
    } catch (IllegalArgumentException e) {
        e.printStackTrace();
        printHelpMessage();
        return;
    }
    PrintStream outStream = session.getOutputStream();
    if (session.sendToDaemon != null) {
        Socket socket = new Socket(session.sendToDaemon, 2159);
        ObjectOutputStream out = new ObjectOutputStream(socket.getOutputStream());
        BufferedReader inp = new BufferedReader(new InputStreamReader(socket.getInputStream()));
        Runtime.getRuntime().addShutdownHook(new ShutDown(socket, out));
        out.writeObject(session);
        String line;
        try {
            while (!socket.isClosed() && (line = inp.readLine()) != null) {
                if (line.equals("END")) {
                    out.writeInt(1);
                    break;
                }
                outStream.println(line);
            }
        } catch (SocketException e) {
            if (!stopped)
                e.printStackTrace();
        }
        out.close();
        inp.close();
        socket.close();
    } else {
        // For now stress testing will use a single client context, this is
        // probably not what we really want.
        ClientContext clientContext = new ClientContext();
        new StressAction(session, outStream, clientContext).run();
    }
}
Also used : SocketException(java.net.SocketException) ClientContext(org.apache.cassandra.client.ClientContext) Socket(java.net.Socket)
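
A small illustration (hypothetical; resultA and resultB stand for ColumnOrSuperColumn values read by two different simulated clients) of why the shared context flagged in the comment is questionable, assuming getDeps() returns everything added so far:

//Hypothetical illustration only: with one shared ClientContext, dependencies from
//unrelated simulated clients accumulate together, so a later write by either client
//would declare dependencies on reads it never performed.
static void illustrateSharedContext(ColumnOrSuperColumn resultA, ColumnOrSuperColumn resultB)
        throws NotFoundException {
    ClientContext shared = new ClientContext();
    shared.addDep(ByteBufferUtil.bytes("key-a"), resultA); //read done by one simulated client
    shared.addDep(ByteBufferUtil.bytes("key-b"), resultB); //read done by another
    //shared.getDeps() now mixes both histories; per-client contexts would keep them separate
}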

Aggregations

ClientContext (org.apache.cassandra.client.ClientContext): 6
ByteBuffer (java.nio.ByteBuffer): 3
IOException (java.io.IOException): 1
InetAddress (java.net.InetAddress): 1
ServerSocket (java.net.ServerSocket): 1
Socket (java.net.Socket): 1
SocketException (java.net.SocketException): 1
QueryPath (org.apache.cassandra.db.filter.QueryPath): 1
MarshalException (org.apache.cassandra.db.marshal.MarshalException): 1
StressThread (org.apache.cassandra.stress.server.StressThread): 1
EvtAndLvt (org.apache.cassandra.utils.ColumnOrSuperColumnHelper.EvtAndLvt): 1
CommandLine (org.apache.commons.cli.CommandLine): 1
CommandLineParser (org.apache.commons.cli.CommandLineParser): 1
ParseException (org.apache.commons.cli.ParseException): 1
PosixParser (org.apache.commons.cli.PosixParser): 1
Test (org.junit.Test): 1