Use of com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException in project atlasdb by palantir.
From the class CqlKeyValueService, method getRangeWithPageCreator:
public <T> ClosableIterator<RowResult<T>> getRangeWithPageCreator(
        TableReference tableRef,
        RangeRequest rangeRequest,
        long timestamp,
        com.datastax.driver.core.ConsistencyLevel consistency,
        Supplier<ResultsExtractor<T>> resultsExtractor) {
    if (rangeRequest.isReverse()) {
        throw new UnsupportedOperationException();
    }
    if (rangeRequest.isEmptyRange()) {
        return ClosableIterators.wrap(ImmutableList.<RowResult<T>>of().iterator());
    }
    final int batchHint = rangeRequest.getBatchHint() == null ? 100 : rangeRequest.getBatchHint();
    final ColumnSelection selection = rangeRequest.getColumnNames().isEmpty()
            ? ColumnSelection.all()
            : ColumnSelection.create(rangeRequest.getColumnNames());
    final byte[] endExclusive = rangeRequest.getEndExclusive();

    // Page through the token range [start, end) in batches of batchHint rows.
    final StringBuilder bindQuery = new StringBuilder();
    bindQuery.append("SELECT * FROM " + getFullTableName(tableRef)
            + " WHERE token(" + fieldNameProvider.row() + ") >= token(?) ");
    if (endExclusive.length > 0) {
        bindQuery.append("AND token(" + fieldNameProvider.row() + ") < token(?) ");
    }
    bindQuery.append("LIMIT " + batchHint);
    final String getLastRowQuery = "SELECT * FROM " + getFullTableName(tableRef)
            + " WHERE " + fieldNameProvider.row() + " = ?";
    return ClosableIterators.wrap(
            new AbstractPagingIterable<RowResult<T>, TokenBackedBasicResultsPage<RowResult<T>, byte[]>>() {
        @Override
        protected TokenBackedBasicResultsPage<RowResult<T>, byte[]> getFirstPage() throws Exception {
            return getPage(rangeRequest.getStartInclusive());
        }

        @Override
        protected TokenBackedBasicResultsPage<RowResult<T>, byte[]> getNextPage(
                TokenBackedBasicResultsPage<RowResult<T>, byte[]> previous) throws Exception {
            return getPage(previous.getTokenForNextPage());
        }

        TokenBackedBasicResultsPage<RowResult<T>, byte[]> getPage(final byte[] startKey) throws Exception {
            BoundStatement boundStatement = getPreparedStatement(tableRef, bindQuery.toString(), session)
                    .setConsistencyLevel(consistency)
                    .bind();
            boundStatement.setBytes(0, ByteBuffer.wrap(startKey));
            if (endExclusive.length > 0) {
                boundStatement.setBytes(1, ByteBuffer.wrap(endExclusive));
            }
            ResultSet resultSet = session.execute(boundStatement);
            List<Row> rows = Lists.newArrayList(resultSet.all());
            cqlKeyValueServices.logTracedQuery(bindQuery.toString(), resultSet, session, cqlStatementCache.normalQuery);

            // Track the lexicographically largest row name on this page; it becomes
            // the token for the next page.
            byte[] maxRow = null;
            ResultsExtractor<T> extractor = resultsExtractor.get();
            for (Row row : rows) {
                byte[] rowName = getRowName(row);
                if (maxRow == null) {
                    maxRow = rowName;
                } else {
                    maxRow = PtBytes.BYTES_COMPARATOR.max(maxRow, rowName);
                }
            }
            if (maxRow == null) {
                return new SimpleTokenBackedResultsPage<>(endExclusive, ImmutableList.of(), false);
            }

            // The LIMIT may have cut the last row short, so fetch the rest of that row.
            BoundStatement boundLastRow = getPreparedStatement(tableRef, getLastRowQuery, session).bind();
            boundLastRow.setBytes(fieldNameProvider.row(), ByteBuffer.wrap(maxRow));
            try {
                resultSet = session.execute(boundLastRow);
            } catch (com.datastax.driver.core.exceptions.UnavailableException e) {
                throw new InsufficientConsistencyException(
                        "This operation requires all Cassandra nodes to be up and available.", e);
            }
            rows.addAll(resultSet.all());
            cqlKeyValueServices.logTracedQuery(getLastRowQuery, resultSet, session, cqlStatementCache.normalQuery);
            for (Row row : rows) {
                extractor.internalExtractResult(
                        timestamp, selection, getRowName(row), getColName(row), getValue(row), getTs(row));
            }
            SortedMap<byte[], SortedMap<byte[], T>> resultsByRow = Cells.breakCellsUpByRow(extractor.asMap());
            return ResultsExtractor.getRowResults(endExclusive, maxRow, resultsByRow);
        }
    }.iterator());
}
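The returned ClosableIterator pages through the range lazily, so a new CQL query runs only as the caller advances; it should be closed when the caller is done. A minimal caller-side sketch (kvs, tableRef, timestamp, and extractorSupplier are illustrative names, not from the source):

try (ClosableIterator<RowResult<Value>> iter = kvs.getRangeWithPageCreator(
        tableRef, RangeRequest.all(), timestamp, ConsistencyLevel.QUORUM, extractorSupplier)) {
    while (iter.hasNext()) {
        RowResult<Value> rowResult = iter.next();
        // Each RowResult holds all selected columns for one row.
    }
}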
Use of com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException in project atlasdb by palantir.
From the class CqlKeyValueService, method alterTableForCompaction:
private void alterTableForCompaction(TableReference tableRef, int gcGraceSeconds, float tombstoneThreshold) {
    log.trace("Altering table {} to have gc_grace_seconds={} and tombstone_threshold={}",
            tableRef, gcGraceSeconds, String.format("%.2f", tombstoneThreshold));
    String alterTableQuery = "ALTER TABLE " + getFullTableName(tableRef)
            + " WITH gc_grace_seconds = " + gcGraceSeconds
            + " and compaction = {'class':'" + CassandraConstants.LEVELED_COMPACTION_STRATEGY
            + "', 'tombstone_threshold':" + tombstoneThreshold + "};";
    // Schema changes run at ConsistencyLevel.ALL on the long-running-query session.
    BoundStatement alterTable = getPreparedStatement(tableRef, alterTableQuery, longRunningQuerySession)
            .setConsistencyLevel(ConsistencyLevel.ALL)
            .bind();
    ResultSet resultSet;
    try {
        resultSet = longRunningQuerySession.execute(alterTable);
    } catch (UnavailableException e) {
        throw new InsufficientConsistencyException(
                "Alter table requires all Cassandra nodes to be up and available.", e);
    } catch (Exception e) {
        throw Throwables.throwUncheckedException(e);
    }
    cqlKeyValueServices.logTracedQuery(alterTableQuery, resultSet, session, cqlStatementCache.normalQuery);
}
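For concreteness, with gcGraceSeconds = 3600 and tombstoneThreshold = 0.2f, the concatenation above would produce a statement along these lines (the table name is illustrative, and CassandraConstants.LEVELED_COMPACTION_STRATEGY is assumed to resolve to the Cassandra strategy class name LeveledCompactionStrategy):

ALTER TABLE atlasdb.my_table WITH gc_grace_seconds = 3600 and compaction = {'class':'LeveledCompactionStrategy', 'tombstone_threshold':0.2};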
Use of com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException in project atlasdb by palantir.
From the class SchemaMutationLock, method queryExistingLockColumn:
private Optional<Column> queryExistingLockColumn(CassandraClient client) throws TException {
    TableReference lockTableRef = lockTable.get();
    Column existingColumn = null;
    ConsistencyLevel localQuorum = ConsistencyLevel.LOCAL_QUORUM;
    try {
        ColumnOrSuperColumn result = queryRunner.run(client, lockTableRef,
                () -> client.get(lockTableRef, getGlobalDdlLockRowName(), getGlobalDdlLockColumnName(), localQuorum));
        existingColumn = result.getColumn();
    } catch (UnavailableException e) {
        throw new InsufficientConsistencyException(
                "Checking the schema lock requires " + localQuorum + " Cassandra nodes to be up and available.", e);
    } catch (NotFoundException e) {
        // An absent lock column simply means nobody holds the schema lock.
        log.debug("No existing schema lock found in table [{}]", SafeArg.of("tableName", lockTableRef));
    }
    return Optional.ofNullable(existingColumn);
}
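Callers can thus treat "no lock column" and "lock column present" uniformly through the Optional, while genuine availability problems surface as InsufficientConsistencyException. A hedged usage sketch (the branch bodies are illustrative):

Optional<Column> existingLock = queryExistingLockColumn(client);
if (existingLock.isPresent()) {
    byte[] lockHolder = existingLock.get().getValue();
    // Inspect the value to see who holds (or abandoned) the schema mutation lock.
} else {
    // No column found: the schema mutation lock is free to acquire.
}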
Use of com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException in project atlasdb by palantir.
From the class CassandraKeyValueServiceImpl, method executeCheckAndSet:
private CASResult executeCheckAndSet(CassandraClient client, CheckAndSetRequest request) throws TException {
    try {
        TableReference table = request.table();
        Cell cell = request.cell();
        long timestamp = AtlasDbConstants.TRANSACTION_TS;
        ByteBuffer rowName = ByteBuffer.wrap(cell.getRowName());
        byte[] colName = CassandraKeyValueServices.makeCompositeBuffer(cell.getColumnName(), timestamp).array();
        // An absent expected value means "insert only if the cell does not exist yet".
        List<Column> oldColumns;
        java.util.Optional<byte[]> oldValue = request.oldValue();
        if (oldValue.isPresent()) {
            oldColumns = ImmutableList.of(makeColumn(colName, oldValue.get(), timestamp));
        } else {
            oldColumns = ImmutableList.of();
        }
        Column newColumn = makeColumn(colName, request.newValue(), timestamp);
        return queryRunner.run(client, table, () -> client.cas(
                table, rowName, oldColumns, ImmutableList.of(newColumn), ConsistencyLevel.SERIAL, writeConsistency));
    } catch (UnavailableException e) {
        throw new InsufficientConsistencyException(
                "Check-and-set requires " + writeConsistency + " Cassandra nodes to be up and available.", e);
    }
}
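The empty oldColumns list turns the CAS into an insert-if-absent. A hedged sketch of building requests for both modes, assuming the CheckAndSetRequest factory methods newCell and singleCell from atlasdb's keyvalue API:

Cell cell = Cell.create(PtBytes.toBytes("row"), PtBytes.toBytes("col"));
// Expect the cell to be absent: maps to the empty oldColumns list above.
CheckAndSetRequest insert = CheckAndSetRequest.newCell(table, cell, PtBytes.toBytes("v1"));
// Expect the current value to be exactly "v1": maps to a one-element oldColumns list.
CheckAndSetRequest update = CheckAndSetRequest.singleCell(table, cell, PtBytes.toBytes("v1"), PtBytes.toBytes("v2"));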
Use of com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException in project atlasdb by palantir.
From the class CqlKeyValueService, method createTables:
@Override
public void createTables(final Map<TableReference, byte[]> tableRefsToTableMetadata) {
    Collection<com.datastax.driver.core.TableMetadata> tables =
            cluster.getMetadata().getKeyspace(config.getKeyspaceOrThrow()).getTables();
    Set<TableReference> existingTables =
            Sets.newHashSet(Iterables.transform(tables, input -> TableReference.createUnsafe(input.getName())));
    // ScrubberStore likes to call createTable before our setup gets called...
    if (!existingTables.contains(AtlasDbConstants.DEFAULT_METADATA_TABLE)) {
        cqlKeyValueServices.createTableWithSettings(
                AtlasDbConstants.DEFAULT_METADATA_TABLE, AtlasDbConstants.EMPTY_TABLE_METADATA, this);
    }
    // Only create tables that do not already exist in the keyspace.
    Set<TableReference> tablesToCreate = Sets.difference(tableRefsToTableMetadata.keySet(), existingTables);
    for (TableReference tableRef : tablesToCreate) {
        try {
            cqlKeyValueServices.createTableWithSettings(tableRef, tableRefsToTableMetadata.get(tableRef), this);
        } catch (com.datastax.driver.core.exceptions.UnavailableException e) {
            throw new InsufficientConsistencyException(
                    "Creating tables requires all Cassandra nodes to be up and available.", e);
        }
    }
    if (!tablesToCreate.isEmpty()) {
        CqlKeyValueServices.waitForSchemaVersionsToCoalesce(
                "createTables(" + tableRefsToTableMetadata.size() + " tables)", this);
    }
    internalPutMetadataForTables(tableRefsToTableMetadata, false);
}
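A short usage sketch (the table name is illustrative; AtlasDbConstants.GENERIC_TABLE_METADATA is assumed as placeholder metadata):

Map<TableReference, byte[]> toCreate = ImmutableMap.of(
        TableReference.createFromFullyQualifiedName("myapp.events"),
        AtlasDbConstants.GENERIC_TABLE_METADATA);
kvs.createTables(toCreate);
// Tables already present are skipped; only genuinely new tables trigger the
// schema-version wait, and UnavailableException during creation surfaces as
// InsufficientConsistencyException.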