Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From class Mutation, method merge.
/**
 * Creates a new mutation that merges all the provided mutations.
 *
 * @param mutations the mutations to merge together. All mutations must be
 * on the same keyspace and partition key, and there must be at least one
 * mutation.
 * @return a mutation that contains all the modifications contained in {@code mutations}.
 *
 * @throws IllegalArgumentException if not all the mutations are on the same
 * keyspace and key.
 */
public static Mutation merge(List<Mutation> mutations) {
    assert !mutations.isEmpty();
    if (mutations.size() == 1)
        return mutations.get(0);

    Set<TableId> updatedTables = new HashSet<>();
    String ks = null;
    DecoratedKey key = null;
    for (Mutation mutation : mutations) {
        updatedTables.addAll(mutation.modifications.keySet());
        if (ks != null && !ks.equals(mutation.keyspaceName))
            throw new IllegalArgumentException();
        if (key != null && !key.equals(mutation.key))
            throw new IllegalArgumentException();
        ks = mutation.keyspaceName;
        key = mutation.key;
    }

    List<PartitionUpdate> updates = new ArrayList<>(mutations.size());
    Map<TableId, PartitionUpdate> modifications = new HashMap<>(updatedTables.size());
    for (TableId table : updatedTables) {
        for (Mutation mutation : mutations) {
            PartitionUpdate upd = mutation.modifications.get(table);
            if (upd != null)
                updates.add(upd);
        }
        if (updates.isEmpty())
            continue;
        modifications.put(table, updates.size() == 1 ? updates.get(0) : PartitionUpdate.merge(updates));
        updates.clear();
    }
    return new Mutation(ks, key, modifications);
}
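The second half of merge follows a collect-then-fold pattern: gather every TableId touched by any input mutation, then combine the per-table updates across all inputs. Below is a minimal, self-contained sketch of that same pattern with plain strings standing in for Cassandra's PartitionUpdate; the MergeSketch class and the string concatenation used in place of PartitionUpdate.merge are illustrative assumptions, not Cassandra API.

import java.util.*;

public class MergeSketch {
    // Stand-in for Map<TableId, PartitionUpdate>: table name -> update payload
    static Map<String, String> merge(List<Map<String, String>> mutations) {
        // collect every table touched by any input mutation
        Set<String> updatedTables = new HashSet<>();
        for (Map<String, String> m : mutations)
            updatedTables.addAll(m.keySet());

        Map<String, String> merged = new HashMap<>(updatedTables.size());
        for (String table : updatedTables) {
            List<String> updates = new ArrayList<>();
            for (Map<String, String> m : mutations) {
                String upd = m.get(table);
                if (upd != null)
                    updates.add(upd);
            }
            // a single update is reused as-is; multiple updates are combined
            merged.put(table, updates.size() == 1 ? updates.get(0) : String.join("+", updates));
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> m1 = Map.of("t1", "a", "t2", "b");
        Map<String, String> m2 = Map.of("t2", "c");
        System.out.println(merge(List.of(m1, m2))); // e.g. {t1=a, t2=b+c}
    }
}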
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From class Mutation, method toString.
public String toString(boolean shallow) {
    StringBuilder buff = new StringBuilder("Mutation(");
    buff.append("keyspace='").append(keyspaceName).append('\'');
    buff.append(", key='").append(ByteBufferUtil.bytesToHex(key.getKey())).append('\'');
    buff.append(", modifications=[");
    if (shallow) {
        List<String> cfnames = new ArrayList<>(modifications.size());
        for (TableId tableId : modifications.keySet()) {
            TableMetadata cfm = Schema.instance.getTableMetadata(tableId);
            cfnames.add(cfm == null ? "-dropped-" : cfm.name);
        }
        buff.append(StringUtils.join(cfnames, ", "));
    } else {
        buff.append("\n ").append(StringUtils.join(modifications.values(), "\n ")).append('\n');
    }
    return buff.append("])").toString();
}
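With shallow == true the method prints only the names of the touched tables (or "-dropped-" when the table no longer exists in the schema); with shallow == false it prints each PartitionUpdate's own toString on its own line. A minimal, self-contained sketch of the same shallow/verbose pattern, with hypothetical names and plain strings in place of PartitionUpdate:

import java.util.*;

public class ToStringSketch {
    // Stand-in for Mutation.toString(boolean): table name -> update description
    static String describe(Map<String, String> modifications, boolean shallow) {
        StringBuilder buff = new StringBuilder("Mutation(modifications=[");
        if (shallow)
            buff.append(String.join(", ", modifications.keySet())); // names only
        else
            buff.append("\n ").append(String.join("\n ", modifications.values())).append('\n');
        return buff.append("])").toString();
    }

    public static void main(String[] args) {
        Map<String, String> mods = Map.of("t1", "PartitionUpdate@t1");
        System.out.println(describe(mods, true));   // Mutation(modifications=[t1])
        System.out.println(describe(mods, false));  // one update per line
    }
}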
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From class AbstractCommitLogSegmentManager, method flushDataFrom.
/**
 * Forces a flush on all CFs that are still dirty in the given segments.
 *
 * @param segments the segments whose dirty tables should be flushed
 * @param force whether to flush unconditionally rather than only up to the last position in {@code segments}
 * @return a Future that will finish when all the flushes are complete.
 */
private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force) {
    if (segments.isEmpty())
        return Futures.immediateFuture(null);

    final CommitLogPosition maxCommitLogPosition = segments.get(segments.size() - 1).getCurrentCommitLogPosition();

    // a map of TableId -> forceFlush() to ensure we only queue one flush per table
    final Map<TableId, ListenableFuture<?>> flushes = new LinkedHashMap<>();

    for (CommitLogSegment segment : segments) {
        for (TableId dirtyTableId : segment.getDirtyTableIds()) {
            TableMetadata metadata = Schema.instance.getTableMetadata(dirtyTableId);
            if (metadata == null) {
                // even though we remove the schema entry before a final flush when dropping a CF,
                // it's still possible for a writer to race and finish its append after the flush.
                logger.trace("Marking clean CF {} that doesn't exist anymore", dirtyTableId);
                segment.markClean(dirtyTableId, CommitLogPosition.NONE, segment.getCurrentCommitLogPosition());
            } else if (!flushes.containsKey(dirtyTableId)) {
                final ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(dirtyTableId);
                // we can safely call forceFlush here as we will only ever block (briefly) for other attempts
                // to flush; no deadlock is possible since the removal of switchLock
                flushes.put(dirtyTableId, force ? cfs.forceFlush() : cfs.forceFlush(maxCommitLogPosition));
            }
        }
    }
    return Futures.allAsList(flushes.values());
}
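The LinkedHashMap here doubles as a de-duplication set: the first segment that reports a table dirty queues the flush, and later segments reuse the same future. A self-contained sketch of that one-flush-per-table pattern, using CompletableFuture as a stand-in for Guava's ListenableFuture and integer ids in place of TableId (all names below are hypothetical):

import java.util.*;
import java.util.concurrent.*;

public class FlushDedupSketch {
    // queue at most one flush per table id, preserving first-seen order
    static Map<Integer, CompletableFuture<Void>> queueFlushes(List<Set<Integer>> dirtyPerSegment) {
        Map<Integer, CompletableFuture<Void>> flushes = new LinkedHashMap<>();
        for (Set<Integer> segment : dirtyPerSegment)
            for (Integer tableId : segment)
                flushes.computeIfAbsent(tableId, id ->
                        CompletableFuture.runAsync(() -> System.out.println("flushing table " + id)));
        return flushes;
    }

    public static void main(String[] args) {
        Map<Integer, CompletableFuture<Void>> flushes =
                queueFlushes(List.of(Set.of(1, 2), Set.of(2, 3))); // table 2 is flushed only once
        // equivalent of Futures.allAsList(flushes.values()): wait for every queued flush
        CompletableFuture.allOf(flushes.values().toArray(new CompletableFuture[0])).join();
    }
}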
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From class ActiveRepairService, method prepareForRepair.
public UUID prepareForRepair(UUID parentRepairSession, InetAddress coordinator, Set<InetAddress> endpoints, RepairOption options, List<ColumnFamilyStore> columnFamilyStores) {
    // we only want repairedAt for incremental repairs; for non-incremental repairs,
    // UNREPAIRED_SSTABLE will preserve repairedAt on streamed sstables
    long repairedAt = options.isIncremental() ? Clock.instance.currentTimeMillis() : ActiveRepairService.UNREPAIRED_SSTABLE;
    registerParentRepairSession(parentRepairSession, coordinator, columnFamilyStores, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal());

    final CountDownLatch prepareLatch = new CountDownLatch(endpoints.size());
    final AtomicBoolean status = new AtomicBoolean(true);
    final Set<String> failedNodes = Collections.synchronizedSet(new HashSet<String>());
    IAsyncCallbackWithFailure callback = new IAsyncCallbackWithFailure() {
        public void response(MessageIn msg) {
            prepareLatch.countDown();
        }

        public boolean isLatencyForSnitch() {
            return false;
        }

        public void onFailure(InetAddress from, RequestFailureReason failureReason) {
            status.set(false);
            failedNodes.add(from.getHostAddress());
            prepareLatch.countDown();
        }
    };

    List<TableId> tableIds = new ArrayList<>(columnFamilyStores.size());
    for (ColumnFamilyStore cfs : columnFamilyStores)
        tableIds.add(cfs.metadata.id);

    for (InetAddress neighbour : endpoints) {
        if (FailureDetector.instance.isAlive(neighbour)) {
            PrepareMessage message = new PrepareMessage(parentRepairSession, tableIds, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal());
            MessageOut<RepairMessage> msg = message.createMessage();
            MessagingService.instance().sendRR(msg, neighbour, callback, DatabaseDescriptor.getRpcTimeout(), true);
        } else {
            status.set(false);
            failedNodes.add(neighbour.getHostAddress());
            prepareLatch.countDown();
        }
    }

    try {
        prepareLatch.await(DatabaseDescriptor.getRpcTimeout(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        removeParentRepairSession(parentRepairSession);
        throw new RuntimeException("Did not get replies from all endpoints. List of failed endpoint(s): " + failedNodes, e);
    }

    if (!status.get()) {
        removeParentRepairSession(parentRepairSession);
        throw new RuntimeException("Did not get positive replies from all endpoints. List of failed endpoint(s): " + failedNodes);
    }
    return parentRepairSession;
}
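The coordination idiom above pairs a CountDownLatch sized to the number of endpoints with an AtomicBoolean for overall success and a synchronized set of failed nodes; the latch is counted down on the success, failure, and dead-node paths alike, so the bounded await can never hang waiting for a reply that will not arrive. A self-contained sketch of the same idiom, with a thread pool simulating the remote replies (the endpoint addresses and the simulated failure are assumptions for illustration):

import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class PrepareLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        List<String> endpoints = List.of("10.0.0.1", "10.0.0.2", "10.0.0.3");
        CountDownLatch prepareLatch = new CountDownLatch(endpoints.size());
        AtomicBoolean status = new AtomicBoolean(true);
        Set<String> failedNodes = Collections.synchronizedSet(new HashSet<>());

        ExecutorService pool = Executors.newFixedThreadPool(endpoints.size());
        for (String node : endpoints) {
            pool.submit(() -> {
                boolean ok = !node.endsWith(".3"); // simulate one failed reply
                if (!ok) {
                    status.set(false);
                    failedNodes.add(node);
                }
                prepareLatch.countDown(); // count down on success and failure alike
            });
        }

        // bounded wait, mirroring prepareLatch.await(rpcTimeout, MILLISECONDS)
        if (!prepareLatch.await(1, TimeUnit.SECONDS) || !status.get())
            System.out.println("Did not get positive replies from: " + failedNodes);
        pool.shutdown();
    }
}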