Use of org.apache.cassandra.streaming.messages.OutgoingFileMessage in project cassandra by apache.
The class StreamTransferTask, method complete().
/**
 * Received ACK for the file at {@code sequenceNumber}.
 *
 * @param sequenceNumber sequence number of the file
 */
public void complete(int sequenceNumber) {
    boolean signalComplete;
    synchronized (this) {
        ScheduledFuture timeout = timeoutTasks.remove(sequenceNumber);
        if (timeout != null)
            timeout.cancel(false);

        OutgoingFileMessage file = files.remove(sequenceNumber);
        if (file != null)
            file.complete();

        signalComplete = files.isEmpty();
    }
    // all files sent, notify session this task is complete.
    if (signalComplete)
        session.taskCompleted(this);
}
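For context, here is a minimal sketch of how an acknowledgement could be routed to complete(). The dispatcher name onReceivedAck is a hypothetical stand-in (in Cassandra the StreamSession itself handles the incoming acknowledgement); it only illustrates the lookup-and-complete shape, and it assumes same-package access to session.transfers, just as the test below relies on.

package org.apache.cassandra.streaming;

import org.apache.cassandra.schema.TableId;

// Hypothetical dispatcher, for illustration only.
final class AckDispatchSketch {
    static void onReceivedAck(StreamSession session, TableId tableId, int sequenceNumber) {
        StreamTransferTask task = session.transfers.get(tableId);
        if (task != null)
            // cancels any pending timeout for this sequence number, releases the file's
            // sstable reference, and signals taskCompleted once no files remain
            task.complete(sequenceNumber);
    }
}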
Use of org.apache.cassandra.streaming.messages.OutgoingFileMessage in project cassandra by apache.
The class StreamTransferTaskTest, method testFailSessionDuringTransferShouldNotReleaseReferences().
@Test
public void testFailSessionDuringTransferShouldNotReleaseReferences() throws Exception {
    InetAddress peer = FBUtilities.getBroadcastAddress();
    StreamCoordinator streamCoordinator = new StreamCoordinator(1, true, false, null, false, null);
    StreamResultFuture future = StreamResultFuture.init(UUID.randomUUID(), "", Collections.<StreamEventHandler>emptyList(), streamCoordinator);
    StreamSession session = new StreamSession(peer, peer, null, 0, true, false, null);
    session.init(future);
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);

    // create two sstables
    for (int i = 0; i < 2; i++) {
        SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
        cfs.forceBlockingFlush();
    }

    // create a streaming task that streams those two sstables
    StreamTransferTask task = new StreamTransferTask(session, cfs.metadata.id);
    List<Ref<SSTableReader>> refs = new ArrayList<>(cfs.getLiveSSTables().size());
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        List<Range<Token>> ranges = new ArrayList<>();
        ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
        Ref<SSTableReader> ref = sstable.selfRef();
        refs.add(ref);
        task.addTransferFile(ref, 1, sstable.getPositionsForRanges(ranges), 0);
    }
    assertEquals(2, task.getTotalNumberOfFiles());

    // add the task to the stream session, so it is aborted when the stream session fails
    session.transfers.put(TableId.generate(), task);

    // make a copy of the outgoing file messages, since the task is cleared when it is aborted
    Collection<OutgoingFileMessage> files = new LinkedList<>(task.files.values());

    // simulate the start of the transfer
    for (OutgoingFileMessage file : files) {
        file.startTransfer();
    }

    // fail the stream session mid-transfer
    session.onError(new Exception("Fake exception"));

    // make sure the references were not released
    for (Ref<SSTableReader> ref : refs) {
        assertEquals(1, ref.globalCount());
    }

    // simulate finishing the transfer
    for (OutgoingFileMessage file : files) {
        file.finishTransfer();
    }

    // now the references should be released
    for (Ref<SSTableReader> ref : refs) {
        assertEquals(0, ref.globalCount());
    }
}
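For contrast, here is a hedged sketch of the happy path. It reuses only the setup portion of the test above (session, cfs, task, refs and the copy of task.files.values(), with no startTransfer calls), and it assumes getTotalNumberOfFiles() reflects the remaining files, as the count of 2 above suggests: acknowledging every file through complete() empties the task and releases the sstable references.

// Sketch only: variables task, files and refs are assumed to be built exactly as in
// the test above, but nothing is in flight.
for (OutgoingFileMessage file : files)
    // with no transfer in progress, complete() removes the file from the task
    // and releases its sstable reference immediately
    task.complete(file.header.sequenceNumber);

// nothing left to send, and the references taken in addTransferFile are gone
assertEquals(0, task.getTotalNumberOfFiles());
for (Ref<SSTableReader> ref : refs)
    assertEquals(0, ref.globalCount());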
Use of org.apache.cassandra.streaming.messages.OutgoingFileMessage in project cassandra by apache.
The class StreamTransferTask, method addTransferFile().
public synchronized void addTransferFile(Ref<SSTableReader> ref, long estimatedKeys, List<Pair<Long, Long>> sections, long repairedAt) {
    assert ref.get() != null && tableId.equals(ref.get().metadata().id);
    OutgoingFileMessage message = new OutgoingFileMessage(ref, sequenceNumber.getAndIncrement(), estimatedKeys, sections, repairedAt, session.keepSSTableLevel());
    message = StreamHook.instance.reportOutgoingFile(session, ref.get(), message);
    files.put(message.header.sequenceNumber, message);
    totalSize += message.header.size();
}
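Condensed from the test above, a typical call site looks like the following sketch (cfs and task are assumed to exist as in that test; estimatedKeys = 1 and repairedAt = 0 are placeholder values): the caller takes a self-reference on each live sstable, computes the on-disk sections covering the requested token ranges, and hands both to addTransferFile.

for (SSTableReader sstable : cfs.getLiveSSTables()) {
    List<Range<Token>> ranges = new ArrayList<>();
    ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
    // the Ref keeps the sstable on disk until the resulting OutgoingFileMessage
    // is completed (acknowledged, timed out, or aborted)
    task.addTransferFile(sstable.selfRef(), 1, sstable.getPositionsForRanges(ranges), 0);
}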
Use of org.apache.cassandra.streaming.messages.OutgoingFileMessage in project cassandra by apache.
The class StreamTransferTask, method abort().
public synchronized void abort() {
    if (aborted)
        return;
    aborted = true;

    for (ScheduledFuture future : timeoutTasks.values())
        future.cancel(false);
    timeoutTasks.clear();

    Throwable fail = null;
    for (OutgoingFileMessage file : files.values()) {
        try {
            file.complete();
        } catch (Throwable t) {
            if (fail == null)
                fail = t;
            else
                fail.addSuppressed(t);
        }
    }
    files.clear();

    if (fail != null)
        Throwables.propagate(fail);
}
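A hedged sketch of the error path that exercises abort(): the helper name failAllTransfers is hypothetical (StreamSession drives this itself when onError fires), but it shows the intended contract. Aborting completes every remaining OutgoingFileMessage, which releases each sstable reference immediately or, for a message whose transfer is in flight, defers the release until finishTransfer(), as the test above verifies.

package org.apache.cassandra.streaming;

import java.util.Collection;

// Hypothetical error-path helper, for illustration only.
final class AbortSketch {
    static void failAllTransfers(Collection<StreamTransferTask> transfers) {
        for (StreamTransferTask task : transfers)
            // cancels pending timeouts and completes every remaining outgoing file;
            // a second call is a no-op thanks to the aborted flag
            task.abort();
    }
}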