Example usage of org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory from the Apache Cassandra project.
From the class CassandraEntireSSTableStreamWriterTest, method setupStreamingSessionForTest:
/**
 * Builds a minimal outbound bootstrap {@link StreamSession} for test use:
 * a single-connection coordinator backed by a Netty connection factory,
 * with the local broadcast address standing in for the remote peer.
 */
private StreamSession setupStreamingSessionForTest() {
    StreamCoordinator coordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
    StreamResultFuture initiator = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.BOOTSTRAP, Collections.<StreamEventHandler>emptyList(), coordinator);
    // Use our own broadcast address as the "peer" so no real remote node is needed.
    InetAddressAndPort self = FBUtilities.getBroadcastAddressAndPort();
    SessionInfo info = new SessionInfo(self, 0, self, Collections.emptyList(), Collections.emptyList(), StreamSession.State.INITIALIZED);
    coordinator.addSessionInfo(info);
    StreamSession outbound = coordinator.getOrCreateOutboundSession(self);
    outbound.init(initiator);
    return outbound;
}
Example usage of org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory from the Apache Cassandra project.
From the class StreamStateStoreTest, method testUpdateAndQueryAvailableRanges:
/**
 * Exercises {@link StreamStateStore} bookkeeping: ranges only become available
 * once their session completes, range bounds are left-open/right-closed, other
 * keyspaces are unaffected, and newly completed ranges accumulate.
 */
@Test
public void testUpdateAndQueryAvailableRanges() {
    // Simulate bootstrapping range (0, 100] of keyspace1.
    IPartitioner partitioner = new Murmur3Partitioner();
    Token.TokenFactory tokens = partitioner.getTokenFactory();
    InetAddressAndPort self = FBUtilities.getBroadcastAddressAndPort();
    Range<Token> first = new Range<>(tokens.fromString("0"), tokens.fromString("100"));
    StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, self, new NettyStreamingConnectionFactory(), null, current_version, false, 0, null, PreviewKind.NONE);
    session.addStreamRequest("keyspace1", RangesAtEndpoint.toDummyList(Collections.singleton(first)), RangesAtEndpoint.toDummyList(Collections.emptyList()), Collections.singleton("cf"));
    StreamStateStore store = new StreamStateStore();
    // A completion event for a session that never reached COMPLETE must not publish its ranges.
    store.handleStreamEvent(new StreamEvent.SessionCompleteEvent(session));
    assertFalse(store.isDataAvailable("keyspace1", tokens.fromString("50")));
    // Once the session is COMPLETE, its keyspace/ranges become available.
    session.state(StreamSession.State.COMPLETE);
    store.handleStreamEvent(new StreamEvent.SessionCompleteEvent(session));
    // Token 50 lies inside (0, 100].
    assertTrue(store.isDataAvailable("keyspace1", tokens.fromString("50")));
    // The range is left-open/right-closed: 0 is excluded, 101 is beyond the bound.
    assertFalse(store.isDataAvailable("keyspace1", tokens.fromString("0")));
    assertFalse(store.isDataAvailable("keyspace1", tokens.fromString("101")));
    // A different keyspace has no data available.
    assertFalse(store.isDataAvailable("keyspace2", tokens.fromString("50")));
    // Complete a second session covering the adjacent range (100, 200] of the same keyspace.
    Range<Token> second = new Range<>(tokens.fromString("100"), tokens.fromString("200"));
    session = new StreamSession(StreamOperation.BOOTSTRAP, self, new NettyStreamingConnectionFactory(), null, current_version, false, 0, null, PreviewKind.NONE);
    session.addStreamRequest("keyspace1", RangesAtEndpoint.toDummyList(Collections.singleton(second)), RangesAtEndpoint.toDummyList(Collections.emptyList()), Collections.singleton("cf"));
    session.state(StreamSession.State.COMPLETE);
    store.handleStreamEvent(new StreamEvent.SessionCompleteEvent(session));
    // The newly added range is available ...
    assertTrue(store.isDataAvailable("keyspace1", tokens.fromString("101")));
    // ... and the earlier one is still there.
    assertTrue(store.isDataAvailable("keyspace1", tokens.fromString("50")));
}
Example usage of org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory from the Apache Cassandra project.
From the class EntireSSTableStreamingCorrectFilesCountTest, method setupStreamingSessionForTest:
/**
 * Creates an initialized outbound bootstrap {@link StreamSession} whose result
 * future notifies the supplied handler. The local broadcast address is used as
 * both ends of the session, so no remote node is involved.
 *
 * @param streamEventHandler receives stream events from the created session
 */
private StreamSession setupStreamingSessionForTest(StreamEventHandler streamEventHandler) {
    StreamCoordinator coordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
    StreamResultFuture initiator = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.BOOTSTRAP, Collections.singleton(streamEventHandler), coordinator);
    InetAddressAndPort self = FBUtilities.getBroadcastAddressAndPort();
    coordinator.addSessionInfo(new SessionInfo(self, 0, self, Collections.emptyList(), Collections.emptyList(), StreamSession.State.INITIALIZED));
    StreamSession outbound = coordinator.getOrCreateOutboundSession(self);
    outbound.init(initiator);
    return outbound;
}
Example usage of org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory from the Apache Cassandra project.
From the class EntireSSTableStreamConcurrentComponentMutationTest, method setupStreamingSessionForTest:
/**
 * Wires up an outbound bootstrap {@link StreamSession} against ourselves
 * (broadcast address as peer) for use in streaming tests; no event handlers
 * are registered on the result future.
 */
private StreamSession setupStreamingSessionForTest() {
    InetAddressAndPort self = FBUtilities.getBroadcastAddressAndPort();
    StreamCoordinator coordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
    StreamResultFuture initiator = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.BOOTSTRAP, Collections.emptyList(), coordinator);
    coordinator.addSessionInfo(new SessionInfo(self, 0, self, Collections.emptyList(), Collections.emptyList(), StreamSession.State.INITIALIZED));
    StreamSession outbound = coordinator.getOrCreateOutboundSession(self);
    outbound.init(initiator);
    return outbound;
}
Example usage of org.apache.cassandra.streaming.async.NettyStreamingConnectionFactory from the Apache Cassandra project.
From the class StreamTransferTaskTest, method testFailSessionDuringTransferShouldNotReleaseReferences:
/**
 * Verifies that failing a stream session while transfers are in flight does NOT
 * release the sstable references early: each {@code Ref<SSTableReader>} must stay
 * held through the session failure and only drop to zero once the transfer is
 * explicitly finished.
 */
@Test
public void testFailSessionDuringTransferShouldNotReleaseReferences() throws Exception {
InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
// NOTE(review): the coordinator is built with a fresh NettyStreamingConnectionFactory while
// the session below uses the shared FACTORY, and the future is created with
// StreamOperation.OTHER while coordinator/session use BOOTSTRAP — presumably harmless
// for this test's ref-counting focus, but worth confirming the mismatch is intentional.
StreamCoordinator streamCoordinator = new StreamCoordinator(StreamOperation.BOOTSTRAP, 1, new NettyStreamingConnectionFactory(), false, false, null, PreviewKind.NONE);
StreamResultFuture future = StreamResultFuture.createInitiator(UUID.randomUUID(), StreamOperation.OTHER, Collections.<StreamEventHandler>emptyList(), streamCoordinator);
StreamSession session = new StreamSession(StreamOperation.BOOTSTRAP, peer, FACTORY, null, current_version, false, 0, null, PreviewKind.NONE);
session.init(future);
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);
// create two sstables
for (int i = 0; i < 2; i++) {
SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
cfs.forceBlockingFlush();
}
// create streaming task that streams those two sstables
StreamTransferTask task = new StreamTransferTask(session, cfs.metadata.id);
// Hold our own refs so we can observe the global count independently of the task.
List<Ref<SSTableReader>> refs = new ArrayList<>(cfs.getLiveSSTables().size());
for (SSTableReader sstable : cfs.getLiveSSTables()) {
List<Range<Token>> ranges = new ArrayList<>();
ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
Ref<SSTableReader> ref = sstable.selfRef();
refs.add(ref);
task.addTransferStream(new CassandraOutgoingFile(StreamOperation.BOOTSTRAP, ref, sstable.getPositionsForRanges(ranges), ranges, 1));
}
// NOTE(review): 14 looks like 2 sstables x 7 on-disk components each — confirm against
// the component set CassandraOutgoingFile reports for this table's format.
assertEquals(14, task.getTotalNumberOfFiles());
// add task to stream session, so it is aborted when stream session fails
session.transfers.put(TableId.generate(), task);
// make a copy of outgoing file messages, since task is cleared when it's aborted
Collection<OutgoingStreamMessage> files = new LinkedList<>(task.streams.values());
// simulate start transfer
for (OutgoingStreamMessage file : files) {
file.startTransfer();
}
// fail stream session mid-transfer; block (up to 5s) for the failure to propagate
session.onError(new Exception("Fake exception")).get(5, TimeUnit.SECONDS);
// make sure reference was not released: transfers in progress must keep their refs alive
for (Ref<SSTableReader> ref : refs) {
assertEquals(1, ref.globalCount());
}
// wait for stream to abort asynchronously (abort work is queued on nonPeriodicTasks);
// poll up to ~10 x 10ms before giving up
int tries = 10;
while (ScheduledExecutors.nonPeriodicTasks.getActiveTaskCount() > 0) {
if (tries < 1)
throw new RuntimeException("test did not complete in time");
Thread.sleep(10);
tries--;
}
// simulate finish transfer
for (OutgoingStreamMessage file : files) {
file.finishTransfer();
}
// now reference should be released: finishing the transfer drops the last ref
for (Ref<SSTableReader> ref : refs) {
assertEquals(0, ref.globalCount());
}
}
Aggregations