use of io.pravega.client.stream.impl.StreamSegments in project pravega by pravega.
the class ControllerImplTest method testGetEpochSegments.
@Test
public void testGetEpochSegments() throws Exception {
    CompletableFuture<StreamSegments> streamSegments;
    streamSegments = controllerClient.getEpochSegments("scope1", "stream1", 0);
    assertTrue(streamSegments.get().getSegments().size() == 2);
    assertEquals(new Segment("scope1", "stream1", 6), streamSegments.get().getSegmentForKey(0.2));
    assertEquals(new Segment("scope1", "stream1", 7), streamSegments.get().getSegmentForKey(0.6));
    streamSegments = controllerClient.getEpochSegments("scope1", "stream2", 0);
    AssertExtensions.assertFutureThrows("Should throw Exception", streamSegments, throwable -> true);
}
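The assertions above hinge on StreamSegments.getSegmentForKey, which maps a routing key in [0.0, 1.0) to the segment whose key range covers it. A minimal sketch of the same lookup outside the test, assuming a connected controllerClient and the scope/stream names used above:

// Fetch the current segment set and resolve routing keys to their owning segments.
StreamSegments current = controllerClient.getCurrentSegments("scope1", "stream1").get();
Segment low = current.getSegmentForKey(0.2);   // segment owning the keyspace around 0.2
Segment high = current.getSegmentForKey(0.6);  // segment owning the keyspace around 0.6
System.out.println(low.getScopedName() + " / " + high.getScopedName());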
use of io.pravega.client.stream.impl.StreamSegments in project pravega by pravega.
the class ByteStreamWriterTest method setup.
@Before
public void setup() {
    PravegaNodeUri endpoint = new PravegaNodeUri("localhost", 0);
    connectionFactory = new MockConnectionFactoryImpl();
    ClientConnection connection = mock(ClientConnection.class);
    connectionFactory.provideConnection(endpoint, connection);
    controller = new MockController(endpoint.getEndpoint(), endpoint.getPort(), connectionFactory, false);
    controller.createScope(SCOPE);
    controller.createStream(SCOPE, STREAM, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
    MockSegmentStreamFactory streamFactory = new MockSegmentStreamFactory();
    clientFactory = new ByteStreamClientImpl(SCOPE, controller, connectionFactory, streamFactory, streamFactory, streamFactory);
    StreamSegments segments = Futures.getThrowingException(controller.getCurrentSegments(SCOPE, STREAM));
    Preconditions.checkState(segments.getNumberOfSegments() > 0, "Stream is sealed");
    Preconditions.checkState(segments.getNumberOfSegments() == 1, "Stream is configured with more than one segment");
}
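With this fixture in place, a test can exercise the byte-stream API end to end. A hypothetical sketch of such a test (not part of the original class; it assumes the createByteStreamWriter/createByteStreamReader entry points of ByteStreamClientFactory and a made-up payload):

@Test
public void testRoundTrip() throws Exception {
    // Hypothetical test built on the fixture above.
    @Cleanup ByteStreamWriter writer = clientFactory.createByteStreamWriter(STREAM);
    writer.write(new byte[] { 1, 2, 3 });
    // flush() blocks until the written bytes are durably stored.
    writer.flush();
    @Cleanup ByteStreamReader reader = clientFactory.createByteStreamReader(STREAM);
    byte[] buffer = new byte[3];
    // InputStream.read may return fewer bytes than requested; here 3 bytes were written.
    int bytesRead = reader.read(buffer);
    assertEquals(3, bytesRead);
}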
use of io.pravega.client.stream.impl.StreamSegments in project pravega by pravega.
the class ByteStreamClientImpl method createByteStreamWriter.
@Override
public ByteStreamWriter createByteStreamWriter(String streamName) {
    StreamSegments segments = Futures.getThrowingException(controller.getCurrentSegments(scope, streamName));
    Preconditions.checkState(segments.getNumberOfSegments() > 0, "Stream is sealed");
    Preconditions.checkState(segments.getNumberOfSegments() == 1, "Stream is configured with more than one segment");
    Segment segment = segments.getSegments().iterator().next();
    // The writer should not give up connecting to SegmentStore in the background until the ByteStreamWriter is closed.
    EventWriterConfig config = EventWriterConfig.builder().retryAttempts(Integer.MAX_VALUE).build();
    DelegationTokenProvider tokenProvider = DelegationTokenProviderFactory.create(controller, segment, AccessOperation.WRITE);
    return new BufferedByteStreamWriterImpl(
            new ByteStreamWriterImpl(outputStreamFactory.createOutputStreamForSegment(segment, config, tokenProvider),
                                     metaStreamFactory.createSegmentMetadataClient(segment, tokenProvider)));
}
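The two checkState calls make the factory fail fast rather than hand back a writer it cannot support: Preconditions.checkState throws IllegalStateException when its condition is false. A sketch of the caller-visible behavior, assuming a stream (name hypothetical) that has scaled to more than one segment:

try {
    clientFactory.createByteStreamWriter("multiSegmentStream");
} catch (IllegalStateException e) {
    // e.getMessage() is "Stream is configured with more than one segment"
}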
use of io.pravega.client.stream.impl.StreamSegments in project pravega by pravega.
the class ControllerFailoverTest method failoverTest.
@Test
public void failoverTest() throws InterruptedException, ExecutionException {
    String scope = "testFailoverScope" + RandomStringUtils.randomAlphabetic(5);
    String stream = "testFailoverStream" + RandomStringUtils.randomAlphabetic(5);
    int initialSegments = 1;
    List<Long> segmentsToSeal = Collections.singletonList(0L);
    Map<Double, Double> newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 1.0);
    ClientConfig clientConfig = Utils.buildClientConfig(controllerURIDirect);
    // Connect with the first controller instance.
    final Controller controller1 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), executorService);
    // Create the scope and stream.
    controller1.createScope(scope).join();
    log.info("Scope {} created successfully", scope);
    createStream(controller1, scope, stream, ScalingPolicy.fixed(initialSegments));
    log.info("Stream {}/{} created successfully", scope, stream);
    long txnCreationTimestamp = System.nanoTime();
    StreamImpl stream1 = new StreamImpl(scope, stream);
    // Initiate the scale operation. It will block until any ongoing transaction is complete.
    controller1.startScale(stream1, segmentsToSeal, newRangesToCreate).join();
    // Now stop the controller instance executing the scale operation.
    Futures.getAndHandleExceptions(controllerService.scaleService(0), ExecutionException::new);
    log.info("Successfully stopped one instance of controller service");
    // Restart the controller service.
    Futures.getAndHandleExceptions(controllerService.scaleService(1), ExecutionException::new);
    log.info("Successfully started one instance of controller service");
    List<URI> controllerUris = controllerService.getServiceDetails();
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = controllerUris.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    ClientConfig clientConf = Utils.buildClientConfig(controllerURIDirect);
    // Connect to another controller instance.
    @Cleanup final Controller controller2 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConf).build(), executorService);
    // Note: if the scale does not complete within the desired time, the test will time out.
    boolean scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
    while (!scaleStatus) {
        Thread.sleep(30000);
        scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
    }
    segmentsToSeal = Collections.singletonList(NameUtils.computeSegmentId(1, 1));
    newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 0.5);
    newRangesToCreate.put(0.5, 1.0);
    controller2.scaleStream(stream1, segmentsToSeal, newRangesToCreate, executorService).getFuture().join();
    log.info("Checking whether scale operation succeeded by fetching current segments");
    StreamSegments streamSegments = controller2.getCurrentSegments(scope, stream).join();
    log.info("Current segment count= {}", streamSegments.getSegments().size());
    Assert.assertEquals(2, streamSegments.getSegments().size());
}
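The segmentsToSeal computation for the second scale uses NameUtils.computeSegmentId, which packs a segment number together with the epoch in which the segment was created: the epoch occupies the high 32 bits of the resulting long and the segment number the low 32 bits. A small illustration:

// Segment number 1 created in epoch 1, i.e. the segment produced by the first scale.
long segmentId = NameUtils.computeSegmentId(1, 1);
// segmentId == (1L << 32) + 1; the original epoch-0 segments are simply 0L, 1L, ...
assert segmentId == (1L << 32) + 1;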
use of io.pravega.client.stream.impl.StreamSegments in project pravega by pravega.
the class ParseReaderGroupStreamCommand method readRGSegmentToFile.
/**
 * Reads the contents of the segment starting from the given offset and writes them into the provided file.
 *
 * @param segmentHelper A {@link SegmentHelper} instance to read the segment.
 * @param segmentStoreHost Address of the segment-store to read from.
 * @param controller A {@link Controller} instance.
 * @param scope The name of the scope.
 * @param stream The name of the stream.
 * @param fileName The name of the file to which the data will be written.
 * @throws IOException if creating or writing the file fails.
 * @throws Exception if the request fails.
 */
private void readRGSegmentToFile(SegmentHelper segmentHelper, String segmentStoreHost, Controller controller, String scope, String stream, String fileName) throws Exception {
    String tmpfilename = "tmp/" + stream + System.currentTimeMillis();
    File outputfile = FileHelper.createFileAndDirectory(fileName);
    @Cleanup BufferedWriter writer = new BufferedWriter(new FileWriter(outputfile));
    CompletableFuture<StreamSegments> streamSegments = controller.getCurrentSegments(scope, stream);
    StreamSegments segments = streamSegments.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    Preconditions.checkArgument(segments.getSegments().size() == 1, "The reader group stream should contain only one segment.");
    String fullyQualifiedSegmentName = segments.getSegments().iterator().next().getScopedName();
    CompletableFuture<WireCommands.StreamSegmentInfo> segmentInfo = segmentHelper.getSegmentInfo(fullyQualifiedSegmentName,
            new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()), authHelper.retrieveMasterToken(), 0L);
    WireCommands.StreamSegmentInfo streamSegmentInfo = segmentInfo.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    long startOffset = streamSegmentInfo.getStartOffset();
    long length = streamSegmentInfo.getWriteOffset();
    // Create a temp file and write the contents of the segment into it.
    readAndWriteSegmentToFile(segmentHelper, segmentStoreHost, fullyQualifiedSegmentName, startOffset, length, tmpfilename,
            getServiceConfig().getAdminGatewayPort(), authHelper.retrieveMasterToken());
    // Parse the contents of the temp file and write the reader group state at the various offsets into the output file.
    parseRGStateFromFile(tmpfilename, writer, startOffset);
}
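getStartOffset and getWriteOffset bound the readable bytes of the segment: truncation can move the start past zero, so the live data occupies the range [startOffset, writeOffset). A sketch of how a caller might derive the number of readable bytes from the same WireCommands.StreamSegmentInfo (variable name hypothetical):

// Bytes still readable in the segment; the prefix before getStartOffset() has been truncated away.
long readableBytes = streamSegmentInfo.getWriteOffset() - streamSegmentInfo.getStartOffset();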