Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega: class StreamMetadataStoreTest, method scaleWithTxTest.
@Test
public void scaleWithTxTest() throws Exception {
    final String scope = "ScopeScaleWithTx";
    final String stream = "StreamScaleWithTx";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
    List<Integer> scale1SealedSegments = Collections.singletonList(1);
    // region Txn created before scale and during scale
    VersionedTransactionData tx1 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(0, tx1.getEpoch());
    StartScaleResponse response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, false, null, executor).join();
    final List<Segment> scale1SegmentsCreated = response.getSegmentsCreated();
    final int epoch = response.getActiveEpoch();
    assertEquals(0, epoch);
    assertNotNull(scale1SegmentsCreated);
    store.setState(scope, stream, State.SCALING, null, executor).join();
    // a transaction created while the scale is in progress is still assigned to the old epoch
    VersionedTransactionData tx2 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(0, tx2.getEpoch());
    store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // once the new segments exist, new transactions are assigned to the new epoch
    VersionedTransactionData tx3 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(1, tx3.getEpoch());
    // epoch 0 still has open transactions (tx1 and tx2), so it must not be deleted
    DeleteEpochResponse deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 0, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    assertEquals(null, deleteResponse.getSegmentsCreated());
    assertEquals(null, deleteResponse.getSegmentsSealed());
    store.sealTransaction(scope, stream, tx2.getId(), true, Optional.of(tx2.getVersion()), null, executor).get();
    store.commitTransaction(scope, stream, tx2.getEpoch(), tx2.getId(), null, executor).get();
    // tx1 is still open on epoch 0, so the epoch still must not be deleted
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 0, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    store.sealTransaction(scope, stream, tx1.getId(), true, Optional.of(tx1.getVersion()), null, executor).get();
    store.commitTransaction(scope, stream, tx1.getEpoch(), tx1.getId(), null, executor).get();
    // every transaction on epoch 0 is now committed, so deletion succeeds
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 0, null, executor).get();
    assertEquals(true, deleteResponse.isDeleted());
    store.sealTransaction(scope, stream, tx3.getId(), true, Optional.of(tx3.getVersion()), null, executor).get();
    store.commitTransaction(scope, stream, tx3.getEpoch(), tx3.getId(), null, executor).get();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // the scale is complete, so epoch 1 is now the active epoch and must not be deleted
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 1, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    // endregion
    // region Txn created and deleted after scale starts
    List<Integer> scale2SealedSegments = Collections.singletonList(0);
    long scaleTs2 = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.0, 0.25);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.25, 0.5);
    StartScaleResponse response2 = store.startScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, false, null, executor).join();
    final List<Segment> scale2SegmentsCreated = response2.getSegmentsCreated();
    final int epoch2 = response2.getActiveEpoch();
    assertEquals(1, epoch2);
    assertNotNull(scale2SegmentsCreated);
    VersionedTransactionData txn = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(1, txn.getEpoch());
    store.sealTransaction(scope, stream, txn.getId(), true, Optional.of(txn.getVersion()), null, executor).get();
    store.commitTransaction(scope, stream, txn.getEpoch(), txn.getId(), null, executor).get();
    // verify that the epoch is not deleted, since the new epoch's segments are not yet created
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 1, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    // verify that new txns can still be created and are assigned to the old epoch
    VersionedTransactionData txn2 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(1, txn2.getEpoch());
    store.setState(scope, stream, State.SCALING, null, executor).get();
    store.scaleNewSegmentsCreated(scope, stream, scale2SealedSegments, scale2SegmentsCreated, response2.getActiveEpoch(), scaleTs2, null, executor).join();
    store.sealTransaction(scope, stream, txn2.getId(), true, Optional.of(txn2.getVersion()), null, executor).get();
    store.commitTransaction(scope, stream, txn2.getEpoch(), txn2.getId(), null, executor).get();
    // now that the new segments are created and all txns have committed, the old epoch can be deleted
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 1, null, executor).get();
    assertEquals(true, deleteResponse.isDeleted());
}
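
Two invariants fall out of the assertions above: a transaction is pinned to the epoch that was active when it was created, and an epoch can only be garbage-collected once the scale has produced its new segments and every transaction on the old epoch has completed. Below is a minimal sketch of a caller waiting for an old epoch to drain, using only the StreamMetadataStore methods exercised in the test; the polling loop, its parameters, and the helper name are illustrative assumptions, not Pravega API.

// Hypothetical helper: retry epoch deletion during a scale until all transactions
// on the old epoch have completed. Uses only store methods shown in the test above;
// the loop structure and backoff are assumptions, not part of Pravega.
private boolean awaitEpochDrained(StreamMetadataStore store, String scope, String stream,
                                  int epoch, int maxAttempts) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        DeleteEpochResponse resp = store.tryDeleteEpochIfScaling(scope, stream, epoch, null, executor).get();
        if (resp.isDeleted()) {
            return true; // no open transactions remain on the old epoch
        }
        Thread.sleep(100); // open txns still pin the epoch; back off and retry
    }
    return false;
}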
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega: class StreamMetadataStoreTest, method retentionSetTest.
@Test
public void retentionSetTest() throws Exception {
    final String scope = "ScopeRetain";
    final String stream = "StreamRetain";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofDays(2).toMillis()).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    AtomicReference<BucketChangeListener.StreamNotification> notificationRef = new AtomicReference<>();
    store.registerBucketChangeListener(0, notificationRef::set);
    store.unregisterBucketListener(0);
    // opt the stream into automatic stream-cut generation and verify bucket membership
    store.addUpdateStreamForAutoStreamCut(scope, stream, retentionPolicy, null, executor).get();
    List<String> streams = store.getStreamsForBucket(0, executor).get();
    assertTrue(streams.contains(String.format("%s/%s", scope, stream)));
    // add three stream cuts with increasing recording times and offsets
    Map<Integer, Long> map1 = new HashMap<>();
    map1.put(0, 0L);
    map1.put(1, 0L);
    long recordingTime = System.currentTimeMillis();
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, Long.MIN_VALUE, map1);
    store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
    Map<Integer, Long> map2 = new HashMap<>();
    map2.put(0, 10L);
    map2.put(1, 10L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, Long.MIN_VALUE, map2);
    store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
    Map<Integer, Long> map3 = new HashMap<>();
    map3.put(0, 20L);
    map3.put(1, 20L);
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, Long.MIN_VALUE, map3);
    store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
    List<StreamCutRecord> list = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
    assertTrue(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    // deleting before streamCut2 removes streamCut1 and streamCut2 itself, keeping only streamCut3
    store.deleteStreamCutBefore(scope, stream, streamCut2, null, executor).get();
    list = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
    assertTrue(!list.contains(streamCut1));
    assertTrue(!list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    // opting the stream out of auto stream-cut also removes it from the bucket
    store.removeStreamFromAutoStreamCut(scope, stream, null, executor).get();
    streams = store.getStreamsForBucket(0, executor).get();
    assertTrue(!streams.contains(String.format("%s/%s", scope, stream)));
}
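
The retention set exists so that time-based truncation can find a boundary: the newest stream cut that has aged out of the retention window. A sketch of that selection, assuming StreamCutRecord exposes its recording time via getRecordingTime(); both that accessor name and the selection logic are assumptions, built only from the store methods used above.

// Sketch: pick the newest stream cut outside the retention window and trim the
// retention set at it. getRecordingTime() is an assumed accessor name.
long retentionMillis = Duration.ofDays(2).toMillis();
long now = System.currentTimeMillis();
List<StreamCutRecord> cuts = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
Optional<StreamCutRecord> boundary = cuts.stream()
        .filter(c -> now - c.getRecordingTime() > retentionMillis)
        .max(Comparator.comparingLong(StreamCutRecord::getRecordingTime));
if (boundary.isPresent()) {
    // per the assertions above, this removes records up to and including the boundary cut
    store.deleteStreamCutBefore(scope, stream, boundary.get(), null, executor).get();
}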
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega: class StreamTest, method testStream.
private void testStream(PersistentStreamBase<Integer> stream) throws InterruptedException, ExecutionException {
    long creationTime1 = System.currentTimeMillis();
    long creationTime2 = creationTime1 + 1;
    final ScalingPolicy policy1 = ScalingPolicy.fixed(5);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(6);
    final StreamConfiguration streamConfig1 = StreamConfiguration.builder().scope("test").streamName("test").scalingPolicy(policy1).build();
    final StreamConfiguration streamConfig2 = StreamConfiguration.builder().scope("test").streamName("test").scalingPolicy(policy2).build();
    // nothing persisted yet: every check reports NEW
    CreateStreamResponse response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    // creation time stored, but no configuration yet: still NEW for any config or time
    stream.storeCreationTimeIfAbsent(creationTime1).get();
    response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime2).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    // configuration stored: a retry with the original creation time still reports NEW,
    // but a different creation time now reports EXISTS_CREATING
    stream.createConfigurationIfAbsent(StreamProperty.complete(streamConfig1)).get();
    response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime2).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_CREATING, response.getStatus());
    // state UNKNOWN behaves the same as having no state at all
    stream.createStateIfAbsent(State.UNKNOWN).get();
    response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime2).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_CREATING, response.getStatus());
    // state CREATING: same results as UNKNOWN
    stream.updateState(State.CREATING).get();
    response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.NEW, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime2).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_CREATING, response.getStatus());
    // state ACTIVE: every check reports EXISTS_ACTIVE
    stream.updateState(State.ACTIVE).get();
    response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime2).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, response.getStatus());
    // any state past ACTIVE (here SEALING) also reports EXISTS_ACTIVE
    stream.updateState(State.SEALING).get();
    response = stream.checkStreamExists(streamConfig1, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime1).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, response.getStatus());
    response = stream.checkStreamExists(streamConfig2, creationTime2).get();
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, response.getStatus());
}
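
Read together, the assertions encode a simple rule: while only partial creation metadata exists, a caller presenting the stored creation time still sees NEW (so an interrupted create can be resumed), a caller with a different creation time sees EXISTS_CREATING once the configuration is persisted, and once the stream reaches ACTIVE or any later state every caller sees EXISTS_ACTIVE. A minimal sketch of the resumable create sequence those checks enable, assembled only from the calls the test itself makes; the exact ordering used in production code is an assumption.

// Sketch: a resumable create sequence guarded by checkStreamExists. Each step is
// an *IfAbsent call, so a retry after a crash re-runs the remaining steps safely.
// Built from methods exercised in the test above; the ordering is an assumption.
CreateStreamResponse resp = stream.checkStreamExists(streamConfig1, creationTime1).get();
if (resp.getStatus() == CreateStreamResponse.CreateStatus.NEW) {
    stream.storeCreationTimeIfAbsent(creationTime1).get();                            // 1. record creation time
    stream.createConfigurationIfAbsent(StreamProperty.complete(streamConfig1)).get(); // 2. persist configuration
    stream.createStateIfAbsent(State.UNKNOWN).get();                                  // 3. initialize state
    stream.updateState(State.CREATING).get();                                         // 4. mark creation in progress
    stream.updateState(State.ACTIVE).get();                                           // 5. creation complete
}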
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega: class StreamTest, method testConcurrentGetSuccessorScale.
@Test(timeout = 10000)
public void testConcurrentGetSuccessorScale() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(1);
    final StreamMetadataStore store = new ZKStreamMetadataStore(cli, executor);
    final String streamName = "test";
    String scopeName = "test";
    store.createScope(scopeName).get();
    ZKStoreHelper zkStoreHelper = new ZKStoreHelper(cli, executor);
    StreamConfiguration streamConfig = StreamConfiguration.builder().scope(scopeName).streamName(streamName).scalingPolicy(policy).build();
    store.createStream(scopeName, streamName, streamConfig, System.currentTimeMillis(), null, executor).get();
    store.setState(scopeName, streamName, State.ACTIVE, null, executor).get();
    ZKStream zkStream = spy(new ZKStream("test", "test", zkStoreHelper));
    List<AbstractMap.SimpleEntry<Double, Double>> newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.0, 0.5), new AbstractMap.SimpleEntry<>(0.5, 1.0));
    long scale = System.currentTimeMillis();
    ArrayList<Integer> sealedSegments = Lists.newArrayList(0);
    StartScaleResponse response = zkStream.startScale(sealedSegments, newRanges, scale, false).join();
    List<Segment> newSegments = response.getSegmentsCreated();
    zkStream.updateState(State.SCALING).join();
    List<Integer> newSegmentInt = newSegments.stream().map(Segment::getNumber).collect(Collectors.toList());
    zkStream.scaleNewSegmentsCreated(sealedSegments, newSegmentInt, response.getActiveEpoch(), scale).get();
    // the history table has a partial record at this point; the segments may already
    // have been sealed, so getSuccessors could be called concurrently with the scale.
    final CompletableFuture<Data<Integer>> segmentTable = zkStream.getSegmentTable();
    final CompletableFuture<Data<Integer>> historyTable = zkStream.getHistoryTable();
    AtomicBoolean historyCalled = new AtomicBoolean(false);
    AtomicBoolean segmentCalled = new AtomicBoolean(false);
    // mock: if the segment table is fetched before the history table, throw a runtime exception so the test fails
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get() && segmentCalled.get()) {
            throw new RuntimeException();
        }
        historyCalled.set(true);
        return historyTable;
    }).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get()) {
            throw new RuntimeException();
        }
        segmentCalled.set(true);
        return segmentTable;
    }).when(zkStream).getSegmentTable();
    Map<Integer, List<Integer>> successors = zkStream.getSuccessorsWithPredecessors(0).get();
    assertTrue(successors.containsKey(1) && successors.containsKey(2));
    // reset the mocks so that the scale operation can resume
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> historyTable).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> segmentTable).when(zkStream).getSegmentTable();
    zkStream.scaleOldSegmentsSealed(sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), newSegmentInt, response.getActiveEpoch(), scale).get();
    // scale is complete; the history table now has a completed record.
    final CompletableFuture<Data<Integer>> segmentTable2 = zkStream.getSegmentTable();
    final CompletableFuture<Data<Integer>> historyTable2 = zkStream.getHistoryTable();
    // reinstall the ordering guard: fetching the segment table before the history table fails the test
    segmentCalled.set(false);
    historyCalled.set(false);
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get() && segmentCalled.get()) {
            throw new RuntimeException();
        }
        historyCalled.set(true);
        return historyTable2;
    }).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get()) {
            throw new RuntimeException();
        }
        segmentCalled.set(true);
        return segmentTable2;
    }).when(zkStream).getSegmentTable();
    successors = zkStream.getSuccessorsWithPredecessors(0).get();
    assertTrue(successors.containsKey(1) && successors.containsKey(2));
}
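
The two mock installations above are identical except for the canned futures they return. The guarded invariant is that getSuccessorsWithPredecessors must read the history table before the segment table, so a concurrent scale cannot yield a segment view newer than the history view it is joined against. A hypothetical helper factoring out the duplicated setup (a refactoring sketch, not part of the Pravega test):

// Hypothetical helper: install a "history table before segment table" ordering
// guard on a spied ZKStream. Extracted verbatim from the doAnswer blocks above.
private void enforceHistoryBeforeSegmentTable(ZKStream zkStream,
                                              CompletableFuture<Data<Integer>> historyTable,
                                              CompletableFuture<Data<Integer>> segmentTable) {
    AtomicBoolean historyCalled = new AtomicBoolean(false);
    AtomicBoolean segmentCalled = new AtomicBoolean(false);
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get() && segmentCalled.get()) {
            throw new RuntimeException("segment table was read before history table");
        }
        historyCalled.set(true);
        return historyTable;
    }).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get()) {
            throw new RuntimeException("segment table read without a prior history table read");
        }
        segmentCalled.set(true);
        return segmentTable;
    }).when(zkStream).getSegmentTable();
}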
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega: class ZKStreamMetadataStoreTest, method testScaleMetadata.
@Test
public void testScaleMetadata() throws Exception {
    String scope = "testScopeScale";
    String stream = "testStreamScale";
    ScalingPolicy policy = ScalingPolicy.fixed(3);
    StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 1.0);
    List<SimpleEntry<Double, Double>> newRanges = Arrays.asList(segment1, segment2);
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, System.currentTimeMillis(), null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // creation counts as the first scale incident, carrying the initial three segments;
    // incidents are ordered newest first
    List<ScaleMetadata> scaleIncidents = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleIncidents.size() == 1);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
    // scale
    scale(scope, stream, scaleIncidents.get(0).getSegments(), newRanges);
    scaleIncidents = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleIncidents.size() == 2);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 2);
    assertTrue(scaleIncidents.get(1).getSegments().size() == 3);
    // scale again
    scale(scope, stream, scaleIncidents.get(0).getSegments(), newRanges);
    scaleIncidents = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleIncidents.size() == 3);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 2);
    assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
    // scale again
    scale(scope, stream, scaleIncidents.get(0).getSegments(), newRanges);
    scaleIncidents = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleIncidents.size() == 4);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 2);
    assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
}
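
The scale(...) helper this test calls is not included in the excerpt. A plausible sketch of it, assembled from the three-phase scale sequence that scaleWithTxTest drives explicitly (startScale, scaleNewSegmentsCreated, scaleSegmentsSealed); the exact body, including the state transitions, is an assumption.

// Sketch of the scale(...) helper invoked above, built from the store calls shown
// in scaleWithTxTest. Treat the body, not just the name, as an assumption.
private void scale(String scope, String stream, List<Segment> segments,
                   List<SimpleEntry<Double, Double>> newRanges) throws Exception {
    long scaleTs = System.currentTimeMillis();
    List<Integer> sealedSegments = segments.stream().map(Segment::getNumber).collect(Collectors.toList());
    // phase 1: record the scale request and create metadata for the new segments
    StartScaleResponse response = store.startScale(scope, stream, sealedSegments, newRanges, scaleTs, false, null, executor).get();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    // phase 2: acknowledge that the new segments now exist
    store.scaleNewSegmentsCreated(scope, stream, sealedSegments, response.getSegmentsCreated(), response.getActiveEpoch(), scaleTs, null, executor).get();
    // phase 3: mark the old segments sealed, completing the scale
    store.scaleSegmentsSealed(scope, stream, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), response.getSegmentsCreated(), response.getActiveEpoch(), scaleTs, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
}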