Usage example of io.pravega.client.stream.StreamConfiguration in the pravega project: class ControllerServiceTest, method setup().
// Test fixture setup: creates the scope, creates two streams (with 2 and 3
// fixed segments respectively), and performs one scale operation on each so
// that tests run against a multi-epoch segment layout.
@Before
public void setup() throws ExecutionException, InterruptedException {
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scope(SCOPE).streamName(stream1).scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scope(SCOPE).streamName(stream2).scalingPolicy(policy2).build();
    // createScope
    streamStore.createScope(SCOPE).get();
    // region createStream
    startTs = System.currentTimeMillis();
    OperationContext context = streamStore.createContext(SCOPE, stream1);
    streamStore.createStream(SCOPE, stream1, configuration1, startTs, context, executor).get();
    // BUGFIX: wait for the ACTIVE transition to complete. The returned future
    // was previously dropped, so subsequent steps could race a stream that was
    // not yet ACTIVE.
    streamStore.setState(SCOPE, stream1, State.ACTIVE, context, executor).get();
    OperationContext context2 = streamStore.createContext(SCOPE, stream2);
    streamStore.createStream(SCOPE, stream2, configuration2, startTs, context2, executor).get();
    // BUGFIX: same as above — block until stream2 is ACTIVE.
    streamStore.setState(SCOPE, stream2, State.ACTIVE, context2, executor).get();
    // endregion
    // region scaleSegments
    // Scale stream1: seal segment 1 and replace it with two segments covering
    // key ranges [0.5, 0.75) and [0.75, 1.0).
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Integer> sealedSegments = Collections.singletonList(1);
    scaleTs = System.currentTimeMillis();
    StartScaleResponse startScaleResponse = streamStore.startScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), startTs + 20, false, null, executor).get();
    List<Segment> segmentCreated = startScaleResponse.getSegmentsCreated();
    streamStore.setState(SCOPE, stream1, State.SCALING, null, executor).get();
    streamStore.scaleNewSegmentsCreated(SCOPE, stream1, sealedSegments, segmentCreated, startScaleResponse.getActiveEpoch(), scaleTs, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), segmentCreated, startScaleResponse.getActiveEpoch(), scaleTs, null, executor).get();
    // Scale stream2: seal segments 0, 1 and 2 and replace them with three
    // segments re-partitioning the whole key space.
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    sealedSegments = Arrays.asList(0, 1, 2);
    startScaleResponse = streamStore.startScale(SCOPE, stream2, sealedSegments, Arrays.asList(segment3, segment4, segment5), startTs + 20, false, null, executor).get();
    segmentCreated = startScaleResponse.getSegmentsCreated();
    streamStore.setState(SCOPE, stream2, State.SCALING, null, executor).get();
    streamStore.scaleNewSegmentsCreated(SCOPE, stream2, sealedSegments, segmentCreated, startScaleResponse.getActiveEpoch(), scaleTs, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), segmentCreated, startScaleResponse.getActiveEpoch(), scaleTs, null, executor).get();
    // endregion
}
Usage example of io.pravega.client.stream.StreamConfiguration in the pravega project: class StreamMetadataStoreTest, method truncationTest().
// Verifies the truncation lifecycle: a truncation can be started, a second
// start is rejected while one is pending, the completed truncation's
// stream-cut is recorded, and a fresh truncation cycle can then run.
@Test
public void truncationTest() throws Exception {
    final String scope = "ScopeTruncate";
    final String stream = "ScopeTruncate";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    Map<Integer, Long> truncation = new HashMap<>();
    truncation.put(0, 0L);
    truncation.put(1, 0L);
    // starting a truncation flips the record into the updating state
    assertTrue(Futures.await(store.startTruncation(scope, stream, truncation, null, executor)));
    StreamProperty<StreamTruncationRecord> truncationProperty = store.getTruncationProperty(scope, stream, true, null, executor).join();
    assertTrue(truncationProperty.isUpdating());
    // a second start while the first is still pending must be rejected
    Map<Integer, Long> truncation2 = new HashMap<>();
    truncation2.put(0, 0L);
    truncation2.put(1, 0L);
    assertFalse(Futures.await(store.startTruncation(scope, stream, truncation2, null, executor)));
    assertTrue(Futures.await(store.completeTruncation(scope, stream, null, executor)));
    truncationProperty = store.getTruncationProperty(scope, stream, true, null, executor).join();
    assertEquals(truncation, truncationProperty.getProperty().getStreamCut());
    // IMPROVED: assertEquals reports the actual size on failure; was
    // assertTrue(... .size() == 2).
    assertEquals(2, truncationProperty.getProperty().getCutEpochMap().size());
    // a new truncation cycle can now be started and completed
    Map<Integer, Long> truncation3 = new HashMap<>();
    truncation3.put(0, 0L);
    truncation3.put(1, 0L);
    assertTrue(Futures.await(store.startTruncation(scope, stream, truncation3, null, executor)));
    // BUGFIX: complete the pending *truncation*. The original called
    // completeUpdateConfiguration here (copy-paste from updateTest), which
    // leaves the truncation record stuck in the updating state.
    assertTrue(Futures.await(store.completeTruncation(scope, stream, null, executor)));
}
Usage example of io.pravega.client.stream.StreamConfiguration in the pravega project: class StreamMetadataStoreTest, method updateTest().
// Verifies the configuration-update lifecycle: an update can be started, a
// concurrent start is rejected while one is pending, and the new
// configuration becomes visible once the update completes.
@Test
public void updateTest() throws Exception {
    final String scope = "ScopeUpdate";
    final String stream = "StreamUpdate";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long createTime = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, createTime, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    // no update is pending right after creation
    StreamProperty<StreamConfiguration> configRecord = store.getConfigurationProperty(scope, stream, true, null, executor).join();
    assertFalse(configRecord.isUpdating());
    // starting an update flips the record into the updating state
    assertTrue(Futures.await(store.startUpdateConfiguration(scope, stream, configuration2, null, executor)));
    configRecord = store.getConfigurationProperty(scope, stream, true, null, executor).join();
    assertTrue(configRecord.isUpdating());
    // a second start while the first one is still pending must be rejected
    final StreamConfiguration configuration3 = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    assertFalse(Futures.await(store.startUpdateConfiguration(scope, stream, configuration3, null, executor)));
    assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, null, executor)));
    // once completed, the new configuration is the stream's configuration
    configRecord = store.getConfigurationProperty(scope, stream, true, null, executor).join();
    assertEquals(configuration2, configRecord.getProperty());
    // and a fresh update cycle can run to completion
    assertTrue(Futures.await(store.startUpdateConfiguration(scope, stream, configuration3, null, executor)));
    assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, null, executor)));
}
Usage example of io.pravega.client.stream.StreamConfiguration in the pravega project: class StreamMetadataStoreTest, method scaleWithTxTest().
// Verifies the interaction between transactions and scale operations:
// transactions created before or during a scale land on the old epoch, the
// old epoch can only be deleted once all of its transactions are complete AND
// the new segments have been created, and transactions keep working
// throughout a scale.
@Test
public void scaleWithTxTest() throws Exception {
    final String scope = "ScopeScaleWithTx";
    final String stream = "StreamScaleWithTx";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
    List<Integer> scale1SealedSegments = Collections.singletonList(1);
    // region Txn created before scale and during scale
    // tx1 is created before the scale starts, so it belongs to epoch 0
    VersionedTransactionData tx1 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(0, tx1.getEpoch());
    StartScaleResponse response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, false, null, executor).join();
    final List<Segment> scale1SegmentsCreated = response.getSegmentsCreated();
    final int epoch = response.getActiveEpoch();
    assertEquals(0, epoch);
    assertNotNull(scale1SegmentsCreated);
    store.setState(scope, stream, State.SCALING, null, executor).join();
    // a txn created after startScale but before the new segments exist is
    // still placed on the old epoch
    VersionedTransactionData tx2 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(0, tx2.getEpoch());
    store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // once the new segments exist, fresh txns land on the new epoch
    VersionedTransactionData tx3 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(1, tx3.getEpoch());
    // epoch 0 still has open txns (tx1, tx2), so deletion must be refused
    DeleteEpochResponse deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 0, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    assertEquals(null, deleteResponse.getSegmentsCreated());
    assertEquals(null, deleteResponse.getSegmentsSealed());
    store.sealTransaction(scope, stream, tx2.getId(), true, Optional.of(tx2.getVersion()), null, executor).get();
    // commit the sealed tx2 (epoch 0)
    store.commitTransaction(scope, stream, tx2.getEpoch(), tx2.getId(), null, executor).get();
    // tx1 is still open on epoch 0, so deletion must still be refused
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 0, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    store.sealTransaction(scope, stream, tx1.getId(), true, Optional.of(tx1.getVersion()), null, executor).get();
    // commit tx1, the last open txn on epoch 0
    store.commitTransaction(scope, stream, tx1.getEpoch(), tx1.getId(), null, executor).get();
    // NOTE(review): the original comment here said "should not delete epoch",
    // but the assertion below expects deletion to succeed — with all epoch-0
    // txns complete and the new segments created, epoch 0 can now be deleted.
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 0, null, executor).get();
    assertEquals(true, deleteResponse.isDeleted());
    store.sealTransaction(scope, stream, tx3.getId(), true, Optional.of(tx3.getVersion()), null, executor).get();
    // commit tx3 (it lives on epoch 1)
    store.commitTransaction(scope, stream, tx3.getEpoch(), tx3.getId(), null, executor).get();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // epoch 1 is the current epoch and no successor exists yet, so it cannot
    // be deleted
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 1, null, executor).get();
    assertEquals(false, deleteResponse.isDeleted());
    // endregion
    // region Txn created and deleted after scale starts
    List<Integer> scale2SealedSegments = Collections.singletonList(0);
    long scaleTs2 = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.0, 0.25);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.25, 0.5);
    StartScaleResponse response2 = store.startScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, false, null, executor).join();
    final List<Segment> scale2SegmentsCreated = response2.getSegmentsCreated();
    final int epoch2 = response2.getActiveEpoch();
    assertEquals(1, epoch2);
    assertNotNull(scale2SegmentsCreated);
    // txn created after the second scale starts still lands on epoch 1
    VersionedTransactionData txn = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(1, txn.getEpoch());
    store.sealTransaction(scope, stream, txn.getId(), true, Optional.of(txn.getVersion()), null, executor).get();
    // commit the sealed txn
    store.commitTransaction(scope, stream, txn.getEpoch(), txn.getId(), null, executor).get();
    // attempt to delete epoch 1
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 1, null, executor).get();
    // verify that epoch is not deleted as new epoch is not yet created
    assertEquals(false, deleteResponse.isDeleted());
    // verify that new txns can be created and are created on old epoch
    VersionedTransactionData txn2 = store.createTransaction(scope, stream, UUID.randomUUID(), 100, 100, 100, null, executor).get();
    assertEquals(1, txn2.getEpoch());
    store.setState(scope, stream, State.SCALING, null, executor).get();
    store.scaleNewSegmentsCreated(scope, stream, scale2SealedSegments, scale2SegmentsCreated, response2.getActiveEpoch(), scaleTs2, null, executor).join();
    store.sealTransaction(scope, stream, txn2.getId(), true, Optional.of(txn2.getVersion()), null, executor).get();
    // commit txn2, the last open txn on epoch 1
    store.commitTransaction(scope, stream, txn2.getEpoch(), txn2.getId(), null, executor).get();
    // attempt to delete epoch 1 again
    deleteResponse = store.tryDeleteEpochIfScaling(scope, stream, 1, null, executor).get();
    // now that new segments are created, we should be able to delete old epoch.
    assertEquals(true, deleteResponse.isDeleted());
}
Usage example of io.pravega.client.stream.StreamConfiguration in the pravega project: class StreamMetadataStoreTest, method retentionSetTest().
// Exercises retention-set bookkeeping: bucket listener registration,
// enrolling a stream for automatic stream-cuts, adding stream-cut records,
// purging records older than a given cut, and removing the stream from its
// bucket again.
@Test
public void retentionSetTest() throws Exception {
    final String scope = "ScopeRetain";
    final String stream = "StreamRetain";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofDays(2).toMillis()).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    long createTime = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, createTime, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // listener register/unregister smoke check on bucket 0
    AtomicReference<BucketChangeListener.StreamNotification> notificationRef = new AtomicReference<>();
    store.registerBucketChangeListener(0, notificationRef::set);
    store.unregisterBucketListener(0);
    // enrol for auto stream-cuts and confirm the stream shows up in bucket 0
    store.addUpdateStreamForAutoStreamCut(scope, stream, retentionPolicy, null, executor).get();
    List<String> bucketStreams = store.getStreamsForBucket(0, executor).get();
    assertTrue(bucketStreams.contains(String.format("%s/%s", scope, stream)));
    long recordingTime = System.currentTimeMillis();
    // first cut: both segments at offset 0
    Map<Integer, Long> cutMap1 = new HashMap<>();
    cutMap1.put(0, 0L);
    cutMap1.put(1, 0L);
    StreamCutRecord record1 = new StreamCutRecord(recordingTime, Long.MIN_VALUE, cutMap1);
    store.addStreamCutToRetentionSet(scope, stream, record1, null, executor).get();
    // second cut: 10 ms later, both segments at offset 10
    Map<Integer, Long> cutMap2 = new HashMap<>();
    cutMap2.put(0, 10L);
    cutMap2.put(1, 10L);
    StreamCutRecord record2 = new StreamCutRecord(recordingTime + 10, Long.MIN_VALUE, cutMap2);
    store.addStreamCutToRetentionSet(scope, stream, record2, null, executor).get();
    // third cut: 20 ms later, both segments at offset 20
    Map<Integer, Long> cutMap3 = new HashMap<>();
    cutMap3.put(0, 20L);
    cutMap3.put(1, 20L);
    StreamCutRecord record3 = new StreamCutRecord(recordingTime + 20, Long.MIN_VALUE, cutMap3);
    store.addStreamCutToRetentionSet(scope, stream, record3, null, executor).get();
    // all three records are present in the retention set
    List<StreamCutRecord> retained = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
    assertTrue(retained.contains(record1));
    assertTrue(retained.contains(record2));
    assertTrue(retained.contains(record3));
    // purging before record2 drops record1 and record2 but keeps record3
    store.deleteStreamCutBefore(scope, stream, record2, null, executor).get();
    retained = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
    assertTrue(!retained.contains(record1));
    assertTrue(!retained.contains(record2));
    assertTrue(retained.contains(record3));
    // de-enrol and confirm the stream has left the bucket
    store.removeStreamFromAutoStreamCut(scope, stream, null, executor).get();
    bucketStreams = store.getStreamsForBucket(0, executor).get();
    assertTrue(!bucketStreams.contains(String.format("%s/%s", scope, stream)));
}
Aggregations