Use of org.apache.beam.sdk.io.gcp.spanner.changestreams.model.PartitionMetadata in project beam by apache.
From the class ChildPartitionsRecordActionTest, method testRestrictionClaimedAndIsMergeCaseAndChildExists:
@Test
public void testRestrictionClaimedAndIsMergeCaseAndChildExists() {
  final String partitionToken = "partitionToken";
  final String anotherPartitionToken = "anotherPartitionToken";
  final String childPartitionToken = "childPartition1";
  final HashSet<String> parentTokens = Sets.newHashSet(partitionToken, anotherPartitionToken);
  final long heartbeat = 30L;
  final Timestamp startTimestamp = Timestamp.ofTimeMicroseconds(10L);
  final Timestamp endTimestamp = Timestamp.ofTimeMicroseconds(20L);
  final PartitionMetadata partition = mock(PartitionMetadata.class);
  // A merge: a single child lists both parents as its parent tokens.
  final ChildPartitionsRecord record =
      new ChildPartitionsRecord(
          startTimestamp,
          "recordSequence",
          Collections.singletonList(new ChildPartition(childPartitionToken, parentTokens)),
          null);
  when(partition.getEndTimestamp()).thenReturn(endTimestamp);
  when(partition.getHeartbeatMillis()).thenReturn(heartbeat);
  when(partition.getPartitionToken()).thenReturn(partitionToken);
  when(tracker.tryClaim(10L)).thenReturn(true);
  // The child row already exists, so this parent must not insert it again.
  when(transaction.getPartition(childPartitionToken)).thenReturn(mock(Struct.class));

  final Optional<ProcessContinuation> maybeContinuation =
      action.run(partition, record, tracker, watermarkEstimator);

  assertEquals(Optional.empty(), maybeContinuation);
  verify(watermarkEstimator).setWatermark(new Instant(startTimestamp.toSqlTimestamp().getTime()));
  verify(transaction, never()).insert(any());
}
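The ChildPartitionsRecordActionTest snippets in this section use several collaborators (action, transaction, tracker, watermarkEstimator) whose setup the excerpts omit. The sketch below reconstructs a plausible fixture from how those identifiers are used; the collaborator types, the action's constructor arguments, and the runInTransaction wiring are assumptions rather than the verbatim Beam test code, and imports are elided.
// Plausible fixture for the surrounding snippets; types are inferred from
// usage. The ChildPartitionsRecordAction constructor and the
// dao.runInTransaction(...) wiring are assumptions, not verbatim Beam code.
private PartitionMetadataDao dao;
private PartitionMetadataDao.InTransactionContext transaction;
private ChildPartitionsRecordAction action;
private RestrictionTracker<OffsetRange, Long> tracker;
private ManualWatermarkEstimator<Instant> watermarkEstimator;

@Before
public void setUp() {
  dao = mock(PartitionMetadataDao.class);
  transaction = mock(PartitionMetadataDao.InTransactionContext.class);
  tracker = mock(RestrictionTracker.class);
  watermarkEstimator = mock(ManualWatermarkEstimator.class);
  action = new ChildPartitionsRecordAction(dao, mock(ChangeStreamMetrics.class));
  // Route the action's transactional callback to the mocked context so the
  // per-test getPartition(...) stubs and insert(...) verifications apply.
  when(dao.runInTransaction(any())).thenAnswer(invocation -> {
    final Function<PartitionMetadataDao.InTransactionContext, Void> fn =
        invocation.getArgument(0);
    fn.apply(transaction);
    return null; // simplified: the real return value is not modeled here
  });
}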
Use of org.apache.beam.sdk.io.gcp.spanner.changestreams.model.PartitionMetadata in project beam by apache.
From the class ChildPartitionsRecordActionTest, method testRestrictionClaimedAndIsSplitCase:
@Test
public void testRestrictionClaimedAndIsSplitCase() {
  final String partitionToken = "partitionToken";
  final long heartbeat = 30L;
  final Timestamp startTimestamp = Timestamp.ofTimeMicroseconds(10L);
  final Timestamp endTimestamp = Timestamp.ofTimeMicroseconds(20L);
  final PartitionMetadata partition = mock(PartitionMetadata.class);
  // A split: two children, each naming the same single parent.
  final ChildPartitionsRecord record =
      new ChildPartitionsRecord(
          startTimestamp,
          "recordSequence",
          Arrays.asList(
              new ChildPartition("childPartition1", partitionToken),
              new ChildPartition("childPartition2", partitionToken)),
          null);
  when(partition.getEndTimestamp()).thenReturn(endTimestamp);
  when(partition.getHeartbeatMillis()).thenReturn(heartbeat);
  when(partition.getPartitionToken()).thenReturn(partitionToken);
  when(tracker.tryClaim(10L)).thenReturn(true);
  // Neither child is registered yet, so both must be inserted.
  when(transaction.getPartition("childPartition1")).thenReturn(null);
  when(transaction.getPartition("childPartition2")).thenReturn(null);

  final Optional<ProcessContinuation> maybeContinuation =
      action.run(partition, record, tracker, watermarkEstimator);

  assertEquals(Optional.empty(), maybeContinuation);
  verify(watermarkEstimator).setWatermark(new Instant(startTimestamp.toSqlTimestamp().getTime()));
  verify(transaction).insert(PartitionMetadata.newBuilder()
      .setPartitionToken("childPartition1").setParentTokens(Sets.newHashSet(partitionToken))
      .setStartTimestamp(startTimestamp).setEndTimestamp(endTimestamp)
      .setHeartbeatMillis(heartbeat).setState(CREATED).setWatermark(startTimestamp).build());
  verify(transaction).insert(PartitionMetadata.newBuilder()
      .setPartitionToken("childPartition2").setParentTokens(Sets.newHashSet(partitionToken))
      .setStartTimestamp(startTimestamp).setEndTimestamp(endTimestamp)
      .setHeartbeatMillis(heartbeat).setState(CREATED).setWatermark(startTimestamp).build());
}
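The shape of the ChildPartition list is what distinguishes the two cases: a split fans one parent out to several children, while a merge collapses several parents into one child. Both constructor forms appear in the snippets themselves; side by side (a minimal illustration, not new API):
// Split: each child names the single parent token; the expected insert in the
// split test suggests this String form ends up as a one-element parent set.
final ChildPartition splitChild = new ChildPartition("childPartition1", "partitionToken");
// Merge: one child carries the tokens of every parent being merged.
final ChildPartition mergeChild =
    new ChildPartition(
        "childPartition1", Sets.newHashSet("partitionToken", "anotherPartitionToken"));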
Use of org.apache.beam.sdk.io.gcp.spanner.changestreams.model.PartitionMetadata in project beam by apache.
From the class ChildPartitionsRecordActionTest, method testRestrictionClaimedAndIsMergeCaseAndChildNotExists:
@Test
public void testRestrictionClaimedAndIsMergeCaseAndChildNotExists() {
  final String partitionToken = "partitionToken";
  final String anotherPartitionToken = "anotherPartitionToken";
  final String childPartitionToken = "childPartition1";
  final HashSet<String> parentTokens = Sets.newHashSet(partitionToken, anotherPartitionToken);
  final long heartbeat = 30L;
  final Timestamp startTimestamp = Timestamp.ofTimeMicroseconds(10L);
  final Timestamp endTimestamp = Timestamp.ofTimeMicroseconds(20L);
  final PartitionMetadata partition = mock(PartitionMetadata.class);
  final ChildPartitionsRecord record =
      new ChildPartitionsRecord(
          startTimestamp,
          "recordSequence",
          Collections.singletonList(new ChildPartition(childPartitionToken, parentTokens)),
          null);
  when(partition.getEndTimestamp()).thenReturn(endTimestamp);
  when(partition.getHeartbeatMillis()).thenReturn(heartbeat);
  when(partition.getPartitionToken()).thenReturn(partitionToken);
  when(tracker.tryClaim(10L)).thenReturn(true);
  // No row exists for the child yet, so this parent performs the insert.
  when(transaction.getPartition(childPartitionToken)).thenReturn(null);

  final Optional<ProcessContinuation> maybeContinuation =
      action.run(partition, record, tracker, watermarkEstimator);

  assertEquals(Optional.empty(), maybeContinuation);
  verify(watermarkEstimator).setWatermark(new Instant(startTimestamp.toSqlTimestamp().getTime()));
  verify(transaction).insert(PartitionMetadata.newBuilder()
      .setPartitionToken(childPartitionToken).setParentTokens(parentTokens)
      .setStartTimestamp(startTimestamp).setEndTimestamp(endTimestamp)
      .setHeartbeatMillis(heartbeat).setState(CREATED).setWatermark(startTimestamp).build());
}
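Taken together, the two merge tests pin down a first-writer-wins guard: only the parent that finds no existing metadata row for the child performs the insert. The sketch below is an illustration of that asserted behavior, not the actual Beam implementation; the method name is hypothetical.
// Illustrative only: mirrors what the two merge tests assert about the action.
void registerChildIfAbsent(
    PartitionMetadataDao.InTransactionContext transaction,
    String childPartitionToken,
    PartitionMetadata childMetadata) {
  if (transaction.getPartition(childPartitionToken) == null) {
    // First parent to observe the child creates its row in CREATED state.
    transaction.insert(childMetadata);
  }
  // Otherwise another parent already registered the child, so nothing is
  // inserted, which is what verify(transaction, never()).insert(any())
  // checks in the child-exists test above.
}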
Use of org.apache.beam.sdk.io.gcp.spanner.changestreams.model.PartitionMetadata in project beam by apache.
From the class PartitionMetadataMapperTest, method testMapPartitionMetadataFromResultSetWithNulls:
@Test
public void testMapPartitionMetadataFromResultSetWithNulls() {
  final Struct row = Struct.newBuilder()
      .set(COLUMN_PARTITION_TOKEN).to("token")
      .set(COLUMN_PARENT_TOKENS).toStringArray(Collections.singletonList("parentToken"))
      .set(COLUMN_START_TIMESTAMP).to(Timestamp.ofTimeMicroseconds(10L))
      .set(COLUMN_END_TIMESTAMP).to((Timestamp) null)
      .set(COLUMN_HEARTBEAT_MILLIS).to(5_000L)
      .set(COLUMN_STATE).to(State.CREATED.name())
      .set(COLUMN_WATERMARK).to(Timestamp.ofTimeMicroseconds(30L))
      .set(COLUMN_CREATED_AT).to(Timestamp.ofTimeMicroseconds(40L))
      .set(COLUMN_SCHEDULED_AT).to((Timestamp) null)
      .set(COLUMN_RUNNING_AT).to((Timestamp) null)
      .set(COLUMN_FINISHED_AT).to((Timestamp) null)
      .build();

  final PartitionMetadata partition = mapper.from(row);

  // Null columns must surface as null fields: end timestamp, scheduled at,
  // running at, and finished at.
  assertEquals(
      new PartitionMetadata("token", Sets.newHashSet("parentToken"),
          Timestamp.ofTimeMicroseconds(10L), null, 5_000L, State.CREATED,
          Timestamp.ofTimeMicroseconds(30L), Timestamp.ofTimeMicroseconds(40L),
          null, null, null),
      partition);
}
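Outside the test, a mapper like this would typically be applied row by row to a Spanner result set. A hypothetical usage sketch follows; databaseClient and query are placeholder names supplied by the caller, not identifiers from the Beam codebase.
// Hypothetical usage: map every row of a change-stream metadata query.
// `databaseClient` (com.google.cloud.spanner.DatabaseClient) and `query`
// are placeholders provided by the surrounding code.
try (ResultSet resultSet = databaseClient.singleUse().executeQuery(query)) {
  while (resultSet.next()) {
    final PartitionMetadata metadata = mapper.from(resultSet.getCurrentRowAsStruct());
    // Nullable columns (END_TIMESTAMP, SCHEDULED_AT, RUNNING_AT, FINISHED_AT)
    // come back as null fields, exactly as the test above asserts.
  }
}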
Use of org.apache.beam.sdk.io.gcp.spanner.changestreams.model.PartitionMetadata in project beam by apache.
From the class ReadChangeStreamPartitionRangeTrackerTest, method testTrySplitReturnsNullForInitialPartition:
@Test
public void testTrySplitReturnsNullForInitialPartition() {
  final PartitionMetadata partition = mock(PartitionMetadata.class);
  final OffsetRange range = new OffsetRange(100, 200);
  final ReadChangeStreamPartitionRangeTracker tracker =
      new ReadChangeStreamPartitionRangeTracker(partition, range);
  when(partition.getPartitionToken()).thenReturn(InitialPartition.PARTITION_TOKEN);

  // The synthetic initial partition must never be split.
  assertNull(tracker.trySplit(0.0D));
}
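For contrast, a token other than InitialPartition.PARTITION_TOKEN should remain splittable. The companion check below is not part of the quoted test class excerpt; it assumes the tracker falls back to the standard OffsetRangeTracker splitting behavior for ordinary partitions, as its name and the test above suggest.
// Companion check (assumption: non-initial tokens delegate to the regular
// OffsetRangeTracker split logic).
final PartitionMetadata partition = mock(PartitionMetadata.class);
final ReadChangeStreamPartitionRangeTracker tracker =
    new ReadChangeStreamPartitionRangeTracker(partition, new OffsetRange(100, 200));
when(partition.getPartitionToken()).thenReturn("regularPartitionToken");
assertNotNull(tracker.trySplit(0.0D));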