Use of com.datastax.oss.dsbulk.workflow.commons.statement.UnmappableStatement in project dsbulk by datastax.
From the class DefaultRecordMapperTest, method should_return_unmappable_statement_when_pk_column_unmapped:
@Test
void should_return_unmappable_statement_when_pk_column_unmapped() {
  when(boundStatementBuilder.allIndicesOf(C1.asIdentifier()))
      .thenReturn(Collections.singletonList(0));
  when(boundStatementBuilder.isSet(0)).thenReturn(false);
  RecordMapper mapper =
      new DefaultRecordMapper(
          Collections.singletonList(insertStatement),
          set(C1), set(C2, C3), V4, mapping, recordMetadata,
          false, true, false,
          statement -> boundStatementBuilder);
  Statement<?> result = mapper.map(record).single().block();
  assertThat(result).isInstanceOf(UnmappableStatement.class);
  UnmappableStatement unmappableStatement = (UnmappableStatement) result;
  assertThat(unmappableStatement.getError())
      .isInstanceOf(InvalidMappingException.class)
      .hasMessageContaining("Primary key column col1 cannot be left unset");
}
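The test shows that when a primary-key column is left unset, the mapper does not throw; it emits an UnmappableStatement carrying the underlying error. The following is a minimal sketch of how downstream code could react to such a result; the handleMappedStatement helper and the logging are hypothetical, and only the instanceof check and the getError() accessor used in the test above are assumed.

import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.dsbulk.workflow.commons.statement.UnmappableStatement;

// Minimal sketch (hypothetical helper): inspect the result of RecordMapper.map().
// Only the instanceof check and getError() shown in the test above are assumed.
static void handleMappedStatement(Statement<?> result) {
  if (result instanceof UnmappableStatement) {
    // the mapping failure is preserved on the statement rather than thrown
    Throwable error = ((UnmappableStatement) result).getError();
    System.err.println("Record could not be mapped: " + error.getMessage());
  }
  // otherwise the statement can be executed or batched as usual
}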
Use of com.datastax.oss.dsbulk.workflow.commons.statement.UnmappableStatement in project dsbulk by datastax.
From the class MetricsManagerTest, method setUp:
@BeforeEach
void setUp() throws Exception {
  URI resource1 = new URI("file:///file1.csv");
  URI resource2 = new URI("file:///file2.csv");
  URI resource3 = new URI("file:///file3.csv");
  String source1 = "line1\n";
  String source2 = "line2\n";
  String source3 = "line3\n";
  record1 = DefaultRecord.indexed(source1, resource1, -1, "irrelevant");
  record2 = DefaultRecord.indexed(source2, resource2, -1, "irrelevant");
  record3 = new DefaultErrorRecord(source3, resource3, -1, new RuntimeException("irrelevant"));
  BatchableStatement<?> stmt1 =
      new MappedSimpleStatement(record1, SimpleStatement.newInstance("irrelevant"));
  BatchableStatement<?> stmt2 =
      new MappedSimpleStatement(record2, SimpleStatement.newInstance("irrelevant"));
  stmt3 = new UnmappableStatement(record3, new RuntimeException("irrelevant"));
  batch = BatchStatement.newInstance(DefaultBatchType.UNLOGGED).add(stmt1).add(stmt2);
}
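In a loading workflow the mapper emits a mix of successfully mapped statements and UnmappableStatement instances, and a metrics stage like the one exercised by MetricsManagerTest has to tell them apart. Below is a minimal sketch, not the MetricsManager API itself: the countUnmappable helper is hypothetical, and only the UnmappableStatement type and Reactor's filter/count operators are assumed.

import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.dsbulk.workflow.commons.statement.UnmappableStatement;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

// Minimal sketch (hypothetical helper): count how many statements in a mapped
// stream are unmappable, the kind of bookkeeping a metrics stage performs.
static Mono<Long> countUnmappable(Flux<Statement<?>> mapped) {
  return mapped.filter(stmt -> stmt instanceof UnmappableStatement).count();
}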