use of com.evolveum.midpoint.repo.sqlbase.JdbcSession in project midpoint by Evolveum.
The following snippet is the initCustomColumns method of the class SqaleAuditServiceFactory.
/**
 * Registers custom (extension) audit columns declared in the audit service configuration.
 * For each configured column, an extension column is added to the audit event record mapping;
 * if "create missing" is enabled, the column is also physically added to the audit table
 * when the current table metadata shows it does not exist yet.
 *
 * @param configuration audit service configuration (hierarchical; column entries are read
 * from {@code CONF_AUDIT_SERVICE_COLUMNS})
 * @param sqlRepoContext repository context providing JDBC sessions and repo configuration
 * @throws IllegalArgumentException if a configured column entry is missing its column name
 * or its event record property name
 */
private void initCustomColumns(@NotNull Configuration configuration, SqaleRepoContext sqlRepoContext) {
    List<HierarchicalConfiguration<ImmutableNode>> subConfigColumns =
            ((BaseHierarchicalConfiguration) configuration).configurationsAt(CONF_AUDIT_SERVICE_COLUMNS);

    // Here we use config from context, it can be the main repository configuration...
    SqaleRepositoryConfiguration repoConfig =
            (SqaleRepositoryConfiguration) sqlRepoContext.getJdbcRepositoryConfiguration();
    boolean createMissing = repoConfig.isCreateMissingCustomColumns()
            // ...but we'll consider the flag also on the audit configuration, just in case.
            || configuration.getBoolean(PROPERTY_CREATE_MISSING_CUSTOM_COLUMNS, false);

    // Table metadata is loaded only when we may need to add columns; a read-only
    // transaction is enough because we only inspect the current table structure here.
    SqlTableMetadata tableMetadata = null;
    if (createMissing) {
        try (JdbcSession jdbcSession = sqlRepoContext.newJdbcSession().startReadOnlyTransaction()) {
            tableMetadata = SqlTableMetadata.create(jdbcSession.connection(), QAuditEventRecord.TABLE_NAME);
        }
    }

    for (Configuration subConfigColumn : subConfigColumns) {
        String columnName = getStringFromConfig(subConfigColumn, CONF_AUDIT_SERVICE_COLUMN_NAME);
        String propertyName = getStringFromConfig(subConfigColumn, CONF_AUDIT_SERVICE_EVENT_RECORD_PROPERTY_NAME);
        // Fail fast with a clear message instead of an obscure NPE later in the column mapping.
        if (columnName == null || columnName.isEmpty() || propertyName == null || propertyName.isEmpty()) {
            throw new IllegalArgumentException("Custom audit column configuration requires non-empty "
                    + CONF_AUDIT_SERVICE_COLUMN_NAME + " and " + CONF_AUDIT_SERVICE_EVENT_RECORD_PROPERTY_NAME
                    + " (got column name: " + columnName + ", property name: " + propertyName + ")");
        }

        // No type definition for now, it's all String or String implicit conversion.
        ColumnMetadata columnMetadata = ColumnMetadata.named(columnName).ofType(Types.VARCHAR);
        QAuditEventRecordMapping.get().addExtensionColumn(propertyName, columnMetadata);

        // Physically add the column only when metadata was loaded (createMissing == true)
        // and the column is not present in the table yet.
        if (tableMetadata != null && tableMetadata.get(columnName) == null) {
            try (JdbcSession jdbcSession = sqlRepoContext.newJdbcSession().startTransaction()) {
                jdbcSession.addColumn(QAuditEventRecord.TABLE_NAME, columnMetadata);
                jdbcSession.commit();
            }
        }
    }
}
use of com.evolveum.midpoint.repo.sqlbase.JdbcSession in project midpoint by Evolveum.
The following snippet is the createRowTransformer method of the class QOperationExecutionMapping.
/**
 * Creates a transformer that turns result rows into {@link OperationExecutionType}
 * container values by pulling them out of their owner objects.
 * Owner objects are batch-loaded once per result list (in {@code beforeTransformation})
 * and cached in the {@code owners} map that the anonymous transformer closes over.
 */
@Override
public ResultListRowTransformer<OperationExecutionType, QOperationExecution<OR>, MOperationExecution> createRowTransformer(SqlQueryContext<OperationExecutionType, QOperationExecution<OR>, MOperationExecution> sqlQueryContext, JdbcSession jdbcSession) {
// Owner objects keyed by owner OID; filled in beforeTransformation(), read in transform().
Map<UUID, ObjectType> owners = new HashMap<>();
return new ResultListRowTransformer<>() {
@Override
public void beforeTransformation(List<Tuple> rowTuples, QOperationExecution<OR> entityPath) throws SchemaException {
// Collect the distinct owner OIDs of all rows so owners can be fetched in one query.
Set<UUID> ownerOids = rowTuples.stream().map(row -> Objects.requireNonNull(row.get(entityPath)).ownerOid).collect(Collectors.toSet());
// TODO do we need get options here as well? Is there a scenario where we load container
// and define what to load for referenced/owner object?
QObject<?> o = QObjectMapping.getObjectMapping().defaultAlias();
// Single batch query: oid + fullObject for every owner of the returned rows.
List<Tuple> result = jdbcSession.newQuery().select(o.oid, o.fullObject).from(o).where(o.oid.in(ownerOids)).fetch();
for (Tuple row : result) {
UUID oid = Objects.requireNonNull(row.get(o.oid));
// Parse the serialized owner and cache it for transform() below.
ObjectType owner = parseSchemaObject(row.get(o.fullObject), oid.toString(), ObjectType.class);
owners.put(oid, owner);
}
}
@Override
public OperationExecutionType transform(Tuple rowTuple, QOperationExecution<OR> entityPath, Collection<SelectorOptions<GetOperationOptions>> options) {
MOperationExecution row = Objects.requireNonNull(rowTuple.get(entityPath));
// Owner must have been cached by beforeTransformation(); otherwise the data is inconsistent.
ObjectType object = Objects.requireNonNull(owners.get(row.ownerOid), () -> "Missing owner with OID " + row.ownerOid + " for OperationExecution with ID " + row.cid);
// Locate the operationExecution container inside the parsed owner object.
PrismContainer<OperationExecutionType> opexContainer = object.asPrismObject().findContainer(ObjectType.F_OPERATION_EXECUTION);
if (opexContainer == null) {
throw new SystemException("Object " + object + " has no operation execution as expected from " + row);
}
// Select the specific container value matching this row's container ID (cid).
PrismContainerValue<OperationExecutionType> pcv = opexContainer.findValue(row.cid);
if (pcv == null) {
throw new SystemException("Object " + object + " has no operation execution with ID " + row.cid);
}
return pcv.asContainerable();
}
};
}
use of com.evolveum.midpoint.repo.sqlbase.JdbcSession in project midpoint by Evolveum.
The following snippet is the test900WorkingWithPgArraysJsonbAndBytea method of the class SqaleRepoSmokeTest.
// region low-level tests
/**
 * Exercises the type mappers/converters for PG arrays, JSONB and BYTEA columns:
 * inserts a row with non-null values, verifies the round-trip, then nulls the
 * same columns and verifies the NULLs round-trip as well.
 */
@Test
public void test900WorkingWithPgArraysJsonbAndBytea() {
    QUser u = aliasFor(QUser.class);
    String userName = "user" + getTestNumber();

    MUser newUser = new MUser();
    setName(newUser, userName);
    newUser.policySituations = new Integer[] { 1, 2 };
    newUser.subtypes = new String[] { "subtype1", "subtype2" };
    // deliberately unnormalized JSON (extra whitespace/newline)
    newUser.ext = new Jsonb("{\"key\" : \"value\",\n\"number\": 47} ");
    newUser.photo = new byte[] { 0, 1, 0, 1 };

    try (JdbcSession session = startTransaction()) {
        session.newInsert(u).populate(newUser).execute();
        session.commit();
    }

    MUser fetched = selectOne(u, u.nameNorm.eq(userName));
    assertThat(fetched.policySituations).contains(1, 2);
    assertThat(fetched.subtypes).contains("subtype1", "subtype2");
    // JSONB comes back normalized by the database
    assertThat(fetched.ext.value).isEqualTo("{\"key\": \"value\", \"number\": 47}");
    // byte[] is used for fullObject, there is no chance to miss a problem with it
    assertThat(fetched.photo).hasSize(4);

    // Now null out the same columns, using both setNull() and set(..., null).
    try (JdbcSession session = startTransaction()) {
        session.newUpdate(u)
                .setNull(u.policySituations)
                // this should do the same
                .set(u.subtypes, (String[]) null)
                .setNull(u.ext)
                .setNull(u.photo)
                .where(u.oid.eq(fetched.oid))
                .execute();
        session.commit();
    }

    fetched = selectOne(u, u.nameNorm.eq(userName));
    assertThat(fetched.policySituations).isNull();
    assertThat(fetched.subtypes).isNull();
    assertThat(fetched.ext).isNull();
    // but we never set fullObject to null, so this is a good test for doing so with byte[]
    assertThat(fetched.photo).isNull();
}
use of com.evolveum.midpoint.repo.sqlbase.JdbcSession in project midpoint by Evolveum.
The following snippet is the clearAudit method of the class SqaleRepoBaseTest.
/** Wipes the audit event table (and its cascaded sub-tables); called on demand. */
public void clearAudit() {
    try (JdbcSession session = startTransaction()) {
        session.executeStatement("TRUNCATE ma_audit_event CASCADE;");
        session.commit();
        display("AUDIT tables cleared");
    }
}
use of com.evolveum.midpoint.repo.sqlbase.JdbcSession in project midpoint by Evolveum.
The following snippet is the clearDatabase method of the class SqaleRepoBaseTest.
/**
 * Database cleanup for Sqale tests only.
 * Check TestSqaleRepositoryBeanConfig.clearDatabase(SqaleRepoContext) for integration tests.
 */
private void clearDatabase() {
    try (JdbcSession session = startTransaction()) {
        // Deleting an object cascades to the sub-rows of the whole "object aggregate",
        // but TRUNCATE does not run the ON DELETE trigger, so many refs/container
        // tables would stay dirty after truncating m_object alone...
        session.executeStatement("TRUNCATE m_object CASCADE;");
        // ...truncating m_object_oid, however, cleans all the remaining tables.
        session.executeStatement("TRUNCATE m_object_oid CASCADE;");
        // Audit is cleaned on-demand using clearAudit().
        /*
        Truncates are much faster than this delete probably because it works row by row:
        long count = jdbcSession.newDelete(QObjectMapping.INSTANCE.defaultAlias()).execute();
        display("Deleted " + count + " objects from DB");
        */
        session.commit();
        display("OBJECT tables cleared");
    }

    // This is "suite" scope code, but @BeforeSuite can't use injected fields.
    if (!cacheTablesCleared) {
        try (JdbcSession session = startTransaction()) {
            // URI cache must work even when default relation ID is not 0, so we can wipe it all.
            session.executeStatement("TRUNCATE m_uri CASCADE;");
            session.executeStatement("TRUNCATE m_ext_item CASCADE;");
            session.commit();
        }
        // Uses its own transaction.
        sqlRepoContext.clearCaches();
        // It would work with the URI cache cleared before every class, but that's not
        // how midPoint will work either.
        cacheTablesCleared = true;
        display("URI cache and Extension item catalog tables cleared");
    }
}
Aggregations