Use of org.umlg.sqlg.structure.PropertyType in project sqlg by pietermartin.
In the class AndOrHasContainer, the method toSql:
private void toSql(SqlgGraph sqlgGraph, SchemaTableTree schemaTableTree, StringBuilder result, int depth) {
    if (!this.hasContainers.isEmpty()) {
        boolean first = true;
        for (HasContainer h : this.hasContainers) {
            if (!SqlgUtil.isBulkWithin(sqlgGraph, h)) {
                if (first) {
                    first = false;
                    result.append("(");
                } else {
                    result.append(" AND ");
                }
                String k = h.getKey();
                WhereClause whereClause = WhereClause.from(h.getPredicate());
                // check if property exists
                String bool = null;
                if (!k.equals(T.id.getAccessor())) {
                    Map<String, PropertyType> pts = sqlgGraph.getTopology().getTableFor(schemaTableTree.getSchemaTable());
                    if (pts != null && !pts.containsKey(k)) {
                        // verify if we have a value
                        Multimap<String, Object> keyValueMap = LinkedListMultimap.create();
                        whereClause.putKeyValueMap(h, keyValueMap);
                        // we do
                        if (keyValueMap.size() > 0) {
                            bool = "? is null";
                        } else {
                            if (Existence.NULL.equals(h.getBiPredicate())) {
                                bool = "1=1";
                            } else {
                                bool = "1=0";
                            }
                        }
                    }
                }
                if (bool != null) {
                    result.append(bool);
                } else {
                    result.append(whereClause.toSql(sqlgGraph, schemaTableTree, h));
                }
            }
        }
        if (!first) {
            result.append(")");
        }
    }
    int count = 1;
    if (!this.andOrHasContainers.isEmpty()) {
        result.append("\n");
        for (int i = 0; i < depth; i++) {
            result.append("\t");
        }
        result.append("(");
    }
    for (AndOrHasContainer andOrHasContainer : this.andOrHasContainers) {
        andOrHasContainer.toSql(sqlgGraph, schemaTableTree, result, depth + 1);
        if (count++ < this.andOrHasContainers.size()) {
            switch (this.type) {
                case AND:
                    result.append(" AND ");
                    break;
                case OR:
                    result.append(" OR ");
                    break;
                case NONE:
                    break;
            }
        }
    }
    if (!this.andOrHasContainers.isEmpty()) {
        result.append("\n");
        for (int i = 0; i < depth - 1; i++) {
            result.append("\t");
        }
        result.append(")");
    }
}
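The method renders this container's own hasContainers as one parenthesised, AND-joined group, then recurses into the nested AndOrHasContainers, joining their fragments with the container's AND/OR type and indenting by recursion depth. The standalone sketch below mirrors that recursion to show the shape of the WHERE fragment being built; the Node and Type classes and all names are hypothetical, not sqlg's API.

import java.util.ArrayList;
import java.util.List;

// Standalone analogue of AndOrHasContainer.toSql: leaf predicates of a node are AND-ed
// inside one set of parentheses, nested groups are joined with the node's own type.
public class WhereFragmentSketch {

    enum Type { AND, OR, NONE }

    static class Node {
        final Type type;
        final List<String> predicates = new ArrayList<>();
        final List<Node> children = new ArrayList<>();
        Node(Type type) { this.type = type; }
    }

    static void toSql(Node node, StringBuilder result, int depth) {
        if (!node.predicates.isEmpty()) {
            result.append("(").append(String.join(" AND ", node.predicates)).append(")");
        }
        if (!node.children.isEmpty()) {
            result.append("\n");
            indent(result, depth);
            result.append("(");
            for (int i = 0; i < node.children.size(); i++) {
                toSql(node.children.get(i), result, depth + 1);
                if (i < node.children.size() - 1 && node.type != Type.NONE) {
                    result.append(" ").append(node.type).append(" ");
                }
            }
            result.append("\n");
            indent(result, depth - 1);
            result.append(")");
        }
    }

    static void indent(StringBuilder sb, int depth) {
        for (int i = 0; i < depth; i++) {
            sb.append("\t");
        }
    }

    public static void main(String[] args) {
        Node root = new Node(Type.OR);
        Node left = new Node(Type.NONE);
        left.predicates.add("\"name\" = ?");
        Node right = new Node(Type.NONE);
        right.predicates.add("\"age\" > ?");
        root.children.add(left);
        root.children.add(right);
        StringBuilder result = new StringBuilder();
        toSql(root, result, 1);
        // prints an indented group of the form (("name" = ?) OR ("age" > ?))
        System.out.println(result);
    }
}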
Use of org.umlg.sqlg.structure.PropertyType in project sqlg by pietermartin.
In the class SchemaTableTree, the method bulkWithJoin:
private String bulkWithJoin(SqlgGraph sqlgGraph) {
    StringBuilder sb = new StringBuilder();
    List<HasContainer> bulkHasContainers = this.hasContainers.stream().filter(h -> SqlgUtil.isBulkWithinAndOut(sqlgGraph, h)).collect(Collectors.toList());
    for (HasContainer hasContainer : bulkHasContainers) {
        P<List<Object>> predicate = (P<List<Object>>) hasContainer.getPredicate();
        Collection<Object> withInList = predicate.getValue();
        Set<Object> withInOuts = new HashSet<>(withInList);
        Map<String, PropertyType> columns = new HashMap<>();
        Object next = withInOuts.iterator().next();
        if (next instanceof RecordId) {
            next = ((RecordId) next).getId();
        }
        if (hasContainer.getBiPredicate() == Contains.within) {
            columns.put(WITHIN, PropertyType.from(next));
        } else if (hasContainer.getBiPredicate() == Contains.without) {
            columns.put(WITHOUT, PropertyType.from(next));
        } else {
            throw new UnsupportedOperationException("Only Contains.within and Contains.without is supported!");
        }
        if (hasContainer.getBiPredicate() == Contains.within) {
            sb.append(" INNER JOIN\n\t");
        } else {
            // left join, and in the where clause add an IS NULL to find the values not in the right-hand table
            sb.append(" LEFT JOIN\n\t");
        }
        sb.append("(VALUES ");
        boolean first = true;
        for (Object withInOutValue : withInOuts) {
            if (!first) {
                sb.append(", ");
            }
            first = false;
            if (withInOutValue instanceof RecordId) {
                withInOutValue = ((RecordId) withInOutValue).getId();
            }
            sb.append("(");
            PropertyType propertyType = PropertyType.from(withInOutValue);
            sb.append(sqlgGraph.getSqlDialect().valueToValuesString(propertyType, withInOutValue));
            sb.append(")");
        }
        sb.append(") as tmp");
        sb.append(this.rootSchemaTableTree().tmpTableAliasCounter);
        sb.append("(");
        if (hasContainer.getBiPredicate() == Contains.within) {
            sb.append(WITHIN);
        } else {
            sb.append(WITHOUT);
        }
        sb.append(") ");
        sb.append(" on ");
        sb.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(this.getSchemaTable().getSchema()));
        sb.append(".");
        sb.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(this.getSchemaTable().getTable()));
        sb.append(".");
        if (hasContainer.getKey().equals(T.id.getAccessor())) {
            sb.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes("ID"));
        } else {
            sb.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(hasContainer.getKey()));
        }
        if (hasContainer.getBiPredicate() == Contains.within) {
            sb.append(" = tmp");
            sb.append(this.rootSchemaTableTree().tmpTableAliasCounter++);
            sb.append(".within");
        } else {
            sb.append(" = tmp");
            sb.append(this.rootSchemaTableTree().tmpTableAliasCounter++);
            sb.append(".without");
        }
    }
    return sb.toString();
}
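What this builds is a join against an inline VALUES table: a bulk within(...) becomes an INNER JOIN, and a bulk without(...) becomes a LEFT JOIN whose unmatched rows are later filtered with IS NULL. Below is a simplified, self-contained sketch of the same VALUES-join construction; the naive quoting and the schema, table, and column names are illustrative only, since the real method delegates quoting and value rendering to the SqlDialect.

import java.util.List;
import java.util.stream.Collectors;

// Simplified sketch of the "(VALUES ...) as tmpN(within|without)" join fragment that bulkWithJoin emits.
public class ValuesJoinSketch {

    static String bulkJoin(String schema, String table, String column,
                           List<Object> values, boolean within, int aliasCounter) {
        // naive quoting for illustration only; sqlg delegates this to its SqlDialect
        String rows = values.stream()
                .map(v -> v instanceof String ? "('" + v + "')" : "(" + v + ")")
                .collect(Collectors.joining(", "));
        String tmpColumn = within ? "within" : "without";
        return (within ? " INNER JOIN\n\t" : " LEFT JOIN\n\t")
                + "(VALUES " + rows + ") as tmp" + aliasCounter + "(" + tmpColumn + ")"
                + " on \"" + schema + "\".\"" + table + "\".\"" + column + "\""
                + " = tmp" + aliasCounter + "." + tmpColumn;
    }

    public static void main(String[] args) {
        // e.g. a bulk has("name", P.within("john", "peter")) on a Person label
        System.out.println(bulkJoin("public", "V_Person", "name",
                List.of("john", "peter"), true, 1));
    }
}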
Use of org.umlg.sqlg.structure.PropertyType in project sqlg by pietermartin.
In the class SchemaTableTree, the method loadProperty:
public void loadProperty(ResultSet resultSet, SqlgElement sqlgElement) throws SQLException {
    for (int ix = 1; ix <= resultSet.getMetaData().getColumnCount(); ix++) {
        String columnName = resultSet.getMetaData().getColumnLabel(ix);
        Pair<String, PropertyType> p = getColumnNamePropertyName().get(columnName);
        if (p != null) {
            String propertyName = p.getKey();
            PropertyType propertyType = p.getValue();
            if (propertyName.endsWith(Topology.IN_VERTEX_COLUMN_END)) {
                ((SqlgEdge) sqlgElement).loadInVertex(resultSet, propertyName, ix);
            } else if (propertyName.endsWith(Topology.OUT_VERTEX_COLUMN_END)) {
                ((SqlgEdge) sqlgElement).loadOutVertex(resultSet, propertyName, ix);
            } else {
                sqlgElement.loadProperty(resultSet, propertyName, ix, getColumnNameAliasMap(), this.stepDepth, propertyType);
            }
        }
    }
}
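The routing decision here is driven purely by the column label's suffix: labels ending in the topology's in/out vertex markers are loaded as edge endpoints, everything else as an ordinary property of the element. A minimal sketch of that classification follows, with hypothetical suffix values standing in for Topology.IN_VERTEX_COLUMN_END and Topology.OUT_VERTEX_COLUMN_END.

// Hypothetical suffix values; the real constants live on sqlg's Topology class.
public class ColumnRoutingSketch {

    private static final String IN_VERTEX_COLUMN_END = "__I";
    private static final String OUT_VERTEX_COLUMN_END = "__O";

    enum Target { IN_VERTEX, OUT_VERTEX, PROPERTY }

    static Target classify(String propertyName) {
        if (propertyName.endsWith(IN_VERTEX_COLUMN_END)) {
            return Target.IN_VERTEX;
        } else if (propertyName.endsWith(OUT_VERTEX_COLUMN_END)) {
            return Target.OUT_VERTEX;
        } else {
            return Target.PROPERTY;
        }
    }

    public static void main(String[] args) {
        System.out.println(classify("public.V_Person__I")); // IN_VERTEX
        System.out.println(classify("name"));               // PROPERTY
    }
}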
Use of org.umlg.sqlg.structure.PropertyType in project sqlg by pietermartin.
In the class TestMultipleThreadMultipleJvm, the method testMultiThreadedVertexLabelCreation:
@Test
public void testMultiThreadedVertexLabelCreation() throws Exception {
    // number of graphs, each pretending to be a separate JVM
    int NUMBER_OF_GRAPHS = 5;
    int NUMBER_OF_SCHEMAS = 100;
    // Pre-create all the graphs
    List<SqlgGraph> graphs = new ArrayList<>();
    for (int i = 0; i < NUMBER_OF_GRAPHS; i++) {
        graphs.add(SqlgGraph.open(configuration));
    }
    logger.info(String.format("Done firing up %d graphs", NUMBER_OF_GRAPHS));
    ExecutorService poolPerGraph = Executors.newFixedThreadPool(NUMBER_OF_GRAPHS);
    CompletionService<SqlgGraph> poolPerGraphsExecutorCompletionService = new ExecutorCompletionService<>(poolPerGraph);
    try {
        Map<String, PropertyType> properties = new HashMap<>();
        properties.put("name", PropertyType.STRING);
        properties.put("age", PropertyType.INTEGER);
        List<Future<SqlgGraph>> results = new ArrayList<>();
        for (final SqlgGraph sqlgGraphAsync : graphs) {
            for (int i = 0; i < NUMBER_OF_SCHEMAS; i++) {
                final int count = i;
                results.add(poolPerGraphsExecutorCompletionService.submit(() -> {
                    //noinspection Duplicates
                    try {
                        VertexLabel outVertexLabel = sqlgGraphAsync.getTopology().ensureVertexLabelExist("schema_" + count, "tableOut_" + count, properties);
                        VertexLabel inVertexLabel = sqlgGraphAsync.getTopology().ensureVertexLabelExist("schema_" + count, "tableIn_" + count, properties);
                        sqlgGraphAsync.getTopology().ensureEdgeLabelExist("edge_" + count, outVertexLabel, inVertexLabel, properties);
                        final Random random = new Random();
                        if (random.nextBoolean()) {
                            sqlgGraphAsync.tx().commit();
                        } else {
                            sqlgGraphAsync.tx().rollback();
                        }
                    } catch (Exception e) {
                        sqlgGraphAsync.tx().rollback();
                        throw new RuntimeException(e);
                    }
                    return sqlgGraphAsync;
                }));
            }
        }
        for (Future<SqlgGraph> result : results) {
            result.get(5, TimeUnit.MINUTES);
        }
        Thread.sleep(1000);
        for (SqlgGraph graph : graphs) {
            assertEquals(this.sqlgGraph.getTopology(), graph.getTopology());
            assertEquals(this.sqlgGraph.getTopology().toJson(), graph.getTopology().toJson());
        }
        logger.info("starting inserting data");
        for (final SqlgGraph sqlgGraphAsync : graphs) {
            for (int i = 0; i < NUMBER_OF_SCHEMAS; i++) {
                final int count = i;
                results.add(poolPerGraphsExecutorCompletionService.submit(() -> {
                    //noinspection Duplicates
                    try {
                        Vertex v1 = sqlgGraphAsync.addVertex(T.label, "schema_" + count + "." + "tableOut_" + count, "name", "asdasd", "age", 1);
                        Vertex v2 = sqlgGraphAsync.addVertex(T.label, "schema_" + count + "." + "tableIn_" + count, "name", "asdasd", "age", 1);
                        v1.addEdge("edge_" + count, v2, "name", "asdasd", "age", 1);
                        final Random random = new Random();
                        if (random.nextBoolean()) {
                            sqlgGraphAsync.tx().rollback();
                        } else {
                            sqlgGraphAsync.tx().commit();
                        }
                    } catch (Exception e) {
                        sqlgGraphAsync.tx().rollback();
                        throw new RuntimeException(e);
                    }
                    return sqlgGraphAsync;
                }));
            }
        }
        poolPerGraph.shutdown();
        for (Future<SqlgGraph> result : results) {
            result.get(30, TimeUnit.SECONDS);
        }
        // Because of the rollback logic the insert code may also create topology elements, so sleep a bit for notify to do its thing.
        Thread.sleep(1000);
        logger.info("starting querying data");
        Set<Vertex> vertices = this.sqlgGraph.traversal().V().out().toSet();
        this.sqlgGraph.tx().rollback();
        for (SqlgGraph graph : graphs) {
            logger.info("assert querying data");
            Set<Vertex> actual = graph.traversal().V().out().toSet();
            logger.info("vertices.size = " + vertices.size() + " actual.size = " + actual.size());
            assertEquals(vertices, actual);
            graph.tx().rollback();
        }
    } finally {
        for (SqlgGraph graph : graphs) {
            graph.close();
        }
    }
}
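The core of the test is the concurrent use of Topology.ensureVertexLabelExist and ensureEdgeLabelExist with the same Map<String, PropertyType>; because the ensure* calls only create what is missing, racing graphs converge on a single shared topology. A single-threaded sketch of those calls is below, assuming an already-open SqlgGraph (as produced by SqlgGraph.open(configuration) in the test); it is an illustration of the same API usage, not the test itself.

import java.util.HashMap;
import java.util.Map;
import org.umlg.sqlg.structure.PropertyType;
import org.umlg.sqlg.structure.SqlgGraph;

// Single-threaded sketch of the topology calls the test issues concurrently.
public class TopologySketch {

    static void ensureSchema(SqlgGraph sqlgGraph) {
        Map<String, PropertyType> properties = new HashMap<>();
        properties.put("name", PropertyType.STRING);
        properties.put("age", PropertyType.INTEGER);
        // ensure* creates the schema, tables and columns only if they do not exist yet,
        // so repeating the same call from another graph or thread is safe.
        sqlgGraph.getTopology().ensureEdgeLabelExist(
                "edge_0",
                sqlgGraph.getTopology().ensureVertexLabelExist("schema_0", "tableOut_0", properties),
                sqlgGraph.getTopology().ensureVertexLabelExist("schema_0", "tableIn_0", properties),
                properties);
        sqlgGraph.tx().commit();
    }
}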
Use of org.umlg.sqlg.structure.PropertyType in project sqlg by pietermartin.
In the class PostgresDialect, the method mapVertexToInputStream:
private InputStream mapVertexToInputStream(Map<String, PropertyType> propertyTypeMap, Pair<SortedSet<String>, Map<SqlgVertex, Map<String, Object>>> vertexCache) throws SQLException {
    // String str = "2,peter\n3,john";
    StringBuilder sb = new StringBuilder();
    int count = 1;
    for (SqlgVertex sqlgVertex : vertexCache.getRight().keySet()) {
        Map<String, Object> triple = vertexCache.getRight().get(sqlgVertex);
        // set the internal batch id to be used with inserting batch edges
        if (!vertexCache.getLeft().isEmpty()) {
            int countKeys = 1;
            for (String key : vertexCache.getLeft()) {
                PropertyType propertyType = propertyTypeMap.get(key);
                if (countKeys > 1 && countKeys <= vertexCache.getLeft().size()) {
                    sb.append(COPY_COMMAND_DELIMITER);
                }
                countKeys++;
                Object value = triple.get(key);
                switch (propertyType) {
                    case BYTE_ARRAY:
                        String valueOfArrayAsString = PGbytea.toPGString((byte[]) SqlgUtil.convertByteArrayToPrimitiveArray((Byte[]) value));
                        sb.append(valueOfArrayAsString);
                        break;
                    case byte_ARRAY:
                        valueOfArrayAsString = PGbytea.toPGString((byte[]) value);
                        sb.append(valueOfArrayAsString);
                        break;
                    default:
                        sb.append(valueToStringForBulkLoad(propertyType, value));
                }
            }
        } else {
            sb.append("0");
        }
        if (count++ < vertexCache.getRight().size()) {
            sb.append("\n");
        }
    }
    return new ByteArrayInputStream(sb.toString().getBytes());
}
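The stream built above is the row payload for PostgreSQL's COPY protocol: one line per cached vertex, columns separated by COPY_COMMAND_DELIMITER, byte arrays rendered through PGbytea. The sketch below shows how such a stream is typically handed to the stock PostgreSQL driver via CopyManager.copyIn; the connection URL, credentials, table, and column names are illustrative assumptions, not sqlg's actual COPY statement.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.DriverManager;
import org.postgresql.copy.CopyManager;
import org.postgresql.core.BaseConnection;

// Illustrative only: feeds a small tab-delimited payload to COPY ... FROM stdin,
// mirroring the shape of the stream mapVertexToInputStream builds.
public class CopyInSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/sqlgraphdb", "postgres", "postgres")) {
            String payload = "peter\t1\njohn\t2"; // two rows: name, age (tab is COPY's default text delimiter)
            InputStream in = new ByteArrayInputStream(payload.getBytes(StandardCharsets.UTF_8));
            CopyManager copyManager = new CopyManager(connection.unwrap(BaseConnection.class));
            long rows = copyManager.copyIn(
                    "COPY \"public\".\"V_Person\" (\"name\", \"age\") FROM stdin", in);
            System.out.println("copied " + rows + " rows");
        }
    }
}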