Use of org.apache.commons.lang3.tuple.Triple in project mlib by myshzzx.
The class SpringExporter, method proxySock:
public static <T> T proxySock(String host, int port, Class<T> type, @Nullable String beanName) {
    // Per-thread connection state: (socket, object input stream, object output stream).
    ThreadLocal<Triple<Socket, ObjectInputStream, ObjectOutputStream>> tt =
            ThreadLocal.withInitial(() -> Triple.of(null, null, null));
    Enhancer enhancer = new Enhancer();
    enhancer.setSuperclass(type);
    enhancer.setCallback((InvocationHandler) (o, method, args) -> {
        Triple<Socket, ObjectInputStream, ObjectOutputStream> tsoo = tt.get();
        Socket socket = tsoo.getLeft();
        ObjectInputStream in = tsoo.getMiddle();
        ObjectOutputStream out = tsoo.getRight();
        if (socket == null || socket.isClosed()) {
            socket = new Socket(host, port);
            // Create the ObjectOutputStream first: ObjectInputStream blocks on the
            // stream header, so both ends must write before they read.
            out = new ObjectOutputStream(socket.getOutputStream());
            in = new ObjectInputStream(socket.getInputStream());
            tt.set(Triple.of(socket, in, out));
        }
        out.writeObject(new Invoke(type, beanName, method.getDeclaringClass(), method.getName(), method.getParameterTypes(), args));
        out.flush();
        Result r = (Result) in.readObject();
        return r.getResult();
    });
    return (T) enhancer.create();
}
Use of org.apache.commons.lang3.tuple.Triple in project incubator-pulsar by apache.
The class CompactedTopicTest, method buildCompactedLedger:
/**
 * Build a compacted ledger and return the ledger id, the positions of the
 * entries in the ledger, and, for each gap, the message id in the gap paired
 * with the entry that should be returned for it.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();
    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<>();
        // 'r' is the test class's shared java.util.Random.
        if (r.nextInt(10) == 1) {
            // Occasionally jump to a new ledger, leaving a gap in ledger ids.
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1).build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            // Skipping entry ids leaves a gap within the current ledger.
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1).build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.addAndGet(delta + 1)).build();
        @Cleanup RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);
        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();
        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        buffer.release();
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();
    return Triple.of(lh.getId(), positions, idsInGaps);
}
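One detail worth noting: Triple.of returns an immutable triple, but the immutability is shallow; it fixes the three references, not the contents behind them, which is why the lists filled by the asyncAddEntry callbacks above can simply be wrapped at the end. A minimal, self-contained sketch of that behavior, using only commons-lang3:

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.tuple.Triple;

List<String> entries = new ArrayList<>();
Triple<Long, List<String>, List<String>> t = Triple.of(42L, entries, new ArrayList<>());
entries.add("added after Triple.of");
System.out.println(t.getMiddle()); // [added after Triple.of] - the triple sees the change
// The triple returned by Triple.of has no setters; use MutableTriple
// when the slots themselves need to change.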
Use of org.apache.commons.lang3.tuple.Triple in project incubator-pulsar by apache.
The class CompactedTopicTest, method testEntryLookup:
@Test
public void testEntryLookup() throws Exception {
    BookKeeper bk = pulsar.getBookKeeperClientFactory().create(this.conf, null);
    Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> compactedLedgerData = buildCompactedLedger(bk, 500);
    List<Pair<MessageIdData, Long>> positions = compactedLedgerData.getMiddle();
    List<Pair<MessageIdData, Long>> idsInGaps = compactedLedgerData.getRight();
    LedgerHandle lh = bk.openLedger(compactedLedgerData.getLeft(), Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    long lastEntryId = lh.getLastAddConfirmed();
    AsyncLoadingCache<Long, MessageIdData> cache = CompactedTopicImpl.createCache(lh, 50);
    MessageIdData firstPositionId = positions.get(0).getLeft();
    Pair<MessageIdData, Long> lastPosition = positions.get(positions.size() - 1);
    // Check ids before and after the ids in the compacted ledger.
    Assert.assertEquals(CompactedTopicImpl.findStartPoint(new PositionImpl(0, 0), lastEntryId, cache).get(), Long.valueOf(0));
    Assert.assertEquals(CompactedTopicImpl.findStartPoint(new PositionImpl(Long.MAX_VALUE, 0), lastEntryId, cache).get(), Long.valueOf(CompactedTopicImpl.NEWER_THAN_COMPACTED));
    // Entry 0 is never in the compacted ledger, due to how the dummy ids are generated.
    Assert.assertEquals(CompactedTopicImpl.findStartPoint(new PositionImpl(firstPositionId.getLedgerId(), 0), lastEntryId, cache).get(), Long.valueOf(0));
    // Check the next id after the last id in the compacted ledger.
    Assert.assertEquals(CompactedTopicImpl.findStartPoint(new PositionImpl(lastPosition.getLeft().getLedgerId(), lastPosition.getLeft().getEntryId() + 1), lastEntryId, cache).get(), Long.valueOf(CompactedTopicImpl.NEWER_THAN_COMPACTED));
    // Shuffle to make the cache work hard.
    Collections.shuffle(positions, r);
    Collections.shuffle(idsInGaps, r);
    // Check ids we know are in the compacted ledger.
    for (Pair<MessageIdData, Long> p : positions) {
        PositionImpl pos = new PositionImpl(p.getLeft().getLedgerId(), p.getLeft().getEntryId());
        Long got = CompactedTopicImpl.findStartPoint(pos, lastEntryId, cache).get();
        Assert.assertEquals(got, Long.valueOf(p.getRight()));
    }
    // Check ids we know are in the gaps of the compacted ledger.
    for (Pair<MessageIdData, Long> gap : idsInGaps) {
        PositionImpl pos = new PositionImpl(gap.getLeft().getLedgerId(), gap.getLeft().getEntryId());
        Assert.assertEquals(CompactedTopicImpl.findStartPoint(pos, lastEntryId, cache).get(), Long.valueOf(gap.getRight()));
    }
}
Use of org.apache.commons.lang3.tuple.Triple in project sqlg by pietermartin.
The class BaseSqlDialect, method flushEdgeGlobalUniqueIndexes:
@Override
public void flushEdgeGlobalUniqueIndexes(SqlgGraph sqlgGraph, Map<MetaEdge, Pair<SortedSet<String>, Map<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>>>> edgeCache) {
    for (MetaEdge metaEdge : edgeCache.keySet()) {
        Pair<SortedSet<String>, Map<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>>> triples = edgeCache.get(metaEdge);
        Map<String, PropertyColumn> propertyColumnMap = sqlgGraph.getTopology().getPropertiesFor(metaEdge.getSchemaTable().withPrefix(EDGE_PREFIX));
        // Each row maps an edge to (out vertex, in vertex, property name -> value).
        Map<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>> rows = triples.getRight();
        for (Map.Entry<String, PropertyColumn> propertyColumnEntry : propertyColumnMap.entrySet()) {
            PropertyColumn propertyColumn = propertyColumnEntry.getValue();
            for (GlobalUniqueIndex globalUniqueIndex : propertyColumn.getGlobalUniqueIndices()) {
                StringBuilder sql = new StringBuilder();
                sql.append("INSERT INTO ");
                sql.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(Schema.GLOBAL_UNIQUE_INDEX_SCHEMA));
                sql.append(".");
                sql.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(VERTEX_PREFIX + globalUniqueIndex.getName()));
                sql.append(" (");
                PropertyType propertyType = propertyColumn.getPropertyType();
                String[] sqlDefinitions = sqlgGraph.getSqlDialect().propertyTypeToSqlDefinition(propertyType);
                int count = 1;
                for (@SuppressWarnings("unused") String sqlDefinition : sqlDefinitions) {
                    if (count++ > 1) {
                        // Multi-column property types get a post-fixed value column per extra column.
                        sql.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(GlobalUniqueIndex.GLOBAL_UNIQUE_INDEX_VALUE + propertyType.getPostFixes()[count - 2]));
                    } else {
                        sql.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(GlobalUniqueIndex.GLOBAL_UNIQUE_INDEX_VALUE));
                    }
                    sql.append(",");
                }
                sql.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(GlobalUniqueIndex.GLOBAL_UNIQUE_INDEX_RECORD_ID));
                sql.append(",");
                sql.append(sqlgGraph.getSqlDialect().maybeWrapInQoutes(GlobalUniqueIndex.GLOBAL_UNIQUE_INDEX_PROPERTY_NAME));
                sql.append(") VALUES ( ");
                // One placeholder per value column (the original loop appended the same
                // "?" in both branches), then two more for record id and property name.
                for (@SuppressWarnings("unused") String sqlDefinition : sqlDefinitions) {
                    sql.append("?, ");
                }
                sql.append("?, ?)");
                if (sqlgGraph.getSqlDialect().needsSemicolon()) {
                    sql.append(";");
                }
                if (logger.isDebugEnabled()) {
                    logger.debug(sql.toString());
                }
                Connection conn = sqlgGraph.tx().getConnection();
                try (PreparedStatement preparedStatement = conn.prepareStatement(sql.toString())) {
                    for (Map.Entry<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>> rowEntry : rows.entrySet()) {
                        SqlgEdge sqlgEdge = rowEntry.getKey();
                        Map<String, Object> parameterValueMap = rowEntry.getValue().getRight();
                        Object value = parameterValueMap.get(propertyColumn.getName());
                        List<Pair<PropertyType, Object>> typeAndValues = new ArrayList<>();
                        typeAndValues.add(Pair.of(propertyColumn.getPropertyType(), value));
                        typeAndValues.add(Pair.of(PropertyType.STRING, sqlgEdge.id().toString()));
                        typeAndValues.add(Pair.of(PropertyType.STRING, propertyColumn.getName()));
                        SqlgUtil.setKeyValuesAsParameterUsingPropertyColumn(sqlgGraph, true, 1, preparedStatement, typeAndValues);
                        preparedStatement.addBatch();
                    }
                    preparedStatement.executeBatch();
                } catch (SQLException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }
}
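The nested generics above are easier to digest when the structure is built up explicitly. A sketch of the shape this method consumes; outVertex, inVertex, edge, and metaEdge are placeholders standing in for objects the surrounding sqlg transaction would supply:

// Per edge: (out vertex, in vertex, property name -> value).
Map<String, Object> props = new HashMap<>();
props.put("name", "a1");
Triple<SqlgVertex, SqlgVertex, Map<String, Object>> row = Triple.of(outVertex, inVertex, props);

// Per meta edge: (sorted property keys, edge -> row).
Map<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>> rows = new LinkedHashMap<>();
rows.put(edge, row);
Pair<SortedSet<String>, Map<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>>> perMetaEdge =
        Pair.of(new TreeSet<>(props.keySet()), rows);

Map<MetaEdge, Pair<SortedSet<String>, Map<SqlgEdge, Triple<SqlgVertex, SqlgVertex, Map<String, Object>>>>> edgeCache = new HashMap<>();
edgeCache.put(metaEdge, perMetaEdge);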
Use of org.apache.commons.lang3.tuple.Triple in project sqlg by pietermartin.
The class SqlgSqlExecutor, method executeDropQuery:
public static void executeDropQuery(SqlgGraph sqlgGraph, SchemaTableTree rootSchemaTableTree, LinkedList<SchemaTableTree> distinctQueryStack) {
    List<Triple<DROP_QUERY, String, SchemaTable>> sqls = rootSchemaTableTree.constructDropSql(distinctQueryStack);
    for (Triple<DROP_QUERY, String, SchemaTable> sqlTriple : sqls) {
        DROP_QUERY dropQuery = sqlTriple.getLeft();
        String sql = sqlTriple.getMiddle();
        SchemaTable deletedSchemaTable = sqlTriple.getRight();
        switch (dropQuery) {
            case ALTER:
                executeDropQuery(sqlgGraph, sql, new LinkedList<>(), deletedSchemaTable);
                break;
            case EDGE:
                // EDGE drops execute against a copy of the stack minus its last element.
                LinkedList<SchemaTableTree> tmp = new LinkedList<>(distinctQueryStack);
                tmp.removeLast();
                executeDropQuery(sqlgGraph, sql, tmp, deletedSchemaTable);
                break;
            case NORMAL:
                executeDropQuery(sqlgGraph, sql, distinctQueryStack, deletedSchemaTable);
                break;
            case TRUNCATE:
                executeDropQuery(sqlgGraph, sql, new LinkedList<>(), deletedSchemaTable);
                break;
            default:
                throw new IllegalStateException("Unknown DROP_QUERY " + dropQuery);
        }
    }
}
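For orientation, a hedged sketch of the kind of list constructDropSql is expected to produce; the SQL text and the table name are illustrative assumptions, not sqlg's actual generated statements:

List<Triple<DROP_QUERY, String, SchemaTable>> sqls = new ArrayList<>();
SchemaTable person = SchemaTable.of("public", "person"); // hypothetical schema/table
sqls.add(Triple.of(DROP_QUERY.NORMAL,
        "DELETE FROM \"public\".\"V_person\" WHERE \"ID\" = ?", person));
sqls.add(Triple.of(DROP_QUERY.TRUNCATE,
        "TRUNCATE \"public\".\"V_person\"", person));
// Each triple carries the dispatch tag (left), the SQL text (middle),
// and the schema/table it affects (right).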