use of com.orientechnologies.orient.server.distributed.ServerRun in project orientdb by orientechnologies.
the class AbstractScenarioTest method executeMultipleWrites.
/*
* Executes multiple writes using several concurrent writers (as specified by writerCount) on all the servers present in
* the collection passed as a parameter. Each write performs a document insert followed by some update and check operations
* on it. The target db is passed as a parameter; if it is null, the default database on each server is used.
*/
protected void executeMultipleWrites(List<ServerRun> executeOnServers, String storageType, String dbURL) throws InterruptedException, ExecutionException {
ODatabaseDocumentTx database;
if (dbURL == null) {
database = poolFactory.get(getPlocalDatabaseURL(serverInstance.get(0)), "admin", "admin").acquire();
} else {
database = poolFactory.get(dbURL, "admin", "admin").acquire();
}
try {
List<ODocument> result = database.query(new OSQLSynchQuery<OIdentifiable>("select count(*) from Person"));
baseCount = ((Number) result.get(0).field("count")).intValue();
} finally {
database.close();
}
System.out.println("Creating Writers and Readers threads...");
final ExecutorService writerExecutors = Executors.newCachedThreadPool();
final ExecutorService readerExecutors = Executors.newCachedThreadPool();
runningWriters = new CountDownLatch(executeOnServers.size() * writerCount);
int serverId = 0;
int threadId = 0;
List<Callable<Void>> writerWorkers = new ArrayList<Callable<Void>>();
for (ServerRun server : executeOnServers) {
if (server.isActive()) {
for (int j = 0; j < writerCount; j++) {
Callable<Void> writer = null;
if (storageType.equals("plocal")) {
writer = createWriter(serverId, threadId++, getPlocalDatabaseURL(server));
} else if (storageType.equals("remote")) {
writer = createWriter(serverId, threadId++, getRemoteDatabaseURL(server));
} else
throw new IllegalArgumentException("storageType " + storageType + " not supported");
writerWorkers.add(writer);
}
serverId++;
}
}
expected = writerCount * count * serverId + baseCount;
System.out.println("Writes started.");
List<Future<Void>> futures = writerExecutors.invokeAll(writerWorkers);
List<Callable<Void>> readerWorkers = new ArrayList<Callable<Void>>();
for (ServerRun server : executeOnServers) {
if (server.isActive()) {
Callable<Void> reader = createReader(getPlocalDatabaseURL(server));
readerWorkers.add(reader);
}
}
List<Future<Void>> rFutures = readerExecutors.invokeAll(readerWorkers);
System.out.println("Threads started, waiting for the end");
for (Future<Void> future : futures) {
future.get();
}
writerExecutors.shutdown();
assertTrue(writerExecutors.awaitTermination(1, TimeUnit.MINUTES));
System.out.println("All writer threads have finished, shutting down readers");
for (Future<Void> future : rFutures) {
future.get();
}
readerExecutors.shutdown();
assertTrue(readerExecutors.awaitTermination(1, TimeUnit.MINUTES));
System.out.println("All threads have finished, shutting down server instances");
for (ServerRun server : executeOnServers) {
if (server.isActive()) {
printStats(getPlocalDatabaseURL(server));
}
}
onBeforeChecks();
checkInsertedEntries();
checkIndexedEntries();
}
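The writer tasks returned by createWriter(...) are defined elsewhere in the test hierarchy and are not part of this snippet. The sketch below shows the general shape such a Callable could take, assuming it is a non-static inner class of the test (so it can see count and runningWriters) and an illustrative Person naming convention; it is not the project's actual Writer implementation.

// Minimal sketch of a writer task (illustrative only).
class SketchWriter implements Callable<Void> {
  private final int serverId;
  private final int threadId;
  private final String databaseUrl;

  SketchWriter(int serverId, int threadId, String databaseUrl) {
    this.serverId = serverId;
    this.threadId = threadId;
    this.databaseUrl = databaseUrl;
  }

  @Override
  public Void call() throws Exception {
    ODatabaseDocumentTx database = new ODatabaseDocumentTx(databaseUrl).open("admin", "admin");
    try {
      for (int i = 0; i < count; i++) {
        // assumed naming convention: one unique name per server/thread/record
        ODocument person = new ODocument("Person");
        person.field("name", "Person-s" + serverId + "-t" + threadId + "-" + i);
        person.field("updated", false);
        person.save();
      }
    } finally {
      database.close();
      // let executeMultipleWrites() know this writer has finished
      runningWriters.countDown();
    }
    return null;
  }
}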
use of com.orientechnologies.orient.server.distributed.ServerRun in project orientdb by orientechnologies.
the class AbstractScenarioTest method checkWritesAboveCluster.
// checks the consistency in the cluster after the writes in a simple distributed scenario
protected void checkWritesAboveCluster(List<ServerRun> checkConsistencyOnServers, List<ServerRun> writerServer) {
String checkOnServer = "";
for (ServerRun server : checkConsistencyOnServers) {
checkOnServer += server.getServerInstance().getDistributedManager().getLocalNodeName() + ",";
}
checkOnServer = checkOnServer.substring(0, checkOnServer.length() - 1);
String writtenServer = "";
for (ServerRun server : writerServer) {
writtenServer += server.getServerInstance().getDistributedManager().getLocalNodeName() + ",";
}
writtenServer = writtenServer.substring(0, writtenServer.length() - 1);
List<ODatabaseDocumentTx> dbs = new LinkedList<ODatabaseDocumentTx>();
for (ServerRun server : checkConsistencyOnServers) {
dbs.add(poolFactory.get(getPlocalDatabaseURL(server), "admin", "admin").acquire());
}
Map<Integer, Integer> serverIndex2thresholdThread = new LinkedHashMap<Integer, Integer>();
Map<Integer, String> serverIndex2serverName = new LinkedHashMap<Integer, String>();
int lastThread = 0;
int serverIndex = 0;
for (ServerRun server : writerServer) {
serverIndex2thresholdThread.put(serverIndex, lastThread + 5);
serverIndex++;
lastThread += 5;
}
serverIndex = 0;
for (ServerRun server : writerServer) {
serverIndex2serverName.put(serverIndex, server.getServerInstance().getDistributedManager().getLocalNodeName());
serverIndex++;
}
List<ODocument> docsToCompare = new LinkedList<ODocument>();
super.banner("Checking consistency among servers...\nChecking on servers {" + checkOnServer + "} that all the records written on {" + writtenServer + "} are consistent.");
try {
int index = 0;
String serverName = null;
for (int serverId : serverIndex2thresholdThread.keySet()) {
serverName = serverIndex2serverName.get(serverId);
System.out.println("Checking records originally inserted on server " + serverName + "...");
// determine the writer-thread id range for the records originally inserted on this server
int i;
if (serverId == 0)
i = 0;
else
i = serverIndex2thresholdThread.get(serverId - 1);
while (i < serverIndex2thresholdThread.get(serverId)) {
for (int j = 0; j < 100; j++) {
// load records to compare
for (ODatabaseDocumentTx db : dbs) {
docsToCompare.add(loadRecord(db, serverId, i, j + baseCount));
}
// checking that the record is present on each server's db
for (ODocument doc : docsToCompare) {
assertTrue(doc != null);
}
// checking that all the records have the same version and values (each record is equal to the next one)
int k = 0;
while (k <= docsToCompare.size() - 2) {
assertEquals("Inconsistency detected on version. Record: " + docsToCompare.get(k).toString() + "; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("@version"), docsToCompare.get(k + 1).field("@version"));
assertEquals("Inconsistency detected on name. Record: " + docsToCompare.get(k).toString() + "; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("name"), docsToCompare.get(k + 1).field("name"));
assertEquals("Inconsistency detected on surname. Record: " + docsToCompare.get(k).toString() + "; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("surname"), docsToCompare.get(k + 1).field("surname"));
assertEquals("Inconsistency detected on birthday. Record: " + docsToCompare.get(k).toString() + "; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("birthday"), docsToCompare.get(k + 1).field("birthday"));
assertEquals("Inconsistency detected on children. Record: " + docsToCompare.get(k).toString() + "; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("children"), docsToCompare.get(k + 1).field("children"));
k++;
}
docsToCompare.clear();
}
i++;
}
System.out.println("All records originally inserted on server " + serverName + " are consistent in the cluster.");
index++;
}
} catch (Exception e) {
e.printStackTrace();
} finally {
for (ODatabaseDocumentTx db : dbs) {
ODatabaseRecordThreadLocal.INSTANCE.set(db);
db.close();
ODatabaseRecordThreadLocal.INSTANCE.set(null);
}
}
}
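loadRecord(db, serverId, threadId, recordId) is another helper that is not shown here. A minimal sketch, assuming the records are looked up by a name composed from the server, thread and record ids (both the query and the naming convention are assumptions, not the project's actual code):

// Illustrative sketch: fetch one Person document by a composed unique name.
protected ODocument loadRecord(ODatabaseDocumentTx database, int serverId, int threadId, int recordId) {
  ODatabaseRecordThreadLocal.INSTANCE.set(database);
  String name = "Person-s" + serverId + "-t" + threadId + "-" + recordId; // assumed convention
  List<ODocument> result = database.query(
      new OSQLSynchQuery<ODocument>("select from Person where name = '" + name + "'"));
  return result.isEmpty() ? null : result.get(0);
}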
use of com.orientechnologies.orient.server.distributed.ServerRun in project orientdb by orientechnologies.
the class MultipleDBAlignmentOnNodesJoining method compareDBOnServer.
// compares the consistency of a database across multiple servers
protected void compareDBOnServer(List<ServerRun> checkConsistencyOnServers, String databaseName) {
/*
* Preliminary checks
*/
// database must be present on all the servers
String checkOnServer = "";
List<ODatabaseDocumentTx> dbs = new LinkedList<ODatabaseDocumentTx>();
for (ServerRun server : checkConsistencyOnServers) {
try {
dbs.add(poolFactory.get(getPlocalDatabaseURL(server, databaseName), "admin", "admin").acquire());
checkOnServer += server.getServerInstance().getDistributedManager().getLocalNodeName() + ",";
} catch (Exception e) {
fail(databaseName + " is not present on server " + server.getServerId());
}
}
checkOnServer = checkOnServer.substring(0, checkOnServer.length() - 1);
super.banner("Checking " + databaseName + " consistency among servers...\nChecking on servers {" + checkOnServer + "}.");
// the Person class must be present in each database
for (ODatabaseDocumentTx db : dbs) {
assertTrue(db.getMetadata().getSchema().existsClass("Person"));
}
// each database on each server has the same number of records in class Person
int j = 0;
while (j <= dbs.size() - 2) {
long count1 = dbs.get(j).getMetadata().getSchema().getClass("Person").count();
long count2 = dbs.get(j + 1).getMetadata().getSchema().getClass("Person").count();
assertEquals(count1, count2);
j++;
}
/*
* Checking record by record
*/
List<ODocument> docsToCompare = new LinkedList<ODocument>();
super.banner("Checking " + databaseName + " consistency among servers...\nChecking on servers {" + checkOnServer + "}.");
try {
for (int i = 0; i < count; i++) {
// load records to compare
for (ODatabaseDocumentTx db : dbs) {
docsToCompare.add(loadRecord(db, i + baseCount));
}
// checking that the record is present on each server's db
for (ODocument doc : docsToCompare) {
assertTrue(doc != null);
}
// checking that all the records have the same version and values (each record is equal to the next one)
int k = 0;
while (k <= docsToCompare.size() - 2) {
assertEquals("Inconsistency detected. Record: " + docsToCompare.get(k).toString() + " ; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("@version"), docsToCompare.get(k + 1).field("@version"));
assertEquals("Inconsistency detected. Record: " + docsToCompare.get(k).toString() + " ; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("name"), docsToCompare.get(k + 1).field("name"));
assertEquals("Inconsistency detected. Record: " + docsToCompare.get(k).toString() + " ; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("surname"), docsToCompare.get(k + 1).field("surname"));
assertEquals("Inconsistency detected. Record: " + docsToCompare.get(k).toString() + " ; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("birthday"), docsToCompare.get(k + 1).field("birthday"));
assertEquals("Inconsistency detected. Record: " + docsToCompare.get(k).toString() + " ; Servers: " + (k + 1) + "," + (k + 2), docsToCompare.get(k).field("children"), docsToCompare.get(k + 1).field("children"));
k++;
}
docsToCompare.clear();
}
System.out.println("The database " + databaseName + " is consistent in the cluster.");
} catch (Exception e) {
e.printStackTrace();
} finally {
for (ODatabaseDocumentTx db : dbs) {
ODatabaseRecordThreadLocal.INSTANCE.set(db);
db.close();
ODatabaseRecordThreadLocal.INSTANCE.set(null);
}
}
}
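The comparison loop above checks each copy only against the next one; since equality is transitive, that is sufficient for all copies to agree. An equivalent formulation that compares every copy against the one loaded from the first server, shown here as a sketch using the same field names as the assertions above, may read more directly:

// Sketch: compare every replica of a record against the copy from the first server.
ODocument reference = docsToCompare.get(0);
for (int s = 1; s < docsToCompare.size(); s++) {
  ODocument copy = docsToCompare.get(s);
  assertEquals("Version mismatch between server 1 and server " + (s + 1), reference.field("@version"), copy.field("@version"));
  assertEquals("Name mismatch between server 1 and server " + (s + 1), reference.field("name"), copy.field("name"));
  assertEquals("Surname mismatch between server 1 and server " + (s + 1), reference.field("surname"), copy.field("surname"));
  assertEquals("Birthday mismatch between server 1 and server " + (s + 1), reference.field("birthday"), copy.field("birthday"));
  assertEquals("Children mismatch between server 1 and server " + (s + 1), reference.field("children"), copy.field("children"));
}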
use of com.orientechnologies.orient.server.distributed.ServerRun in project orientdb by orientechnologies.
the class AbstractShardingScenarioTest method checkAvailabilityOnShardsNoReplica.
// checks the consistency in the cluster after the writes in a no-replica sharding scenario
protected void checkAvailabilityOnShardsNoReplica(List<ServerRun> checkConsistencyOnServers, List<ServerRun> writerServer) {
String checkOnServer = "";
for (ServerRun server : checkConsistencyOnServers) {
checkOnServer += server.getServerInstance().getDistributedManager().getLocalNodeName() + ",";
}
checkOnServer = checkOnServer.substring(0, checkOnServer.length() - 1);
String writtenServer = "";
for (ServerRun server : writerServer) {
writtenServer += server.getServerInstance().getDistributedManager().getLocalNodeName() + ",";
}
writtenServer = writtenServer.substring(0, writtenServer.length() - 1);
List<OrientBaseGraph> dbs = new LinkedList<OrientBaseGraph>();
OrientGraphFactory localFactory = null;
for (ServerRun server : checkConsistencyOnServers) {
localFactory = new OrientGraphFactory(getPlocalDatabaseURL(server));
dbs.add(localFactory.getNoTx());
}
Map<Integer, Integer> serverIndex2thresholdThread = new LinkedHashMap<Integer, Integer>();
Map<Integer, String> serverIndex2serverName = new LinkedHashMap<Integer, String>();
int lastThread = 0;
int serverIndex = 0;
for (ServerRun server : writerServer) {
serverIndex2thresholdThread.put(serverIndex, lastThread + 5);
serverIndex++;
lastThread += 5;
}
serverIndex = 0;
for (ServerRun server : writerServer) {
serverIndex2serverName.put(serverIndex, server.getServerInstance().getDistributedManager().getLocalNodeName());
serverIndex++;
}
List<OrientVertex> verticesToCheck = new LinkedList<OrientVertex>();
super.banner("Checking consistency among servers...\nChecking on servers {" + checkOnServer + "} that all the vertices written on {" + writtenServer + "} are consistent.");
try {
int index = 0;
String serverName = null;
for (int serverId : serverIndex2thresholdThread.keySet()) {
serverName = serverIndex2serverName.get(serverId);
System.out.println("Checking records originally inserted on server " + serverName + "...");
String clusterName = "client_" + serverName;
// determine the writer-thread id range for the records originally inserted on this server
int i;
if (serverId == 0)
i = 0;
else
i = serverIndex2thresholdThread.get(serverId - 1);
while (i < serverIndex2thresholdThread.get(serverId)) {
for (int j = 0; j < 100; j++) {
// load records to compare
for (OrientBaseGraph db : dbs) {
verticesToCheck.add(loadVertex(db, clusterName, serverId, i, j + baseCount));
}
// checking that each vertex is present on each server's db
for (OrientVertex vertex : verticesToCheck) {
assertTrue(vertex != null);
}
// checking that all the vertices have the same version and values (each vertex is equal to the next one)
int k = 0;
while (k <= verticesToCheck.size() - 2) {
assertEquals(verticesToCheck.get(k).getProperty("@version"), verticesToCheck.get(k + 1).getProperty("@version"));
assertEquals(verticesToCheck.get(k).getProperty("name"), verticesToCheck.get(k + 1).getProperty("name"));
assertEquals(verticesToCheck.get(k).getProperty("updated"), verticesToCheck.get(k + 1).getProperty("updated"));
k++;
}
verticesToCheck.clear();
}
i++;
}
System.out.println("All records originally inserted on server " + serverName + " in the cluster " + clusterName + " available in the shard.");
index++;
}
} catch (Exception e) {
e.printStackTrace();
} finally {
for (OrientBaseGraph db : dbs) {
ODatabaseRecordThreadLocal.INSTANCE.set(db.getRawGraph());
db.getRawGraph().close();
ODatabaseRecordThreadLocal.INSTANCE.set(null);
}
}
}
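loadVertex(db, clusterName, serverId, threadId, recordId) is defined outside this snippet as well. A minimal sketch, assuming vertices are looked up through the Client.name index with the <clusterName>-s<serverId>-t<threadId>-<recordId> naming convention used by these sharding tests (the actual lookup in the project may differ):

// Illustrative sketch: load one Client vertex by its composed unique name.
protected OrientVertex loadVertex(OrientBaseGraph graph, String clusterName, int serverId, int threadId, int recordId) {
  String name = clusterName + "-s" + serverId + "-t" + threadId + "-" + recordId; // assumed convention
  Iterator<Vertex> matches = graph.getVertices("Client.name", name).iterator();
  return matches.hasNext() ? (OrientVertex) matches.next() : null;
}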
use of com.orientechnologies.orient.server.distributed.ServerRun in project orientdb by orientechnologies.
the class AbstractShardingScenarioTest method executeMultipleWritesOnShards.
/*
* Executes multiple writes using several concurrent writers (as specified by writerCount) on all the servers present in
* the collection passed as a parameter, each writer targeting that server's own cluster (shard). Each write performs a
* vertex insert followed by some update and check operations on it. Vertex name: <shardName>-s<serverId>-t<threadId>-<recordId>
*/
protected void executeMultipleWritesOnShards(List<ServerRun> executeOnServers, String storageType) throws InterruptedException, ExecutionException {
System.out.println("Creating Writers threads...");
final ExecutorService writerExecutors = Executors.newCachedThreadPool();
runningWriters = new CountDownLatch(executeOnServers.size() * writerCount);
String shardName = "client_";
int serverId = 0;
int threadId = 0;
List<Callable<Void>> writerWorkers = new ArrayList<Callable<Void>>();
for (ServerRun server : executeOnServers) {
if (server.isActive()) {
shardName += server.getServerInstance().getDistributedManager().getLocalNodeName();
for (int j = 0; j < writerCount; j++) {
Callable<Void> writer = null;
if (storageType.equals("plocal")) {
writer = new ShardWriter(serverId, shardName, threadId++, getPlocalDatabaseURL(server));
} else if (storageType.equals("remote")) {
// the remote branch presumably needs the remote URL; the original passed the plocal URL in both branches
writer = new ShardWriter(serverId, shardName, threadId++, getRemoteDatabaseURL(server));
} else
throw new IllegalArgumentException("storageType " + storageType + " not supported");
writerWorkers.add(writer);
}
}
serverId++;
shardName = "client_";
}
expected = writerCount * count * serverId + baseCount;
List<Future<Void>> futures = writerExecutors.invokeAll(writerWorkers);
System.out.println("Threads started, waiting for the end");
for (Future<Void> future : futures) {
future.get();
}
writerExecutors.shutdown();
assertTrue(writerExecutors.awaitTermination(1, TimeUnit.MINUTES));
System.out.println("All writer threads have finished.");
// checking inserted vertices
OrientBaseGraph graph;
OrientGraphFactory graphFactory;
// checking total amount of records (map-reduce aggregation)
graphFactory = new OrientGraphFactory("plocal:target/server0/databases/" + getDatabaseName());
graph = graphFactory.getNoTx();
try {
OResultSet<ODocument> clients = new OCommandSQL("select from Client").execute();
int total = clients.size();
assertEquals(expected, total);
List<ODocument> result = new OCommandSQL("select count(*) from Client").execute();
total = ((Number) result.get(0).field("count")).intValue();
// assertEquals(expected, total);
} finally {
graph.getRawGraph().close();
}
serverId = 0;
for (ServerRun server : serverInstance) {
if (server.isActive()) {
graphFactory = new OrientGraphFactory("plocal:target/server" + serverId + "/databases/" + getDatabaseName());
graph = graphFactory.getNoTx();
try {
String sqlCommand = "select from cluster:client_" + server.getServerInstance().getDistributedManager().getLocalNodeName();
List<ODocument> result = new OCommandSQL(sqlCommand).execute();
int total = result.size();
// assertEquals(count * writerCount, total);
sqlCommand = "select count(*) from cluster:client_" + server.getServerInstance().getDistributedManager().getLocalNodeName();
result = new OCommandSQL(sqlCommand).execute();
total = ((Number) result.get(0).field("count")).intValue();
// assertEquals(count * writerCount, total);
} catch (Exception e) {
e.printStackTrace();
} finally {
graph.getRawGraph().close();
}
}
serverId++;
}
// checking indexes
// serverId = 0;
// for (ServerRun server : serverInstance) {
// if (server.isActive()) {
// graphFactory = new OrientGraphFactory("plocal:target/server" + serverId + "/databases/" + getDatabaseName());
// graph = graphFactory.getNoTx();
// try {
// final long indexSize = graph.getRawGraph().getMetadata().getIndexManager().getIndex("Client.name").getSize();
//
// if (indexSize != count) {
// // ERROR: DUMP ALL THE RECORDS
// List<ODocument> result = graph.command(new OCommandSQL("select from index:Client.name")).execute();
// int i = 0;
// for (ODocument d : result) {
// System.out.println((i++) + ": " + ((OIdentifiable) d.field("rid")).getRecord());
// }
// }
//
// junit.framework.Assert.assertEquals(count, indexSize);
//
// System.out.println("From metadata: indexes " + indexSize + " items");
//
// List<ODocument> result = graph.command(new OCommandSQL("select count(*) from index:Client.name")).execute();
// junit.framework.Assert.assertEquals(count, ((Long) result.get(0).field("count")).longValue());
//
// System.out.println("From sql: indexes " + indexSize + " items");
// } finally {
// graph.getRawGraph().close();
// }
// }
// serverId++;
// }
}
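The ShardWriter callable instantiated above is also not included in this excerpt. The sketch below illustrates the idea, assuming it is a non-static inner class of the test (so it can see count and runningWriters) and using the class:Client,cluster:<shardName> addressing form of the graph API together with the vertex naming convention from the comment above; it is not the project's actual implementation.

// Minimal sketch of a shard writer: inserts `count` Client vertices into one specific cluster (shard).
class SketchShardWriter implements Callable<Void> {
  private final int serverId;
  private final String shardName;
  private final int threadId;
  private final String databaseUrl;

  SketchShardWriter(int serverId, String shardName, int threadId, String databaseUrl) {
    this.serverId = serverId;
    this.shardName = shardName;
    this.threadId = threadId;
    this.databaseUrl = databaseUrl;
  }

  @Override
  public Void call() throws Exception {
    OrientGraphFactory factory = new OrientGraphFactory(databaseUrl);
    OrientBaseGraph graph = factory.getNoTx();
    try {
      for (int i = 0; i < count; i++) {
        // write into the server's own cluster so records stay on this shard
        OrientVertex vertex = graph.addVertex("class:Client,cluster:" + shardName);
        vertex.setProperty("name", shardName + "-s" + serverId + "-t" + threadId + "-" + i);
        vertex.setProperty("updated", false);
      }
    } finally {
      graph.shutdown();
      runningWriters.countDown();
    }
    return null;
  }
}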