Use of org.apache.zookeeper.server.Request in project zookeeper by apache.
The class CommitProcessorConcurrencyTest, method committedAndUncommittedOfTheSameSessionRaceTest.
/**
* We place a read request followed by a committed update request of the same
* session in queuedRequests. We verify that both requests are processed in
* session order (first the read, then the write).
*/
@Test
public void committedAndUncommittedOfTheSameSessionRaceTest() throws Exception {
final String path = "/testCvsUCRace";
Request readReq = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x0, 0);
Request writeReq = newRequest(new SetDataRequest(path, new byte[16], -1), OpCode.setData, 0x0, 1);
processor.committedRequests.add(writeReq);
processor.queuedRequests.add(readReq);
processor.queuedRequests.add(writeReq);
processor.queuedWriteRequests.add(writeReq);
processor.initThreads(1);
processor.stoppedMainLoop = true;
processor.run();
assertTrue(processedRequests.peek() != null && processedRequests.peek().equals(readReq), "Request " + readReq + " was not processed; found " + processedRequests.peek() + " instead");
processedRequests.poll();
assertTrue(processedRequests.peek() != null && processedRequests.peek().equals(writeReq), "Request " + writeReq + " was not processed; found " + processedRequests.peek() + " instead");
}
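The newRequest helper used throughout these snippets is not shown. Below is a minimal sketch of what such a helper might look like, assuming the Request(cnxn, sessionId, cxid, type, buffer, authInfo) constructor and jute's BinaryOutputArchive; the actual helper in CommitProcessorConcurrencyTest may differ.
// Illustrative sketch only: serialize a jute record and wrap it in a Request.
// Assumes the Request(cnxn, sessionId, cxid, type, buffer, authInfo) constructor.
private Request newRequest(Record record, int type, long sessionId, int xid) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BinaryOutputArchive boa = BinaryOutputArchive.getArchive(baos);
    record.serialize(boa, "request");
    baos.close();
    return new Request(null, sessionId, xid, type, ByteBuffer.wrap(baos.toByteArray()), null);
}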
Use of org.apache.zookeeper.server.Request in project zookeeper by apache.
The class CommitProcessorConcurrencyTest, method noStarvationOfReadRequestsTest.
/**
* In the following test, we verify that committed writes do not cause
* starvation of reads. We populate the commit processor with the following
* order of requests: 1 committed local update, 1 read request, 100
* committed non-local updates, and 50 read requests. We verify that after the
* first call to processor.run, only the first write is processed; after the
* second call, all reads are processed along with the second write.
*/
@Test
public void noStarvationOfReadRequestsTest() throws Exception {
final String path = "/noStarvationOfReadRequests";
// +1 committed request (also head of queuedRequests)
Request firstCommittedReq = newRequest(new CreateRequest(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x3, 1);
processor.queuedRequests.add(firstCommittedReq);
processor.queuedWriteRequests.add(firstCommittedReq);
processor.committedRequests.add(firstCommittedReq);
Set<Request> allReads = new HashSet<Request>();
// +1 read request to queuedRequests
Request firstRead = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x1, 0);
allReads.add(firstRead);
processor.queuedRequests.add(firstRead);
// +1 non-local commit
Request secondCommittedReq = newRequest(new CreateRequest(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x99, 2);
processor.committedRequests.add(secondCommittedReq);
Set<Request> waitingCommittedRequests = new HashSet<Request>();
// +99 non-local committed requests
for (int writeReqId = 3; writeReqId < 102; ++writeReqId) {
Request writeReq = newRequest(new CreateRequest(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x8, writeReqId);
processor.committedRequests.add(writeReq);
waitingCommittedRequests.add(writeReq);
}
// +50 read requests to queuedRequests
for (int readReqId = 1; readReqId <= 50; ++readReqId) {
Request readReq = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x5, readReqId);
allReads.add(readReq);
processor.queuedRequests.add(readReq);
}
processor.initThreads(defaultSizeOfThreadPool);
processor.stoppedMainLoop = true;
processor.run();
assertTrue(processedRequests.contains(firstCommittedReq), "Did not process the first write request");
for (Request r : allReads) {
assertTrue(!processedRequests.contains(r), "Processed read request");
}
processor.run();
assertTrue(processedRequests.containsAll(allReads), "Did not process all reads");
assertTrue(processedRequests.contains(secondCommittedReq), "Did not process the second write request");
for (Request r : waitingCommittedRequests) {
assertTrue(!processedRequests.contains(r), "Processed additional committed request");
}
}
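These tests rely on processedRequests being filled by whatever processor is chained after the CommitProcessor. A minimal sketch of how that fixture might be wired, assuming the CommitProcessor(nextProcessor, id, matchSyncs, listener) constructor; the variable names below are hypothetical and the real test fixture may differ.
// Illustrative sketch only: record every request the CommitProcessor hands downstream.
final BlockingQueue<Request> processedRequests = new LinkedBlockingQueue<>();
RequestProcessor recordingNextProcessor = new RequestProcessor() {
    @Override
    public void processRequest(Request request) {
        processedRequests.offer(request);
    }
    @Override
    public void shutdown() {
        // nothing to clean up in this sketch
    }
};
CommitProcessor processor = new CommitProcessor(recordingNextProcessor, "0", false, null);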
Use of org.apache.zookeeper.server.Request in project zookeeper by apache.
The class CommitProcessorConcurrencyTest, method processAllWritesMaxBatchSize.
/**
* In the following test, we add a write request followed by several read
* requests of the same session. We do this for 2 sessions. For the
* second session, we queue up another write after the reads, and
* we verify several things: 1. The writes are not processed until
* the commits arrive. 2. Only 2 writes are processed, with a maxCommitBatchSize
* of 3, due to the blocking reads. 3. Once the writes are processed,
* all the read requests are processed as well. 4. All read requests are
* executed after the write, before any other write for that session,
* along with new reads. 5. Then we add another read for session 1, and
* another write and commit for session 2. 6. Only the old write and the read
* are processed, leaving the commit in the queue. 7. The last write is executed
* in the last iteration, and all lists are empty.
*/
@Test
public void processAllWritesMaxBatchSize() throws Exception {
final String path = "/processAllWritesMaxBatchSize";
HashSet<Request> shouldBeProcessedAfterPending = new HashSet<Request>();
Request writeReq = newRequest(new CreateRequest(path + "_1", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x1, 1);
processor.queuedRequests.add(writeReq);
processor.queuedWriteRequests.add(writeReq);
Request writeReq2 = newRequest(new CreateRequest(path + "_2", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x2, 1);
processor.queuedRequests.add(writeReq2);
processor.queuedWriteRequests.add(writeReq2);
for (int readReqId = 2; readReqId <= 5; ++readReqId) {
Request readReq = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x1, readReqId);
Request readReq2 = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x2, readReqId);
processor.queuedRequests.add(readReq);
shouldBeProcessedAfterPending.add(readReq);
processor.queuedRequests.add(readReq2);
shouldBeProcessedAfterPending.add(readReq2);
}
Request writeReq3 = newRequest(new CreateRequest(path + "_3", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x2, 6);
processor.queuedRequests.add(writeReq3);
processor.queuedWriteRequests.add(writeReq3);
processor.initThreads(defaultSizeOfThreadPool);
processor.stoppedMainLoop = true;
CommitProcessor.setMaxCommitBatchSize(2);
processor.run();
assertTrue(processedRequests.isEmpty(), "Processed without waiting for commit");
assertTrue(processor.queuedRequests.isEmpty(), "Did not handle all of queuedRequests' requests");
assertTrue(!processor.queuedWriteRequests.isEmpty(), "Removed from blockedQueuedRequests before commit");
assertTrue(processor.pendingRequests.containsKey(writeReq.sessionId), "Missing session 1 in pending queue");
assertTrue(processor.pendingRequests.containsKey(writeReq2.sessionId), "Missing session 2 in pending queue");
processor.committedRequests.add(writeReq);
processor.committedRequests.add(writeReq2);
processor.committedRequests.add(writeReq3);
processor.stoppedMainLoop = true;
CommitProcessor.setMaxCommitBatchSize(3);
processor.run();
processor.initThreads(defaultSizeOfThreadPool);
Thread.sleep(500);
assertTrue(processedRequests.peek() == writeReq, "Did not process committed request");
assertTrue(processedRequests.containsAll(shouldBeProcessedAfterPending), "Did not process the following read requests");
assertTrue(!processor.committedRequests.isEmpty(), "Processed committed request");
assertTrue(processor.committedRequests.peek() == writeReq3, "Removed commit for write req 3");
assertTrue(!processor.pendingRequests.isEmpty(), "Processed committed request");
assertTrue(processor.pendingRequests.containsKey(writeReq3.sessionId), "Missing session 2 in pending queue");
assertTrue(processor.pendingRequests.get(writeReq3.sessionId).peek() == writeReq3, "Missing write 3 in pending queue");
assertTrue(!processor.queuedWriteRequests.isEmpty(), "Removed from blockedQueuedRequests");
assertTrue(processor.queuedWriteRequests.peek() == writeReq3, "Removed write req 3 from blockedQueuedRequests");
Request readReq3 = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x1, 7);
processor.queuedRequests.add(readReq3);
shouldBeProcessedAfterPending.add(readReq3);
Request writeReq4 = newRequest(new CreateRequest(path + "_4", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x2, 7);
processor.queuedRequests.add(writeReq4);
processor.queuedWriteRequests.add(writeReq4);
processor.committedRequests.add(writeReq4);
processor.stoppedMainLoop = true;
CommitProcessor.setMaxCommitBatchSize(3);
processor.run();
processor.initThreads(defaultSizeOfThreadPool);
Thread.sleep(500);
assertTrue(processedRequests.peek() == writeReq, "Did not process committed request");
assertTrue(processedRequests.containsAll(shouldBeProcessedAfterPending), "Did not process the following read requests");
assertTrue(!processor.committedRequests.isEmpty(), "Processed unexpected committed request");
assertTrue(processor.pendingRequests.isEmpty(), "Unexpected pending request");
assertTrue(!processor.queuedWriteRequests.isEmpty(), "Removed from blockedQueuedRequests");
assertTrue(processor.queuedWriteRequests.peek() == writeReq4, "Removed write req 4 from blockedQueuedRequests");
processor.stoppedMainLoop = true;
CommitProcessor.setMaxCommitBatchSize(3);
processor.run();
processor.initThreads(defaultSizeOfThreadPool);
Thread.sleep(500);
assertTrue(processedRequests.peek() == writeReq, "Did not process committed request");
assertTrue(processedRequests.containsAll(shouldBeProcessedAfterPending), "Did not process the following read requests");
assertTrue(processor.committedRequests.isEmpty(), "Did not process committed request");
assertTrue(processor.pendingRequests.isEmpty(), "Did not process committed request");
assertTrue(processor.queuedWriteRequests.isEmpty(), "Did not remove from blockedQueuedRequests");
}
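The three passes above repeat the same pattern: set the commit batch size, run one bounded iteration of the main loop, restart the worker threads, and give them time to drain. A compact sketch of that pattern, assuming the stoppedMainLoop/initThreads semantics shown in these snippets; the helper name and the defaultSizeOfThreadPool field are hypothetical.
// Illustrative sketch only: one bounded pass of the commit processor.
private void runOnePass(CommitProcessor processor, int maxCommitBatchSize) throws InterruptedException {
    CommitProcessor.setMaxCommitBatchSize(maxCommitBatchSize);
    processor.stoppedMainLoop = true; // make run() exit after a single iteration
    processor.run();
    processor.initThreads(defaultSizeOfThreadPool); // restart the worker pool
    Thread.sleep(500); // give the workers time to hand requests downstream
}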
Use of org.apache.zookeeper.server.Request in project zookeeper by apache.
The class FileTxnSnapLogTest, method testSnapshotSerializationCompatibility.
void testSnapshotSerializationCompatibility(Boolean digestEnabled, Boolean snappyEnabled) throws IOException {
File dataDir = ClientBase.createEmptyTestDir();
FileTxnSnapLog snaplog = new FileTxnSnapLog(dataDir, dataDir);
DataTree dataTree = new DataTree();
ConcurrentHashMap<Long, Integer> sessions = new ConcurrentHashMap<>();
SnapStream.setStreamMode(snappyEnabled ? SnapStream.StreamMode.SNAPPY : SnapStream.StreamMode.DEFAULT_MODE);
ZooKeeperServer.setDigestEnabled(digestEnabled);
TxnHeader txnHeader = new TxnHeader(1, 1, 1, 1 + 1, ZooDefs.OpCode.create);
CreateTxn txn = new CreateTxn("/" + 1, "data".getBytes(), null, false, 1);
Request request = new Request(1, 1, 1, txnHeader, txn, 1);
dataTree.processTxn(request.getHdr(), request.getTxn());
snaplog.save(dataTree, sessions, true);
int expectedNodeCount = dataTree.getNodeCount();
ZooKeeperServer.setDigestEnabled(!digestEnabled);
snaplog.restore(dataTree, sessions, (hdr, rec, digest) -> {
});
assertEquals(expectedNodeCount, dataTree.getNodeCount());
}
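This helper is package-private and parameterized over the digest and Snappy settings. Hypothetical @Test drivers could exercise the four combinations; the method names below are illustrative and not taken from the ZooKeeper source.
// Illustrative sketch only: hypothetical drivers for the parameterized helper above.
@Test
public void testSnapshotSerializationCompatibilityWithSnappy() throws IOException {
    testSnapshotSerializationCompatibility(true, true);
    testSnapshotSerializationCompatibility(false, true);
}

@Test
public void testSnapshotSerializationCompatibilityWithoutSnappy() throws IOException {
    testSnapshotSerializationCompatibility(true, false);
    testSnapshotSerializationCompatibility(false, false);
}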
Use of org.apache.zookeeper.server.Request in project zookeeper by apache.
The class CommitProcessorMetricsTest, method testReadCommitProcTime.
@Test
public void testReadCommitProcTime() throws Exception {
setupProcessors(0, 0);
processRequestWithWait(createReadRequest(1L, 1));
Map<String, Object> values = MetricsUtils.currentServerMetrics();
assertEquals(1L, values.get("cnt_read_commitproc_time_ms"));
checkTimeMetric((long) values.get("max_read_commitproc_time_ms"), 0L, 1000L);
Request req1 = createWriteRequest(1L, 2);
processRequestWithWait(req1);
processRequestWithWait(createReadRequest(1L, 3));
// the second read will be stuck in the session queue for at least one second
Thread.sleep(1000);
commitWithWait(req1);
values = MetricsUtils.currentServerMetrics();
assertEquals(2L, values.get("cnt_read_commitproc_time_ms"));
checkTimeMetric((long) values.get("max_read_commitproc_time_ms"), 1000L, 2000L);
}
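The checkTimeMetric helper is not shown in the snippet. A minimal sketch of the bounds check it could perform, assuming JUnit 5 assertions; the real helper in CommitProcessorMetricsTest may differ.
// Illustrative sketch only: assert a time metric lies within [lowerBound, upperBound].
private void checkTimeMetric(long actual, long lowerBound, long upperBound) {
    assertTrue(actual >= lowerBound, "time metric " + actual + " is below " + lowerBound);
    assertTrue(actual <= upperBound, "time metric " + actual + " is above " + upperBound);
}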