Use of org.apache.zookeeper.proto.CreateRequest in project zookeeper by apache.
The class CommitProcessorConcurrencyTest, method noStarvationOfNonLocalCommittedRequestsTest.
/**
 * In the following test, we verify that committed requests are processed
 * even when queuedRequests never becomes empty. We add 10 committed
 * requests and use an infinite queuedRequests queue, then verify that the
 * committed requests were processed.
 */
@Test
@Timeout(value = 1)
public void noStarvationOfNonLocalCommittedRequestsTest() throws Exception {
final String path = "/noStarvationOfCommittedRequests";
processor.queuedRequests = new MockRequestsQueue();
Set<Request> nonLocalCommits = new HashSet<Request>();
for (int i = 0; i < 10; i++) {
Request nonLocalCommitReq = newRequest(new CreateRequest(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 51, i + 1);
processor.committedRequests.add(nonLocalCommitReq);
nonLocalCommits.add(nonLocalCommitReq);
}
for (int i = 0; i < 10; i++) {
processor.initThreads(defaultSizeOfThreadPool);
processor.stoppedMainLoop = true;
processor.run();
}
assertTrue(processedRequests.containsAll(nonLocalCommits), "commit request was not processed");
}
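
The tests in this class rely on a newRequest helper to wrap a jute record such as CreateRequest in a server-side Request. The sketch below shows roughly how such a helper might marshal the record; the exact Request constructor differs between ZooKeeper releases, so the ByteBuffer-based variant used here is an assumption, not the actual test code.

// Sketch only: serialize the record with jute and wrap the bytes in a Request.
// Assumes imports of org.apache.jute.BinaryOutputArchive, org.apache.jute.Record,
// java.io.ByteArrayOutputStream, java.nio.ByteBuffer and the server Request class.
private Request newRequest(Record record, int type, long sessionId, int xid) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BinaryOutputArchive boa = BinaryOutputArchive.getArchive(baos);
    record.serialize(boa, "request");
    baos.close();
    // The ByteBuffer-based constructor is an assumption; newer releases wrap the
    // payload differently, but the intent is the same: carry the marshalled
    // CreateRequest together with sessionId, xid and the op type.
    return new Request(null, sessionId, xid, type, ByteBuffer.wrap(baos.toByteArray()), null);
}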
Use of org.apache.zookeeper.proto.CreateRequest in project zookeeper by apache.
The class CommitProcessorConcurrencyTest, method processAsMuchUncommittedRequestsAsPossibleTest.
/**
 * Here we create the following request queue structure: R1_1, W1_2, R1_3,
 * R2_1, R2_2, W2_3, R2_4, R3_1, R3_2, R3_3, W3_4, R3_5, ... , W5_6, R5_7,
 * i.e., 5 sessions, each with a different number of read requests, followed
 * by a single write and then a single read. The idea is to check that all
 * of the reads that can be processed concurrently are processed, and that
 * neither the uncommitted writes nor the reads that follow them are
 * processed.
 */
@Test
public void processAsMuchUncommittedRequestsAsPossibleTest() throws Exception {
final String path = "/testAsMuchAsPossible";
List<Request> shouldBeProcessed = new LinkedList<Request>();
Set<Request> shouldNotBeProcessed = new HashSet<Request>();
for (int sessionId = 1; sessionId <= 5; ++sessionId) {
for (int readReqId = 1; readReqId <= sessionId; ++readReqId) {
Request readReq = newRequest(new GetDataRequest(path, false), OpCode.getData, sessionId, readReqId);
shouldBeProcessed.add(readReq);
processor.queuedRequests.add(readReq);
}
Request writeReq = newRequest(new CreateRequest(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, sessionId, sessionId + 1);
Request readReq = newRequest(new GetDataRequest(path, false), OpCode.getData, sessionId, sessionId + 2);
processor.queuedRequests.add(writeReq);
processor.queuedWriteRequests.add(writeReq);
processor.queuedRequests.add(readReq);
shouldNotBeProcessed.add(writeReq);
shouldNotBeProcessed.add(readReq);
}
processor.initThreads(defaultSizeOfThreadPool);
processor.stoppedMainLoop = true;
processor.run();
Thread.sleep(1000);
shouldBeProcessed.removeAll(processedRequests);
for (Request r : shouldBeProcessed) {
LOG.error("Did not process {}", r);
}
assertTrue(shouldBeProcessed.isEmpty(), "Not all requests were processed");
assertFalse(shouldNotBeProcessed.removeAll(processedRequests), "Processed a wrong request");
}
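
The distinction between the GetDataRequest reads and the CreateRequest write above hinges on whether a request needs a commit before it can be applied. The sketch below is modelled on the opcode check the CommitProcessor performs, but it is a hedged illustration, not the actual implementation; the opcode list is an assumption covering the types used in these tests plus the common write ops.

// Sketch: classify a request as a write (needs commit) or a read (can be
// handed straight to a worker). Assumes org.apache.zookeeper.ZooDefs.OpCode
// and the server Request class are imported.
private boolean needCommit(Request request) {
    switch (request.type) {
        case OpCode.create:
        case OpCode.create2:
        case OpCode.createContainer:
        case OpCode.delete:
        case OpCode.setData:
        case OpCode.setACL:
        case OpCode.multi:
            return true;
        default:
            // reads such as OpCode.getData bypass the commit wait
            return false;
    }
}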
Use of org.apache.zookeeper.proto.CreateRequest in project zookeeper by apache.
The class CommitProcessorConcurrencyTest, method processAllFollowingUncommittedAfterFirstCommitTest.
/**
 * In the following test, we add a write request followed by several read
 * requests from the same session, and we verify several things: 1. The
 * write is not processed until its commit arrives. 2. Once the write is
 * processed, all the read requests are processed as well. 3. All read
 * requests are executed after the write and before any other write, along
 * with new reads.
 */
@Test
public void processAllFollowingUncommittedAfterFirstCommitTest() throws Exception {
final String path = "/testUncommittedFollowingCommited";
Set<Request> shouldBeInPending = new HashSet<Request>();
Set<Request> shouldBeProcessedAfterPending = new HashSet<Request>();
Request writeReq = newRequest(new CreateRequest(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL.toFlag()), OpCode.create, 0x1, 1);
processor.queuedRequests.add(writeReq);
processor.queuedWriteRequests.add(writeReq);
shouldBeInPending.add(writeReq);
for (int readReqId = 2; readReqId <= 5; ++readReqId) {
Request readReq = newRequest(new GetDataRequest(path, false), OpCode.getData, 0x1, readReqId);
processor.queuedRequests.add(readReq);
shouldBeInPending.add(readReq);
shouldBeProcessedAfterPending.add(readReq);
}
processor.initThreads(defaultSizeOfThreadPool);
processor.stoppedMainLoop = true;
processor.run();
assertTrue(processedRequests.isEmpty(), "Processed without waiting for commit");
assertTrue(processor.queuedRequests.isEmpty(), "Did not handled all of queuedRequests' requests");
assertTrue(!processor.queuedWriteRequests.isEmpty(), "Removed from blockedQueuedRequests before commit");
shouldBeInPending.removeAll(processor.pendingRequests.get(writeReq.sessionId));
for (Request r : shouldBeInPending) {
LOG.error("Should be in pending {}", r);
}
assertTrue(shouldBeInPending.isEmpty(), "Not all requests moved to pending from queuedRequests");
processor.committedRequests.add(writeReq);
processor.stoppedMainLoop = true;
processor.run();
processor.initThreads(defaultSizeOfThreadPool);
Thread.sleep(500);
assertTrue(processedRequests.peek() == writeReq, "Did not process committed request");
assertTrue(processedRequests.containsAll(shouldBeProcessedAfterPending), "Did not process following read request");
assertTrue(processor.committedRequests.isEmpty(), "Did not process committed request");
assertTrue(processor.pendingRequests.isEmpty(), "Did not process committed request");
assertTrue(processor.queuedWriteRequests.isEmpty(), "Did not remove from blockedQueuedRequests");
}
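
The assertions above check that reads queued behind an uncommitted write stay in pendingRequests, keyed by session, until the commit for that write arrives. The following sketch illustrates that routing decision; it is not the actual CommitProcessor code, the field names merely mirror the test, and both sendToNextProcessor and the needCommit check (sketched earlier) are assumed helpers.

// Sketch of the routing behaviour the test asserts: requests from a session
// with an outstanding write pile up in pendingRequests; independent reads go
// straight to the worker pool. Assumes java.util.{Map, HashMap, Deque, ArrayDeque}.
private final Map<Long, Deque<Request>> pendingRequests = new HashMap<>();

private void route(Request request) {
    Deque<Request> pending = pendingRequests.get(request.sessionId);
    if (pending != null && !pending.isEmpty()) {
        // a write from this session is still waiting for its commit,
        // so later requests from the same session must wait behind it
        pending.addLast(request);
    } else if (needCommit(request)) {
        // a new write: park it (and everything that follows from this session)
        pendingRequests.computeIfAbsent(request.sessionId, id -> new ArrayDeque<>()).addLast(request);
    } else {
        sendToNextProcessor(request);  // hypothetical hand-off to a worker thread
    }
}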
Use of org.apache.zookeeper.proto.CreateRequest in project zookeeper by apache.
The class AuditHelper, method getCreateModes.
private static Map<String, String> getCreateModes(Request request) throws IOException, KeeperException {
    Map<String, String> createModes = new HashMap<>();
    if (!ZKAuditProvider.isAuditEnabled()) {
        return createModes;
    }
    MultiOperationRecord multiRequest = new MultiOperationRecord();
    deserialize(request, multiRequest);
    for (Op op : multiRequest) {
        if (op.getType() == ZooDefs.OpCode.create || op.getType() == ZooDefs.OpCode.create2 || op.getType() == ZooDefs.OpCode.createContainer) {
            CreateRequest requestRecord = (CreateRequest) op.toRequestRecord();
            createModes.put(requestRecord.getPath(), getCreateMode(requestRecord));
        }
    }
    return createModes;
}
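
getCreateModes delegates to a getCreateMode helper that is not shown in this snippet. A plausible sketch is given below; it assumes the helper simply maps the flags carried in the CreateRequest back to a CreateMode name for the audit log, and the lower-casing is likewise an assumption.

// Sketch of the assumed getCreateMode helper: CreateMode.fromFlag(int) converts
// the flag stored in the CreateRequest record back into a CreateMode, whose name
// is then lower-cased for the audit entry.
private static String getCreateMode(CreateRequest createRequest) throws KeeperException {
    return CreateMode.fromFlag(createRequest.getFlags()).toString().toLowerCase();
}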
Use of org.apache.zookeeper.proto.CreateRequest in project zookeeper by apache.
The class SessionInvalidationTest, method testCreateAfterCloseShouldFail.
/**
 * Test solution for ZOOKEEPER-1208. Verify that operations are not
 * accepted after a close session.
 *
 * We're using our own marshalling here in order to force an operation
 * after the session is closed (ZooKeeper.class will not allow this). Also,
 * filling the pipe with operations increases the likelihood that the
 * server will process the create before FinalRequestProcessor removes the
 * session from the tracker.
 */
@Test
public void testCreateAfterCloseShouldFail() throws Exception {
    for (int i = 0; i < 10; i++) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        BinaryOutputArchive boa = BinaryOutputArchive.getArchive(baos);
        // open a connection
        boa.writeInt(44, "len");
        ConnectRequest conReq = new ConnectRequest(0, 0, 30000, 0, new byte[16]);
        conReq.serialize(boa, "connect");
        // close connection
        boa.writeInt(8, "len");
        RequestHeader h = new RequestHeader(1, ZooDefs.OpCode.closeSession);
        h.serialize(boa, "header");
        // create ephemeral znode
        // We'll fill this in later
        boa.writeInt(52, "len");
        RequestHeader header = new RequestHeader(2, OpCode.create);
        header.serialize(boa, "header");
        CreateRequest createReq = new CreateRequest("/foo" + i, new byte[0], Ids.OPEN_ACL_UNSAFE, 1);
        createReq.serialize(boa, "request");
        baos.close();
        System.out.println("Length:" + baos.toByteArray().length);
        String[] hp = hostPort.split(":");
        Socket sock = new Socket(hp[0], Integer.parseInt(hp[1]));
        InputStream resultStream = null;
        try {
            OutputStream outstream = sock.getOutputStream();
            byte[] data = baos.toByteArray();
            outstream.write(data);
            outstream.flush();
            resultStream = sock.getInputStream();
            byte[] b = new byte[10000];
            int len;
            while ((len = resultStream.read(b)) >= 0) {
                // got results
                System.out.println("gotlen:" + len);
            }
        } finally {
            if (resultStream != null) {
                resultStream.close();
            }
            sock.close();
        }
    }
    ZooKeeper zk = createClient();
    assertEquals(1, zk.getChildren("/", false).size());
    zk.close();
}
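
The hard-coded frame lengths above (44, 8 and 52 bytes) must match exactly what each record serializes to. A hedged alternative is to serialize the record(s) into a temporary buffer first and write the measured size as the length prefix; the frame helper below is a sketch of that idea and is not part of the original test.

// Sketch: build one length-prefixed frame from a sequence of jute records
// instead of hand-counting bytes. Assumes org.apache.jute.Record,
// org.apache.jute.BinaryOutputArchive, java.io.ByteArrayOutputStream,
// java.io.DataOutputStream and java.io.IOException are imported.
private static byte[] frame(Record... records) throws IOException {
    ByteArrayOutputStream body = new ByteArrayOutputStream();
    BinaryOutputArchive archive = BinaryOutputArchive.getArchive(body);
    for (Record record : records) {
        record.serialize(archive, "record");
    }
    ByteArrayOutputStream framed = new ByteArrayOutputStream();
    new DataOutputStream(framed).writeInt(body.size());  // big-endian length prefix
    body.writeTo(framed);
    return framed.toByteArray();
}

With such a helper, the create frame could be written as outstream.write(frame(new RequestHeader(2, OpCode.create), createReq)) without tracking the 52-byte size by hand.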