Example usage of org.apache.jute.Record in the Apache ZooKeeper project.
From the append method of the TruncateTest class.
/**
 * Appends a single synthetic setData transaction for path "/foo&lt;i&gt;" to the
 * given database's transaction log and commits it to disk.
 */
private void append(ZKDatabase zkdb, int i) throws IOException {
    // Header: clientId=1, cxid=1, zxid=i, time=1, type=setData.
    final TxnHeader header = new TxnHeader(1, 1, i, 1, ZooDefs.OpCode.setData);
    final Record setData = new SetDataTxn("/foo" + i, new byte[0], 1);
    // Wrap header + txn in a request, append it, then flush the log.
    zkdb.append(new Request(0, 0, 0, header, setData, 0));
    zkdb.commit();
}
Example usage of org.apache.jute.Record in the Apache ZooKeeper project.
From the processSasl method of the ZooKeeperServer class.
/**
 * Handles one round of the client SASL negotiation: deserializes the client's
 * token from the incoming buffer, feeds it to the connection's SASL server,
 * and replies with the server's response token. On authentication failure the
 * connection is either kept open (when failed clients are tolerated) or sent
 * an error reply and closed.
 *
 * @param incomingBuffer buffer positioned at a serialized GetSASLRequest
 * @param cnxn           the client connection being authenticated
 * @param requestHeader  header of the request being answered (provides the xid)
 * @throws IOException if the request cannot be deserialized or a response cannot be sent
 */
private void processSasl(ByteBuffer incomingBuffer, ServerCnxn cnxn, RequestHeader requestHeader) throws IOException {
    LOG.debug("Responding to client SASL token.");
    GetSASLRequest clientTokenRecord = new GetSASLRequest();
    ByteBufferInputStream.byteBuffer2Record(incomingBuffer, clientTokenRecord);
    byte[] clientToken = clientTokenRecord.getToken();
    LOG.debug("Size of client SASL token: {}", clientToken.length);
    byte[] responseToken = null;
    try {
        ZooKeeperSaslServer saslServer = cnxn.zooKeeperSaslServer;
        try {
            // note that clientToken might be empty (clientToken.length == 0):
            // if using the DIGEST-MD5 mechanism, clientToken will be empty at the beginning of the
            // SASL negotiation process.
            responseToken = saslServer.evaluateResponse(clientToken);
            if (saslServer.isComplete()) {
                String authorizationID = saslServer.getAuthorizationID();
                LOG.info("Session 0x{}: adding SASL authorization for authorizationID: {}", Long.toHexString(cnxn.getSessionId()), authorizationID);
                cnxn.addAuthInfo(new Id("sasl", authorizationID));
                if (isSaslSuperUser(authorizationID)) {
                    cnxn.addAuthInfo(new Id("super", ""));
                    LOG.info("Session 0x{}: Authenticated Id '{}' as super user", Long.toHexString(cnxn.getSessionId()), authorizationID);
                }
            }
        } catch (SaslException e) {
            // Pass the exception as the trailing argument (no '{}' placeholder for it)
            // so SLF4J logs the full stack trace instead of just e.toString().
            LOG.warn("Client {} failed to SASL authenticate", cnxn.getRemoteSocketAddress(), e);
            if (shouldAllowSaslFailedClientsConnect() && !authHelper.isSaslAuthRequired()) {
                LOG.warn("Maintaining client connection despite SASL authentication failure.");
            } else {
                int error;
                if (authHelper.isSaslAuthRequired()) {
                    // Message fixed: "authenticaiton" -> "authentication", and a space added
                    // between the concatenated fragments ("...,but" -> "..., but").
                    LOG.warn("Closing client connection due to server requires client SASL authentication, "
                            + "but client SASL authentication has failed, or client is not configured with SASL "
                            + "authentication.");
                    error = Code.SESSIONCLOSEDREQUIRESASLAUTH.intValue();
                } else {
                    LOG.warn("Closing client connection due to SASL authentication failure.");
                    error = Code.AUTHFAILED.intValue();
                }
                // Tell the client why, then tear the session down and stop reading.
                ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, error);
                cnxn.sendResponse(replyHeader, new SetSASLResponse(null), "response");
                cnxn.sendCloseSession();
                cnxn.disableRecv();
                return;
            }
        }
    } catch (NullPointerException e) {
        // cnxn.zooKeeperSaslServer may legitimately be null when the connection
        // was not set up for SASL; kept as-is to preserve existing behavior.
        LOG.error("cnxn.saslServer is null: cnxn object did not initialize its saslServer properly.");
    }
    if (responseToken != null) {
        LOG.debug("Size of server SASL response: {}", responseToken.length);
    }
    // Send the (possibly null) server token back with an OK status.
    ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, Code.OK.intValue());
    Record record = new SetSASLResponse(responseToken);
    cnxn.sendResponse(replyHeader, record, "response");
}
Example usage of org.apache.jute.Record in the Apache ZooKeeper project.
From the createMockRequest method of the LeaderBeanTest class.
/**
 * Builds a Request backed by mocked TxnHeader and Record objects whose
 * serialize() calls write fixed marker strings, so the request can be
 * serialized in tests without real transaction data.
 */
private Request createMockRequest() throws IOException {
    // Header mock: serializing it writes "test" under the "header" tag.
    TxnHeader header = mock(TxnHeader.class);
    doAnswer(invocation -> {
        OutputArchive archive = (OutputArchive) invocation.getArguments()[0];
        archive.writeString("header", "test");
        return null;
    }).when(header).serialize(any(OutputArchive.class), anyString());
    // Txn mock: serializing it writes "test" under the "record" tag.
    Record txn = mock(Record.class);
    doAnswer(invocation -> {
        OutputArchive archive = (OutputArchive) invocation.getArguments()[0];
        archive.writeString("record", "test");
        return null;
    }).when(txn).serialize(any(OutputArchive.class), anyString());
    return new Request(1, 2, 3, header, txn, 4);
}
Example usage of org.apache.jute.Record in the Apache ZooKeeper project.
From the deserializeTxn method of the SerializeUtils class.
/**
 * Deserializes a transaction-log entry: the TxnHeader, the op-specific
 * transaction record selected by the header's type, and (when digests are
 * enabled) an optional trailing TxnDigest.
 *
 * @param txnBytes the serialized entry: header, then txn body, then optional digest
 * @return a TxnLogEntry holding the txn record (null for closeSession entries
 *         without a body), the header, and the digest (null when absent or disabled)
 * @throws IOException if the header's op type is not recognized, or the body is
 *         truncated in a way that is not a known legacy format
 */
public static TxnLogEntry deserializeTxn(byte[] txnBytes) throws IOException {
    TxnHeader hdr = new TxnHeader();
    final ByteArrayInputStream bais = new ByteArrayInputStream(txnBytes);
    InputArchive ia = BinaryInputArchive.getArchive(bais);
    hdr.deserialize(ia, "hdr");
    // Remember the position just past the header so the body can be re-read
    // below as a legacy V0 create txn if the V1 parse hits EOF.
    bais.mark(bais.available());
    Record txn = null;
    switch(hdr.getType()) {
        case OpCode.createSession:
            // This isn't really an error txn; it just has the same
            // format. The error represents the timeout
            txn = new CreateSessionTxn();
            break;
        case OpCode.closeSession:
            txn = ZooKeeperServer.isCloseSessionTxnEnabled() ? new CloseSessionTxn() : null;
            break;
        case OpCode.create:
        case OpCode.create2:
            txn = new CreateTxn();
            break;
        case OpCode.createTTL:
            txn = new CreateTTLTxn();
            break;
        case OpCode.createContainer:
            txn = new CreateContainerTxn();
            break;
        case OpCode.delete:
        case OpCode.deleteContainer:
            txn = new DeleteTxn();
            break;
        case OpCode.reconfig:
        case OpCode.setData:
            txn = new SetDataTxn();
            break;
        case OpCode.setACL:
            txn = new SetACLTxn();
            break;
        case OpCode.error:
            txn = new ErrorTxn();
            break;
        case OpCode.multi:
            txn = new MultiTxn();
            break;
        default:
            // Bug fix: the original concatenated a "%d" format specifier with the
            // type ("type=%d" + hdr.getType()), producing e.g. "type=%d5".
            throw new IOException(String.format("Unsupported Txn with type=%d", hdr.getType()));
    }
    if (txn != null) {
        try {
            txn.deserialize(ia, "txn");
        } catch (EOFException e) {
            // perhaps this is a V0 Create
            if (hdr.getType() == OpCode.create) {
                CreateTxn create = (CreateTxn) txn;
                bais.reset();
                CreateTxnV0 createv0 = new CreateTxnV0();
                createv0.deserialize(ia, "txn");
                // cool now make it V1. a -1 parentCVersion will
                // trigger fixup processing in processTxn
                create.setPath(createv0.getPath());
                create.setData(createv0.getData());
                create.setAcl(createv0.getAcl());
                create.setEphemeral(createv0.getEphemeral());
                create.setParentCVersion(-1);
            } else if (hdr.getType() == OpCode.closeSession) {
                // perhaps this is before CloseSessionTxn was added,
                // ignore it and reset txn to null
                txn = null;
            } else {
                throw e;
            }
        }
    }
    TxnDigest digest = null;
    if (ZooKeeperServer.isDigestEnabled()) {
        digest = new TxnDigest();
        try {
            digest.deserialize(ia, "digest");
        } catch (EOFException exception) {
            // may not have digest in the txn
            digest = null;
        }
    }
    return new TxnLogEntry(txn, hdr, digest);
}
Example usage of org.apache.jute.Record in the Apache ZooKeeper project.
From the chop method of the LogChopper class.
/**
 * Copies transactions from a txn-log stream to an output stream, stopping
 * after the transaction with the given zxid ("chopping" the log there).
 *
 * Returns true only when something was actually chopped: the input contained
 * the target zxid and at least one later transaction. Returns false when the
 * file header is invalid, the target zxid is absent, or EOF / the preallocated
 * empty-entry sentinel is reached before any later transaction appears.
 *
 * @param is   input txn-log stream (file header followed by txn entries)
 * @param os   destination for the header and the kept transactions
 * @param zxid last zxid to keep in the output
 * @throws IOException on CRC mismatch or a partial trailing transaction
 */
public static boolean chop(InputStream is, OutputStream os, long zxid) throws IOException {
BinaryInputArchive logStream = BinaryInputArchive.getArchive(is);
BinaryOutputArchive choppedStream = BinaryOutputArchive.getArchive(os);
// Validate and copy the file header before any txn entries.
FileHeader fhdr = new FileHeader();
fhdr.deserialize(logStream, "fileheader");
if (fhdr.getMagic() != FileTxnLog.TXNLOG_MAGIC) {
System.err.println("Invalid magic number in txn log file");
return false;
}
System.out.println("ZooKeeper Transactional Log File with dbid " + fhdr.getDbid() + " txnlog format version " + fhdr.getVersion());
fhdr.serialize(choppedStream, "fileheader");
int count = 0;
boolean hasZxid = false;
long previousZxid = -1;
while (true) {
long crcValue;
byte[] bytes;
try {
// Each entry is: crc, length-prefixed txn payload, then an 'B' end-of-record byte.
crcValue = logStream.readLong("crcvalue");
bytes = logStream.readBuffer("txnEntry");
} catch (EOFException e) {
System.out.println("EOF reached after " + count + " txns.");
// returning false because nothing was chopped
return false;
}
if (bytes.length == 0) {
// Since we preallocate, we define EOF to be an
// empty transaction
System.out.println("EOF reached after " + count + " txns.");
// returning false because nothing was chopped
return false;
}
// Verify the payload checksum before trusting the entry.
Checksum crc = new Adler32();
crc.update(bytes, 0, bytes.length);
if (crcValue != crc.getValue()) {
throw new IOException("CRC doesn't match " + crcValue + " vs " + crc.getValue());
}
TxnLogEntry entry = SerializeUtils.deserializeTxn(bytes);
TxnHeader hdr = entry.getHeader();
Record txn = entry.getTxn();
// The end-of-record marker must be present; otherwise the last txn is truncated.
if (logStream.readByte("EOR") != 'B') {
System.out.println("Last transaction was partial.");
throw new EOFException("Last transaction was partial.");
}
final long txnZxid = hdr.getZxid();
if (txnZxid == zxid) {
hasZxid = true;
}
// logging the gap to make the inconsistency investigation easier
if (previousZxid != -1 && txnZxid != previousZxid + 1) {
long txnEpoch = ZxidUtils.getEpochFromZxid(txnZxid);
long txnCounter = ZxidUtils.getCounterFromZxid(txnZxid);
long previousEpoch = ZxidUtils.getEpochFromZxid(previousZxid);
if (txnEpoch == previousEpoch) {
System.out.println(String.format("There is intra-epoch gap between %x and %x", previousZxid, txnZxid));
} else if (txnCounter != 1) {
// A new epoch normally restarts the counter at 1; anything else is a gap.
System.out.println(String.format("There is inter-epoch gap between %x and %x", previousZxid, txnZxid));
}
}
previousZxid = txnZxid;
if (txnZxid > zxid) {
// Past the chop point: succeed only if the target zxid was actually seen.
if (count == 0 || !hasZxid) {
System.out.println(String.format("This log does not contain zxid %x", zxid));
return false;
}
System.out.println(String.format("Chopping at %x new log has %d records", zxid, count));
return true;
}
// Entry is at or before the chop point: copy it through verbatim.
choppedStream.writeLong(crcValue, "crcvalue");
choppedStream.writeBuffer(bytes, "txnEntry");
choppedStream.writeByte((byte) 'B', "EOR");
count++;
}
}
Aggregations