Use of org.agrona.DirectBuffer in project nd4j by deeplearning4j.
From the class RoutedTransport, method sendMessageToAllClients:
@Override
public void sendMessageToAllClients(VoidMessage message, Long... exclusions) {
    if (nodeRole != NodeRole.SHARD)
        throw new ND4JIllegalStateException("Only SHARD is allowed to send messages to all Clients");

    final DirectBuffer buffer = message.asUnsafeBuffer();

    // counts matched exclusions, so we can stop checking once all of them have been seen
    final AtomicInteger cnt = new AtomicInteger(0);

    clients.values().parallelStream().filter(rc -> {
        // do not send the message back to yourself :)
        if (rc.getLongHash() == this.originatorId || rc.getLongHash() == 0)
            return false;

        // skip explicitly excluded clients
        if (exclusions != null && cnt.get() < exclusions.length) {
            for (Long exclude : exclusions) {
                if (exclude.longValue() == rc.getLongHash()) {
                    cnt.incrementAndGet();
                    return false;
                }
            }
        }

        return true;
    }).forEach((rc) -> {
        RetransmissionHandler.TransmissionStatus res;
        long retr = 0;
        boolean delivered = false;
        while (!delivered) {
            // still crude; maybe use a real ReentrantLock here?
            synchronized (rc.locker) {
                res = RetransmissionHandler.getTransmissionStatus(rc.getPublication().offer(buffer));
            }
            switch (res) {
                case NOT_CONNECTED:
                    if (!rc.getActivated().get()) {
                        retr++;
                        if (retr > 20)
                            throw new ND4JIllegalStateException("Can't connect to Shard: [" + rc.getPublication().channel() + "]");

                        // back off before retrying; the timeout is in milliseconds, parkNanos expects nanoseconds
                        LockSupport.parkNanos(voidConfiguration.getRetransmitTimeout() * 1_000_000L);
                    } else {
                        throw new ND4JIllegalStateException("Shard reassignment is not implemented yet");
                    }
                    break;
                case ADMIN_ACTION:
                case BACKPRESSURE:
                    // transient condition: back off, then retry the offer
                    LockSupport.parkNanos(voidConfiguration.getRetransmitTimeout() * 1_000_000L);
                    break;
                case MESSAGE_SENT:
                    delivered = true;
                    rc.getActivated().set(true);
                    break;
            }
        }
    });
}
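The retry loop above hinges on the value returned by Publication.offer(buffer): Aeron returns the new stream position on success and a negative sentinel otherwise. Below is a minimal sketch of how a getTransmissionStatus-style mapping could look; the Publication constants (NOT_CONNECTED, BACK_PRESSURED, ADMIN_ACTION) are real Aeron API, but the enum and method body are an assumption, not the actual nd4j implementation.

import io.aeron.Publication;

// Sketch (assumption): map Publication.offer(...) results onto the
// statuses used in the switch above.
public enum TransmissionStatus {
    MESSAGE_SENT, NOT_CONNECTED, BACKPRESSURE, ADMIN_ACTION
}

public static TransmissionStatus getTransmissionStatus(long offerResult) {
    if (offerResult >= 0)
        return TransmissionStatus.MESSAGE_SENT;  // offer() returns the new stream position on success
    if (offerResult == Publication.NOT_CONNECTED)
        return TransmissionStatus.NOT_CONNECTED; // no subscriber connected yet
    if (offerResult == Publication.BACK_PRESSURED)
        return TransmissionStatus.BACKPRESSURE;  // subscriber cannot keep up; back off and retry
    if (offerResult == Publication.ADMIN_ACTION)
        return TransmissionStatus.ADMIN_ACTION;  // transient admin action (e.g. log rotation); retry
    throw new IllegalStateException("Unexpected offer() result: " + offerResult);
}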
Use of org.agrona.DirectBuffer in project nd4j by deeplearning4j.
From the class AeronStat, method mapCounters:
public static CountersReader mapCounters() {
    final File cncFile = CommonContext.newDefaultCncFile();
    System.out.println("Command `n Control file " + cncFile);

    final MappedByteBuffer cncByteBuffer = IoUtil.mapExistingFile(cncFile, "cnc");
    final DirectBuffer cncMetaData = createMetaDataBuffer(cncByteBuffer);
    final int cncVersion = cncMetaData.getInt(cncVersionOffset(0));

    if (CncFileDescriptor.CNC_VERSION != cncVersion) {
        throw new IllegalStateException("CnC version not supported: file version=" + cncVersion);
    }

    return new CountersReader(createCountersMetaDataBuffer(cncByteBuffer, cncMetaData),
                    createCountersValuesBuffer(cncByteBuffer, cncMetaData));
}
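Once the CnC file is mapped, the returned CountersReader can be iterated directly. A short usage sketch (not from the source) follows; CountersReader.forEach and getCounterValue are standard agrona API.

// Usage sketch (assumption): print every counter's label and current value.
final CountersReader counters = mapCounters();
counters.forEach((counterId, typeId, keyBuffer, label) ->
                System.out.format("%3d: %,20d - %s%n",
                                counterId, counters.getCounterValue(counterId), label));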
Use of org.agrona.DirectBuffer in project nd4j by deeplearning4j.
From the class NDArrayMessageChunkTests, method testChunkSerialization:
@Test
public void testChunkSerialization() {
    NDArrayMessage message = NDArrayMessage.wholeArrayUpdate(Nd4j.ones(1000));
    int chunkSize = 128;
    int numChunks = NDArrayMessage.numChunksForMessage(message, chunkSize);
    NDArrayMessageChunk[] chunks = NDArrayMessage.chunks(message, chunkSize);
    assertEquals(numChunks, chunks.length);

    // every chunk must carry the same metadata as the first one
    for (int i = 1; i < numChunks; i++) {
        assertEquals(chunks[0].getMessageType(), chunks[i].getMessageType());
        assertEquals(chunks[0].getId(), chunks[i].getId());
        assertEquals(chunks[0].getChunkSize(), chunks[i].getChunkSize());
        assertEquals(chunks[0].getNumChunks(), chunks[i].getNumChunks());
    }

    ByteBuffer[] concat = new ByteBuffer[chunks.length];
    for (int i = 0; i < concat.length; i++)
        concat[i] = chunks[i].getData();

    // the concatenated chunk contents must equal the original direct buffer contents
    DirectBuffer buffer = NDArrayMessage.toBuffer(message);
    ByteBuffer byteBuffer = buffer.byteBuffer();
    ByteBuffer concatAll = BufferUtil.concat(concat, buffer.capacity());
    byte[] arrays = new byte[byteBuffer.capacity()];
    byteBuffer.rewind();
    byteBuffer.get(arrays);
    byte[] arrays2 = new byte[concatAll.capacity()];
    concatAll.rewind();
    concatAll.get(arrays2);
    assertArrayEquals(arrays, arrays2);

    // and reassembling the chunks must reproduce the original message
    NDArrayMessage message1 = NDArrayMessage.fromChunks(chunks);
    assertEquals(message, message1);
}
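The chunking this test exercises boils down to slicing one DirectBuffer into fixed-size pieces. Here is a hypothetical sketch of that idea using only the real DirectBuffer.getBytes and capacity APIs; the NDArrayMessage internals are not shown, and the slicing logic is illustrative, not the library's actual code.

// Hypothetical sketch: split a DirectBuffer-backed message into
// fixed-size ByteBuffer slices, mirroring what chunks(message, chunkSize) produces.
DirectBuffer whole = NDArrayMessage.toBuffer(message);
int chunkSize = 128;
int numChunks = (whole.capacity() + chunkSize - 1) / chunkSize; // ceiling division
ByteBuffer[] slices = new ByteBuffer[numChunks];
for (int i = 0; i < numChunks; i++) {
    int chunkOffset = i * chunkSize;
    int chunkLength = Math.min(chunkSize, whole.capacity() - chunkOffset);
    byte[] dst = new byte[chunkLength];
    whole.getBytes(chunkOffset, dst, 0, chunkLength); // absolute read, no position state
    slices[i] = ByteBuffer.wrap(dst);
}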
Use of org.agrona.DirectBuffer in project zeebe by zeebe-io.
From the class CreateTopicTest, method shouldRejectPartitionCreationAndNotBreak:
@Test
public void shouldRejectPartitionCreationAndNotBreak() {
    // given
    final ClientTransport transport = apiRule.getTransport();
    final RemoteAddress remoteAddress = transport.registerRemoteAndAwaitChannel(BROKER_MGMT_ADDRESS);
    final ClientOutput output = transport.getOutput();
    final CreatePartitionRequest partitionMessage = new CreatePartitionRequest();
    final DirectBuffer topicName = BufferUtil.wrapString("foo");
    final int partition1 = 142;
    final int partition2 = 143;

    partitionMessage.topicName(topicName);
    partitionMessage.partitionId(partition1);

    // => should create the partition
    doRepeatedly(() -> output.sendRequest(remoteAddress, partitionMessage)).until(r -> r != null);
    // => the duplicate should be rejected/ignored
    doRepeatedly(() -> output.sendRequest(remoteAddress, partitionMessage)).until(r -> r != null);

    // when creating another partition
    partitionMessage.partitionId(partition2);
    doRepeatedly(() -> output.sendRequest(remoteAddress, partitionMessage)).until(r -> r != null);

    // then this should be successful (i.e. the rejected request should not have jammed the broker)
    waitUntil(() -> arePublished(partition1, partition2));
}
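BufferUtil.wrapString above packs the topic name into a DirectBuffer. A sketch of what such a helper amounts to in plain agrona is below; the UnsafeBuffer constructor and getStringWithoutLengthUtf8 are real agrona API, but treating this as Zeebe's actual helper implementation is an assumption.

// Sketch (assumption): a wrapString-style helper in plain agrona.
static DirectBuffer wrapString(String value) {
    return new UnsafeBuffer(value.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}

// round-trip check:
DirectBuffer topicName = wrapString("foo");
String roundTrip = topicName.getStringWithoutLengthUtf8(0, topicName.capacity()); // "foo"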
Use of org.agrona.DirectBuffer in project zeebe by zeebe-io.
From the class BrokerTaskEvent, method wrap:
@Override
public void wrap(DirectBuffer buff, int offset, int length) {
    super.wrap(buff, offset, length);

    if (accessValuesOnDeserialization) {
        // force-read the type string
        final DirectBuffer typeValueBuffer = typeProp.getValue();
        readString(typeValueBuffer, 0, typeValueBuffer.capacity());

        // decode the headers map: alternating length-prefixed string keys and values
        final DirectBuffer headersBuffer = headersProp.getValue();
        reader.wrap(headersBuffer, 0, headersBuffer.capacity());
        final int numHeaders = reader.readMapHeader();
        final Map<String, String> headers = new HashMap<>();
        for (int i = 0; i < numHeaders; i++) {
            final int keyLength = reader.readStringLength();
            final String key = readString(buff, reader.getOffset(), keyLength);
            reader.skipBytes(keyLength);
            final int valLength = reader.readStringLength();
            final String value = readString(buff, reader.getOffset(), valLength);
            reader.skipBytes(valLength);
            headers.put(key, value);
        }

        // touch the payload so it is read out as well
        final DirectBuffer payloadBuffer = payloadProp.getValue();
        payloadBuffer.getBytes(0, new byte[payloadBuffer.capacity()]);
    }
}
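The readString helper used throughout wrap is not shown in the snippet. A minimal sketch of what such a helper could look like follows; this is an assumption, and Zeebe's actual implementation may differ.

// Minimal sketch (assumption) of a readString-style helper: copy `length`
// bytes out of the DirectBuffer starting at `offset` and decode them as UTF-8.
private static String readString(DirectBuffer buffer, int offset, int length) {
    final byte[] bytes = new byte[length];
    buffer.getBytes(offset, bytes);
    return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
}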