Use of herddb.network.Channel in project herddb by diennea.
The class NetworkChannelTest, method testCloseServer.
@Test
public void testCloseServer() throws Exception {
    try (NettyChannelAcceptor server = new NettyChannelAcceptor("localhost", NetworkUtils.assignFirstFreePort(), true)) {
        server.setEnableJVMNetwork(false);
        server.setEnableRealNetwork(true);
        server.setAcceptor((Channel channel) -> {
            channel.setMessagesReceiver(new ChannelEventListener() {
            });
            return (ServerSideConnection) () -> new Random().nextLong();
        });
        server.start();
        ExecutorService executor = Executors.newCachedThreadPool();
        AtomicBoolean closeNotificationReceived = new AtomicBoolean();
        try (Channel client = NettyConnector.connect(server.getHost(), server.getPort(), true, 0, 0, new ChannelEventListener() {

            @Override
            public void channelClosed(Channel channel) {
                System.out.println("client channelClosed");
                closeNotificationReceived.set(true);
            }
        }, executor, new NioEventLoopGroup(10, executor))) {
            // closing the server should eventually close the client
            server.close();
            TestUtils.waitForCondition(() -> closeNotificationReceived.get(), NOOP, 100);
            assertTrue(closeNotificationReceived.get());
        } finally {
            executor.shutdown();
        }
    }
}
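The test above polls for the close notification with TestUtils.waitForCondition before asserting the flag. A minimal alternative sketch (not from the HerdDB sources): block on a java.util.concurrent.CountDownLatch so the assertion waits at most a fixed timeout without polling. Only the ChannelEventListener.channelClosed callback shown above and the JDK are assumed.

    // requires java.util.concurrent.CountDownLatch and java.util.concurrent.TimeUnit
    CountDownLatch closed = new CountDownLatch(1);
    ChannelEventListener listener = new ChannelEventListener() {

        @Override
        public void channelClosed(Channel channel) {
            // release the waiting test thread as soon as the close notification arrives
            closed.countDown();
        }
    };
    // ... open the client with this listener, then close the server ...
    assertTrue("client was not notified of the close", closed.await(10, TimeUnit.SECONDS));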
Use of herddb.network.Channel in project herddb by diennea.
The class RetryOnLeaderChangedTest, method testSwitchLeaderAndAuthTimeout.
@Test
public void testSwitchLeaderAndAuthTimeout() throws Exception {
    TestStatsProvider statsProvider = new TestStatsProvider();
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    ServerConfiguration serverconfig_2 = serverconfig_1.copy()
            .set(ServerConfiguration.PROPERTY_NODEID, "server2")
            .set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    final AtomicBoolean suspendProcessing = new AtomicBoolean(false);
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        try (Server server_2 = new Server(serverconfig_2) {

            @Override
            protected ServerSideConnectionPeer buildPeer(Channel channel) {
                return new ServerSideConnectionPeer(channel, this) {

                    @Override
                    public void requestReceived(Pdu message, Channel channel) {
                        if (suspendProcessing.get()) {
                            LOG.log(Level.INFO, "dropping message type " + message.type + " id " + message.messageId);
                            message.close();
                            return;
                        }
                        super.requestReceived(message, channel);
                    }
                };
            }
        }) {
            server_2.start();
            TestUtils.execute(server_1.getManager(), "CREATE TABLESPACE 'ttt','leader:" + server_2.getNodeId() + "','expectedreplicacount:2'", Collections.emptyList());
            // wait for server_2 to wake up
            for (int i = 0; i < 40; i++) {
                TableSpaceManager tableSpaceManager2 = server_2.getManager().getTableSpaceManager("ttt");
                if (tableSpaceManager2 != null && tableSpaceManager2.isLeader()) {
                    break;
                }
                Thread.sleep(500);
            }
            assertTrue(server_2.getManager().getTableSpaceManager("ttt") != null && server_2.getManager().getTableSpaceManager("ttt").isLeader());
            // wait for server_1 to announce itself as a follower
            waitClusterStatus(server_1.getManager(), server_1.getNodeId(), "follower");
            ClientConfiguration clientConfiguration = new ClientConfiguration();
            clientConfiguration.set(ClientConfiguration.PROPERTY_MODE, ClientConfiguration.PROPERTY_MODE_CLUSTER);
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
            clientConfiguration.set(ClientConfiguration.PROPERTY_MAX_CONNECTIONS_PER_SERVER, 2);
            clientConfiguration.set(ClientConfiguration.PROPERTY_TIMEOUT, 2000);
            StatsLogger logger = statsProvider.getStatsLogger("ds");
            try (HDBClient client1 = new HDBClient(clientConfiguration, logger) {

                @Override
                public HDBConnection openConnection() {
                    HDBConnection con = new VisibleRouteHDBConnection(this);
                    registerConnection(con);
                    return con;
                }
            }) {
                try (VisibleRouteHDBConnection connection = (VisibleRouteHDBConnection) client1.openConnection()) {
                    // create table and insert data
                    connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE ttt.t1(k1 int primary key, n1 int)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(1,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    assertEquals("server2", connection.getRouteToTableSpace("ttt").getNodeId());
                    // change leader
                    switchLeader(server_1.getNodeId(), server_2.getNodeId(), server_1.getManager());
                    try (VisibleRouteHDBConnection connection2 = (VisibleRouteHDBConnection) client1.openConnection()) {
                        // the connection routing still points to the old leader (now a follower)
                        assertEquals("server2", connection2.getRouteToTableSpace("ttt").getNodeId());
                        // suspend server_2 authentication
                        suspendProcessing.set(true);
                        // attempt an insert with the old routing: the suspended authentication generates a timeout
                        // and the routing will be re-evaluated
                        assertEquals(1, connection2.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(2,2)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList()).updateCount);
                        // the routing now points to the current leader
                        assertEquals("server1", connection2.getRouteToTableSpace("ttt").getNodeId());
                        suspendProcessing.set(false);
                    }
                }
            }
        }
    }
}
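The hand-rolled wait loop above (40 iterations of 500 ms) recurs in this kind of cluster test. Below is a small sketch, not taken from the HerdDB sources, that factors the wait into a reusable helper; it assumes only the getManager, getTableSpaceManager, isLeader and getNodeId calls already used in the test, and the helper name is hypothetical.

    // Hypothetical helper (not part of HerdDB): poll until the tablespace manager
    // on the given server reports itself as leader, or fail after the timeout.
    static void waitForLeadership(Server server, String tableSpace, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            TableSpaceManager manager = server.getManager().getTableSpaceManager(tableSpace);
            if (manager != null && manager.isLeader()) {
                return;
            }
            Thread.sleep(500);
        }
        throw new AssertionError(server.getNodeId() + " did not become leader of " + tableSpace + " within " + timeoutMillis + " ms");
    }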
Use of herddb.network.Channel in project herddb by diennea.
The class TableSpaceManager, method sendTransactionsDump.
private void sendTransactionsDump(List<Transaction> batch, Channel _channel, String dumpId, final int timeout, Message response_to_start) throws TimeoutException, InterruptedException {
    if (batch.isEmpty()) {
        return;
    }
    Map<String, Object> transactionsData = new HashMap<>();
    transactionsData.put("command", "transactions");
    List<byte[]> encodedTransactions = batch.stream()
            .map(Transaction::serialize)
            .collect(Collectors.toList());
    transactionsData.put("transactions", encodedTransactions);
    Message response_to_transactionsData = _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, transactionsData), timeout);
    if (response_to_transactionsData.type != Message.TYPE_ACK) {
        // log the parameters of the reply that was not an ACK
        LOGGER.log(Level.SEVERE, "error response at transactionsData command: " + response_to_transactionsData.parameters);
    }
    batch.clear();
}
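The method above follows a simple request/ack pattern: build a payload map, send it with sendMessageWithReply, and check the reply for TYPE_ACK. A hedged sketch of the same pattern factored into a helper in the same class; only the Message.TABLESPACE_DUMP_DATA factory, sendMessageWithReply, TYPE_ACK and the tableSpaceName field shown above are assumed, and the helper name is hypothetical.

    // Hypothetical helper (same pattern as above, not part of HerdDB):
    // send one dump-data chunk and fail fast if the peer does not ACK it.
    private void sendDumpChunk(Channel channel, String dumpId, Map<String, Object> payload, int timeout)
            throws TimeoutException, InterruptedException {
        Message reply = channel.sendMessageWithReply(
                Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, payload), timeout);
        if (reply.type != Message.TYPE_ACK) {
            throw new IllegalStateException("dump chunk was not acknowledged: " + reply.parameters);
        }
    }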
Use of herddb.network.Channel in project herddb by diennea.
The class RoutedClientSideConnection, method messageReceived.
@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
public void messageReceived(Message message, Channel _channel) {
    switch (message.type) {
        case Message.TYPE_TABLESPACE_DUMP_DATA:
            {
                String dumpId = (String) message.parameters.get("dumpId");
                TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
                LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[]{dumpId, receiver});
                if (receiver == null) {
                    if (_channel != null) {
                        _channel.sendReplyMessage(message, Message.ERROR(clientId, new Exception("no such dump receiver " + dumpId)));
                    }
                    return;
                }
                try {
                    Map<String, Object> values = (Map<String, Object>) message.parameters.get("values");
                    String command = (String) values.get("command") + "";
                    boolean sendAck = true;
                    switch (command) {
                        case "start":
                            {
                                long ledgerId = (long) values.get("ledgerid");
                                long offset = (long) values.get("offset");
                                receiver.start(new LogSequenceNumber(ledgerId, offset));
                                break;
                            }
                        case "beginTable":
                            {
                                byte[] tableDefinition = (byte[]) values.get("table");
                                Table table = Table.deserialize(tableDefinition);
                                Long estimatedSize = (Long) values.get("estimatedSize");
                                long dumpLedgerId = (Long) values.get("dumpLedgerid");
                                long dumpOffset = (Long) values.get("dumpOffset");
                                List<byte[]> indexesDef = (List<byte[]>) values.get("indexes");
                                List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
                                Map<String, Object> stats = new HashMap<>();
                                stats.put("estimatedSize", estimatedSize);
                                stats.put("dumpLedgerId", dumpLedgerId);
                                stats.put("dumpOffset", dumpOffset);
                                receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
                                break;
                            }
                        case "endTable":
                            {
                                receiver.endTable();
                                break;
                            }
                        case "finish":
                            {
                                long ledgerId = (long) values.get("ledgerid");
                                long offset = (long) values.get("offset");
                                receiver.finish(new LogSequenceNumber(ledgerId, offset));
                                sendAck = false;
                                break;
                            }
                        case "data":
                            {
                                List<KeyValue> data = (List<KeyValue>) values.get("records");
                                List<Record> records = new ArrayList<>(data.size());
                                for (KeyValue kv : data) {
                                    records.add(new Record(new Bytes(kv.key), new Bytes(kv.value)));
                                }
                                receiver.receiveTableDataChunk(records);
                                break;
                            }
                        case "txlog":
                            {
                                List<KeyValue> data = (List<KeyValue>) values.get("records");
                                List<DumpedLogEntry> records = new ArrayList<>(data.size());
                                for (KeyValue kv : data) {
                                    records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(kv.key), kv.value));
                                }
                                receiver.receiveTransactionLogChunk(records);
                                break;
                            }
                        case "transactions":
                            {
                                String tableSpace = (String) values.get("tableSpace");
                                List<byte[]> data = (List<byte[]>) values.get("transactions");
                                List<Transaction> transactions = data.stream().map(array -> Transaction.deserialize(tableSpace, array)).collect(Collectors.toList());
                                receiver.receiveTransactionsAtDump(transactions);
                                break;
                            }
                        default:
                            throw new DataStorageManagerException("invalid dump command:" + command);
                    }
                    if (_channel != null && sendAck) {
                        _channel.sendReplyMessage(message, Message.ACK(clientId));
                    }
                } catch (DataStorageManagerException error) {
                    LOGGER.log(Level.SEVERE, "error while handling dump data", error);
                    if (_channel != null) {
                        _channel.sendReplyMessage(message, Message.ERROR(clientId, error));
                    }
                }
            }
            break;
    }
}
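Each dump command in the switch above is dispatched to a TableSpaceDumpReceiver callback. A minimal sketch of a consumer, not from the HerdDB sources, under the assumption that TableSpaceDumpReceiver can be extended by overriding only the callbacks invoked above; it simply counts the records delivered through receiveTableDataChunk.

    // Assumption: TableSpaceDumpReceiver allows overriding only the callbacks we need.
    // Requires java.util.concurrent.atomic.AtomicLong.
    TableSpaceDumpReceiver countingReceiver = new TableSpaceDumpReceiver() {

        private final AtomicLong recordCount = new AtomicLong();

        @Override
        public void receiveTableDataChunk(List<Record> records) {
            // accumulate the number of dumped records per chunk
            recordCount.addAndGet(records.size());
        }

        @Override
        public void finish(LogSequenceNumber logSequenceNumber) {
            System.out.println("dump finished at " + logSequenceNumber + ", records: " + recordCount.get());
        }
    };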
Use of herddb.network.Channel in project herddb by diennea.
The class LocalChannelTest, method testCloseServer.
@Test
public void testCloseServer() throws Exception {
    InetSocketAddress addr = new InetSocketAddress("localhost", NetworkUtils.assignFirstFreePort());
    try (NettyChannelAcceptor server = new NettyChannelAcceptor(addr.getHostName(), addr.getPort(), true)) {
        server.setEnableRealNetwork(false);
        server.setAcceptor((Channel channel) -> {
            channel.setMessagesReceiver(new ChannelEventListener() {
            });
            return (ServerSideConnection) () -> new Random().nextLong();
        });
        server.start();
        assertNotNull(LocalServerRegistry.getLocalServer(NetworkUtils.getAddress(addr), addr.getPort()));
        ExecutorService executor = Executors.newCachedThreadPool();
        AtomicBoolean closeNotificationReceived = new AtomicBoolean();
        try (Channel client = NettyConnector.connect(addr.getHostName(), addr.getPort(), true, 0, 0, new ChannelEventListener() {

            @Override
            public void channelClosed(Channel channel) {
                System.out.println("client channelClosed");
                closeNotificationReceived.set(true);
            }
        }, executor, null)) {
            // closing the server should close the client
            server.close();
            assertTrue(client.isClosed());
            TestUtils.waitForCondition(() -> closeNotificationReceived.get(), NOOP, 100);
        } finally {
            executor.shutdown();
        }
    }
    assertNull(LocalServerRegistry.getLocalServer(NetworkUtils.getAddress(addr), addr.getPort()));
}
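The two testCloseServer variants differ only in the transport: the NetworkChannelTest version enables the real Netty network and supplies a NioEventLoopGroup, while this one disables it and relies on the in-JVM local channel registry (passing null as the event loop group). A small hedged sketch of a helper that starts the acceptor either way, using only the constructor, setters and acceptor shown in the two tests; the helper name and the boolean flag are hypothetical.

    // Hypothetical helper (not from the HerdDB tests): start an acceptor either on
    // the real network or on the in-JVM local transport, mirroring the two tests above.
    static NettyChannelAcceptor startAcceptor(String host, int port, boolean realNetwork) throws Exception {
        NettyChannelAcceptor server = new NettyChannelAcceptor(host, port, true);
        server.setEnableRealNetwork(realNetwork);   // real Netty sockets
        server.setEnableJVMNetwork(!realNetwork);   // in-JVM local channels
        server.setAcceptor((Channel channel) -> {
            channel.setMessagesReceiver(new ChannelEventListener() {
            });
            return (ServerSideConnection) () -> new Random().nextLong();
        });
        server.start();
        return server;
    }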