Use of com.linkedin.pinot.transport.common.SegmentIdSet in project pinot by linkedin.
Class RandomRoutingTableTest, method testHelixExternalViewBasedRoutingTable.
@Test
public void testHelixExternalViewBasedRoutingTable() throws Exception {
URL resourceUrl = getClass().getClassLoader().getResource("SampleExternalView.json");
Assert.assertNotNull(resourceUrl);
String fileName = resourceUrl.getFile();
String tableName = "testTable_OFFLINE";
InputStream evInputStream = new FileInputStream(fileName);
ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
ZNRecord externalViewRecord = (ZNRecord) znRecordSerializer.deserialize(IOUtils.toByteArray(evInputStream));
int totalRuns = 10000;
RoutingTableBuilder routingStrategy = new BalancedRandomRoutingTableBuilder(10);
HelixExternalViewBasedRouting routingTable = new HelixExternalViewBasedRouting(null, new PercentageBasedRoutingTableSelector(), null, new BaseConfiguration());
routingTable.setSmallClusterRoutingTableBuilder(routingStrategy);
ExternalView externalView = new ExternalView(externalViewRecord);
routingTable.markDataResourceOnline(tableName, externalView, getInstanceConfigs(externalView));
double[] globalArrays = new double[9];
for (int numRun = 0; numRun < totalRuns; ++numRun) {
RoutingTableLookupRequest request = new RoutingTableLookupRequest(tableName, Collections.<String>emptyList());
Map<ServerInstance, SegmentIdSet> serversMap = routingTable.findServers(request);
TreeSet<ServerInstance> serverInstances = new TreeSet<ServerInstance>(serversMap.keySet());
int i = 0;
double[] arrays = new double[9];
for (ServerInstance serverInstance : serverInstances) {
globalArrays[i] += serversMap.get(serverInstance).getSegments().size();
arrays[i++] = serversMap.get(serverInstance).getSegments().size();
}
for (int j = 0; j < arrays.length; ++j) {
// Within a single run, each of the 9 servers should be assigned roughly 28-31 segments
Assert.assertTrue(arrays[j] <= 31);
Assert.assertTrue(arrays[j] >= 28);
}
// System.out.println(Arrays.toString(arrays) + " : " + new StandardDeviation().evaluate(arrays) + " : " + new Mean().evaluate(arrays));
}
for (int i = 0; i < globalArrays.length; ++i) {
Assert.assertTrue(globalArrays[i] / totalRuns <= 31);
Assert.assertTrue(globalArrays[i] / totalRuns >= 28);
}
// System.out.println(Arrays.toString(globalArrays) + " : " + new StandardDeviation().evaluate(globalArrays) + " : "
// + new Mean().evaluate(globalArrays));
}
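The per-server counts checked above come from the SegmentIdSet returned for each ServerInstance. Below is a minimal, self-contained sketch of the two SegmentIdSet calls the test relies on (addSegment and getSegments); the segment names are illustrative, and SegmentId is assumed here to live in the same com.linkedin.pinot.transport.common package as SegmentIdSet.

// Sketch only: exercises the SegmentIdSet API used by the routing test above.
// Assumption: SegmentId sits in the same package as SegmentIdSet.
import com.linkedin.pinot.transport.common.SegmentId;
import com.linkedin.pinot.transport.common.SegmentIdSet;

public class SegmentIdSetSketch {
  public static void main(String[] args) {
    SegmentIdSet idSet = new SegmentIdSet();
    idSet.addSegment(new SegmentId("testTable_OFFLINE_0"));
    idSet.addSegment(new SegmentId("testTable_OFFLINE_1"));
    // getSegments().size() is exactly what the test uses to count a server's assigned segments.
    System.out.println("segments assigned: " + idSet.getSegments().size());
  }
}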
Use of com.linkedin.pinot.transport.common.SegmentIdSet in project pinot by linkedin.
Class PerTableRoutingConfig, method buildRequestRoutingMap.
//
// /**
// * Builds a map needed for routing the partitions in the partition-group passed.
// * There could be a different set of servers for each partition in the passed partition-group.
// *
// * @param pg segmentSet for which the routing map needs to be built.
// * @return
// */
// public Map<SegmentIdSet, List<ServerInstance>> buildRequestRoutingMap() {
// Map<SegmentIdSet, List<ServerInstance>> resultMap = new HashMap<SegmentIdSet, List<ServerInstance>>();
//
// /**
// * NOTE: After we removed the concept of partition, this needed rewriting.
// * For now, the file-based routing config maps node IDs to instances instead of segments to instances.
// * This is because it becomes difficult to configure every segment in the routing config. Instead,
// * we configure the number of nodes that constitute a replica-set. For each node, a comma-separated
// * list of instances is provided, and we pick one instance from each node.
// *
// */
// for (Entry<Integer, List<ServerInstance>> e : _nodeToInstancesMap.entrySet()) {
// SegmentId id = new SegmentId("" + e.getKey());
// SegmentIdSet idSet = new SegmentIdSet();
// idSet.addSegment(id);
// resultMap.put(idSet, e.getValue());
// }
//
// // Add default
// SegmentId id = new SegmentId("default");
// SegmentIdSet idSet = new SegmentIdSet();
// idSet.addSegment(id);
// resultMap.put(idSet, _defaultServers);
// return resultMap;
// }
/**
 * Builds the routing map used to dispatch a request: every configured default server is
 * mapped to a SegmentIdSet containing the single placeholder segment "default".
 *
 * @return map from each default server to its single-entry SegmentIdSet
 */
public Map<ServerInstance, SegmentIdSet> buildRequestRoutingMap() {
Map<ServerInstance, SegmentIdSet> resultMap = new HashMap<ServerInstance, SegmentIdSet>();
for (ServerInstance serverInstance : _defaultServers) {
SegmentId id = new SegmentId("default");
SegmentIdSet idSet = new SegmentIdSet();
idSet.addSegment(id);
resultMap.put(serverInstance, idSet);
}
return resultMap;
}
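A hypothetical caller of the rewritten buildRequestRoutingMap() only needs to walk the returned map. The sketch below uses just the map shape visible above; the snippet does not show ServerInstance's package, so the import is an assumption for illustration.

// Sketch of consuming the Map<ServerInstance, SegmentIdSet> built above; every default
// server maps to a one-element set holding the placeholder segment "default".
// Assumption: the ServerInstance import below is illustrative, its package is not shown in the snippet.
import java.util.Map;
import com.linkedin.pinot.common.response.ServerInstance;
import com.linkedin.pinot.transport.common.SegmentIdSet;

public class DefaultRoutingMapSketch {
  static void printRouting(Map<ServerInstance, SegmentIdSet> routingMap) {
    for (Map.Entry<ServerInstance, SegmentIdSet> entry : routingMap.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue().getSegments().size() + " segment(s)");
    }
  }
}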
Use of com.linkedin.pinot.transport.common.SegmentIdSet in project pinot by linkedin.
Class ScatterGatherTest, method testSingleServer.
@Test
public void testSingleServer() throws Exception {
MetricsRegistry registry = new MetricsRegistry();
// Server start
int serverPort = 7071;
NettyTCPServer server1 = new NettyTCPServer(serverPort, new TestRequestHandlerFactory(0, 1), null);
Thread t1 = new Thread(server1);
t1.start();
//Client setup
ScheduledExecutorService timedExecutor = new ScheduledThreadPoolExecutor(1);
ExecutorService poolExecutor = MoreExecutors.sameThreadExecutor();
ExecutorService service = new ThreadPoolExecutor(1, 1, 1, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), clientMetrics);
KeyedPoolImpl<ServerInstance, NettyClientConnection> pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, 1, 300000, 1, rm, timedExecutor, poolExecutor, registry);
rm.setPool(pool);
ScatterGatherImpl scImpl = new ScatterGatherImpl(pool, service);
SegmentIdSet pg = new SegmentIdSet();
pg.addSegment(new SegmentId("0"));
ServerInstance serverInstance1 = new ServerInstance("localhost", serverPort);
List<ServerInstance> instances = new ArrayList<ServerInstance>();
instances.add(serverInstance1);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
pgMap.put(serverInstance1, pg);
String request = "request_0";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg, request);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr);
final ScatterGatherStats scatterGatherStats = new ScatterGatherStats();
BrokerMetrics brokerMetrics = new BrokerMetrics(new MetricsRegistry());
CompositeFuture<ServerInstance, ByteBuf> fut = scImpl.scatterGather(req, scatterGatherStats, brokerMetrics);
Map<ServerInstance, ByteBuf> v = fut.get();
ByteBuf b = v.get(serverInstance1);
byte[] b2 = new byte[b.readableBytes()];
b.readBytes(b2);
String response = new String(b2);
Assert.assertEquals(response, "response_0_0");
Assert.assertEquals(v.size(), 1);
server1.shutdownGracefully();
pool.shutdown();
service.shutdown();
eventLoopGroup.shutdownGracefully();
}
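The response handling here (readableBytes, readBytes, new String) recurs in the tests below. A small stand-alone helper capturing that idiom might look like the sketch that follows; pinning UTF-8 is a defensive choice added here, the original tests use the platform default charset.

// Helper sketch mirroring the ByteBuf-to-String read pattern used in these tests.
import java.nio.charset.StandardCharsets;
import io.netty.buffer.ByteBuf;

public final class ResponseBytes {
  static String asString(ByteBuf buf) {
    // Drain the readable bytes into an array, then decode them as a string.
    byte[] bytes = new byte[buf.readableBytes()];
    buf.readBytes(bytes);
    return new String(bytes, StandardCharsets.UTF_8);
  }
}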
Use of com.linkedin.pinot.transport.common.SegmentIdSet in project pinot by linkedin.
Class ScatterGatherTest, method testMultipleServerError.
@Test
public void testMultipleServerError() throws Exception {
MetricsRegistry registry = new MetricsRegistry();
// Server start
int serverPort1 = 7091;
int serverPort2 = 7092;
int serverPort3 = 7093;
// error server
int serverPort4 = 7094;
NettyTCPServer server1 = new NettyTCPServer(serverPort1, new TestRequestHandlerFactory(0, 1), null);
NettyTCPServer server2 = new NettyTCPServer(serverPort2, new TestRequestHandlerFactory(1, 1), null);
NettyTCPServer server3 = new NettyTCPServer(serverPort3, new TestRequestHandlerFactory(2, 1), null);
NettyTCPServer server4 = new NettyTCPServer(serverPort4, new TestRequestHandlerFactory(3, 1, 1000, true), null);
Thread t1 = new Thread(server1);
Thread t2 = new Thread(server2);
Thread t3 = new Thread(server3);
Thread t4 = new Thread(server4);
t1.start();
t2.start();
t3.start();
t4.start();
//Client setup
ScheduledExecutorService timedExecutor = new ScheduledThreadPoolExecutor(1);
ExecutorService poolExecutor = MoreExecutors.sameThreadExecutor();
ExecutorService service = new ThreadPoolExecutor(1, 1, 1, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), clientMetrics);
KeyedPoolImpl<ServerInstance, NettyClientConnection> pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, 1, 300000, 1, rm, timedExecutor, poolExecutor, registry);
rm.setPool(pool);
SegmentIdSet pg1 = new SegmentIdSet();
pg1.addSegment(new SegmentId("0"));
SegmentIdSet pg2 = new SegmentIdSet();
pg2.addSegment(new SegmentId("1"));
SegmentIdSet pg3 = new SegmentIdSet();
pg3.addSegment(new SegmentId("2"));
SegmentIdSet pg4 = new SegmentIdSet();
pg4.addSegment(new SegmentId("3"));
ServerInstance serverInstance1 = new ServerInstance("localhost", serverPort1);
ServerInstance serverInstance2 = new ServerInstance("localhost", serverPort2);
ServerInstance serverInstance3 = new ServerInstance("localhost", serverPort3);
ServerInstance serverInstance4 = new ServerInstance("localhost", serverPort4);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
pgMap.put(serverInstance1, pg1);
pgMap.put(serverInstance2, pg2);
pgMap.put(serverInstance3, pg3);
pgMap.put(serverInstance4, pg4);
String request1 = "request_0";
String request2 = "request_1";
String request3 = "request_2";
String request4 = "request_3";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg1, request1);
pgMapStr.put(pg2, request2);
pgMapStr.put(pg3, request3);
pgMapStr.put(pg4, request4);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr, new RoundRobinReplicaSelection(), ReplicaSelectionGranularity.SEGMENT_ID_SET, 0, 1000);
ScatterGatherImpl scImpl = new ScatterGatherImpl(pool, service);
final ScatterGatherStats scatterGatherStats = new ScatterGatherStats();
final BrokerMetrics brokerMetrics = new BrokerMetrics(new MetricsRegistry());
CompositeFuture<ServerInstance, ByteBuf> fut = scImpl.scatterGather(req, scatterGatherStats, brokerMetrics);
Map<ServerInstance, ByteBuf> v = fut.get();
// Only 3 servers return a value; the error server times out.
Assert.assertEquals(v.size(), 3);
ByteBuf b = v.get(serverInstance1);
byte[] b2 = new byte[b.readableBytes()];
b.readBytes(b2);
String response = new String(b2);
Assert.assertEquals(response, "response_0_0");
b = v.get(serverInstance2);
b2 = new byte[b.readableBytes()];
b.readBytes(b2);
response = new String(b2);
Assert.assertEquals(response, "response_1_0");
b = v.get(serverInstance3);
b2 = new byte[b.readableBytes()];
b.readBytes(b2);
response = new String(b2);
Assert.assertEquals(response, "response_2_0");
//No response from 4th server
Assert.assertNull(v.get(serverInstance4), "No response from 4th server");
Map<ServerInstance, Throwable> errorMap = fut.getError();
Assert.assertEquals(errorMap.size(), 1, "One error");
Assert.assertNotNull(errorMap.get(serverInstance4), "Server4 returned timeout");
// System.out.println("Error is :" + errorMap.get(serverInstance4));
Thread.sleep(3000);
// System.out.println("Pool Stats :" + pool.getStats());
pool.getStats().refresh();
Assert.assertEquals(pool.getStats().getTotalBadDestroyed(), 1, "Total Bad destroyed");
pool.shutdown();
service.shutdown();
eventLoopGroup.shutdownGracefully();
server1.shutdownGracefully();
server2.shutdownGracefully();
server3.shutdownGracefully();
server4.shutdownGracefully();
}
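The split between fut.get() (successful responses) and fut.getError() (per-server failures) is the crux of this test. Below is a hedged sketch of a generic consumer of that result; only the get()/getError() calls are taken from the test, while the package names for CompositeFuture and ServerInstance are assumptions.

// Sketch: summarize a scatter-gather result. Successful servers appear in fut.get(),
// failed or timed-out servers (like server4 above) appear only in fut.getError().
// Assumption: the two Pinot imports below are illustrative package guesses.
import java.util.Map;
import io.netty.buffer.ByteBuf;
import com.linkedin.pinot.common.response.ServerInstance;
import com.linkedin.pinot.transport.common.CompositeFuture;

public class ScatterGatherResultSummary {
  static void summarize(CompositeFuture<ServerInstance, ByteBuf> fut) throws Exception {
    Map<ServerInstance, ByteBuf> responses = fut.get();
    Map<ServerInstance, Throwable> errors = fut.getError();
    System.out.println(responses.size() + " server(s) responded, " + errors.size() + " failed");
    for (Map.Entry<ServerInstance, Throwable> entry : errors.entrySet()) {
      System.out.println("error from " + entry.getKey() + ": " + entry.getValue());
    }
  }
}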
Use of com.linkedin.pinot.transport.common.SegmentIdSet in project pinot by linkedin.
Class ScatterGatherTest, method testSelectServers.
@Test
public void testSelectServers() throws Exception {
ExecutorService poolExecutor = MoreExecutors.sameThreadExecutor();
ScatterGatherImpl scImpl = new ScatterGatherImpl(null, poolExecutor);
{
// 1 server with 2 partitions
SegmentIdSet pg = new SegmentIdSet();
pg.addSegment(new SegmentId("0"));
pg.addSegment(new SegmentId("1"));
ServerInstance serverInstance1 = new ServerInstance("localhost", 1011);
List<ServerInstance> instances = new ArrayList<ServerInstance>();
instances.add(serverInstance1);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
Map<List<ServerInstance>, SegmentIdSet> invMap = new HashMap<List<ServerInstance>, SegmentIdSet>();
pgMap.put(serverInstance1, pg);
invMap.put(instances, pg);
String request = "request_0";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg, request);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr);
ScatterGatherRequestContext ctxt = new ScatterGatherRequestContext(req);
ctxt.setInvertedMap(invMap);
scImpl.selectServices(ctxt);
Map<ServerInstance, SegmentIdSet> resultMap = ctxt.getSelectedServers();
Assert.assertEquals(resultMap.size(), 1, "Count");
Assert.assertEquals(resultMap.get(serverInstance1), pg, "Element check");
// System.out.println(ctxt);
}
{
// 2 server with 2 partitions each
SegmentIdSet pg = new SegmentIdSet();
pg.addSegment(new SegmentId("0"));
pg.addSegment(new SegmentId("1"));
SegmentIdSet pg2 = new SegmentIdSet();
pg2.addSegment(new SegmentId("2"));
pg2.addSegment(new SegmentId("3"));
ServerInstance serverInstance1 = new ServerInstance("localhost", 1011);
ServerInstance serverInstance2 = new ServerInstance("localhost", 1012);
List<ServerInstance> instances = new ArrayList<ServerInstance>();
instances.add(serverInstance1);
List<ServerInstance> instances2 = new ArrayList<ServerInstance>();
instances2.add(serverInstance2);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
Map<List<ServerInstance>, SegmentIdSet> invMap = new HashMap<List<ServerInstance>, SegmentIdSet>();
pgMap.put(serverInstance1, pg);
pgMap.put(serverInstance2, pg2);
invMap.put(instances, pg);
invMap.put(instances2, pg2);
String request = "request_0";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg, request);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr);
ScatterGatherRequestContext ctxt = new ScatterGatherRequestContext(req);
ctxt.setInvertedMap(invMap);
scImpl.selectServices(ctxt);
Map<ServerInstance, SegmentIdSet> resultMap = ctxt.getSelectedServers();
Assert.assertEquals(resultMap.size(), 2, "Count");
Assert.assertEquals(resultMap.get(serverInstance1), pg, "Element check");
Assert.assertEquals(resultMap.get(serverInstance2), pg2, "Element check");
// System.out.println(ctxt);
}
{
// 2 servers sharing 2 partitions (Round-Robin selection) Partition-Group Granularity
SegmentIdSet pg = new SegmentIdSet();
pg.addSegment(new SegmentId("0"));
pg.addSegment(new SegmentId("1"));
ServerInstance serverInstance1 = new ServerInstance("localhost", 1011);
ServerInstance serverInstance2 = new ServerInstance("localhost", 1012);
List<ServerInstance> instances = new ArrayList<ServerInstance>();
instances.add(serverInstance1);
instances.add(serverInstance2);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
Map<List<ServerInstance>, SegmentIdSet> invMap = new HashMap<List<ServerInstance>, SegmentIdSet>();
pgMap.put(serverInstance1, pg);
pgMap.put(serverInstance2, pg);
invMap.put(instances, pg);
String request = "request_0";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg, request);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr, new RoundRobinReplicaSelection(), ReplicaSelectionGranularity.SEGMENT_ID_SET, 0, 10000);
ScatterGatherRequestContext ctxt = new ScatterGatherRequestContext(req);
ctxt.setInvertedMap(invMap);
scImpl.selectServices(ctxt);
Map<ServerInstance, SegmentIdSet> resultMap = ctxt.getSelectedServers();
Assert.assertEquals(resultMap.size(), 1, "Count");
// first server is getting selected
Assert.assertEquals(resultMap.get(serverInstance1), pg, "Element check");
// System.out.println(ctxt);
// Run selection again. Now the second server should be selected
scImpl.selectServices(ctxt);
resultMap = ctxt.getSelectedServers();
Assert.assertEquals(resultMap.size(), 1, "Count");
// second server is getting selected
Assert.assertEquals(resultMap.get(serverInstance2), pg, "Element check");
// System.out.println(ctxt);
}
{
// 2 servers sharing 2 partitions (Round-Robin selection) Partition Granularity
SegmentIdSet pg = new SegmentIdSet();
pg.addSegment(new SegmentId("0"));
pg.addSegment(new SegmentId("1"));
ServerInstance serverInstance1 = new ServerInstance("localhost", 1011);
ServerInstance serverInstance2 = new ServerInstance("localhost", 1012);
List<ServerInstance> instances = new ArrayList<ServerInstance>();
instances.add(serverInstance1);
instances.add(serverInstance2);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
Map<List<ServerInstance>, SegmentIdSet> invMap = new HashMap<List<ServerInstance>, SegmentIdSet>();
pgMap.put(serverInstance1, pg);
pgMap.put(serverInstance2, pg);
invMap.put(instances, pg);
String request = "request_0";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg, request);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr, new RoundRobinReplicaSelection(), ReplicaSelectionGranularity.SEGMENT_ID, 0, 10000);
ScatterGatherRequestContext ctxt = new ScatterGatherRequestContext(req);
ctxt.setInvertedMap(invMap);
scImpl.selectServices(ctxt);
Map<ServerInstance, SegmentIdSet> resultMap = ctxt.getSelectedServers();
Assert.assertEquals(resultMap.size(), 2, "Count");
// with SEGMENT_ID granularity, the two segments are split between the two replicas
Assert.assertFalse(resultMap.get(serverInstance1).equals(resultMap.get(serverInstance2)), "Element check");
// System.out.println(ctxt);
// Run selection again. The segments should again be split between the two replicas
scImpl.selectServices(ctxt);
resultMap = ctxt.getSelectedServers();
Assert.assertEquals(resultMap.size(), 2, "Count");
// with SEGMENT_ID granularity, the two segments are split between the two replicas
Assert.assertFalse(resultMap.get(serverInstance1).equals(resultMap.get(serverInstance2)), "Element check");
// System.out.println(ctxt);
}
}
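All three replica-selection cases above hinge on the inverted map, Map<List<ServerInstance>, SegmentIdSet>, which groups servers that can serve the same segments into a single replica list; RoundRobinReplicaSelection then only has to pick an index into each list. A minimal sketch of building that shape follows (the ServerInstance package is assumed, as before).

// Sketch: the inverted-map shape fed to ScatterGatherRequestContext.setInvertedMap() above.
// Two replicas share one SegmentIdSet, so they end up in a single replica list.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.linkedin.pinot.common.response.ServerInstance; // package assumed
import com.linkedin.pinot.transport.common.SegmentId;
import com.linkedin.pinot.transport.common.SegmentIdSet;

public class InvertedMapSketch {
  public static void main(String[] args) {
    SegmentIdSet segments = new SegmentIdSet();
    segments.addSegment(new SegmentId("0"));
    segments.addSegment(new SegmentId("1"));

    // Both replicas can serve segments 0 and 1, so they share one entry in the inverted map.
    List<ServerInstance> replicas = new ArrayList<ServerInstance>();
    replicas.add(new ServerInstance("localhost", 1011));
    replicas.add(new ServerInstance("localhost", 1012));

    Map<List<ServerInstance>, SegmentIdSet> invertedMap = new HashMap<List<ServerInstance>, SegmentIdSet>();
    invertedMap.put(replicas, segments);
    System.out.println(invertedMap);
  }
}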