Use of org.apache.cassandra.locator.NetworkTopologyStrategy in project eiger by wlloyd.
From the class StorageProxy, method getRangeSlice.
/*
 * This function executes the read protocol locally. Consistency checks are performed in the background.
 */
public static List<Row> getRangeSlice(RangeSliceCommand command, ConsistencyLevel consistency_level) throws IOException, UnavailableException, TimeoutException {
    if (logger.isDebugEnabled())
        logger.debug("Command/ConsistencyLevel is {}/{}", command.toString(), consistency_level);
    long startTime = System.nanoTime();
    List<Row> rows;
    // now scan until we have enough results
    try {
        int columnsCount = 0;
        rows = new ArrayList<Row>();
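        // split the requested range into subranges that each map onto a single set of replica endpoints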
        List<AbstractBounds<RowPosition>> ranges = getRestrictedRanges(command.range);
        for (AbstractBounds<RowPosition> range : ranges) {
            RangeSliceCommand nodeCmd = new RangeSliceCommand(command.keyspace, command.column_family, command.super_column, command.predicate, range, command.row_filter, command.maxResults, command.maxIsColumns);
            List<InetAddress> liveEndpoints = StorageService.instance.getLiveNaturalEndpoints(nodeCmd.keyspace, range.right);
            DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getBroadcastAddress(), liveEndpoints);
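            // look up the keyspace's overall replication factor and, if NetworkTopologyStrategy is used,
            // the replication factor of this node's own datacenter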
            int overallReplicationFactor = Table.open(command.keyspace).getReplicationStrategy().getReplicationFactor();
            int dcReplicationFactor = Integer.MAX_VALUE;
            if (Table.open(command.keyspace).getReplicationStrategy() instanceof NetworkTopologyStrategy) {
                String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(FBUtilities.getBroadcastAddress());
                dcReplicationFactor = ((NetworkTopologyStrategy) Table.open(command.keyspace).getReplicationStrategy()).getReplicationFactor(localDataCenter);
            }
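            // serve the slice locally when ConsistencyLevel.ONE suffices (or the overall or per-DC
            // replication factor is 1) and this node is a live replica for the subrange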
            if ((consistency_level == ConsistencyLevel.ONE || overallReplicationFactor == 1 || dcReplicationFactor == 1) && !liveEndpoints.isEmpty() && liveEndpoints.contains(FBUtilities.getBroadcastAddress())) {
                if (logger.isDebugEnabled())
                    logger.debug("local range slice");
                try {
                    rows.addAll(RangeSliceVerbHandler.executeLocally(nodeCmd));
                    for (Row row : rows)
                        columnsCount += row.getLiveColumnCount();
                } catch (ExecutionException e) {
                    throw new RuntimeException(e.getCause());
                } catch (InterruptedException e) {
                    throw new AssertionError(e);
                }
            } else {
                System.out.println("local address: " + FBUtilities.getBroadcastAddress());
                for (int i = 0; i < liveEndpoints.size(); i++) {
                    System.out.println("live endpoint: " + liveEndpoints.get(i));
                }
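                // COPS2 expects clients to send requests directly to their local nodes,
                // so the remote read path below should be unreachable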
                assert false : "Client should send requests directly to their local nodes in COPS2 " + overallReplicationFactor + ":" + dcReplicationFactor + ":" + liveEndpoints.isEmpty() + ":" + liveEndpoints.get(0);
                // collect replies and resolve according to consistency level
                RangeSliceResponseResolver resolver = new RangeSliceResponseResolver(nodeCmd.keyspace, liveEndpoints);
                ReadCallback<Iterable<Row>> handler = getReadCallback(resolver, nodeCmd, consistency_level, liveEndpoints);
                handler.assureSufficientLiveNodes();
                for (InetAddress endpoint : handler.endpoints) {
                    MessagingService.instance().sendRR(nodeCmd, endpoint, handler);
                    if (logger.isDebugEnabled())
                        logger.debug("reading " + nodeCmd + " from " + endpoint);
                }
                try {
                    for (Row row : handler.get()) {
                        rows.add(row);
                        columnsCount += row.getLiveColumnCount();
                        logger.debug("range slices read {}", row.key);
                    }
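                    // wait for any read-repair writes scheduled by the resolver, bounded by the RPC timeout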
                    FBUtilities.waitOnFutures(resolver.repairResults, DatabaseDescriptor.getRpcTimeout());
                } catch (TimeoutException ex) {
                    if (logger.isDebugEnabled())
                        logger.debug("Range slice timeout: {}", ex.toString());
                    throw ex;
                } catch (DigestMismatchException e) {
                    // no digests in range slices yet
                    throw new AssertionError(e);
                }
            }
            // if we're done, great, otherwise, move to the next range
            int count = nodeCmd.maxIsColumns ? columnsCount : rows.size();
            if (count >= nodeCmd.maxResults)
                break;
        }
    } finally {
        rangeStats.addNano(System.nanoTime() - startTime);
    }
    return trim(command, rows);
}
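For reference, the NetworkTopologyStrategy lookup above can be read in isolation: when the keyspace uses NetworkTopologyStrategy, the replication factor is tracked per datacenter, so the code asks the snitch for the local node's datacenter and fetches that datacenter's factor; otherwise only the cluster-wide factor applies. The sketch below restates that logic as a standalone helper. It is not part of the eiger source; the helper name localReplicationFactor is made up here, the calls it uses (Table.open, DatabaseDescriptor.getEndpointSnitch().getDatacenter, NetworkTopologyStrategy.getReplicationFactor) are the same ones that appear in getRangeSlice, and it assumes getReplicationStrategy() returns AbstractReplicationStrategy as in Cassandra of this era.

// Minimal sketch (hypothetical helper, not in the eiger source): resolve the replication
// factor that governs reads served from this node's datacenter.
private static int localReplicationFactor(String keyspace) {
    AbstractReplicationStrategy strategy = Table.open(keyspace).getReplicationStrategy();
    if (strategy instanceof NetworkTopologyStrategy) {
        // NetworkTopologyStrategy keeps one replication factor per datacenter; ask the
        // snitch which datacenter this node is in and look up that datacenter's factor.
        String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(FBUtilities.getBroadcastAddress());
        return ((NetworkTopologyStrategy) strategy).getReplicationFactor(localDataCenter);
    }
    // other strategies (e.g. SimpleStrategy) expose a single cluster-wide factor
    return strategy.getReplicationFactor();
}

In getRangeSlice itself, dcReplicationFactor stays at Integer.MAX_VALUE for non-NetworkTopologyStrategy keyspaces, so only the overall replication factor or ConsistencyLevel.ONE can enable the local fast path in that case.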