Use of gnu.trove.map.hash.TObjectIntHashMap in project OpenTripPlanner by opentripplanner.
The class RoundBasedProfileRouter, method findInitialStops:
/**
 * find the boarding stops
 */
private Collection<ProfileState> findInitialStops(boolean dest) {
    double lat = dest ? request.toLat : request.fromLat;
    double lon = dest ? request.toLon : request.fromLon;
    QualifiedModeSet modes = dest ? request.accessModes : request.egressModes;
    List<ProfileState> stops = Lists.newArrayList();
    RoutingRequest rr = new RoutingRequest(TraverseMode.WALK);
    rr.dominanceFunction = new DominanceFunction.EarliestArrival();
    rr.batch = true;
    rr.from = new GenericLocation(lat, lon);
    rr.walkSpeed = request.walkSpeed;
    rr.to = rr.from;
    rr.setRoutingContext(graph);
    // RoutingRequest dateTime defaults to currentTime.
    // If elapsed time is not capped, searches are very slow.
    rr.worstTime = (rr.dateTime + request.maxWalkTime * 60);
    AStar astar = new AStar();
    rr.longDistance = true;
    rr.setNumItineraries(1);
    // timeout in seconds
    ShortestPathTree spt = astar.getShortestPathTree(rr, 5);
    for (TransitStop tstop : graph.index.stopVertexForStop.values()) {
        State s = spt.getState(tstop);
        if (s != null) {
            ProfileState ps = new ProfileState();
            ps.lowerBound = ps.upperBound = (int) s.getElapsedTimeSeconds();
            ps.stop = tstop;
            ps.accessType = Type.STREET;
            stops.add(ps);
        }
    }
    Map<TripPattern, ProfileState> optimalBoardingLocation = Maps.newHashMap();
    TObjectIntMap<TripPattern> minBoardTime = new TObjectIntHashMap<TripPattern>(100, 0.75f, Integer.MAX_VALUE);
    // Only board patterns at the closest possible stop
    for (ProfileState ps : stops) {
        for (TripPattern pattern : graph.index.patternsForStop.get(ps.stop.getStop())) {
            if (ps.lowerBound < minBoardTime.get(pattern)) {
                optimalBoardingLocation.put(pattern, ps);
                minBoardTime.put(pattern, ps.lowerBound);
            }
        }
        ps.targetPatterns = Sets.newHashSet();
    }
    LOG.info("Found {} reachable stops, filtering to only board at closest stops", stops.size());
    for (Entry<TripPattern, ProfileState> e : optimalBoardingLocation.entrySet()) {
        e.getValue().targetPatterns.add(e.getKey());
    }
    for (Iterator<ProfileState> it = stops.iterator(); it.hasNext(); ) {
        if (it.next().targetPatterns.isEmpty())
            it.remove();
    }
    rr.cleanup();
    return stops;
}
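A note on the TObjectIntHashMap usage above: the third constructor argument (Integer.MAX_VALUE) is the map's no-entry value, so minBoardTime.get(pattern) returns MAX_VALUE for patterns that have not been seen yet, and the first stop to reach a pattern always wins the comparison. A minimal standalone sketch of the same min-tracking idiom, with hypothetical stop names and travel times invented for illustration:

import gnu.trove.map.TObjectIntMap;
import gnu.trove.map.hash.TObjectIntHashMap;

import java.util.HashMap;
import java.util.Map;

public class MinTrackingExample {
    public static void main(String[] args) {
        // Hypothetical input: travel times in seconds from several stops to one pattern.
        Map<String, Integer> travelTime = new HashMap<>();
        travelTime.put("stopA", 300);
        travelTime.put("stopB", 120);
        travelTime.put("stopC", 450);

        // capacity 100, load factor 0.75, no-entry value Integer.MAX_VALUE:
        // get() on an absent key returns MAX_VALUE, so any real time beats it.
        TObjectIntMap<String> minTime = new TObjectIntHashMap<>(100, 0.75f, Integer.MAX_VALUE);
        String best = null;
        for (Map.Entry<String, Integer> e : travelTime.entrySet()) {
            if (e.getValue() < minTime.get("pattern1")) {
                minTime.put("pattern1", e.getValue());
                best = e.getKey();
            }
        }
        System.out.println(best + " boards pattern1 in " + minTime.get("pattern1") + "s");
        // prints: stopB boards pattern1 in 120s
    }
}

Without the explicit no-entry value, Trove's object-to-int maps default to 0 for absent keys, which would make every comparison against an unseen pattern fail.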
Use of gnu.trove.map.hash.TObjectIntHashMap in project OpenTripPlanner by opentripplanner.
The class ConvertToFrequency, method apply:
public void apply(List<FrequencyEntry> frequencyEntries, List<TripTimes> scheduledTrips, Graph graph, BitSet servicesRunning, RaptorWorkerTimetable.BoardingAssumption assumption) {
    // preserve existing frequency entries
    this.frequencyEntries.addAll(frequencyEntries);
    Set<String> routeIds = new HashSet<>();
    if (routeId != null)
        Stream.of(routeId).forEach(routeIds::add);
    // loop over scheduled trips and figure out what to do with them
    for (TripTimes tt : scheduledTrips) {
        if (routeId == null || routeIds.contains(tt.trip.getRoute().getId().getId())) {
            // put this in the appropriate group for frequency conversion
            String key;
            switch (groupBy) {
                case ROUTE_DIRECTION:
                    key = tt.trip.getRoute().getId().getId() + "_" + tt.trip.getDirectionId();
                    break;
                case ROUTE:
                    key = tt.trip.getRoute().getId().getId();
                    break;
                case PATTERN:
                    key = graph.index.patternForTrip.get(tt.trip).getExemplar().getId().getId();
                    break;
                default:
                    throw new RuntimeException("Unrecognized group by value");
            }
            tripsToConvert.put(key, tt);
        } else {
            // don't touch this trip
            this.scheduledTrips.add(tt);
        }
    }
    // loop over all the groups and create frequency entries
    GROUPS: for (Map.Entry<String, Collection<TripTimes>> e : tripsToConvert.asMap().entrySet()) {
        // get just the running services
        List<TripTimes> group = e.getValue().stream()
                .filter(tt -> servicesRunning.get(tt.serviceCode))
                .filter(tt -> windowStart < tt.getDepartureTime(0) && tt.getDepartureTime(0) < windowEnd)
                .collect(Collectors.toList());
        if (group.isEmpty())
            continue GROUPS;
        if (group.size() == 1) {
            group.stream().forEach(scheduledTrips::add);
            continue GROUPS;
        }
        // find the dominant pattern
        TObjectIntMap<TripPattern> patternCount = new TObjectIntHashMap<>(5, 0.75f, 0);
        group.forEach(tt -> patternCount.adjustOrPutValue(graph.index.patternForTrip.get(tt.trip), 1, 1));
        int maxCount = 0;
        TripPattern tripPattern = null;
        for (TObjectIntIterator<TripPattern> it = patternCount.iterator(); it.hasNext(); ) {
            it.advance();
            if (it.value() > maxCount) {
                maxCount = it.value();
                tripPattern = it.key();
            }
        }
        // find a stop that is common to all trip patterns; sort the set so that the same common stop is always returned
        NavigableSet<Stop> stops = new TreeSet<>((s1, s2) -> s1.getId().compareTo(s2.getId()));
        stops.addAll(tripPattern.getStops());
        patternCount.keySet().stream().forEach(p -> stops.retainAll(p.getStops()));
        if (stops.isEmpty()) {
            LOG.warn("Unable to find common stop for key {}, not converting to frequencies", e.getKey());
            scheduledTrips.addAll(e.getValue());
            continue GROUPS;
        }
        Stop stop = stops.stream().findFirst().get();
        // determine the aggregate headway at this stop
        // use a set to handle duplicated trips
        TIntSet arrivalTimes = new TIntHashSet();
        for (boolean filter : new boolean[] { true, false }) {
            for (TripTimes tt : group) {
                TripPattern tp = graph.index.patternForTrip.get(tt.trip);
                int arrivalTime = tt.getArrivalTime(tp.getStops().indexOf(stop));
                // if we apply the filter and end up with no trips at this stop, we re-run with the filter disabled
                if (windowStart < arrivalTime && arrivalTime < windowEnd || !filter)
                    arrivalTimes.add(arrivalTime);
            }
            // if we found more than one arrival, stop; otherwise the outer loop re-runs with the filter turned off
            if (arrivalTimes.size() > 1)
                break;
        }
        // now convert to headways: elapsed times between consecutive arrivals
        int[] arrivalTimeArray = arrivalTimes.toArray();
        Arrays.sort(arrivalTimeArray);
        int[] headway = new int[arrivalTimeArray.length - 1];
        for (int i = 1; i < arrivalTimeArray.length; i++) {
            headway[i - 1] = arrivalTimeArray[i] - arrivalTimeArray[i - 1];
        }
        Arrays.sort(headway);
        // the headway that we will use
        int aggregateHeadway;
        if (assumption == RaptorWorkerTimetable.BoardingAssumption.WORST_CASE)
            // simple: worst-case analysis should use the worst-case headway
            aggregateHeadway = Ints.max(headway);
        else {
            // We want the average headway, weighted by the headways themselves: if there is a
            // two-minute headway and then a twenty-minute headway, customers are ten times as
            // likely to experience the twenty-minute headway (we want the average from the
            // user's perspective, not the vehicle's perspective). This is a weighted average
            // where the weight equals the headway, so it simplifies to sum(headway^2) / sum(headway).
            aggregateHeadway = IntStream.of(headway).map(h -> h * h).sum() / IntStream.of(headway).sum();
        }
        LOG.info("Headway for route {} ({}) in direction {}: {}min", tripPattern.route.getShortName(), tripPattern.route.getId().getId(), tripPattern.directionId, aggregateHeadway / 60);
        // figure out running/dwell times based on the trips on this pattern
        final TripPattern chosenTp = tripPattern;
        List<TripTimes> candidates = group.stream()
                .filter(tt -> graph.index.patternForTrip.get(tt.trip) == chosenTp)
                .collect(Collectors.toList());
        // transposed from what you'd expect: stops on the rows
        int[][] hopTimes = new int[tripPattern.getStops().size() - 1][candidates.size()];
        int[][] dwellTimes = new int[tripPattern.getStops().size()][candidates.size()];
        int tripIndex = 0;
        for (TripTimes tt : candidates) {
            for (int stopIndex = 0; stopIndex < tripPattern.getStops().size(); stopIndex++) {
                dwellTimes[stopIndex][tripIndex] = tt.getDwellTime(stopIndex);
                if (stopIndex > 0)
                    hopTimes[stopIndex - 1][tripIndex] = tt.getArrivalTime(stopIndex) - tt.getDepartureTime(stopIndex - 1);
            }
            tripIndex++;
        }
        // collapse it down to mean hop and dwell times
        int[] meanHopTimes = new int[tripPattern.getStops().size() - 1];
        int hopIndex = 0;
        for (int[] hop : hopTimes) {
            meanHopTimes[hopIndex++] = IntStream.of(hop).sum() / hop.length;
        }
        int[] meanDwellTimes = new int[tripPattern.getStops().size()];
        int dwellIndex = 0;
        for (int[] dwell : dwellTimes) {
            meanDwellTimes[dwellIndex++] = IntStream.of(dwell).sum() / dwell.length;
        }
        // phew! now let's make a frequency entry
        TripTimes tt = new TripTimes(candidates.get(0));
        int cumulative = 0;
        for (int i = 0; i < tt.getNumStops(); i++) {
            tt.updateArrivalTime(i, cumulative);
            cumulative += meanDwellTimes[i];
            tt.updateDepartureTime(i, cumulative);
            if (i + 1 < tt.getNumStops())
                cumulative += meanHopTimes[i];
        }
        FrequencyEntry fe = new FrequencyEntry(windowStart - 60 * 60 * 3, windowEnd + 60 * 60 * 3, aggregateHeadway, false, tt);
        this.frequencyEntries.add(fe);
    }
}
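The weighted-average branch is worth unpacking. A rider arriving at a uniformly random time falls into a gap between vehicles with probability proportional to that gap's length, so the headway they can expect to experience is the headway-weighted mean, which simplifies to sum(h^2) / sum(h). A standalone sketch of just that computation, with made-up headway values:

import java.util.stream.IntStream;

public class ExpectedHeadwayExample {
    public static void main(String[] args) {
        // Hypothetical headways in seconds: one 2-minute gap, one 20-minute gap.
        int[] headway = { 120, 1200 };
        // Naive vehicle-perspective mean: (120 + 1200) / 2 = 660 s.
        int vehicleMean = IntStream.of(headway).sum() / headway.length;
        // Rider-perspective mean: sum(h^2) / sum(h) = 1454400 / 1320 = 1101 s,
        // because a random arrival is ten times as likely to land in the long gap.
        int riderMean = IntStream.of(headway).map(h -> h * h).sum() / IntStream.of(headway).sum();
        System.out.println(vehicleMean + "s vs " + riderMean + "s");
    }
}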
Use of gnu.trove.map.hash.TObjectIntHashMap in project FoamFix by asiekierka.
The class FoamFixModelDeduplicate, method debugCountModels:
private void debugCountModels(ModelBakeEvent event) {
    List<String> bmNames = new ArrayList<>();
    TObjectIntMap<String> bmCountMod = new TObjectIntHashMap<>();
    TObjectIntMap<String> bmCountVariant = new TObjectIntHashMap<>();
    for (ModelResourceLocation loc : event.getModelRegistry().getKeys()) {
        bmNames.add(loc.toString());
        bmCountMod.adjustOrPutValue(loc.getResourceDomain(), 1, 1);
        bmCountVariant.adjustOrPutValue(loc.getResourceDomain() + ":" + loc.getResourcePath(), 1, 1);
    }
    List<String> bmCountModKeys = new ArrayList<>(bmCountMod.keySet());
    List<String> bmCountVariantKeys = new ArrayList<>(bmCountVariant.keySet());
    bmNames.sort(Comparator.naturalOrder());
    bmCountModKeys.sort(Comparator.comparingInt(bmCountMod::get).reversed());
    bmCountVariantKeys.sort(Comparator.comparingInt(bmCountVariant::get).reversed());
    try {
        File outFile = new File("foamfixBakedModelNames.txt");
        PrintWriter writer = new PrintWriter(outFile);
        for (String s : bmNames) {
            writer.println(s);
        }
        writer.close();
        outFile = new File("foamfixBakedModelCountsPerMod.txt");
        writer = new PrintWriter(outFile);
        for (String s : bmCountModKeys) {
            writer.println(s + ": " + bmCountMod.get(s));
        }
        writer.close();
        outFile = new File("foamfixBakedModelCountsPerBlock.txt");
        writer = new PrintWriter(outFile);
        for (String s : bmCountVariantKeys) {
            writer.println(s + ": " + bmCountVariant.get(s));
        }
        writer.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
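The counting idiom here, adjustOrPutValue(key, 1, 1), increments an existing count by 1 or inserts 1 for a new key, avoiding the boxed get/put dance of a HashMap<String, Integer>. A minimal sketch of the same count-then-rank pattern, with hypothetical input strings:

import gnu.trove.map.TObjectIntMap;
import gnu.trove.map.hash.TObjectIntHashMap;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class CountAndRankExample {
    public static void main(String[] args) {
        String[] domains = { "minecraft", "foamfix", "minecraft", "minecraft" };
        TObjectIntMap<String> counts = new TObjectIntHashMap<>();
        for (String d : domains) {
            // add 1 if the key is present, otherwise put 1
            counts.adjustOrPutValue(d, 1, 1);
        }
        List<String> keys = new ArrayList<>(counts.keySet());
        // highest count first, as in debugCountModels above
        keys.sort(Comparator.comparingInt(counts::get).reversed());
        for (String k : keys) {
            System.out.println(k + ": " + counts.get(k));
        }
        // prints: minecraft: 3, then foamfix: 1
    }
}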
Use of gnu.trove.map.hash.TObjectIntHashMap in project BetterRain by OreCruncher.
The class PlayerSoundEffectHandler, method getBiomeSounds:
private static List<SoundEffect> getBiomeSounds(final String conditions) {
    // Need to collect sounds from all the applicable biomes
    // along with their weights.
    final TObjectIntHashMap<SoundEffect> sounds = new TObjectIntHashMap<SoundEffect>();
    final TObjectIntHashMap<BiomeGenBase> weights = BiomeSurveyHandler.getBiomes();
    for (final BiomeGenBase biome : weights.keySet()) {
        final List<SoundEffect> bs = BiomeRegistry.getSounds(biome, conditions);
        for (final SoundEffect sound : bs)
            sounds.put(sound, sounds.get(sound) + weights.get(biome));
    }
    // Scale the volumes in the resulting list based on the weights
    final List<SoundEffect> result = new ArrayList<SoundEffect>();
    final int area = BiomeSurveyHandler.getArea();
    for (final SoundEffect sound : sounds.keySet()) {
        final float scale = 0.3F + 0.7F * ((float) sounds.get(sound) / (float) area);
        result.add(SoundEffect.scaleVolume(sound, scale));
    }
    return result;
}
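Note that the accumulation sounds.put(sound, sounds.get(sound) + weights.get(biome)) works only because Trove's default no-entry value for int maps is 0, so an absent sound contributes nothing to the sum; adjustOrPutValue expresses the same thing more directly. A sketch of the weighted accumulation and the 0.3-to-1.0 volume scaling, using stand-in String keys and invented weights in place of the real SoundEffect and biome types:

import gnu.trove.map.TObjectIntMap;
import gnu.trove.map.hash.TObjectIntHashMap;

public class WeightedAccumulationExample {
    public static void main(String[] args) {
        // Hypothetical biome weights: how many surveyed blocks fell in each biome.
        TObjectIntMap<String> weights = new TObjectIntHashMap<>();
        weights.put("forest", 30);
        weights.put("plains", 70);

        // "birds" plays in both biomes, "wind" only in plains.
        TObjectIntMap<String> sounds = new TObjectIntHashMap<>();
        sounds.adjustOrPutValue("birds", weights.get("forest"), weights.get("forest"));
        sounds.adjustOrPutValue("birds", weights.get("plains"), weights.get("plains"));
        sounds.adjustOrPutValue("wind", weights.get("plains"), weights.get("plains"));

        final int area = 100; // total surveyed area
        for (String sound : sounds.keySet()) {
            // same scaling shape as getBiomeSounds: full weight -> 1.0, zero weight -> 0.3
            float scale = 0.3F + 0.7F * ((float) sounds.get(sound) / (float) area);
            System.out.println(sound + " volume scale = " + scale);
        }
        // birds covers the whole area, so it scales to 1.0; wind scales to 0.79.
    }
}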