Use of org.onebusaway.gtfs.serialization.GtfsWriter in the project onebusaway-application-modules by camsys.
From the class HastusGtfsFactory, method run:
/**
 * Runs the Hastus-to-GTFS conversion pipeline: sets up the CSV report
 * logger, converts agencies, stops, route stop sequences, shapes,
 * schedules and calendars into the DAO, serializes the DAO as a GTFS
 * feed, then applies post-write modifications and summarizes the report.
 *
 * @throws Exception if any conversion or I/O step fails
 */
public void run() throws Exception {
  logger = new MultiCSVLogger();
  // hack to swap directories: derive the CSV report directory from the
  // GTFS output path by replacing "_gtfs/29" with "s".
  // NOTE(review): fragile string surgery tied to one deployment's path
  // layout -- confirm the convention still holds before reusing.
  String csvDir = _gtfsOutputPath.toString().replace("_gtfs/29", "s");
  logger.setBasePath(new File(csvDir));
  _log.info("setting up MultiCSVLogger at path " + csvDir);
  processAgency();
  processStops();
  processRoutesStopSequences();
  processShapes();
  processSchedules();
  processCalendars();
  GtfsWriter writer = new GtfsWriter();
  writer.setOutputLocation(_gtfsOutputPath);
  try {
    writer.run(_dao);
  } finally {
    // Always release the writer's output, even if serialization fails
    // part-way; previously close() was skipped when run() threw.
    writer.close();
  }
  // Release the dao so the (potentially large) feed becomes collectible
  // before the post-processing steps run.
  _dao = null;
  applyModifications();
  logger.summarize();
  _log.info("MultiCSVLogger summarize called");
}
Use of org.onebusaway.gtfs.serialization.GtfsWriter in the project onebusaway-application-modules by camsys.
From the class GtfsCombinerMain, method writeGtfs:
/**
 * Serializes the combined GTFS data store to the given output location.
 *
 * @param outputDirectory directory (or feed file) to write the feed to
 * @param store the DAO holding the combined GTFS entities
 * @param schema entity schema factory controlling field serialization
 * @throws IOException if writing the feed fails
 */
private static void writeGtfs(File outputDirectory, GtfsDaoImpl store, DefaultEntitySchemaFactory schema) throws IOException {
  GtfsWriter writer = new GtfsWriter();
  writer.setOutputLocation(outputDirectory);
  writer.setEntitySchemaFactory(schema);
  try {
    writer.run(store);
  } finally {
    // The writer was never closed here; close() is required to flush and
    // release the underlying output (notably when writing a zip feed).
    writer.close();
  }
}
Use of org.onebusaway.gtfs.serialization.GtfsWriter in the project onebusaway-gtfs-modules by OneBusAway.
From the class GtfsMerger, method run:
/**
 * Merges the GTFS feeds at {@code inputPaths} into a single feed written
 * to {@code outputPath}. Feeds are processed in reverse order (newest
 * last-listed feed first) so newer entities win id conflicts, and the
 * merged output's lastModifiedTime is set to the newest input file's
 * timestamp when that timestamp is known.
 *
 * @param inputPaths GTFS feeds to merge
 * @param outputPath destination feed (file or directory)
 * @throws IOException on read or write failure
 */
public void run(List<File> inputPaths, File outputPath) throws IOException {
  GtfsRelationalDaoImpl mergedDao = new GtfsRelationalDaoImpl();
  // Pack shape points and stop times to reduce memory for large feeds.
  mergedDao.setPackShapePoints(true);
  mergedDao.setPackStopTimes(true);
  List<EntityMergeStrategy> strategies = new ArrayList<EntityMergeStrategy>();
  buildStrategies(strategies);
  /**
   * For each entity merge strategy, we keep track of a mapping from raw GTFS
   * ids to entities, if the particular entity type has an identifier. This
   * will be used to detect id conflicts between subsequent runs of each merge
   * strategy on different feeds. We can't use the AgencyAndId ids in the DAO
   * because it might be possible for two entities with the same id but
   * different agency prefixes to sneak in. Since we ultimately serialize the
   * data to a GTFS feed with no agency prefixes, we need to track the raw id.
   */
  Map<EntityMergeStrategy, Map<String, Object>> rawEntityIdMapsByMergeStrategy = new HashMap<EntityMergeStrategy, Map<String, Object>>();
  for (EntityMergeStrategy strategy : strategies) {
    rawEntityIdMapsByMergeStrategy.put(strategy, new HashMap<String, Object>());
  }
  /**
   * We iterate over the input feeds in reverse order, such that entities from
   * the newest feeds are added first and older entities are potentially
   * dropped.
   */
  long newestFile = Long.MIN_VALUE;
  for (int index = inputPaths.size() - 1; index >= 0; --index) {
    File inputPath = inputPaths.get(index);
    String prefix = getIndexAsPrefix(index, inputPaths.size());
    FileTime fileTime = null;
    if (inputPath.isFile()) {
      fileTime = ((FileTime) Files.readAttributes(inputPath.toPath(), "lastModifiedTime").get("lastModifiedTime"));
      if (fileTime != null && fileTime.toMillis() > newestFile) {
        newestFile = fileTime.toMillis();
      }
    }
    _log.info("reading input: " + inputPath + " with lastModifiedTime " + fileTime);
    GtfsReader reader = new GtfsReader();
    reader.setInputLocation(inputPath);
    GtfsRelationalDaoImpl dao = new GtfsRelationalDaoImpl();
    dao.setPackShapePoints(true);
    dao.setPackStopTimes(true);
    reader.setEntityStore(dao);
    reader.run();
    for (EntityMergeStrategy strategy : strategies) {
      _log.info("strategy=" + strategy.getClass());
      GtfsMergeContext context = new GtfsMergeContext(dao, mergedDao, prefix, rawEntityIdMapsByMergeStrategy.get(strategy));
      strategy.merge(context);
    }
  }
  _log.info("writing merged output: " + outputPath);
  GtfsWriter writer = new GtfsWriter();
  writer.setOutputLocation(outputPath);
  try {
    writer.run(mergedDao);
  } finally {
    // Previously the writer was never closed; close() releases the
    // underlying output (a zip stream when outputPath is a file).
    writer.close();
  }
  if (outputPath.isFile()) {
    // Guard against newestFile never being set (e.g. all inputs were
    // directories): stamping Long.MIN_VALUE would write a nonsense
    // negative timestamp.
    if (newestFile != Long.MIN_VALUE) {
      _log.info("setting merged file lastModified to " + new Date(newestFile));
      Files.setAttribute(outputPath.toPath(), "lastModifiedTime", FileTime.fromMillis(newestFile));
    }
  } else {
    _log.info("outputPath not a file, skipping");
  }
}
Use of org.onebusaway.gtfs.serialization.GtfsWriter in the project onebusaway-gtfs-modules by OneBusAway.
From the class ExtensionsTest, method testExtensionWrite:
/**
 * Round-trip test for GTFS extension fields: builds a minimal feed whose
 * stops carry a {@code StopExtension}, writes it out with an
 * extension-aware {@code GtfsWriter}, reads it back, and verifies the
 * extension value survives serialization.
 */
@Test
public void testExtensionWrite() throws IOException {
  DefaultEntitySchemaFactory factory = GtfsEntitySchemaFactory.createEntitySchemaFactory();
  factory.addExtension(Stop.class, StopExtension.class);

  // Phase 1: create a mock feed with labeled stops, confirm the extension
  // is populated on read, then serialize the DAO to the temp directory.
  {
    MockGtfs gtfs = MockGtfs.create();
    gtfs.putMinimal();
    gtfs.putStops(2, "label=a,b");
    GtfsReader feedReader = new GtfsReader();
    feedReader.setEntitySchemaFactory(factory);
    GtfsMutableRelationalDao sourceDao = gtfs.read(feedReader);
    Stop sourceStop = sourceDao.getStopForId(new AgencyAndId("a0", "s0"));
    StopExtension sourceExtension = sourceStop.getExtension(StopExtension.class);
    assertEquals("a", sourceExtension.getLabel());
    GtfsWriter writer = new GtfsWriter();
    writer.setEntitySchemaFactory(factory);
    writer.setOutputLocation(_tmpDirectory);
    writer.run(sourceDao);
    writer.close();
  }

  // Phase 2: read the serialized feed back and verify the stop still
  // carries the expected extension label.
  {
    GtfsReader roundTripReader = new GtfsReader();
    roundTripReader.setEntitySchemaFactory(factory);
    roundTripReader.setInputLocation(_tmpDirectory);
    GtfsRelationalDaoImpl roundTripDao = new GtfsRelationalDaoImpl();
    roundTripReader.setDefaultAgencyId("a0");
    roundTripReader.setEntityStore(roundTripDao);
    roundTripReader.readEntities(Stop.class);
    Stop roundTripStop = roundTripDao.getStopForId(new AgencyAndId("a0", "s0"));
    StopExtension roundTripExtension = roundTripStop.getExtension(StopExtension.class);
    assertEquals("a", roundTripExtension.getLabel());
  }
}
Aggregations