diff --git a/.travis.yml b/.travis.yml index 518f41399..c691130b5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -64,5 +64,7 @@ notifications: # Push results to codecov.io and run semantic-release (releases only created on pushes to the master branch). after_success: - - bash <(curl -s https://codecov.io/bash) - semantic-release --prepare @conveyal/maven-semantic-release --publish @semantic-release/github,@conveyal/maven-semantic-release --verify-conditions @semantic-release/github,@conveyal/maven-semantic-release --verify-release @conveyal/maven-semantic-release --use-conveyal-workflow --dev-branch=dev + # run codecov after semantic-release because maven-semantic-release creates extra commits that + # codecov will need a report on to reference in future PRs to the release branch + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/src/main/java/com/conveyal/gtfs/GTFS.java b/src/main/java/com/conveyal/gtfs/GTFS.java index 52bc37f5a..32d1147c3 100644 --- a/src/main/java/com/conveyal/gtfs/GTFS.java +++ b/src/main/java/com/conveyal/gtfs/GTFS.java @@ -5,6 +5,7 @@ import com.conveyal.gtfs.loader.JdbcGtfsExporter; import com.conveyal.gtfs.loader.JdbcGtfsLoader; import com.conveyal.gtfs.loader.JdbcGtfsSnapshotter; +import com.conveyal.gtfs.loader.SnapshotResult; import com.conveyal.gtfs.util.InvalidNamespaceException; import com.conveyal.gtfs.validator.ValidationResult; import com.fasterxml.jackson.databind.ObjectMapper; @@ -83,9 +84,9 @@ public static FeedLoadResult load (String filePath, DataSource dataSource) { * @param dataSource JDBC connection to existing database * @return FIXME should this be a separate SnapshotResult object? 
*/ - public static FeedLoadResult makeSnapshot (String feedId, DataSource dataSource) { + public static SnapshotResult makeSnapshot (String feedId, DataSource dataSource) { JdbcGtfsSnapshotter snapshotter = new JdbcGtfsSnapshotter(feedId, dataSource); - FeedLoadResult result = snapshotter.copyTables(); + SnapshotResult result = snapshotter.copyTables(); return result; } diff --git a/src/main/java/com/conveyal/gtfs/error/NewGTFSErrorType.java b/src/main/java/com/conveyal/gtfs/error/NewGTFSErrorType.java index 02fc5075d..b35037642 100644 --- a/src/main/java/com/conveyal/gtfs/error/NewGTFSErrorType.java +++ b/src/main/java/com/conveyal/gtfs/error/NewGTFSErrorType.java @@ -44,10 +44,14 @@ public enum NewGTFSErrorType { SERVICE_NEVER_ACTIVE(Priority.MEDIUM, "A service code was defined, but is never active on any date."), SERVICE_UNUSED(Priority.MEDIUM, "A service code was defined, but is never referenced by any trips."), SHAPE_DIST_TRAVELED_NOT_INCREASING(Priority.MEDIUM, "Shape distance traveled must increase with stop times."), + STOP_DESCRIPTION_SAME_AS_NAME(Priority.LOW, "The description of a stop is identical to its name, so does not add any information."), STOP_LOW_POPULATION_DENSITY(Priority.HIGH, "A stop is located in a geographic area with very low human population density."), + STOP_NAME_MISSING(Priority.MEDIUM, "A stop does not have a name."), STOP_GEOGRAPHIC_OUTLIER(Priority.HIGH, "This stop is located very far from the middle 90% of stops in this feed."), STOP_UNUSED(Priority.MEDIUM, "This stop is not referenced by any trips."), TRIP_EMPTY(Priority.HIGH, "This trip is defined but has no stop times."), + TRIP_HEADSIGN_CONTAINS_ROUTE_NAME(Priority.LOW, "A trip headsign contains the route name, but should only contain information to distinguish it from other trips for the route."), + TRIP_HEADSIGN_SHOULD_DESCRIBE_DESTINATION_OR_WAYPOINTS(Priority.LOW, "A trip headsign begins with 'to' or 'towards', but should begin with destination or direction and optionally 
include waypoints with 'via'"), TRIP_NEVER_ACTIVE(Priority.MEDIUM, "A trip is defined, but its service is never running on any date."), ROUTE_UNUSED(Priority.HIGH, "This route is defined but has no trips."), TRAVEL_DISTANCE_ZERO(Priority.MEDIUM, "The vehicle does not cover any distance between the last stop and this one."), diff --git a/src/main/java/com/conveyal/gtfs/error/SQLErrorStorage.java b/src/main/java/com/conveyal/gtfs/error/SQLErrorStorage.java index eea55f3a2..99b8a0df1 100644 --- a/src/main/java/com/conveyal/gtfs/error/SQLErrorStorage.java +++ b/src/main/java/com/conveyal/gtfs/error/SQLErrorStorage.java @@ -131,6 +131,7 @@ private void commit() { */ public void commitAndClose() { try { + LOG.info("Committing errors and closing SQL connection."); this.commit(); // Close the connection permanently (should be called only after errorStorage instance no longer needed). connection.close(); diff --git a/src/main/java/com/conveyal/gtfs/graphql/GraphQLGtfsSchema.java b/src/main/java/com/conveyal/gtfs/graphql/GraphQLGtfsSchema.java index b5702b85e..d0ee71372 100644 --- a/src/main/java/com/conveyal/gtfs/graphql/GraphQLGtfsSchema.java +++ b/src/main/java/com/conveyal/gtfs/graphql/GraphQLGtfsSchema.java @@ -381,6 +381,7 @@ public class GraphQLGtfsSchema { .field(MapFetcher.field("shape_dist_traveled", GraphQLFloat)) .field(MapFetcher.field("drop_off_type", GraphQLInt)) .field(MapFetcher.field("pickup_type", GraphQLInt)) + .field(MapFetcher.field("stop_sequence", GraphQLInt)) .field(MapFetcher.field("timepoint", GraphQLInt)) // FIXME: This will only returns a list with one stop entity (unless there is a referential integrity issue) // Should this be modified to be an object, rather than a list? 
@@ -390,7 +391,6 @@ public class GraphQLGtfsSchema { .dataFetcher(new JDBCFetcher("stops", "stop_id")) .build() ) - .field(MapFetcher.field("stop_sequence", GraphQLInt)) .build(); /** diff --git a/src/main/java/com/conveyal/gtfs/loader/Feed.java b/src/main/java/com/conveyal/gtfs/loader/Feed.java index c56fb099b..bb896196f 100644 --- a/src/main/java/com/conveyal/gtfs/loader/Feed.java +++ b/src/main/java/com/conveyal/gtfs/loader/Feed.java @@ -81,9 +81,7 @@ public ValidationResult validate () { SQLErrorStorage errorStorage = null; try { errorStorage = new SQLErrorStorage(dataSource.getConnection(), tablePrefix, false); - } catch (SQLException ex) { - throw new StorageException(ex); - } catch (InvalidNamespaceException ex) { + } catch (SQLException | InvalidNamespaceException ex) { throw new StorageException(ex); } int errorCountBeforeValidation = errorStorage.getErrorCount(); diff --git a/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsExporter.java b/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsExporter.java index ca05d7a74..9708c0f50 100644 --- a/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsExporter.java +++ b/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsExporter.java @@ -309,7 +309,7 @@ public FeedLoadResult exportTables() { // TableLoadResult. 
LOG.error("Exception while creating snapshot: {}", ex.toString()); ex.printStackTrace(); - result.fatalException = ex.getMessage(); + result.fatalException = ex.toString(); } return result; } @@ -354,7 +354,7 @@ private TableLoadResult export (Table table, Connection connection) { } catch (SQLException e) { LOG.error("failed to generate select statement for existing fields"); TableLoadResult tableLoadResult = new TableLoadResult(); - tableLoadResult.fatalException = e.getMessage(); + tableLoadResult.fatalException = e.toString(); e.printStackTrace(); return tableLoadResult; } @@ -402,7 +402,7 @@ private TableLoadResult export (Table table, String filterSql) { } catch (SQLException ex) { ex.printStackTrace(); } - tableLoadResult.fatalException = e.getMessage(); + tableLoadResult.fatalException = e.toString(); LOG.error("Exception while exporting tables", e); } return tableLoadResult; diff --git a/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsLoader.java b/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsLoader.java index 92e83d17b..0a536b5ce 100644 --- a/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsLoader.java +++ b/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsLoader.java @@ -169,7 +169,7 @@ public FeedLoadResult loadTables () { // TODO catch exceptions separately while loading each table so load can continue, store in TableLoadResult LOG.error("Exception while loading GTFS file: {}", ex.toString()); ex.printStackTrace(); - result.fatalException = ex.getMessage(); + result.fatalException = ex.toString(); } return result; } @@ -251,7 +251,7 @@ private void registerFeed (File gtfsFile) { connection.commit(); LOG.info("Created new feed namespace: {}", insertStatement); } catch (Exception ex) { - LOG.error("Exception while registering new feed namespace in feeds table: {}", ex.getMessage()); + LOG.error("Exception while registering new feed namespace in feeds table", ex); DbUtils.closeQuietly(connection); } } diff --git 
a/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsSnapshotter.java b/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsSnapshotter.java index 96f946044..415df8aa5 100644 --- a/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsSnapshotter.java +++ b/src/main/java/com/conveyal/gtfs/loader/JdbcGtfsSnapshotter.java @@ -56,9 +56,9 @@ public JdbcGtfsSnapshotter(String feedId, DataSource dataSource) { /** * Copy primary entity tables as well as Pattern and PatternStops tables. */ - public FeedLoadResult copyTables() { + public SnapshotResult copyTables() { // This result object will be returned to the caller to summarize the feed and report any critical errors. - FeedLoadResult result = new FeedLoadResult(); + SnapshotResult result = new SnapshotResult(); try { long startTime = System.currentTimeMillis(); @@ -89,7 +89,7 @@ public FeedLoadResult copyTables() { copy(Table.PATTERNS, true); copy(Table.PATTERN_STOP, true); // see method comments fo why different logic is needed for this table - createScheduleExceptionsTable(); + result.scheduleExceptions = createScheduleExceptionsTable(); result.shapes = copy(Table.SHAPES, true); result.stops = copy(Table.STOPS, true); // TODO: Should we defer index creation on stop times? @@ -106,7 +106,7 @@ public FeedLoadResult copyTables() { // TableLoadResult. 
LOG.error("Exception while creating snapshot: {}", ex.toString()); ex.printStackTrace(); - result.fatalException = ex.getMessage(); + result.fatalException = ex.toString(); } return result; } @@ -145,7 +145,7 @@ private TableLoadResult copy (Table table, boolean createIndexes) { connection.commit(); LOG.info("Done."); } catch (Exception ex) { - tableLoadResult.fatalException = ex.getMessage(); + tableLoadResult.fatalException = ex.toString(); LOG.error("Error: ", ex); try { connection.rollback(); @@ -217,6 +217,11 @@ private TableLoadResult createScheduleExceptionsTable() { Multimap removedServiceForDate = HashMultimap.create(); Multimap addedServiceForDate = HashMultimap.create(); for (CalendarDate calendarDate : calendarDates) { + // Skip any null dates + if (calendarDate.date == null) { + LOG.warn("Encountered calendar date record with null value for date field. Skipping."); + continue; + } String date = calendarDate.date.format(DateTimeFormatter.BASIC_ISO_DATE); if (calendarDate.exception_type == 1) { addedServiceForDate.put(date, calendarDate.service_id); @@ -293,8 +298,8 @@ private TableLoadResult createScheduleExceptionsTable() { } connection.commit(); - } catch (SQLException e) { - tableLoadResult.fatalException = e.getMessage(); + } catch (Exception e) { + tableLoadResult.fatalException = e.toString(); LOG.error("Error creating schedule Exceptions: ", e); e.printStackTrace(); try { @@ -427,7 +432,7 @@ private void registerSnapshot () { connection.commit(); LOG.info("Created new snapshot namespace: {}", insertStatement); } catch (Exception ex) { - LOG.error("Exception while registering snapshot namespace in feeds table: {}", ex.getMessage()); + LOG.error("Exception while registering snapshot namespace in feeds table", ex); DbUtils.closeQuietly(connection); } } diff --git a/src/main/java/com/conveyal/gtfs/loader/JdbcTableWriter.java b/src/main/java/com/conveyal/gtfs/loader/JdbcTableWriter.java index 94f77418d..4b341081e 100644 --- 
a/src/main/java/com/conveyal/gtfs/loader/JdbcTableWriter.java +++ b/src/main/java/com/conveyal/gtfs/loader/JdbcTableWriter.java @@ -77,9 +77,11 @@ public JdbcTableWriter ( // TODO: verify specTable.name is ok to use for constructing dynamic sql statements this.specTable = specTable; + // Below is a bit messy because the connection field on this class is set to final and we cannot set this to + // the optionally passed-in connection with the ternary statement unless connection1 already exists. Connection connection1; try { - connection1 = dataSource.getConnection(); + connection1 = this.dataSource.getConnection(); } catch (SQLException e) { e.printStackTrace(); connection1 = null; @@ -167,14 +169,61 @@ public String update(Integer id, String json, boolean autoCommit) throws SQLExce int entityId = isCreating ? (int) newId : id; // Cast child entities to array node to iterate over. ArrayNode childEntitiesArray = (ArrayNode)childEntities; - updateChildTable(childEntitiesArray, entityId, isCreating, referencingTable, connection); + boolean referencedPatternUsesFrequencies = false; + // If an entity references a pattern (e.g., pattern stop or trip), determine whether the pattern uses + // frequencies because this impacts update behaviors, for example whether stop times are kept in + // sync with default travel times or whether frequencies are allowed to be nested with a JSON trip. 
+ if (jsonObject.has("pattern_id") && !jsonObject.get("pattern_id").isNull()) { + PreparedStatement statement = connection.prepareStatement(String.format( + "select use_frequency from %s.%s where pattern_id = ?", + tablePrefix, + Table.PATTERNS.name + )); + statement.setString(1, jsonObject.get("pattern_id").asText()); + LOG.info(statement.toString()); + ResultSet selectResults = statement.executeQuery(); + while (selectResults.next()) { + referencedPatternUsesFrequencies = selectResults.getBoolean(1); + } + } + updateChildTable( + childEntitiesArray, + entityId, + referencedPatternUsesFrequencies, + isCreating, + referencingTable, + connection + ); } } - // Iterate over table's fields and apply linked values to any tables - if ("routes".equals(specTable.name)) { - updateLinkedFields(specTable, jsonObject, "trips", "route_id", "wheelchair_accessible"); - } else if ("patterns".equals(specTable.name)) { - updateLinkedFields(specTable, jsonObject, "trips", "pattern_id", "direction_id"); + // Iterate over table's fields and apply linked values to any tables. This is to account for "exemplar" + // fields that exist in one place in our tables, but are duplicated in GTFS. For example, we have a + // Route#wheelchair_accessible field, which is used to set the Trip#wheelchair_accessible values for all + // trips on a route. + // NOTE: pattern_stops linked fields are updated in the updateChildTable method. + switch (specTable.name) { + case "routes": + updateLinkedFields( + specTable, + jsonObject, + "trips", + "route_id", + "wheelchair_accessible" + ); + break; + case "patterns": + updateLinkedFields( + specTable, + jsonObject, + "trips", + "pattern_id", + "direction_id", "shape_id" + ); + break; + default: + LOG.debug("No linked fields to update."); + // Do nothing. 
+ break; } if (autoCommit) { // If nothing failed up to this point, it is safe to assume there were no problems updating/creating the @@ -204,25 +253,37 @@ public String update(Integer id, String json, boolean autoCommit) throws SQLExce * tables (for now just fields in trips and stop_times) where the reference table's value should take precedence over * the related table (e.g., pattern_stop#timepoint should update all of its related stop_times). */ - private void updateLinkedFields(Table referenceTable, ObjectNode jsonObject, String tableName, String keyField, String ...fieldNames) throws SQLException { + private void updateLinkedFields( + Table referenceTable, + ObjectNode exemplarEntity, + String linkedTableName, + String keyField, + String ...linkedFieldsToUpdate + ) throws SQLException { + boolean updatingStopTimes = "stop_times".equals(linkedTableName); // Collect fields, the JSON values for these fields, and the strings to add to the prepared statement into Lists. List fields = new ArrayList<>(); List values = new ArrayList<>(); List fieldStrings = new ArrayList<>(); - for (String field : fieldNames) { + for (String field : linkedFieldsToUpdate) { fields.add(referenceTable.getFieldForName(field)); - values.add(jsonObject.get(field)); + values.add(exemplarEntity.get(field)); fieldStrings.add(String.format("%s = ?", field)); } String setFields = String.join(", ", fieldStrings); // If updating stop_times, use a more complex query that joins trips to stop_times in order to match on pattern_id - boolean updatingStopTimes = "stop_times".equals(tableName); Field orderField = updatingStopTimes ? referenceTable.getFieldForName(referenceTable.getOrderFieldName()) : null; String sql = updatingStopTimes - ? String.format("update %s.stop_times st set %s from %s.trips t " + - "where st.trip_id = t.trip_id AND t.%s = ? 
AND st.%s = ?", - tablePrefix, setFields, tablePrefix, keyField, orderField.name) - : String.format("update %s.%s set %s where %s = ?", tablePrefix, tableName, setFields, keyField); + ? String.format( + "update %s.stop_times st set %s from %s.trips t " + + "where st.trip_id = t.trip_id AND t.%s = ? AND st.%s = ?", + tablePrefix, + setFields, + tablePrefix, + keyField, + orderField.name + ) + : String.format("update %s.%s set %s where %s = ?", tablePrefix, linkedTableName, setFields, keyField); // Prepare the statement and set statement parameters PreparedStatement statement = connection.prepareStatement(sql); int oneBasedIndex = 1; @@ -234,16 +295,16 @@ private void updateLinkedFields(Table referenceTable, ObjectNode jsonObject, Str else field.setParameter(statement, oneBasedIndex++, newValue); } // Set "where clause" with value for key field (e.g., set values where pattern_id = '3') - statement.setString(oneBasedIndex++, jsonObject.get(keyField).asText()); + statement.setString(oneBasedIndex++, exemplarEntity.get(keyField).asText()); if (updatingStopTimes) { // If updating stop times set the order field parameter (stop_sequence) - String orderValue = jsonObject.get(orderField.name).asText(); + String orderValue = exemplarEntity.get(orderField.name).asText(); orderField.setParameter(statement, oneBasedIndex++, orderValue); } // Log query, execute statement, and log result. LOG.debug(statement.toString()); int entitiesUpdated = statement.executeUpdate(); - LOG.debug("{} {} linked fields updated", entitiesUpdated, tableName); + LOG.debug("{} {} linked fields updated", entitiesUpdated, linkedTableName); } /** @@ -252,7 +313,14 @@ private void updateLinkedFields(Table referenceTable, ObjectNode jsonObject, Str * object here is provided as a positional argument (rather than provided via the JdbcTableWriter instance field) * because this method is used to update both the specTable for the primary entity and any relevant child entities. 
*/ - private PreparedStatement createPreparedUpdate(Integer id, boolean isCreating, ObjectNode jsonObject, Table table, Connection connection, boolean batch) throws SQLException { + private PreparedStatement createPreparedUpdate( + Integer id, + boolean isCreating, + ObjectNode jsonObject, + Table table, + Connection connection, + boolean batch + ) throws SQLException { String statementString; if (isCreating) { statementString = table.generateInsertSql(tablePrefix, true); @@ -275,7 +343,12 @@ private PreparedStatement createPreparedUpdate(Integer id, boolean isCreating, O * taken from JSON. Note, string values are used here in order to take advantage of setParameter method on * individual fields, which handles parsing string and non-string values into the appropriate SQL field types. */ - private void setStatementParameters(ObjectNode jsonObject, Table table, PreparedStatement preparedStatement, Connection connection) throws SQLException { + private void setStatementParameters( + ObjectNode jsonObject, + Table table, + PreparedStatement preparedStatement, + Connection connection + ) throws SQLException { // JDBC SQL statements use a one-based index for setting fields/parameters List missingFieldNames = new ArrayList<>(); int index = 1; @@ -349,7 +422,13 @@ private void setStatementParameters(ObjectNode jsonObject, Table table, Prepared } if (missingFieldNames.size() > 0) { // String joinedFieldNames = missingFieldNames.stream().collect(Collectors.joining(", ")); - throw new SQLException(String.format("The following field(s) are missing from JSON %s object: %s", table.name, missingFieldNames.toString())); + throw new SQLException( + String.format( + "The following field(s) are missing from JSON %s object: %s", + table.name, + missingFieldNames.toString() + ) + ); } } @@ -362,7 +441,14 @@ private void setStatementParameters(ObjectNode jsonObject, Table table, Prepared * have a hierarchical relationship. 
* FIXME develop a better way to update tables with foreign keys to the table being updated. */ - private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreatingNewEntity, Table subTable, Connection connection) throws SQLException, IOException { + private void updateChildTable( + ArrayNode subEntities, + int id, + boolean referencedPatternUsesFrequencies, + boolean isCreatingNewEntity, + Table subTable, + Connection connection + ) throws SQLException, IOException { // Get parent table's key field Field keyField; String keyValue; @@ -371,11 +457,13 @@ private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreat // Get parent entity's key value keyValue = getValueForId(id, keyField.name, tablePrefix, specTable, connection); String childTableName = String.join(".", tablePrefix, subTable.name); - // FIXME: add check for pattern stop consistency. - // FIXME: re-order stop times if pattern stop order changes. + if (!referencedPatternUsesFrequencies && subTable.name.equals(Table.FREQUENCIES.name) && subEntities.size() > 0) { + // Do not permit the illegal state where frequency entries are being added/modified for a timetable pattern. + throw new IllegalStateException("Cannot create or update frequency entries for a timetable-based pattern."); + } // Reconciling pattern stops MUST happen before original pattern stops are deleted in below block (with // getUpdateReferencesSql) - if ("pattern_stops".equals(subTable.name)) { + if (Table.PATTERN_STOP.name.equals(subTable.name)) { List newPatternStops = new ArrayList<>(); // Clean up pattern stop ID fields (passed in as string ID from datatools-ui to avoid id collision) for (JsonNode node : subEntities) { @@ -390,15 +478,14 @@ private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreat } reconcilePatternStops(keyValue, newPatternStops, connection); } - // FIXME: allow shapes to be updated on pattern geometry change. 
if (!isCreatingNewEntity) { // Delete existing sub-entities for given entity ID if the parent entity is not being newly created. String deleteSql = getUpdateReferencesSql(SqlMethod.DELETE, childTableName, keyField, keyValue, null); LOG.info(deleteSql); Statement statement = connection.createStatement(); // FIXME: Use copy on update for a pattern's shape instead of deleting the previous shape and replacing it. - // This would better account for GTFS data loaded from a file where multiple patterns reference a single - // shape. + // This would better account for GTFS data loaded from a file where multiple patterns reference a single + // shape. int result = statement.executeUpdate(deleteSql); LOG.info("Deleted {} {}", result, subTable.name); // FIXME: are there cases when an update should not return zero? @@ -412,6 +499,7 @@ private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreat int previousOrder = -1; TIntSet orderValues = new TIntHashSet(); Multimap referencesPerTable = HashMultimap.create(); + int cumulativeTravelTime = 0; for (JsonNode entityNode : subEntities) { // Cast entity node to ObjectNode to allow mutations (JsonNode is immutable). ObjectNode subEntity = (ObjectNode)entityNode; @@ -436,17 +524,24 @@ private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreat // If handling first iteration, create the prepared statement (later iterations will add to batch). insertStatement = createPreparedUpdate(id, true, subEntity, subTable, connection, true); } - // Update linked stop times fields for updated pattern stop (e.g., timepoint, pickup/drop off type). + // Update linked stop times fields for each updated pattern stop (e.g., timepoint, pickup/drop off type). if ("pattern_stops".equals(subTable.name)) { + if (referencedPatternUsesFrequencies) { + // Update stop times linked to pattern stop if the pattern uses frequencies and accumulate time. 
+ // Default travel and dwell time behave as "linked fields" for associated stop times. In other + // words, frequency trips in the editor must match the pattern stop travel times. + cumulativeTravelTime += updateStopTimesForPatternStop(subEntity, cumulativeTravelTime); + } + // These fields should be updated for all patterns (e.g., timepoint, pickup/drop off type). updateLinkedFields( - subTable, - subEntity, - "stop_times", - "pattern_id", - "timepoint", - "drop_off_type", - "pickup_type", - "shape_dist_traveled" + subTable, + subEntity, + "stop_times", + "pattern_id", + "timepoint", + "drop_off_type", + "pickup_type", + "shape_dist_traveled" ); } setStatementParameters(subEntity, subTable, insertStatement, connection); @@ -458,13 +553,15 @@ private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreat boolean orderIsUnique = orderValues.add(orderValue); boolean valuesAreIncrementing = ++previousOrder == orderValue; if (!orderIsUnique || !valuesAreIncrementing) { - throw new SQLException(String.format( + throw new SQLException( + String.format( "%s %s values must be zero-based, unique, and incrementing. Entity at index %d had %s value of %d", subTable.name, orderFieldName, entityCount, previousOrder == 0 ? "non-zero" : !valuesAreIncrementing ? "non-incrementing" : "duplicate", - orderValue) + orderValue + ) ); } } @@ -492,6 +589,39 @@ private void updateChildTable(ArrayNode subEntities, Integer id, boolean isCreat } } + /** + * Updates the stop times that reference the specified pattern stop. 
+ * @param patternStop the pattern stop for which to update stop times + * @param previousTravelTime the travel time accumulated up to the previous stop_time's departure time (or the + * previous pattern stop's dwell time) + * @return the travel and dwell time added by this pattern stop + * @throws SQLException + */ + private int updateStopTimesForPatternStop(ObjectNode patternStop, int previousTravelTime) throws SQLException { + String sql = String.format( + "update %s.stop_times st set arrival_time = ?, departure_time = ? from %s.trips t " + + "where st.trip_id = t.trip_id AND t.pattern_id = ? AND st.stop_sequence = ?", + tablePrefix, + tablePrefix + ); + // Prepare the statement and set statement parameters + PreparedStatement statement = connection.prepareStatement(sql); + int oneBasedIndex = 1; + int travelTime = patternStop.get("default_travel_time").asInt(); + int arrivalTime = previousTravelTime + travelTime; + statement.setInt(oneBasedIndex++, arrivalTime); + int dwellTime = patternStop.get("default_dwell_time").asInt(); + statement.setInt(oneBasedIndex++, arrivalTime + dwellTime); + // Set "where clause" with value for pattern_id and stop_sequence + statement.setString(oneBasedIndex++, patternStop.get("pattern_id").asText()); + statement.setInt(oneBasedIndex++, patternStop.get("stop_sequence").asInt()); + // Log query, execute statement, and log result. + LOG.debug(statement.toString()); + int entitiesUpdated = statement.executeUpdate(); + LOG.debug("{} stop_time arrivals/departures updated", entitiesUpdated); + return travelTime + dwellTime; + } + /** * Checks that a set of string references to a set of reference tables are all valid. For each set of references * mapped to a reference table, the method queries for all of the references. 
If there are any references that were @@ -610,8 +740,13 @@ else if (i == newStops.size() - 1 || !originalStopIds.get(i).equals(newStops.get } // Increment sequences for stops that follow the inserted location (including the stop at the changed index). // NOTE: This should happen before the blank stop time insertion for logical consistency. - String updateSql = String.format("update %s.stop_times set stop_sequence = stop_sequence + 1 from %s.trips where stop_sequence >= %d AND %s", - tablePrefix, tablePrefix, differenceLocation, joinToTrips); + String updateSql = String.format( + "update %s.stop_times set stop_sequence = stop_sequence + 1 from %s.trips where stop_sequence >= %d AND %s", + tablePrefix, + tablePrefix, + differenceLocation, + joinToTrips + ); LOG.info(updateSql); PreparedStatement updateStatement = connection.prepareStatement(updateSql); int updated = updateStatement.executeUpdate(); @@ -638,13 +773,23 @@ else if (originalStopIds.size() == newStops.size() + 1) { } } // Delete stop at difference location - String deleteSql = String.format("delete from %s.stop_times using %s.trips where stop_sequence = %d AND %s", - tablePrefix, tablePrefix, differenceLocation, joinToTrips); + String deleteSql = String.format( + "delete from %s.stop_times using %s.trips where stop_sequence = %d AND %s", + tablePrefix, + tablePrefix, + differenceLocation, + joinToTrips + ); LOG.info(deleteSql); PreparedStatement deleteStatement = connection.prepareStatement(deleteSql); // Decrement all stops with sequence greater than difference location - String updateSql = String.format("update %s.stop_times set stop_sequence = stop_sequence - 1 from %s.trips where stop_sequence > %d AND %s", - tablePrefix, tablePrefix, differenceLocation, joinToTrips); + String updateSql = String.format( + "update %s.stop_times set stop_sequence = stop_sequence - 1 from %s.trips where stop_sequence > %d AND %s", + tablePrefix, + tablePrefix, + differenceLocation, + joinToTrips + ); LOG.info(updateSql); 
PreparedStatement updateStatement = connection.prepareStatement(updateSql); int deleted = deleteStatement.executeUpdate(); @@ -764,16 +909,20 @@ else if (originalStopIds.size() < newStops.size()) { insertBlankStopTimes(tripsForPattern, newStops, firstDifferentIndex, stopsToInsert, connection); } // ANY OTHER TYPE OF MODIFICATION IS NOT SUPPORTED - else { - throw new IllegalStateException(RECONCILE_STOPS_ERROR_MSG); - } + else throw new IllegalStateException(RECONCILE_STOPS_ERROR_MSG); } /** * Check the stops in the changed region to ensure they remain in the same order. If not, throw an exception to * cancel the transaction. */ - private static void verifyInteriorStopsAreUnchanged(List originalStopIds, List newStops, int firstDifferentIndex, int lastDifferentIndex, boolean movedRight) { + private static void verifyInteriorStopsAreUnchanged( + List originalStopIds, + List newStops, + int firstDifferentIndex, + int lastDifferentIndex, + boolean movedRight + ) { //Stops mapped to list of stop IDs simply for easier viewing/comparison with original IDs while debugging with // breakpoints. List newStopIds = newStops.stream().map(s -> s.stop_id).collect(Collectors.toList()); @@ -798,7 +947,13 @@ private static void verifyInteriorStopsAreUnchanged(List originalStopIds * You must call this method after updating sequences for any stop times following the starting stop sequence to * avoid overwriting these other stop times. */ - private void insertBlankStopTimes(List tripIds, List newStops, int startingStopSequence, int stopTimesToAdd, Connection connection) throws SQLException { + private void insertBlankStopTimes( + List tripIds, + List newStops, + int startingStopSequence, + int stopTimesToAdd, + Connection connection + ) throws SQLException { if (tripIds.isEmpty()) { // There is no need to insert blank stop times if there are no trips for the pattern. 
return; @@ -958,7 +1113,13 @@ private static long handleStatementExecution(PreparedStatement statement, boolea * * FIXME: add more detail/precise language on what this method actually does */ - private static void ensureReferentialIntegrity(Connection connection, ObjectNode jsonObject, String namespace, Table table, Integer id) throws SQLException { + private static void ensureReferentialIntegrity( + Connection connection, + ObjectNode jsonObject, + String namespace, + Table table, + Integer id + ) throws SQLException { final boolean isCreating = id == null; String keyField = table.getKeyFieldName(); String tableName = String.join(".", namespace, table.name); @@ -1034,7 +1195,12 @@ private static int getRowCount(String tableName, Connection connection) throws S /** * For some condition (where field = string value), return the set of unique int IDs for the records that match. */ - private static TIntSet getIdsForCondition(String tableName, String keyField, String keyValue, Connection connection) throws SQLException { + private static TIntSet getIdsForCondition( + String tableName, + String keyField, + String keyValue, + Connection connection + ) throws SQLException { String idCheckSql = String.format("select id from %s where %s = ?", tableName, keyField); // Create statement for counting rows selected PreparedStatement statement = connection.prepareStatement(idCheckSql); @@ -1085,11 +1251,11 @@ private static String getValueForId(int id, String fieldName, String namespace, LOG.info(selectIdSql); Statement selectIdStatement = connection.createStatement(); ResultSet selectResults = selectIdStatement.executeQuery(selectIdSql); - String keyValue = null; + String value = null; while (selectResults.next()) { - keyValue = selectResults.getString(1); + value = selectResults.getString(1); } - return keyValue; + return value; } /** @@ -1108,7 +1274,13 @@ private static String getValueForId(int id, String fieldName, String namespace, * not necessarily delete a shape that is 
shared by multiple trips)? I think not because we are skipping foreign refs * found in the table for the entity being updated/deleted. [Leaving this comment in place for now though.] */ - private static void updateReferencingTables(String namespace, Table table, Connection connection, int id, String newKeyValue) throws SQLException { + private static void updateReferencingTables( + String namespace, + Table table, + Connection connection, + int id, + String newKeyValue + ) throws SQLException { Field keyField = table.getFieldForName(table.getKeyFieldName()); Class entityClass = table.getEntityClass(); // Determine method (update vs. delete) depending on presence of newKeyValue field. @@ -1165,10 +1337,18 @@ private static void updateReferencingTables(String namespace, Table table, Conne if (table.isCascadeDeleteRestricted()) { // The entity must not have any referencing entities in order to delete it. connection.rollback(); - // List idStrings = new ArrayList<>(); - // uniqueIds.forEach(uniqueId -> idStrings.add(String.valueOf(uniqueId))); - // String message = String.format("Cannot delete %s %s=%s. %d %s reference this %s (%s).", entityClass.getSimpleName(), keyField, keyValue, result, referencingTable.name, entityClass.getSimpleName(), String.join(",", idStrings)); - String message = String.format("Cannot delete %s %s=%s. %d %s reference this %s.", entityClass.getSimpleName(), keyField.name, keyValue, result, referencingTable.name, entityClass.getSimpleName()); + // List idStrings = new ArrayList<>(); + // uniqueIds.forEach(uniqueId -> idStrings.add(String.valueOf(uniqueId))); + // String message = String.format("Cannot delete %s %s=%s. %d %s reference this %s (%s).", entityClass.getSimpleName(), keyField, keyValue, result, referencingTable.name, entityClass.getSimpleName(), String.join(",", idStrings)); + String message = String.format( + "Cannot delete %s %s=%s. 
%d %s reference this %s.", + entityClass.getSimpleName(), + keyField.name, + keyValue, + result, + referencingTable.name, + entityClass.getSimpleName() + ); LOG.warn(message); throw new SQLException(message); } @@ -1185,12 +1365,23 @@ private static void updateReferencingTables(String namespace, Table table, Conne /** * Constructs SQL string based on method provided. */ - private static String getUpdateReferencesSql(SqlMethod sqlMethod, String refTableName, Field keyField, String keyValue, String newKeyValue) throws SQLException { + private static String getUpdateReferencesSql( + SqlMethod sqlMethod, + String refTableName, + Field keyField, + String keyValue, + String newKeyValue + ) throws SQLException { boolean isArrayField = keyField.getSqlType().equals(JDBCType.ARRAY); switch (sqlMethod) { case DELETE: if (isArrayField) { - return String.format("delete from %s where %s @> ARRAY['%s']::text[]", refTableName, keyField.name, keyValue); + return String.format( + "delete from %s where %s @> ARRAY['%s']::text[]", + refTableName, + keyField.name, + keyValue + ); } else { return String.format("delete from %s where %s = '%s'", refTableName, keyField.name, keyValue); } @@ -1199,9 +1390,25 @@ private static String getUpdateReferencesSql(SqlMethod sqlMethod, String refTabl // If the field to be updated is an array field (of which there are only text[] types in the db), // replace the old value with the new value using array contains clause. // FIXME This is probably horribly postgres specific. 
- return String.format("update %s set %s = array_replace(%s, '%s', '%s') where %s @> ARRAY['%s']::text[]", refTableName, keyField.name, keyField.name, keyValue, newKeyValue, keyField.name, keyValue); + return String.format( + "update %s set %s = array_replace(%s, '%s', '%s') where %s @> ARRAY['%s']::text[]", + refTableName, + keyField.name, + keyField.name, + keyValue, + newKeyValue, + keyField.name, + keyValue + ); } else { - return String.format("update %s set %s = '%s' where %s = '%s'", refTableName, keyField.name, newKeyValue, keyField.name, keyValue); + return String.format( + "update %s set %s = '%s' where %s = '%s'", + refTableName, + keyField.name, + newKeyValue, + keyField.name, + keyValue + ); } // case CREATE: // return String.format("insert into %s "); diff --git a/src/main/java/com/conveyal/gtfs/loader/SnapshotResult.java b/src/main/java/com/conveyal/gtfs/loader/SnapshotResult.java new file mode 100644 index 000000000..9c596e345 --- /dev/null +++ b/src/main/java/com/conveyal/gtfs/loader/SnapshotResult.java @@ -0,0 +1,11 @@ +package com.conveyal.gtfs.loader; + +/** + * This contains the result of a feed snapshot operation. It is nearly identical to {@link FeedLoadResult} except that + * it has some additional tables that only exist for snapshots/editor feeds. + */ +public class SnapshotResult extends FeedLoadResult { + private static final long serialVersionUID = 1L; + + public TableLoadResult scheduleExceptions; +} diff --git a/src/main/java/com/conveyal/gtfs/loader/Table.java b/src/main/java/com/conveyal/gtfs/loader/Table.java index 9c6b94b7b..e5ecf86ec 100644 --- a/src/main/java/com/conveyal/gtfs/loader/Table.java +++ b/src/main/java/com/conveyal/gtfs/loader/Table.java @@ -163,6 +163,7 @@ public Table (String name, Class entityClass, Requirement requ new ColorField("route_text_color", OPTIONAL), // Editor fields below. new ShortField("publicly_visible", EDITOR, 1), + // wheelchair_accessible is an exemplar field applied to all trips on a route. 
new ShortField("wheelchair_accessible", EDITOR, 2).permitEmptyValue(), new IntegerField("route_sort_order", OPTIONAL, 0, Integer.MAX_VALUE), // Status values are In progress (0), Pending approval (1), and Approved (2). @@ -196,7 +197,8 @@ public Table (String name, Class entityClass, Requirement requ new StringField("pattern_id", REQUIRED), new StringField("route_id", REQUIRED).isReferenceTo(ROUTES), new StringField("name", OPTIONAL), - // Editor-specific fields + // Editor-specific fields. + // direction_id and shape_id are exemplar fields applied to all trips for a pattern. new ShortField("direction_id", EDITOR, 1), new ShortField("use_frequency", EDITOR, 1), new StringField("shape_id", EDITOR).isReferenceTo(SHAPES) @@ -281,7 +283,10 @@ public Table (String name, Class entityClass, Requirement requ new StringField("trip_id", REQUIRED).isReferenceTo(TRIPS), new TimeField("start_time", REQUIRED), new TimeField("end_time", REQUIRED), - new IntegerField("headway_secs", REQUIRED, 20, 60*60*2), + // Set max headway seconds to the equivalent of 6 hours. This should leave space for any very long headways + // (e.g., a ferry running exact times at a 4 hour headway), but will catch cases where milliseconds were + // exported accidentally. 
+ new IntegerField("headway_secs", REQUIRED, 20, 60*60*6), new IntegerField("exact_times", OPTIONAL, 1)) .withParentTable(TRIPS) .keyFieldIsNotUnique(); diff --git a/src/main/java/com/conveyal/gtfs/model/StopTime.java b/src/main/java/com/conveyal/gtfs/model/StopTime.java index 37fd5d2d8..d69078e34 100644 --- a/src/main/java/com/conveyal/gtfs/model/StopTime.java +++ b/src/main/java/com/conveyal/gtfs/model/StopTime.java @@ -26,7 +26,7 @@ public class StopTime extends Entity implements Cloneable, Serializable { public String stop_headsign; public int pickup_type; public int drop_off_type; - public double shape_dist_traveled; + public double shape_dist_traveled = DOUBLE_MISSING; public int timepoint = INT_MISSING; @Override diff --git a/src/main/java/com/conveyal/gtfs/validator/NamesValidator.java b/src/main/java/com/conveyal/gtfs/validator/NamesValidator.java index 913b56453..889a3679b 100644 --- a/src/main/java/com/conveyal/gtfs/validator/NamesValidator.java +++ b/src/main/java/com/conveyal/gtfs/validator/NamesValidator.java @@ -3,6 +3,8 @@ import com.conveyal.gtfs.error.SQLErrorStorage; import com.conveyal.gtfs.loader.Feed; import com.conveyal.gtfs.model.Route; +import com.conveyal.gtfs.model.Stop; +import com.conveyal.gtfs.model.Trip; import static com.conveyal.gtfs.error.NewGTFSErrorType.*; @@ -41,7 +43,41 @@ public void validate() { // TODO we want some additional checking for extended route types. } } - // TODO Check trips and all other tables. + // Check stops + for (Stop stop : feed.stops) { + String name = normalize(stop.stop_name); + String desc = normalize(stop.stop_desc); + // Stops must be named. + if (name.isEmpty()) { + registerError(stop, STOP_NAME_MISSING); + } + // If provided, the description of a stop should be more informative than its name. 
+ if (!desc.isEmpty() && desc.equals(name)) { + registerError(stop, STOP_DESCRIPTION_SAME_AS_NAME, desc); + } + } + // Check trips + for (Trip trip : feed.trips) { + String headsign = normalize(trip.trip_headsign); + // TODO: check trip short name? +// String shortName = normalize(trip.trip_short_name); + Route route = feed.routes.get(trip.route_id); + String routeShortName = "", routeLongName = ""; + if (route != null) { + routeShortName = normalize(route.route_short_name); + routeLongName = normalize(route.route_long_name); + } + // Trip headsign should not duplicate route name. + if (!headsign.isEmpty() && (headsign.contains(routeShortName) || headsign.contains(routeLongName))) { + registerError(trip, TRIP_HEADSIGN_CONTAINS_ROUTE_NAME, headsign); + } + // Trip headsign should not begin with "to" or "towards" (note: headsign normalized to lowercase). Headsigns + // should follow one of the patterns defined in the best practices: http://gtfs.org/best-practices#tripstxt + if (headsign.startsWith("to ") || headsign.startsWith("towards ")) { + registerError(trip, TRIP_HEADSIGN_SHOULD_DESCRIBE_DESTINATION_OR_WAYPOINTS, headsign); + } + } + // TODO Are there other tables we're not checking? } /** @return a non-null String that is lower case and has no leading or trailing whitespace */ diff --git a/src/main/java/com/conveyal/gtfs/validator/ServiceValidator.java b/src/main/java/com/conveyal/gtfs/validator/ServiceValidator.java index 7097da1c1..3b1639c26 100644 --- a/src/main/java/com/conveyal/gtfs/validator/ServiceValidator.java +++ b/src/main/java/com/conveyal/gtfs/validator/ServiceValidator.java @@ -117,8 +117,7 @@ public void complete(ValidationResult validationResult) { } } } catch (Exception ex) { - LOG.error(ex.getMessage()); - ex.printStackTrace(); + LOG.error("Error validating service entries (merging calendars and calendar_dates)", ex); // Continue on to next calendar entry. 
} } @@ -181,6 +180,11 @@ select durations.service_id, duration_seconds, days_active from ( LocalDate firstDate = LocalDate.MAX; LocalDate lastDate = LocalDate.MIN; for (LocalDate date : dateInfoForDate.keySet()) { + // If the date is invalid, skip. + if (date == null) { + LOG.error("Encountered null date. Did something go wrong with computeIfAbsent?"); + continue; + } if (date.isBefore(firstDate)) firstDate = date; if (date.isAfter(lastDate)) lastDate = date; } @@ -191,7 +195,8 @@ select durations.service_id, duration_seconds, days_active from ( validationResult.firstCalendarDate = firstDate; validationResult.lastCalendarDate = lastDate; // Is this any different? firstDate.until(lastDate, ChronoUnit.DAYS); - int nDays = (int) ChronoUnit.DAYS.between(firstDate, lastDate) + 1; + // If no days were found in the dateInfoForDate, nDays is a very large negative number, so we default to 0. + int nDays = Math.max(0, (int) ChronoUnit.DAYS.between(firstDate, lastDate) + 1); validationResult.dailyBusSeconds = new int[nDays]; validationResult.dailyTramSeconds = new int[nDays]; validationResult.dailyMetroSeconds = new int[nDays]; diff --git a/src/main/java/com/conveyal/gtfs/validator/SpeedTripValidator.java b/src/main/java/com/conveyal/gtfs/validator/SpeedTripValidator.java index 46bc39d14..6a89163ae 100644 --- a/src/main/java/com/conveyal/gtfs/validator/SpeedTripValidator.java +++ b/src/main/java/com/conveyal/gtfs/validator/SpeedTripValidator.java @@ -3,6 +3,7 @@ import com.conveyal.gtfs.error.NewGTFSError; import com.conveyal.gtfs.error.SQLErrorStorage; import com.conveyal.gtfs.loader.Feed; +import com.conveyal.gtfs.model.Entity; import com.conveyal.gtfs.model.Route; import com.conveyal.gtfs.model.Stop; import com.conveyal.gtfs.model.StopTime; @@ -49,6 +50,8 @@ public void validateTrip(Trip trip, Route route, List stopTimes, List< Stop currStop = stops.get(i); // Distance is accumulated in case times are not provided for some StopTimes. 
distanceMeters += fastDistance(currStop.stop_lat, currStop.stop_lon, prevStop.stop_lat, prevStop.stop_lon); + // Check that shape_dist_traveled is increasing. + checkShapeDistTraveled(prevStopTime, currStopTime); if (missingBothTimes(currStopTime)) { // FixMissingTimes has already been called, so both arrival and departure time are missing. // The spec allows this. Other than accumulating distance, skip this StopTime. @@ -82,6 +85,20 @@ public void validateTrip(Trip trip, Route route, List stopTimes, List< } } + /** + * Register shape dist traveled error if previous stop time's value was missing and the current value is + * not (if at least one stop time has a value, all stop times for the trip should) OR if current value + * is not greater than previous value. + */ + private void checkShapeDistTraveled(StopTime previous, StopTime current) { + if ( + (previous.shape_dist_traveled == Entity.DOUBLE_MISSING && current.shape_dist_traveled != Entity.DOUBLE_MISSING) || + current.shape_dist_traveled <= previous.shape_dist_traveled + ) { + registerError(current, SHAPE_DIST_TRAVELED_NOT_INCREASING, current.shape_dist_traveled); + } + } + /** * Completing this feed validator means checking if there were any unrounded travel times in the feed and (if so) * registering any zero travel time errors that were passed over before the first unrounded travel time was diff --git a/src/test/java/com/conveyal/gtfs/GTFSTest.java b/src/test/java/com/conveyal/gtfs/GTFSTest.java index 9a5acca3a..3e2c8fe69 100644 --- a/src/test/java/com/conveyal/gtfs/GTFSTest.java +++ b/src/test/java/com/conveyal/gtfs/GTFSTest.java @@ -1,9 +1,16 @@ package com.conveyal.gtfs; +import com.conveyal.gtfs.error.NewGTFSErrorType; import com.conveyal.gtfs.loader.FeedLoadResult; +import com.conveyal.gtfs.loader.SnapshotResult; +import com.conveyal.gtfs.storage.ExpectedFieldType; +import com.conveyal.gtfs.storage.PersistenceExpectation; +import com.conveyal.gtfs.storage.RecordExpectation; import 
com.conveyal.gtfs.validator.ValidationResult; import com.csvreader.CsvReader; +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Multimap; import com.google.common.io.Files; import org.apache.commons.io.FileUtils; import org.apache.commons.io.input.BOMInputStream; @@ -22,9 +29,9 @@ import java.io.PrintStream; import java.nio.charset.Charset; import java.sql.Connection; -import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.Collection; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; @@ -98,6 +105,26 @@ public void canLoadAndExportSimpleAgency() { ); } + /** + * Tests that a GTFS feed with bad date values in calendars.txt and calendar_dates.txt can pass the integration test. + */ + @Test + public void canLoadFeedWithBadDates () { + PersistenceExpectation[] expectations = PersistenceExpectation.list( + new PersistenceExpectation( + "calendar", + new RecordExpectation[]{ + new RecordExpectation("start_date", null) + } + ) + ); + assertThat( + "Integration test passes", + runIntegrationTestOnFolder("fake-agency-bad-calendar-date", nullValue(), expectations), + equalTo(true) + ); + } + /** * Tests whether or not "fake-agency" GTFS can be placed in a zipped subdirectory and loaded/exported successfully. */ @@ -119,11 +146,7 @@ public void canLoadAndExportSimpleAgencyInSubDirectory() { } // TODO Add error expectations argument that expects NewGTFSErrorType.TABLE_IN_SUBDIRECTORY error type. assertThat( - runIntegrationTestOnZipFile( - zipFileName, - nullValue(), - fakeAgencyPersistenceExpectations - ), + runIntegrationTestOnZipFile(zipFileName, nullValue(), fakeAgencyPersistenceExpectations), equalTo(true) ); } @@ -220,7 +243,7 @@ private boolean runIntegrationTestOnFolder( } /** - * A helper method that will run GTFS.main with a certain zip file. + * A helper method that will run GTFS#main with a certain zip file. 
* This tests whether a GTFS zip file can be loaded without any errors. * * After the GTFS is loaded, this will also initiate an export of a GTFS from the database and check @@ -251,7 +274,7 @@ private boolean runIntegrationTestOnZipFile( assertThat(validationResult.fatalException, is(fatalExceptionExpectation)); namespace = loadResult.uniqueIdentifier; - assertThatImportedGtfsMeetsExpectations(dbConnectionUrl, namespace, persistenceExpectations); + assertThatImportedGtfsMeetsExpectations(dataSource.getConnection(), namespace, persistenceExpectations); } catch (SQLException e) { TestUtils.dropDB(newDBName); e.printStackTrace(); @@ -261,9 +284,9 @@ private boolean runIntegrationTestOnZipFile( throw e; } - // Verify that exporting the feed (in non-editor mode) completes and data is outputed properly + // Verify that exporting the feed (in non-editor mode) completes and data is outputted properly try { - LOG.info("export GTFS from created namespase"); + LOG.info("export GTFS from created namespace"); File tempFile = exportGtfs(namespace, dataSource, false); assertThatExportedGtfsMeetsExpectations(tempFile, persistenceExpectations, false); } catch (IOException e) { @@ -278,9 +301,10 @@ private boolean runIntegrationTestOnZipFile( // Verify that making a snapshot from an existing feed database, then exporting that snapshot to a GTFS zip file // works as expected try { - LOG.info("copy GTFS from created namespase"); - FeedLoadResult copyResult = GTFS.makeSnapshot(namespace, dataSource); - LOG.info("export GTFS from copied namespase"); + LOG.info("copy GTFS from created namespace"); + SnapshotResult copyResult = GTFS.makeSnapshot(namespace, dataSource); + assertThatSnapshotIsErrorFree(copyResult); + LOG.info("export GTFS from copied namespace"); File tempFile = exportGtfs(copyResult.uniqueIdentifier, dataSource, true); assertThatExportedGtfsMeetsExpectations(tempFile, persistenceExpectations, true); } catch (IOException e) { @@ -293,6 +317,60 @@ private boolean 
runIntegrationTestOnZipFile( return true; } + private void assertThatLoadIsErrorFree(FeedLoadResult loadResult) { + assertThat(loadResult.fatalException, is(nullValue())); + assertThat(loadResult.agency.fatalException, is(nullValue())); + assertThat(loadResult.calendar.fatalException, is(nullValue())); + assertThat(loadResult.calendarDates.fatalException, is(nullValue())); + assertThat(loadResult.fareAttributes.fatalException, is(nullValue())); + assertThat(loadResult.fareRules.fatalException, is(nullValue())); + assertThat(loadResult.feedInfo.fatalException, is(nullValue())); + assertThat(loadResult.frequencies.fatalException, is(nullValue())); + assertThat(loadResult.routes.fatalException, is(nullValue())); + assertThat(loadResult.shapes.fatalException, is(nullValue())); + assertThat(loadResult.stops.fatalException, is(nullValue())); + assertThat(loadResult.stopTimes.fatalException, is(nullValue())); + assertThat(loadResult.transfers.fatalException, is(nullValue())); + assertThat(loadResult.trips.fatalException, is(nullValue())); + } + + private void assertThatSnapshotIsErrorFree(SnapshotResult snapshotResult) { + assertThatLoadIsErrorFree(snapshotResult); + assertThat(snapshotResult.scheduleExceptions.fatalException, is(nullValue())); + } + + private String zipFolderAndLoadGTFS(String folderName, DataSource dataSource, PersistenceExpectation[] persistenceExpectations) { + // zip up test folder into temp zip file + String zipFileName; + try { + zipFileName = TestUtils.zipFolderFiles(folderName, true); + } catch (IOException e) { + e.printStackTrace(); + return null; + } + String namespace; + // Verify that loading the feed completes and data is stored properly + try { + // load and validate feed + LOG.info("load and validate feed"); + FeedLoadResult loadResult = GTFS.load(zipFileName, dataSource); + ValidationResult validationResult = GTFS.validate(loadResult.uniqueIdentifier, dataSource); + DataSource ds = GTFS.createDataSource( + 
dataSource.getConnection().getMetaData().getURL(), + null, + null + ); + assertThatLoadIsErrorFree(loadResult); + assertThat(validationResult.fatalException, is(nullValue())); + namespace = loadResult.uniqueIdentifier; + assertThatImportedGtfsMeetsExpectations(ds.getConnection(), namespace, persistenceExpectations); + } catch (SQLException | AssertionError e) { + e.printStackTrace(); + return null; + } + return namespace; + } + /** * Helper function to export a GTFS from the database to a temporary zip file. */ @@ -302,18 +380,34 @@ private File exportGtfs(String namespace, DataSource dataSource, boolean fromEdi return tempFile; } + private class ValuePair { + private final Object expected; + private final Object found; + private ValuePair (Object expected, Object found) { + this.expected = expected; + this.found = found; + } + } + /** * Run through the list of persistence expectations to make sure that the feed was imported properly into the * database. */ private void assertThatImportedGtfsMeetsExpectations( - String dbConnectionUrl, + Connection connection, String namespace, PersistenceExpectation[] persistenceExpectations ) throws SQLException { + // Store field mismatches here (to provide assertion statements with more details). + Multimap fieldsWithMismatches = ArrayListMultimap.create(); + // Check that no validators failed during validation. 
+ assertThat( + "One or more validators failed during GTFS import.", + countValidationErrorsOfType(connection, namespace, NewGTFSErrorType.VALIDATOR_FAILED), + equalTo(0) + ); // run through testing expectations - LOG.info("testing expecations of record storage in the database"); - Connection conn = DriverManager.getConnection(dbConnectionUrl); + LOG.info("testing expectations of record storage in the database"); for (PersistenceExpectation persistenceExpectation : persistenceExpectations) { // select all entries from a table String sql = String.format( @@ -322,7 +416,7 @@ private void assertThatImportedGtfsMeetsExpectations( persistenceExpectation.tableName ); LOG.info(sql); - ResultSet rs = conn.prepareStatement(sql).executeQuery(); + ResultSet rs = connection.prepareStatement(sql).executeQuery(); boolean foundRecord = false; int numRecordsSearched = 0; while (rs.next()) { @@ -332,32 +426,48 @@ private void assertThatImportedGtfsMeetsExpectations( for (RecordExpectation recordExpectation: persistenceExpectation.recordExpectations) { switch (recordExpectation.expectedFieldType) { case DOUBLE: + double doubleVal = rs.getDouble(recordExpectation.fieldName); LOG.info(String.format( "%s: %f", recordExpectation.fieldName, - rs.getDouble(recordExpectation.fieldName) + doubleVal )); - if (rs.getDouble(recordExpectation.fieldName) != recordExpectation.doubleExpectation) { + if (doubleVal != recordExpectation.doubleExpectation) { allFieldsMatch = false; } break; case INT: + int intVal = rs.getInt(recordExpectation.fieldName); LOG.info(String.format( "%s: %d", recordExpectation.fieldName, - rs.getInt(recordExpectation.fieldName) + intVal )); - if (rs.getInt(recordExpectation.fieldName) != recordExpectation.intExpectation) { + if (intVal != recordExpectation.intExpectation) { + fieldsWithMismatches.put( + recordExpectation.fieldName, + new ValuePair(recordExpectation.stringExpectation, intVal) + ); allFieldsMatch = false; } break; case STRING: + String strVal = 
rs.getString(recordExpectation.fieldName); LOG.info(String.format( "%s: %s", recordExpectation.fieldName, - rs.getString(recordExpectation.fieldName) + strVal )); - if (!rs.getString(recordExpectation.fieldName).equals(recordExpectation.stringExpectation)) { + if (strVal == null && recordExpectation.stringExpectation == null) { + break; + } else if ( + (strVal == null && recordExpectation.stringExpectation != null) || + !strVal.equals(recordExpectation.stringExpectation) + ) { + fieldsWithMismatches.put( + recordExpectation.fieldName, + new ValuePair(recordExpectation.stringExpectation, strVal) + ); allFieldsMatch = false; } break; @@ -374,10 +484,28 @@ private void assertThatImportedGtfsMeetsExpectations( break; } } - assertThatPersistenceExpectationRecordWasFound(numRecordsSearched, foundRecord); + assertThatPersistenceExpectationRecordWasFound(numRecordsSearched, foundRecord, fieldsWithMismatches); } } + private static int countValidationErrorsOfType( + Connection connection, + String namespace, + NewGTFSErrorType errorType + ) throws SQLException { + String errorCheckSql = String.format( + "select * from %s.errors where error_type = '%s'", + namespace, + errorType); + LOG.info(errorCheckSql); + ResultSet errorResults = connection.prepareStatement(errorCheckSql).executeQuery(); + int errorCount = 0; + while (errorResults.next()) { + errorCount++; + } + return errorCount; + } + /** * Helper to assert that the GTFS that was exported to a zip file matches all data expectations defined in the * persistence expectations. 
@@ -387,7 +515,7 @@ private void assertThatExportedGtfsMeetsExpectations( PersistenceExpectation[] persistenceExpectations, boolean fromEditor ) throws IOException { - LOG.info("testing expecations of csv outputs in an exported gtfs"); + LOG.info("testing expectations of csv outputs in an exported gtfs"); ZipFile gtfsZipfile = new ZipFile(tempFile.getAbsolutePath()); @@ -432,7 +560,12 @@ private void assertThatExportedGtfsMeetsExpectations( val, expectation )); - if (!val.equals(expectation)) { + if (val.isEmpty() && expectation == null) { + // First check that the csv value is an empty string and that the expectation is null. Null + // exported from the database to a csv should round trip into an empty string, so this meets the + // expectation. + break; + } else if (!val.equals(expectation)) { // sometimes there are slight differences in decimal precision in various fields // check if the decimal delta is acceptable if (equalsWithNumericDelta(val, recordExpectation)) continue; @@ -446,7 +579,7 @@ private void assertThatExportedGtfsMeetsExpectations( foundRecord = true; } } - assertThatPersistenceExpectationRecordWasFound(numRecordsSearched, foundRecord); + assertThatPersistenceExpectationRecordWasFound(numRecordsSearched, foundRecord, null); } } @@ -469,17 +602,34 @@ private boolean equalsWithNumericDelta(String val, RecordExpectation recordExpec /** * Helper method to make sure a persistence expectation was actually found after searching through records */ - private void assertThatPersistenceExpectationRecordWasFound(int numRecordsSearched, boolean foundRecord) { + private void assertThatPersistenceExpectationRecordWasFound( + int numRecordsSearched, + boolean foundRecord, + Multimap mismatches + ) { assertThat( "No records found in the ResultSet/CSV file", numRecordsSearched, ComparatorMatcherBuilder.usingNaturalOrdering().greaterThan(0) ); - assertThat( - "The record as defined in the PersistenceExpectation was not found.", - foundRecord, - equalTo(true) - ); 
+ if (mismatches != null) { + for (String field : mismatches.keySet()) { + Collection valuePairs = mismatches.get(field); + for (ValuePair valuePair : valuePairs) { + assertThat( + String.format("The value expected for %s was not found", field), + valuePair.expected, + equalTo(valuePair.found) + ); + } + } + } else { + assertThat( + "The record as defined in the PersistenceExpectation was not found.", + foundRecord, + equalTo(true) + ); + } } /** @@ -617,102 +767,4 @@ private void assertThatPersistenceExpectationRecordWasFound(int numRecordsSearch } ) }; - - /** - * A helper class to verify that data got stored in a particular table. - */ - private class PersistenceExpectation { - public String tableName; - // each persistence expectation has an array of record expectations which all must reference a single row - // if looking for multiple records in the same table, - // create numerous PersistenceExpectations with the same tableName - public RecordExpectation[] recordExpectations; - - - public PersistenceExpectation(String tableName, RecordExpectation[] recordExpectations) { - this.tableName = tableName; - this.recordExpectations = recordExpectations; - } - } - - private enum ExpectedFieldType { - INT, - DOUBLE, STRING - } - - /** - * A helper class to verify that data got stored in a particular record. 
- */ - private class RecordExpectation { - public double acceptedDelta; - public double doubleExpectation; - public String editorExpectation; - public ExpectedFieldType expectedFieldType; - public String fieldName; - public int intExpectation; - public String stringExpectation; - public boolean stringExpectationInCSV = false; - public boolean editorStringExpectation = false; - - public RecordExpectation(String fieldName, int intExpectation) { - this.fieldName = fieldName; - this.expectedFieldType = ExpectedFieldType.INT; - this.intExpectation = intExpectation; - } - - /** - * This extra constructor is a bit hacky in that it is only used for certain records that have - * an int type when stored in the database, but a string type when exported to GTFS - */ - public RecordExpectation(String fieldName, int intExpectation, String stringExpectation) { - this.fieldName = fieldName; - this.expectedFieldType = ExpectedFieldType.INT; - this.intExpectation = intExpectation; - this.stringExpectation = stringExpectation; - this.stringExpectationInCSV = true; - } - - /** - * This extra constructor is a also hacky in that it is only used for records that have - * an int type when stored in the database, and different values in the CSV export depending on - * whether or not it is a snapshot from the editor. 
Currently this only applies to stop_times.stop_sequence - */ - public RecordExpectation(String fieldName, int intExpectation, String stringExpectation, String editorExpectation) { - this.fieldName = fieldName; - this.expectedFieldType = ExpectedFieldType.INT; - this.intExpectation = intExpectation; - this.stringExpectation = stringExpectation; - this.stringExpectationInCSV = true; - this.editorStringExpectation = true; - this.editorExpectation = editorExpectation; - } - - public RecordExpectation(String fieldName, String stringExpectation) { - this.fieldName = fieldName; - this.expectedFieldType = ExpectedFieldType.STRING; - this.stringExpectation = stringExpectation; - } - - public RecordExpectation(String fieldName, double doubleExpectation, double acceptedDelta) { - this.fieldName = fieldName; - this.expectedFieldType = ExpectedFieldType.DOUBLE; - this.doubleExpectation = doubleExpectation; - this.acceptedDelta = acceptedDelta; - } - - public String getStringifiedExpectation(boolean fromEditor) { - if (fromEditor && editorStringExpectation) return editorExpectation; - if (stringExpectationInCSV) return stringExpectation; - switch (expectedFieldType) { - case DOUBLE: - return String.valueOf(doubleExpectation); - case INT: - return String.valueOf(intExpectation); - case STRING: - return stringExpectation; - default: - return null; - } - } - } } diff --git a/src/test/java/com/conveyal/gtfs/loader/JDBCTableWriterTest.java b/src/test/java/com/conveyal/gtfs/loader/JDBCTableWriterTest.java index 44a418b4a..c00c59857 100644 --- a/src/test/java/com/conveyal/gtfs/loader/JDBCTableWriterTest.java +++ b/src/test/java/com/conveyal/gtfs/loader/JDBCTableWriterTest.java @@ -1,11 +1,19 @@ package com.conveyal.gtfs.loader; import com.conveyal.gtfs.TestUtils; +import com.conveyal.gtfs.util.CalendarDTO; import com.conveyal.gtfs.util.FareDTO; import com.conveyal.gtfs.util.FareRuleDTO; import com.conveyal.gtfs.util.FeedInfoDTO; +import com.conveyal.gtfs.util.FrequencyDTO; import 
com.conveyal.gtfs.util.InvalidNamespaceException; +import com.conveyal.gtfs.util.PatternDTO; +import com.conveyal.gtfs.util.PatternStopDTO; import com.conveyal.gtfs.util.RouteDTO; +import com.conveyal.gtfs.util.ShapePointDTO; +import com.conveyal.gtfs.util.StopDTO; +import com.conveyal.gtfs.util.StopTimeDTO; +import com.conveyal.gtfs.util.TripDTO; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -24,6 +32,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.junit.Assert.assertThat; +/** + * This class contains CRUD tests for {@link JdbcTableWriter} (i.e., editing GTFS entities in the RDBMS). Set up + * consists of creating a scratch database and an empty feed snapshot, which is the necessary starting condition + * for building a GTFS feed from scratch. It then runs the various CRUD tests and finishes by dropping the database + * (even if tests fail). + */ public class JDBCTableWriterTest { private static final Logger LOG = LoggerFactory.getLogger(JDBCTableWriterTest.class); @@ -31,6 +45,9 @@ public class JDBCTableWriterTest { private static String testDBName; private static DataSource testDataSource; private static String testNamespace; + private static String simpleServiceId = "1"; + private static String firstStopId = "1"; + private static String lastStopId = "2"; private static final ObjectMapper mapper = new ObjectMapper(); private static JdbcTableWriter createTestTableWriter (Table table) throws InvalidNamespaceException { @@ -38,8 +55,8 @@ private static JdbcTableWriter createTestTableWriter (Table table) throws Invali } @BeforeClass - public static void setUpClass() throws SQLException { - // create a new database + public static void setUpClass() throws SQLException, IOException, InvalidNamespaceException { + // Create a new database testDBName = TestUtils.generateNewDB(); String dbConnectionUrl = String.format("jdbc:postgresql://localhost/%s", testDBName); testDataSource = 
createDataSource (dbConnectionUrl, null, null); @@ -51,10 +68,13 @@ public static void setUpClass() throws SQLException { "snapshot_of varchar)"); connection.commit(); LOG.info("feeds table created"); - - // create an empty snapshot to create a new namespace and all the tables + // Create an empty snapshot to create a new namespace and all the tables FeedLoadResult result = makeSnapshot(null, testDataSource); testNamespace = result.uniqueIdentifier; + // Create a service calendar and two stops, both of which are necessary to perform pattern and trip tests. + createWeekdayCalendar(simpleServiceId, "20180103", "20180104"); + createSimpleStop(firstStopId, "First Stop", 34.2222, -87.333); + createSimpleStop(lastStopId, "Last Stop", 34.2233, -87.334); } @Test @@ -120,6 +140,10 @@ public void canCreateUpdateAndDeleteFeedInfoEntities() throws IOException, SQLEx )); } + /** + * Ensure that potentially malicious SQL injection is sanitized properly during create operations. + * TODO: We might want to perform this check on multiple entities and for update and/or delete operations.
+ */ @Test public void canPreventSQLInjection() throws IOException, SQLException, InvalidNamespaceException { // create new object to be saved @@ -229,14 +253,16 @@ public void canCreateUpdateAndDeleteFares() throws IOException, SQLException, In )); } - private void assertThatSqlQueryYieldsZeroRows(String sql) throws SQLException { + void assertThatSqlQueryYieldsZeroRows(String sql) throws SQLException { assertThatSqlQueryYieldsRowCount(sql, 0); } private void assertThatSqlQueryYieldsRowCount(String sql, int expectedRowCount) throws SQLException { LOG.info(sql); - ResultSet resultSet = testDataSource.getConnection().prepareStatement(sql).executeQuery(); - assertThat(resultSet.getFetchSize(), equalTo(expectedRowCount)); + int recordCount = 0; + ResultSet rs = testDataSource.getConnection().prepareStatement(sql).executeQuery(); + while (rs.next()) recordCount++; + assertThat("Records matching query should equal expected count.", recordCount, equalTo(expectedRowCount)); } @Test @@ -246,23 +272,8 @@ public void canCreateUpdateAndDeleteRoutes() throws IOException, SQLException, I final Class routeDTOClass = RouteDTO.class; // create new object to be saved - RouteDTO routeInput = new RouteDTO(); String routeId = "500"; - routeInput.route_id = routeId; - routeInput.agency_id = "RTA"; - // Empty value should be permitted for transfers and transfer_duration - routeInput.route_short_name = "500"; - routeInput.route_long_name = "Hollingsworth"; - routeInput.route_type = 3; - - // convert object to json and save it - JdbcTableWriter createTableWriter = createTestTableWriter(routeTable); - String createOutput = createTableWriter.create(mapper.writeValueAsString(routeInput), true); - LOG.info("create {} output:", routeTable.name); - LOG.info(createOutput); - - // parse output - RouteDTO createdRoute = mapper.readValue(createOutput, routeDTOClass); + RouteDTO createdRoute = createSimpleTestRoute(routeId, "RTA", "500", "Hollingsworth", 3); // make sure saved data matches expected 
data assertThat(createdRoute.route_id, equalTo(routeId)); @@ -307,6 +318,169 @@ public void canCreateUpdateAndDeleteRoutes() throws IOException, SQLException, I )); } + /** + * Create and store a simple route for testing. + */ + private static RouteDTO createSimpleTestRoute(String routeId, String agencyId, String shortName, String longName, int routeType) throws InvalidNamespaceException, IOException, SQLException { + RouteDTO input = new RouteDTO(); + input.route_id = routeId; + input.agency_id = agencyId; + // Empty value should be permitted for transfers and transfer_duration + input.route_short_name = shortName; + input.route_long_name = longName; + input.route_type = routeType; + // convert object to json and save it + JdbcTableWriter createTableWriter = createTestTableWriter(Table.ROUTES); + String output = createTableWriter.create(mapper.writeValueAsString(input), true); + LOG.info("create {} output:", Table.ROUTES.name); + LOG.info(output); + // parse output + return mapper.readValue(output, RouteDTO.class); + } + + /** + * Creates a pattern by first creating a route and then a pattern for that route. 
+ */ + private static PatternDTO createSimpleRouteAndPattern(String routeId, String patternId, String name) throws InvalidNamespaceException, SQLException, IOException { + // Create new route + createSimpleTestRoute(routeId, "RTA", "500", "Hollingsworth", 3); + // Create new pattern for route + PatternDTO input = new PatternDTO(); + input.pattern_id = patternId; + input.route_id = routeId; + input.name = name; + input.use_frequency = 0; + input.shapes = new ShapePointDTO[]{}; + input.pattern_stops = new PatternStopDTO[]{}; + // Write the pattern to the database + JdbcTableWriter createPatternWriter = createTestTableWriter(Table.PATTERNS); + String output = createPatternWriter.create(mapper.writeValueAsString(input), true); + LOG.info("create {} output:", Table.PATTERNS.name); + LOG.info(output); + // Parse output + return mapper.readValue(output, PatternDTO.class); + } + + /** + * Test that a frequency trip entry CANNOT be added for a timetable-based pattern. Expects an exception to be thrown. + */ + @Test(expected = IllegalStateException.class) + public void cannotCreateFrequencyForTimetablePattern() throws InvalidNamespaceException, IOException, SQLException { + PatternDTO simplePattern = createSimpleRouteAndPattern("900", "8", "The Loop"); + TripDTO tripInput = constructFrequencyTrip(simplePattern.pattern_id, simplePattern.route_id, 6 * 60 * 60); + JdbcTableWriter createTripWriter = createTestTableWriter(Table.TRIPS); + createTripWriter.create(mapper.writeValueAsString(tripInput), true); + } + + /** + * Checks that creating a frequency trip functions properly. This also updates the pattern to include pattern stops, + * which is a prerequisite for creating a frequency trip with stop times. + */ + @Test + public void canCreateUpdateAndDeleteFrequencyTripForFrequencyPattern() throws IOException, SQLException, InvalidNamespaceException { + // Store Table and Class values for use in test. 
+ final Table tripsTable = Table.TRIPS; + int startTime = 6 * 60 * 60; + PatternDTO simplePattern = createSimpleRouteAndPattern("1000", "9", "The Line"); + TripDTO tripInput = constructFrequencyTrip(simplePattern.pattern_id, simplePattern.route_id, startTime); + JdbcTableWriter createTripWriter = createTestTableWriter(tripsTable); + // Update pattern with pattern stops, set to use frequencies, and TODO shape points + JdbcTableWriter patternUpdater = createTestTableWriter(Table.PATTERNS); + simplePattern.use_frequency = 1; + simplePattern.pattern_stops = new PatternStopDTO[]{ + new PatternStopDTO(simplePattern.pattern_id, firstStopId, 0), + new PatternStopDTO(simplePattern.pattern_id, lastStopId, 1) + }; + String updatedPatternOutput = patternUpdater.update(simplePattern.id, mapper.writeValueAsString(simplePattern), true); + LOG.info("Updated pattern output: {}", updatedPatternOutput); + // Create new trip for the pattern + String createTripOutput = createTripWriter.create(mapper.writeValueAsString(tripInput), true); + TripDTO createdTrip = mapper.readValue(createTripOutput, TripDTO.class); + // Update trip + // TODO: Add update and delete tests for updating pattern stops, stop_times, and frequencies. 
+ String updatedTripId = "100A"; + createdTrip.trip_id = updatedTripId; + JdbcTableWriter updateTripWriter = createTestTableWriter(tripsTable); + String updateTripOutput = updateTripWriter.update(tripInput.id, mapper.writeValueAsString(createdTrip), true); + TripDTO updatedTrip = mapper.readValue(updateTripOutput, TripDTO.class); + // Check that saved data matches expected data + assertThat(updatedTrip.frequencies[0].start_time, equalTo(startTime)); + assertThat(updatedTrip.trip_id, equalTo(updatedTripId)); + // Delete trip record + JdbcTableWriter deleteTripWriter = createTestTableWriter(tripsTable); + int deleteOutput = deleteTripWriter.delete( + createdTrip.id, + true + ); + LOG.info("deleted {} records from {}", deleteOutput, tripsTable.name); + // Check that record does not exist in DB + assertThatSqlQueryYieldsZeroRows( + String.format( + "select * from %s.%s where id=%d", + testNamespace, + tripsTable.name, + createdTrip.id + )); + } + + /** + * Construct (without writing to the database) a trip with a frequency entry. + */ + private TripDTO constructFrequencyTrip(String patternId, String routeId, int startTime) { + TripDTO tripInput = new TripDTO(); + tripInput.pattern_id = patternId; + tripInput.route_id = routeId; + tripInput.service_id = simpleServiceId; + tripInput.stop_times = new StopTimeDTO[]{ + new StopTimeDTO(firstStopId, 0, 0, 0), + new StopTimeDTO(lastStopId, 60, 60, 1) + }; + FrequencyDTO frequency = new FrequencyDTO(); + frequency.start_time = startTime; + frequency.end_time = 9 * 60 * 60; + frequency.headway_secs = 15 * 60; + tripInput.frequencies = new FrequencyDTO[]{frequency}; + return tripInput; + } + + /** + * Create and store a simple stop entity. 
+ */ + private static StopDTO createSimpleStop(String stopId, String stopName, double latitude, double longitude) throws InvalidNamespaceException, IOException, SQLException { + JdbcTableWriter createStopWriter = new JdbcTableWriter(Table.STOPS, testDataSource, testNamespace); + StopDTO input = new StopDTO(); + input.stop_id = stopId; + input.stop_name = stopName; + input.stop_lat = latitude; + input.stop_lon = longitude; + String output = createStopWriter.create(mapper.writeValueAsString(input), true); + LOG.info("create {} output:", Table.STOPS.name); + LOG.info(output); + return mapper.readValue(output, StopDTO.class); + } + + /** + * Create and store a simple calendar that runs on each weekday. + */ + private static CalendarDTO createWeekdayCalendar(String serviceId, String startDate, String endDate) throws IOException, SQLException, InvalidNamespaceException { + JdbcTableWriter createCalendarWriter = new JdbcTableWriter(Table.CALENDAR, testDataSource, testNamespace); + CalendarDTO calendarInput = new CalendarDTO(); + calendarInput.service_id = serviceId; + calendarInput.monday = 1; + calendarInput.tuesday = 1; + calendarInput.wednesday = 1; + calendarInput.thursday = 1; + calendarInput.friday = 1; + calendarInput.saturday = 0; + calendarInput.sunday = 0; + calendarInput.start_date = startDate; + calendarInput.end_date = endDate; + String output = createCalendarWriter.create(mapper.writeValueAsString(calendarInput), true); + LOG.info("create {} output:", Table.CALENDAR.name); + LOG.info(output); + return mapper.readValue(output, CalendarDTO.class); + } + @AfterClass public static void tearDownClass() { TestUtils.dropDB(testDBName); diff --git a/src/test/java/com/conveyal/gtfs/storage/ExpectedFieldType.java b/src/test/java/com/conveyal/gtfs/storage/ExpectedFieldType.java new file mode 100644 index 000000000..1eb604f04 --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/storage/ExpectedFieldType.java @@ -0,0 +1,6 @@ +package com.conveyal.gtfs.storage; + +public 
enum ExpectedFieldType { + INT, + DOUBLE, STRING +} diff --git a/src/test/java/com/conveyal/gtfs/storage/PersistenceExpectation.java b/src/test/java/com/conveyal/gtfs/storage/PersistenceExpectation.java new file mode 100644 index 000000000..1c1c1be7d --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/storage/PersistenceExpectation.java @@ -0,0 +1,26 @@ +package com.conveyal.gtfs.storage; + +import com.conveyal.gtfs.GTFSTest; + +/** + * A helper class to verify that data got stored in a particular table. + */ +public class PersistenceExpectation { + public String tableName; + /** + * Each persistence expectation has an array of record expectations which all must reference a single row. + * If looking for multiple records in the same table, create numerous PersistenceExpectations with the same + * tableName. + */ + public RecordExpectation[] recordExpectations; + + + public PersistenceExpectation(String tableName, RecordExpectation[] recordExpectations) { + this.tableName = tableName; + this.recordExpectations = recordExpectations; + } + + public static PersistenceExpectation[] list (PersistenceExpectation... expectations) { + return expectations; + } +} diff --git a/src/test/java/com/conveyal/gtfs/storage/RecordExpectation.java b/src/test/java/com/conveyal/gtfs/storage/RecordExpectation.java new file mode 100644 index 000000000..b3394f27f --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/storage/RecordExpectation.java @@ -0,0 +1,81 @@ +package com.conveyal.gtfs.storage; + +/** + * A helper class to verify that data got stored in a particular record. 
+ */ +public class RecordExpectation { + public double acceptedDelta; + public double doubleExpectation; + public String editorExpectation; + public ExpectedFieldType expectedFieldType; + public String fieldName; + public int intExpectation; + public String stringExpectation; + public boolean stringExpectationInCSV = false; + public boolean editorStringExpectation = false; + + public RecordExpectation(String fieldName, int intExpectation) { + this.fieldName = fieldName; + this.expectedFieldType = ExpectedFieldType.INT; + this.intExpectation = intExpectation; + } + + public static RecordExpectation[] list(RecordExpectation... expectations) { + return expectations; + } + + /** + * This extra constructor is a bit hacky in that it is only used for certain records that have + * an int type when stored in the database, but a string type when exported to GTFS + */ + public RecordExpectation(String fieldName, int intExpectation, String stringExpectation) { + this.fieldName = fieldName; + this.expectedFieldType = ExpectedFieldType.INT; + this.intExpectation = intExpectation; + this.stringExpectation = stringExpectation; + this.stringExpectationInCSV = true; + } + + /** + * This extra constructor is also a bit hacky in that it is only used for records that have + * an int type when stored in the database, and different values in the CSV export depending on + * whether or not it is a snapshot from the editor.
Currently this only applies to stop_times.stop_sequence + */ + public RecordExpectation(String fieldName, int intExpectation, String stringExpectation, String editorExpectation) { + this.fieldName = fieldName; + this.expectedFieldType = ExpectedFieldType.INT; + this.intExpectation = intExpectation; + this.stringExpectation = stringExpectation; + this.stringExpectationInCSV = true; + this.editorStringExpectation = true; + this.editorExpectation = editorExpectation; + } + + public RecordExpectation(String fieldName, String stringExpectation) { + this.fieldName = fieldName; + this.expectedFieldType = ExpectedFieldType.STRING; + this.stringExpectation = stringExpectation; + } + + public RecordExpectation(String fieldName, double doubleExpectation, double acceptedDelta) { + this.fieldName = fieldName; + this.expectedFieldType = ExpectedFieldType.DOUBLE; + this.doubleExpectation = doubleExpectation; + this.acceptedDelta = acceptedDelta; + } + + public String getStringifiedExpectation(boolean fromEditor) { + if (fromEditor && editorStringExpectation) return editorExpectation; + if (stringExpectationInCSV) return stringExpectation; + switch (expectedFieldType) { + case DOUBLE: + return String.valueOf(doubleExpectation); + case INT: + return String.valueOf(intExpectation); + case STRING: + return stringExpectation; + default: + return null; + } + } +} diff --git a/src/test/java/com/conveyal/gtfs/util/CalendarDTO.java b/src/test/java/com/conveyal/gtfs/util/CalendarDTO.java new file mode 100644 index 000000000..169ffa5ac --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/CalendarDTO.java @@ -0,0 +1,16 @@ +package com.conveyal.gtfs.util; + +public class CalendarDTO { + public Integer id; + public String service_id; + public Integer monday; + public Integer tuesday; + public Integer wednesday; + public Integer thursday; + public Integer friday; + public Integer saturday; + public Integer sunday; + public String start_date; + public String end_date; + public String 
description; +} diff --git a/src/test/java/com/conveyal/gtfs/util/FrequencyDTO.java b/src/test/java/com/conveyal/gtfs/util/FrequencyDTO.java new file mode 100644 index 000000000..a59617b4a --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/FrequencyDTO.java @@ -0,0 +1,9 @@ +package com.conveyal.gtfs.util; + +public class FrequencyDTO { + public String trip_id; + public Integer start_time; + public Integer end_time; + public Integer headway_secs; + public Integer exact_times; +} diff --git a/src/test/java/com/conveyal/gtfs/util/PatternDTO.java b/src/test/java/com/conveyal/gtfs/util/PatternDTO.java new file mode 100644 index 000000000..eb78e516d --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/PatternDTO.java @@ -0,0 +1,13 @@ +package com.conveyal.gtfs.util; + +public class PatternDTO { + public Integer id; + public String pattern_id; + public String shape_id; + public String route_id; + public Integer direction_id; + public Integer use_frequency; + public String name; + public PatternStopDTO[] pattern_stops; + public ShapePointDTO[] shapes; +} \ No newline at end of file diff --git a/src/test/java/com/conveyal/gtfs/util/PatternStopDTO.java b/src/test/java/com/conveyal/gtfs/util/PatternStopDTO.java new file mode 100644 index 000000000..b6daf838d --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/PatternStopDTO.java @@ -0,0 +1,20 @@ +package com.conveyal.gtfs.util; + +public class PatternStopDTO { + public Integer id; + public String pattern_id; + public String stop_id; + public Integer default_travel_time; + public Integer default_dwell_time; + public Double shape_dist_traveled; + public Integer drop_off_type; + public Integer pickup_type; + public Integer stop_sequence; + public Integer timepoint; + + public PatternStopDTO (String patternId, String stopId, int stopSequence) { + pattern_id = patternId; + stop_id = stopId; + stop_sequence = stopSequence; + } +} diff --git a/src/test/java/com/conveyal/gtfs/util/ShapePointDTO.java 
b/src/test/java/com/conveyal/gtfs/util/ShapePointDTO.java new file mode 100644 index 000000000..05ed6d6da --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/ShapePointDTO.java @@ -0,0 +1,6 @@ +package com.conveyal.gtfs.util; + +// TODO add fields +public class ShapePointDTO { + +} diff --git a/src/test/java/com/conveyal/gtfs/util/StopDTO.java b/src/test/java/com/conveyal/gtfs/util/StopDTO.java new file mode 100644 index 000000000..4ca821e59 --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/StopDTO.java @@ -0,0 +1,17 @@ +package com.conveyal.gtfs.util; + +public class StopDTO { + public Integer id; + public String stop_id; + public String stop_name; + public String stop_code; + public String stop_desc; + public Double stop_lon; + public Double stop_lat; + public String zone_id; + public String stop_url; + public String stop_timezone; + public String parent_station; + public Integer location_type; + public Integer wheelchair_boarding; +} diff --git a/src/test/java/com/conveyal/gtfs/util/StopTimeDTO.java b/src/test/java/com/conveyal/gtfs/util/StopTimeDTO.java new file mode 100644 index 000000000..bfbc56424 --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/StopTimeDTO.java @@ -0,0 +1,26 @@ +package com.conveyal.gtfs.util; + +public class StopTimeDTO { + public String trip_id; + public String stop_id; + public Integer stop_sequence; + public Integer arrival_time; + public Integer departure_time; + public String stop_headsign; + public Integer timepoint; + public Integer drop_off_type; + public Integer pickup_type; + public Double shape_dist_traveled; + + /** + * Empty constructor for deserialization + */ + public StopTimeDTO () {} + + public StopTimeDTO (String stopId, Integer arrivalTime, Integer departureTime, Integer stopSequence) { + stop_id = stopId; + arrival_time = arrivalTime; + departure_time = departureTime; + stop_sequence = stopSequence; + } +} diff --git a/src/test/java/com/conveyal/gtfs/util/TripDTO.java 
b/src/test/java/com/conveyal/gtfs/util/TripDTO.java new file mode 100644 index 000000000..ccd43a7e0 --- /dev/null +++ b/src/test/java/com/conveyal/gtfs/util/TripDTO.java @@ -0,0 +1,18 @@ +package com.conveyal.gtfs.util; + +public class TripDTO { + public Integer id; + public String trip_id; + public String trip_headsign; + public String trip_short_name; + public String block_id; + public Integer direction_id; + public String route_id; + public String service_id; + public Integer wheelchair_accessible; + public Integer bikes_allowed; + public String shape_id; + public String pattern_id; + public StopTimeDTO[] stop_times; + public FrequencyDTO[] frequencies; +} diff --git a/src/test/java/com/conveyal/gtfs/util/UtilTest.java b/src/test/java/com/conveyal/gtfs/util/UtilTest.java index c2c6ccd36..982ea31a4 100644 --- a/src/test/java/com/conveyal/gtfs/util/UtilTest.java +++ b/src/test/java/com/conveyal/gtfs/util/UtilTest.java @@ -15,8 +15,11 @@ */ public class UtilTest { - @Before // setup() - public void before() throws Exception { + /** + * Ensure locale (for {@link #canHumanize()}) is set correctly before doing anything else. 
+ */ + @Before + public void before() { java.util.Locale.setDefault(java.util.Locale.US); } diff --git a/src/test/resources/fake-agency-bad-calendar-date/agency.txt b/src/test/resources/fake-agency-bad-calendar-date/agency.txt new file mode 100755 index 000000000..a916ce91b --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/agency.txt @@ -0,0 +1,2 @@ +agency_id,agency_name,agency_url,agency_lang,agency_phone,agency_email,agency_timezone,agency_fare_url,agency_branding_url +1,Fake Transit,,,,,America/Los_Angeles,, diff --git a/src/test/resources/fake-agency-bad-calendar-date/calendar.txt b/src/test/resources/fake-agency-bad-calendar-date/calendar.txt new file mode 100755 index 000000000..03b2867f7 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/calendar.txt @@ -0,0 +1,2 @@ +service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday,start_date,end_date +04100312-8fe1-46a5-a9f2-556f39478f57,1,1,1,1,1,1,1,bad_date!,20170917 diff --git a/src/test/resources/fake-agency-bad-calendar-date/calendar_dates.txt b/src/test/resources/fake-agency-bad-calendar-date/calendar_dates.txt new file mode 100755 index 000000000..546ad12b5 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/calendar_dates.txt @@ -0,0 +1,3 @@ +service_id,date,exception_type +04100312-8fe1-46a5-a9f2-556f39478f57,bad_date,2 +04100312-8fe1-46a5-a9f2-556f39478f57,bad_date2,1 \ No newline at end of file diff --git a/src/test/resources/fake-agency-bad-calendar-date/fare_attributes.txt b/src/test/resources/fake-agency-bad-calendar-date/fare_attributes.txt new file mode 100755 index 000000000..3173d016d --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/fare_attributes.txt @@ -0,0 +1,2 @@ +fare_id,price,currency_type,payment_method,transfers,transfer_duration +route_based_fare,1.23,USD,0,0,0 \ No newline at end of file diff --git a/src/test/resources/fake-agency-bad-calendar-date/fare_rules.txt 
b/src/test/resources/fake-agency-bad-calendar-date/fare_rules.txt new file mode 100755 index 000000000..05d2aadf8 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/fare_rules.txt @@ -0,0 +1,2 @@ +fare_id,route_id,origin_id,destination_id,contains_id +route_based_fare,1,,, \ No newline at end of file diff --git a/src/test/resources/fake-agency-bad-calendar-date/feed_info.txt b/src/test/resources/fake-agency-bad-calendar-date/feed_info.txt new file mode 100644 index 000000000..b617faf49 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/feed_info.txt @@ -0,0 +1,2 @@ +feed_publisher_name,feed_publisher_url,feed_lang,feed_version +Conveyal,http://www.conveyal.com,en,1.0 \ No newline at end of file diff --git a/src/test/resources/fake-agency-bad-calendar-date/frequencies.txt b/src/test/resources/fake-agency-bad-calendar-date/frequencies.txt new file mode 100755 index 000000000..9baceff3a --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/frequencies.txt @@ -0,0 +1,2 @@ +trip_id,start_time,end_time,headway_secs,exact_times +frequency-trip,08:00:00,09:00:00,1800,0 \ No newline at end of file diff --git a/src/test/resources/fake-agency-bad-calendar-date/routes.txt b/src/test/resources/fake-agency-bad-calendar-date/routes.txt new file mode 100755 index 000000000..35ea7aa67 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/routes.txt @@ -0,0 +1,2 @@ +agency_id,route_id,route_short_name,route_long_name,route_desc,route_type,route_url,route_color,route_text_color,route_branding_url +1,1,1,Route 1,,3,,7CE6E7,FFFFFF, diff --git a/src/test/resources/fake-agency-bad-calendar-date/shapes.txt b/src/test/resources/fake-agency-bad-calendar-date/shapes.txt new file mode 100755 index 000000000..3f2e3fd13 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/shapes.txt @@ -0,0 +1,8 @@ +shape_id,shape_pt_lat,shape_pt_lon,shape_pt_sequence,shape_dist_traveled 
+5820f377-f947-4728-ac29-ac0102cbc34e,37.0612132,-122.0074332,1,0.0000000 +5820f377-f947-4728-ac29-ac0102cbc34e,37.0611720,-122.0075000,2,7.4997067 +5820f377-f947-4728-ac29-ac0102cbc34e,37.0613590,-122.0076830,3,33.8739075 +5820f377-f947-4728-ac29-ac0102cbc34e,37.0608780,-122.0082780,4,109.0402932 +5820f377-f947-4728-ac29-ac0102cbc34e,37.0603590,-122.0088280,5,184.6078298 +5820f377-f947-4728-ac29-ac0102cbc34e,37.0597610,-122.0093540,6,265.8053023 +5820f377-f947-4728-ac29-ac0102cbc34e,37.0590660,-122.0099190,7,357.8617018 diff --git a/src/test/resources/fake-agency-bad-calendar-date/stop_times.txt b/src/test/resources/fake-agency-bad-calendar-date/stop_times.txt new file mode 100755 index 000000000..59a2623b9 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/stop_times.txt @@ -0,0 +1,7 @@ +trip_id,arrival_time,departure_time,stop_id,stop_sequence,stop_headsign,pickup_type,drop_off_type,shape_dist_traveled,timepoint +a30277f8-e50a-4a85-9141-b1e0da9d429d,07:00:00,07:00:00,4u6g,1,,0,0,0.0000000, +a30277f8-e50a-4a85-9141-b1e0da9d429d,07:01:00,07:01:00,johv,2,,0,0,341.4491961, +2,09:00:00,09:00:00,4u6g,1,,0,0,0.0000000, +2,09:01:00,09:01:00,johv,2,,0,0,341.4491961, +frequency-trip,08:00:00,08:00:00,4u6g,1,,0,0,0.0000000, +frequency-trip,08:01:00,08:01:00,johv,2,,0,0,341.4491961, diff --git a/src/test/resources/fake-agency-bad-calendar-date/stops.txt b/src/test/resources/fake-agency-bad-calendar-date/stops.txt new file mode 100755 index 000000000..8e71f7b2e --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/stops.txt @@ -0,0 +1,3 @@ +stop_id,stop_code,stop_name,stop_desc,stop_lat,stop_lon,zone_id,stop_url,location_type,parent_station,stop_timezone,wheelchair_boarding +4u6g,,Butler Ln,,37.0612132,-122.0074332,,,0,,, +johv,,Scotts Valley Dr & Victor Sq,,37.0590172,-122.0096058,,,0,,, diff --git a/src/test/resources/fake-agency-bad-calendar-date/transfers.txt b/src/test/resources/fake-agency-bad-calendar-date/transfers.txt new file mode 
100755 index 000000000..357103c47 --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/transfers.txt @@ -0,0 +1 @@ +from_stop_id,to_stop_id,transfer_type,min_transfer_time diff --git a/src/test/resources/fake-agency-bad-calendar-date/trips.txt b/src/test/resources/fake-agency-bad-calendar-date/trips.txt new file mode 100755 index 000000000..f0ecc68ae --- /dev/null +++ b/src/test/resources/fake-agency-bad-calendar-date/trips.txt @@ -0,0 +1,4 @@ +route_id,trip_id,trip_headsign,trip_short_name,direction_id,block_id,shape_id,bikes_allowed,wheelchair_accessible,service_id +1,a30277f8-e50a-4a85-9141-b1e0da9d429d,,,0,,5820f377-f947-4728-ac29-ac0102cbc34e,0,0,04100312-8fe1-46a5-a9f2-556f39478f57 +1,2,,,0,,5820f377-f947-4728-ac29-ac0102cbc34e,0,0,a +1,frequency-trip,,,0,,5820f377-f947-4728-ac29-ac0102cbc34e,0,0,04100312-8fe1-46a5-a9f2-556f39478f57 \ No newline at end of file