diff --git a/src/main/java/com/conveyal/analysis/components/BackendComponents.java b/src/main/java/com/conveyal/analysis/components/BackendComponents.java
index 9a7270a1b..706c454de 100644
--- a/src/main/java/com/conveyal/analysis/components/BackendComponents.java
+++ b/src/main/java/com/conveyal/analysis/components/BackendComponents.java
@@ -7,6 +7,7 @@
 import com.conveyal.analysis.controllers.BrokerController;
 import com.conveyal.analysis.controllers.BundleController;
 import com.conveyal.analysis.controllers.DataSourceController;
+import com.conveyal.analysis.controllers.DatabaseController;
 import com.conveyal.analysis.controllers.GtfsController;
 import com.conveyal.analysis.controllers.HttpController;
 import com.conveyal.analysis.controllers.OpportunityDatasetController;
@@ -96,6 +97,7 @@ public List<HttpController> standardHttpControllers () {
             new BrokerController(broker, eventBus),
             new UserActivityController(taskScheduler),
             new DataSourceController(fileStorage, database, taskScheduler, censusExtractor),
+            new DatabaseController(database),
             new WorkerProxyController(broker)
         );
     }
diff --git a/src/main/java/com/conveyal/analysis/components/HttpApi.java b/src/main/java/com/conveyal/analysis/components/HttpApi.java
index 65eec6bb3..f4bd8daf0 100644
--- a/src/main/java/com/conveyal/analysis/components/HttpApi.java
+++ b/src/main/java/com/conveyal/analysis/components/HttpApi.java
@@ -78,6 +78,21 @@ private spark.Service configureSparkService () {
         LOG.info("Analysis server will listen for HTTP connections on port {}.", config.serverPort());
         spark.Service sparkService = spark.Service.ignite();
         sparkService.port(config.serverPort());
+        //sparkService.threadPool(1000);
+
+        // Set up TLS (HTTPS). Unfortunately Spark HTTP only accepts String paths to keystore files.
+        // We want to build a KeyStore instance programmatically from PEM files. Digging through the Spark
+        // source code, it seems extremely convoluted to directly inject a KeyStore instance.
+        // sparkService.secure();
+        // Usage examples at:
+        // https://github.com/Hakky54/sslcontext-kickstart/blob/master/sslcontext-kickstart-for-pem/src/test/java/nl/altindag/ssl/util/PemUtilsShould.java
+        // Dependency:
+        // Tools to load PEM files into a Java KeyStore (so we don't have to use the arcane Java keytool):
+        // implementation 'io.github.hakky54:sslcontext-kickstart-for-pem:7.4.1'
+
+        // Serve up UI files. staticFileLocation("vector-client") inside the classpath will not see changes to files.
+        // Note that serving the UI from the same origin as the API eliminates the need for CORS headers, and with
+        // them the latency of CORS preflight requests.
+        sparkService.externalStaticFileLocation("../r5/src/main/resources/vector-client");
 
         // Specify actions to take before the main logic of handling each HTTP request.
         sparkService.before((req, res) -> {
@@ -87,10 +102,10 @@ private spark.Service configureSparkService () {
             // Set CORS headers to allow requests to this API server from a frontend hosted on a different domain.
             // This used to be hardwired to Access-Control-Allow-Origin: * but that leaves the server open to XSRF
             // attacks when authentication is disabled (e.g. when running locally).
-            res.header("Access-Control-Allow-Origin", config.allowOrigin());
-            // For caching, signal to the browser that responses may be different based on origin.
-            // TODO clarify why this is important, considering that normally all requests come from the same origin.
- res.header("Vary", "Origin"); +// res.header("Access-Control-Allow-Origin", config.allowOrigin()); +// // For caching, signal to the browser that responses may be different based on origin. +// // TODO clarify why this is important, considering that normally all requests come from the same origin. +// res.header("Vary", "Origin"); // The default MIME type is JSON. This will be overridden by the few controllers that do not return JSON. res.type("application/json"); @@ -121,17 +136,17 @@ private spark.Service configureSparkService () { // Handle CORS preflight requests (which are OPTIONS requests). // See comment above about Access-Control-Allow-Origin - sparkService.options("/*", (req, res) -> { - // Cache the preflight response for up to one day (the maximum allowed by browsers) - res.header("Access-Control-Max-Age", "86400"); - res.header("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"); - // Allowing credentials is necessary to send an Authorization header - res.header("Access-Control-Allow-Credentials", "true"); - res.header("Access-Control-Allow-Headers", "Accept,Authorization,Content-Type,Origin," + - "X-Requested-With,Content-Length,X-Conveyal-Access-Group" - ); - return "OK"; - }); +// sparkService.options("/*", (req, res) -> { +// // Cache the preflight response for up to one day (the maximum allowed by browsers) +// res.header("Access-Control-Max-Age", "86400"); +// res.header("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"); +// // Allowing credentials is necessary to send an Authorization header +// res.header("Access-Control-Allow-Credentials", "true"); +// res.header("Access-Control-Allow-Headers", "Accept,Authorization,Content-Type,Origin," + +// "X-Requested-With,Content-Length,X-Conveyal-Access-Group" +// ); +// return "OK"; +// }); // Allow client to fetch information about the backend build version. sparkService.get( diff --git a/src/main/java/com/conveyal/analysis/components/LocalBackendComponents.java b/src/main/java/com/conveyal/analysis/components/LocalBackendComponents.java index 4de8e3098..f9b044b48 100644 --- a/src/main/java/com/conveyal/analysis/components/LocalBackendComponents.java +++ b/src/main/java/com/conveyal/analysis/components/LocalBackendComponents.java @@ -4,6 +4,7 @@ import com.conveyal.analysis.components.broker.Broker; import com.conveyal.analysis.components.eventbus.ErrorLogger; import com.conveyal.analysis.components.eventbus.EventBus; +import com.conveyal.analysis.controllers.AuthTokenController; import com.conveyal.analysis.controllers.HttpController; import com.conveyal.analysis.controllers.LocalFilesController; import com.conveyal.analysis.grids.SeamlessCensusGridExtractor; @@ -31,7 +32,8 @@ public LocalBackendComponents () { // New (October 2019) DB layer, this should progressively replace the Persistence class database = new AnalysisDB(config); eventBus = new EventBus(taskScheduler); - authentication = new LocalAuthentication(); + final TokenAuthentication tokenAuthentication = new TokenAuthentication(database); + authentication = tokenAuthentication; // TODO add nested LocalWorkerComponents here, to reuse some components, and pass it into the LocalWorkerLauncher? workerLauncher = new LocalWorkerLauncher(config, fileStorage, gtfsCache, osmCache); broker = new Broker(config, fileStorage, eventBus, workerLauncher); @@ -39,6 +41,7 @@ public LocalBackendComponents () { // Instantiate the HttpControllers last, when all the components except the HttpApi are already created. 
         List<HttpController> httpControllers = standardHttpControllers();
         httpControllers.add(new LocalFilesController(fileStorage));
+        httpControllers.add(new AuthTokenController(tokenAuthentication));
         httpApi = new HttpApi(fileStorage, authentication, eventBus, config, httpControllers);
         // compute = new LocalCompute();
         // persistence = persistence(local_Mongo)
diff --git a/src/main/java/com/conveyal/analysis/components/TokenAuthentication.java b/src/main/java/com/conveyal/analysis/components/TokenAuthentication.java
new file mode 100644
index 000000000..b7636113b
--- /dev/null
+++ b/src/main/java/com/conveyal/analysis/components/TokenAuthentication.java
@@ -0,0 +1,165 @@
+package com.conveyal.analysis.components;
+
+import com.conveyal.analysis.AnalysisServerException;
+import com.conveyal.analysis.UserPermissions;
+import com.conveyal.analysis.persistence.AnalysisDB;
+import com.mongodb.client.MongoCollection;
+import org.bson.Document;
+import org.bson.types.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spark.Request;
+
+import javax.crypto.SecretKeyFactory;
+import javax.crypto.spec.PBEKeySpec;
+import java.lang.invoke.MethodHandles;
+import java.security.SecureRandom;
+import java.security.spec.KeySpec;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import static com.conveyal.analysis.AnalysisServerException.Type.UNAUTHORIZED;
+import static com.mongodb.client.model.Filters.eq;
+
+/**
+ * Simple bearer token authentication storing hashed passwords in the database.
+ * Allows direct management of users and permissions.
+ */
+public class TokenAuthentication implements Authentication {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+    private final MongoCollection<Document> users;
+
+    /**
+     * Bundles UserPermissions together with a last read time to allow expiry.
+     */
+    private static class TokenValue {
+        long lastUsed = System.currentTimeMillis();
+        final UserPermissions userPermissions;
+        public TokenValue(UserPermissions userPermissions) {
+            this.userPermissions = userPermissions;
+        }
+    }
+
+    private Map<String, TokenValue> userForToken = new HashMap<>();
+
+    public TokenAuthentication (AnalysisDB database) {
+        // TODO verify that sharing a MongoCollection across threads is safe
+        this.users = database.getBsonCollection("users");
+    }
+
+    @Override
+    public UserPermissions authenticate(Request request) {
+        String token = request.headers("authorization");
+        // Some clients, such as MapboxGL, do not make it easy to add headers, so we also accept the token in a
+        // query parameter. The MapboxGL transformUrl setting seems to be missing from recent versions of the library.
+        if (token == null) {
+            token = request.queryParams("token");
+        }
+        if (token == null) {
+            throw new AnalysisServerException(UNAUTHORIZED, "Authorization token missing.", 401);
+        }
+        if ("sesame".equalsIgnoreCase(token)) {
+            return new UserPermissions("local", true, "local");
+        }
+        UserPermissions userPermissions = userForToken(token);
+        if (userPermissions == null) {
+            throw new AnalysisServerException(UNAUTHORIZED, "Invalid authorization token.", 401);
+        } else {
+            return userPermissions;
+        }
+    }
+
+    /**
+     * TODO is SecureRandom a sufficiently secure source of randomness when used this way?
+     * Should we be creating a new instance of SecureRandom each time or reusing it?
+     * Do not use basic Base64 encoding since it contains some characters that are invalid in URLs.
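+     * (For reference: the basic Base64 alphabet includes '+' and '/', which are reserved characters in URLs,
+     * while Base64.getUrlEncoder() substitutes '-' and '_', so tokens can safely appear in query strings.)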
+     * @return A url-safe representation of 32 random bytes
+     */
+    public static String generateToken () {
+        Random random = new SecureRandom();
+        byte[] tokenBytes = new byte[32];
+        random.nextBytes(tokenBytes);
+        String token = Base64.getUrlEncoder().encodeToString(tokenBytes);
+        return token;
+    }
+
+    public UserPermissions userForToken (String token) {
+        TokenValue tokenValue = null;
+        synchronized (userForToken) {
+            tokenValue = userForToken.get(token);
+            if (tokenValue == null) {
+                return null;
+            } else {
+                tokenValue.lastUsed = System.currentTimeMillis();
+                return tokenValue.userPermissions;
+            }
+        }
+    }
+
+    /**
+     * @return byte[] representing the supplied password hashed with the supplied salt.
+     */
+    private byte[] hashWithSalt (String password, byte[] salt) {
+        try {
+            // Note Java char is 16-bit Unicode (not byte, which requires a specific encoding like UTF-8).
+            // 256-bit key length is 32 bytes.
+            KeySpec keySpec = new PBEKeySpec(password.toCharArray(), salt, 65536, 256);
+            SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
+            byte[] hash = keyFactory.generateSecret(keySpec).getEncoded();
+            return hash;
+            // return Base64.getEncoder().encodeToString(hash);
+        } catch (Exception e) {
+            throw new RuntimeException("Problem hashing password:", e);
+        }
+    }
+
+    /**
+     * Create a user with the specified password. Stores the random salt and hashed password in the database.
+     */
+    public void createUser (String email, String group, String password) {
+        // TODO validate password entropy
+        // Salts should come from a cryptographically strong source, so use SecureRandom rather than plain Random.
+        Random random = new SecureRandom();
+        byte[] salt = new byte[32];
+        random.nextBytes(salt);
+        byte[] hash = hashWithSalt(password, salt);
+        // Due to Mongo's nature it may not be possible to atomically check whether the user already exists.
+        // Once the write is finalized though, a second insert for the same email will produce an E11000 duplicate
+        // key error. We may want to allow updating a user by simply calling this HTTP API method more than once.
+        users.insertOne(new Document("_id", email)
+                .append("group", group)
+                .append("salt", new Binary(salt))
+                .append("hash", new Binary(hash))
+        );
+    }
+
+    /**
+     * Create a new token for the given user (email), as long as the supplied password is correct. Note that any
+     * previously issued tokens for the same user currently remain valid.
+     * @return a new token, or null if the supplied password is incorrect.
+     */
+    public String makeToken (String email, String password) {
+        Document userDocument = users.find(eq("_id", email)).first();
+        if (userDocument == null) {
+            throw new IllegalArgumentException("User unknown: " + email);
+        }
+        Binary salt = (Binary) userDocument.get("salt");
+        Binary hash = (Binary) userDocument.get("hash");
+        String group = userDocument.getString("group");
+        byte[] hashForComparison = hashWithSalt(password, salt.getData());
+        if (Arrays.equals(hash.getData(), hashForComparison)) {
+            // Maybe invalidation is pointless and we can continue to return the same key indefinitely.
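+            // (Illustrative sketch, not part of this changeset: TokenValue.lastUsed exists to support expiry.
+            // A sweep like the following, run periodically or on each token creation, would evict stale tokens;
+            // maxAgeMillis is a hypothetical parameter.)
+            // private void sweepExpiredTokens (long maxAgeMillis) {
+            //     long cutoff = System.currentTimeMillis() - maxAgeMillis;
+            //     synchronized (userForToken) {
+            //         userForToken.values().removeIf(v -> v.lastUsed < cutoff);
+            //     }
+            // }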
+            String token = generateToken();
+            synchronized (userForToken) {
+                userForToken.put(token, new TokenValue(new UserPermissions(email, false, group)));
+                return token;
+            }
+        } else {
+            return null;
+        }
+    }
+
+}
diff --git a/src/main/java/com/conveyal/analysis/controllers/AuthTokenController.java b/src/main/java/com/conveyal/analysis/controllers/AuthTokenController.java
new file mode 100644
index 000000000..df2bd71d5
--- /dev/null
+++ b/src/main/java/com/conveyal/analysis/controllers/AuthTokenController.java
@@ -0,0 +1,84 @@
+package com.conveyal.analysis.controllers;
+
+import com.conveyal.analysis.AnalysisServerException;
+import com.conveyal.analysis.UserPermissions;
+import com.conveyal.analysis.components.TokenAuthentication;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spark.Request;
+import spark.Response;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Map;
+
+import static com.conveyal.analysis.AnalysisServerException.Type.UNAUTHORIZED;
+import static com.conveyal.analysis.util.JsonUtil.toJson;
+
+/**
+ * HTTP API Controller that handles user accounts and authentication.
+ * Serves up tokens for valid users. Allows admin users to create new users and set their passwords.
+ * TODO add rate limiting and map size limiting (limit number of concurrent users in case of attacks).
+ */
+public class AuthTokenController implements HttpController {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+    private final TokenAuthentication tokenAuthentication;
+
+    public AuthTokenController (TokenAuthentication tokenAuthentication) {
+        this.tokenAuthentication = tokenAuthentication;
+    }
+
+    /**
+     * Create a user with the specified password. Stores the random salt and hashed password in the database.
+     */
+    private Object createUser (Request req, Response res) {
+        if (!UserPermissions.from(req).admin) {
+            throw new AnalysisServerException(UNAUTHORIZED, "Only admin users can create new users.", 401);
+        }
+        String email = req.queryParams("email");
+        String group = req.queryParams("group");
+        String password = req.queryParams("password");
+        tokenAuthentication.createUser(email, group, password);
+        res.status(201);
+        return "CREATED"; // alternatively UPDATED or FAILED
+    }
+
+    /**
+     * Create a new token for the given user (email). Existing tokens for the same user currently remain valid.
+     */
+    private Map<String, String> getTokenForEmail (Request req, Response res) {
+        // These should probably be in the body, not the URL, to prevent them from appearing as plaintext in history.
+        String email = req.queryParams("email");
+        String password = req.queryParams("password");
+        // Crude rate limiting; might just lead to connections piling up in the event of an attack.
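+        // (Illustrative sketch, not part of this changeset: the commented sleepSeconds(2) below blocks the
+        // request thread; a non-blocking alternative would reject rapid repeat attempts per email, with
+        // lastAttempt as a hypothetical ConcurrentHashMap<String, Long> field.)
+        // Long previous = lastAttempt.put(email, System.currentTimeMillis());
+        // if (previous != null && System.currentTimeMillis() - previous < 1000) {
+        //     throw new AnalysisServerException(UNAUTHORIZED, "Too many token requests, retry later.", 401);
+        // }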
+        // sleepSeconds(2);
+        // TODO clear out any expired tokens, limiting to one or two per email
+        String token = tokenAuthentication.makeToken(email, password);
+        if (token == null) {
+            throw new AnalysisServerException(UNAUTHORIZED, "Incorrect email/password combination.", 401);
+        } else {
+            return Map.of("token", token);
+        }
+    }
+
+    // Testing with Apache Bench shows some stalling;
+    // with -k (keepalive) connections it fails immediately.
+
+    // Example usage:
+    // curl -H "authorization: sesame" -X POST "localhost:7070/api/user?email=abyrd@conveyal.com&group=local&password=testpass"
+    // 201 CREATED
+    // curl "localhost:7070/token?email=abyrd@conveyal.com&password=testpass"
+    // 200 {"token":"LHKUz6weI32mEk3SXBfGZFvPP3P9FZq8xboJdPPBIdo="}
+    // curl -H "authorization: abyrd@conveyal.com Jx5Re2/fl1AAISeeMzaCJOy8OCRO6MVOAJLSN7/tkSg=" "localhost:7070/api/activity"
+    // 200 {"systemStatusMessages":[],"taskBacklog":0,"taskProgress":[]}
+
+    @Override
+    public void registerEndpoints (spark.Service sparkService) {
+        // Token endpoint is outside the authenticated /api prefix because it's the means to get authentication tokens.
+        sparkService.get("/token", this::getTokenForEmail, toJson);
+        // User endpoint is inside the authenticated /api prefix because it is only accessible to admin users.
+        sparkService.post("/api/user", this::createUser);
+    }
+
+}
diff --git a/src/main/java/com/conveyal/analysis/controllers/DatabaseController.java b/src/main/java/com/conveyal/analysis/controllers/DatabaseController.java
new file mode 100644
index 000000000..d056180bd
--- /dev/null
+++ b/src/main/java/com/conveyal/analysis/controllers/DatabaseController.java
@@ -0,0 +1,129 @@
+package com.conveyal.analysis.controllers;
+
+import com.conveyal.analysis.UserPermissions;
+import com.conveyal.analysis.persistence.AnalysisDB;
+import com.google.common.collect.Lists;
+import com.mongodb.client.FindIterable;
+import com.mongodb.client.MongoCollection;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spark.Request;
+import spark.Response;
+
+import java.io.OutputStream;
+import java.lang.invoke.MethodHandles;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static com.conveyal.analysis.util.JsonUtil.toJson;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+
+/**
+ * Serve up arbitrary records from the database without binding them to Java objects.
+ * This converts BSON to JSON. Similar things could be done converting relational rows to JSON.
+ * This allows authenticated retrieval of anything in the database by the UI, even across schema migrations.
+ */
+public class DatabaseController implements HttpController {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+    private final AnalysisDB database;
+
+    private final Map<String, MongoCollection<Document>> mongoCollections;
+
+    // Preloading these avoids synchronization while handling HTTP requests, by reading from an immutable map.
+    // TODO verify whether it is threadsafe to reuse a MongoCollection across all threads.
+    // Amazingly there seems to be no documentation on this at all. Drilling down into the function calls, it seems
+    // to create a new session on each find() call, so should presumably go through synchronization.
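+    // (Illustrative, not part of this changeset: the per-request alternative compared in the benchmarks below
+    // would presumably construct the collection inside the handler instead of reading the preloaded map:
+    //     MongoCollection<Document> collection = database.getBsonCollection(req.params("collection"));
+    // )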
+    // In testing with siege and other HTTP benchmarking tools, reusing the MongoCollection seems to result in much
+    // smoother operation; creating a new MongoCollection on each request seems to jam up after a certain number
+    // of requests (perhaps waiting for idle MongoCollections to be cleaned up).
+    public Map<String, MongoCollection<Document>> mongoCollectionMap (String... collectionNames) {
+        Map<String, MongoCollection<Document>> map = new HashMap<>();
+        for (String name : collectionNames) {
+            map.put(name, database.getBsonCollection(name));
+        }
+        // Make the map immutable for threadsafe reading and return.
+        return Map.copyOf(map);
+    }
+
+    public DatabaseController(AnalysisDB database) {
+        this.database = database;
+        this.mongoCollections = mongoCollectionMap("regions", "bundles");
+    }
+
+    /** Factored out for experimenting with streaming and non-streaming approaches to serialization. */
+    private FindIterable<Document> getDocuments (Request req) {
+        String accessGroup = UserPermissions.from(req).accessGroup;
+        final String collectionName = req.params("collection");
+        MongoCollection<Document> collection = mongoCollections.get(collectionName);
+        checkNotNull(collection, "Collection not available: " + collectionName);
+        List<Bson> filters = Lists.newArrayList(eq("accessGroup", accessGroup));
+        req.queryMap().toMap().forEach((key, values) -> {
+            for (String value : values) {
+                filters.add(eq(key, value));
+            }
+        });
+        return collection.find(and(filters));
+    }
+
+    /**
+     * Fetch anything from the database. Buffers all documents in memory, so it may not be suitable for large responses.
+     * Register result serialization with: sparkService.get("/api/db/:collection", this::getDocuments, toJson);
+     */
+    private Iterable<Document> getDocuments (Request req, Response res) {
+        FindIterable<Document> docs = getDocuments(req);
+        List<Document> documents = new ArrayList<>();
+        docs.into(documents);
+        return documents;
+    }
+
+    /**
+     * Fetch anything from the database. Streaming processing, with no in-memory buffering of the BsonDocuments.
+     * The output stream does buffer to some extent, but should stream chunks instead of serializing into memory.
+     * Anecdotally, in testing with siege this does seem to almost double the response rate and allow double the
+     * concurrent connections without stalling (though still low at 20, and it eventually does stall).
+     */
+    private Object getDocumentsStreaming (Request req, Response res) {
+        FindIterable<Document> docs = getDocuments(req);
+        // getOutputStream returns a ServletOutputStream, usually the Jetty implementation HttpOutputStream, which
+        // buffers the output. doc.toJson() creates a lot of short-lived objects, which could be factored out.
+        // The Mongo driver says to use JsonWriter or toJson() rather than utility methods:
+        // https://github.com/mongodb/mongo-java-driver/commit/63409f9cb3bbd0779dd5139355113d9b227dfa05
+        try {
+            OutputStream out = res.raw().getOutputStream();
+            out.write('['); // Begin JSON array.
+            boolean firstElement = true;
+            for (Document doc : docs) {
+                if (firstElement) {
+                    firstElement = false;
+                } else {
+                    out.write(',');
+                }
+                out.write(doc.toJson().getBytes(StandardCharsets.UTF_8));
+            }
+            out.write(']'); // Close JSON array.
+            // We do not close the OutputStream, even implicitly with a try-with-resources.
+            // The thinking is that closing the stream might close the underlying connection, which might be keepalive.
+        } catch (Exception e) {
+            throw new RuntimeException("Failed to write database records as JSON.", e);
+        }
+        // Since we're directly writing to the OutputStream, there is no need to return anything.
+        // But do not return null, or Spark will complain cryptically.
+ return ""; + } + + @Override + public void registerEndpoints (spark.Service sparkService) { + sparkService.get("/api/db/:collection", this::getDocuments, toJson); + //sparkService.get("/api/db/:collection", this::getDocumentsStreaming); + } + +} diff --git a/src/main/java/com/conveyal/analysis/persistence/AnalysisDB.java b/src/main/java/com/conveyal/analysis/persistence/AnalysisDB.java index d9122d098..79f0f6315 100644 --- a/src/main/java/com/conveyal/analysis/persistence/AnalysisDB.java +++ b/src/main/java/com/conveyal/analysis/persistence/AnalysisDB.java @@ -6,6 +6,7 @@ import com.mongodb.client.MongoClients; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoDatabase; +import org.bson.Document; import org.bson.codecs.configuration.CodecProvider; import org.bson.codecs.configuration.CodecRegistry; import org.bson.codecs.pojo.Conventions; @@ -88,6 +89,14 @@ public MongoCollection getMongoCollection (String name, Class clazz) { return database.getCollection(name, clazz); } + /** + * Lowest-level access to Mongo collections, viewed as BSON rather than mapped to Java classes. + */ + public MongoCollection getBsonCollection (String name) { + // If MongoCollections are threadsafe + return database.getCollection(name); + } + /** Interface to supply configuration to this component. */ public interface Config { default String databaseUri() { return "mongodb://127.0.0.1:27017"; } diff --git a/src/main/resources/vector-client/index.html b/src/main/resources/vector-client/index.html index b0b7a931a..9a273448a 100644 --- a/src/main/resources/vector-client/index.html +++ b/src/main/resources/vector-client/index.html @@ -4,28 +4,140 @@ Conveyal GTFS Vector Map - - + + -
+map.on('click', (e) => {
+    const bbox = [
+        [e.point.x - 5, e.point.y - 5],
+        [e.point.x + 5, e.point.y + 5]
+    ];
+    const selectedFeatures = map.queryRenderedFeatures(bbox, { layers: ['patterns'] });
+    const names = selectedFeatures.map(feature => feature.properties.name);
+    console.log(names);
+});
\ No newline at end of file
diff --git a/src/main/resources/vector-client/login.html b/src/main/resources/vector-client/login.html
new file mode 100644
index 000000000..0c35084b3
--- /dev/null
+++ b/src/main/resources/vector-client/login.html
@@ -0,0 +1,41 @@
+    Conveyal Login
[login.html markup lost in extraction: the tags in this new 41-line file were stripped; presumably it contains the email/password form that fetches a bearer token from the /token endpoint]
\ No newline at end of file
diff --git a/src/main/resources/vector-client/vectorstyle.json b/src/main/resources/vector-client/vectorstyle.json
index 2710c72d3..1f87caa67 100644
--- a/src/main/resources/vector-client/vectorstyle.json
+++ b/src/main/resources/vector-client/vectorstyle.json
@@ -5,7 +5,7 @@
     "r5": {
       "type": "vector",
       "tiles": [
-        "http://localhost:7070/networkVectorTiles/5f4ee64db8b6303fdd2fdec2/{z}/{x}/{y}"
+        "http://localhost:7070/dummy"
       ],
       "maxzoom": 14
     },
@@ -30,7 +30,7 @@
   "terrain": { "source": "dem-terrain", "exaggeration": 1.4 },
   "glyphs": "mapbox://fonts/mapbox/{fontstack}/{range}.pbf",
   "layers": [
-    {"id": "hillshading", "source": "dem-hillshade", "type": "hillshade"},
+    {"id": "hillshading", "source": "dem-hillshade", "type": "hillshade", "layout": {"visibility": "none"}},
     {
       "id": "water",
       "source": "mapbox-streets",
@@ -43,20 +43,39 @@
     {
       "id": "patterns",
       "source": "r5",
-      "source-layer": "edges",
+      "source-layer": "conveyal:gtfs:patternShapes",
       "type": "line",
-      "filter": ["get", "car"],
       "paint": {
-        "line-color": [
-          "interpolate",
-          ["linear"],
-          ["get", "lts"],
-          1,
-          ["to-color","#0f0"],
-          2,
-          ["to-color","#099"],
-          4,
-          ["to-color","#f00"]
+        "line-opacity": 0.5,
+        "line-color": ["concat", "#", ["get", "routeColor"]],
+        "line-width": [ "interpolate", ["exponential", 1.25], ["zoom"], 8, 1, 22, 15 ]
+      }
+    },
+    {
+      "id": "stops",
+      "source": "r5",
+      "source-layer": "conveyal:gtfs:stops",
+      "type": "circle",
+      "paint": {
+        "circle-radius": [ "interpolate", ["exponential", 1.25], ["zoom"], 8, 2, 22, 20 ]
+      }
+    },
+    {
+      "id": "stop-labels",
+      "source": "r5",
+      "source-layer": "conveyal:gtfs:stops",
+      "type": "symbol",
+      "paint": {
+        "text-halo-color": "rgba(255, 255, 255, 1)",
+        "text-halo-width": 0.5
+      },
+      "layout": {
+        "text-field": "{name}",
+        "text-justify": "auto",
+        "text-padding": 5,
+        "text-size": 10,
+        "text-variable-anchor": [
+          "top-left"
         ]
       }
     }
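Illustrative end-to-end usage of the new endpoints from Java, mirroring the curl examples above (editor's sketch, not part of this changeset; assumes a backend running on localhost:7070, the user created in those examples, and the Java 11+ java.net.http client):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TokenClientExample {
    public static void main (String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Obtain a token for an existing user. The /token endpoint sits outside the authenticated /api prefix.
        HttpRequest tokenRequest = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:7070/token?email=abyrd@conveyal.com&password=testpass"))
                .build();
        HttpResponse<String> tokenResponse = client.send(tokenRequest, HttpResponse.BodyHandlers.ofString());
        // The body is {"token":"..."}; a real client should use a JSON parser instead of this crude regex.
        String token = tokenResponse.body().replaceAll(".*\"token\":\"([^\"]+)\".*", "$1");
        // Use the token as the raw authorization header value, matching TokenAuthentication.authenticate().
        HttpRequest dbRequest = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:7070/api/db/regions"))
                .header("authorization", token)
                .build();
        HttpResponse<String> dbResponse = client.send(dbRequest, HttpResponse.BodyHandlers.ofString());
        System.out.println(dbResponse.statusCode() + " " + dbResponse.body());
    }
}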