Merge pull request #228 from inverted-ai/documentation_cleanup
Documentation cleanup
KieranRatcliffeInvertedAI authored Dec 6, 2024
2 parents 61a78e1 + 50fe453 commit ba1d02a
Showing 22 changed files with 367 additions and 190 deletions.
29 changes: 17 additions & 12 deletions README.md
@@ -44,9 +44,11 @@ so you can also download it and build locally.
## Minimal example

``` python
import numpy as np
import matplotlib.pyplot as plt
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

import matplotlib.pyplot as plt

location = "canada:drake_street_and_pacific_blvd" # select one of available locations

@@ -60,9 +62,9 @@ location_info_response = iai.location_info(location=location)
# initialize the simulation by spawning NPCs
response = iai.initialize(
    location=location, # select one of the available locations
agent_count=10, # number of NPCs to spawn
agent_properties=get_default_agent_properties({AgentType.car:10}), # number of NPCs to spawn
)
agent_attributes = response.agent_attributes # get dimension and other attributes of NPCs
agent_properties = response.agent_properties # get dimension and other attributes of NPCs

rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(
@@ -72,8 +74,8 @@ scene_plotter = iai.utils.ScenePlotter(
location_info_response.static_actors
)
scene_plotter.initialize_recording(
response.agent_states,
agent_attributes,
agent_states=response.agent_states,
agent_properties=agent_properties,
)

print("Begin stepping through simulation.")
@@ -82,7 +84,7 @@ for _ in range(100): # how many simulation steps to execute (10 steps is 1 seco
# query the API for subsequent NPC predictions
response = iai.drive(
location=location,
agent_attributes=agent_attributes,
agent_properties=agent_properties,
agent_states=response.agent_states,
recurrent_states=response.recurrent_states,
light_recurrent_states=response.light_recurrent_states,
@@ -112,9 +114,12 @@ Conceptually, the API is used to establish synchronous co-simulation between you
your machine and the NPC engine running on Inverted AI servers. The basic integration in Python looks like this.

```python
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

from typing import List
import numpy as np
import invertedai as iai
import matplotlib.pyplot as plt

iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable
@@ -150,10 +155,10 @@ class LocalSimulator:
return self.ego_state

print("Begin initialization.")
location = 'iai:ubc_roundabout'
location = "canada:drake_street_and_pacific_blvd"
iai_simulation = iai.BasicCosimulation( # instantiate a stateful wrapper for Inverted AI API
    location=location, # select one of the available locations
agent_count=5, # how many vehicles in total to use in the simulation
agent_properties=get_default_agent_properties({AgentType.car:5}), # how many vehicles in total to use in the simulation
ego_agent_mask=[True, False, False, False, False], # first vehicle is ego, rest are NPCs
get_birdview=False, # provides simple visualization - don't use in production
    traffic_lights=True, # gets the traffic light states, used for initializing and stepping the simulation
@@ -169,7 +174,7 @@ scene_plotter = iai.utils.ScenePlotter(
)
scene_plotter.initialize_recording(
agent_states=iai_simulation.agent_states,
agent_attributes=iai_simulation.agent_attributes,
agent_properties=iai_simulation.agent_properties,
)

print("Begin stepping through simulation.")
@@ -182,7 +187,7 @@ for _ in range(100): # how many simulation steps to execute (10 steps is 1 seco
# execute predictions in your simulator, using your actions for the ego vehicle
updated_ego_agent_state = local_simulation.step(predicted_npc_behavior)
# save the visualization with ScenePlotter
scene_plotter.record_step(iai_simulation.agent_states)
    scene_plotter.record_step(iai_simulation.agent_states, iai_simulation.light_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
2 changes: 2 additions & 0 deletions docs/source/pythonapi/index.md
@@ -12,6 +12,8 @@ sdk-drive
sdk-initialize
sdk-light
sdk-location-info
sdk-large-drive
sdk-large-initialize
sdk-common
sdk-simulation
sdk-env-var
3 changes: 2 additions & 1 deletion docs/source/pythonapi/sdk-drive.md
@@ -4,11 +4,12 @@
```{eval-rst}
.. autofunction:: invertedai.api.drive
```

---
```{eval-rst}
.. autoclass:: invertedai.api.DriveResponse
:members:
:undoc-members:
:exclude-members: model_config, model_fields
```


8 changes: 8 additions & 0 deletions docs/source/pythonapi/sdk-large-drive.md
@@ -0,0 +1,8 @@
# LARGE_DRIVE


```{eval-rst}
.. autofunction:: invertedai.large.large_drive
```


19 changes: 19 additions & 0 deletions docs/source/pythonapi/sdk-large-initialize.md
@@ -0,0 +1,19 @@
# LARGE_INITIALIZE


```{eval-rst}
.. autofunction:: invertedai.large.large_initialize
```
---
```{eval-rst}
.. autofunction:: invertedai.large.get_regions_default
```
---
```{eval-rst}
.. autofunction:: invertedai.large.get_regions_in_grid
```
---
```{eval-rst}
.. autofunction:: invertedai.large.get_number_of_agents_per_region_by_drivable_area
```

4 changes: 2 additions & 2 deletions docs/source/pythonapi/sdk-light.md
@@ -4,12 +4,12 @@
```{eval-rst}
.. autofunction:: invertedai.api.light
```


---
```{eval-rst}
.. autoclass:: invertedai.api.LightResponse
:members:
:undoc-members:
:exclude-members: model_config, model_fields
```


30 changes: 15 additions & 15 deletions docs/source/userguide.md
@@ -24,8 +24,8 @@ bare-bones access mode that offers maximum flexibility to deploy in any environm
For convenience, we also provide a {ref}`Python SDK`, freely available on PyPI with minimal dependencies, which offers an abstraction layer on top of the REST API. Recently, we also released a {ref}`C++ SDK`, and in the future we intend to release similar libraries for other languages.

## Maps and geofencing
The API operates on a pre-defined collection of maps and currently there is no programmatic way to add additional
locations. For each location there is a map, represented internally in the
The API operates on a pre-defined collection of maps; a programmatic way to add additional locations is currently in development.
For each location there is a map, represented internally in the
[Lanelet2](https://github.com/fzi-forschungszentrum-informatik/Lanelet2) format, which specifies
lanelets, traffic lights, and a selection of static traffic signs (along with their relationship to specific lanelets).
Each map comes with a canonical Euclidean coordinate frame in meters, which for OSM files is obtained by applying a
@@ -43,8 +43,8 @@ access, LOCATION_INFO provides all the relevant information. Please contact us w
locations.
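
As a quick illustration, the map data described above can be read straight off the LOCATION_INFO response; this sketch uses only fields that also appear in the examples in this repository:

```python
import invertedai as iai

# Sketch: query LOCATION_INFO for a supported location and inspect its map data.
# Assumes your API key is set via the IAI_API_KEY environment variable.
location_info_response = iai.location_info(location="canada:drake_street_and_pacific_blvd")
print(location_info_response.map_fov)     # field of view covered by the map, in meters
print(location_info_response.map_center)  # center of the map's coordinate frame
print(len(location_info_response.static_actors))  # static actors, e.g. traffic lights
```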

## Agent types and representations
At the moment the API only supports vehicles, but future releases will also support pedestrians, bicycles, etc.. We
assume that each vehicle is a rigid rectangle with a fixed length and width. The motion of each vehicle is constrained
At the moment the API only supports vehicles and pedestrians, but future releases will add more agent types and other vulnerable road users.
We assume that each vehicle is a rigid rectangle with a fixed length and width. The motion of each vehicle is constrained
by the kinematic bicycle model, which further requires specifying the rear axis offset, that is, the distance between the
center of the vehicle and its rear axis. The front axis offset is not relevant, because it cannot be fit from observational
data, so we omit it. The three static agent attributes are: length, width, and rear offset.
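
For intuition, a center-referenced kinematic bicycle update looks roughly like the sketch below. This is illustrative only: the API keeps its dynamics internal, and the `lr` and `wheelbase` values here are made-up defaults, with `lr` playing the role of the fitted rear offset.

```python
import math

def kinematic_bicycle_step(x, y, theta, v, accel, steering, lr=1.4, wheelbase=2.8, dt=0.1):
    """One illustrative step: state (x, y, heading theta, speed v), controls (accel, steering)."""
    beta = math.atan(lr / wheelbase * math.tan(steering))  # slip angle at the vehicle center
    x += v * math.cos(theta + beta) * dt
    y += v * math.sin(theta + beta) * dt
    theta += (v / lr) * math.sin(beta) * dt
    v += accel * dt
    return x, y, theta, v
```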
@@ -65,26 +65,26 @@ Each traffic light can be green, yellow, or red at any given point.
Traffic light IDs are fixed and can be derived from the map, but for convenience we also provide traffic light IDs
and the corresponding locations in LOCATION_INFO.
For maps with traffic lights, on a call to INITIALIZE, the server generates a realistic configuration of all traffic lights,
and returns the associated light states via 'light_recurrent_states'. On each call to DRIVE, traffic lights' states can be automatically managed by the server with 'light_recurrent_states'.
and returns the associated light states via 'light_recurrent_states'. On each call to {ref}`DRIVE`, traffic lights' states can be automatically managed by the server with 'light_recurrent_states'.
There is also the option to manually set light states with 'traffic_lights_states', but once this path is taken,
it is on the client to continually provide 'traffic_lights_states' on all calls to DRIVE.
it is on the client to continually provide 'traffic_lights_states' on all calls to {ref}`DRIVE`.
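
In SDK terms, the two paths look roughly like the sketch below, continuing the variables from the minimal example above; `my_light_states` is a hypothetical mapping that you would maintain yourself:

```python
# Path 1: server-managed lights. Pass light_recurrent_states through
# unchanged from each response into the next call to DRIVE.
response = iai.drive(
    location=location,
    agent_states=response.agent_states,
    agent_properties=agent_properties,
    recurrent_states=response.recurrent_states,
    light_recurrent_states=response.light_recurrent_states,
)

# Path 2: client-managed lights. Once traffic_lights_states is supplied,
# it must be supplied on every subsequent call to DRIVE.
response = iai.drive(
    location=location,
    agent_states=response.agent_states,
    agent_properties=agent_properties,
    recurrent_states=response.recurrent_states,
    traffic_lights_states=my_light_states,  # hypothetical {light_id: state} mapping
)
```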

## Handling agents and NPCs
In the API, there is no distinction between agents, controlled by you, and NPCs, controlled by us, so we refer to them
collectively as agents. In any simulation there can be zero or more characters of either kind. When calling DRIVE, the
collectively as agents. In any simulation there can be zero or more agents of either kind. When calling {ref}`DRIVE`, the
client needs to list all agents in the simulation and we predict the next states for all of them. It is up to the client to
decide which of those agents are NPCs and use the corresponding predictions in the local simulator. However, it is
important to specify all agents when calling the API, since otherwise NPCs will not be able to react to omitted agents.
Due to the recurrent nature of ITRA, we generally recommend that the customer is consistent about this choice throughout
Due to the recurrent nature of ITRA, we generally recommend that the user is consistent about this choice throughout
the simulation - predictions for agents whose state is updated differently from ITRA predictions may not be as good as
when ITRA fully controls them.
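
Concretely, a single step with one ego agent might look like this sketch, where `my_simulator` stands in for your own simulator and the surrounding variables are assumed to exist:

```python
# Sketch: send ALL agents to DRIVE (ego at index 0), consume only NPC predictions.
response = iai.drive(
    location=location,
    agent_states=[ego_state] + npc_states,
    agent_properties=[ego_properties] + npc_properties,
    recurrent_states=recurrent_states,
)
npc_states = response.agent_states[1:]        # NPCs follow the API's predictions
recurrent_states = response.recurrent_states  # keep every slot, ego included
ego_state = my_simulator.step()               # ego is updated by your own simulator
```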

## Consistent simulation with a stateless API
The API is stateless, so each call to DRIVE requires specifying both the static attributes and the dynamic state of each
The API is stateless, so each call to {ref}`DRIVE` requires specifying both the static attributes and the dynamic state of each
agent. However, ITRA is a recurrent model that uses the simulation’s history to make predictions, which we facilitate
through the stateless API by passing around a recurrent state, which is a vector with unspecified semantics from the
client’s perspective. Each call to DRIVE returns a new recurrent state for each agent, which must be passed for this
agent to DRIVE on the subsequent call. Providing an incorrect recurrent state may silently lead to deteriorating
client’s perspective. Each call to {ref}`DRIVE` returns a new recurrent state for each agent, which must be passed for this
agent to {ref}`DRIVE` on the subsequent call. Providing an incorrect recurrent state may silently lead to deteriorating
performance, and in order to obtain valid values for the initial recurrent state, the simulation must always start with
INITIALIZE. To initialize the simulation to a specific state, you can provide a sequence of historical states for all
agents that will be used to construct the matching recurrent state. For best performance, at least 10 time steps should
@@ -96,18 +96,18 @@ Python library that handles this internally.
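
A hedged sketch of that warm start; the `states_history` argument name is taken from the Python SDK's INITIALIZE, so check the reference for the exact signature:

```python
# Sketch: construct valid recurrent states by replaying recent history
# (at least 10 steps recommended) through INITIALIZE.
response = iai.initialize(
    location=location,
    agent_properties=agent_properties,  # one entry per agent
    states_history=history,             # assumed name: per-step lists of AgentState
)
recurrent_states = response.recurrent_states  # valid initial recurrent states
```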
In the simple case there is a fixed number of agents present throughout the entire simulation. However, it is also
possible to dynamically introduce and remove agents, which is typically done when they enter and exit the supported
area. Removing agents is easy: all it takes is removing the information for a given agent from the lists of agent
attributes, agent states, and recurrent states. For convenience, DRIVE returns a boolean vector indicating which agents
attributes, agent states, and recurrent states. For convenience, {ref}`DRIVE` returns a boolean vector indicating which agents
are within the supported area after the predicted step.
Introducing agents into a running simulation is more complicated, due to the requirement to construct their recurrent
state. When predictions for a new agent are not going to be consumed, its state can simply be appended to the
relevant lists, with the recurrent state set to zeros. To obtain good predictions for such an agent, another call to
INITIALIZE needs to be made, providing the recent history of all agents, including the new agent. This correctly
initializes the recurrent state and DRIVE can be called from that point on normally. For best performance, each agent
initializes the recurrent state and {ref}`DRIVE` can be called from that point on normally. For best performance, each agent
should initially be controlled by the client for at least 10 time steps before being handed off to ITRA as an NPC by
calling INITIALIZE.
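
In code, removal and introduction might look like the following sketch; the `is_inside_supported_area` field name is our assumption here, so check the DriveResponse reference:

```python
# Sketch: drop agents that left the supported area after a DRIVE step.
keep = response.is_inside_supported_area  # assumed field name
agent_properties = [p for p, k in zip(agent_properties, keep) if k]
agent_states = [s for s, k in zip(response.agent_states, keep) if k]
recurrent_states = [r for r, k in zip(response.recurrent_states, keep) if k]

# Sketch: introduce a new agent by replaying recent history (with the new
# agent included) through INITIALIZE so its recurrent state is built correctly.
response = iai.initialize(
    location=location,
    agent_properties=agent_properties + [new_agent_properties],
    states_history=history_with_new_agent,  # >= 10 steps covering all agents
)
recurrent_states = response.recurrent_states
```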

## Reproducibility and control over predictions
INITIALIZE and DRIVE optionally accept a random seed, which controls their stochastic behavior. With the same seed and
INITIALIZE and {ref}`DRIVE` optionally accept a random seed, which controls their stochastic behavior. With the same seed and
the same inputs, the outputs will match up to minor numerical differences.
Other than for the random seed, there is currently no mechanism to influence the behavior of predicted agents, such as
by directing them to certain exits or setting their speed, but such mechanisms will be included in future releases.
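
A sketch of seeding both calls; the `random_seed` keyword follows the Python SDK, and the seed value is arbitrary:

```python
# Sketch: fix the seed so repeated runs produce near-identical rollouts.
response = iai.initialize(
    location=location,
    agent_properties=get_default_agent_properties({AgentType.car: 10}),
    random_seed=42,
)
response = iai.drive(
    location=location,
    agent_states=response.agent_states,
    agent_properties=response.agent_properties,
    recurrent_states=response.recurrent_states,
    random_seed=42,
)
```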
@@ -119,6 +119,6 @@ formats, including checking lengths of lists and bounds for numeric values, and
performed on the client side before paid API calls. All those features are only available in the Python library and not
in the REST API.
To enable the mock API, just set the environment variable `IAI_MOCK_API` to true according to {ref}`Environment Variables`.
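For example, from Python (a sketch; exporting the variable in your shell before launching works equally well):

```python
import os

os.environ["IAI_MOCK_API"] = "true"  # set before the SDK reads its configuration
import invertedai as iai  # subsequent calls are handled by the mock API
```
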
For further debugging and visualization, both INITIALIZE and DRIVE optionally return a rendered birdview image showing
For further debugging and visualization, both INITIALIZE and {ref}`DRIVE` optionally return a rendered birdview image showing
the simulation state after the call to them. This significantly increases the payload size and latency, so it should not
be done in real integrations.
24 changes: 14 additions & 10 deletions examples/cosimulation_minimal_example.py
@@ -1,11 +1,13 @@
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

from typing import List
import numpy as np
import invertedai as iai
import matplotlib.pyplot as plt

iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable


class LocalSimulator:
"""
    Mock up of a local simulator, where you control the ego vehicle. This example only supports a single ego vehicle.
@@ -36,24 +38,26 @@ def step(self, predicted_npc_states):
return self.ego_state

print("Begin initialization.")
location = 'iai:ubc_roundabout'
location = "canada:drake_street_and_pacific_blvd"
iai_simulation = iai.BasicCosimulation( # instantiate a stateful wrapper for Inverted AI API
    location=location, # select one of the available locations
agent_count=5, # how many vehicles in total to use in the simulation
agent_properties=get_default_agent_properties({AgentType.car:5}), # how many vehicles in total to use in the simulation
ego_agent_mask=[True, False, False, False, False], # first vehicle is ego, rest are NPCs
get_birdview=False, # provides simple visualization - don't use in production
    traffic_lights=True, # gets the traffic light states, used for initializing and stepping the simulation
)

location_info_response = iai.location_info(location=location)
rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors)
scene_plotter = iai.utils.ScenePlotter(
rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors
)
scene_plotter.initialize_recording(
agent_states=iai_simulation.agent_states,
agent_attributes=iai_simulation.agent_attributes,
agent_properties=iai_simulation.agent_properties,
)

print("Begin stepping through simulation.")
@@ -66,7 +70,7 @@ def step(self, predicted_npc_states):
# execute predictions in your simulator, using your actions for the ego vehicle
updated_ego_agent_state = local_simulation.step(predicted_npc_behavior)
# save the visualization with ScenePlotter
scene_plotter.record_step(iai_simulation.agent_states)
    scene_plotter.record_step(iai_simulation.agent_states, iai_simulation.light_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
10 changes: 5 additions & 5 deletions examples/large_map_example.py
@@ -1,13 +1,13 @@
import invertedai as iai
from invertedai.large.common import Region
from invertedai.common import AgentAttributes
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentAttributes, AgentType

import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
import time
import random
import time

from tqdm import tqdm

def main(args):
if args.model_version_drive == "None":
@@ -30,7 +30,7 @@ def main(args):
print(f"Begin initialization.")
regions = iai.get_regions_default(
location = args.location,
total_num_agents = args.num_agents,
agent_count_dict = {AgentType.car: args.num_agents},
area_shape = (int(args.width/2),int(args.height/2)),
map_center = map_center,
)
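
Downstream, these regions are typically passed to `large_initialize`; a hedged sketch, since the full signature lives in the LARGE_INITIALIZE reference:

```python
from invertedai.large import large_initialize

# Sketch (assumed minimal signature): spawn agents across the large map
# using the regions computed above.
response = large_initialize(
    location=args.location,
    regions=regions,
)
```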
16 changes: 9 additions & 7 deletions examples/minimal_example.py
@@ -1,6 +1,8 @@
import numpy as np
import matplotlib.pyplot as plt
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

import matplotlib.pyplot as plt

location = "canada:drake_street_and_pacific_blvd" # select one of available locations

@@ -14,9 +16,9 @@
# initialize the simulation by spawning NPCs
response = iai.initialize(
    location=location, # select one of the available locations
agent_count=10, # number of NPCs to spawn
agent_properties=get_default_agent_properties({AgentType.car:10}), # number of NPCs to spawn
)
agent_attributes = response.agent_attributes # get dimension and other attributes of NPCs
agent_properties = response.agent_properties # get dimension and other attributes of NPCs

rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(
@@ -26,8 +28,8 @@
location_info_response.static_actors
)
scene_plotter.initialize_recording(
response.agent_states,
agent_attributes,
agent_states=response.agent_states,
agent_properties=agent_properties,
)

print("Begin stepping through simulation.")
@@ -36,7 +38,7 @@
# query the API for subsequent NPC predictions
response = iai.drive(
location=location,
agent_attributes=agent_attributes,
agent_properties=agent_properties,
agent_states=response.agent_states,
recurrent_states=response.recurrent_states,
light_recurrent_states=response.light_recurrent_states,
6 changes: 4 additions & 2 deletions examples/scenario_log_example.py
@@ -1,10 +1,12 @@
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

import os
from random import randint
import matplotlib.pyplot as plt

from random import randint

LOCATION = "canada:drake_street_and_pacific_blvd" # select one of available locations
SIMULATION_LENGTH = 100
SIMULATION_LENGTH_EXTEND = 100
@@ -18,7 +20,7 @@
# initialize the simulation by spawning NPCs
response = iai.initialize(
location=LOCATION, # select one of available locations
agent_properties=get_default_agent_properties({"car":5}), # number of NPCs to spawn
agent_properties=get_default_agent_properties({AgentType.car:5}), # number of NPCs to spawn
)
agent_properties = response.agent_properties # get dimension and other attributes of NPCs
