Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merge to master #223

Merged
merged 35 commits into from
Dec 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
d02fcef
Added support for infractions in large initialize.
KieranRatcliffeInvertedAI Sep 20, 2024
dbcd2c8
Minor modifications from feedback including parameter types.
KieranRatcliffeInvertedAI Sep 20, 2024
a178475
Additional fix to function typing.
KieranRatcliffeInvertedAI Sep 20, 2024
6234fac
Merge pull request #219 from inverted-ai/conditional_large_initialize
KieranRatcliffeInvertedAI Sep 20, 2024
34312ef
Enabled None as parameter for recurrent states in large drive
KieranRatcliffeInvertedAI Sep 23, 2024
e009579
Merged develop
KieranRatcliffeInvertedAI Sep 23, 2024
a5b5e7b
Added ability to pass None to large_drive recurrent states
KieranRatcliffeInvertedAI Sep 26, 2024
53747c6
Merge pull request #220 from inverted-ai/hotfix_large_drive_recurr_state
KieranRatcliffeInvertedAI Sep 26, 2024
2ff11a4
Change scene plotter visualization to look like TDS.
KieranRatcliffeInvertedAI Oct 8, 2024
ad2d610
Merge branch 'develop' into hotfix_utils_agent_appearance
KieranRatcliffeInvertedAI Oct 8, 2024
a1ed9fb
Remove unnecessary comments
KieranRatcliffeInvertedAI Oct 8, 2024
27d80f0
Merge pull request #221 from inverted-ai/hotfix_utils_agent_appearance
KieranRatcliffeInvertedAI Oct 8, 2024
17036e4
Add blank function to utils.
KieranRatcliffeInvertedAI Oct 8, 2024
445a214
Merge branch 'develop' of https://github.com/inverted-ai/invertedai i…
KieranRatcliffeInvertedAI Oct 8, 2024
eb15cd4
Fixed merging conflicts.
KieranRatcliffeInvertedAI Oct 29, 2024
d43b7e2
Rebase.
KieranRatcliffeInvertedAI Nov 27, 2024
3bb8588
Fixed merging conflict.
KieranRatcliffeInvertedAI Nov 27, 2024
7deb32c
Added various changes from PR including actual UTC timestamps.
KieranRatcliffeInvertedAI Dec 4, 2024
b8d41bf
Added Optional type parameter for dict data parameter.
KieranRatcliffeInvertedAI Dec 4, 2024
61a78e1
Merge pull request #226 from inverted-ai/debug_logger
KieranRatcliffeInvertedAI Dec 4, 2024
2bd5482
Cleaned up formatting and imports.
KieranRatcliffeInvertedAI Dec 4, 2024
8e74b03
Merge branch 'develop' into documentation_cleanup
KieranRatcliffeInvertedAI Dec 4, 2024
cdec647
Updated large initialize helper functions to account for pedestrians …
KieranRatcliffeInvertedAI Dec 5, 2024
63ecd8f
Changed width, height back to current framing as half the total regio…
KieranRatcliffeInvertedAI Dec 5, 2024
ea50f92
Added documentation for large simulation.
KieranRatcliffeInvertedAI Dec 5, 2024
504dd1a
Cleaned up example python scripts.
KieranRatcliffeInvertedAI Dec 5, 2024
a7073b7
Small cleanup to cosimulation and README examples.
KieranRatcliffeInvertedAI Dec 5, 2024
82f0a56
Changed some minor visual formatting for readability.
KieranRatcliffeInvertedAI Dec 5, 2024
a9aba81
Fixed import issue.
KieranRatcliffeInvertedAI Dec 5, 2024
1b4a583
Added warnings regarding usage of agent attributes.
KieranRatcliffeInvertedAI Dec 5, 2024
d74c6aa
Updated warnings for clarity and only display them once per program t…
KieranRatcliffeInvertedAI Dec 6, 2024
6fa0d8c
Fixed missed warning message update.
KieranRatcliffeInvertedAI Dec 6, 2024
837ad94
Fixed docstring build and edited the descriptions.
KieranRatcliffeInvertedAI Dec 6, 2024
50fe453
Minor edits to existing documentation.
KieranRatcliffeInvertedAI Dec 6, 2024
ba1d02a
Merge pull request #228 from inverted-ai/documentation_cleanup
KieranRatcliffeInvertedAI Dec 6, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -182,5 +182,6 @@ bazel-*
*.gif
examples/*.png
examples/output/
examples/logs/
examples/*.csv
examples/*.json
29 changes: 17 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,11 @@ so you can also download it and build locally.
## Minimal example

``` python
import numpy as np
import matplotlib.pyplot as plt
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

import matplotlib.pyplot as plt

location = "canada:drake_street_and_pacific_blvd" # select one of available locations

Expand All @@ -60,9 +62,9 @@ location_info_response = iai.location_info(location=location)
# initialize the simulation by spawning NPCs
response = iai.initialize(
location=location, # select one of available locations
agent_count=10, # number of NPCs to spawn
agent_properties=get_default_agent_properties({AgentType.car:10}), # number of NPCs to spawn
)
agent_attributes = response.agent_attributes # get dimension and other attributes of NPCs
agent_properties = response.agent_properties # get dimension and other attributes of NPCs

rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(
Expand All @@ -72,8 +74,8 @@ scene_plotter = iai.utils.ScenePlotter(
location_info_response.static_actors
)
scene_plotter.initialize_recording(
response.agent_states,
agent_attributes,
agent_states=response.agent_states,
agent_properties=agent_properties,
)

print("Begin stepping through simulation.")
Expand All @@ -82,7 +84,7 @@ for _ in range(100): # how many simulation steps to execute (10 steps is 1 seco
# query the API for subsequent NPC predictions
response = iai.drive(
location=location,
agent_attributes=agent_attributes,
agent_properties=agent_properties,
agent_states=response.agent_states,
recurrent_states=response.recurrent_states,
light_recurrent_states=response.light_recurrent_states,
Expand Down Expand Up @@ -112,9 +114,12 @@ Conceptually, the API is used to establish synchronous co-simulation between you
your machine and the NPC engine running on Inverted AI servers. The basic integration in Python looks like this.

```python
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

from typing import List
import numpy as np
import invertedai as iai
import matplotlib.pyplot as plt

iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable
Expand Down Expand Up @@ -150,10 +155,10 @@ class LocalSimulator:
return self.ego_state

print("Begin initialization.")
location = 'iai:ubc_roundabout'
location = "canada:drake_street_and_pacific_blvd"
iai_simulation = iai.BasicCosimulation( # instantiate a stateful wrapper for Inverted AI API
location=location, # select one of available locations
agent_count=5, # how many vehicles in total to use in the simulation
agent_properties=get_default_agent_properties({AgentType.car:5}), # how many vehicles in total to use in the simulation
ego_agent_mask=[True, False, False, False, False], # first vehicle is ego, rest are NPCs
get_birdview=False, # provides simple visualization - don't use in production
    traffic_lights=True, # gets the traffic light states, used for initialization and stepping the simulation
Expand All @@ -169,7 +174,7 @@ scene_plotter = iai.utils.ScenePlotter(
)
scene_plotter.initialize_recording(
agent_states=iai_simulation.agent_states,
agent_attributes=iai_simulation.agent_attributes,
agent_properties=iai_simulation.agent_properties,
)

print("Begin stepping through simulation.")
Expand All @@ -182,7 +187,7 @@ for _ in range(100): # how many simulation steps to execute (10 steps is 1 seco
# execute predictions in your simulator, using your actions for the ego vehicle
updated_ego_agent_state = local_simulation.step(predicted_npc_behavior)
# save the visualization with ScenePlotter
scene_plotter.record_step(iai_simulation.agent_states)
scene_plotter.record_step(iai_simulation.agent_states,iai_simulation.light_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
Expand Down
2 changes: 2 additions & 0 deletions docs/source/pythonapi/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ sdk-drive
sdk-initialize
sdk-light
sdk-location-info
sdk-large-drive
sdk-large-initialize
sdk-common
sdk-simulation
sdk-env-var
Expand Down
3 changes: 2 additions & 1 deletion docs/source/pythonapi/sdk-drive.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,12 @@
```{eval-rst}
.. autofunction:: invertedai.api.drive
```

---
```{eval-rst}
.. autoclass:: invertedai.api.DriveResponse
:members:
:undoc-members:
:exclude-members: model_config, model_fields
```


8 changes: 8 additions & 0 deletions docs/source/pythonapi/sdk-large-drive.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# LARGE_DRIVE


```{eval-rst}
.. autofunction:: invertedai.large.large_drive
```


19 changes: 19 additions & 0 deletions docs/source/pythonapi/sdk-large-initialize.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# LARGE_INITIALIZE


```{eval-rst}
.. autofunction:: invertedai.large.large_initialize
```
---
```{eval-rst}
.. autofunction:: invertedai.large.get_regions_default
```
---
```{eval-rst}
.. autofunction:: invertedai.large.get_regions_in_grid
```
---
```{eval-rst}
.. autofunction:: invertedai.large.get_number_of_agents_per_region_by_drivable_area
```

4 changes: 2 additions & 2 deletions docs/source/pythonapi/sdk-light.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,12 @@
```{eval-rst}
.. autofunction:: invertedai.api.light
```


---
```{eval-rst}
.. autoclass:: invertedai.api.LightResponse
:members:
:undoc-members:
:exclude-members: model_config, model_fields
```


30 changes: 15 additions & 15 deletions docs/source/userguide.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ bare-bones access mode that offers maximum flexibility to deploy in any environm
For convenience, we also provide a {ref}`Python SDK`, freely available on PyPI with minimal dependencies, which provides an abstraction layer on top of the REST API. Recently, we also released {ref}`C++ SDK` and in the future we intend to release similar libraries for other languages.

## Maps and geofencing
The API operates on a pre-defined collection of maps and currently there is no programmatic way to add additional
locations. For each location there is a map, represented internally in the
The API operates on a pre-defined collection of maps and currently a programmatic way to add additional locations is in development.
For each location there is a map, represented internally in the
[Lanelet2](https://github.com/fzi-forschungszentrum-informatik/Lanelet2) format, which specifies
lanelets, traffic lights, and a selection of static traffic signs (along with their relationship to specific lanelets).
Each map comes with a canonical Euclidean coordinate frame in meters, which for OSM files is obtained by applying a
Expand All @@ -43,8 +43,8 @@ access, LOCATION_INFO provides all the relevant information. Please contact us w
locations.

## Agent types and representations
At the moment the API only supports vehicles, but future releases will also support pedestrians, bicycles, etc.. We
assume that each vehicle is a rigid rectangle with a fixed length and width. The motion of each vehicle is constrained
At the moment the API only supports vehicles and pedestrians, but future releases will also support more agents and vulnerable road users.
We assume that each vehicle is a rigid rectangle with a fixed length and width. The motion of each vehicle is constrained
by the kinematic bicycle model, which further requires specifying the rear axis offset, that is the distance between the
center of the vehicle and its rear axis. Front axis offset is not relevant, because it can not be fit from observational
data, so we omit it. The three static agent attributes are: length, width, and rear offset.
Expand All @@ -65,26 +65,26 @@ Each traffic light can be green, yellow, or red at any given point.
Traffic light IDs are fixed and can be derived from the map, but for convenience we also provide traffic light IDs
and the corresponding locations in LOCATION_INFO.
For maps with traffic lights, on a call to INITIALIZE, the server generates a realistic configuration of all traffic lights,
and returns the associated light states via 'light_recurrent_states'. On each call to DRIVE, traffic lights' states can be automatically managed by the server with 'light_recurrent_states'.
and returns the associated light states via 'light_recurrent_states'. On each call to {ref}`DRIVE`, traffic lights' states can be automatically managed by the server with 'light_recurrent_states'.
There is also the option to manually set light states with 'traffic_lights_states', but once this path is taken,
it is on the client to continually provide 'traffic_lights_states' on all calls to DRIVE.
it is on the client to continually provide 'traffic_lights_states' on all calls to {ref}`DRIVE`.

## Handling agents and NPCs
In the API, there is no distinction between agents, controlled by you, and NPCs, controlled by us, so we refer to them
collectively as agents. In any simulation there can be zero or more characters of either kind. When calling DRIVE, the
collectively as agents. In any simulation there can be zero or more characters of either kind. When calling {ref}`DRIVE`, the
client needs to list all agents in simulation and we predict the next states for all of them. It is up to the client to
decide which of those agents are NPCs and use the corresponding predictions in the local simulator. However, it is
important to specify all agents when calling the API, since otherwise NPCs will not be able to react to omitted agents.
Due to the recurrent nature of ITRA, we generally recommend that the customer is consistent about this choice throughout
Due to the recurrent nature of ITRA, we generally recommend that the user is consistent about this choice throughout
the simulation - predictions for agents whose state is updated differently from ITRA predictions may not be as good as
when ITRA fully controls them.

## Consistent simulation with a stateless API
The API is stateless, so each call to DRIVE requires specifying both the static attributes and the dynamic state of each
The API is stateless, so each call to {ref}`DRIVE` requires specifying both the static attributes and the dynamic state of each
agent. However, ITRA is a recurrent model that uses the simulation’s history to make predictions, which we facilitate
through the stateless API by passing around a recurrent state, which is a vector with unspecified semantics from the
client’s perspective. Each call to DRIVE returns a new recurrent state for each agent, which must be passed for this
agent to DRIVE on the subsequent call. Providing an incorrect recurrent state may silently lead to deteriorating
client’s perspective. Each call to {ref}`DRIVE` returns a new recurrent state for each agent, which must be passed for this
agent to {ref}`DRIVE` on the subsequent call. Providing an incorrect recurrent state may silently lead to deteriorating
performance, and in order to obtain valid values for the initial recurrent state, the simulation must always start with
INITIALIZE. To initialize the simulation to a specific state, you can provide a sequence of historical states for all
agents that will be used to construct the matching recurrent state. For best performance, at least 10 time steps should
Expand All @@ -96,18 +96,18 @@ Python library that handles this internally.
In the simple case there is a fixed number of agents present throughout the entire simulation. However, it is also
possible to dynamically introduce and remove agents, which is typically done when they enter and exit the supported
area. Removing agents is easy, all it takes is removing the information for a given agent from the lists of agent
attributes, agent states, and recurrent states. For convenience, DRIVE returns a boolean vector indicating which agents
attributes, agent states, and recurrent states. For convenience, {ref}`DRIVE` returns a boolean vector indicating which agents
are within the supported area after the predicted step.
Introducing agents into a running simulation is more complicated, due to the requirement to construct their recurrent
state. When predictions for the new agents are not going to be consumed, its state can simply be appended to the
relevant lists, with the recurrent state set to zeros. To obtain good predictions for such an agent, another call to
INITIALIZE needs to be made, providing the recent history of all agents, including the new agent. This correctly
initializes the recurrent state and DRIVE can be called from that point on normally. For best performance, each agent
initializes the recurrent state and {ref}`DRIVE` can be called from that point on normally. For best performance, each agent
should initially be controlled by the client for at least 10 time steps before being handed off to ITRA as an NPC by
calling INITIALIZE.

## Reproducibility and control over predictions
INITIALIZE and DRIVE optionally accept a random seed, which controls their stochastic behavior. With the same seed and
INITIALIZE and {ref}`DRIVE` optionally accept a random seed, which controls their stochastic behavior. With the same seed and
the same inputs, the outputs will be approximately the same with high accuracy.
Other than for the random seed, there is currently no mechanism to influence the behavior of predicted agents, such as
by directing them to certain exits or setting their speed, but such mechanisms will be included in future releases.
Expand All @@ -119,6 +119,6 @@ formats, including checking lengths of lists and bounds for numeric values, and
performed on the client side before paid API calls. All those features are only available in the Python library and not
in the REST API.
To enable the mock API, just set the environment variable `IAI_MOCK_API` to true according to {ref}`Environment Variables`.
For further debugging and visualization, both INITIALIZE and DRIVE optionally return a rendered birdview image showing
For further debugging and visualization, both INITIALIZE and {ref}`DRIVE` optionally return a rendered birdview image showing
the simulation state after the call to them. This significantly increases the payload size and latency, so it should not
be done in real integrations.
24 changes: 14 additions & 10 deletions examples/cosimulation_minimal_example.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
import invertedai as iai
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentType

from typing import List
import numpy as np
import invertedai as iai
import matplotlib.pyplot as plt

iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable


class LocalSimulator:
"""
Mock up of a local simulator, where you control the ego vehicle. This example only supports single ego vehicle.
Expand Down Expand Up @@ -36,24 +38,26 @@ def step(self, predicted_npc_states):
return self.ego_state

print("Begin initialization.")
location = 'iai:ubc_roundabout'
location = "canada:drake_street_and_pacific_blvd"
iai_simulation = iai.BasicCosimulation( # instantiate a stateful wrapper for Inverted AI API
location=location, # select one of available locations
agent_count=5, # how many vehicles in total to use in the simulation
agent_properties=get_default_agent_properties({AgentType.car:5}), # how many vehicles in total to use in the simulation
ego_agent_mask=[True, False, False, False, False], # first vehicle is ego, rest are NPCs
get_birdview=False, # provides simple visualization - don't use in production
    traffic_lights=True, # gets the traffic light states, used for initialization and stepping the simulation
)

location_info_response = iai.location_info(location=location)
rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors)
scene_plotter = iai.utils.ScenePlotter(
rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors
)
scene_plotter.initialize_recording(
agent_states=iai_simulation.agent_states,
agent_attributes=iai_simulation.agent_attributes,
agent_properties=iai_simulation.agent_properties,
)

print("Begin stepping through simulation.")
Expand All @@ -66,7 +70,7 @@ def step(self, predicted_npc_states):
# execute predictions in your simulator, using your actions for the ego vehicle
updated_ego_agent_state = local_simulation.step(predicted_npc_behavior)
# save the visualization with ScenePlotter
scene_plotter.record_step(iai_simulation.agent_states)
scene_plotter.record_step(iai_simulation.agent_states,iai_simulation.light_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
Expand Down
23 changes: 16 additions & 7 deletions examples/large_map_example.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
import invertedai as iai
from invertedai.large.common import Region
from invertedai.common import AgentAttributes
from invertedai.utils import get_default_agent_properties
from invertedai.common import AgentAttributes, AgentType

import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
import time
import random
import time

from tqdm import tqdm

def main(args):
if args.model_version_drive == "None":
Expand All @@ -30,15 +30,16 @@ def main(args):
print(f"Begin initialization.")
regions = iai.get_regions_default(
location = args.location,
total_num_agents = args.num_agents,
agent_count_dict = {AgentType.car: args.num_agents},
area_shape = (int(args.width/2),int(args.height/2)),
map_center = map_center,
)

response = iai.large_initialize(
location = args.location,
regions = regions,
random_seed = initialize_seed
random_seed = initialize_seed,
get_infractions = args.get_infractions
)

print(f"Set up simulation.")
Expand Down Expand Up @@ -72,6 +73,7 @@ def main(args):
light_recurrent_states = response.light_recurrent_states,
random_seed = drive_seed,
api_model_version = model_version,
get_infractions = args.get_infractions,
single_call_agent_limit = args.capacity,
async_api_calls = args.is_async
)
Expand All @@ -88,10 +90,11 @@ def main(args):
scene_plotter.animate_scene(
output_name=gif_name,
ax=ax,
direction_vec=False,
direction_vec=True,
velocity_vec=False,
plot_frame_number=True,
)
plt.close(fig)
print("Done")

if __name__ == '__main__':
Expand Down Expand Up @@ -159,6 +162,12 @@ def main(args):
    help=f"Should the simulation be saved with the visualization tool.",
default=True
)
argparser.add_argument(
'--get-infractions',
type=bool,
help=f"Should the simulation capture infractions data.",
default=False
)
argparser.add_argument(
'--num-simulations',
type=int,
Expand Down
Loading
Loading