
Commit

Merge pull request #145 from inverted-ai/develop
Develop
Ruishenl authored Nov 29, 2023
2 parents 5132028 + c059a5b commit 1bc28c1
Showing 59 changed files with 5,362 additions and 4,077 deletions.
87 changes: 67 additions & 20 deletions README.md
@@ -1,7 +1,7 @@
[pypi-badge]: https://badge.fury.io/py/invertedai.svg
[pypi-link]: https://pypi.org/project/invertedai/
[colab-badge]: https://colab.research.google.com/assets/colab-badge.svg
[colab-link]: https://colab.research.google.com/github/inverted-ai/invertedai/blob/develop/examples/IAI_demo.ipynb
[colab-link]: https://colab.research.google.com/github/inverted-ai/invertedai/blob/develop/examples/IAI_full_demo.ipynb
[rest-link]: https://app.swaggerhub.com/apis-docs/InvertedAI/InvertedAI
[examples-link]: https://github.com/inverted-ai/invertedai/tree/master/examples

@@ -41,13 +41,14 @@ so you can also download it and build locally.

``` python
import numpy as np
import imageio
import matplotlib.pyplot as plt
import invertedai as iai

location = "canada:vancouver:drake_street_and_pacific_blvd" # select one of available locations
location = "iai:drake_street_and_pacific_blvd" # select one of available locations

iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable

print("Begin initialization.")
# get static information about a given location including map in osm
# format and list traffic lights with their IDs and locations.
location_info_response = iai.location_info(location=location)
@@ -64,7 +65,18 @@ response = iai.initialize(
)
agent_attributes = response.agent_attributes # get dimension and other attributes of NPCs

images = [response.birdview.decode()] # images storing visualizations of subsequent states
location_info_response = iai.location_info(location=location)
rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors)
scene_plotter.initialize_recording(
agent_states=response.agent_states,
agent_attributes=agent_attributes,
)

print("Begin stepping through simulation.")
for _ in range(100): # how many simulation steps to execute (10 steps is 1 second)

# get next traffic light state
@@ -80,11 +92,21 @@ for _ in range(100): # how many simulation steps to execute (10 steps is 1 seco
traffic_lights_states=light_response.traffic_lights_states,
)

# save the visualization - requires np and cv2
images.append(response.birdview.decode())
# save the visualization
scene_plotter.record_step(response.agent_states, light_response.traffic_lights_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
imageio.mimsave("iai-example.gif", np.array(images), format="GIF-PIL")
fig, ax = plt.subplots(constrained_layout=True, figsize=(50, 50))
gif_name = 'minimal_example.gif'
scene_plotter.animate_scene(
output_name=gif_name,
ax=ax,
direction_vec=False,
velocity_vec=False,
plot_frame_number=True
)
print("Done")

```
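As the comment in the example notes, the API key can also come from the `IAI_API_KEY` environment variable instead of being hard-coded. A minimal sketch of that route, using only the `add_apikey` call shown above (the empty-string fallback is our convention, not the SDK's):

```python
import os

import invertedai as iai

# Keep the key out of source control: read it from the environment and
# pass it to the same add_apikey call used in the example above.
iai.add_apikey(os.environ.get("IAI_API_KEY", ""))
```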

@@ -96,17 +118,18 @@ your machine and the NPC engine running on Inverted AI servers. The basic integr
```python
from typing import List
import numpy as np
import imageio
import invertedai as iai
import matplotlib.pyplot as plt

# iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable
iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable


class LocalSimulator:
"""
Mock-up of a local simulator, where you control the ego vehicle. This example only supports a single ego vehicle.
"""
def __init__(self, ego_state: iai.AgentState, npc_states: List[iai.AgentState]):

def __init__(self, ego_state: iai.common.AgentState, npc_states: List[iai.common.AgentState]):
self.ego_state = ego_state
self.npc_states = npc_states

@@ -119,8 +142,8 @@ class LocalSimulator:
dx = self.ego_state.speed * dt * np.cos(self.ego_state.orientation)
dy = self.ego_state.speed * dt * np.sin(self.ego_state.orientation)

self.ego_state = iai.AgentState(
center=iai.Point(x=self.ego_state.center.x + dx, y=self.ego_state.center.y + dy),
self.ego_state = iai.common.AgentState(
center=iai.common.Point(x=self.ego_state.center.x + dx, y=self.ego_state.center.y + dy),
orientation=self.ego_state.orientation,
speed=self.ego_state.speed,
)
@@ -130,32 +153,56 @@
self.npc_states = predicted_npc_states
return self.ego_state


print("Begin initialization.")
location = 'iai:ubc_roundabout'
iai_simulation = iai.BasicCosimulation( # instantiate a stateful wrapper for Inverted AI API
location='canada:vancouver:ubc_roundabout', # select one of available locations
location=location, # select one of available locations
agent_count=5, # how many vehicles in total to use in the simulation
ego_agent_mask=[True, False, False, False, False], # first vehicle is ego, rest are NPCs
get_birdview=True, # provides simple visualization - don't use in production
get_birdview=False, # provides simple visualization - don't use in production
traffic_lights=True, # gets the traffic light states, used for initializing and stepping the simulation
)

location_info_response = iai.location_info(location=location)
rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors)
scene_plotter.initialize_recording(
agent_states=iai_simulation.agent_states,
agent_attributes=iai_simulation.agent_attributes,
)

print("Begin stepping through simulation.")
local_simulation = LocalSimulator(iai_simulation.ego_states[0], iai_simulation.npc_states)
images = [iai_simulation.birdview.decode()] # images storing visualizations of subsequent states
for _ in range(100): # how many simulation steps to execute (10 steps is 1 second)
# query the API for subsequent NPC predictions, informing it how the ego vehicle acted
iai_simulation.step([local_simulation.ego_state])
# collect predictions for the next time step
predicted_npc_behavior = iai_simulation.npc_states
# execute predictions in your simulator, using your actions for the ego vehicle
updated_ego_agent_state = local_simulation.step(predicted_npc_behavior)
# save the visualization - requires np and cv2
images.append(iai_simulation.birdview.decode())
# save the visualization with ScenePlotter
scene_plotter.record_step(iai_simulation.agent_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
imageio.mimsave("iai-example.gif", np.array(images), format="GIF-PIL")
fig, ax = plt.subplots(constrained_layout=True, figsize=(50, 50))
gif_name = 'cosimulation_minimal_example.gif'
scene_plotter.animate_scene(
output_name=gif_name,
ax=ax,
direction_vec=False,
velocity_vec=False,
plot_frame_number=True
)
print("Done")

```
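`LocalSimulator._step_ego` above is a constant-speed unicycle update with `dt = 0.1` s, matching the loop comment that 10 steps equal 1 second. As a standalone sanity check of that kinematics, independent of the Inverted AI API:

```python
import numpy as np

def kinematic_step(x: float, y: float, orientation: float, speed: float, dt: float = 0.1):
    """One constant-speed unicycle step: advance speed * dt along the heading."""
    return (x + speed * dt * np.cos(orientation),
            y + speed * dt * np.sin(orientation))

# An agent at the origin heading along +x at 10 m/s moves 1 m in one 0.1 s step.
assert np.allclose(kinematic_step(0.0, 0.0, 0.0, 10.0), (1.0, 0.0))
```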
To quickly check out how Inverted AI NPCs
behave, try our
[Colab](https://colab.research.google.com/github/inverted-ai/invertedai-drive/blob/develop/examples/npc_only_colab.ipynb),
[Colab](https://colab.research.google.com/github/inverted-ai/invertedai-drive/blob/develop/examples/IAI_full_demo.ipynb),
where all agents are NPCs, or go to our
[github repository](https://github.com/inverted-ai/invertedai/tree/master/examples) to execute it locally.
When you're ready to try our NPCs with a real simulator, see the example [CARLA integration](https://github.com/inverted-ai/invertedai/tree/master/examples/carla).
17 changes: 17 additions & 0 deletions docs/source/cppapi/cpp-blame.md
@@ -0,0 +1,17 @@
# BLAME (C++)


```{eval-rst}
.. doxygenfunction:: invertedai::blame
:project: InvertedAI-CPP
```

---
```{eval-rst}
.. doxygenclass:: invertedai::BlameRequest
:members:
:undoc-members:
.. doxygenclass:: invertedai::BlameResponse
:members:
:undoc-members:
```
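This commit adds a matching `sdk-blame` page on the Python side (see the index change below). For orientation only, a heavily hedged Python sketch of calling the blame endpoint; the keyword arguments and the response field are assumptions based on the SDK's naming conventions, not confirmed by this diff:

```python
from typing import List, Tuple

import invertedai as iai

def blame_collision(
    location: str,
    state_history: List[List["iai.common.AgentState"]],
    attributes: List["iai.common.AgentAttributes"],
    colliding_pair: Tuple[int, int],
):
    """Ask the blame endpoint which of two colliding agents was at fault."""
    response = iai.blame(
        location=location,
        colliding_agents=colliding_pair,    # assumed: indices of the colliding pair
        agent_state_history=state_history,  # assumed: one list of agent states per time step
        agent_attributes=attributes,        # assumed: static attributes for every agent
    )
    return response.agents_at_fault         # assumed response field
```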
1 change: 1 addition & 0 deletions docs/source/cppapi/index.md
@@ -7,6 +7,7 @@ accessed directly. Below are the key functions of the library, along with some c
```{toctree}
:maxdepth: 1
cpp-blame
cpp-drive
cpp-initialize
cpp-location-info
6 changes: 3 additions & 3 deletions docs/source/pythonapi/index.md
@@ -7,13 +7,13 @@ accessed directly. Below are the key functions of the library, along with some c
```{toctree}
:maxdepth: 1
sdk-blame
sdk-drive
sdk-initialize
sdk-location-info
sdk-light
sdk-blame
sdk-simulation
sdk-location-info
sdk-common
sdk-simulation
sdk-env-var
```

557 changes: 0 additions & 557 deletions examples/Controlled_intersection.ipynb

This file was deleted.

1,726 changes: 0 additions & 1,726 deletions examples/IAI_demo.ipynb

This file was deleted.

1,892 changes: 1,892 additions & 0 deletions examples/IAI_full_demo.ipynb

Large diffs are not rendered by default.

13 changes: 8 additions & 5 deletions examples/README.md
@@ -4,16 +4,19 @@ This folder contains examples demonstrating how to use the Inverted AI API in Py
<!-- start exampels -->
[Click here](https://download-directory.github.io/?url=https://github.com/inverted-ai/invertedai/tree/master/examples) to download the folder as a zip-file.
To run the examples locally, first build the virtual environment.
```commandline
```bash
python -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install --upgrade -r requirements.txt
# optionally install carla, if you want to run carla demos
pip install carla==0.9.13
```
*If running into package not found issue like`ERROR: No matching distribution found for carla==0.9.13`, try update
the pip: `pip install upgrade pip `.
*If you run into an issue like `ERROR: No matching distribution found for carla==0.9.13`, your Python version may not
be CARLA-compatible.

Then, once you obtain an API key, you can run the examples.
```commandline
```bash
python npc_only.py --api_key $IAI_API_KEY
```
There are currently three different examples available.
@@ -28,7 +31,7 @@ quickly give you an idea for how the underlying NPCs behave. This example is ava
in a few different versions, one calling the underlying REST API directly, and others
using the wrapper provided as a part of our library, the latter version also being
available as a Jupyter notebook and
[Colab](https://colab.research.google.com/github/inverted-ai/invertedai-drive/blob/develop/examples/npc_only_colab.ipynb).
[Colab](https://colab.research.google.com/github/inverted-ai/invertedai-drive/blob/develop/examples/npc_only.ipynb).

## Cosimulation Minimal example

11 changes: 7 additions & 4 deletions examples/carla/region_drive.py
@@ -1,6 +1,9 @@
import sys
sys.path.append('../')

import argparse
import invertedai as iai
from invertedai.simulation.simulator import Simulation, SimulationConfig
from simulation.simulator import Simulation, SimulationConfig
import pathlib
import pygame
from tqdm import tqdm
@@ -17,7 +20,7 @@
parser.add_argument("-cap", "--quadtree_capacity", type=int, default=15)
parser.add_argument("-ad", "--agent_density", type=int, default=10)
parser.add_argument("-ri", "--re_initialization", type=int, default=30)
parser.add_argument("-len", "--simulation_length", type=int, default=10000)
parser.add_argument("-len", "--simulation_length", type=int, default=600)
args = parser.parse_args()


@@ -29,12 +32,12 @@

cfg = SimulationConfig(location=args.location, map_center=(response.map_center.x, response.map_center.y),
map_fov=response.map_fov, rendered_static_map=rendered_static_map,
map_width=response.map_fov+200, map_height=response.map_fov+200, agent_density=args.agent_density,
map_width=response.map_fov, map_height=response.map_fov, agent_density=args.agent_density,
initialize_stride=50, quadtree_capacity=args.quadtree_capacity,
re_initialization_period=args.re_initialization)
simulation = Simulation(cfg=cfg)

fps = 100
fps = 60
clock = pygame.time.Clock()
run = True
start = perf_counter()
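For context on the `fps` and `clock` lines above (this hunk also lowers the frame cap from 100 to 60), the standard pygame pacing pattern looks like the sketch below; the simulation-stepping call is outside this hunk, so it is left as a placeholder comment rather than guessed:

```python
import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))  # placeholder window size
clock = pygame.time.Clock()
fps = 60

running = True
while running:
    for event in pygame.event.get():  # keep the window responsive
        if event.type == pygame.QUIT:
            running = False
    # ... advance and render the simulation here (API not shown in this hunk) ...
    clock.tick(fps)  # sleep just enough to cap the loop at `fps` iterations per second

pygame.quit()
```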
48 changes: 36 additions & 12 deletions examples/cosimulation_minimal_example.py
@@ -1,17 +1,17 @@
from typing import List
import numpy as np
import imageio
import invertedai as iai
import matplotlib.pyplot as plt

# iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable
iai.add_apikey('') # specify your key here or through the IAI_API_KEY variable


class LocalSimulator:
"""
Mock-up of a local simulator, where you control the ego vehicle. This example only supports a single ego vehicle.
"""

def __init__(self, ego_state: iai.AgentState, npc_states: List[iai.AgentState]):
def __init__(self, ego_state: iai.common.AgentState, npc_states: List[iai.common.AgentState]):
self.ego_state = ego_state
self.npc_states = npc_states

@@ -24,8 +24,8 @@ def _step_ego(self):
dx = self.ego_state.speed * dt * np.cos(self.ego_state.orientation)
dy = self.ego_state.speed * dt * np.sin(self.ego_state.orientation)

self.ego_state = iai.AgentState(
center=iai.Point(x=self.ego_state.center.x + dx, y=self.ego_state.center.y + dy),
self.ego_state = iai.common.AgentState(
center=iai.common.Point(x=self.ego_state.center.x + dx, y=self.ego_state.center.y + dy),
orientation=self.ego_state.orientation,
speed=self.ego_state.speed,
)
@@ -35,24 +35,48 @@ def step(self, predicted_npc_states):
self.npc_states = predicted_npc_states
return self.ego_state


print("Begin initialization.")
location = 'iai:ubc_roundabout'
iai_simulation = iai.BasicCosimulation( # instantiate a stateful wrapper for Inverted AI API
location='canada:vancouver:ubc_roundabout', # select one of available locations
location=location, # select one of available locations
agent_count=5, # how many vehicles in total to use in the simulation
ego_agent_mask=[True, False, False, False, False], # first vehicle is ego, rest are NPCs
get_birdview=True, # provides simple visualization - don't use in production
get_birdview=False, # provides simple visualization - don't use in production
traffic_lights=True, # gets the traffic light states, used for initializing and stepping the simulation
)

location_info_response = iai.location_info(location=location)
rendered_static_map = location_info_response.birdview_image.decode()
scene_plotter = iai.utils.ScenePlotter(rendered_static_map,
location_info_response.map_fov,
(location_info_response.map_center.x, location_info_response.map_center.y),
location_info_response.static_actors)
scene_plotter.initialize_recording(
agent_states=iai_simulation.agent_states,
agent_attributes=iai_simulation.agent_attributes,
)

print("Begin stepping through simulation.")
local_simulation = LocalSimulator(iai_simulation.ego_states[0], iai_simulation.npc_states)
images = [iai_simulation.birdview.decode()] # images storing visualizations of subsequent states
for _ in range(100): # how many simulation steps to execute (10 steps is 1 second)
# query the API for subsequent NPC predictions, informing it how the ego vehicle acted
iai_simulation.step([local_simulation.ego_state])
# collect predictions for the next time step
predicted_npc_behavior = iai_simulation.npc_states
# execute predictions in your simulator, using your actions for the ego vehicle
updated_ego_agent_state = local_simulation.step(predicted_npc_behavior)
# save the visualization - requires np and cv2
images.append(iai_simulation.birdview.decode())
# save the visualization with ScenePlotter
scene_plotter.record_step(iai_simulation.agent_states)

print("Simulation finished, save visualization.")
# save the visualization to disk
imageio.mimsave("iai-example.gif", np.array(images), format="GIF-PIL")
fig, ax = plt.subplots(constrained_layout=True, figsize=(50, 50))
gif_name = 'cosimulation_minimal_example.gif'
scene_plotter.animate_scene(
output_name=gif_name,
ax=ax,
direction_vec=False,
velocity_vec=False,
plot_frame_number=True
)
print("Done")
