Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
neopostmodern committed Feb 28, 2021
0 parents commit 9861c5b
Show file tree
Hide file tree
Showing 10 changed files with 468 additions and 0 deletions.
1 change: 1 addition & 0 deletions .github/FUNDING.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
custom: https://bericht.neopostmodern.com/posts/support
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
.idea/
documentation/
52 changes: 52 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Camera peace

![Screencapture of using Camera peace](screencapture.gif)

A little script to manipulate your webcam in real time for video calls.
Gives you peace of mind, the ability to sneak away to make coffee, zoom off, or freedom to mind wander without looking absent.
¯\\\_(ツ)\_

~~It captures an avatar of you in real time and then synthesizes a credible, interactive version of you using
state of the art artificial intelligence models.~~
It allows you to freeze or loop yourself. Stutter and artifact utilities help convey that you are having a bad
connection and mask the not-really-looping loop you just created.

Also, don't take this too seriously and don't forget that
[there are no technological solutions for social problems](https://media.ccc.de/v/36c3-10988-wohnungsbot_an_automation-drama_in_three_acts).

## How to

Launch this script before joining the video call.
Then simply select the 'Dummy output' camera when joining the call.
Control the different modes from the terminal window.
Shortcuts are presented on screen in blue.

## Requirements
Probably only runs on Linux (tested with Ubuntu 20.04), with Python 3.x (tested with 3.7).

## Setup
You might need to install more/other packages! Using a virtual environment might also be a good idea.
```shell
sudo apt install v4l2loopback-utils
pip3 install --user opencv-python pyfakewebcam
```

Create a fake webcam before running the script:
```shell
sudo modprobe v4l2loopback devices=1 exclusive_caps=1
```
(`exclusive_caps` is required for the fake webcam to show up in the browser.)

## Development

Uses [black](https://github.com/psf/black) for formatting.

## To-do

- Support other platforms
- Audio manipulation

---

Inspiration: https://stackoverflow.com/a/61394280
See also: http://signalculture.org/interstream.html
24 changes: 24 additions & 0 deletions artifacts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import cv2
import numpy as np

from config import WIDTH, HEIGHT


def compression_artifact(current_frame, base_frame, resize_factor=0.2):
    """Blend a heavily 'compressed' rendition of *base_frame* into *current_frame*.

    The base (historical) frame is shrunk, pushed through a very low
    quality JPEG round-trip and scaled back up, which produces blocky
    compression artifacts.  Wherever that degraded frame is nearly
    identical to the live frame, the degraded pixels replace the live
    ones, faking a glitchy stream.
    """
    # Low-quality JPEG round-trip on a shrunken copy of the historical frame.
    small = cv2.resize(base_frame, None, fx=resize_factor, fy=resize_factor)
    _, jpeg_encoded = cv2.imencode(
        ".jpg", small, [int(cv2.IMWRITE_JPEG_QUALITY), 10]
    )
    degraded = cv2.resize(cv2.imdecode(jpeg_encoded, 1), (WIDTH, HEIGHT))

    # Mask of pixels where the degraded frame barely differs from the live one.
    difference = cv2.cvtColor(cv2.absdiff(degraded, current_frame), cv2.COLOR_BGR2GRAY)
    nearly_identical = difference < 10

    # Start from the live frame and splice the degraded pixels in.
    composite = np.copy(current_frame)
    composite[nearly_identical] = degraded[nearly_identical]
    return composite
40 changes: 40 additions & 0 deletions camera_detection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from dataclasses import dataclass
from subprocess import run, PIPE


@dataclass
class Camera:
    """A v4l2 video device as reported by `v4l2-ctl --list-devices`."""

    name: str  # human-readable device name (text before the parenthesis)
    path: str  # device file, e.g. /dev/video0
    identifier: str  # identifier from the parenthesised part of the name line


def detect_cameras():
    """Discover v4l2 video devices via `v4l2-ctl --list-devices`.

    Returns:
        A tuple `(input_cameras, output_camera)`: the list of real capture
        devices, and the v4l2loopback "Dummy" device (or None if no
        loopback device was found).

    Raises:
        RuntimeError: if `v4l2-ctl` exits with a non-zero status.
    """
    v4l2_query = run(["v4l2-ctl", "--list-devices"], stdout=PIPE)
    if v4l2_query.returncode != 0:
        raise RuntimeError("Failed to query video devices")

    v4l2_output = v4l2_query.stdout.decode()
    # Devices are separated by blank lines; each entry is a name line
    # followed by one or more indented device-file paths.
    cameras = v4l2_output.strip().split("\n\n")

    input_cameras = []
    output_camera = None

    for camera in cameras:
        segments = [s.strip() for s in camera.split("\n")]

        # Name lines look like "Readable Name (identifier):".
        name = segments[0]
        name_split_position = name.rfind("(")
        identifier = name[name_split_position:].strip("():")
        name_segments = name[:name_split_position].split(":")
        readable_name = name_segments[0]

        paths = segments[1:]
        if not paths:
            # Entry without any device file - nothing usable, skip it.
            continue
        path = paths[0]  # ignore all but the first file path offered

        if name.startswith("Dummy"):
            output_camera = Camera(readable_name, path, identifier)
        elif path.startswith("/dev/video"):
            input_cameras.append(Camera(readable_name, path, identifier))

    return input_cameras, output_camera
8 changes: 8 additions & 0 deletions config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Output resolution of the fake webcam, in pixels.
WIDTH = 1280
HEIGHT = 720
# Upper bound on the capture/processing loop rate.
MAX_FRAMES_PER_SECOND = 15

# Number of recent frames kept in memory (8 seconds at 15 fps).
FRAME_HISTORY_LENGTH = 120

# Probabilities of triggering a stutter / compression-artifact burst
# (presumably evaluated once per frame - confirm in the main loop).
STUTTER_PROBABILITY = 0.01
ARTIFACT_PROBABILITY = 0.005
109 changes: 109 additions & 0 deletions core.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
from collections import deque
from dataclasses import dataclass
from subprocess import Popen, PIPE
from os import setsid, killpg
from signal import SIGTERM
from typing import Callable, List

import cv2

from camera_detection import Camera
from config import FRAME_HISTORY_LENGTH, WIDTH, HEIGHT


class MODE:
    """Namespace of string constants naming the manipulation modes.

    The values are used as keys into `Core.modes`.
    """

    FREEZE = "freeze"
    STUTTER = "stutter"
    ARTIFACTS = "artifacts"
    RECORD = "record"
    LOOP = "loop"
    VIEW = "view"


@dataclass
class Mode:
    """A toggleable camera-manipulation mode bound to a keyboard shortcut."""

    name: str  # one of the MODE.* constants
    key: str  # keyboard shortcut that toggles this mode
    # on_toggle is passed the intended future active state, must return the next active state
    on_toggle: Callable[[bool], bool] = lambda x: x
    active: bool = False


class Core:
    """Central state: camera handles, frame buffers and registered modes."""

    available_input_cameras: List[Camera]
    input_camera: Camera
    input_video_capture: cv2.VideoCapture
    output_camera: Camera

    def __init__(self, input_cameras, output_camera):
        # All mutable state lives on the instance (class-level defaults
        # would be shared between Core instances).
        self.frame_count = 0
        # todo: mostly unused - remove? or use for another artifact?
        self.last_frames = deque(maxlen=FRAME_HISTORY_LENGTH)
        self.frozen_frame = None
        self.recorded_frames = []
        self.loop_frames = []
        self.loop_frames_offset = None
        self.ffplay_process = None
        self.modes = {}
        self.input_video_capture = None

        self.available_input_cameras = input_cameras
        self.output_camera = output_camera

        self.register_mode(MODE.FREEZE, "f", self.store_frozen_frame)
        self.register_mode(MODE.STUTTER, "s")
        self.register_mode(MODE.ARTIFACTS, "a")
        self.register_mode(MODE.RECORD, "r", self.manage_recorded_frames)
        self.register_mode(MODE.LOOP, "l", self.manage_loop)
        self.register_mode(MODE.VIEW, "v", self.manage_playback)

    def register_mode(self, name, key, on_toggle=lambda x: x, active=False):
        """Register a mode under *name*, toggled by keyboard shortcut *key*."""
        self.modes[name] = Mode(name, key, on_toggle, active)

    def store_frozen_frame(self, active):
        """Freeze toggle: capture the most recent frame, or release it."""
        if active:
            if not self.last_frames:
                # Nothing captured yet - refuse to activate freezing.
                return False
            self.frozen_frame = self.last_frames[-1]
        else:
            self.frozen_frame = None

        return active

    def manage_recorded_frames(self, active):
        """Record toggle: when recording stops, hand the frames to the loop."""
        if not active:
            self.loop_frames = self.recorded_frames
            self.recorded_frames = []

        return active

    def manage_loop(self, active):
        """Loop toggle: refuse to start without frames; remember start offset."""
        if active and not self.loop_frames:
            return False

        if active:
            self.loop_frames_offset = self.frame_count

        return active

    def manage_playback(self, active):
        """View toggle: spawn or terminate an ffplay preview of the output."""
        if active:
            self.ffplay_process = Popen(
                ["/usr/bin/ffplay", self.output_camera.path],
                stdout=PIPE,
                stderr=PIPE,
                shell=False,
                # Own session, so the whole process group can be signalled.
                preexec_fn=setsid,
            )
        elif self.ffplay_process is not None:
            killpg(self.ffplay_process.pid, SIGTERM)
            self.ffplay_process = None

        return active

    def open_input_camera(self, camera: Camera):
        """Switch capture to *camera*, releasing any previously open device."""
        if self.input_video_capture is not None:
            self.input_video_capture.release()

        # cv2 expects a numeric device index, e.g. 0 for /dev/video0.
        self.input_video_capture = cv2.VideoCapture(
            int(camera.path.replace("/dev/video", ""))
        )
        self.input_video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
        self.input_video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
        self.input_camera = camera
112 changes: 112 additions & 0 deletions interface.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
import curses

from core import MODE


class COLORS:
    """Curses color-pair ids, registered in `initialize_interface`."""

    RED = 1
    GREEN = 2
    BLUE = 3
    YELLOW = 4


def spinner_character(counter):
    """Return the braille spinner glyph for the given frame counter."""
    glyphs = "⠈⠐⠠⢀⡀⠄⠂⠁"
    return glyphs[counter % len(glyphs)]


def initialize_interface(win):
    """Put curses into non-blocking cbreak mode and register the color pairs."""
    win.nodelay(True)
    curses.cbreak()
    # curses.echo()
    pair_definitions = (
        (COLORS.RED, curses.COLOR_RED),
        (COLORS.GREEN, curses.COLOR_GREEN),
        (COLORS.BLUE, curses.COLOR_BLUE),
        (COLORS.YELLOW, curses.COLOR_YELLOW),
    )
    for pair_id, foreground in pair_definitions:
        curses.init_pair(pair_id, foreground, curses.COLOR_BLACK)


def render_interface(
    win, core, stutter_frame_counter, artifact_frame_counter, error=None
):
    """Redraw the whole curses status screen for the current frame.

    Shows a spinner and frame counter, the output camera path, an optional
    error line, the selectable input cameras, every registered mode with
    its shortcut and state, the stutter/artifact burst indicators, and the
    quit shortcut.  Shortcut keys are rendered in blue.
    """
    win.clear()
    # Header: activity spinner, frame counter, output device.
    win.addstr(
        0, 0, spinner_character(core.frame_count), curses.color_pair(COLORS.GREEN),
    )
    win.addstr(0, 3, f"{core.frame_count: 6} frames")
    win.addstr(0, 20, f"Output camera: {core.output_camera.path}")

    line = 2

    if error is not None:
        win.addstr(line, 0, error, curses.color_pair(COLORS.RED))
        line += 2

    # Current input camera, plus any alternatives (selectable by index).
    win.addstr(line, 0, "Input Camera:")
    win.addstr(line, 15, core.input_camera.name, curses.color_pair(COLORS.GREEN))
    if len(core.available_input_cameras) > 1:
        for camera_index, camera in enumerate(core.available_input_cameras):
            if camera.identifier == core.input_camera.identifier:
                continue

            line += 1
            win.addstr(
                line, 0, str(camera_index), curses.color_pair(COLORS.BLUE),
            )
            win.addstr(
                line, 3, camera.name,
            )

    # One line per registered mode: shortcut, active marker, name, details.
    line += 2
    win.addstr(
        line, 0, "MODES",
    )
    for mode in core.modes.values():
        line += 1
        win.addstr(line, 0, mode.key, curses.color_pair(COLORS.BLUE))
        win.addstr(
            line,
            3,
            "✓️" if mode.active else "✗️",
            curses.color_pair(COLORS.GREEN if mode.active else COLORS.RED),
        )
        win.addstr(line, 6, mode.name.capitalize())

        if mode.name == MODE.RECORD and len(core.recorded_frames) > 0:
            win.addstr(line, 15, f"({len(core.recorded_frames)} frames)")
        if mode.name == MODE.LOOP and len(core.loop_frames) > 0:
            if mode.active:
                frame_index = (
                    (core.frame_count - core.loop_frames_offset) % len(core.loop_frames)
                ) + 1
                win.addstr(
                    line, 15, f"(frame {frame_index: 3} of {len(core.loop_frames)})"
                )
            else:
                win.addstr(line, 15, f"({len(core.loop_frames)} frames available)")

    # Stutter / artifact indicators: red = mode off, yellow = armed,
    # green = a burst is currently playing out.
    line += 1
    for mode, counter in {
        MODE.STUTTER: stutter_frame_counter,
        MODE.ARTIFACTS: artifact_frame_counter,
    }.items():
        line += 1
        color = COLORS.RED
        if core.modes[mode].active:
            color = COLORS.YELLOW
        if counter > 0:
            color = COLORS.GREEN

        win.addstr(
            line, 0, "●" if counter > 0 else "○", curses.color_pair(color),
        )
        win.addstr(line, 3, mode.capitalize())
        if counter > 0:
            win.addstr(line, 14, f"({str(counter)} frames remaining)")

    line += 2
    win.addstr(line, 0, "q", curses.color_pair(COLORS.BLUE))
    win.addstr(line, 3, "Quit")
Loading

0 comments on commit 9861c5b

Please sign in to comment.