Skip to content

Commit

Permalink
feat: Initial release
Browse files Browse the repository at this point in the history
  • Loading branch information
maheckathorn committed Nov 1, 2024
0 parents commit 09086c8
Show file tree
Hide file tree
Showing 10 changed files with 408 additions and 0 deletions.
7 changes: 7 additions & 0 deletions .github/dependabot.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
# Dependabot: monthly version bumps for the GitHub Actions used in
# .github/workflows (checkout, setup-python, docker/* actions, etc.).
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
99 changes: 99 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
---
name: Release

# Quoted: a bare `on` is read as boolean true by YAML 1.1 parsers.
'on':
  pull_request:
  push:
    branches:
      - main
  # Weekly run (Sundays 07:00 UTC) so published images pick up
  # base-image updates even without code changes.
  schedule:
    - cron: "0 7 * * 0"

env:
  # Image name; pushed to Docker Hub under the cmusei/ namespace.
  IMAGE_NAME: super_mediator

jobs:

  # Test the image builds and works correctly.
  test:
    name: Test
    runs-on: ubuntu-latest

    steps:
      - name: Check out the codebase.
        uses: actions/checkout@v4

      - name: Set up Python 3.
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'

      - name: Install test dependencies.
        run: pip3 install pytest-testinfra

      - name: Build image.
        run: docker build -t cmusei/${{ env.IMAGE_NAME }} .

      # -td with a bash entrypoint keeps a container alive for testinfra's
      # docker:// backend to exec into.
      - name: Run the built image.
        run: docker run --name=${{ env.IMAGE_NAME }} --entrypoint=/bin/bash -td cmusei/${{ env.IMAGE_NAME }}

      - name: Test the built image.
        run: py.test --hosts='docker://${{ env.IMAGE_NAME }}'

  # If on main branch, build and release image.
  # release1 publishes the default build (Dockerfile defaults: fixbuf 2,
  # super_mediator 1.11.0) as :latest, :1, and :1.11.0.
  release1:
    name: Release1
    runs-on: ubuntu-latest
    needs: test
    if: github.ref == 'refs/heads/main'

    steps:
      - uses: actions/checkout@v4
      # QEMU + Buildx enable the linux/arm64 half of the multi-arch build.
      - uses: docker/setup-qemu-action@v3
      - uses: docker/setup-buildx-action@v3

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Build and push image.
        uses: docker/build-push-action@v6
        with:
          context: ./
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            cmusei/${{ env.IMAGE_NAME }}:latest
            cmusei/${{ env.IMAGE_NAME }}:1
            cmusei/${{ env.IMAGE_NAME }}:1.11.0

  # release2 publishes the 2.x alpha, overriding the Dockerfile build args
  # to compile against libfixbuf 3 (see build-args below).
  release2:
    name: Release2
    runs-on: ubuntu-latest
    needs: test
    if: github.ref == 'refs/heads/main'

    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-qemu-action@v3
      - uses: docker/setup-buildx-action@v3

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Build and push image.
        uses: docker/build-push-action@v6
        with:
          context: ./
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          build-args: |
            FIXBUF_VERSION=3
            SUPER_VERSION=2.0.0.alpha3
          tags: |
            cmusei/${{ env.IMAGE_NAME }}:2
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Python bytecode caches left behind by the pytest/testinfra run.
__pycache__/
# Local-only test artifacts: host inventory file (presumably for
# testinfra/ansible — confirm against test setup) and pytest's JUnit report.
inventory
pytest_junit.xml
52 changes: 52 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Selects the cmusei/fixbuf base-image tag (2 for super_mediator 1.x;
# CI overrides to 3 for the 2.x alpha build).
ARG FIXBUF_VERSION=2
FROM cmusei/fixbuf:${FIXBUF_VERSION} AS build
LABEL maintainer="maheckathorn@cert.org"

# super_mediator release to download and compile.
ARG SUPER_VERSION=1.11.0

# Pre-reqs:
# curl for downloading
# build-essentials for build tools
# ca-certs to download https
#
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    build-essential \
    pkg-config \
    ca-certificates \
    libglib2.0-dev \
    libssl-dev \
    zlib1g-dev \
    && apt-get clean && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /netsa

# Download, build, and install super_mediator under /netsa.
# curl -f fails (non-zero exit) on HTTP errors instead of feeding an error
# page to tar; -sS hides the progress bar but keeps error output; -L follows
# redirects. Downloading to a file rather than `curl | tar` keeps a curl
# failure from being masked by the pipeline's exit status.
RUN curl -fsSL -o super_mediator-$SUPER_VERSION.tar.gz \
        https://tools.netsa.cert.org/releases/super_mediator-$SUPER_VERSION.tar.gz && \
    tar -xzf super_mediator-$SUPER_VERSION.tar.gz && \
    cd super_mediator-* && \
    ./configure --prefix=/netsa \
        --with-libfixbuf=/netsa/lib/pkgconfig \
        --with-openssl \
        --with-mysql=no && \
    make && \
    make install && \
    cd ../ && rm -rf super_mediator-*

# Runtime stage: only the shared libraries super_mediator links against.
FROM debian:11-slim
LABEL maintainer="maheckathorn@cert.org"

RUN apt-get update && apt-get install -y --no-install-recommends \
    libglib2.0-0 \
    zlib1g \
    libssl1.1 \
    && apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Everything installed with --prefix=/netsa in the build stage.
COPY --from=build /netsa/ /netsa/

# Default configuration; override by bind-mounting your own file here.
COPY super_mediator.conf /usr/local/etc/

COPY docker-entrypoint.sh /usr/local/bin/
# Symlink at / for callers that invoke /docker-entrypoint.sh by absolute path.
RUN ln -s /usr/local/bin/docker-entrypoint.sh /

ENTRYPOINT ["docker-entrypoint.sh"]
135 changes: 135 additions & 0 deletions LICENSE

Large diffs are not rendered by default.

29 changes: 29 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# source: https://jmkhael.io/makefiles-for-your-dockerfiles/
# Run in parallel via make -j2 see: https://stackoverflow.com/a/9220818

# Docker Hub namespace and image name; exported so recipes and child
# processes (py.test) see them.
NS = cmusei
export SOFTWARE_NAME = super_mediator

# Plain `=` (not `+=`): += appends to an IMAGE_NAME inherited from the
# environment (CI exports IMAGE_NAME=super_mediator), which produced the
# broken value "super_mediator cmusei/super_mediator".
export IMAGE_NAME = $(NS)/$(SOFTWARE_NAME)

export WORK_DIR = .

.PHONY: build build1 build2 test default

build: build1

# 1.x image: Dockerfile defaults (fixbuf 2, super_mediator 1.11.0).
build1:
	docker build --build-arg http_proxy --build-arg https_proxy --build-arg no_proxy -t $(IMAGE_NAME):latest -f Dockerfile .
	docker tag $(IMAGE_NAME):latest $(IMAGE_NAME):1

# 2.x image: override build args for the fixbuf-3 / 2.0.0.alpha3 build.
build2:
	docker build --build-arg http_proxy --build-arg https_proxy --build-arg no_proxy --build-arg FIXBUF_VERSION=3 \
	--build-arg SUPER_VERSION=2.0.0.alpha3 -t $(IMAGE_NAME):2 -f Dockerfile .

# Run the testinfra suite against a disposable container. The leading `-`
# lets the initial cleanup succeed when no old container exists (plain
# `docker rm -f` exits non-zero in that case and aborted the target).
test:
	-docker rm -f $(SOFTWARE_NAME)
	docker run --name=$(SOFTWARE_NAME) --entrypoint=/bin/bash -td $(IMAGE_NAME)
	py.test --hosts='docker://$(SOFTWARE_NAME)'
	docker rm -f $(SOFTWARE_NAME)

# Note: make's real default is the first target (`build`); this alias only
# serves explicit `make default` invocations.
default: build
56 changes: 56 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
[![Software Engineering Institute](https://avatars.githubusercontent.com/u/12465755?s=200&v=4)](https://www.sei.cmu.edu/)

[![Blog](https://img.shields.io/static/v1.svg?color=468f8b&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=SEI&message=Blog)](https://insights.sei.cmu.edu/blog/ "blog posts from our experts in Software Engineering.")
[![Youtube](https://img.shields.io/static/v1.svg?color=468f8b&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=SEI&message=Youtube&logo=youtube)](https://www.youtube.com/@TheSEICMU/ "videos from our experts in Software Engineering.")
[![Podcasts](https://img.shields.io/static/v1.svg?color=468f8b&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=SEI&message=Podcasts&logo=applepodcasts)](https://insights.sei.cmu.edu/podcasts/ "podcasts from our experts in Software Engineering.")
[![GitHub](https://img.shields.io/static/v1.svg?color=468f8b&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=SEI&message=GitHub&logo=github)](https://github.com/cmu-sei "view the source for all of our repositories.")
[![Flow Tools](https://img.shields.io/static/v1.svg?color=468f8b&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=SEI&message=Flow%20Tools)](https://tools.netsa.cert.org/ "documentation and source for all our flow collection and analysis tools.")


At the [SEI](https://www.sei.cmu.edu/), we research software engineering, cybersecurity, and AI engineering problems; create innovative technologies; and put solutions into practice.

Find us at:

* [Blog](https://insights.sei.cmu.edu/blog/) - blog posts from our experts in Software Engineering.
* [Youtube](https://www.youtube.com/@TheSEICMU/) - videos from our experts in Software Engineering.
* [Podcasts](https://insights.sei.cmu.edu/podcasts/) - podcasts from our experts in Software Engineering.
* [GitHub](https://github.com/cmu-sei) - view the source for all of our repositories.
* [Flow Tools](https://tools.netsa.cert.org/) - documentation and source for all our flow collection and analysis tools.

# [super_mediator](https://tools.netsa.cert.org/super_mediator1/index.html)

super_mediator is an IPFIX mediator for use with the [yaf](http://tools.netsa.cert.org/yaf/index.html) and [SiLK](http://tools.netsa.cert.org/silk/index.html) tools. What is an IPFIX mediator? An IPFIX mediator is an intermediate entity between IPFIX Exporters and Collectors that can potentially provide aggregation, filtering, and modification of IPFIX records. It may provide conversion to or from IPFIX or a conversion of IPFIX transport protocols. super_mediator collects and processes yaf output (IPFIX files or via TCP, UDP, or Spread) and exports that data in IPFIX, JSON, or CSV text format to one or more IPFIX collectors such as [rwflowpack](http://tools.netsa.cert.org/silk/rwflowpack.html), [flowcap](http://tools.netsa.cert.org/silk/flowcap.html), or to text files that may be bulk uploaded to a database. MySQL support is provided for automatic import.

super_mediator can provide simple filtering upon collection or at export time. Any traditional flow field can be used in a filter, including IP address or IPset (requires [SiLK IPset library](http://tools.netsa.cert.org/silk-ipset/index.html)).

super_mediator can be configured to pull the Deep Packet Inspection (DPI) data from yaf and export that information to another IPFIX collector, or simply export the data to a CSV file or JSON file for bulk upload into a database of your choice. Given MySQL credentials, super_mediator will import the files into the given database.

super_mediator can also be configured to perform de-duplication of DNS resource records, DPI data, and SSL/TLS certificate data exported by YAF. It will export the de-duplicated records in IPFIX, CSV, or JSON format. See the man pages and tutorials for more information.

## Documentation

More information [here](https://tools.netsa.cert.org/super_mediator/docs.html).

## Usage

Create a user network:

```
docker network create --driver bridge isolated_nw
```

Run the super_mediator container and pass in the desired options:

```
docker run --network=isolated_nw --name=super_mediator --rm -i -t cmusei/super_mediator -c /usr/local/etc/super_mediator.conf
```

The above command attaches the container to the user defined network and names the container super_mediator. By default, the included config file [super_mediator.conf](https://tools.netsa.cert.org/super_mediator/super_mediator.conf.html) is based on the following [file](https://bitbucket.ss.dte.cert.org/projects/DOC/repos/super_mediator/browse/super_mediator.conf).

If you'd like to overwrite the configuration, run the following with your custom configuration file:

```
docker run --network=isolated_nw --name=super_mediator --rm -v $PWD/super_mediator.conf:/usr/local/etc/super_mediator.conf -it cmusei/super_mediator
```

Attaching a container to a user defined network allows you to take advantage of automatic service discovery. In other words, if you want containers to be able to resolve IP addresses by container name, you should use user-defined networks.
5 changes: 5 additions & 0 deletions docker-entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/bin/bash
#
# Entrypoint: run super_mediator with the image's bundled /netsa libraries
# on the loader path, forwarding all container arguments to it.

set -e

# Make the shared libraries staged under /netsa/lib visible to the loader.
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/netsa/lib/"

# exec replaces this shell so super_mediator runs as PID 1 and receives
# signals (e.g. docker stop's SIGTERM) directly. "$@" preserves argument
# boundaries; the original `eval "... "$@""` nested-quote construct split
# and re-evaluated arguments containing spaces or shell metacharacters.
exec /netsa/bin/super_mediator "$@"
13 changes: 13 additions & 0 deletions super_mediator.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# super_mediator configuration shipped as the image default
# (bind-mount over /usr/local/etc/super_mediator.conf to customize).

# Collect incoming IPFIX (e.g. from yaf) over TCP on port 18000.
COLLECTOR TCP
PORT 18000
COLLECTOR END

# rwflowpack
# Export IPFIX over TCP to localhost:18001. FLOW_ONLY restricts this
# exporter to flow records (no DPI/stats) — verify against the
# super_mediator.conf man page for the exact semantics.
EXPORTER TCP "silk"
PORT 18001
HOST localhost
FLOW_ONLY
EXPORTER END

LOGLEVEL DEBUG
PIDFILE "/var/run/super_mediator.pid"
9 changes: 9 additions & 0 deletions tests/default/test_default.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
def test_super_mediator_version(host):
version = "1.11.0"
command = r"""LD_LIBRARY_PATH=/netsa/lib:$LD_LIBRARY_PATH \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/netsa/lib/pkgconfig \
/netsa/bin/super_mediator --version 2>&1 | \
head -n1 | egrep -o '([0-9]{1,}\.)+[0-9]{1,}'"""
cmd = host.run(command)

assert version in cmd.stdout

0 comments on commit 09086c8

Please sign in to comment.