Skip to content

Commit

Permalink
Merge branch 'main' into enhance-products-no-identified-vulns
Browse files Browse the repository at this point in the history
  • Loading branch information
ffontaine authored Aug 15, 2023
2 parents 3c49231 + 1cd48a2 commit 91d66f4
Show file tree
Hide file tree
Showing 15 changed files with 313 additions and 13 deletions.
3 changes: 3 additions & 0 deletions .github/actions/spelling/allow.txt
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,7 @@ cves
cvs
cvss
cyberciti
cybersecurity
cygwin
darkhttpd
davfs
Expand Down Expand Up @@ -145,6 +146,8 @@ emacs
endoflife
enscript
entrypoint
epss
EPSS
Eqt
Everyone
everytime
Expand Down
22 changes: 14 additions & 8 deletions cve_bin_tool/data_sources/osv_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,9 @@ def format_data(self, all_cve_entries):
vendor = (
"unknown" # OSV Schema does not provide vendor names for packages
)
if "/" in product and "github":
if (
"github.com/" in product
): # if package name is of format github.com/xxxx/yyyy xxxx can be vendor name and yyyy is package name
vendor = product.split("/")[-2] # trying to guess vendor name
product = product.split("/")[-1]

Expand All @@ -314,7 +316,7 @@ def format_data(self, all_cve_entries):

events = None
for ranges in package.get("ranges", []):
if ranges["type"] != "GIT":
if ranges["type"] == "SEMVER":
events = ranges["events"]

if events is None:
Expand All @@ -323,10 +325,12 @@ def format_data(self, all_cve_entries):
if versions == []:
continue

affected["versionStartIncluding"] = versions[0]
affected["versionEndIncluding"] = versions[-1]
version_affected = affected.copy()

affected_data.append(affected)
version_affected["versionStartIncluding"] = versions[0]
version_affected["versionEndIncluding"] = versions[-1]

affected_data.append(version_affected)
else:
introduced = None
fixed = None
Expand All @@ -338,12 +342,14 @@ def format_data(self, all_cve_entries):
fixed = event.get("fixed")

if fixed is not None:
affected["versionStartIncluding"] = introduced
affected["versionEndExcluding"] = fixed
range_affected = affected.copy()

range_affected["versionStartIncluding"] = introduced
range_affected["versionEndExcluding"] = fixed

fixed = None

affected_data.append(affected)
affected_data.append(range_affected)

return severity_data, affected_data

Expand Down
68 changes: 68 additions & 0 deletions cve_bin_tool/output_engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -468,6 +468,74 @@ def output_pdf(
"Applicationlist", widths=[3 * cm, 3 * cm, 2 * cm, 4 * cm, 3 * cm]
)

pdfdoc.heading(1, "List of Vulnerabilities with different metric")
pdfdoc.paragraph(
"The table given below gives CVE found with there score on different metrics."
)
cve_by_metrics: defaultdict[Remarks, list[dict[str, str]]] = defaultdict(
list
)
col_headings = [
"CVE Number",
"CVSS_version",
"CVSS_score",
"EPSS_propability",
"EPSS_percentile",
]
# group cve_data by its remarks and separately by paths
for product_info, cve_data in all_cve_data.items():
for cve in cve_data["cves"]:
propability = "-"
percentile = "-"
for metric, field in cve.metric.items():
if metric == "EPSS":
propability = round(field[0] * 100, 4)
percentile = field[1]

cve_by_metrics[cve.remarks].append(
{
"cve_number": cve.cve_number,
"cvss_version": str(cve.cvss_version),
"cvss_score": str(cve.score),
"epss_propability": str(propability),
"epss_percentile": str(percentile),
"severity": cve.severity,
}
)

for remarks in sorted(cve_by_metrics):
pdfdoc.createtable(
"cvemetric",
col_headings,
pdfdoc.tblStyle,
)
row = 1
for cve in cve_by_metrics[remarks]:
entry = [
cve["cve_number"],
cve["cvss_version"],
str(cve["cvss_score"]),
str(cve["epss_propability"]),
str(cve["epss_percentile"]),
]
pdfdoc.addrow(
"cvemetric",
entry,
[
(
"TEXTCOLOR",
(0, row),
(4, row),
severity_colour[cve["severity"].split("-")[0].upper()],
),
("FONT", (0, row), (4, row), "Helvetica-Bold"),
],
)
row += 1
pdfdoc.showtable(
"cvemetric", widths=[4 * cm, 4 * cm, 3 * cm, 4 * cm, 4 * cm]
)

# List of scanned products with no identified vulnerabilities
if all_product_data is not None:
pdfdoc.heading(1, "No Identified Vulnerabilities")
Expand Down
44 changes: 44 additions & 0 deletions cve_bin_tool/output_engine/console.py
Original file line number Diff line number Diff line change
Expand Up @@ -272,3 +272,47 @@ def validate_cell_length(cell_name, cell_type):
table.add_row(*cells)
# Print the table to the console
console.print(table)

table = Table()
# Add Head Columns to the Table
table.add_column("CVE")
table.add_column("CVSS_version")
table.add_column("CVSS_score")
table.add_column("EPSS_propability")
table.add_column("EPSS_percentile")
color = "green"

cve_by_metrics: defaultdict[Remarks, list[dict[str, str]]] = defaultdict(list)
# group cve_data by its remarks and separately by paths
for product_info, cve_data in all_cve_data.items():
for cve in cve_data["cves"]:
propability = "-"
percentile = "-"
for metric, field in cve.metric.items():
if metric == "EPSS":
propability = round(field[0] * 100, 4)
percentile = field[1]
cve_by_metrics[cve.remarks].append(
{
"cve_number": cve.cve_number,
"cvss_version": str(cve.cvss_version),
"cvss_score": str(cve.score),
"epss_propability": str(propability),
"epss_percentile": str(percentile),
"severity": cve.severity,
}
)

for remarks in sorted(cve_by_remarks):
color = remarks_colors[remarks]
for cve in cve_by_metrics[remarks]:
color = cve["severity"].split("-")[0].lower()
cells = [
Text.styled(cve["cve_number"], color),
Text.styled(cve["cvss_version"], color),
Text.styled(str(cve["cvss_score"]), color),
Text.styled(cve["epss_propability"], color),
Text.styled(cve["epss_percentile"], color),
]
table.add_row(*cells)
console.print(table)
47 changes: 47 additions & 0 deletions cve_bin_tool/output_engine/html.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,52 @@ def output_html(

cve_severity = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "UNKNOWN": 0}

cve_by_metrics: defaultdict[Remarks, list[dict[str, str]]] = defaultdict(list)
for product_info, cve_data in all_cve_data.items():
for cve in cve_data["cves"]:
propability = "-"
percentile = "-"

for metric, field in cve.metric.items():
if metric == "EPSS":
propability = round(field[0] * 100, 4)
percentile = field[1]

cve_by_metrics[cve.remarks].append(
{
"cve_number": cve.cve_number,
"cvss_version": str(cve.cvss_version),
"cvss_score": str(cve.score),
"epss_propability": str(propability),
"epss_percentile": str(percentile),
"severity": cve.severity,
}
)

cve_metric_html_rows = []
for remarks in sorted(cve_by_metrics):
for cve in cve_by_metrics[remarks]:
row_color = "table-success"
if cve["severity"] == "CRITICAL":
row_color = "table-danger"
elif cve["severity"] == "HIGH":
row_color = "table-primary"
elif cve["severity"] == "MEDIUM":
row_color = "table-warning"

html_row = f"""
<tr class="{row_color}">
<th scope="row">{cve["cve_number"]}</th>
<td>{cve["cvss_version"]}</td>
<td>{cve["cvss_score"]}</td>
<td>{cve["epss_propability"]}</td>
<td>{cve["epss_percentile"]}</td>
</tr>
"""
cve_metric_html_rows.append(html_row)
# Join the HTML rows to create the full table content
table_content = "\n".join(cve_metric_html_rows)

# List of Products
for product_info, cve_data in all_cve_data.items():
# Check if product contains CVEs
Expand Down Expand Up @@ -357,6 +403,7 @@ def output_html(
products_without_cve=products_without_cve,
cve_remarks=cve_remarks,
cve_severity=cve_severity,
table_content=table_content,
)

# try to load the bigger files just before the generation of report
Expand Down
20 changes: 20 additions & 0 deletions cve_bin_tool/output_engine/html_reports/templates/dashboard.html
Original file line number Diff line number Diff line change
Expand Up @@ -131,3 +131,23 @@ <h6>Product CVEs</h6>
</div>
</div>
</div>
<div class="card text-color-main shadow-lg bg-white rounded">
<div class="card-header bg-header-dash">
<h6>CVE metric</h6>
</div>
<div class="card-body">
<table id="cveSummary" class="table table-bordered">
<thead>
<tr>
<th scope="col">CVE number</th>
<th scope="col">CVSS version</th>
<th scope="col">CVSS score</th>
<th scope="col">EPSS probability</th>
<th scope="col">EPSS percentile</th>
</tr>
</thead>
<tbody>
{{ table_content }}
</tbody>
</table>
</div>
35 changes: 35 additions & 0 deletions doc/MANUAL.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@
- [Limitations](#limitations)
- [Architecture](#architecture)
- [Database Structure](#database-structure)
- [Metric](#metric)
- [EPSS](#epss)
- [Different output showing metrics](#different-output-showing-metrics)
- [Optional Arguments](#optional-arguments)
- [-e EXCLUDE, --exclude EXCLUDE](#-e-exclude---exclude-exclude)
- [-h, --help](#-h---help)
Expand Down Expand Up @@ -330,6 +333,38 @@ The CVE Binary Tool database comprises three tables: cve_severity, cve_range, an

![database structure of CVE Binary Tool](images/cve-bin-tool-database.png)

## Metric

### EPSS

The Exploit Prediction Scoring System (EPSS) is a data-driven tool designed to help estimate the likelihood of a software vulnerability being exploited in the real world. Its purpose is to assist cybersecurity teams in prioritizing which vulnerabilities to address first. While other standards focus on inherent vulnerability traits and severity, they often lack the ability to evaluate the actual threat level.

EPSS bridges this gap by incorporating up-to-date threat information from CVE and real-world exploit data. Using this data, EPSS generates a probability score ranging from 0 to 1 (equivalent to 0% to 100%). A higher score indicates a higher likelihood of a vulnerability being exploited. For more information, see the [EPSS model documentation](https://www.first.org/epss/model).

### Different output showing metrics

- Console

![console table](images/metric/console_table.png)

![console metric table](images/metric/metric_table.png)

- HTML

![HTML metric table](images/metric/HTML.png)

- PDF

![PDF metric table](images/metric/PDF.png)

- CSV

![CSV metric table](images/metric/CSV.png)

- JSON

![JSON metric table](images/metric/JSON.png)

## Optional Arguments

### -e EXCLUDE, --exclude EXCLUDE
Expand Down
Binary file added doc/images/metric/CSV.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added doc/images/metric/HTML.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added doc/images/metric/JSON.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added doc/images/metric/PDF.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added doc/images/metric/console_table.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added doc/images/metric/metric_table.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading

0 comments on commit 91d66f4

Please sign in to comment.