From 6492d6bfc7c6219fa48c37cbb5d5761cb90ad24d Mon Sep 17 00:00:00 2001 From: Potterluo <2926612857@qq.com> Date: Mon, 10 Nov 2025 11:13:28 +0800 Subject: [PATCH] [Feature]Integration of the pytest testing framework for convenient end-to-end testing, etc., and automatic recording of required data --- .gitignore | 13 +- test/.gitignore | 13 ++ test/README.md | 179 ++++++++++++++++++++++ test/README_zh.md | 182 ++++++++++++++++++++++ test/common/__init__.py | 0 test/common/capture_utils.py | 95 ++++++++++++ test/common/config_utils.py | 86 +++++++++++ test/common/db_utils.py | 183 +++++++++++++++++++++++ test/config.yaml | 18 +++ test/conftest.py | 159 ++++++++++++++++++++ test/pytest.ini | 25 ++++ test/requirements.txt | 6 + test/suites/E2E/test_demo_performance.py | 66 ++++++++ 13 files changed, 1024 insertions(+), 1 deletion(-) create mode 100644 test/.gitignore create mode 100644 test/README.md create mode 100644 test/README_zh.md create mode 100644 test/common/__init__.py create mode 100644 test/common/capture_utils.py create mode 100644 test/common/config_utils.py create mode 100644 test/common/db_utils.py create mode 100644 test/config.yaml create mode 100644 test/conftest.py create mode 100644 test/pytest.ini create mode 100644 test/requirements.txt create mode 100644 test/suites/E2E/test_demo_performance.py diff --git a/.gitignore b/.gitignore index 8ff7d5c2..734cf4bf 100644 --- a/.gitignore +++ b/.gitignore @@ -49,4 +49,15 @@ **/output/** .venv/** **/__pycache__/** -*.egg-info/** \ No newline at end of file +*.egg-info/** +reports/ +dataset/ +logs/ +.* +*.log +result_outputs/ +results/ +.cache/ +backup/ +$null +*__pycache__/ \ No newline at end of file diff --git a/test/.gitignore b/test/.gitignore new file mode 100644 index 00000000..220d21ac --- /dev/null +++ b/test/.gitignore @@ -0,0 +1,13 @@ +reports/ +dataset/ +logs/ +result_outputs/ +results/ +.cache/ +backup/ +$null +*__pycache__/ +.* +*.log +start.bat +!.gitignore \ No newline at end of file diff 
--git a/test/README.md b/test/README.md new file mode 100644 index 00000000..1e11da7e --- /dev/null +++ b/test/README.md @@ -0,0 +1,179 @@ +# Pytest +[简体中文](README_zh.md) +A comprehensive Pytest testing framework featuring configuration management, database integration, performance testing, and HTML report generation. + +## 📋 Features + +- **Modern Testing Framework**: Complete test solution built on Pytest 7.0+ +- **Configuration Management**: YAML-based config with thread-safe singleton pattern +- **Database Integration**: Built-in MySQL support with automatic result storage +- **HTML Reports**: Auto-generated pytest HTML test reports +- **Tagging System**: Multi-dimensional test tags (stage, feature, platform, etc.) + +## 🗂️ Project Structure + +``` +pytest_demo/ +├── common/ # Common modules +│ ├── __init__.py +│ ├── config_utils.py # Configuration utilities +│ ├── db_utils.py # Database utilities +│ └── capture_utils # Return-value capture utilities +├── results/ # Result storage folder +├── suites/ # Test suites +│ ├── UnitTest # Unit tests +│ ├── Feature # Feature tests +│ └── E2E/ # End-to-end tests +│ └── test_demo_performance.py # Sample test file +├── config.yaml # Main config file +├── conftest.py # Pytest config +├── pytest.ini # Pytest settings +├── requirements.txt # Dependencies +└── README.md # This doc (CN) +``` + +## 🚀 Quick Start + +### Prerequisites + +- Python 3.8+ +- MySQL 5.7+ (optional, for DB features) +- Git + +### Installation + +1. **Install dependencies** + ```bash + pip install -r requirements.txt + ``` + +2. **Configure database** (optional) + + Edit `config.yaml`: + ```yaml + database: + backup: "results/" + host: "127.0.0.1" + port: 3306 + name: "ucm_pytest" + user: "root" + password: "123456" + charset: "utf8mb4" + ``` + +3. **Run tests** + ```bash + # Run all tests + pytest + + # Run tests by tag + pytest --stage=1 + pytest --feature=performance + ``` + +## ⚙️ Configuration + +### config.yaml + +Full YAML-based config. 
Key sections: + +- **reports**: Report settings (HTML, timestamp, etc.) +- **database**: MySQL connection details + +## 🧪 Test Examples + +### Basic functional test + +```python +# suites/E2E/test_demo_performance.py +import pytest + +@pytest.fixture(scope="module", name="calc") +def calculator(): + return Calculator() + +@pytest.mark.feature("mark") +class TestCalculator: + def test_add(self, calc): + assert calc.add(1, 2) == 3 + + def test_divide_by_zero(self, calc): + with pytest.raises(ZeroDivisionError): + calc.divide(6, 0) +``` + +## 🏷️ Tagging System + +Multi-dimensional tags supported: + +### Stage tags +- `stage(0)`: Unit tests +- `stage(1)`: Smoke tests +- `stage(2)`: Regression tests +- `stage(3)`: Release tests + +### Functional tags +- `feature`: Module tag +- `platform`: Platform tag (GPU/NPU) + +### Usage + +```bash +# Run smoke tests and above +pytest --stage=1+ + +# Run by feature +pytest --feature=performance +pytest --feature=performance,reliability + +# Run by platform +pytest --platform=gpu +``` + +### HTML Reports + +Auto-generated timestamped HTML reports: +- Location: `reports/pytest_YYYYMMDD_HHMMSS/report.html` +- Detailed results, errors, timing +- Customizable title & style + +### Database Storage + +If enabled, results are auto-saved to MySQL. +To add new record types, ask DB admin to create tables; otherwise only local files are used. + +Example: +```python +@pytest.mark.feature("capture") # Must be top decorator +@export_vars +def test_capture_mix(): + assert 1 == 1 + return { + '_name': 'demo', + '_data': { + 'length': 10086, # single value + 'accuracy': [0.1, 0.2, 0.3], # list + 'loss': [0.1, 0.2, 0.3], # list + } + } +``` + +### Config Access + +Read settings easily: +```python +from common.config_utils import config_utils +# Get config +db_config = config_utils.get_config("database") +api_config = config_utils.get_nested_config("easyPerf.api") +``` + +## 🛠️ Development Guide + +### Adding New Tests + +1. 
Create test files under `suites/` categories +2. Apply appropriate tags +3. Naming: `test_*.py` +4. Use fixtures & marks for data management +5. Keep custom marks concise and aligned with overall goals \ No newline at end of file diff --git a/test/README_zh.md b/test/README_zh.md new file mode 100644 index 00000000..26b0f393 --- /dev/null +++ b/test/README_zh.md @@ -0,0 +1,182 @@ +# Pytest 项目 + Pytest 测试框架,包括配置管理、数据库集成、性能测试和 HTML 报告生成。 + +## 📋 项目特性 + +- **现代化测试框架**: 基于 Pytest 7.0+ 的完整测试解决方案 +- **配置管理**: 支持 YAML 配置文件,线程安全的单例模式配置管理 +- **数据库集成**: 内置 MySQL 数据库支持,自动结果存储 +- **HTML 报告**: 自动生成pytest HTML 测试报告 +- **标记系统**: 支持多维度测试标记(阶段、功能、平台等) + +## 🗂️ 项目结构 + +``` +pytest_demo/ +├── common/ # 公共模块 +│ ├── __init__.py +│ ├── config_utils.py # 配置管理工具 +│ ├── db_utils.py # 数据库工具 +│ └── capture_utils # 返回值捕获工具 +├── results/ # 结果存储目录 +├── suites/ # 测试套件 +│ ├── UnitTest # 单元测试 +│ ├── Feature # 功能测试 +│ └── E2E/ # 端到端测试 +│ └── test_demo_performance.py # 示例测试文件 +├── config.yaml # 主配置文件 +├── conftest.py # Pytest 配置文件 +├── pytest.ini # Pytest 配置 +├── requirements.txt # 项目依赖 +└── README.md # 本文档 +``` + +## 🚀 快速开始 + +### 环境要求 + +- Python 3.8+ +- MySQL 5.7+ (可选,用于数据库功能) +- Git + +### 安装步骤 + +1. **安装依赖** + ```bash + pip install -r requirements.txt + ``` + +2. **配置数据库**(可选) + + 编辑 `config.yaml` 文件中的数据库配置: + ```yaml + database: + backup: "results/" + host: "127.0.0.1" + port: 3306 + name: "ucm_pytest" + user: "root" + password: "123456" + charset: "utf8mb4" + ``` + +3. 
**运行测试** + ```bash + # 运行所有测试 + pytest + + # 运行特定标记的测试 + pytest --stage=1 + pytest --feature=performance + ``` + +## ⚙️ 配置说明 + + +### config.yaml 配置 + +项目支持完整的 YAML 配置管理,主要配置项包括: + +- **reports**: 报告配置(HTML 报告、时间戳等) +- **database**: 数据库连接配置 + +## 🧪 测试示例 + +### 基础功能测试 + +```python +# suites/E2E/test_demo_performance.py +import pytest + +@pytest.fixture(scope="module", name="calc") +def calculator(): + return Calculator() + +@pytest.mark.feature("mark") +class TestCalculator: + def test_add(self, calc): + assert calc.add(1, 2) == 3 + + def test_divide_by_zero(self, calc): + with pytest.raises(ZeroDivisionError): + calc.divide(6, 0) +``` + +## 🏷️ 测试标记系统 + +项目支持多维度的测试标记: + +### 测试阶段标记 +- `stage(0)`: 单元测试 +- `stage(1)`: 冒烟测试 +- `stage(2)`: 回归测试 +- `stage(3)`: 发布测试 + +### 功能标记 +- `feature`: 功能模块标记 +- `platform`: 平台标记(GPU/NPU) + +### 使用示例 + +```bash +# 运行冒烟测试及以上的所有测试 +pytest --stage=1+ + +# 运行特定功能的测试 +pytest --feature=performance +pytest --feature=performance, reliability +# 运行特定平台的测试 +pytest --platform=gpu +``` + + +### HTML 报告 + +项目自动生成带时间戳的 HTML 测试报告: +- 报告位置:`reports/pytest_YYYYMMDD_HHMMSS/report.html` +- 包含详细的测试结果、错误信息和执行时间 +- 支持自定义报告标题和样式 + +### 数据库存储 + +如果启用数据库功能,测试结果会自动存储到 MySQL 数据库。 +若需要新增记录,请联系管理人员在数据库新增对应表;否则只能保存至本地文件。 +使用方式示例: +```python +@pytest.mark.feature("capture") # pytest 的标签必须在上面,否则无法正常使用标记功能 +@export_vars +def test_capture_mix(): + assert 1 == 1 + return { + '_name': 'demo', + '_data': { + 'length': 10086, # single value + 'accuracy': [0.1, 0.2, 0.3], # list + 'loss': [0.1, 0.2, 0.3], # list + } + } + +``` + + +### 配置管理 + +可以通过配置工具便捷读取参数: +```python +from common.config_utils import config_utils +# 获取配置 +db_config = config_utils.get_config("database") +api_config = config_utils.get_nested_config("easyPerf.api") +``` + + + +## 🛠️ 开发指南 + +### 添加新测试 + +1. 在 `suites/` 目录下的各个分类下创建新的测试文件 +2. 使用适当的测试标记 +3. 遵循命名规范:`test_*.py` +4. 使用 fixture 及mark 进行测试数据管理 +5. 
自定义 mark 标签不宜过细,应当与整体功能目标相符合
Supports two calling styles: + """ + results = [] + if "_data" in kwargs: + name = kwargs.get("_name", table_name) + results = _align_and_split(name, kwargs["_data"]) + for result in results: + write_to_db(name, result) + return results + return [] + + +# ---------------- decorator ---------------- +def export_vars(func): + def wrapper(*args, **kwargs): + result = func(*args, **kwargs) + # If the function returns a dict containing '_data' or 'data', post-process it + if isinstance(result, dict): + if "_data" in result or "data" in result: + return post_process(func.__name__, **result) + # Otherwise return unchanged + return result + + return wrapper + + +# ---------------- usage examples ---------------- +@export_vars +def capture(): + """All single values via 'name' + 'data'""" + return {"name": "demo", "_data": {"accuracy": 0.1, "loss": 0.3}} + + +@export_vars +def capture_list(): + """All lists via '_name' + '_data'""" + return { + "_name": "demo", + "_data": { + "accuracy": [0.1, 0.2, 0.3], + "loss": [0.1, 0.2, 0.3], + }, + } + + +@export_vars +def capture_mix(): + """Mixed single + lists via '_name' + '_data'""" + return { + "_name": "demo", + "_data": { + "length": 10086, # single value + "accuracy": [0.1, 0.2, 0.3], # list + "loss": [0.1, 0.2, 0.3], # list + }, + } + + +# quick test +if __name__ == "__main__": + print("capture(): ", capture()) + print("capture_list(): ", capture_list()) + print("capture_mix(): ", capture_mix()) diff --git a/test/common/config_utils.py b/test/common/config_utils.py new file mode 100644 index 00000000..106f783e --- /dev/null +++ b/test/common/config_utils.py @@ -0,0 +1,86 @@ +import os +import threading +from typing import Any, Dict + +import yaml + + +class ConfigUtils: + """ + Singleton Configuration Utility + Provides methods to read and access YAML configuration files. 
+ """ + + _instance = None + _lock = threading.Lock() # Ensure thread-safe singleton creation + + def __init__(self): + self._config = None + + def __new__(cls, config_file: str = None): + # Double-checked locking + if cls._instance is None: + with cls._lock: + if cls._instance is None: + instance = super().__new__(cls) + instance._init_config(config_file) + cls._instance = instance + return cls._instance + + def _init_config(self, config_file: str = None): + """Initialize configuration file path and load config""" + if config_file is None: + current_dir = os.path.dirname(os.path.abspath(__file__)) + config_file = os.path.join(current_dir, "..", "config.yaml") + + self.config_file = os.path.abspath(config_file) + self._config = None # Lazy load + + def _load_config(self) -> Dict[str, Any]: + """Internal method to read configuration from file""" + try: + with open(self.config_file, "r", encoding="utf-8") as f: + return yaml.safe_load(f) or {} + except FileNotFoundError: + print(f"[WARN] Config file not found: {self.config_file}") + return {} + except yaml.YAMLError as e: + print(f"[ERROR] Failed to parse YAML config: {e}") + return {} + + def read_config(self) -> Dict[str, Any]: + """Read configuration file (lazy load)""" + if self._config is None: + self._config = self._load_config() + return self._config + + def reload_config(self): + """Force reload configuration file""" + self._config = self._load_config() + + def get_config(self, key: str, default: Any = None) -> Any: + """Get top-level configuration item""" + config = self.read_config() + return config.get(key, default) + + def get_nested_config(self, key_path: str, default: Any = None) -> Any: + """Get nested configuration, e.g., 'influxdb.host'""" + config = self.read_config() + keys = key_path.split(".") + value = config + try: + for k in keys: + value = value[k] + return value + except (KeyError, TypeError): + return default + + +# Global instance +config_utils = ConfigUtils() + +if __name__ == "__main__": 
+ print("DataBase config:", config_utils.get_config("database")) + print( + "DataBase host:", config_utils.get_nested_config("database.host", "localhost") + ) diff --git a/test/common/db_utils.py b/test/common/db_utils.py new file mode 100644 index 00000000..089af43b --- /dev/null +++ b/test/common/db_utils.py @@ -0,0 +1,183 @@ +import json +import logging +import threading +from pathlib import Path +from typing import Any, Dict, Optional + +import peewee +from common.config_utils import config_utils as config_instance +from peewee import AutoField, Model, MySQLDatabase, TextField + +logger = logging.getLogger("db_handler") +logger.setLevel(logging.DEBUG) + +# Avoid adding handlers multiple times +if not logger.handlers: + logger.setLevel(logging.DEBUG) + +# Global DB instance and lock for thread-safe singleton +_db_instance: Optional[MySQLDatabase] = None +_db_lock = threading.Lock() +_test_build_id: Optional[str] = None +_backup_path: Optional[Path] = None +_db_enabled: bool = False # from config + + +def _get_db() -> Optional[MySQLDatabase]: + """Return a singleton MySQLDatabase instance based on YAML configuration.""" + global _db_instance, _backup_path, _db_enabled + + if _db_instance is None: + with _db_lock: + if _db_instance is None: + db_config = config_instance.get_config("database", {}) + _db_enabled = db_config.get("enabled", False) + + backup_str = db_config.get("backup", "results/") + _backup_path = Path(backup_str).resolve() + _backup_path.mkdir(parents=True, exist_ok=True) + logger.info(f"Backup directory set to: {_backup_path}") + + if not _db_enabled: + return None + + try: + _db_instance = MySQLDatabase( + db_config.get("name", "test_db"), + user=db_config.get("user", "root"), + password=db_config.get("password", ""), + host=db_config.get("host", "localhost"), + port=db_config.get("port", 3306), + charset=db_config.get("charset", "utf8mb4"), + ) + logger.info( + f"Database instance created for: {_db_instance.database}" + ) + except Exception as 
e: + logger.error(f"Failed to create database instance: {e}") + _db_instance = None + + return _db_instance + + +def _set_test_build_id(build_id: Optional[str] = None) -> None: + """Set or generate a unique test build ID.""" + global _test_build_id + _test_build_id = build_id or "default_build_id" + logger.debug(f"Test build ID set to: {_test_build_id}") + + +def _get_test_build_id() -> str: + """Return the current test build ID, generating one if necessary.""" + global _test_build_id + if _test_build_id is None: + _set_test_build_id() + return _test_build_id + + +class BaseEntity(Model): + """Base PeeWee model class using the singleton database.""" + + class Meta: + database = _get_db() + + +def _backup_to_file(table_name: str, data: Dict[str, Any]) -> None: + """Write data to a JSON Lines (.jsonl) file in the backup directory.""" + if not _backup_path: + logger.warning("Backup path is not set. Skipping backup.") + return + + file_path = _backup_path / f"{table_name}.jsonl" + try: + file_path.parent.mkdir(parents=True, exist_ok=True) + with file_path.open("a", encoding="utf-8") as f: + json.dump(data, f, ensure_ascii=False) + f.write("\n") + logger.info(f"Data backed up to {file_path}") + except Exception as e: + logger.error(f"Failed to write backup file {file_path}: {e}") + + +def write_to_db(table_name: str, data: Dict[str, Any]) -> bool: + """ + Attempt to insert data into the specified database table. + If the table doesn't exist or an error occurs, back up to a JSONL file. + """ + db = _get_db() + data["test_build_id"] = _get_test_build_id() + + # Skip DB entirely if disabled + if not _db_enabled or db is None: + _backup_to_file(table_name, data) + return False + + try: + if not db.table_exists(table_name): + logger.warning(f"Table '{table_name}' does not exist. 
Writing to backup.") + _backup_to_file(table_name, data) + return False + + # Get existing columns and filter data + columns = db.get_columns(table_name) + col_names = {col.name for col in columns} + filtered_data = {k: v for k, v in data.items() if k in col_names} + + # Build dynamic model for insertion + fields = {"id": AutoField()} + for col in columns: + if col.name != "id": + fields[col.name] = TextField(null=True) + + DynamicEntity = type( + f"{table_name.capitalize()}DynamicModel", + (BaseEntity,), + { + "Meta": type("Meta", (), {"database": db, "table_name": table_name}), + **fields, + }, + ) + + with db.atomic(): + DynamicEntity.insert(filtered_data).execute() + logger.info(f"Successfully inserted data into table '{table_name}'.") + return True + + except peewee.PeeweeException as e: + logger.error( + f"Database write error for table '{table_name}': {e}", exc_info=True + ) + except Exception as e: + logger.critical( + f"Unexpected error during DB write for '{table_name}': {e}", exc_info=True + ) + + # Fallback to backup on any failure + _backup_to_file(table_name, data) + return False + + +def database_connection(build_id: str) -> None: + """Test database connection and set the build ID.""" + logger.info(f"Setting test build ID: {build_id}") + _set_test_build_id(build_id) + + db = _get_db() + if not _db_enabled: + logger.info("Database connection skipped because enabled=false.") + return + + if db is None: + logger.error("No database instance available.") + return + + logger.info(f"Attempting connection to database: {db.database}") + try: + db.connect(reuse_if_open=True) + logger.info("Database connection successful.") + except Exception as e: + logger.error(f"Database connection failed: {e}", exc_info=True) + finally: + if not db.is_closed(): + db.close() + logger.debug("Database connection closed.") diff --git a/test/config.yaml b/test/config.yaml new file mode 100644 index 00000000..88d00a61 --- /dev/null +++ b/test/config.yaml @@ -0,0 +1,18 @@ 
+reports: + base_dir: "results/reports" + use_timestamp: true + directory_prefix: "pytest" + html: # pytest-html + enabled: true + filename: "report.html" + title: "UCM Pytest Test Report" + +database: + backup: "results/" + enabled: true + host: "127.0.0.1" + port: 3306 + name: "ucm_pytest" + user: "root" + password: "123456" + charset: "utf8mb4" \ No newline at end of file diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 00000000..15025795 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import datetime as dt +import platform as pf +import sys +from functools import wraps +from pathlib import Path + +import pytest +from common.config_utils import config_utils as config_instance +from common.db_utils import database_connection, write_to_db + +# ---------------- Constants ---------------- +PRJ_ROOT = Path(__file__).resolve().parent +sys.path.insert(0, str(PRJ_ROOT)) + + +# ---------------- CLI Options ---------------- +def pytest_addoption(parser): + parser.addoption( + "--stage", action="store", default="", help="Filter by stage marker (1,2,3,+)" + ) + parser.addoption( + "--feature", action="store", default="", help="Filter by feature marker" + ) + parser.addoption( + "--platform", action="store", default="", help="Filter by platform marker" + ) + + +# ---------------- Test Filtering ---------------- +def pytest_collection_modifyitems(config, items): + kept = items[:] + + markers = [m.split(":", 1)[0].strip() for m in config.getini("markers")] + for name in markers: + opt = config.getoption(f"--{name}", "").strip() + if not opt: + continue + + if name == "stage" and opt.endswith("+"): + min_stage = int(opt[:-1]) + kept = [ + it + for it in kept + if any(int(v) >= min_stage for v in _get_marker_args(it, "stage")) + ] + else: + wanted = {x.strip() for x in opt.split(",") if x.strip()} + kept = [ + it + for it in kept + if any(v in wanted for v in _get_marker_args(it, name)) + ] + + 
config.hook.pytest_deselected(items=[i for i in items if i not in kept]) + items[:] = kept + + +def _get_marker_args(item, marker_name): + """Extract only args (not kwargs) from markers, as strings.""" + return [ + str(arg) for mark in item.iter_markers(name=marker_name) for arg in mark.args + ] + + +# ---------------- Report Setup ---------------- +def _prepare_report_dir(config: pytest.Config) -> Path: + cfg = config_instance.get_config("reports", {}) + base_dir = Path(cfg.get("base_dir", "reports")) + prefix = cfg.get("directory_prefix", "pytest") + if cfg.get("use_timestamp", False): + ts = dt.datetime.now().strftime("%Y%m%d_%H%M%S") + report_dir = base_dir / f"{prefix}_{ts}" + else: + report_dir = base_dir + report_dir.mkdir(parents=True, exist_ok=True) + return report_dir + + +def _setup_html_report(config: pytest.Config, report_dir: Path) -> None: + reports_config = config_instance.get_config("reports", {}) + html_cfg = reports_config.get("html", {}) + if not html_cfg.get("enabled", True): + if hasattr(config.option, "htmlpath"): + config.option.htmlpath = None + print("HTML report disabled according to config.yaml") + return + + html_filename = html_cfg.get("filename", "report.html") + config.option.htmlpath = str(report_dir / html_filename) + config.option.self_contained_html = True + print("HTML report enabled") + + +# ---------------- Build ID & Session Init ---------------- +def _generate_build_id(config: pytest.Config) -> str: + ts = dt.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + cli_parts = [] + markers = [m.split(":", 1)[0].strip() for m in config.getini("markers")] + for opt in markers: + val = config.getoption(opt, "") + if val: + cli_parts.append(f"{opt}={val}") + args_part = "_".join(cli_parts) if cli_parts else "all_cases" + return f"pytest_{ts}_{args_part}" + + +# ---------------- Pytest Hooks ---------------- +def pytest_configure(config: pytest.Config) -> None: + """The global configuration will be executed directly upon entering 
pytest.""" + print(f"Starting Test Session: {dt.datetime.now():%Y-%m-%d %H:%M:%S}") + + # Set up report directory + report_dir = _prepare_report_dir(config) + config._report_dir = report_dir # Attach to config for later use + _setup_html_report(config, report_dir) + + # Generate and register build ID into DB + build_id = _generate_build_id(config) + config._build_id = build_id + database_connection(build_id) + + +def pytest_sessionstart(session): + print("") + print("-" * 60) + print(f"{'Python':<10} │ {pf.python_version()}") + print(f"{'Platform':<10} │ {pf.system()} {pf.release()}") + print("-" * 60) + + +def pytest_sessionfinish(session, exitstatus): + report_dir = getattr(session.config, "_report_dir", "reports") + print("") + print("-" * 60) + print(f"{'Reports at':<10} │ {report_dir}") + print("Test session ended") + print("-" * 60) + + +# ---------------- Fixtures ---------------- + + +def pytest_runtest_logreport(report): + """ + Called after each test phase. We only care about 'call' (the actual test). + """ + if report.when != "call": + return + + status = report.outcome.upper() # 'passed', 'failed', 'skipped' → 'PASSED', etc. 
+ test_result = { + "test_case": report.nodeid, + "status": status, + # "duration": report.duration, + "error": str(report.longrepr) if report.failed else None, + } + write_to_db("test_case_info", test_result) diff --git a/test/pytest.ini b/test/pytest.ini new file mode 100644 index 00000000..4be3cf47 --- /dev/null +++ b/test/pytest.ini @@ -0,0 +1,25 @@ +[pytest] +testpaths = suites +python_files = test_*.py +python_classes = Test* +python_functions = test_* + +addopts = + -ra + --strict-markers + --capture=no +filterwarnings = + ignore::pytest.PytestReturnNotNoneWarning + +log_cli = 1 +log_cli_level = INFO +log_cli_format = [%(levelname)s] %(name)s: %(message)s +norecursedirs = .git venv env __pycache__ *.egg + +markers = + # -------- Levels (Required) -------- + stage(n): Unit/Smoke/Regression/Release (0=Unit 1=Smoke 2=Regression 3=Release) + # -------- Features (Recommended) -------- + feature: Feature tag + platform(name): Platform tag(gpu/npu) +# end of markers \ No newline at end of file diff --git a/test/requirements.txt b/test/requirements.txt new file mode 100644 index 00000000..07635b24 --- /dev/null +++ b/test/requirements.txt @@ -0,0 +1,6 @@ +pytest>=7.0.0 +pytest-html>=3.1.1 +PyYAML>=6.0 +# MySQL +peewee>=3.14.5 +pymysql>=1.0.2 \ No newline at end of file diff --git a/test/suites/E2E/test_demo_performance.py b/test/suites/E2E/test_demo_performance.py new file mode 100644 index 00000000..1b76818f --- /dev/null +++ b/test/suites/E2E/test_demo_performance.py @@ -0,0 +1,66 @@ +import pytest +from common.config_utils import config_utils as config_instance + + +# ---------------- Fixture Example ---------------- +class Calculator: + def __init__(self): + print("[Calculator Initialization]") + pass + + def add(self, a, b): + return a + b + + def divide(self, a, b): + if b == 0: + raise ZeroDivisionError("Cannot divide by zero") + return a / b + + +@pytest.fixture(scope="module", name="calc") +def calculator(): + return Calculator() + + 
+@pytest.mark.feature("mark") +class TestCalculator: + # The calc instance will only be initialized on the first call, see the pytest documentation for more usage + def test_add(self, calc): + assert calc.add(1, 2) == 3 + + def test_divide(self, calc): + assert calc.divide(6, 2) == 3 + + def test_divide_by_zero(self, calc): + with pytest.raises(ZeroDivisionError): + calc.divide(6, 0) + + +# ---------------- Write to DB Example ---------------- +from common.capture_utils import * + + +@pytest.mark.feature("capture") # pytest must be the top +@export_vars +def test_capture_mix(): + """Mixed single + lists via '_name' + '_data'""" + assert 1 == 1 + return { + "_name": "demo", + "_data": { + "length": 10086, # single value + "accuracy": [0.1, 0.2, 0.3], # list + "loss": [0.1, 0.2, 0.3], # list + }, + } + + +# ---------------- Read Config Example ---------------- +from common.config_utils import config_utils as config_instance + + +@pytest.mark.feature("config") +def test_config(): + assert ( + config_instance.get_nested_config("database.host", "localhost") == "127.0.0.1" + )