diff --git a/.github/workflows/smoke-test.yml b/.github/workflows/smoke-test.yml index c55d63c8..771a6f16 100644 --- a/.github/workflows/smoke-test.yml +++ b/.github/workflows/smoke-test.yml @@ -26,6 +26,7 @@ jobs: manual-installation: name: manual installation test for (${{ matrix.python-version }}, ${{ matrix.os }}) runs-on: ${{ matrix.os }} + timeout-minutes: 60 strategy: fail-fast: false matrix: @@ -64,9 +65,32 @@ jobs: python -m pip --version python -m pip list + conda-installation: + name: conda installation test for (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + strategy: + fail-fast: false + matrix: + os: + - ubuntu-latest + + steps: + - name: checkout repository + uses: actions/checkout@v3 + + - name: conda install deps (CPU, only base) + run: | + conda env create -n edgelab -f environment.yml + + - name: conda install deps (CUDA, only base) + run: | + conda env create -n edgelab_cuda -f environment_cuda.yml + auto-installation: name: auto installation test for (${{ matrix.os }}) runs-on: ${{ matrix.os }} + timeout-minutes: 60 strategy: fail-fast: false matrix: @@ -77,6 +101,6 @@ jobs: - name: checkout repository uses: actions/checkout@v3 - - name: setup deps + - name: setup deps (shell) run: | bash scripts/setup_linux.sh diff --git a/docs/.vitepress/locales/en_US.ts b/docs/.vitepress/locales/en_US.ts index fa63f4ca..0547c0a5 100644 --- a/docs/.vitepress/locales/en_US.ts +++ b/docs/.vitepress/locales/en_US.ts @@ -65,25 +65,25 @@ function sidebar() { ] }, { - text: 'Examples', + text: 'Deploy', collapsed: false, - link: '/examples/examples', + link: '/deploy/examples', items: [ { text: 'ESP32 - Deploy', - link: '/examples/esp32/deploy', + link: '/deploy/esp32/deploy', items: [ - { text: 'ESP32 Mask Detection', link: '/examples/esp32/mask_detection' }, - { text: 'ESP32 Meter Reader', link: '/examples/esp32/meter_reader' } + { text: 'ESP32 Mask Detection', link: '/deploy/esp32/mask_detection' }, + { text: 'ESP32 Meter Reader', link: 
'/deploy/esp32/meter_reader' } ] }, { text: 'Grove - Deploy', - link: '/examples/grove/deploy', + link: '/deploy/grove/deploy', items: [ - { text: 'Grove Mask Detection', link: '/examples/grove/mask_detection' }, - { text: 'Grove Meter Reader', link: '/examples/grove/meter_reader' }, - { text: 'Grove Digital Meter', link: '/examples/grove/digital_meter' } + { text: 'Grove Mask Detection', link: '/deploy/grove/mask_detection' }, + { text: 'Grove Meter Reader', link: '/deploy/grove/meter_reader' }, + { text: 'Grove Digital Meter', link: '/deploy/grove/digital_meter' } ] } ] @@ -93,7 +93,7 @@ function sidebar() { collapsed: false, items: [ { - text: 'Edge Impulse Learning Blocks', + text: 'Machine Learning Blocks', link: '/edgeimpulse/ei_ml_blocks', }, ] @@ -104,7 +104,7 @@ function sidebar() { items: [ { text: 'FAQs', link: '/community/faqs' }, { text: 'Reference', link: '/community/reference' }, - { text: 'Contribution Guidelines', link: '/community/contributing' }, + { text: 'Contribution', link: '/community/contributing' }, { text: 'Copyrights and Licenses', link: '/community/licenses' } ] } diff --git a/docs/.vitepress/locales/zh_CN.ts b/docs/.vitepress/locales/zh_CN.ts index dfb689fb..91970a1a 100644 --- a/docs/.vitepress/locales/zh_CN.ts +++ b/docs/.vitepress/locales/zh_CN.ts @@ -71,23 +71,23 @@ function sidebar() { { text: '部署示例', collapsed: false, - link: '/zh_cn/examples/examples', + link: '/zh_cn/deploy/examples', items: [ { text: 'ESP32 - 部署教程', - link: '/zh_cn/examples/esp32/deploy', + link: '/zh_cn/deploy/esp32/deploy', items: [ - { text: 'ESP32 口罩检测', link: '/zh_cn/examples/esp32/mask_detection' }, - { text: 'ESP32 表计读数', link: '/zh_cn/examples/esp32/meter_reader' } + { text: 'ESP32 口罩检测', link: '/zh_cn/deploy/esp32/mask_detection' }, + { text: 'ESP32 表计读数', link: '/zh_cn/deploy/esp32/meter_reader' } ] }, { text: 'Grove - 部署教程', - link: '/zh_cn/examples/grove/deploy', + link: '/zh_cn/deploy/grove/deploy', items: [ - { text: 'Grove 口罩检测', link: 
'/zh_cn/examples/grove/mask_detection' }, - { text: 'Grove 表计读数', link: '/zh_cn/examples/grove/meter_reader' }, - { text: 'Grove 数字表记', link: '/zh_cn/examples/grove/digital_meter' } + { text: 'Grove 口罩检测', link: '/zh_cn/deploy/grove/mask_detection' }, + { text: 'Grove 表计读数', link: '/zh_cn/deploy/grove/meter_reader' }, + { text: 'Grove 数字表记', link: '/zh_cn/deploy/grove/digital_meter' } ] } ] diff --git a/docs/community/contributing.md b/docs/community/contributing.md index ca5f32dd..90ede9ad 100644 --- a/docs/community/contributing.md +++ b/docs/community/contributing.md @@ -1,4 +1,4 @@ -# Contribution Guidelines +# Contribute Contributions to EdgeLab are welcome! We welcome contributions of any kind, including but not limited to: @@ -28,6 +28,25 @@ Contributions to EdgeLab are welcome! We welcome contributions of any kind, incl Please refer to the [Github Documentation - Collaborating](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests). +## Commit Style + +We recommend that you follow these authoring principles when writing your commits, as this will make our project cleaner and easier to iterate on. + +``` +build: build related changes +chore: typo fixes, library updates, etc. +ci: continuous integration related changes +deps: dependencies update +docs: docs related changes +feat: new features +fix: fix issues +perf: add perf results +refactor: refactor components +revert: undo some changes +style: code style changes +test: test cases changes +``` + ## Permissions Section After a contribution is submitted, you agree to the project's [License](./licenses). 
diff --git a/docs/examples/esp32/deploy.md b/docs/deploy/esp32/deploy.md similarity index 100% rename from docs/examples/esp32/deploy.md rename to docs/deploy/esp32/deploy.md diff --git a/docs/examples/esp32/mask_detection.md b/docs/deploy/esp32/mask_detection.md similarity index 100% rename from docs/examples/esp32/mask_detection.md rename to docs/deploy/esp32/mask_detection.md diff --git a/docs/examples/esp32/meter_reader.md b/docs/deploy/esp32/meter_reader.md similarity index 100% rename from docs/examples/esp32/meter_reader.md rename to docs/deploy/esp32/meter_reader.md diff --git a/docs/examples/examples.md b/docs/deploy/examples.md similarity index 100% rename from docs/examples/examples.md rename to docs/deploy/examples.md diff --git a/docs/examples/grove/deploy.md b/docs/deploy/grove/deploy.md similarity index 100% rename from docs/examples/grove/deploy.md rename to docs/deploy/grove/deploy.md diff --git a/docs/examples/grove/digital_meter.md b/docs/deploy/grove/digital_meter.md similarity index 100% rename from docs/examples/grove/digital_meter.md rename to docs/deploy/grove/digital_meter.md diff --git a/docs/examples/grove/mask_detection.md b/docs/deploy/grove/mask_detection.md similarity index 100% rename from docs/examples/grove/mask_detection.md rename to docs/deploy/grove/mask_detection.md diff --git a/docs/examples/grove/meter_reader.md b/docs/deploy/grove/meter_reader.md similarity index 100% rename from docs/examples/grove/meter_reader.md rename to docs/deploy/grove/meter_reader.md diff --git a/docs/introduction/installation.md b/docs/introduction/installation.md index 9af45ddd..8170c9e6 100644 --- a/docs/introduction/installation.md +++ b/docs/introduction/installation.md @@ -121,16 +121,18 @@ Or you can do the configuration manually using Conda's configuration file. 
::: code-group ```sh [CPU] -conda env create -n edgelab -f environment.yml -y && \ +conda env create -n edgelab -f environment.yml && \ conda activate edgelab && \ -pip3 install -r requirements.txt && \ +pip3 install -r requirements/inference.txt -r requirements/export.txt -r requirements/tests.txt && \ +mim install -r requirements/mmlab.txt && \ mim install -e . ``` ```sh [GPU (CUDA)] -conda env create -n edgelab -f environment_cuda.yml -y && \ +conda env create -n edgelab -f environment_cuda.yml && \ conda activate edgelab && \ -pip3 install -r requirements_cuda.txt && \ +pip3 install -r requirements/inference.txt -r requirements/export.txt -r requirements/tests.txt && \ +mim install -r requirements/mmlab.txt && \ mim install -e . ``` diff --git a/docs/introduction/quick_start.md b/docs/introduction/quick_start.md index 6e133364..f27c76e9 100644 --- a/docs/introduction/quick_start.md +++ b/docs/introduction/quick_start.md @@ -6,6 +6,8 @@ In [Overview](./what_is_edgelab), we have introduced the functions and features We suggest that all beginners of EdgeLab start learning from [Getting Started](#getting-started), if you are familiar with EdgeLab or [OpenMMLab](https://github.com/open-mmlab), and you want to try to deploy on edge computing devices, modify existing neural networks, or train on user-defined data sets, you can directly refer to [Advanced](#advanced). ::: +Now, you can try out [EdgeLab Colab Examples](https://github.com/Seeed-Studio/EdgeLab/tree/main/notebooks) on Google Colab without setting up EdgeLab on your computer. + ## Getting Started 1. First, refer to the [Installation Guide](./installation.md) to configure the running environment of EdgeLab. @@ -20,8 +22,23 @@ We suggest that all beginners of EdgeLab start learning from [Getting Started](# ## Advanced -- **Model Deployment**. 
If you want to deploy the exported training model on edge computing devices, please refer to [ESP32 Deployment Example](../examples/esp32/deploy) or [Grove Vision AI Deployment Example](../examples/grove/deploy). +- **Model Deployment**. If you want to deploy the exported training model on edge computing devices, please refer to [ESP32 Deployment Example](../deploy/esp32/deploy) or [Grove Vision AI Deployment Example](../deploy/grove/deploy). - **Custom Datasets**. If you want to train on a custom dataset, please refer to [Datasets](../tutorials/datasets). - **Custom Model**. If you want to modify an existing neural network or design your own neural network, please refer to [Model Configuration](../tutorials/config). + +## Necessary Knowledge + +- Computer Vision: + + The basics of computer vision are built upon digital image processing. So, you need to learn the basics of DIP first. Then you can move forward to read computer vision topics like pattern recognition and 3D geometry. You need to know linear algebra to be able to fully understand some concepts of computer vision, like dimensionality reduction. After understanding the fundamentals of computer vision you should also build your knowledge in deep learning, especially in Convolutional Neural Networks (CNN). + +- Programming: + + Python will be enough for design and prototyping, but if you want to do some + embedded work, you should also be familiar with C++. + +- Tools: + + OpenCV is the main tool for computer vision, and Numpy is an important tool for data processing and analysis. You must know them, and you should know what other tools are available and how to use them. Another tool you need to familiarize yourself with is the deep learning framework. You can start with Keras, which is the easiest to learn, and then learn TensorFlow or PyTorch. 
diff --git a/docs/tutorials/export/pytorch_2_onnx.md b/docs/tutorials/export/pytorch_2_onnx.md index 50603462..d5ecdd4d 100644 --- a/docs/tutorials/export/pytorch_2_onnx.md +++ b/docs/tutorials/export/pytorch_2_onnx.md @@ -26,18 +26,18 @@ You also need to prepare the PyTorch model and its weights before exporting the - Or download the EdgeLab official pre-trained weights from our [GitHub Releases - Model Zoo](https://github.com/Seeed-Studio/EdgeLab/releases/tag/model_zoo). -## Model Transform +## Export Model -For model transformation (convert and export), the relevant commands with some common parameters are listed. +For model convert and export, the relevant commands with some common parameters are listed. ```sh python3 tools/export.py \ "" \ "" \ - "" + onnx ``` -### Transform Examples +### ONNX Export Examples Here are some model conversion examples for reference. @@ -90,7 +90,7 @@ For more parameters supported, please refer to the source code `tools/inference. ::: -### Validation Example +### Model Validation Example ::: code-group diff --git a/docs/tutorials/export/pytorch_2_tflite.md b/docs/tutorials/export/pytorch_2_tflite.md index 1e2df3a7..3ed3036b 100644 --- a/docs/tutorials/export/pytorch_2_tflite.md +++ b/docs/tutorials/export/pytorch_2_tflite.md @@ -32,7 +32,7 @@ Export TFLite model requires a training set as a representative dataset, if it n ::: -## Model Transform +## Export Model For model transformation (convert and export), the relevant commands with some common parameters are listed. @@ -40,10 +40,10 @@ For model transformation (convert and export), the relevant commands with some c python3 tools/export.py \ "" \ "" \ - "" + tflite ``` -### Transform Examples +### TFLite Export Examples Here are some model conversion examples (`int8` precision) for reference. @@ -97,7 +97,7 @@ For more parameters supported, please refer to the source code `tools/inference. 
::: -### Validation Example +### Model Validation Example Here are some examples for validating converted model (`int8` precision), for reference only. diff --git a/docs/tutorials/training/fomo.md b/docs/tutorials/training/fomo.md index 37555783..330b8b07 100644 --- a/docs/tutorials/training/fomo.md +++ b/docs/tutorials/training/fomo.md @@ -175,4 +175,4 @@ In order to further test and evaluate the model on a realistic edge computing de ### Deployment -After exporting the model, you can deploy the model to the edge computing device for testing and evaluation. You can refer to the [examples](../../examples/examples.md) section to learn more about how to deploy models. +After exporting the model, you can deploy the model to the edge computing device for testing and evaluation. You can refer to the [Deploy](../../deploy/examples.md) section to learn more about how to deploy models. diff --git a/docs/tutorials/training/overview.md b/docs/tutorials/training/overview.md index 5670d1b8..86e42eb4 100644 --- a/docs/tutorials/training/overview.md +++ b/docs/tutorials/training/overview.md @@ -50,4 +50,4 @@ python3 tools/train.py --help ### Deployment -After exporting the model, you can deploy the model to an edge computing device for testing and evaluation. You can refer to [examples](../../examples/examples.md) section to learn more about how to deploy the model. +After exporting the model, you can deploy the model to an edge computing device for testing and evaluation. You can refer to [Deploy](../../deploy/examples.md) section to learn more about how to deploy the model. 
diff --git a/docs/tutorials/training/pfld.md b/docs/tutorials/training/pfld.md index 3fa61b5d..c835709c 100644 --- a/docs/tutorials/training/pfld.md +++ b/docs/tutorials/training/pfld.md @@ -170,4 +170,4 @@ In order to further test and evaluate the model on a realistic edge computing de ### Deployment -After exporting the model, you can deploy the model to the edge computing device for testing and evaluation. You can refer to the [examples](../../examples/examples.md) section to learn more about how to deploy models. +After exporting the model, you can deploy the model to the edge computing device for testing and evaluation. You can refer to the [Deploy](../../deploy/examples.md) section to learn more about how to deploy models. diff --git a/docs/tutorials/training/yolov5.md b/docs/tutorials/training/yolov5.md index a61b76af..bfed5763 100644 --- a/docs/tutorials/training/yolov5.md +++ b/docs/tutorials/training/yolov5.md @@ -118,4 +118,4 @@ In order to further test and evaluate the model on a realistic edge computing de ### Deployment -After exporting the model, you can deploy the model to the edge computing device for testing and evaluation. You can refer to the [examples](../../examples/examples.md) section to learn more about how to deploy models. +After exporting the model, you can deploy the model to the edge computing device for testing and evaluation. You can refer to the [Deploy](../../deploy/examples.md) section to learn more about how to deploy models. 
diff --git a/docs/zh_cn/community/contributing.md b/docs/zh_cn/community/contributing.md index bd9ea5f1..4185cabd 100644 --- a/docs/zh_cn/community/contributing.md +++ b/docs/zh_cn/community/contributing.md @@ -28,6 +28,25 @@ 请参考 [Github 文档 - Collaborating](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests)。 +## 提交格式 + +我们建议您在编写 commit 时，遵循以下编写原则，这可以让我们的项目更加整洁，方便迭代 + +``` +build: build related changes +chore: typo fixes, library updates, etc. +ci: continuous integration related changes +deps: dependencies update +docs: docs related changes +feat: new features +fix: fix issues +perf: add perf results +refactor: refactor components +revert: undo some changes +style: code style changes +test: test cases changes +``` + ## 许可部分 在贡献被提交之后，我们默认您同意了本项目的[许可协议](./licenses)。 diff --git a/docs/zh_cn/examples/esp32/deploy.md b/docs/zh_cn/deploy/esp32/deploy.md similarity index 100% rename from docs/zh_cn/examples/esp32/deploy.md rename to docs/zh_cn/deploy/esp32/deploy.md diff --git a/docs/zh_cn/examples/esp32/mask_detection.md b/docs/zh_cn/deploy/esp32/mask_detection.md similarity index 100% rename from docs/zh_cn/examples/esp32/mask_detection.md rename to docs/zh_cn/deploy/esp32/mask_detection.md diff --git a/docs/zh_cn/examples/esp32/meter_reader.md b/docs/zh_cn/deploy/esp32/meter_reader.md similarity index 100% rename from docs/zh_cn/examples/esp32/meter_reader.md rename to docs/zh_cn/deploy/esp32/meter_reader.md diff --git a/docs/zh_cn/examples/examples.md b/docs/zh_cn/deploy/examples.md similarity index 100% rename from docs/zh_cn/examples/examples.md rename to docs/zh_cn/deploy/examples.md diff --git a/docs/zh_cn/examples/grove/deploy.md b/docs/zh_cn/deploy/grove/deploy.md similarity index 100% rename from docs/zh_cn/examples/grove/deploy.md rename to docs/zh_cn/deploy/grove/deploy.md diff --git a/docs/zh_cn/examples/grove/digital_meter.md 
b/docs/zh_cn/deploy/grove/digital_meter.md similarity index 100% rename from docs/zh_cn/examples/grove/digital_meter.md rename to docs/zh_cn/deploy/grove/digital_meter.md diff --git a/docs/zh_cn/examples/grove/mask_detection.md b/docs/zh_cn/deploy/grove/mask_detection.md similarity index 100% rename from docs/zh_cn/examples/grove/mask_detection.md rename to docs/zh_cn/deploy/grove/mask_detection.md diff --git a/docs/zh_cn/examples/grove/meter_reader.md b/docs/zh_cn/deploy/grove/meter_reader.md similarity index 100% rename from docs/zh_cn/examples/grove/meter_reader.md rename to docs/zh_cn/deploy/grove/meter_reader.md diff --git a/docs/zh_cn/introduction/installation.md b/docs/zh_cn/introduction/installation.md index 9e759375..a2f13041 100644 --- a/docs/zh_cn/introduction/installation.md +++ b/docs/zh_cn/introduction/installation.md @@ -120,16 +120,18 @@ bash scripts/setup_linux.sh ::: code-group ```sh [CPU] -conda env create -n edgelab -f environment.yml -y && \ +conda env create -n edgelab -f environment.yml && \ conda activate edgelab && \ -pip3 install -r requirements.txt && \ +pip3 install -r requirements/inference.txt -r requirements/export.txt -r requirements/tests.txt && \ +mim install -r requirements/mmlab.txt && \ mim install -e . ``` ```sh [GPU (CUDA)] -conda env create -n edgelab -f environment_cuda.yml -y && \ +conda env create -n edgelab -f environment_cuda.yml && \ conda activate edgelab && \ -pip3 install -r requirements_cuda.txt && \ +pip3 install -r requirements/inference.txt -r requirements/export.txt -r requirements/tests.txt && \ +mim install -r requirements/mmlab.txt && \ mim install -e . 
``` diff --git a/docs/zh_cn/introduction/quick_start.md b/docs/zh_cn/introduction/quick_start.md index 17eb3144..110d663c 100644 --- a/docs/zh_cn/introduction/quick_start.md +++ b/docs/zh_cn/introduction/quick_start.md @@ -6,6 +6,8 @@ 我们建议所有初次上手 EdgeLab 的小伙伴从[入门](#%E5%85%A5%E9%97%A8)开始学习，如果你熟悉 EdgeLab 或 [OpenMMLab](https://github.com/open-mmlab)，想尝试在边缘计算设备上部署、对现有的神经网络进行修改或在自定义的数据集上进行训练，则可以直接参考[进阶](#%E8%BF%9B%E9%98%B6)。 ::: +现在，您可以在 Google Colab 上试用 [EdgeLab Colab Examples](https://github.com/Seeed-Studio/EdgeLab/tree/main/notebooks)，而无需在您的电脑上安装 EdgeLab。 + ## 入门 1. 首先查阅[安装指南](./installation.md)配置 EdgeLab 的运行环境。 @@ -20,8 +22,23 @@ ## 进阶 -- **模型的部署**，如果您想将训练导出后的模型部署在边缘计算设备上，请参考 [ESP32 部署示例](../examples/esp32/deploy)或 [Grove Vision AI 部署示例](../examples/grove/deploy)。 +- **模型的部署**，如果您想将训练导出后的模型部署在边缘计算设备上，请参考 [ESP32 部署示例](../deploy/esp32/deploy)或 [Grove Vision AI 部署示例](../deploy/grove/deploy)。 - **自定义数据集**，如果您想在自定义数据集上进行训练，请参考[数据集](../tutorials/datasets)。 - **自定义模型**，如果您想对现有的神经网络进行修改或设计自己的神经网络，请参考[模型配置](../tutorials/config)。 + +## 必要知识 + +- 计算机视觉: + + 计算机视觉的基础知识建立在数字图像处理之上。因此，您需要首先学习数字图像处理的基础知识。然后，您可以继续阅读计算机视觉主题，如模式识别和三维几何。您需要了解线性代数，以便能够充分理解计算机视觉的一些概念，如降维。在了解了计算机视觉的基础知识后，您还应该掌握深度学习方面的知识，尤其是卷积神经网络（CNN）方面的知识。 + +- 编程: + + Python 足够用于设计和原型开发，但如果您想做一些 + 嵌入式工作，您还应该熟悉 C++。 + +- 工具: + + OpenCV 是计算机视觉的主要工具，Numpy 是数据处理和分析的重要工具。您必须了解它们，并且应该知道有哪些工具可用以及如何使用它们。您需要熟悉的另一个工具是深度学习框架。您可以从最容易学习的 Keras 开始，然后学习 TensorFlow 或 PyTorch。 diff --git a/docs/zh_cn/tutorials/training/fomo.md b/docs/zh_cn/tutorials/training/fomo.md index 97edd23b..74001452 100644 --- a/docs/zh_cn/tutorials/training/fomo.md +++ b/docs/zh_cn/tutorials/training/fomo.md @@ -175,4 +175,4 @@ python3 tools/inference.py \ ### 部署 -在导出模型后，你可以将模型部署到边缘计算设备上进行测试和评估。你可以参考 [examples](../../examples/examples.md) 部分来了解更多关于如何部署模型的信息。 +在导出模型后，你可以将模型部署到边缘计算设备上进行测试和评估。你可以参考 [Deploy](../../deploy/examples.md) 部分来了解更多关于如何部署模型的信息。 diff --git a/docs/zh_cn/tutorials/training/overview.md 
b/docs/zh_cn/tutorials/training/overview.md index f1239a67..fd8f7fd1 100644 --- a/docs/zh_cn/tutorials/training/overview.md +++ b/docs/zh_cn/tutorials/training/overview.md @@ -50,4 +50,4 @@ python3 tools/train.py --help ### 部署 -在导出模型后，你可以将模型部署到边缘计算设备上进行测试和评估。你可以参考 [examples](../../examples/examples.md) 部分来了解更多关于如何部署模型的信息。 +在导出模型后，你可以将模型部署到边缘计算设备上进行测试和评估。你可以参考 [Deploy](../../deploy/examples.md) 部分来了解更多关于如何部署模型的信息。 diff --git a/docs/zh_cn/tutorials/training/yolov5.md b/docs/zh_cn/tutorials/training/yolov5.md index e28ca8a1..752162ce 100644 --- a/docs/zh_cn/tutorials/training/yolov5.md +++ b/docs/zh_cn/tutorials/training/yolov5.md @@ -118,4 +118,4 @@ python3 tools/inference.py \ ### 部署 -在导出模型后，你可以将模型部署到边缘计算设备上进行测试和评估。你可以参考 [examples](../../examples/examples.md) 部分来了解更多关于如何部署模型的信息。 +在导出模型后，你可以将模型部署到边缘计算设备上进行测试和评估。你可以参考 [Deploy](../../deploy/examples.md) 部分来了解更多关于如何部署模型的信息。 diff --git a/environment.yml b/environment.yml index 13b7b994..16698d55 100644 --- a/environment.yml +++ b/environment.yml @@ -5,12 +5,11 @@ channels: - pytorch dependencies: - - python=3.8 - - cpuonly=2.0 - - pytorch=2.0.0 - - torchaudio=2.0.0 - - torchvision=0.15.0 - - pip=23.0.1 + - python>=3.8,<=3.10 + - cpuonly>=2.0 + - pytorch>=2.0.0 + - torchaudio>=2.0.0 + - torchvision>=0.15.0 + - pip>=23.0.1 - pip: - -r requirements/base.txt - - -r requirements/inference.txt diff --git a/environment_cuda.yml b/environment_cuda.yml index 85bc9266..92752a7a 100644 --- a/environment_cuda.yml +++ b/environment_cuda.yml @@ -6,13 +6,12 @@ channels: - nvidia dependencies: - - cudatoolkit=11.7 - - python=3.8 - - pytorch=2.0.0 - - pytorch-cuda=11.7 - - torchaudio=2.0.0 - - torchvision=0.15.0 - - pip=23.0.1 + - cudatoolkit==11.7 # mmcv pre-built wheels currently requires cuda 11.7 + - python>=3.8,<=3.10 + - pytorch>=2.0.0 + - pytorch-cuda>=11.7 # mmcv pre-built wheels currently requires cuda 11.7 + - torchaudio>=2.0.0 + - torchvision>=0.15.0 + - pip>=23.0.1 - pip: - -r requirements/base.txt - - -r 
requirements/inference.txt diff --git a/requirements/base.txt b/requirements/base.txt index 0a7a92fa..378c65be 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,22 +1,22 @@ -albumentations==1.3.0 # common -numpy==1.23.5 +albumentations>=1.3.0 +numpy>=1.23.5 # vision -opencv-python==4.7.0.72 +opencv-python>=4.7.0.72 # openmmlab package manager -openmim==0.3.7 -packaging==23.1 -pandas==2.0.0 -pillow==9.4.0 -pyyaml==6.0 -scikit-image==0.20.0 -scikit-learn==1.2.2 +openmim>=0.3.7 +packaging>=23.1 +pandas>=2.0.0 +pillow>=9.4.0 +pyyaml>=6.0 +scikit-image>=0.20.0 +scikit-learn>=1.2.2 # sound -soundfile==0.12.1 +soundfile>=0.12.1 # visualize -tensorboard==2.12.3 -tqdm==4.65.0 +tensorboard>=2.12.3 +tqdm>=4.65.0 diff --git a/requirements/export.txt b/requirements/export.txt index 80ad2a39..7780576b 100644 --- a/requirements/export.txt +++ b/requirements/export.txt @@ -1,2 +1,2 @@ -# tlite export dep +# tflite export dep TinyNeuralNetwork@git+https://github.com/LynnL4/TinyNeuralNetwork.git diff --git a/requirements/inference.txt b/requirements/inference.txt index 40056c8b..226729ad 100644 --- a/requirements/inference.txt +++ b/requirements/inference.txt @@ -1,11 +1,9 @@ - -libusb1 - -ncnn==1.0.20230517 -onnx==1.14.0 -onnxmltools==1.11.2 -onnxruntime==1.15.1 -onnxsim==0.4.33 -protobuf==4.23.3 -tensorflow==2.12.0 -tflite-runtime==2.12.0 +libusb1>=3.0.0 +ncnn>=1.0.20230517 +onnx>=1.14.0 +onnxmltools>=1.11.2 +onnxruntime>=1.15.1 +onnxsim>=0.4.33 +protobuf>=4.23.3 +tensorflow>=2.12.0 +tflite-runtime>=2.12.0 diff --git a/requirements/mmlab.txt b/requirements/mmlab.txt index 705004c4..63388263 100644 --- a/requirements/mmlab.txt +++ b/requirements/mmlab.txt @@ -1,8 +1,8 @@ -mmcls==1.0.0.rc6 -mmcv==2.0.0 -mmdet==3.0.0 # use openmim to install -mmengine==0.7.2 -mmpose==1.0.0 +mmcls>=1.0.0.rc6 +mmcv>=2.0.0 +mmdet>=3.0.0, <3.1.0 # mmyolo currently does not support mmdet 3.1.0 +mmengine>=0.7.2 +mmpose>=1.0.0 mmyolo@git+https://github.com/mjq2020/mmyolo diff --git 
a/requirements/pytorch_cpu.txt b/requirements/pytorch_cpu.txt index a5ccc36f..579d0bef 100644 --- a/requirements/pytorch_cpu.txt +++ b/requirements/pytorch_cpu.txt @@ -1,5 +1,5 @@ -i https://download.pytorch.org/whl/cpu -torch==2.0.0 -torchaudio==2.0.0 -torchvision==0.15.0 +torch>=2.0.0 +torchaudio>=2.0.0 +torchvision>=0.15.0 diff --git a/requirements/pytorch_cuda.txt b/requirements/pytorch_cuda.txt index 4c0200b6..29298aa7 100644 --- a/requirements/pytorch_cuda.txt +++ b/requirements/pytorch_cuda.txt @@ -1,5 +1,5 @@ -i https://download.pytorch.org/whl/cu117 -torch==2.0.0 -torchaudio==2.0.0 -torchvision==0.15.0 +torch>=2.0.0 +torchaudio>=2.0.0 +torchvision>=0.15.0 diff --git a/requirements/tests.txt b/requirements/tests.txt index 4e747a57..5a1f15d9 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,4 +1,4 @@ -black==23.3.0 -isort==5.12.0 -pre-commit==3.3.3 -ruff==0.0.275 +black>=23.3.0 +isort>=5.12.0 +pre-commit>=3.3.3 +ruff>=0.0.275 diff --git a/tools/train.py b/tools/train.py index ef6be0b0..d95e3bed 100644 --- a/tools/train.py +++ b/tools/train.py @@ -198,7 +198,7 @@ def main(): model = runner.model.to(device=device) model.eval() - analysis_results = get_model_complexity_info(model=model, input_shape=args.input_shape, inputs=(dummy_inputs,)) + analysis_results = get_model_complexity_info(model=model, inputs=(dummy_inputs,)) print('Model Flops:{}'.format(analysis_results['flops_str'])) print('Model Parameters:{}'.format(analysis_results['params_str']))