From 303f30d3998fb8872e77fe9edad77c120c7c39d0 Mon Sep 17 00:00:00 2001 From: cuicheng01 <45199522+cuicheng01@users.noreply.github.com> Date: Mon, 25 Nov 2024 17:03:09 +0800 Subject: [PATCH] unify module name (#2530) * unify module name * update * update --- .../cv_modules/anomaly_detection.en.md | 8 ++-- .../tutorials/cv_modules/anomaly_detection.md | 8 ++-- .../tutorials/cv_modules/face_feature.en.md | 6 +-- .../tutorials/cv_modules/face_feature.md | 8 ++-- .../tutorials/cv_modules/image_feature.en.md | 16 ++++---- .../tutorials/cv_modules/image_feature.md | 16 ++++---- .../image_multilabel_classification.en.md | 16 ++++---- .../image_multilabel_classification.md | 16 ++++---- .../pedestrian_attribute_recognition.en.md | 12 +++--- .../pedestrian_attribute_recognition.md | 12 +++--- .../vehicle_attribute_recognition.en.md | 12 +++--- .../vehicle_attribute_recognition.md | 12 +++--- .../ocr_modules/layout_detection.en.md | 12 +++--- .../tutorials/ocr_modules/layout_detection.md | 12 +++--- .../ocr_modules/seal_text_detection.en.md | 12 +++--- .../ocr_modules/seal_text_detection.md | 12 +++--- ...en.md => anomaly_detection_tutorial.en.md} | 14 ++++--- .../anomaly_detection_tutorial.md | 14 ++++--- ...xtraction(layout_detection)_tutorial.en.md | 10 ++--- ...n_extraction(layout_detection)_tutorial.md | 10 ++--- ...xtraction(seal_recognition)_tutorial.en.md | 12 +++--- ...n_extraction(seal_recognition)_tutorial.md | 12 +++--- docs/support_list/models_list.en.md | 40 +++++++++---------- docs/support_list/models_list.md | 40 +++++++++---------- .../MobileFaceNet.yaml | 0 .../ResNet50_face.yaml | 0 .../STFPM.yaml | 0 .../PP-ShiTuV2_rec.yaml | 0 .../PP-ShiTuV2_rec_CLIP_vit_base.yaml | 0 .../PP-ShiTuV2_rec_CLIP_vit_large.yaml | 0 .../CLIP_vit_base_patch16_448_ML.yaml | 0 .../PP-HGNetV2-B0_ML.yaml | 0 .../PP-HGNetV2-B4_ML.yaml | 0 .../PP-HGNetV2-B6_ML.yaml | 0 .../PP-LCNet_x1_0_ML.yaml | 0 .../ResNet50_ML.yaml | 0 .../PicoDet-L_layout_17cls.yaml | 0 
.../PicoDet-L_layout_3cls.yaml | 0 .../PicoDet-S_layout_17cls.yaml | 0 .../PicoDet-S_layout_3cls.yaml | 0 .../PicoDet_layout_1x.yaml | 0 .../RT-DETR-H_layout_17cls.yaml | 0 .../RT-DETR-H_layout_3cls.yaml | 0 .../PP-LCNet_x1_0_pedestrian_attribute.yaml | 0 .../PP-OCRv4_mobile_seal_det.yaml | 0 .../PP-OCRv4_server_seal_det.yaml | 0 .../PP-LCNet_x1_0_vehicle_attribute.yaml | 0 47 files changed, 173 insertions(+), 169 deletions(-) rename docs/practical_tutorials/{anomaly_detection_tutorial_en.md => anomaly_detection_tutorial.en.md} (97%) rename paddlex/configs/{face_recognition => face_feature}/MobileFaceNet.yaml (100%) rename paddlex/configs/{face_recognition => face_feature}/ResNet50_face.yaml (100%) rename paddlex/configs/{anomaly_detection => image_anomaly_detection}/STFPM.yaml (100%) rename paddlex/configs/{general_recognition => image_feature}/PP-ShiTuV2_rec.yaml (100%) rename paddlex/configs/{general_recognition => image_feature}/PP-ShiTuV2_rec_CLIP_vit_base.yaml (100%) rename paddlex/configs/{general_recognition => image_feature}/PP-ShiTuV2_rec_CLIP_vit_large.yaml (100%) rename paddlex/configs/{multilabel_classification => image_multilabel_classification}/CLIP_vit_base_patch16_448_ML.yaml (100%) rename paddlex/configs/{multilabel_classification => image_multilabel_classification}/PP-HGNetV2-B0_ML.yaml (100%) rename paddlex/configs/{multilabel_classification => image_multilabel_classification}/PP-HGNetV2-B4_ML.yaml (100%) rename paddlex/configs/{multilabel_classification => image_multilabel_classification}/PP-HGNetV2-B6_ML.yaml (100%) rename paddlex/configs/{multilabel_classification => image_multilabel_classification}/PP-LCNet_x1_0_ML.yaml (100%) rename paddlex/configs/{multilabel_classification => image_multilabel_classification}/ResNet50_ML.yaml (100%) rename paddlex/configs/{structure_analysis => layout_detection}/PicoDet-L_layout_17cls.yaml (100%) rename paddlex/configs/{structure_analysis => layout_detection}/PicoDet-L_layout_3cls.yaml (100%) rename 
paddlex/configs/{structure_analysis => layout_detection}/PicoDet-S_layout_17cls.yaml (100%) rename paddlex/configs/{structure_analysis => layout_detection}/PicoDet-S_layout_3cls.yaml (100%) rename paddlex/configs/{structure_analysis => layout_detection}/PicoDet_layout_1x.yaml (100%) rename paddlex/configs/{structure_analysis => layout_detection}/RT-DETR-H_layout_17cls.yaml (100%) rename paddlex/configs/{structure_analysis => layout_detection}/RT-DETR-H_layout_3cls.yaml (100%) rename paddlex/configs/{pedestrian_attribute => pedestrian_attribute_recognition}/PP-LCNet_x1_0_pedestrian_attribute.yaml (100%) rename paddlex/configs/{text_detection_seal => seal_text_detection}/PP-OCRv4_mobile_seal_det.yaml (100%) rename paddlex/configs/{text_detection_seal => seal_text_detection}/PP-OCRv4_server_seal_det.yaml (100%) rename paddlex/configs/{vehicle_attribute => vehicle_attribute_recognition}/PP-LCNet_x1_0_vehicle_attribute.yaml (100%) diff --git a/docs/module_usage/tutorials/cv_modules/anomaly_detection.en.md b/docs/module_usage/tutorials/cv_modules/anomaly_detection.en.md index b56351714..120ffcaa7 100644 --- a/docs/module_usage/tutorials/cv_modules/anomaly_detection.en.md +++ b/docs/module_usage/tutorials/cv_modules/anomaly_detection.en.md @@ -68,7 +68,7 @@ tar -xf ./dataset/mvtec_examples.tar -C ./dataset/ A single command can complete data validation: ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=check_dataset \ -o Global.dataset_dir=./dataset/mvtec_examples ``` @@ -118,7 +118,7 @@ After executing the above command, PaddleX will validate the dataset and collect A single command is sufficient to complete model training, taking the training of STFPM as an example: ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=train \ -o 
Global.dataset_dir=./dataset/mvtec_examples ``` @@ -150,7 +150,7 @@ Other related parameters can be set by modifying the `Global` and `Train` fields After completing model training, you can evaluate the specified model weight file on the validation set to verify the model's accuracy. Using PaddleX for model evaluation, you can complete the evaluation with a single command: ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=evaluate \ -o Global.dataset_dir=./dataset/mvtec_examples ``` @@ -172,7 +172,7 @@ After completing model training and evaluation, you can use the trained model we #### 4.4.1 Model Inference * To perform inference prediction through the command line, simply use the following command. Before running the following code, please download the [demo image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/uad_grid.png) to your local machine. ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=predict \ -o Predict.model_dir="./output/best_model/inference" \ -o Predict.input="uad_grid.png" diff --git a/docs/module_usage/tutorials/cv_modules/anomaly_detection.md b/docs/module_usage/tutorials/cv_modules/anomaly_detection.md index 41eba999b..b067bbdc7 100644 --- a/docs/module_usage/tutorials/cv_modules/anomaly_detection.md +++ b/docs/module_usage/tutorials/cv_modules/anomaly_detection.md @@ -69,7 +69,7 @@ tar -xf ./dataset/mvtec_examples.tar -C ./dataset/ 一行命令即可完成数据校验: ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=check_dataset \ -o Global.dataset_dir=./dataset/mvtec_examples ``` @@ -116,7 +116,7 @@ python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ 一条命令即可完成模型的训练,以此处STFPM的训练为例: ```bash -python 
main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=train \ -o Global.dataset_dir=./dataset/mvtec_examples ``` @@ -147,7 +147,7 @@ python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估: ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=evaluate \ -o Global.dataset_dir=./dataset/mvtec_examples ``` @@ -169,7 +169,7 @@ python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ #### 4.4.1 模型推理 * 通过命令行的方式进行推理预测,只需如下一条命令,运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/uad_grid.png)到本地。 ```bash -python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \ +python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \ -o Global.mode=predict \ -o Predict.model_dir="./output/best_model/inference" \ -o Predict.input="uad_grid.png" diff --git a/docs/module_usage/tutorials/cv_modules/face_feature.en.md b/docs/module_usage/tutorials/cv_modules/face_feature.en.md index c00471bca..921122493 100644 --- a/docs/module_usage/tutorials/cv_modules/face_feature.en.md +++ b/docs/module_usage/tutorials/cv_modules/face_feature.en.md @@ -84,7 +84,7 @@ tar -xf ./dataset/face_rec_examples.tar -C ./dataset/ A single command can complete data validation: ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=check_dataset \ -o Global.dataset_dir=./dataset/face_rec_examples ``` @@ -186,7 +186,7 @@ images/Miyako_Miyazaki_0002.jpg images/Munir_Akram_0002.jpg 0 Model training can be completed with a single command. 
Here is an example of training MobileFaceNet: ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=train \ -o Global.dataset_dir=./dataset/face_rec_examples ``` @@ -240,7 +240,7 @@ After completing model training and evaluation, you can use the trained model we #### 4.4.1 Model Inference * To perform inference predictions through the command line, you only need the following command. Before running the following code, please download the [example image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/face_recognition_001.jpg) to your local machine. ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=predict \ -o Predict.model_dir="./output/best_model/inference" \ -o Predict.input="face_recognition_001.jpg" diff --git a/docs/module_usage/tutorials/cv_modules/face_feature.md b/docs/module_usage/tutorials/cv_modules/face_feature.md index e5bf0e272..653a220d9 100644 --- a/docs/module_usage/tutorials/cv_modules/face_feature.md +++ b/docs/module_usage/tutorials/cv_modules/face_feature.md @@ -83,7 +83,7 @@ tar -xf ./dataset/face_rec_examples.tar -C ./dataset/ 一行命令即可完成数据校验: ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=check_dataset \ -o Global.dataset_dir=./dataset/face_rec_examples ``` @@ -184,7 +184,7 @@ images/Miyako_Miyazaki_0002.jpg images/Munir_Akram_0002.jpg 0 一条命令即可完成模型的训练,以此处MobileFaceNet的训练为例: ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=train \ -o Global.dataset_dir=./dataset/face_rec_examples ``` @@ -216,7 +216,7 @@ python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ 
在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估: ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=evaluate \ -o Global.dataset_dir=./dataset/face_rec_examples ``` @@ -238,7 +238,7 @@ python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ #### 4.4.1 模型推理 * 通过命令行的方式进行推理预测,只需如下一条命令,运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/face_recognition_001.jpg)到本地。 ```bash -python main.py -c paddlex/configs/face_recognition/MobileFaceNet.yaml \ +python main.py -c paddlex/configs/face_feature/MobileFaceNet.yaml \ -o Global.mode=predict \ -o Predict.model_dir="./output/best_model/inference" \ -o Predict.input="face_recognition_001.jpg" diff --git a/docs/module_usage/tutorials/cv_modules/image_feature.en.md b/docs/module_usage/tutorials/cv_modules/image_feature.en.md index fc2bd2606..f7bf09b53 100644 --- a/docs/module_usage/tutorials/cv_modules/image_feature.en.md +++ b/docs/module_usage/tutorials/cv_modules/image_feature.en.md @@ -76,7 +76,7 @@ tar -xf ./dataset/Inshop_examples.tar -C ./dataset/ #### 4.1.2 Data Validation A single command can complete data validation: ```bash -python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \ +python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \ -o Global.mode=check_dataset \ -o Global.dataset_dir=./dataset/Inshop_examples ``` @@ -174,13 +174,13 @@ CheckDataset: ......

Then execute the command:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/image_classification_labelme_examples
 

After the data conversion is executed, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters also support being set by appending command line arguments:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/image_classification_labelme_examples \
     -o CheckDataset.convert.enable=True \
@@ -206,13 +206,13 @@ CheckDataset:
   ......
 

Then execute the command:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/Inshop_examples
 

After the data splitting is executed, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters also support being set by appending command line arguments:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/Inshop_examples \
     -o CheckDataset.split.enable=True \
@@ -228,7 +228,7 @@ CheckDataset:
 Model training can be completed with a single command, taking the training of the image feature model PP-ShiTuV2_rec as an example:
 
 ```bash
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/Inshop_examples
 ```
@@ -259,7 +259,7 @@ Other related parameters can be set by modifying the `Global` and `Train` fields
 After completing model training, you can evaluate the specified model weight file on the validation set to verify the model's accuracy. Using PaddleX for model evaluation can be done with a single command:
 
 ```bash
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/Inshop_examples
 ```
@@ -283,7 +283,7 @@ After completing model training and evaluation, you can use the trained model we
 To perform inference prediction through the command line, simply use the following command. Before running the following code, please download the [demo image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_recognition_001.jpg) to your local machine.
 
 ```bash
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="general_image_recognition_001.jpg"
diff --git a/docs/module_usage/tutorials/cv_modules/image_feature.md b/docs/module_usage/tutorials/cv_modules/image_feature.md
index ccd6ef8be..0e0fa90f7 100644
--- a/docs/module_usage/tutorials/cv_modules/image_feature.md
+++ b/docs/module_usage/tutorials/cv_modules/image_feature.md
@@ -78,7 +78,7 @@ tar -xf ./dataset/Inshop_examples.tar -C ./dataset/
 一行命令即可完成数据校验:
 
 ```bash
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/Inshop_examples
 ```
@@ -178,13 +178,13 @@ CheckDataset:
   ......
 

随后执行命令:

-
python main.py -c  paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+
python main.py -c  paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/image_classification_labelme_examples
 

数据转换执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/image_classification_labelme_examples \
     -o CheckDataset.convert.enable=True \
@@ -210,13 +210,13 @@ CheckDataset:
   ......
 

随后执行命令:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/Inshop_examples
 

数据划分执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-
python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+
python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/Inshop_examples \
     -o CheckDataset.split.enable=True \
@@ -232,7 +232,7 @@ CheckDataset:
 一条命令即可完成模型的训练,以此处图像特征模型 PP-ShiTuV2_rec 的训练为例:
 
 ```
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/Inshop_examples
 ```
@@ -263,7 +263,7 @@ python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估:
 
 ```bash
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/Inshop_examples
 ```
@@ -286,7 +286,7 @@ python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml \
 通过命令行的方式进行推理预测,只需如下一条命令。运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_recognition_001.jpg)到本地。
 
 ```bash
-python main.py -c paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml  \
+python main.py -c paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml  \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="general_image_recognition_001.jpg"
diff --git a/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.en.md b/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.en.md
index 534d51292..708aea53b 100644
--- a/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.en.md
+++ b/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.en.md
@@ -86,7 +86,7 @@ tar -xf ./dataset/mlcls_nus_examples.tar -C ./dataset/
 A single command can complete data validation:
 
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/mlcls_nus_examples
 ```
@@ -175,13 +175,13 @@ CheckDataset:
   ......
 

Then execute the command:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples
 

After the data conversion is executed, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters also support being set by appending command line arguments:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples \
     -o CheckDataset.convert.enable=True \
@@ -207,13 +207,13 @@ CheckDataset:
   ......
 

Then execute the command:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples
 

After the data splitting is executed, the original annotation files will be renamed to xxx.bak in the original path.

These parameters can also be set by appending command-line arguments:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples \
     -o CheckDataset.split.enable=True \
@@ -224,7 +224,7 @@ CheckDataset:
 ### 4.2 Model Training
 A single command can complete the model training. Taking the training of the image multi-label classification model PP-LCNet_x1_0_ML as an example:
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/mlcls_nus_examples
 ```
@@ -257,7 +257,7 @@ the following steps are required:
 After completing model training, you can evaluate the specified model weights file on the validation set to verify the model's accuracy. Using PaddleX for model evaluation can be done with a single command:
 
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/mlcls_nus_examples
 ```
@@ -280,7 +280,7 @@ After completing model training and evaluation, you can use the trained model we
 * Inference predictions can be performed through the command line with just one command. Before running the following code, please download the [demo image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/multilabel_classification_005.png) to your local machine.
 
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml  \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml  \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="multilabel_classification_005.png"
diff --git a/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.md b/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.md
index 87dfacdfc..93294d38a 100644
--- a/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.md
+++ b/docs/module_usage/tutorials/cv_modules/image_multilabel_classification.md
@@ -89,7 +89,7 @@ tar -xf ./dataset/mlcls_nus_examples.tar -C ./dataset/
 一行命令即可完成数据校验:
 
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/mlcls_nus_examples
 ```
@@ -177,13 +177,13 @@ CheckDataset:
   ......
 

随后执行命令:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples
 

数据转换执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples \
     -o CheckDataset.convert.enable=True \
@@ -209,13 +209,13 @@ CheckDataset:
   ......
 

随后执行命令:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples
 

数据划分执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-
python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+
python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_coco_examples \
     -o CheckDataset.split.enable=True \
@@ -227,7 +227,7 @@ CheckDataset:
 一条命令即可完成模型的训练,以此处图像多标签分类模型 PP-LCNet_x1_0_ML 的训练为例:
 
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/mlcls_nus_examples
 ```
@@ -258,7 +258,7 @@ python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yam
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估:
 
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/mlcls_nus_examples
 ```
@@ -281,7 +281,7 @@ python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yam
 
 * 通过命令行的方式进行推理预测,只需如下一条命令。运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/multilabel_classification_005.png)到本地。
 ```bash
-python main.py -c paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml  \
+python main.py -c paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml  \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="multilabel_classification_005.png"
diff --git a/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.en.md b/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.en.md
index 63683418c..4e5430046 100644
--- a/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.en.md
+++ b/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.en.md
@@ -89,7 +89,7 @@ tar -xf ./dataset/pedestrian_attribute_examples.tar -C ./dataset/
 Run a single command to complete data validation:
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 ```
@@ -176,13 +176,13 @@ CheckDataset:
   ......
 

Then execute the command:

-
python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+
python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 

After the data splitting is executed, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters also support being set by appending command-line arguments:

-
python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml  \
+
python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples \
     -o CheckDataset.split.enable=True \
@@ -195,7 +195,7 @@ CheckDataset:
 Model training can be completed with a single command. Taking the training of the PP-LCNet pedestrian attribute recognition model (PP-LCNet_x1_0_pedestrian_attribute) as an example:
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 ```
@@ -226,7 +226,7 @@ the following steps are required:
 After completing model training, you can evaluate the specified model weights file on the validation set to verify the model's accuracy. Using PaddleX for model evaluation can be done with a single command:
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 ```
@@ -249,7 +249,7 @@ After completing model training and evaluation, you can use the trained model we
 To perform inference prediction through the command line, simply use the following command. Before running the following code, please download the [demo image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_006.jpg) to your local machine.
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="pedestrian_attribute_006.jpg"
diff --git a/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md b/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md
index 68bd62f02..6a08ff4ad 100644
--- a/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md
+++ b/docs/module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md
@@ -87,7 +87,7 @@ tar -xf ./dataset/pedestrian_attribute_examples.tar -C ./dataset/
 一行命令即可完成数据校验:
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 ```
@@ -174,13 +174,13 @@ CheckDataset:
   ......
 

随后执行命令:

-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 

数据划分执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml  \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples \
     -o CheckDataset.split.enable=True \
@@ -193,7 +193,7 @@ CheckDataset:
 一条命令即可完成模型的训练,以此处PP-LCNet行人属性识别模型(PP-LCNet_x1_0_pedestrian_attribute)的训练为例:
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 ```
@@ -224,7 +224,7 @@ python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估:
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/pedestrian_attribute_examples
 ```
@@ -247,7 +247,7 @@ python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_
 通过命令行的方式进行推理预测,只需如下一条命令。运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_006.jpg)到本地。
 
 ```bash
-python main.py -c paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml \
+python main.py -c paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="pedestrian_attribute_006.jpg"
diff --git a/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.en.md b/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.en.md
index 1ad32956e..6ae63f4a8 100644
--- a/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.en.md
+++ b/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.en.md
@@ -73,7 +73,7 @@ tar -xf ./dataset/vehicle_attribute_examples.tar -C ./dataset/
 A single command can complete data validation:
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 ```
@@ -161,13 +161,13 @@ CheckDataset:
   ......
 

Then execute the command:

-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 

After dataset splitting, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters can also be set by appending command-line arguments:

-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml  \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples \
     -o CheckDataset.split.enable=True \
@@ -179,7 +179,7 @@ CheckDataset:
 Training a model can be done with a single command, taking the training of the PP-LCNet vehicle attribute recognition model (`PP-LCNet_x1_0_vehicle_attribute`) as an example:
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 ```
@@ -210,7 +210,7 @@ Other related parameters can be set by modifying the `Global` and `Train` fields
 After completing model training, you can evaluate the specified model weights file on the validation set to verify the model's accuracy. Using PaddleX for model evaluation can be done with a single command:
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml  \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml  \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 ```
@@ -235,7 +235,7 @@ After completing model training and evaluation, you can use the trained model we
 To perform inference prediction through the command line, simply use the following command. Before running the following code, please download the [demo image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_007.jpg) to your local machine.
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="vehicle_attribute_007.jpg"
diff --git a/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md b/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md
index f84dbeca0..a605249a4 100644
--- a/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md
+++ b/docs/module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md
@@ -70,7 +70,7 @@ tar -xf ./dataset/vehicle_attribute_examples.tar -C ./dataset/
 一行命令即可完成数据校验:
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 ```
@@ -157,13 +157,13 @@ CheckDataset:
   ......
 

随后执行命令:

-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 

数据划分执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml  \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples \
     -o CheckDataset.split.enable=True \
@@ -175,7 +175,7 @@ CheckDataset:
 一条命令即可完成模型的训练,以此处 PP-LCNet 车辆属性识别模型(`PP-LCNet_x1_0_vehicle_attribute`)的训练为例:
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 ```
@@ -206,7 +206,7 @@ python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attrib
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估:
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml  \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml  \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/vehicle_attribute_examples
 ```
@@ -229,7 +229,7 @@ python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attrib
 通过命令行的方式进行推理预测,只需如下一条命令。运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_007.jpg)到本地。
 
 ```bash
-python main.py -c paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml \
+python main.py -c paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="vehicle_attribute_007.jpg"
diff --git a/docs/module_usage/tutorials/ocr_modules/layout_detection.en.md b/docs/module_usage/tutorials/ocr_modules/layout_detection.en.md
index b730ca3f1..760e8ed4c 100644
--- a/docs/module_usage/tutorials/ocr_modules/layout_detection.en.md
+++ b/docs/module_usage/tutorials/ocr_modules/layout_detection.en.md
@@ -123,7 +123,7 @@ tar -xf ./dataset/det_layout_examples.tar -C ./dataset/
 A single command can complete data validation:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_layout_examples
 ```
@@ -197,13 +197,13 @@ CheckDataset:
   ......
 

Then execute the command:

-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_layout_examples
 

After dataset splitting, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters can also be set by appending command-line arguments:

-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml  \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_layout_examples \
     -o CheckDataset.split.enable=True \
@@ -216,7 +216,7 @@ CheckDataset:
 A single command is sufficient to complete model training, taking the training of PicoDet-L_layout_3cls as an example:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/det_layout_examples
 ```
@@ -248,7 +248,7 @@ Other related parameters can be set by modifying the `Global` and `Train` fields
 After completing model training, you can evaluate the specified model weight file on the validation set to verify the model's accuracy. Using PaddleX for model evaluation, you can complete the evaluation with a single command:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/det_layout_examples
 ```
@@ -270,7 +270,7 @@ After completing model training and evaluation, you can use the trained model we
 #### 4.4.1 Model Inference
 * To perform inference predictions through the command line, simply use the following command. Before running the following code, please download the [demo image](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/layout.jpg) to your local machine.
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="layout.jpg"
diff --git a/docs/module_usage/tutorials/ocr_modules/layout_detection.md b/docs/module_usage/tutorials/ocr_modules/layout_detection.md
index f69dbd2d0..deeee5f76 100644
--- a/docs/module_usage/tutorials/ocr_modules/layout_detection.md
+++ b/docs/module_usage/tutorials/ocr_modules/layout_detection.md
@@ -123,7 +123,7 @@ tar -xf ./dataset/det_layout_examples.tar -C ./dataset/
 一行命令即可完成数据校验:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_layout_examples
 ```
@@ -195,13 +195,13 @@ CheckDataset:
   ......
 

随后执行命令:

-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_layout_examples
 

数据划分执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml  \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/det_layout_examples \
     -o CheckDataset.split.enable=True \
@@ -213,7 +213,7 @@ CheckDataset:
 一条命令即可完成模型的训练,以此处`PicoDet-L_layout_3cls`的训练为例:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/det_layout_examples
 ```
@@ -244,7 +244,7 @@ python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/det_layout_examples
 ```
@@ -266,7 +266,7 @@ python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml
 #### 4.4.1 模型推理
 * 通过命令行的方式进行推理预测,只需如下一条命令。运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/layout.jpg)到本地。
 ```bash
-python main.py -c paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_model/inference" \
     -o Predict.input="layout.jpg"
diff --git a/docs/module_usage/tutorials/ocr_modules/seal_text_detection.en.md b/docs/module_usage/tutorials/ocr_modules/seal_text_detection.en.md
index ccb9ddb1c..b618a92c4 100644
--- a/docs/module_usage/tutorials/ocr_modules/seal_text_detection.en.md
+++ b/docs/module_usage/tutorials/ocr_modules/seal_text_detection.en.md
@@ -82,7 +82,7 @@ tar -xf ./dataset/ocr_curve_det_dataset_examples.tar -C ./dataset/
 Data validation can be completed with a single command:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 ```
@@ -167,13 +167,13 @@ CheckDataset:
   ......
 

Then execute the command:

-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 

After dataset splitting, the original annotation files will be renamed to xxx.bak in the original path.

The above parameters also support setting through appending command line arguments:

-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml  \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples \
     -o CheckDataset.split.enable=True \
@@ -186,7 +186,7 @@ CheckDataset:
 Model training can be completed with just one command. Here, we use the Seal Text Detection model (PP-OCRv4_server_seal_det) as an example:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 ```
@@ -220,7 +220,7 @@ Other related parameters can be set by modifying the `Global` and `Train` fields
 After model training, you can evaluate the specified model weights on the validation set to verify model accuracy. Using PaddleX for model evaluation requires just one command:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 ```
@@ -249,7 +249,7 @@ To perform inference predictions via the command line, use the following command
 
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_accuracy/inference" \
     -o Predict.input="seal_text_det.png"
diff --git a/docs/module_usage/tutorials/ocr_modules/seal_text_detection.md b/docs/module_usage/tutorials/ocr_modules/seal_text_detection.md
index 9799e88e8..2e4c055a8 100644
--- a/docs/module_usage/tutorials/ocr_modules/seal_text_detection.md
+++ b/docs/module_usage/tutorials/ocr_modules/seal_text_detection.md
@@ -76,7 +76,7 @@ tar -xf ./dataset/ocr_curve_det_dataset_examples.tar -C ./dataset/
 一行命令即可完成数据校验:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 ```
@@ -161,13 +161,13 @@ CheckDataset:
   ......
 

随后执行命令:

-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 

数据划分执行之后,原有标注文件会被在原路径下重命名为 xxx.bak

以上参数同样支持通过追加命令行参数的方式进行设置:

-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml  \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml  \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples \
     -o CheckDataset.split.enable=True \
@@ -179,7 +179,7 @@ CheckDataset:
 一条命令即可完成模型的训练,以此处PP-OCRv4服务端印章文本检测模型(PP-OCRv4_server_seal_det)的训练为例:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 ```
@@ -210,7 +210,7 @@ python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.y
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,一条命令即可完成模型的评估:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/ocr_curve_det_dataset_examples
 ```
@@ -234,7 +234,7 @@ python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.y
 通过命令行的方式进行推理预测,只需如下一条命令。运行以下代码前,请您下载[示例图片](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/seal_text_det.png)到本地。
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="./output/best_accuracy/inference" \
     -o Predict.input="seal_text_det.png"
diff --git a/docs/practical_tutorials/anomaly_detection_tutorial_en.md b/docs/practical_tutorials/anomaly_detection_tutorial.en.md
similarity index 97%
rename from docs/practical_tutorials/anomaly_detection_tutorial_en.md
rename to docs/practical_tutorials/anomaly_detection_tutorial.en.md
index 831af1336..8f4c72446 100644
--- a/docs/practical_tutorials/anomaly_detection_tutorial_en.md
+++ b/docs/practical_tutorials/anomaly_detection_tutorial.en.md
@@ -1,4 +1,6 @@
-[简体中文](anomaly_detection_tutorial.md) | English
+---
+comments: true
+---
 
 # PaddleX 3.0 Image Anomaly Detection Pipeline — Food Appearance Quality Inspection Tutorial
 
@@ -35,7 +37,7 @@ PaddleX provides 1 end-to-end anomaly detection models. For details, refer to th
 
 | Model List          | Avg (%) | GPU Inference Time (ms) | CPU Inference Time (ms) | Model Size (M) | yaml file |
 |-|-|-|-|-|-|
-|STFPM|96.2|-|-|21.5 M|[STFPM.yaml](../../paddlex/configs/anomaly_detection/STFPM.yaml)|
+|STFPM|96.2|-|-|21.5 M|[STFPM.yaml](../../paddlex/configs/image_anomaly_detection/STFPM.yaml)|
 
 > **Note: The above accuracy metrics are measured on the [MVTec AD](https://www.mvtec.com/company/research/datasets/mvtec-ad) dataset.**
 
@@ -56,7 +58,7 @@ tar -xf ./dataset/anomaly_detection_hazelnut.tar -C ./dataset/
 To verify the dataset, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/anomaly_detection_hazelnut
 ```
@@ -147,7 +149,7 @@ Data conversion and splitting can be enabled simultaneously. For data splitting,
 Before training, ensure that you have validated your dataset. To complete the training of a PaddleX model, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/anomaly_detection_hazelnut \
     -o Train.epochs_iters=4000
@@ -185,7 +187,7 @@ After completing model training, all outputs are saved in the specified output d
 After completing model training, you can evaluate the specified model weight file on the validation set to verify the model's accuracy. To evaluate a model using PaddleX, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/anomaly_detection_hazelnut
 ```
@@ -232,7 +234,7 @@ Changing Epoch Results:
 Replace the model in the production line with the fine-tuned model for testing, for example:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="output/best_model/inference" \
     -o Predict.input="https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/uad_hazelnut.png"
diff --git a/docs/practical_tutorials/anomaly_detection_tutorial.md b/docs/practical_tutorials/anomaly_detection_tutorial.md
index 5d937dbfc..55915d901 100644
--- a/docs/practical_tutorials/anomaly_detection_tutorial.md
+++ b/docs/practical_tutorials/anomaly_detection_tutorial.md
@@ -1,4 +1,6 @@
-简体中文 | [English](anomaly_detection_tutorial_en.md)
+---
+comments: true
+---
 
 # PaddleX 3.0 图像异常检测产线———食品外观质检教程
 
@@ -36,7 +38,7 @@ PaddleX 提供了 1 个端到端的高精度异常检测模型,具体可参考
 
 |模型名称|Avg(%)|GPU推理耗时(ms)|CPU推理耗时(ms)|模型存储大小|yaml 文件|
 |-|-|-|-|-|-|
-|STFPM|96.2|-|-|21.5 M|[STFPM.yaml](../../paddlex/configs/anomaly_detection/STFPM.yaml)|
+|STFPM|96.2|-|-|21.5 M|[STFPM.yaml](../../paddlex/configs/image_anomaly_detection/STFPM.yaml)|
 
 **注:以上精度指标为 **[MVTec AD](https://www.mvtec.com/company/research/datasets/mvtec-ad)** 验证集 平均异常分数。**
 
@@ -58,7 +60,7 @@ tar -xf ./dataset/anomaly_detection_hazelnut.tar -C ./dataset/
 在对数据集校验时,只需一行命令:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/anomaly_detection_hazelnut
 ```
@@ -146,7 +148,7 @@ python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
 在训练之前,请确保您已经对数据集进行了校验。完成 PaddleX 模型的训练,只需如下一条命令:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/anomaly_detection_hazelnut \
     -o Train.epochs_iters=4000
@@ -184,7 +186,7 @@ PaddleX 中每个模型都提供了模型开发的配置文件,用于设置相
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,只需一行命令:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/anomaly_detection_hazelnut
 ```
@@ -230,7 +232,7 @@ python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
 将产线中的模型替换为微调后的模型进行测试,如:
 
 ```bash
-python main.py -c paddlex/configs/anomaly_detection/STFPM.yaml \
+python main.py -c paddlex/configs/image_anomaly_detection/STFPM.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="output/best_model/inference" \
     -o Predict.input="https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/uad_hazelnut.png"
diff --git a/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.en.md b/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.en.md
index a41345a98..b0f78306e 100644
--- a/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.en.md
+++ b/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.en.md
@@ -178,7 +178,7 @@ tar -xf ./dataset/paperlayout.tar -C ./dataset/
 To verify the dataset, simply run the following command:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/paperlayout/
 ```
@@ -266,7 +266,7 @@ When splitting data, the original annotation files will be renamed as `xxx.bak`
 Before training, please ensure that you have validated the dataset. To complete PaddleX model training, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/paperlayout \
     -o Train.num_classes=4
@@ -305,7 +305,7 @@ After completing model training, all outputs are saved in the specified output d
 After completing model training, you can evaluate the specified model weight file on the validation set to verify the model accuracy. To evaluate a model using PaddleX, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/paperlayout
 ```
@@ -401,7 +401,7 @@ When selecting a training environment, it is important to consider the relations
 For reference, the command to execute training with different parameter adjustments can be:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/paperlayout \
     -o Train.num_classes=4 \
@@ -415,7 +415,7 @@ python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml
 You can test the fine-tuned single model using [test file](https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/doc_images/practical_tutorial/PP-ChatOCRv3_doc_layout/test.jpg)
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="output/best_model/inference" \
     -o Predict.input="https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/doc_images/practical_tutorial/PP-ChatOCRv3_doc_layout/test.jpg"
diff --git a/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.md b/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.md
index 75ab34b51..199b3ed16 100644
--- a/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.md
+++ b/docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.md
@@ -183,7 +183,7 @@ tar -xf ./dataset/paperlayout.tar -C ./dataset/
 在对数据集校验时,只需一行命令:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/paperlayout/
 ```
@@ -270,7 +270,7 @@ python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml
 在训练之前,请确保您已经对数据集进行了校验。完成 PaddleX 模型的训练,只需如下一条命令:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/paperlayout \
     -o Train.num_classes=4
@@ -309,7 +309,7 @@ PaddleX 中每个模型都提供了模型开发的配置文件,用于设置相
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,只需一行命令:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/paperlayout
 ```
@@ -404,7 +404,7 @@ python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml
 调整不同参数执行训练的命令可以参考:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/paperlayout \
     -o Train.num_classes=4 \
@@ -418,7 +418,7 @@ python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml
 可以将微调后的单模型进行测试,使用 [测试文件](https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/doc_images/practical_tutorial/PP-ChatOCRv3_doc_layout/test.jpg),进行预测:
 
 ```bash
-python main.py -c paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml \
+python main.py -c paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml \
     -o Global.mode=predict \
     -o Predict.model_dir="output/best_model/inference" \
     -o Predict.input="https://paddle-model-ecology.bj.bcebos.com/paddlex/PaddleX3.0/doc_images/practical_tutorial/PP-ChatOCRv3_doc_layout/test.jpg"
diff --git a/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.en.md b/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.en.md
index adf93068b..64f359ba6 100644
--- a/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.en.md
+++ b/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.en.md
@@ -109,7 +109,7 @@ PaddleX provides 2 end-to-end seal text detection models, which can be reference
 10.5878
 131.813
 4.7M
-PP-OCRv4_mobile_seal_det.yaml
+PP-OCRv4_mobile_seal_det.yaml
 
 
 PP-OCRv4_server_seal_det
@@ -117,7 +117,7 @@ PaddleX provides 2 end-to-end seal text detection models, which can be reference
 84.341
 2425.06
 108.3 M
-PP-OCRv4_server_seal_det.yaml
+PP-OCRv4_server_seal_det.yaml
 
 
 
@@ -141,7 +141,7 @@ tar -xf ./dataset/practical_seal.tar -C ./dataset/
 To verify the dataset, simply run the following command:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/practical_seal/
 ```
@@ -228,7 +228,7 @@ When splitting data, the original annotation files will be renamed as `xxx.bak`
 Before training, please ensure that you have validated the dataset. To complete PaddleX model training, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/practical_seal \
     -o Train.epochs_iters=30 \
@@ -269,7 +269,7 @@ After completing model training, all outputs are saved in the specified output d
 After completing model training, you can evaluate the specified model weight file on the validation set to verify the model accuracy. To evaluate a model using PaddleX, simply use the following command:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/practical_seal
 ```
@@ -355,7 +355,7 @@ When selecting a training environment, it is important to consider the relations
 For reference, the command to execute training with different parameter adjustments can be:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/practical_seal \
     -o Train.learning_rate=0.0001 \
diff --git a/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.md b/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.md
index 2977dd3b0..f9933cd4c 100644
--- a/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.md
+++ b/docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.md
@@ -105,7 +105,7 @@ PaddleX 提供了 2 个端到端的印章文本检测模型,具体可参考 [
 10.5878
 131.813
 4.7M
-PP-OCRv4_mobile_seal_det.yaml
+PP-OCRv4_mobile_seal_det.yaml
 
 
 PP-OCRv4_server_seal_det
@@ -113,7 +113,7 @@ PaddleX 提供了 2 个端到端的印章文本检测模型,具体可参考 [
 84.341
 2425.06
 108.3 M
-PP-OCRv4_server_seal_det.yaml
+PP-OCRv4_server_seal_det.yaml
 
 
 
@@ -137,7 +137,7 @@ tar -xf ./dataset/practical_seal.tar -C ./dataset/
 在对数据集校验时,只需一行命令:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=check_dataset \
     -o Global.dataset_dir=./dataset/practical_seal/
 ```
@@ -222,7 +222,7 @@ python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.y
 在训练之前,请确保您已经对数据集进行了校验。完成 PaddleX 模型的训练,只需如下一条命令:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/practical_seal \
     -o Train.epochs_iters=30 \
@@ -263,7 +263,7 @@ PaddleX 中每个模型都提供了模型开发的配置文件,用于设置相
 在完成模型训练后,可以对指定的模型权重文件在验证集上进行评估,验证模型精度。使用 PaddleX 进行模型评估,只需一行命令:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=evaluate \
     -o Global.dataset_dir=./dataset/practical_seal
 ```
@@ -348,7 +348,7 @@ python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.y
 调整不同参数执行训练的命令可以参考:
 
 ```bash
-python main.py -c paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml \
+python main.py -c paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml \
     -o Global.mode=train \
     -o Global.dataset_dir=./dataset/practical_seal \
     -o Train.learning_rate=0.0001 \
diff --git a/docs/support_list/models_list.en.md b/docs/support_list/models_list.en.md
index 4567c795c..fbb01f18b 100644
--- a/docs/support_list/models_list.en.md
+++ b/docs/support_list/models_list.en.md
@@ -682,7 +682,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 -
 -
 325.6 M
-CLIP_vit_base_patch16_448_ML.yaml
+CLIP_vit_base_patch16_448_ML.yaml
 Inference Model/Trained Model
 
 PP-HGNetV2-B0_ML
@@ -690,7 +690,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 -
 -
 39.6 M
-PP-HGNetV2-B0_ML.yaml
+PP-HGNetV2-B0_ML.yaml
 Inference Model/Trained Model
 
 PP-HGNetV2-B4_ML
@@ -698,7 +698,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 -
 -
 88.5 M
-PP-HGNetV2-B4_ML.yaml
+PP-HGNetV2-B4_ML.yaml
 Inference Model/Trained Model
 
 PP-HGNetV2-B6_ML
@@ -706,7 +706,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 -
 -
 286.5 M
-PP-HGNetV2-B6_ML.yaml
+PP-HGNetV2-B6_ML.yaml
 Inference Model/Trained Model
 
 PP-LCNet_x1_0_ML
@@ -714,7 +714,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 -
 -
 29.4 M
-PP-LCNet_x1_0_ML.yaml
+PP-LCNet_x1_0_ML.yaml
 Inference Model/Trained Model
 
 ResNet50_ML
@@ -722,7 +722,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 -
 -
 108.9 M
-ResNet50_ML.yaml
+ResNet50_ML.yaml
 Inference Model/Trained Model
 
 
@@ -747,7 +747,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 3.84845
 9.23735
 6.7 M
-PP-LCNet_x1_0_pedestrian_attribute.yaml
+PP-LCNet_x1_0_pedestrian_attribute.yaml
 Inference Model/Trained Model
 
 
@@ -772,7 +772,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 3.84845
 9.23735
 6.7 M
-PP-LCNet_x1_0_vehicle_attribute.yaml
+PP-LCNet_x1_0_vehicle_attribute.yaml
 Inference Model/Trained Model
 
 
@@ -797,7 +797,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 5.23428
 19.6005
 16.3 M
-PP-ShiTuV2_rec.yaml
+PP-ShiTuV2_rec.yaml
 Inference Model/Trained Model
 
 PP-ShiTuV2_rec_CLIP_vit_base
@@ -805,7 +805,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 13.1957
 285.493
 306.6 M
-PP-ShiTuV2_rec_CLIP_vit_base.yaml
+PP-ShiTuV2_rec_CLIP_vit_base.yaml
 Inference Model/Trained Model
 
 PP-ShiTuV2_rec_CLIP_vit_large
@@ -813,7 +813,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 51.1284
 1131.28
 1.05 G
-PP-ShiTuV2_rec_CLIP_vit_large.yaml
+PP-ShiTuV2_rec_CLIP_vit_large.yaml
 Inference Model/Trained Model
 
 
@@ -1761,7 +1761,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 10.5878
 131.813
 4.7 M
-PP-OCRv4_mobile_seal_det.yaml
+PP-OCRv4_mobile_seal_det.yaml
 Inference Model/Trained Model
 
 PP-OCRv4_server_seal_det
@@ -1769,7 +1769,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 84.341
 2425.06
 108.3 M
-PP-OCRv4_server_seal_det.yaml
+PP-OCRv4_server_seal_det.yaml
 Inference Model/Trained Model
 
 
@@ -1960,7 +1960,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 13.036
 91.2634
 7.4 M
-PicoDet_layout_1x.yaml
+PicoDet_layout_1x.yaml
 Inference Model/Trained Model
 
 PicoDet-S_layout_3cls
@@ -1968,7 +1968,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 13.521
 45.7633
 4.8 M
-PicoDet-S_layout_3cls.yaml
+PicoDet-S_layout_3cls.yaml
 Inference Model/Trained Model
 
 PicoDet-S_layout_17cls
@@ -1976,7 +1976,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 13.5632
 46.2059
 4.8 M
-PicoDet-S_layout_17cls.yaml
+PicoDet-S_layout_17cls.yaml
 Inference Model/Trained Model
 
 PicoDet-L_layout_3cls
@@ -1984,7 +1984,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 15.7425
 159.771
 22.6 M
-PicoDet-L_layout_3cls.yaml
+PicoDet-L_layout_3cls.yaml
 Inference Model/Trained Model
 
 PicoDet-L_layout_17cls
@@ -1992,7 +1992,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 17.1901
 160.262
 22.6 M
-PicoDet-L_layout_17cls.yaml
+PicoDet-L_layout_17cls.yaml
 Inference Model/Trained Model
 
 RT-DETR-H_layout_3cls
@@ -2000,7 +2000,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 114.644
 3832.62
 470.1 M
-RT-DETR-H_layout_3cls.yaml
+RT-DETR-H_layout_3cls.yaml
 Inference Model/Trained Model
 
 RT-DETR-H_layout_17cls
@@ -2008,7 +2008,7 @@ PaddleX incorporates multiple pipelines, each containing several modules, and ea
 115.126
 3827.25
 470.2 M
-RT-DETR-H_layout_17cls.yaml
+RT-DETR-H_layout_17cls.yaml
 Inference Model/Trained Model
 
 
diff --git a/docs/support_list/models_list.md b/docs/support_list/models_list.md
index a50176ec5..0f80039e4 100644
--- a/docs/support_list/models_list.md
+++ b/docs/support_list/models_list.md
@@ -682,7 +682,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 -
 -
 325.6 M
-CLIP_vit_base_patch16_448_ML.yaml
+CLIP_vit_base_patch16_448_ML.yaml
 推理模型/训练模型
 
 PP-HGNetV2-B0_ML
@@ -690,7 +690,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 -
 -
 39.6 M
-PP-HGNetV2-B0_ML.yaml
+PP-HGNetV2-B0_ML.yaml
 推理模型/训练模型
 
 PP-HGNetV2-B4_ML
@@ -698,7 +698,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 -
 -
 88.5 M
-PP-HGNetV2-B4_ML.yaml
+PP-HGNetV2-B4_ML.yaml
 推理模型/训练模型
 
 PP-HGNetV2-B6_ML
@@ -706,7 +706,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 -
 -
 286.5 M
-PP-HGNetV2-B6_ML.yaml
+PP-HGNetV2-B6_ML.yaml
 推理模型/训练模型
 
 PP-LCNet_x1_0_ML
@@ -714,7 +714,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 -
 -
 29.4 M
-PP-LCNet_x1_0_ML.yaml
+PP-LCNet_x1_0_ML.yaml
 推理模型/训练模型
 
 ResNet50_ML
@@ -722,7 +722,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 -
 -
 108.9 M
-ResNet50_ML.yaml
+ResNet50_ML.yaml
 推理模型/训练模型
 
 
@@ -747,7 +747,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 3.84845
 9.23735
 6.7 M
-PP-LCNet_x1_0_pedestrian_attribute.yaml
+PP-LCNet_x1_0_pedestrian_attribute.yaml
 推理模型/训练模型
 
 
@@ -772,7 +772,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 3.84845
 9.23735
 6.7 M
-PP-LCNet_x1_0_vehicle_attribute.yaml
+PP-LCNet_x1_0_vehicle_attribute.yaml
 推理模型/训练模型
 
 
@@ -797,7 +797,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 5.23428
 19.6005
 16.3 M
-PP-ShiTuV2_rec.yaml
+PP-ShiTuV2_rec.yaml
 推理模型/训练模型
 
 PP-ShiTuV2_rec_CLIP_vit_base
@@ -805,7 +805,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 13.1957
 285.493
 306.6 M
-PP-ShiTuV2_rec_CLIP_vit_base.yaml
+PP-ShiTuV2_rec_CLIP_vit_base.yaml
 推理模型/训练模型
 
 PP-ShiTuV2_rec_CLIP_vit_large
@@ -813,7 +813,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 51.1284
 1131.28
 1.05 G
-PP-ShiTuV2_rec_CLIP_vit_large.yaml
+PP-ShiTuV2_rec_CLIP_vit_large.yaml
 推理模型/训练模型
 
 
@@ -1759,7 +1759,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 10.5878
 131.813
 4.7M
-PP-OCRv4_mobile_seal_det.yaml
+PP-OCRv4_mobile_seal_det.yaml
 推理模型/训练模型
 
 PP-OCRv4_server_seal_det
@@ -1767,7 +1767,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 84.341
 2425.06
 108.3 M
-PP-OCRv4_server_seal_det.yaml
+PP-OCRv4_server_seal_det.yaml
 推理模型/训练模型
 
 
@@ -1958,7 +1958,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 13.036
 91.2634
 7.4 M
-PicoDet_layout_1x.yaml
+PicoDet_layout_1x.yaml
 推理模型/训练模型
 
 PicoDet-S_layout_3cls
@@ -1966,7 +1966,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 13.521
 45.7633
 4.8 M
-PicoDet-S_layout_3cls.yaml
+PicoDet-S_layout_3cls.yaml
 推理模型/训练模型
 
 PicoDet-S_layout_17cls
@@ -1974,7 +1974,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 13.5632
 46.2059
 4.8 M
-PicoDet-S_layout_17cls.yaml
+PicoDet-S_layout_17cls.yaml
 推理模型/训练模型
 
 PicoDet-L_layout_3cls
@@ -1982,7 +1982,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 15.7425
 159.771
 22.6 M
-PicoDet-L_layout_3cls.yaml
+PicoDet-L_layout_3cls.yaml
 推理模型/训练模型
 
 PicoDet-L_layout_17cls
@@ -1990,7 +1990,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 17.1901
 160.262
 22.6 M
-PicoDet-L_layout_17cls.yaml
+PicoDet-L_layout_17cls.yaml
 推理模型/训练模型
 
 RT-DETR-H_layout_3cls
@@ -1998,7 +1998,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 114.644
 3832.62
 470.1 M
-RT-DETR-H_layout_3cls.yaml
+RT-DETR-H_layout_3cls.yaml
 推理模型/训练模型
 
 RT-DETR-H_layout_17cls
@@ -2006,7 +2006,7 @@ PaddleX 内置了多条产线,每条产线都包含了若干模块,每个模
 115.126
 3827.25
 470.2 M
-RT-DETR-H_layout_17cls.yaml
+RT-DETR-H_layout_17cls.yaml
 推理模型/训练模型
 
 
diff --git a/paddlex/configs/face_recognition/MobileFaceNet.yaml b/paddlex/configs/face_feature/MobileFaceNet.yaml
similarity index 100%
rename from paddlex/configs/face_recognition/MobileFaceNet.yaml
rename to paddlex/configs/face_feature/MobileFaceNet.yaml
diff --git a/paddlex/configs/face_recognition/ResNet50_face.yaml b/paddlex/configs/face_feature/ResNet50_face.yaml
similarity index 100%
rename from paddlex/configs/face_recognition/ResNet50_face.yaml
rename to paddlex/configs/face_feature/ResNet50_face.yaml
diff --git a/paddlex/configs/anomaly_detection/STFPM.yaml b/paddlex/configs/image_anomaly_detection/STFPM.yaml
similarity index 100%
rename from paddlex/configs/anomaly_detection/STFPM.yaml
rename to paddlex/configs/image_anomaly_detection/STFPM.yaml
diff --git a/paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml b/paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml
similarity index 100%
rename from paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml
rename to paddlex/configs/image_feature/PP-ShiTuV2_rec.yaml
diff --git a/paddlex/configs/general_recognition/PP-ShiTuV2_rec_CLIP_vit_base.yaml b/paddlex/configs/image_feature/PP-ShiTuV2_rec_CLIP_vit_base.yaml
similarity index 100%
rename from paddlex/configs/general_recognition/PP-ShiTuV2_rec_CLIP_vit_base.yaml
rename to paddlex/configs/image_feature/PP-ShiTuV2_rec_CLIP_vit_base.yaml
diff --git a/paddlex/configs/general_recognition/PP-ShiTuV2_rec_CLIP_vit_large.yaml b/paddlex/configs/image_feature/PP-ShiTuV2_rec_CLIP_vit_large.yaml
similarity index 100%
rename from paddlex/configs/general_recognition/PP-ShiTuV2_rec_CLIP_vit_large.yaml
rename to paddlex/configs/image_feature/PP-ShiTuV2_rec_CLIP_vit_large.yaml
diff --git a/paddlex/configs/multilabel_classification/CLIP_vit_base_patch16_448_ML.yaml b/paddlex/configs/image_multilabel_classification/CLIP_vit_base_patch16_448_ML.yaml
similarity index 100%
rename from paddlex/configs/multilabel_classification/CLIP_vit_base_patch16_448_ML.yaml
rename to paddlex/configs/image_multilabel_classification/CLIP_vit_base_patch16_448_ML.yaml
diff --git a/paddlex/configs/multilabel_classification/PP-HGNetV2-B0_ML.yaml b/paddlex/configs/image_multilabel_classification/PP-HGNetV2-B0_ML.yaml
similarity index 100%
rename from paddlex/configs/multilabel_classification/PP-HGNetV2-B0_ML.yaml
rename to paddlex/configs/image_multilabel_classification/PP-HGNetV2-B0_ML.yaml
diff --git a/paddlex/configs/multilabel_classification/PP-HGNetV2-B4_ML.yaml b/paddlex/configs/image_multilabel_classification/PP-HGNetV2-B4_ML.yaml
similarity index 100%
rename from paddlex/configs/multilabel_classification/PP-HGNetV2-B4_ML.yaml
rename to paddlex/configs/image_multilabel_classification/PP-HGNetV2-B4_ML.yaml
diff --git a/paddlex/configs/multilabel_classification/PP-HGNetV2-B6_ML.yaml b/paddlex/configs/image_multilabel_classification/PP-HGNetV2-B6_ML.yaml
similarity index 100%
rename from paddlex/configs/multilabel_classification/PP-HGNetV2-B6_ML.yaml
rename to paddlex/configs/image_multilabel_classification/PP-HGNetV2-B6_ML.yaml
diff --git a/paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml b/paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml
similarity index 100%
rename from paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml
rename to paddlex/configs/image_multilabel_classification/PP-LCNet_x1_0_ML.yaml
diff --git a/paddlex/configs/multilabel_classification/ResNet50_ML.yaml b/paddlex/configs/image_multilabel_classification/ResNet50_ML.yaml
similarity index 100%
rename from paddlex/configs/multilabel_classification/ResNet50_ML.yaml
rename to paddlex/configs/image_multilabel_classification/ResNet50_ML.yaml
diff --git a/paddlex/configs/structure_analysis/PicoDet-L_layout_17cls.yaml b/paddlex/configs/layout_detection/PicoDet-L_layout_17cls.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/PicoDet-L_layout_17cls.yaml
rename to paddlex/configs/layout_detection/PicoDet-L_layout_17cls.yaml
diff --git a/paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml b/paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml
rename to paddlex/configs/layout_detection/PicoDet-L_layout_3cls.yaml
diff --git a/paddlex/configs/structure_analysis/PicoDet-S_layout_17cls.yaml b/paddlex/configs/layout_detection/PicoDet-S_layout_17cls.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/PicoDet-S_layout_17cls.yaml
rename to paddlex/configs/layout_detection/PicoDet-S_layout_17cls.yaml
diff --git a/paddlex/configs/structure_analysis/PicoDet-S_layout_3cls.yaml b/paddlex/configs/layout_detection/PicoDet-S_layout_3cls.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/PicoDet-S_layout_3cls.yaml
rename to paddlex/configs/layout_detection/PicoDet-S_layout_3cls.yaml
diff --git a/paddlex/configs/structure_analysis/PicoDet_layout_1x.yaml b/paddlex/configs/layout_detection/PicoDet_layout_1x.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/PicoDet_layout_1x.yaml
rename to paddlex/configs/layout_detection/PicoDet_layout_1x.yaml
diff --git a/paddlex/configs/structure_analysis/RT-DETR-H_layout_17cls.yaml b/paddlex/configs/layout_detection/RT-DETR-H_layout_17cls.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/RT-DETR-H_layout_17cls.yaml
rename to paddlex/configs/layout_detection/RT-DETR-H_layout_17cls.yaml
diff --git a/paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml b/paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml
similarity index 100%
rename from paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml
rename to paddlex/configs/layout_detection/RT-DETR-H_layout_3cls.yaml
diff --git a/paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml b/paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml
similarity index 100%
rename from paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml
rename to paddlex/configs/pedestrian_attribute_recognition/PP-LCNet_x1_0_pedestrian_attribute.yaml
diff --git a/paddlex/configs/text_detection_seal/PP-OCRv4_mobile_seal_det.yaml b/paddlex/configs/seal_text_detection/PP-OCRv4_mobile_seal_det.yaml
similarity index 100%
rename from paddlex/configs/text_detection_seal/PP-OCRv4_mobile_seal_det.yaml
rename to paddlex/configs/seal_text_detection/PP-OCRv4_mobile_seal_det.yaml
diff --git a/paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml b/paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml
similarity index 100%
rename from paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml
rename to paddlex/configs/seal_text_detection/PP-OCRv4_server_seal_det.yaml
diff --git a/paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml b/paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml
similarity index 100%
rename from paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml
rename to paddlex/configs/vehicle_attribute_recognition/PP-LCNet_x1_0_vehicle_attribute.yaml