Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions app/dependencies/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# app/dependencies/__init__.py

from datetime import datetime

# Application-wide shared state, populated by the model-initialization flow
# and read by the routers.
globals_dict = {}                    # loaded data frames / trained models
model_initializing = False           # True while model (re)initialization is in progress
last_initialization_attempt = None   # timestamp of the most recent initialization attempt


def get_globals_dict():
    """FastAPI dependency: return the shared global-state dictionary."""
    return globals_dict
166 changes: 166 additions & 0 deletions app/router/evaluation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
# app/routers/evaluation.py

from fastapi import APIRouter, Depends, HTTPException
from app.schema.recommendation_schema import RecommendationEvaluationResponse
from app.services.evaluation.evaluator import evaluate_recommendation_model, evaluate_with_cross_validation
from app.services.evaluation.diversity_metrics import evaluate_recommendation_diversity
from app.dependencies import get_globals_dict
# NOTE(review): a duplicate import of optimize_recommendation_parameters from
# app.services.model_trainer.hyperparameter_tuning was removed here — it was
# immediately shadowed by the package-level import below, so the binding in
# effect is unchanged.
from app.services.model_trainer import evaluate_model, optimize_recommendation_parameters

# Per-algorithm recommendation generators.
# NOTE(review): "recommenation" matches the package's actual (misspelled) path,
# as also imported by recommendation_api.py — confirm before renaming.
from app.services.model_trainer.recommenation.basic import generate_recommendations
from app.services.model_trainer.recommenation.hybrid import generate_hybrid_recommendations

import json
import logging
import numpy as np

router = APIRouter(
    prefix="/evaluate",
    tags=["evaluation"],
    responses={404: {"description": "Not found"}},
)

logger = logging.getLogger(__name__)

@router.get("/basic", response_model=RecommendationEvaluationResponse)
async def evaluate_basic(globals_dict=Depends(get_globals_dict)):
    """Run the basic recommendation-system evaluation.

    Returns the core evaluation metrics, augmented with diversity metrics
    when test interactions and the model data frame are available.
    Raises HTTP 500 on any internal failure.
    """
    try:
        metrics = evaluate_recommendation_model(globals_dict)

        # Compute additional diversity metrics when the data is available.
        df_model = globals_dict.get("df_model")

        if "test_interactions" in globals_dict and df_model is not None:
            test_interactions = globals_dict.get("test_interactions")

            # Map each user to the list of recommended restaurant ids.
            # BUG FIX: the loop variable was previously named `metrics`,
            # clobbering the evaluation metrics computed above, so the final
            # `metrics.update(...)` mutated the last user's result instead.
            recommendations_dict = {}
            for user_id, user_result in globals_dict.get("recommendations_results", {}).items():
                recommendations_dict[user_id] = [
                    r['restaurant_id'] for r in user_result.get('recommendations', [])
                ]

            diversity_metrics = evaluate_recommendation_diversity(
                recommendations_dict,
                user_history=test_interactions,
                restaurant_data=df_model
            )

            # Merge diversity metrics into the base metrics.
            metrics.update(diversity_metrics)

        return {"metrics": metrics, "status": "success"}

    except Exception as e:
        logger.error(f"평가 중 오류 발생: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/cross-validation", response_model=RecommendationEvaluationResponse)
async def evaluate_with_cross_val(n_splits: int = 5, globals_dict=Depends(get_globals_dict)):
    """Run k-fold cross-validation of the recommendation model.

    `n_splits` controls the number of folds (default 5). Any internal
    failure is logged and surfaced as HTTP 500.
    """
    try:
        cv_metrics = evaluate_with_cross_validation(globals_dict, n_splits=n_splits)
        return {"metrics": cv_metrics, "status": "success"}
    except Exception as exc:
        logger.error(f"교차 검증 평가 중 오류 발생: {exc}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(exc))

@router.post("/optimize-params", response_model=dict)
async def optimize_parameters(n_trials: int = 30, timeout: int = 300, globals_dict=Depends(get_globals_dict)):
    """Optimize recommendation-system hyperparameters.

    Raises HTTP 400 when required data is not loaded, HTTP 500 on any
    unexpected failure.
    """
    try:
        df_model = globals_dict.get("df_model")
        user_features_df = globals_dict.get("user_features_df")
        model_features = globals_dict.get("model_features")

        if df_model is None or user_features_df is None or model_features is None:
            raise HTTPException(status_code=400, detail="필요한 데이터가 로드되지 않았습니다.")

        best_params = optimize_recommendation_parameters(
            df_model,
            user_features_df,
            model_features,
            n_trials=n_trials,
            timeout=timeout
        )

        return {"best_parameters": best_params, "status": "success"}

    except HTTPException:
        # BUG FIX: the generic handler below previously converted the
        # intentional 400 into a 500; re-raise HTTP errors unchanged.
        raise
    except Exception as e:
        logger.error(f"파라미터 최적화 중 오류 발생: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/compare-algorithms", response_model=dict)
async def compare_algorithms(globals_dict=Depends(get_globals_dict)):
    """Compare the basic and hybrid recommendation algorithms on a user sample.

    For up to 20 sampled users, generates recommendations with both
    algorithms and reports per-algorithm summary statistics (the metrics
    are simplified to recommendation counts and new-user ratio).
    Raises HTTP 400 when required data is not loaded, HTTP 500 otherwise.
    """
    try:
        df_model = globals_dict.get("df_model")
        user_features_df = globals_dict.get("user_features_df")

        if df_model is None or user_features_df is None:
            raise HTTPException(status_code=400, detail="필요한 데이터가 로드되지 않았습니다.")

        # Sample up to 20 distinct users.
        sample_users = df_model['user_id'].sample(min(20, df_model['user_id'].nunique())).unique()

        # Model artifacts are loop-invariant; fetch them once.
        stacking_reg = globals_dict.get("stacking_reg")
        scaler = globals_dict.get("scaler")
        model_features = globals_dict.get("model_features")
        # BUG FIX: explicit None checks instead of all([...]) — truthiness
        # evaluation raises for DataFrame-like artifacts and misfires on
        # empty containers.
        artifacts_ready = (
            stacking_reg is not None
            and scaler is not None
            and model_features is not None
        )

        basic_metrics = {}
        hybrid_metrics = {}

        if artifacts_ready:
            for user_id in sample_users:
                # Basic recommendation (returns a JSON string).
                basic_result = json.loads(generate_recommendations(
                    df_model.copy(),
                    stacking_reg,
                    model_features,
                    user_id,
                    scaler,
                    user_features=user_features_df
                ))

                # Hybrid recommendation.
                hybrid_result = generate_hybrid_recommendations(
                    df_model.copy(),
                    df_model.copy(),
                    user_id,
                    n=15,
                    alpha=0.7
                )

                # Simplified metrics: recommendation count and new-user flag.
                basic_metrics[user_id] = {
                    'num_recommendations': len(basic_result.get('recommendations', [])),
                    'is_new_user': basic_result.get('is_new_user', True)
                }
                hybrid_metrics[user_id] = {
                    'num_recommendations': len(hybrid_result.get('recommendations', [])),
                    'is_new_user': hybrid_result.get('is_new_user', True)
                }

        def _summarize(per_user):
            # np.mean([]) would emit a warning and return nan; report zeros
            # instead when no user could be evaluated.
            if not per_user:
                return {'avg_recommendations': 0.0, 'new_user_ratio': 0.0, 'coverage': 0}
            return {
                'avg_recommendations': float(np.mean([m['num_recommendations'] for m in per_user.values()])),
                'new_user_ratio': float(np.mean([m['is_new_user'] for m in per_user.values()])),
                'coverage': len(per_user) / len(sample_users) if sample_users.size > 0 else 0
            }

        result = {
            'basic_algorithm': _summarize(basic_metrics),
            'hybrid_algorithm': _summarize(hybrid_metrics)
        }

        return {"comparison_results": result, "status": "success"}

    except HTTPException:
        # BUG FIX: re-raise intentional HTTP errors (the 400 above) instead
        # of letting the generic handler turn them into 500s.
        raise
    except Exception as e:
        logger.error(f"알고리즘 비교 중 오류 발생: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
36 changes: 30 additions & 6 deletions app/router/recommendation_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,20 +11,18 @@
from app.services.preprocess.restaurant.data_loader import load_restaurant_json_files, load_user_json_files
from app.services.preprocess.restaurant.preprocessor import preprocess_data
# NOTE(review): train_model was imported twice (alone and together with
# optimize_recommendation_parameters); the two lines are merged here.
from app.services.model_trainer import train_model, optimize_recommendation_parameters
from app.services.model_trainer.recommenation.basic import generate_recommendations
from app.services.preprocess.user.user_preprocess import user_preprocess_data  # user-data preprocessing
from app.services.evaluation.evaluator import evaluate_recommendation_model
# NOTE(review): model_initializing / last_initialization_attempt are imported
# by value — rebinding them here (via `global`) does NOT update
# app.dependencies' copies; verify no other module reads the flags from there.
from app.dependencies import globals_dict, model_initializing, last_initialization_attempt
from typing import List, Dict, Any
from datetime import datetime

# Router setup
logger = logging.getLogger("recommendation_api")
router = APIRouter()

# 글로벌 변수 초기화
globals_dict = {}
model_initializing = False # 모델 초기화 상태를 추적하는 전역 변수
last_initialization_attempt = None # 마지막 초기화 시도 시간

# 초기 데이터 로딩 및 모델 학습
def initialize_model(force=False):
global globals_dict, model_initializing, last_initialization_attempt
Expand Down Expand Up @@ -173,6 +171,32 @@ async def check_model_status():

return status

# Endpoint exposing the current model's evaluation metrics
@router.get("/evaluate", response_model=Dict[str, Any])
async def evaluate_model():
    """Compute and return performance metrics for the loaded recommendation model."""
    global globals_dict, model_initializing

    try:
        # The model must be initialized (key artifacts present) before evaluating.
        required_keys = ("stacking_reg", "df_model")
        if not globals_dict or any(key not in globals_dict for key in required_keys):
            raise HTTPException(
                status_code=503,
                detail="모델이 초기화되지 않았습니다. 먼저 모델을 초기화하세요.",
                headers={"Retry-After": "30"}
            )

        # Evaluate the model and return the raw metrics dict.
        return evaluate_recommendation_model(globals_dict)

    except HTTPException:
        # Propagate intentional HTTP errors (e.g. the 503 above) unchanged.
        raise
    except Exception as e:
        logger.error(f"모델 평가 중 오류 발생: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))

# recommend 함수 내부 수정
@router.post("",
response_model=Dict[str, Any],
Expand Down
14 changes: 13 additions & 1 deletion app/schema/recommendation_schema.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# app/models/recommendation_schema.py

from pydantic import BaseModel, Field
# NOTE(review): the pre-merge `from typing import List, Annotated` line was
# superseded by this widened import; only the merged line is kept.
from typing import Dict, Any, List, Optional, Annotated

class RecommendationItem(BaseModel):
category_id: int
Expand Down Expand Up @@ -30,3 +30,15 @@ class UserData(BaseModel):
user_id: str
# 선호 카테고리는 최소 1개, 최대 3개 (Pydantic v2 방식)
preferred_categories: Annotated[List[str], Field(min_length=1, max_length=3)]

class RecommendationEvaluationResponse(BaseModel):
    """Response envelope returned by the evaluation endpoints."""
    metrics: Dict[str, Any]  # metric name -> computed value
    status: str  # "success" when evaluation completed

class HyperparameterOptimizationRequest(BaseModel):
    """Request body for hyperparameter optimization."""
    n_trials: Optional[int] = 30  # number of optimization trials
    timeout: Optional[int] = 300  # optimization budget — presumably seconds; TODO confirm

class HyperparameterOptimizationResponse(BaseModel):
    """Response envelope for hyperparameter optimization results."""
    best_parameters: Dict[str, Any]  # best parameter name -> value found
    status: str  # "success" when optimization completed
19 changes: 19 additions & 0 deletions app/services/evaluation/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# app/services/evaluation/__init__.py
"""Public API of the evaluation package: re-exports evaluation helpers."""

from app.services.evaluation.evaluator import (
    evaluate_recommendation_model,
    evaluate_with_cross_validation
)
from app.services.evaluation.data_generation import (
    create_test_interactions,
    create_stratified_train_test_split
)
from app.services.evaluation.utils import default_empty_metrics

# Names exported via `from app.services.evaluation import *`.
__all__ = [
    'evaluate_recommendation_model',
    'evaluate_with_cross_validation',
    'create_test_interactions',
    'create_stratified_train_test_split',
    'default_empty_metrics'
]
Loading