From 037a4bd3848013304ea9fd212c364ed1b6a3051d Mon Sep 17 00:00:00 2001 From: Oscar Arroyo Vega Date: Thu, 6 Nov 2025 11:33:15 +0000 Subject: [PATCH 1/6] docs: makefile shortcuts to run services (they are mounted but sleeping -infinity) --- Makefile | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Makefile b/Makefile index 0e721ab..f23b259 100644 --- a/Makefile +++ b/Makefile @@ -58,6 +58,22 @@ all-services-up: cd services/frontend && uv run streamlit run main.py --server.address 0.0.0.0 --server.port 3000 & \ cd services/retriever && uv run uvicorn retriever.api:app --host 0.0.0.0 --port 8000 --reload +# --------------------SERVICE STARTERS -------------------------------------------------------------------------------------------------- +start-frontend: + cd services/frontend && uv sync && uv run streamlit run main.py --server.address 0.0.0.0 --server.port 3000 + +start-retriever: + cd services/retriever && uv sync && uv run uvicorn src.api:app --host 0.0.0.0 --port 8000 --reload + +start-pusher: + cd services/pusher && uv sync && uv run python main.py + +start-scraper: + cd services/scraper && uv sync && cd event_scraper && uv run python main.py + +start-parser: + cd services/parser && uv sync && uv run python parser.py + # --------------------TESTS -------------------------------------------------------------------------------------------------- test-parser: pytest tests/test_parser.py -v -s --log-cli-level=INFO --capture=no --tb=short From 9a5a617e60a06174158de0e320271581ee94c830 Mon Sep 17 00:00:00 2001 From: Oscar Arroyo Vega Date: Thu, 6 Nov 2025 11:34:05 +0000 Subject: [PATCH 2/6] refactor: add some changes in frontend UI --- services/frontend/main.py | 166 +++++++++++++++++++++++++++++++------- 1 file changed, 137 insertions(+), 29 deletions(-) diff --git a/services/frontend/main.py b/services/frontend/main.py index a8027ca..f3c8305 100644 --- a/services/frontend/main.py +++ b/services/frontend/main.py @@ -3,20 +3,124 @@ from datetime 
import date from config import settings +# Enhanced dark theme CSS +st.markdown( + """ + +""", + unsafe_allow_html=True, +) + API_URL = f"{settings.api_url}{settings.api_path}" st.title("🫦laiive") -# Sidebar for filters with st.sidebar: - st.header("SQL Filters") - - # Place filter places = ["All", "Bergamo", "Barcelona", "Boston", "Milano"] - selected_place = st.selectbox("PLACE FILTER:", places) + selected_place = st.selectbox("CITY:", places) - # Date filter - date_option = st.radio("DATE FILTER:", ["All Dates", "Specific Date", "Date Range"]) + date_option = st.radio("DATE:", ["All Dates", "Specific Date", "Date Range"]) if date_option == "Specific Date": selected_date = st.date_input("Select Date:", value=date(2025, 8, 1)) @@ -33,15 +137,6 @@ selected_date = None date_range = None - # Display current filters - st.subheader("Current Filters") - st.write(f"**Place:** {selected_place}") - if date_option == "Specific Date": - st.write(f"**Date:** {selected_date}") - elif date_option == "Date Range" and date_range is not None: - st.write(f"**Date Range:** {date_range[0]} to {date_range[1]}") - else: - st.write("**Date:** All Dates") if "messages" not in st.session_state: st.session_state.messages = [] @@ -82,12 +177,31 @@ def get_laiive_response( return f"Connection error: {e}" -# Main chat interface -st.header("Chat search engine") +# Create a scrollable container for chat messages +chat_container = st.container(height=500) # Adjust height as needed -user_input = st.text_input("You:", key="user_input") - -if st.button("Send") and user_input: +with chat_container: + # Display chat history + for sender, msg in st.session_state.messages: + if sender == "user": + st.markdown(f"**You:** {msg}") + else: + st.markdown(f"**laiive:** {msg}") + +# Fixed input area at the bottom +with st.container(): + col1, col2 = st.columns([5, 1]) + with col1: + user_input = st.text_input( + "You:", + key="user_input", + label_visibility="collapsed", + placeholder="Type your message...", 
+ ) + with col2: + send_button = st.button("Send", use_container_width=True) + +if send_button and user_input: st.session_state.messages.append(("user", user_input)) # Pass filters to the API @@ -98,11 +212,5 @@ def get_laiive_response( date_range=date_range, ) - st.session_state.messages.append(("bot", bot_reply)) - -# Display chat history -for sender, msg in st.session_state.messages: - if sender == "user": - st.markdown(f"**You:** {msg}") - else: - st.markdown(f"**laiive:** {msg}") + st.session_state.messages.append(("laiive", bot_reply)) + st.rerun() # Refresh to show new messages From dd7d8abf78d81a7d30671761d33e0ee445ffec6a Mon Sep 17 00:00:00 2001 From: Oscar Arroyo Vega Date: Fri, 7 Nov 2025 20:22:05 +0000 Subject: [PATCH 3/6] feat: Add Ollama provider and rebuild query for production schema Added local LLM support via Ollama and rebuilt query builder to match production DB schema with events/venues/artists joins. Includes timing instrumentation for performance monitoring. Known issue: Date filtering uses N OR conditions - needs optimization. Schema and query require data engineer review. Next: Add filter extraction LLM, conversational memory, voice input, sentiment analysis, and RLHF feedback collection system. 
--- services/retriever/.example.env | 10 +- services/retriever/prompts/__init__.py | 0 services/retriever/src/api.py | 9 +- services/retriever/src/main.py | 319 ++++++++++++++----- 4 files changed, 186 insertions(+), 152 deletions(-) delete mode 100644 services/retriever/prompts/__init__.py diff --git a/services/retriever/.example.env b/services/retriever/.example.env index 2e7b36d..289ff26 100644 --- a/services/retriever/.example.env +++ b/services/retriever/.example.env @@ -5,14 +5,16 @@ POSTGRES_PORT=5432 # pragma: allowlist secret POSTGRES_HOST=dbhost # pragma: allowlist secret POSTGRES_URL=postgresql+asyncpg://postgres:yourpassword@dbhost:5432/yourdb # pragma: allowlist secret -LLM_PROVIDER= provider_name # openai or anthropic, gemini, ollama -LLM_MODEL= model_name # Must match the provider you chose -LLM_TEMPERATURE= # 0.0 = deterministic, 1.0 = creative +LLM_PROVIDER=provider_name # openai or anthropic, gemini, ollama +LLM_MODEL=model_name # Must match the provider you chose +LLM_TEMPERATURE=0.1 # 0.0 = deterministic, 1.0 = creative OPENAI_API_KEY=your-openai-key # pragma: allowlist secret ANTHROPIC_API_KEY=your-anthropic-key # pragma: allowlist secret GEMINI_API_KEY=your-gemini-key # pragma: allowlist secret -OLLAMA_BASE_URL=http://localhost:11434 # example of use: > curl -fsSL https://ollama.ai/install.sh | sh > ollama run llama3.2:1b + +OLLAMA_BASE_URL=http://host.docker.internal:11434 # example of use to run ollama on localhost:11434 > curl -fsSL https://ollama.ai/install.sh | sh > ollama serve +# FOR PRODUCTION PURPOSES, RUN OLLAMA IN CLOUD OR DEDICATED SERVER AND POINT TO IT.
API_URL=http://backend:8000 # for local API_URL= http://loaclhost:8000 HOST=0.0.0.0 diff --git a/services/retriever/prompts/__init__.py b/services/retriever/prompts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/services/retriever/src/api.py b/services/retriever/src/api.py index df27988..b25639c 100644 --- a/services/retriever/src/api.py +++ b/services/retriever/src/api.py @@ -2,19 +2,14 @@ from fastapi import FastAPI, status from datetime import date from pydantic import BaseModel -from typing import Optional +from typing import Optional, List from src.config import settings from src.main import get_response # Schemas defined inline TODO if API endpoints > 15 move Schemas to schemas file. -class DataRange(BaseModel): - start: date - end: date - - class SQLFilter(BaseModel): - date_range: Optional[DataRange] = None + dates: Optional[List[str]] = None place: Optional[str] = None diff --git a/services/retriever/src/main.py b/services/retriever/src/main.py index 839e364..37a09b0 100644 --- a/services/retriever/src/main.py +++ b/services/retriever/src/main.py @@ -4,29 +4,17 @@ from llama_index.llms.ollama import Ollama from llama_index.llms.gemini import Gemini from llama_index.core.llms import LLM -from pydantic import BaseModel, ValidationError -from typing import Optional import sqlalchemy from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession from sqlalchemy.orm import sessionmaker from datetime import date, datetime from loguru import logger -import json - - -class DataRange(BaseModel): - start: str - end: str - - -class SQLFilter(BaseModel): - date_range: Optional[DataRange] = None - place: Optional[str] = None +import time +from contextlib import contextmanager today = date.today() -# Create async engine engine = create_async_engine(settings.postgres_url) AsyncSessionLocal = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False) @@ -64,10 +52,14 @@ def llm_factory() -> LLM: ) elif settings.llm_provider == 
"ollama": + logger.info( + f"Using Ollama with model: {settings.llm_model}, base_url: {settings.ollama_base_url}, temperature: {settings.llm_temperature}" + ) return Ollama( model=settings.llm_model, # e.g., "llama2", "mistral" base_url=settings.ollama_base_url, temperature=settings.llm_temperature, + timeout=120.0, ) else: @@ -77,151 +69,196 @@ def llm_factory() -> LLM: llm = llm_factory() -async def get_response(message: str, filters_info: Optional[SQLFilter] = None) -> str: - """Simple version: just pass the user message to the LLM""" - logger.info(f"Processing message: {message}") - - # Build a simple prompt - prompt = f"""You are a helpful assistant for musical life events. - Today is {today}. - User question: {message} - Please provide a helpful response.""" - +@contextmanager +def log_timing(description: str): + start = time.perf_counter() + logger.info(f"⏱️ Starting: {description}") try: - # Call the LLM - response = llm.complete(prompt) - logger.info("Successfully got LLM response") - return response.text - except Exception as e: - logger.error(f"Error calling LLM: {e}", exc_info=True) - return f"Sorry, I encountered an error: {str(e)}" - - def format_date_range(date_range: DataRange) -> str: - return "event_date >= :start_date AND event_date <= :end_date" - - def format_place_filter(place: str) -> str: - return "place_city ILIKE :place_pattern" # TODO - - def build_query(filters_info: SQLFilter) -> tuple[str, dict]: - base_query = "SELECT * FROM events" - conditions = [] - params = {} - - if filters_info.date_range: - date_condition = format_date_range(filters_info.date_range) - conditions.append(date_condition) - params["start_date"] = filters_info.date_range.start - params["end_date"] = filters_info.date_range.end + yield + finally: + elapsed = time.perf_counter() - start + logger.info( + f"✓ Completed: {description} in {elapsed:.3f}s ({elapsed*1000:.1f}ms)" + ) - if filters_info.place: - place_condition = format_place_filter(filters_info.place) - 
conditions.append(place_condition) - params["place_pattern"] = f"%{filters_info.place}%" - if conditions: - query = f"{base_query} WHERE {' AND '.join(conditions)}" - else: - query = base_query +async def get_response(message, filters_info) -> str: + request_start = time.perf_counter() + + logger.info("=" * 80) + logger.info("NEW REQUEST") + logger.info(f"User message: {message}") + logger.info(f"Filters received: {filters_info}") + + def build_query(filters) -> tuple[str, dict]: + with log_timing("Query building"): + base_query = """ + WITH event_artist_ids AS ( + SELECT + e.id AS event_id, + UNNEST(ARRAY[ + e.artist1_id, e.artist2_id, e.artist3_id, e.artist4_id, e.artist5_id, + e.artist6_id, e.artist7_id, e.artist8_id, e.artist9_id, e.artist10_id + ]) AS artist_id + FROM events e + ) + SELECT + -- all event fields + e.*, + -- venue fields (prefixed to avoid name collisions) + v.id AS venue_id, + v.name AS venue_name, + v.description AS venue_description, + v.address AS venue_address, + v.city AS venue_city, + v.country AS venue_country, + v.capacity AS venue_capacity, + v.latitude AS venue_latitude, + v.longitude AS venue_longitude, + v.link AS venue_link, + v.image AS venue_image, + -- artists: + -- 1) human-friendly list of names + STRING_AGG(DISTINCT a.name, ', ' ORDER BY a.name) AS artist_names, + -- 2) full JSON objects + JSONB_AGG( + DISTINCT JSONB_BUILD_OBJECT( + 'id', a.id, + 'name', a.name, + 'description', a.description, + 'genres', a.genres, + 'link', a.link, + 'image', a.image, + 'country', a.country, + 'city', a.city + ) + ) FILTER (WHERE a.id IS NOT NULL) AS artists_json + FROM events e + JOIN venues v ON v.id = e.venue_id + LEFT JOIN event_artist_ids ea ON ea.event_id = e.id + LEFT JOIN artists a ON a.id = ea.artist_id + """ + + conditions = [] + params = {} + + if filters.place: + conditions.append("v.city = :city") + params["city"] = filters.place + logger.info(f"Filter: City = {filters.place}") + + if filters.dates: + logger.info(f"Filter: 
Processing {len(filters.dates)} dates") + logger.info(f"Date range: {filters.dates[0]} to {filters.dates[-1]}") + + date_conditions = [] + for idx, date_str in enumerate(filters.dates): + date_param = f"date_{idx}" + date_conditions.append( + f"(e.start_date <= :{date_param} AND COALESCE(e.end_date, e.start_date) >= :{date_param})" + ) + params[date_param] = datetime.strptime(date_str, "%Y-%m-%d").date() + + if date_conditions: + conditions.append(f"({' OR '.join(date_conditions)})") + logger.info(f"Generated {len(date_conditions)} date conditions") + + if conditions: + query = f"{base_query}\nWHERE {' AND '.join(conditions)}" + else: + query = base_query + + query += ( + "\nGROUP BY e.id, v.id\nORDER BY e.start_date, e.start_time NULLS LAST" + ) - logger.info(f"Generated SQL query: {query}") - return query, params + logger.debug(f"Full SQL query:\n{query}") + logger.info(f"Query params count: {len(params)}") + return query, params - async def query_db(filters_info: SQLFilter) -> str: + async def execute_query(query: str, params: dict) -> str: try: - query = build_query(filters_info) - async with AsyncSessionLocal() as session: - result = await session.execute(sqlalchemy.text(query)) - rows = result.fetchall() - return str([dict(row._mapping) for row in rows]) + logger.info("-" * 80) + logger.info("EXECUTING DATABASE QUERY") + + with log_timing("Database query execution"): + async with AsyncSessionLocal() as session: + db_start = time.perf_counter() + result = await session.execute(sqlalchemy.text(query), params) + db_elapsed = time.perf_counter() - db_start + logger.info(f" → SQL execution: {db_elapsed:.3f}s") + + fetch_start = time.perf_counter() + rows = result.fetchall() + fetch_elapsed = time.perf_counter() - fetch_start + logger.info(f" → Fetch rows: {fetch_elapsed:.3f}s") + + logger.info(f"✓ Query returned {len(rows)} events") + + if rows: + sample_event = dict(rows[0]._mapping) + logger.debug(f"Sample event keys: {list(sample_event.keys())}") + 
logger.debug(f"Sample event: {sample_event}") + + process_start = time.perf_counter() + events_list = [dict(row._mapping) for row in rows] + events_str = str(events_list) + process_elapsed = time.perf_counter() - process_start + logger.info(f" → Process results: {process_elapsed:.3f}s") + logger.info(f"Events data size: {len(events_str)} characters") + + return events_str except Exception as e: - logger.error(f"Error querying database: {e}") + logger.error(f"✗ Database error: {e}", exc_info=True) return f"Error querying database: {str(e)}" - filtered_events = await query_db(filters_info) - - prompt = f""" - You are a helpful assistant that can answer questions about musical life events. - You will be given the location and date range of dates that the user is interested in. - You will be given a list of filtered events from the events database. - You need to answer to the user given information in the message. - - Today is {today} - The user message is: {message} - The filtered events are: {filtered_events} - The filters are: {filters_info} - - IMPORTANT: If the user asks for a "list" or "all" events, show ALL available events from the filtered results. - Do not limit yourself to just 2-3 events unless specifically asked. - Format the response as a clear list with all relevant details for each event. + query, params = build_query(filters_info) + filtered_events = await execute_query(query, params) - Answer the user message based on the filtered events. - The answer should be in the same language as the user message. - """ + with log_timing("Prompt construction"): + prompt = f""" + You are a helpful assistant that can answer questions about musical life events. + You will be given the location and date range of dates that the user is interested in. + You will be given a list of filtered events from the events database. + You need to answer to the user given information in the message. 
+ Today is {today} + The user message is: {message} + The filters are: {filters_info} + The filtered events are: {filtered_events} -def get_sql_filters(message: str) -> SQLFilter: - prompt = f""" - Analyze the following user message and extract specific information for database filtering. + IMPORTANT: return a list of maximum 5 events selected from the filtered events, that have better fit with the user message. + The answer should be short and include: date, artists, price, link, venue name, link and a very short description of the event. - User message: "{message}" + The answer should be in the same language as the user message. + """ - Please extract and format the following information: + logger.info("-" * 80) + logger.info("CALLING LLM") + logger.info(f"Prompt length: {len(prompt)} characters") + logger.info(f"Prompt preview (first 500 chars):\n{prompt[:500]}...") - 1. DATE_RANGE: If a date range is mentioned, format as ["YYYY-MM-DD", "YYYY-MM-DD"] - 2. PLACE: If a location, city, or address is mentioned + logger.debug(f"Full prompt:\n{prompt}") - Examples: - - "Show me events in New York from 2024-01-15 to 2024-01-20" → date_range: ["2024-01-15", "2024-01-20"], place: "New York" - - "What's happening in London this week?" → place: "London" - - "What can I do in Paris?" → place: "Paris" + try: + with log_timing(f"LLM call ({settings.llm_provider})"): + response = llm.complete(prompt) - Return your response as a JSON object with only the fields that are present in the message. - If a field is not mentioned, omit it from the JSON response. 
- """ + logger.info(f"✓ LLM response received, length: {len(response.text)} characters") + logger.debug(f"Full LLM response:\n{response.text}") - try: - response = llm.complete(prompt) - logger.debug(f"Raw LLM response: {response.text}") - - filter_data = json.loads(response.text.strip()) - - # Validate expected structure - if not isinstance(filter_data, dict): - raise ValueError(f"Expected dict, got {type(filter_data)}") - - # Convert date strings to date objects - date_range = None - if filter_data.get("date_range"): - if ( - not isinstance(filter_data["date_range"], list) - or len(filter_data["date_range"]) != 2 - ): - raise ValueError( - f"Invalid date_range format: {filter_data['date_range']}" - ) - start_date = datetime.strptime( - filter_data["date_range"][0], "%Y-%m-%d" - ).date() - end_date = datetime.strptime( - filter_data["date_range"][1], "%Y-%m-%d" - ).date() - date_range = (start_date, end_date) - - sql_filter = SQLFilter( - date_range=date_range, - place=filter_data.get("place"), + # Total request time + total_elapsed = time.perf_counter() - request_start + logger.info("=" * 80) + logger.info( + f"🏁 TOTAL REQUEST TIME: {total_elapsed:.3f}s ({total_elapsed*1000:.1f}ms)" ) - logger.info(f"SQL filters: {sql_filter}") - return sql_filter + logger.info("=" * 80) - except json.JSONDecodeError as e: - logger.error( - f"Failed to parse LLM response as JSON: {e}. Response: {response.text[:200]}" - ) - return SQLFilter() - except (ValueError, KeyError, ValidationError) as e: - logger.error(f"Invalid filter data structure: {e}. 
Data: {filter_data}") - return SQLFilter() + return response.text except Exception as e: - logger.error(f"Unexpected error extracting SQL filters: {e}", exc_info=True) - return SQLFilter() + logger.error(f"✗ LLM error: {e}", exc_info=True) + total_elapsed = time.perf_counter() - request_start + logger.info(f"🏁 REQUEST FAILED after {total_elapsed:.3f}s") + logger.info("=" * 80) + return f"Sorry, I encountered an error: {str(e)}" From 327c5575e96cf3461ad30f3d3b656a98044fd2cc Mon Sep 17 00:00:00 2001 From: Oscar Arroyo Vega Date: Fri, 7 Nov 2025 20:24:24 +0000 Subject: [PATCH 4/6] refactor: various small changes to readme.md makefile and parser.py --- Makefile | 6 +----- README.md | 9 ++++----- services/parser/parser.py | 6 ++++-- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index f23b259..e052c42 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ all-services-up: cd services/frontend && uv run streamlit run main.py --server.address 0.0.0.0 --server.port 3000 & \ cd services/retriever && uv run uvicorn retriever.api:app --host 0.0.0.0 --port 8000 --reload -# --------------------SERVICE STARTERS -------------------------------------------------------------------------------------------------- +# --------------------SERVICE STARTERS INSIDE DEVCONTAINER -------------------------------------------------------------------------------------------------- start-frontend: cd services/frontend && uv sync && uv run streamlit run main.py --server.address 0.0.0.0 --server.port 3000 @@ -73,7 +73,3 @@ start-scraper: start-parser: cd services/parser && uv sync && uv run python parser.py - -# --------------------TESTS -------------------------------------------------------------------------------------------------- -test-parser: - pytest tests/test_parser.py -v -s --log-cli-level=INFO --capture=no --tb=short diff --git a/README.md b/README.md index a3d6256..48a693c 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ 
-laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1 +laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1 # Laiive #### what is 🫦Laiive @@ -16,11 +16,11 @@ laiive links the broken connection between events and public[^*] #### why 🫦laiive makes sense? -laiive was born to connect small events with people close to them, laiive does not focus on big musical events as many platforms are, laiive works on the human and community scale where small music events live. +laiive was born to connect small events with people close to them, laiive does not focus on big musical events as many platforms are, laiive works on the human and community scale where small music events live. #### why is 🫦laiive good? -laiive was born as an AI cultural agenda, with the AI hype and AI competition without the AI Safety layer laiive has become a subversive way of using AI, it tries to steal attention from the main digital platforms and bring it back to real world social meetings. laiive positions itself as an ethical AI app helping to develop a balanced digital-physical culture before the intermediate layer in our digital comunication becomes too powerful. +laiive was born as an AI cultural agenda, with the AI hype and AI competition without the AI Safety layer laiive has become a subversive way of using AI, it tries to steal attention from the main digital platforms and bring it back to real world social meetings. laiive positions itself as an ethical AI app helping to develop a balanced digital-physical culture before the intermediate layer in our digital comunication becomes too powerful. #### why is 🫦laiive profitable? @@ -30,8 +30,7 @@ laiive is a catalyzer of a world wide business that is actually unatended. 
laiiv ## Services -laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1 - +laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1laiive1 ### UI a ZERO CLIC UI is the public view of laiive, easy to publish an event, easy to find an event. diff --git a/services/parser/parser.py b/services/parser/parser.py index 7e5a1ee..e2b9519 100644 --- a/services/parser/parser.py +++ b/services/parser/parser.py @@ -9,6 +9,8 @@ from datetime import datetime import os +# TODO PARSER NEEDS API AND ISOLATED SYSTEM CONFIG AND ENV +# In the long term THINK PARSER microservice AS A GENERAL PURPOSE PARSER LIBRARY that gets json files and pushes them to a SQL database with different levels of checks, and an LLM check layer and a final human in the loop. observability tool could be a cool feature. class DatabaseParser: def __init__(self, database_url: str = None): @@ -156,7 +158,7 @@ def get_review_summary( self, ) -> Dict[ str, Any - ]: # TODO add all the data for review and check reviews for artists and venues + ]: # TODO add all the data for review and check reviews for artists and venues // THIS FEATURE SHOULD BE BEFORE THE PARSE LIBRARY.
IN THE SCRAPER Post transformations try: with open(self.review_file, "r", encoding="utf-8") as f: lines = f.readlines() @@ -196,7 +198,7 @@ def remove_review_file(self): def _normalize_text(self, text: str) -> str: if not text: return "" - normalized = re.sub(r"[^\w\s]", "", text.lower().strip()) + normalized = re.sub(r"[^\w\s]", "", text.lower().strip()) # FIXME no need and maybe some names include special characters normalized = re.sub(r"\s+", " ", normalized) return normalized From 99cdb92b7b008ab6f4abd241b8d07920acdaaff9 Mon Sep 17 00:00:00 2001 From: Oscar Arroyo Vega Date: Fri, 7 Nov 2025 20:26:04 +0000 Subject: [PATCH 5/6] refactor: remove pgAdmin feature and container for simplicity and lack of use --- docker-compose.override.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/docker-compose.override.yml b/docker-compose.override.yml index d6feda6..0bb9f9e 100644 --- a/docker-compose.override.yml +++ b/docker-compose.override.yml @@ -58,16 +58,6 @@ services: target: /app ignore: - .venv/ - pgadmin: - # TODO migrate to DBeaver - image: dpage/pgadmin4 - environment: - PGADMIN_DEFAULT_EMAIL: admin@admin.com # pragma: allowlist secret - PGADMIN_DEFAULT_PASSWORD: admin # pragma: allowlist secret - ports: - - "5050:80" - depends_on: - - db volumes: frontend-venv: From d333ab4a30933a1fd37b17408607747437a87689 Mon Sep 17 00:00:00 2001 From: Oscar Arroyo Vega Date: Fri, 7 Nov 2025 20:30:46 +0000 Subject: [PATCH 6/6] feat: changes to frontend aesthetics, UX and brand alignment --- services/frontend/main.py | 73 +++- services/frontend/pyproject.toml | 1 + services/frontend/uv.lock | 698 ++++++++++++++++--------- 3 files changed, 412 insertions(+), 360 deletions(-) diff --git a/services/frontend/main.py b/services/frontend/main.py index f3c8305..cac3ca8 100644 --- a/services/frontend/main.py +++ b/services/frontend/main.py @@ -9,12 +9,12 @@