7 changes: 7 additions & 0 deletions .github/actions/spelling/allow.txt
Collaborator
These probably don't need to be added. The spell-checker should already handle these terms.

Author
@holtskinner Good point, removed those entries. Thanks!

Expand Up @@ -81,6 +81,7 @@ atms
Atulan
auc
aujourd'hui
authDomain
authtoken
autocal
autoflake
Expand Down Expand Up @@ -453,6 +454,7 @@ flac
Flahs
Flatform
Flipkart
FlutterFire
floormat
FLX
fmeasure
Expand Down Expand Up @@ -668,6 +670,7 @@ informati
inpaint
INR
IOMGR
iosBundleId
Iosif
iostream
ipd
Expand Down Expand Up @@ -742,6 +745,7 @@ kfp
Khanh
khz
kickstart
kIsWeb
Kipchoge
Kleiser
kmeans
Expand Down Expand Up @@ -864,6 +868,7 @@ MCQ
mdc
MDD
Meawad
measurementId
mec
MEDIANCUT
mediapy
Expand All @@ -878,6 +883,7 @@ Merhaba
mesop
Mesop
metadatas
messagingSenderId
metamath
METAMATH
metaverse
Expand Down Expand Up @@ -1353,6 +1359,7 @@ Statcast
stdcall
stext
STIX
storageBucket
Storrer
stp
Strappy
3 changes: 3 additions & 0 deletions .github/actions/spelling/patterns.txt
Expand Up @@ -254,3 +254,6 @@ GHSA(?:-[0-9a-z]{4}){3}

# mailto urls
mailto:[-a-zA-Z=;:/?%&0-9+@._]{3,}

# Python type hints with VertexAI class (langchain)
\bVertexAI(?:SearchRetriever|)\b
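For context on the new pattern: it tells the spelling action to ignore `VertexAI` and `VertexAISearchRetriever` wherever they appear as whole words, so the class names no longer need individual allow-list entries. A minimal sketch of what the regex does and does not match (the sample strings are hypothetical, not taken from this PR):

```python
import re

# Pattern copied from patterns.txt; matches the bare class name or the
# retriever variant as a whole word.
VERTEXAI_PATTERN = re.compile(r"\bVertexAI(?:SearchRetriever|)\b")

samples = [
    "retriever: VertexAISearchRetriever = build_retriever()",  # ignored by the checker
    "llm = VertexAI(model_name=model, project=project_id)",    # ignored by the checker
    "vertexai.init(project=project_id)",                       # still spell-checked (lowercase)
]

for line in samples:
    print(bool(VERTEXAI_PATTERN.search(line)), line)
```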
2 changes: 1 addition & 1 deletion .gitignore
Expand Up @@ -19,7 +19,7 @@ dist/
downloads/
eggs/
.eggs/
lib/
/lib/
lib64/
parts/
sdist/
2 changes: 1 addition & 1 deletion .ruff.toml
@@ -1,7 +1,7 @@
line-length = 88
indent-width = 4 # Google Style Guide §3.4: 4 spaces

target-version = "py310" # Minimum Python version
target-version = "py311" # Minimum Python version

[lint]
ignore = [
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

import enum
import logging
from typing import Literal, TypedDict, TypeVar
from typing import Literal, TypedDict, TypeVar, Union

from concierge import schemas, utils
from google import genai
Expand Down Expand Up @@ -69,8 +69,9 @@ def build_semantic_router_node(
Builds a LangGraph node that can dynamically route between sub-agents based on user intent.
"""

# ignore typing errors, this creates a valid literal type
NextNodeT = Literal[*class_node_mapping.values()] # type: ignore
# Python 3.10 compatible: create type annotation from mapping values
# Note: Using str directly as dynamic Literal construction is complex in 3.10
NextNodeT = str # type: ignore[valid-type]

response_schema = genai_types.Schema(
properties={
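On the router change above: `Literal[*class_node_mapping.values()]` needs the Python 3.11+ unpack-in-subscript syntax, and the PR falls back to a plain `str` annotation. If keeping the runtime `Literal` on older interpreters ever matters, subscripting `Literal` with a tuple builds the same object without the newer syntax, though static checkers still cannot verify a dynamically built `Literal`. A sketch under that assumption, with a hypothetical mapping standing in for `class_node_mapping`:

```python
from typing import Literal

# Hypothetical stand-in for the router's class_node_mapping.
class_node_mapping = {"RETRIEVAL": "retrieval_node", "CHITCHAT": "chitchat_node"}

# Python 3.11+ only: unpacking inside a subscript (the form this PR replaces).
# NextNodeT = Literal[*class_node_mapping.values()]

# Works on older runtimes too: subscripting with a tuple yields the same
# Literal object, but type checkers still treat it as dynamic.
NextNodeT = Literal[tuple(class_node_mapping.values())]  # type: ignore[valid-type]

print(NextNodeT)  # typing.Literal['retrieval_node', 'chitchat_node']
```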
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,17 @@
# agreement with Google.
"""Common schemas for the concierge demo."""

from typing import Callable, NamedTuple, Protocol, TypedDict
from typing import Callable, NamedTuple, Protocol, TypedDict, TypeVar, Generic
from typing_extensions import ParamSpec

from google.genai import types as genai_types
import pydantic

T = TypeVar("T")
P = ParamSpec("P")

class Node[T, **P](NamedTuple):

class Node(NamedTuple, Generic[T, P]): # type: ignore[name-defined]
"""
Represents a node in a LangGraph.

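The schema change above rewrites the PEP 695 generic class `Node[T, **P]` into the pre-3.12 `Generic[T, P]` spelling (runnable on Python 3.11, where `NamedTuple` may be combined with `Generic`). A small usage sketch of that spelling; the field names and the `greet` helper are illustrative, not the repo's actual `Node` definition:

```python
from typing import Callable, Generic, NamedTuple, TypeVar
from typing_extensions import ParamSpec

T = TypeVar("T")
P = ParamSpec("P")


class Node(NamedTuple, Generic[T, P]):
    """A named callable, generic over its return type and parameters."""

    name: str
    fn: Callable[P, T]


def greet(name: str) -> str:
    return f"Hello, {name}!"


# The ParamSpec lets checkers relate fn's parameters to its call sites.
node = Node(name="greeter", fn=greet)
print(node.fn("concierge"))  # Hello, concierge!
```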
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
GoogleSearchRetrieval,
Tool,
)
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, ValidationError
from rich import print as rich_print
from termcolor import colored

Expand Down Expand Up @@ -601,10 +601,10 @@ async def _enhance_section_with_search(

Section to enhance: {section.title}
City: {city_data.summary.city}

Current analysis:
{section.content}

Requirements:
1. Add supporting data with citations for existing analysis
2. Cross-check uncited claims and add citations
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
// File generated by FlutterFire CLI.
// ignore_for_file: type=lint
import 'package:firebase_core/firebase_core.dart' show FirebaseOptions;
import 'package:flutter/foundation.dart'
show defaultTargetPlatform, kIsWeb, TargetPlatform;

/// Default [FirebaseOptions] for use with your Firebase apps.
///
/// Example:
/// ```dart
/// import 'firebase_options.dart';
/// // ...
/// await Firebase.initializeApp(
/// options: DefaultFirebaseOptions.currentPlatform,
/// );
/// ```
class DefaultFirebaseOptions {
static FirebaseOptions get currentPlatform {
if (kIsWeb) {
return web;
}
switch (defaultTargetPlatform) {
case TargetPlatform.android:
return android;
case TargetPlatform.iOS:
return ios;
case TargetPlatform.macOS:
return macos;
case TargetPlatform.windows:
return windows;
case TargetPlatform.linux:
throw UnsupportedError(
'DefaultFirebaseOptions have not been configured for linux - '
'you can reconfigure this by running the FlutterFire CLI again.',
);
default:
throw UnsupportedError(
'DefaultFirebaseOptions are not supported for this platform.',
);
}
}

static const FirebaseOptions web = FirebaseOptions(
apiKey: 'REPLACE_WITH_YOUR_API_KEY',
appId: 'REPLACE_WITH_YOUR_APP_ID',
messagingSenderId: 'REPLACE_WITH_YOUR_SENDER_ID',
projectId: 'REPLACE_WITH_YOUR_PROJECT_ID',
authDomain: 'REPLACE_WITH_YOUR_AUTH_DOMAIN',
storageBucket: 'REPLACE_WITH_YOUR_STORAGE_BUCKET',
measurementId: 'REPLACE_WITH_YOUR_MEASUREMENT_ID',
);

static const FirebaseOptions android = FirebaseOptions(
apiKey: 'REPLACE_WITH_YOUR_API_KEY',
appId: 'REPLACE_WITH_YOUR_APP_ID',
messagingSenderId: 'REPLACE_WITH_YOUR_SENDER_ID',
projectId: 'REPLACE_WITH_YOUR_PROJECT_ID',
storageBucket: 'REPLACE_WITH_YOUR_STORAGE_BUCKET',
);

static const FirebaseOptions ios = FirebaseOptions(
apiKey: 'REPLACE_WITH_YOUR_API_KEY',
appId: 'REPLACE_WITH_YOUR_APP_ID',
messagingSenderId: 'REPLACE_WITH_YOUR_SENDER_ID',
projectId: 'REPLACE_WITH_YOUR_PROJECT_ID',
storageBucket: 'REPLACE_WITH_YOUR_STORAGE_BUCKET',
iosBundleId: 'com.example.app',
);

static const FirebaseOptions macos = FirebaseOptions(
apiKey: 'REPLACE_WITH_YOUR_API_KEY',
appId: 'REPLACE_WITH_YOUR_APP_ID',
messagingSenderId: 'REPLACE_WITH_YOUR_SENDER_ID',
projectId: 'REPLACE_WITH_YOUR_PROJECT_ID',
storageBucket: 'REPLACE_WITH_YOUR_STORAGE_BUCKET',
iosBundleId: 'com.example.app',
);

static const FirebaseOptions windows = FirebaseOptions(
apiKey: 'REPLACE_WITH_YOUR_API_KEY',
appId: 'REPLACE_WITH_YOUR_APP_ID',
messagingSenderId: 'REPLACE_WITH_YOUR_SENDER_ID',
projectId: 'REPLACE_WITH_YOUR_PROJECT_ID',
authDomain: 'REPLACE_WITH_YOUR_AUTH_DOMAIN',
storageBucket: 'REPLACE_WITH_YOUR_STORAGE_BUCKET',
measurementId: 'REPLACE_WITH_YOUR_MEASUREMENT_ID',
);
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@
import requests
import streamlit as st

# --- Logger Setup ---
logger = logging.getLogger(__name__)

# --- Configuration Constants ---
DEBUG = False # Set to False in production
PROMPT_FOLDER = "prompts"
Expand Down Expand Up @@ -180,7 +183,7 @@ def get_rephraser_prompt(query: str) -> Optional[str]:

try:
formatted_prompt = """Now, please rephrase the following customer query:

{query}
"""
formatted_prompt = prompt_template + formatted_prompt.format(query=query)
Expand Down Expand Up @@ -222,19 +225,19 @@ def get_summarizer_prompt(documents: List[str], query: str) -> Optional[str]:
formatted_prompt = """
Now it's your turn! Here is the query and relevant documents:
Customer Search Query: {query}

Document Texts:
[Start of Document 1]
{Text_of_Document_1}
[End of Document 1]

[Start of Document 2]
{Text_of_Document_2}
[End of Document 2]

[Start of Document 3]
{Text_of_Document_3}
[End of Document 3]
[End of Document 3]
"""

try:
Expand Down Expand Up @@ -753,7 +756,7 @@ def setup_retriever_sidebar() -> Optional[VertexAI]:
Sets up the sidebar for Vertex AI Search connection and returns the LLM.

Returns:
An initialized VertexAI LLM object for use with the retriever,
An initialized Vertex AI LLM object for use with the retriever,
or None if configuration fails.
"""
with st.sidebar:
Expand Down Expand Up @@ -803,8 +806,8 @@ def setup_retriever_sidebar() -> Optional[VertexAI]:
try:
llm = VertexAI(model_name=DEFAULT_VERTEX_SEARCH_MODEL, project=project_id)
except Exception as e:
st.error(f"Failed to initialize VertexAI LLM for retriever: {e}")
logging.exception("VertexAI LLM init error:")
st.error(f"Failed to initialize Vertex AI LLM for retriever: {e}")
logging.exception("Vertex AI LLM init error:")
return None

# Create the retriever (only if a datastore is selected)
Expand Down Expand Up @@ -844,7 +847,7 @@ def clean_json(response_text: str):
response_text = response_text.replace("{{", "{").replace("}}", "}")

pattern = r"(?:^```.*)"
modified_text = re.sub(pattern, "", response_text, 0, re.MULTILINE)
modified_text = re.sub(pattern, "", response_text, count=0, flags=re.MULTILINE)
try:
# print(modified_text)
result = json.loads(modified_text)
Expand Down Expand Up @@ -968,23 +971,23 @@ def judge_responses(

# Load the prompt for the judge model
judge_prompt = f"""Given the following QUESTION and the CONTEXT which is the source of truth to use, judge each model's response:

Here are two responses from different language models:

Response from model on the left:
QUESTION:
{left_question}

CONTEXT:
{left_context}

Response A (Model on the Left):
{left_response['text']}

Response from model on the right:
QUESTION:
{right_question}

CONTEXT:
{right_context}

Expand Down Expand Up @@ -1030,8 +1033,8 @@ def judge_responses(
# --- Main Application ---
def main(args: argparse.Namespace): # <-- Pass parsed args to main
"""Runs the main Streamlit application flow."""
st.set_page_config("Vertex RAG Compare with Dual LLMs", layout="wide")
st.title("📊 Vertex RAG Compare with 2 LLM's")
st.set_page_config("Vertex AI RAG Compare with Dual LLMs", layout="wide")
st.title("📊 Vertex AI RAG Compare with 2 LLM's")
st.caption("Compare LLM responses using Vertex AI Search RAG")

# --- Initialization ---
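One note on the `clean_json` change above: spelling out `count=0, flags=re.MULTILINE` avoids the classic pitfall where a flags value lands in the positional `count` slot, and newer CPython releases deprecate passing these arguments positionally to `re.sub`. A small sketch with a hypothetical model response (not taken from the app):

```python
import re

response_text = '```json\n{"city": "Paris"}\n```'
pattern = r"(?:^```.*)"

# Keyword form used in the PR: count=0 replaces every match, and MULTILINE
# lets ^ anchor at each line start so both fence lines are stripped.
cleaned = re.sub(pattern, "", response_text, count=0, flags=re.MULTILINE)
print(cleaned.strip())  # {"city": "Paris"}

# Positional pitfall (illustrative only): the fourth positional argument is
# count, so re.MULTILINE (value 8) becomes a replacement limit and no flags
# are applied at all.
wrong = re.sub(pattern, "", response_text, re.MULTILINE)
```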