diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml
index 8f21a6d4..e5a652bc 100644
--- a/.github/workflows/pipeline.yml
+++ b/.github/workflows/pipeline.yml
@@ -31,51 +31,25 @@ jobs:
         shell: bash
         run: semgrep scan --config auto --error

-  test:
-    runs-on: ubuntu-latest
+  test-without-llms:
+    uses: ./.github/workflows/test.yml
+    with:
+      pytest-marks: "not openai"
+    secrets: inherit # pragma: allowlist secret
+
+  test-with-llms:
+    # Don't run this job for the readonly queue branches
+    if: ${{ !startsWith(github.ref_name, 'gh-readonly-queue') }}
     strategy:
       matrix:
-        pytest-marks: ["not openai", "openai and (not brief_creation_team and not campaign_creation_team and not weekly_analysis_team and not get_info_from_the_web_page and not fastapi_openapi_team)", "brief_creation_team and openai", "campaign_creation_team and openai", "weekly_analysis_team", "get_info_from_the_web_page", "fastapi_openapi_team"]
+        pytest-marks: ["openai and (not brief_creation_team and not campaign_creation_team and not weekly_analysis_team and not get_info_from_the_web_page and not fastapi_openapi_team)", "brief_creation_team and openai", "campaign_creation_team and openai", "weekly_analysis_team", "get_info_from_the_web_page", "fastapi_openapi_team"]
       fail-fast: false
-    services:
-      postgres:
-        image: postgres:13
-        env:
-          POSTGRES_USER: postgres
-          POSTGRES_PASSWORD: postgres
-          POSTGRES_DB: gads
-        ports:
-          - 5432:5432
-        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
-    env:
-      INFOBIP_API_KEY: "dummy_key"
-      INFOBIP_BASE_URL: "dummy_url"
-      DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/gads"
-      AZURE_API_VERSION: ${{ secrets.STAGING_AZURE_API_VERSION }}
-      AZURE_API_ENDPOINT: ${{ secrets.STAGING_AZURE_API_ENDPOINT }}
-      AZURE_API_ENDPOINT_GPT4O: ${{ secrets.STAGING_AZURE_API_ENDPOINT_GPT4O }}
-      AZURE_GPT4O_MODEL: ${{ secrets.STAGING_AZURE_GPT4O_MODEL }}
-      AZURE_GPT4_MODEL: ${{ secrets.STAGING_AZURE_GPT4_MODEL }}
-      AZURE_GPT35_MODEL: ${{ secrets.STAGING_AZURE_GPT35_MODEL }}
-      AZURE_OPENAI_API_KEY: ${{ secrets.STAGING_AZURE_OPENAI_API_KEY }}
-      AZURE_OPENAI_API_KEY_GPT4O: ${{ secrets.STAGING_AZURE_OPENAI_API_KEY_GPT4O }}
-      GOOGLE_SHEETS_OPENAPI_URL: ${{ vars.STAGING_GOOGLE_SHEETS_OPENAPI_URL }}
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-          cache: "pip"
-          cache-dependency-path: pyproject.toml
-      - name: Install Dependencies
-        run: pip install -e ".[dev]"
-      - name: Prisma generate
-        run: prisma generate
-      - name: Create client secrets file
-        run: echo '{"web":{"client_id":"dummy.apps.googleusercontent.com","project_id":"dummy-id","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"dummy-secret","redirect_uris":["http://localhost:9000/login/callback"]}}' > client_secret.json
-      - name: Test
-        run: pytest tests/ci/ -m "${{ matrix.pytest-marks }}"
+    uses: ./.github/workflows/test.yml
+    with:
+      pytest-marks: ${{ matrix.pytest-marks }}
+    secrets: inherit # pragma: allowlist secret
+    needs:
+      - test-without-llms

   docker_build_push:
     runs-on: ubuntu-22.04
@@ -115,7 +89,7 @@ jobs:
     if: github.event.pull_request.draft == false
     needs:
-      - test
+      - test-without-llms
       - static_analysis
       - docker_build_push

@@ -132,7 +106,9 @@ jobs:
     defaults:
       run:
         shell: bash
-    needs: [check]
+    needs:
+      - check
+      - test-with-llms
     if: github.ref_name == 'main' || github.ref_name == 'dev'
     env:
       GITHUB_USERNAME: ${{ github.actor }}
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 00000000..dcedb584
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,52 @@
+name: Test
+
+on:
+  workflow_call:
+    inputs:
+      pytest-marks:
+        description: 'Pytest marks to run'
+        required: true
+        type: string
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    services:
+      postgres:
+        image: postgres:13
+        env:
+          POSTGRES_USER: postgres
+          POSTGRES_PASSWORD: postgres # pragma: allowlist secret
+          POSTGRES_DB: gads
+        ports:
+          - 5432:5432
+        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
+    env:
+      INFOBIP_API_KEY: "dummy_key" # pragma: allowlist secret
+      INFOBIP_BASE_URL: "dummy_url"
+      DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/gads" # pragma: allowlist secret
+      AZURE_API_VERSION: ${{ secrets.STAGING_AZURE_API_VERSION }}
+      AZURE_API_ENDPOINT: ${{ secrets.STAGING_AZURE_API_ENDPOINT }}
+      AZURE_API_ENDPOINT_GPT4O: ${{ secrets.STAGING_AZURE_API_ENDPOINT_GPT4O }}
+      AZURE_GPT4O_MODEL: ${{ secrets.STAGING_AZURE_GPT4O_MODEL }}
+      AZURE_GPT4_MODEL: ${{ secrets.STAGING_AZURE_GPT4_MODEL }}
+      AZURE_GPT35_MODEL: ${{ secrets.STAGING_AZURE_GPT35_MODEL }}
+      AZURE_OPENAI_API_KEY: ${{ secrets.STAGING_AZURE_OPENAI_API_KEY }}
+      AZURE_OPENAI_API_KEY_GPT4O: ${{ secrets.STAGING_AZURE_OPENAI_API_KEY_GPT4O }}
+      GOOGLE_SHEETS_OPENAPI_URL: ${{ vars.STAGING_GOOGLE_SHEETS_OPENAPI_URL }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+          cache: "pip"
+          cache-dependency-path: pyproject.toml
+      - name: Install Dependencies
+        run: pip install -e ".[dev]"
+      - name: Prisma generate
+        run: prisma generate
+      - name: Create client secrets file
+        run: echo '{"web":{"client_id":"dummy.apps.googleusercontent.com","project_id":"dummy-id","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"dummy-secret","redirect_uris":["http://localhost:9000/login/callback"]}}' > client_secret.json
+      - name: Test
+        run: pytest tests/ci/ -m "${{ inputs.pytest-marks }}"
diff --git a/.secrets.baseline b/.secrets.baseline
index bf4b3254..80b8ce50 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -112,32 +112,6 @@
     }
   ],
   "results": {
-    ".github/workflows/pipeline.yml": [
-      {
-        "type": "Secret Keyword",
-        "filename": ".github/workflows/pipeline.yml",
-        "hashed_secret": "afc848c316af1a89d49826c5ae9d00ed769415f3",
-        "is_verified": false,
-        "line_number": 45,
-        "is_secret": false
-      },
-      {
-        "type": "Secret Keyword",
-        "filename": ".github/workflows/pipeline.yml",
-        "hashed_secret": "48ca756d77c7d21242d537bb15aed63b5acd5edd",
-        "is_verified": false,
-        "line_number": 51,
-        "is_secret": false
-      },
-      {
-        "type": "Basic Auth Credentials",
-        "filename": ".github/workflows/pipeline.yml",
-        "hashed_secret": "afc848c316af1a89d49826c5ae9d00ed769415f3",
-        "is_verified": false,
-        "line_number": 53,
-        "is_secret": false
-      }
-    ],
     "tests/ci/test_team.py": [
       {
         "type": "Secret Keyword",
@@ -157,5 +131,5 @@
       }
     ]
   },
-  "generated_at": "2024-04-02T12:08:41Z"
+  "generated_at": "2024-09-04T06:37:54Z"
 }
diff --git a/captn/captn_agents/backend/tools/_team_with_client_tools.py b/captn/captn_agents/backend/tools/_team_with_client_tools.py
index 2c17375a..8a34e09f 100644
--- a/captn/captn_agents/backend/tools/_team_with_client_tools.py
+++ b/captn/captn_agents/backend/tools/_team_with_client_tools.py
@@ -1,15 +1,15 @@
 import httpx
-from fastagency.openapi.client import Client
+from fastagency.api.openapi.client import OpenAPI

 __all__ = ("create_client",)


-def create_client(openapi_url: str) -> Client:
+def create_client(openapi_url: str) -> OpenAPI:
     with httpx.Client() as httpx_client:
         response = httpx_client.get(openapi_url)
         response.raise_for_status()
         openapi_spec = response.text

-    client = Client.create(openapi_spec)
+    client = OpenAPI.create(openapi_spec)

     return client
diff --git a/captn/captn_agents/backend/tools/patch_client.py b/captn/captn_agents/backend/tools/patch_client.py
index fcf67cdc..0e59e221 100644
--- a/captn/captn_agents/backend/tools/patch_client.py
+++ b/captn/captn_agents/backend/tools/patch_client.py
@@ -3,7 +3,7 @@
 from typing import Any, Callable, Dict, Optional

 from autogen.agentchat import ConversableAgent
-from fastagency.openapi.client import Client
+from fastagency.api.openapi.client import OpenAPI

 _org_register_for_execution: Optional[Callable[..., None]] = None

@@ -23,16 +23,16 @@ def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Any:


 def get_patch_register_for_execution(
-    client: Client, kwargs_to_patch: Dict[str, Any]
+    client: OpenAPI, kwargs_to_patch: Dict[str, Any]
 ) -> Callable[..., None]:
     def _patch_register_for_execution() -> None:
         global _org_register_for_execution
         if _org_register_for_execution is None:
-            _org_register_for_execution = Client.register_for_execution
+            _org_register_for_execution = OpenAPI.register_for_execution

         def register_for_execution(
-            self: Client,
+            self: OpenAPI,
             agent: ConversableAgent,
         ) -> None:
             global _org_register_for_execution
diff --git a/google_ads/application.py b/google_ads/application.py
index 0203fb87..bc37c1ed 100644
--- a/google_ads/application.py
+++ b/google_ads/application.py
@@ -817,23 +817,55 @@ def _create_ad_group_ad_set_attr(
     # Set a pinning to always choose this asset for HEADLINE_1. Pinning is
     # optional; if no pinning is set, then headlines and descriptions will be
     # rotated and the ones that perform best will be used more often.
+    NUM_HEADLINES_1 = 3
+    NUM_HEADLINES_2 = 3
+    TOTAL_HEADLINES_1_2 = NUM_HEADLINES_1 + NUM_HEADLINES_2
+    num_headlines = len(model_dict["headlines"])
+    if num_headlines < NUM_HEADLINES_1:
+        raise ValueError(f"Number of headlines must be at least {NUM_HEADLINES_1}.")
+
+    # Pin as first headlines
+    headlines = [
+        _create_ad_text_asset(
+            client, headline, client.enums.ServedAssetFieldTypeEnum.HEADLINE_1
+        )
+        for headline in model_dict["headlines"][0:NUM_HEADLINES_1]
+    ]

-    # Headline 1
-    served_asset_enum = client.enums.ServedAssetFieldTypeEnum.HEADLINE_1
-    pinned_headline = _create_ad_text_asset(
-        client, model_dict["headlines"][0], served_asset_enum
-    )
+    if num_headlines >= NUM_HEADLINES_1 and num_headlines <= TOTAL_HEADLINES_1_2:
+        # Pin as second headlines
+        headlines += [
+            _create_ad_text_asset(
+                client, headline, client.enums.ServedAssetFieldTypeEnum.HEADLINE_2
+            )
+            for headline in model_dict["headlines"][NUM_HEADLINES_1:]
+        ]
+    else:
+        # Pin as second headlines
+        headlines += [
+            _create_ad_text_asset(
+                client, headline, client.enums.ServedAssetFieldTypeEnum.HEADLINE_2
+            )
+            for headline in model_dict["headlines"][NUM_HEADLINES_1:TOTAL_HEADLINES_1_2]
+        ]
+        # Don't pin the rest
+        headlines += [
+            _create_ad_text_asset(client, headline)
+            for headline in model_dict["headlines"][TOTAL_HEADLINES_1_2:]
+        ]

-    # Headlines 2-15
-    headlines = [pinned_headline]
-    headlines += [
-        _create_ad_text_asset(client, headline)
-        for headline in model_dict["headlines"][1:]
-    ]
     operation_create.ad.responsive_search_ad.headlines.extend(headlines)

+    # pin first description
     descriptions = [
-        _create_ad_text_asset(client, desc) for desc in model_dict["descriptions"]
+        _create_ad_text_asset(
+            client,
+            model_dict["descriptions"][0],
+            client.enums.ServedAssetFieldTypeEnum.DESCRIPTION_1,
+        )
+    ]
+    descriptions += [
+        _create_ad_text_asset(client, desc) for desc in model_dict["descriptions"][1:]
     ]
     operation_create.ad.responsive_search_ad.descriptions.extend(descriptions)
diff --git a/pyproject.toml b/pyproject.toml
index 77d8d918..841ece5b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,9 +56,9 @@ lint = [
     "types-docutils",
     "types-requests",
     "mypy==1.11.2",
-    "black==24.4.2",
+    "black==24.8.0",
     "isort>=5",
-    "ruff==0.6.3",
+    "ruff==0.6.4",
     "pyupgrade-directories",
     "bandit==1.7.9",
     "semgrep==1.78.0",
@@ -85,12 +85,12 @@ testing = [

 benchmarking = [
     "typer==0.12.5",
-    "filelock==3.15.4",
+    "filelock==3.16.0",
     "tabulate==0.9.0",
 ]

 agents = [
-    "fastapi==0.112.2",
+    "fastapi==0.114.0",
     "APScheduler==3.10.4",
     "prisma==0.13.1",
     "google-ads==25.0.0",
@@ -99,7 +99,7 @@ agents = [
     "python-dotenv==1.0.1",
     "pyautogen[websurfer,websockets,anthropic,together]==0.2.35",
     "pandas>=2.1",
-    "fastcore==1.7.1",
+    "fastcore==1.7.4",
     "asyncer==0.0.7",
     "pydantic==2.8.2",
     "tenacity==9.0.0",
@@ -110,7 +110,7 @@ agents = [
     "opentelemetry-exporter-otlp==1.27.0",
     "openpyxl==3.1.5",
     "aiofiles==24.1.0",
-    "fastagency[studio]@git+https://github.com/airtai/fastagency.git@main",
+    "fastagency[studio,openapi]@git+https://github.com/airtai/fastagency.git@main",
     "markdownify==0.13.1",
     "python-multipart==0.0.9",
 ]
diff --git a/tests/ci/captn/captn_agents/backend/teams/test_weather_team.py b/tests/ci/captn/captn_agents/backend/teams/test_weather_team.py
index 3a0c684b..dcc4720a 100644
--- a/tests/ci/captn/captn_agents/backend/teams/test_weather_team.py
+++ b/tests/ci/captn/captn_agents/backend/teams/test_weather_team.py
@@ -23,14 +23,14 @@ def test_init(self) -> None:
             conv_id=456,
         )
         agent_number_of_functions_dict = {
-            "weather_forecaster": 1,
+            "weather_forecaster": 2,
             "news_reporter": 1,
             "user_proxy": 0,
         }

         helper_test_init(
             team=weather_team,
-            number_of_registered_executions=2,
+            number_of_registered_executions=3,
             agent_number_of_functions_dict=agent_number_of_functions_dict,
             team_class=WeatherTeam,
         )