From 6b54b5bfc09ce290c648a8aa0eb6cfffa311f87b Mon Sep 17 00:00:00 2001
From: Daniel Campagnoli
Date: Mon, 21 Oct 2024 16:10:17 +0800
Subject: [PATCH] Add WIP of new file select agent. Implement Vercel AI for DeepSeek, OpenAI and Vertex

---
 bin/aider | 3 +-
 src/llm/llm.ts | 4 +-
 src/llm/models/deepseek.ts | 14 +-
 src/llm/models/openai.ts | 14 +-
 src/llm/models/vertexai.ts | 19 +-
 src/routes/code/code-routes.ts | 4 +-
 src/swe/SWEBenchAgent.ts | 2 +-
 src/swe/codeEditingAgent.ts | 2 +-
 src/swe/codeFunctions.ts | 36 +++
 src/swe/discovery/README.md | 2 +
 src/swe/{ => discovery}/codebaseQuery.ts | 0
 src/swe/{ => discovery}/discovery.ts | 2 +-
 src/swe/discovery/selectFilesAgent.ts | 289 ++++++++++++++++++
 .../{ => discovery}/selectFilesToEdit.test.ts | 2 +-
 src/swe/{ => discovery}/selectFilesToEdit.ts | 2 +-
 15 files changed, 379 insertions(+), 16 deletions(-)
 create mode 100644 src/swe/codeFunctions.ts
 create mode 100644 src/swe/discovery/README.md
 rename src/swe/{ => discovery}/codebaseQuery.ts (100%)
 rename src/swe/{ => discovery}/discovery.ts (98%)
 create mode 100644 src/swe/discovery/selectFilesAgent.ts
 rename src/swe/{ => discovery}/selectFilesToEdit.test.ts (92%)
 rename src/swe/{ => discovery}/selectFilesToEdit.ts (99%)

diff --git a/bin/aider b/bin/aider
index 3dbfd979..108853b4 100755
--- a/bin/aider
+++ b/bin/aider
@@ -2,5 +2,6 @@
 source variables/local.env
 export VERTEXAI_PROJECT=$GCLOUD_PROJECT
 export VERTEXAI_LOCATION=$GCLOUD_CLAUDE_REGION
+export OPENAI_API_KEY=$OPENAI_API_KEY
 echo $VERTEXAI_PROJECT $VERTEXAI_LOCATION
-aider --model vertex_ai/claude-3-5-sonnet@20240620
+aider --o1-preview --editor-model vertex_ai/claude-3-5-sonnet@20240620
diff --git a/src/llm/llm.ts b/src/llm/llm.ts
index b8c6d6e4..90f1675c 100644
--- a/src/llm/llm.ts
+++ b/src/llm/llm.ts
@@ -71,7 +71,7 @@ export function assistant(text: string): LlmMessage {
 export interface LLM {
   generateTextFromMessages(messages: LlmMessage[], opts?: GenerateTextOptions): Promise<string>;

-  /* Generates a response that is expected to be in JSON format, and returns the object */
+  /* Generates a response that is expected to be in JSON format, or to end with a JSON object wrapped in <json> tags or Markdown triple backticks, and returns the object */
   generateJsonFromMessages<T>(messages: LlmMessage[], opts?: GenerateJsonOptions): Promise<T>;

   /* Generates text from a LLM */
@@ -108,7 +108,7 @@ export interface LLM {
   getService(): string;

   /**
-   * The LLM model identifier
+   * The LLM model identifier. This should match the model IDs in the Vercel AI SDK (https://github.com/vercel/ai)
    */
   getModel(): string;
diff --git a/src/llm/models/deepseek.ts b/src/llm/models/deepseek.ts
index 31fb5299..90547c17 100644
--- a/src/llm/models/deepseek.ts
+++ b/src/llm/models/deepseek.ts
@@ -1,7 +1,8 @@
+import { createOpenAI } from '@ai-sdk/openai';
+import { LanguageModel } from 'ai';
 import axios from 'axios';
 import { addCost, agentContext } from '#agent/agentContextLocalStorage';
 import { LlmCall } from '#llm/llmCallService/llmCall';
-import { CallerId } from '#llm/llmCallService/llmCallService';
 import { withSpan } from '#o11y/trace';
 import { currentUser } from '#user/userService/userContext';
 import { sleep } from '#utils/async-utils';
@@ -35,6 +36,7 @@ export function deepseekChat(): LLM {
  */
 export class DeepseekLLM extends BaseLLM {
   _client: any;
+  aimodel: LanguageModel;

   client() {
     if (!this._client) {
@@ -52,6 +54,16 @@ export class DeepseekLLM extends BaseLLM {
     return Boolean(currentUser().llmConfig.deepseekKey || process.env.DEEPSEEK_API_KEY);
   }

+  aiModel(): LanguageModel {
+    if (!this.aimodel) {
+      this.aimodel = createOpenAI({
+        baseURL: 'https://api.deepseek.com',
+        apiKey: currentUser().llmConfig.deepseekKey || envVar('DEEPSEEK_API_KEY'),
+      })(this.getModel());
+    }
+    return this.aimodel;
+  }
+
   constructor(
     displayName: string,
     model: string,
diff --git a/src/llm/models/openai.ts b/src/llm/models/openai.ts
index 19ed4641..eb3c265f 100644
--- a/src/llm/models/openai.ts
+++ b/src/llm/models/openai.ts
@@ -1,4 +1,6 @@
+import { createOpenAI } from '@ai-sdk/openai';
 import { OpenAIChatModelId } from '@ai-sdk/openai/internal';
+import { LanguageModel } from 'ai';
 import { OpenAI as OpenAISDK } from 'openai';
 import { addCost, agentContext } from '#agent/agentContextLocalStorage';
 import { LlmCall } from '#llm/llmCallService/llmCall';
@@ -77,11 +79,12 @@ export function GPT4oMini() {
 export class OpenAI extends BaseLLM {
   openAISDK: OpenAISDK | null = null;
+  aimodel: LanguageModel;

   constructor(
     name: string,
     model: Model,
-    aiModel: OpenAIChatModelId,
+    private aiModelId: OpenAIChatModelId,
     maxInputTokens: number,
     calculateInputCost: (input: string) => number,
     calculateOutputCost: (output: string) => number,
@@ -102,6 +105,15 @@ export class OpenAI extends BaseLLM {
     return Boolean(currentUser().llmConfig.openaiKey || process.env.OPENAI_API_KEY);
   }

+  aiModel(): LanguageModel {
+    if (!this.aimodel) {
+      this.aimodel = createOpenAI({
+        apiKey: currentUser().llmConfig.openaiKey || envVar('OPENAI_API_KEY'),
+      })(this.getModel());
+    }
+    return this.aimodel;
+  }
+
   async generateImage(description: string): Promise<string> {
     const response = await this.sdk().images.generate({
       model: 'dall-e-3',
diff --git a/src/llm/models/vertexai.ts b/src/llm/models/vertexai.ts
index 326d5a3c..a722d544 100644
--- a/src/llm/models/vertexai.ts
+++ b/src/llm/models/vertexai.ts
@@ -1,4 +1,6 @@
+import { createVertex } from '@ai-sdk/google-vertex';
 import { GenerativeModel, HarmBlockThreshold, HarmCategory, SafetySetting, VertexAI } from '@google-cloud/vertexai';
+import { LanguageModel } from 'ai';
 import axios from 'axios';
 import { addCost, agentContext } from '#agent/agentContextLocalStorage';
 import { AgentLLMs } from '#agent/agentContextTypes';
@@ -41,11 +43,11 @@ export function vertexLLMRegistry(): Record<string, () => LLM> {
 // gemini-1.5-pro-latest
 // gemini-1.5-pro-exp-0801
 // exp-0801
-export function Gemini_1_5_Pro(version = '002') {
+export function Gemini_1_5_Pro() {
   return new VertexLLM(
     'Gemini 1.5 Pro',
     VERTEX_SERVICE,
-    `gemini-1.5-pro-${version}`,
+    'gemini-1.5-pro',
     1_000_000,
     (input: string) => (input.length * (input.length > 128_000 * 4 ? 0.0003125 : 0.000625)) / 1000,
     (output: string) => (output.length * (output.length > 128_000 * 4 ? 0.0025 : 0.00125)) / 1000,
@@ -63,11 +65,11 @@ export function Gemini_1_5_Experimental() {
   );
 }

-export function Gemini_1_5_Flash(version = '002') {
+export function Gemini_1_5_Flash() {
   return new VertexLLM(
     'Gemini 1.5 Flash',
     VERTEX_SERVICE,
-    `gemini-1.5-flash-${version}`,
+    'gemini-1.5-flash',
     1_000_000,
     (input: string) => (input.length * 0.000125) / 1000,
     (output: string) => (output.length * 0.000375) / 1000,
@@ -147,6 +149,7 @@ export function Vertex_Llama3_405b() {
  */
 class VertexLLM extends BaseLLM {
   _vertex: VertexAI;
+  aimodel: LanguageModel;

   vertex(): VertexAI {
     if (!this._vertex) {
@@ -158,6 +161,14 @@ class VertexLLM extends BaseLLM {
     return this._vertex;
   }

+  aiModel(): LanguageModel {
+    if (!this.aimodel) {
+      const provider = createVertex({ project: process.env.GCLOUD_PROJECT, location: process.env.GCLOUD_REGION });
+      this.aimodel = provider(this.getModel());
+    }
+    return this.aimodel;
+  }
+
   async generateText(userPrompt: string, systemPrompt?: string, opts?: GenerateTextOptions): Promise<string> {
     return withActiveSpan(`generateText ${opts?.id ?? ''}`, async (span) => {
       if (systemPrompt) span.setAttribute('systemPrompt', systemPrompt);
diff --git a/src/routes/code/code-routes.ts b/src/routes/code/code-routes.ts
index d371b52f..f829e4dc 100644
--- a/src/routes/code/code-routes.ts
+++ b/src/routes/code/code-routes.ts
@@ -9,8 +9,8 @@ import { ClaudeVertexLLMs } from '#llm/models/anthropic-vertex';
 import { Gemini_1_5_Flash } from '#llm/models/vertexai';
 import { logger } from '#o11y/logger';
 import { CodeEditingAgent } from '#swe/codeEditingAgent';
-import { codebaseQuery } from '#swe/codebaseQuery';
-import { SelectFilesResponse, selectFilesToEdit } from '#swe/selectFilesToEdit';
+import { codebaseQuery } from '#swe/discovery/codebaseQuery';
+import { SelectFilesResponse, selectFilesToEdit } from '#swe/discovery/selectFilesToEdit';
 import { AppFastifyInstance } from '../../app';
 import { sophiaDirName, systemDir } from '../../appVars';
diff --git a/src/swe/SWEBenchAgent.ts b/src/swe/SWEBenchAgent.ts
index 75328489..a4bd6413 100644
--- a/src/swe/SWEBenchAgent.ts
+++ b/src/swe/SWEBenchAgent.ts
@@ -5,9 +5,9 @@ import { ClaudeVertexLLMs } from '#llm/models/anthropic-vertex';
 import { countTokens } from '#llm/tokens';
 import { logger } from '#o11y/logger';
 import { CodeEditingAgent } from '#swe/codeEditingAgent';
+import { selectFilesToEdit } from '#swe/discovery/selectFilesToEdit';
 import { PythonTools } from '#swe/lang/python/pythonTools';
 import { ProjectInfo } from '#swe/projectDetection';
-import { selectFilesToEdit } from '#swe/selectFilesToEdit';
 import { MAP_REPO_TO_TEST_FRAMEWORK, MAP_VERSION_TO_INSTALL, VersionInstallation } from '#swe/sweBenchConstant';
 import { execCommand } from '#utils/exec';
diff --git a/src/swe/codeEditingAgent.ts b/src/swe/codeEditingAgent.ts
index 6e913366..2f3bdd0d 100644
--- a/src/swe/codeEditingAgent.ts
+++ b/src/swe/codeEditingAgent.ts
@@ -13,9 +13,9 @@ import { execCommand, runShellCommand } from '#utils/exec';
 import { appContext } from '../app';
 import { cacheRetry } from '../cache/cacheRetry';
 import { AiderCodeEditor } from './aiderCodeEditor';
+import { SelectFilesResponse, selectFilesToEdit } from './discovery/selectFilesToEdit';
 import { ProjectInfo, detectProjectInfo } from './projectDetection';
 import { basePrompt } from './prompt';
-import { SelectFilesResponse, selectFilesToEdit } from './selectFilesToEdit';
 import { summariseRequirements } from './summariseRequirements';

 export function buildPrompt(args: {
diff --git a/src/swe/codeFunctions.ts b/src/swe/codeFunctions.ts
new file mode 100644
index 00000000..c5c61781
--- /dev/null
+++ b/src/swe/codeFunctions.ts
@@ -0,0 +1,36 @@
+import { func, funcClass } from '#functionSchema/functionDecorators';
+import { codebaseQuery } from '#swe/discovery/codebaseQuery';
+import { SelectFilesResponse, selectFilesToEdit } from '#swe/discovery/selectFilesToEdit';
+import { getProjectInfo } from '#swe/projectDetection';
+import { reviewChanges } from '#swe/reviewChanges';
+
+@funcClass(__filename)
+export class CodeFunctions {
+  /**
+   * Searches across the files under the current working directory to provide an answer to the query
+   * @param query
+   */
+  @func()
+  async queryRepository(query: string): Promise<string> {
+    return await codebaseQuery(query);
+  }
+
+  /**
+   * Selects the files which would need to be edited to fulfil the given requirements
+   * @param requirements
+   */
+  @func()
+  async selectFilesToEdit(requirements: string): Promise<SelectFilesResponse> {
+    return await selectFilesToEdit(requirements, await getProjectInfo());
+  }
+
+  /**
+   * Reviews the changes committed to git since a commit or the start of a branch
+   * @param requirements
+   * @param sourceBranchOrCommit
+   */
+  @func()
+  async reviewChanges(requirements: string, sourceBranchOrCommit: string) {
+    return await reviewChanges(requirements, sourceBranchOrCommit);
+  }
+}
diff --git a/src/swe/discovery/README.md b/src/swe/discovery/README.md
new file mode 100644
index 00000000..0ef0da5f
--- /dev/null
+++ b/src/swe/discovery/README.md
@@ -0,0 +1,2 @@
+This folder contains agents which perform the discovery/research
+phase of a task.
\ No newline at end of file
diff --git a/src/swe/codebaseQuery.ts b/src/swe/discovery/codebaseQuery.ts
similarity index 100%
rename from src/swe/codebaseQuery.ts
rename to src/swe/discovery/codebaseQuery.ts
diff --git a/src/swe/discovery.ts b/src/swe/discovery/discovery.ts
similarity index 98%
rename from src/swe/discovery.ts
rename to src/swe/discovery/discovery.ts
index 4a95bc8d..f39f9e4e 100644
--- a/src/swe/discovery.ts
+++ b/src/swe/discovery/discovery.ts
@@ -1,7 +1,7 @@
 import { getFileSystem, llms } from '#agent/agentContextLocalStorage';
 import { logger } from '#o11y/logger';
+import { SelectedFile, selectFilesToEdit } from '#swe/discovery/selectFilesToEdit';
 import { getProjectInfo } from '#swe/projectDetection';
-import { SelectedFile, selectFilesToEdit } from '#swe/selectFilesToEdit';

 interface DiscoveryResult {
   readyForExecution: boolean;
diff --git a/src/swe/discovery/selectFilesAgent.ts b/src/swe/discovery/selectFilesAgent.ts
new file mode 100644
index 00000000..0573fa82
--- /dev/null
+++ b/src/swe/discovery/selectFilesAgent.ts
@@ -0,0 +1,289 @@
+import { getFileSystem, llms } from '#agent/agentContextLocalStorage';
+import { LlmMessage } from '#llm/llm';
+import { logger } from '#o11y/logger';
+import { getRepositoryOverview } from '#swe/documentationBuilder';
+import { ProjectInfo, getProjectInfo } from '#swe/projectDetection';
+import { RepositoryMaps, generateRepositoryMaps } from '#swe/repositoryMap';
+
+// WORK IN PROGRESS ------
+
+interface AssistantAction {
+  inspectFiles?: string[];
+  selectFiles?: SelectedFile[];
+  ignoreFiles?: string[];
+  complete?: boolean;
+}
+
+export interface FileSelection {
+  files: SelectedFile[];
+  extracts?: FileExtract[];
+}
+
+export interface SelectedFile {
+  /** The file path */
+  path: string;
+  /** The reason why this file needs to be in the file selection */
+  reason: string;
+  /** Whether the file is not expected to be modified when implementing the task. Only relevant when the task is for making changes, and not just a query. */
+  readonly: boolean;
+}
+
+export interface FileExtract {
+  /** The file path */
+  path: string;
+  /** The extract of the file contents which is relevant to the task */
+  extract: string;
+}
+
+function getStageInstructions(stage: 'initial' | 'post_inspect' | 'all_inspected'): string {
+  if (stage === 'initial') {
+    return `
+At this stage, you should decide which files to inspect based on the requirements and project map.
+
+**Valid Actions**:
+- Request to inspect files by providing "inspectFiles": ["file1", "file2"]
+
+**Response Format**:
+Respond with a JSON object wrapped in <json>...</json> tags, containing only the **"inspectFiles"** property.
+
+Do not include file contents unless they have been provided to you.
+`;
+  }
+  if (stage === 'post_inspect') {
+    return `
+You have received the contents of the files you requested to inspect.
+
+**Valid Actions**:
+- Decide to select or ignore the inspected files by providing:
+  - "selectFiles": [{"path": "file1", "reason": "...", "readonly": false}, ...]
+  - "ignoreFiles": ["file2", ...]
+
+**Response Format**:
+Respond with a JSON object wrapped in <json>...</json> tags, containing **"selectFiles"** and/or **"ignoreFiles"** properties.
+
+Do not include file contents unless they have been provided to you.
+`;
+  }
+  if (stage === 'all_inspected') {
+    return `
+You have processed all inspected files.
+
+**Valid Actions**:
+- Request to inspect more files by providing "inspectFiles": ["file3", "file4"]
+- If you have all the necessary files, complete the selection by responding with "complete": true
+
+**Response Format**:
+Respond with a JSON object wrapped in <json>...</json> tags, containing either:
+- the **"inspectFiles"** property, or
+- **"complete": true**
+
+Do not include file contents unless they have been provided to you.
+`;
+  }
+  return '';
+}
+
+/**
+ * The repository maps have summaries of each file and folder.
+ * For a large project the summaries of every file may be too long to include at once.
+ *
+ * At each iteration the agent can:
+ * - Request the summaries for a subset of folders of interest, when needing to explore a particular section of the repository
+ * - Search the repository (or a sub-folder) for file contents matching a regex
+ * OR
+ * - Inspect the contents of file(s), providing their paths
+ * OR (mandatory if files were inspected in the previous step)
+ * - Add an inspected file to the file selection.
+ * - Ignore an inspected file if it's not relevant.
+ * OR
+ * - Complete with the current selection
+ *
+ * i.e. The possible actions are:
+ * 1. Search for files
+ * 2. Inspect files
+ * 3. Add/ignore inspected files
+ * 4. Complete
+ *
+ * where #3 must always follow #2.
+ *
+ * To maximize input-token caching, new messages with the results of the actions are appended to the previous messages.
+ * This should reduce cost and latency compared to using the dynamic autonomous agents to perform the task. (However, that might change if we get the caching autonomous agent working.)
+ *
+ * Example:
+ * [index] - [role]: [message]
+ *
+ * Messages #1
+ * 0 - USER: given <project_map> and <requirements> and select initial files for the task.
+ *
+ * Messages #2
+ * 1 - ASSISTANT: { "inspectFiles": ["file1", "file2"] }
+ * 0 - USER: given <project_map> and <requirements> and select initial files for the task.
+ *
+ * Messages #3
+ * 2 - USER: <file_contents>. Respond with select/ignore
+ * 1 - ASSISTANT: { "inspectFiles": ["file1", "file2"] }
+ * 0 - USER: given <project_map> and <requirements> and select initial files for the task.
+ *
+ * Messages #4
+ * 3 - ASSISTANT: { "selectFiles": [{"path": "file1", "reason": "contains key details"}], "ignoreFiles": ["file2"] }
+ * 2 - USER: <file_contents>
+ * 1 - ASSISTANT: { "inspectFiles": ["file1", "file2"] }
+ * 0 - USER: given <project_map> and <requirements> and select initial files for the task.
+ *
+ * The history of the actions will be kept, and always included in the final message to the LLM.
+ *
+ * All files staged in a previous step must be processed in the next step (i.e. added, extracted or removed)
+ *
+ * @param requirements
+ * @param projectInfo
+ */
+export async function selectFilesAgent(requirements: string, projectInfo?: ProjectInfo): Promise<FileSelection> {
+  try {
+    projectInfo ??= await getProjectInfo();
+    const projectMaps: RepositoryMaps = await generateRepositoryMaps([projectInfo]);
+    const repositoryOverview: string = await getRepositoryOverview();
+    const fileSystemWithSummaries: string = `<project_map>\n${projectMaps.fileSystemTreeWithSummaries.text}\n</project_map>\n`;
+
+    const messages: LlmMessage[] = [];
+    const fileSelection: FileSelection = { files: [], extracts: [] };
+    let stagedFiles: string[] = [];
+    let isComplete = false;
+
+    const initialPrompt = `${repositoryOverview}
+${fileSystemWithSummaries}
+<requirements>
+${requirements}
+</requirements>`;
+
+    messages.push({ role: 'user', text: initialPrompt });
+
+    const maxIterations = 5;
+    let iterationCount = 0;
+
+    while (!isComplete) {
+      iterationCount++;
+      if (iterationCount > maxIterations) {
+        throw new Error('Maximum interaction iterations reached.');
+      }
+
+      // Determine the current stage
+      let currentStage: 'initial' | 'post_inspect' | 'all_inspected';
+      if (iterationCount === 1) {
+        // First iteration
+        currentStage = 'initial';
+      } else if (stagedFiles.length > 0) {
+        // Just provided file contents; expecting select or ignore
+        currentStage = 'post_inspect';
+      } else {
+        // After processing inspected files
+        currentStage = 'all_inspected';
+      }
+
+      // Get the stage-specific instructions
+      const stageInstructions = getStageInstructions(currentStage);
+
+      // Construct the current prompt by appending stage instructions
+      const currentPrompt = `
+Your task is to select files from the <project_map> to fulfill the given requirements.
+
+Before responding, please follow these steps:
+1. **Observations**: Make observations about the project and requirements.
+2. **Thoughts**: Think about which files are necessary.
+3. **Reasoning**: Provide reasoning for your choices.
+4. **Response**: Finally, respond according to the instructions below.
+
+${stageInstructions}
+`;
+
+      // Add the current prompt to messages
+      messages.push({ role: 'user', text: currentPrompt });
+
+      // Call the LLM with the current messages
+      const assistantResponse = await llms().medium.generateJsonFromMessages<AssistantAction>(messages);
+
+      // Add the assistant's response to the conversation history
+      messages.push({ role: 'assistant', text: JSON.stringify(assistantResponse) });
+
+      // Handle the assistant's response based on the current stage
+      if (currentStage === 'initial' && assistantResponse.inspectFiles) {
+        // Read and provide the contents of the requested files
+        const fileContents = await readFileContents(assistantResponse.inspectFiles);
+        messages.push({ role: 'user', text: fileContents });
+        stagedFiles = assistantResponse.inspectFiles;
+      } else if (currentStage === 'post_inspect' && (assistantResponse.selectFiles || assistantResponse.ignoreFiles)) {
+        // Process selected files and remove ignored files from staging
+        if (assistantResponse.selectFiles) {
+          fileSelection.files.push(...assistantResponse.selectFiles);
+        }
+        if (assistantResponse.ignoreFiles) {
+          stagedFiles = stagedFiles.filter((f) => !assistantResponse.ignoreFiles.includes(f));
+        }
+        // Ensure all staged files have been processed
+        if (stagedFiles.length > 0) {
+          const message = `Please respond with select or ignore for the remaining files in the same JSON format as before.\n${JSON.stringify(stagedFiles)}`;
+          messages.push({ role: 'user', text: message });
+        } else {
+          // Move to the next stage
+          stagedFiles = [];
+        }
+      } else if (currentStage === 'all_inspected') {
+        if (assistantResponse.inspectFiles) {
+          // Read and provide the contents of the requested files
+          const fileContents = await readFileContents(assistantResponse.inspectFiles);
+          messages.push({ role: 'user', text: fileContents });
+          stagedFiles = assistantResponse.inspectFiles;
+        } else if (assistantResponse.complete) {
+          // Mark the selection process as complete
+          isComplete = true;
+        } else {
+          throw new Error('Invalid response in all_inspected stage.');
+        }
+      } else {
+        throw new Error('Unexpected response from assistant.');
+      }
+    }
+
+    if (fileSelection.files.length === 0) {
+      throw new Error('No files were selected to fulfill the requirements.');
+    }
+
+    logger.info(`Selected files: ${fileSelection.files.map((f) => f.path).join(', ')}`);
+
+    return fileSelection;
+  } catch (error) {
+    logger.error('Error in selectFilesAgent:', error);
+    throw error;
+  }
+}
+
+async function readFileContents(filePaths: string[]): Promise<string> {
+  const fileSystem = getFileSystem();
+  let contents = '';
+
+  for (const filePath of filePaths) {
+    try {
+      const fileContent = await fileSystem.readFile(filePath);
+      contents += `<file_contents path="${filePath}">
+${fileContent}
+</file_contents>
+`;
+    } catch (e) {
+      logger.info(`Couldn't read ${filePath}`);
+      contents += `Couldn't read ${filePath}\n`;
+    }
+  }
+
+  return contents;
+}
diff --git a/src/swe/selectFilesToEdit.test.ts b/src/swe/discovery/selectFilesToEdit.test.ts
similarity index 92%
rename from src/swe/selectFilesToEdit.test.ts
rename to src/swe/discovery/selectFilesToEdit.test.ts
index 55c1ad73..ffcb083b 100644
--- a/src/swe/selectFilesToEdit.test.ts
+++ b/src/swe/discovery/selectFilesToEdit.test.ts
@@ -1,6 +1,6 @@
 import { expect } from 'chai';
 import { FileSystemService } from '#functions/storage/fileSystemService';
-import { removeNonExistingFiles } from '#swe/selectFilesToEdit';
+import { removeNonExistingFiles } from '#swe/discovery/selectFilesToEdit';
describe('removeNonExistingFiles', () => { const fileSystem = new FileSystemService(); diff --git a/src/swe/selectFilesToEdit.ts b/src/swe/discovery/selectFilesToEdit.ts similarity index 99% rename from src/swe/selectFilesToEdit.ts rename to src/swe/discovery/selectFilesToEdit.ts index 3398fdfa..81b1e8ba 100644 --- a/src/swe/selectFilesToEdit.ts +++ b/src/swe/discovery/selectFilesToEdit.ts @@ -5,7 +5,7 @@ import { getFileSystem, llms } from '#agent/agentContextLocalStorage'; import { logger } from '#o11y/logger'; import { getRepositoryOverview } from '#swe/documentationBuilder'; import { RepositoryMaps, generateRepositoryMaps } from '#swe/repositoryMap'; -import { ProjectInfo, getProjectInfo } from './projectDetection'; +import { ProjectInfo, getProjectInfo } from '../projectDetection'; export interface SelectFilesResponse { primaryFiles: SelectedFile[];
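
Usage notes (illustrative, not part of the patch): the new aiModel() accessors return a Vercel AI SDK LanguageModel, so any of the three providers can be driven through the SDK's generateText() call. A minimal sketch, assuming the patch is applied; the summarise() helper and its prompt are hypothetical.

import { generateText } from 'ai';
import { GPT4oMini } from '#llm/models/openai';

async function summarise(input: string): Promise<string> {
  // aiModel() lazily constructs the LanguageModel on first use; the same call
  // shape works for the DeepseekLLM and VertexLLM classes since they share the interface.
  const { text } = await generateText({
    model: GPT4oMini().aiModel(),
    prompt: `Summarise the following in one sentence:\n${input}`,
  });
  return text;
}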
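
A similar sketch for invoking the new work-in-progress selectFilesAgent; the requirements string is hypothetical, and projectInfo may be omitted because the agent falls back to getProjectInfo().

import { selectFilesAgent } from '#swe/discovery/selectFilesAgent';

async function pickFiles(): Promise<void> {
  // Runs the staged inspect/select/ignore loop (at most 5 iterations)
  // and returns the accumulated FileSelection.
  const selection = await selectFilesAgent('Add a --verbose flag to the CLI entry point');
  for (const file of selection.files) {
    console.log(`${file.path} readonly=${file.readonly}: ${file.reason}`);
  }
}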