diff --git a/src/llmling/config/loading.py b/src/llmling/config/loading.py
index 84f5a00..9498493 100644
--- a/src/llmling/config/loading.py
+++ b/src/llmling/config/loading.py
@@ -66,11 +66,8 @@ def load_config(path: str | os.PathLike[str]) -> Config:
         msg = f"Failed to validate configuration from {path}"
         raise exceptions.ConfigError(msg) from exc
     else:
-        logger.debug(
-            "Loaded raw configuration: version=%s, resources=%d",
-            config.version,
-            len(config.resources),
-        )
+        msg = "Loaded raw configuration: version=%s, resources=%d"
+        logger.debug(msg, config.version, len(config.resources))
     return config
diff --git a/src/llmling/config/manager.py b/src/llmling/config/manager.py
index 84f6c1d..5b92807 100644
--- a/src/llmling/config/manager.py
+++ b/src/llmling/config/manager.py
@@ -120,9 +120,8 @@ def _validate_prompts(self) -> list[str]:
             else:
                 # Try to import the module
                 try:
-                    importlib.import_module(
-                        prompt_config.import_path.split(".")[0]
-                    )
+                    module_name = prompt_config.import_path.split(".")[0]
+                    importlib.import_module(module_name)
                 except ImportError:
                     warnings.append(
                         f"Cannot import module for prompt {name}: "
@@ -161,10 +160,8 @@ def _validate_resources(self) -> list[str]:
         for resource in self.config.resources.values():
            if hasattr(resource, "path"):
                 path = UPath(resource.path)
-                if not path.exists() and not path.as_uri().startswith((
-                    "http://",
-                    "https://",
-                )):
+                prefixes = ("http://", "https://")
+                if not path.exists() and not path.as_uri().startswith(prefixes):
                     warnings.append(f"Resource path not found: {path}")
 
         return warnings
diff --git a/src/llmling/config/runtime.py b/src/llmling/config/runtime.py
index b279cee..8b8d07e 100644
--- a/src/llmling/config/runtime.py
+++ b/src/llmling/config/runtime.py
@@ -161,10 +161,8 @@ def from_config(cls, config: Config) -> Self:
             if name not in tool_registry:
                 tool_registry[name] = tool
             else:
-                logger.warning(
-                    "Tool %s from toolset overlaps with configured tool",
-                    name,
-                )
+                msg = "Tool %s from toolset overlaps with configured tool"
+                logger.warning(msg, name)
 
         for name, prompt_config in config.prompts.items():
             match prompt_config:
@@ -179,9 +177,8 @@
                             try:
                                 func = importing.import_callable(path)
                                 completion_funcs[arg_name] = func
                             except Exception:
-                                logger.exception(
-                                    "Failed to import completion function: %s", path
-                                )
+                                msg = "Failed to import completion function: %s"
+                                logger.exception(msg, path)
 
                     prompt = create_prompt_from_callable(
                         prompt_config.import_path,
diff --git a/src/llmling/prompts/registry.py b/src/llmling/prompts/registry.py
index b945a68..45069f2 100644
--- a/src/llmling/prompts/registry.py
+++ b/src/llmling/prompts/registry.py
@@ -166,14 +166,10 @@ def _get_type_completions(
         if len(args) == 2 and type(None) in args:  # noqa: PLR2004
             other_type = next(arg for arg in args if arg is not type(None))
             # Process the non-None type directly instead of using replace
-            return self._get_type_completions(
-                ExtendedPromptArgument(
-                    name=arg.name,
-                    type_hint=other_type,
-                    description=arg.description,
-                ),
-                current_value,
+            arg = ExtendedPromptArgument(
+                name=arg.name, type_hint=other_type, description=arg.description
             )
+            return self._get_type_completions(arg, current_value)
 
         # Handle bool
         if type_hint is bool:
diff --git a/src/llmling/server/mcp_inproc_session.py b/src/llmling/server/mcp_inproc_session.py
index d5a8316..56bcc97 100644
--- a/src/llmling/server/mcp_inproc_session.py
+++ b/src/llmling/server/mcp_inproc_session.py
@@ -63,9 +63,8 @@ async def read_stderr():
             assert self.process.stderr
             while True:
                 try:
-                    line = await asyncio.get_event_loop().run_in_executor(
-                        None, self.process.stderr.readline
-                    )
+                    fn = self.process.stderr.readline
+                    line = await asyncio.get_event_loop().run_in_executor(None, fn)
                     if not line:
                         break
                     print(
@@ -109,12 +108,7 @@ async def send_request(
             msg = "Server not started"
             raise RuntimeError(msg)
 
-        request = {
-            "jsonrpc": "2.0",
-            "method": method,
-            "params": params or {},
-            "id": 1,
-        }
+        request = {"jsonrpc": "2.0", "method": method, "params": params or {}, "id": 1}
         request_str = json.dumps(request) + "\n"
         logger.debug("Sending request: %s", request_str.strip())
@@ -142,11 +136,7 @@ async def send_notification(
             msg = "Server not started"
             raise RuntimeError(msg)
 
-        notification = {
-            "jsonrpc": "2.0",
-            "method": method,
-            "params": params or {},
-        }
+        notification = {"jsonrpc": "2.0", "method": method, "params": params or {}}
         notification_str = json.dumps(notification) + "\n"
         logger.debug("Sending notification: %s", notification_str.strip())
diff --git a/src/llmling/server/server.py b/src/llmling/server/server.py
index 86730af..32ed78e 100644
--- a/src/llmling/server/server.py
+++ b/src/llmling/server/server.py
@@ -116,11 +116,8 @@ async def handle_set_level(level: mcp.LoggingLevel) -> None:
             try:
                 python_level = level_map[level]
                 logger.setLevel(python_level)
-                await self.current_session.send_log_message(
-                    level="info",
-                    data=f"Log level set to {level}",
-                    logger=self.name,
-                )
+                data = f"Log level set to {level}"
+                await self.current_session.send_log_message("info", data, logger=self.name)
             except Exception as exc:
                 error = mcp.McpError("Error setting log level")
                 error.error = mcp.ErrorData(code=INTERNAL_ERROR, message=str(exc))
@@ -153,9 +150,7 @@ async def handle_call_tool(
         @self.server.list_prompts()
         async def handle_list_prompts() -> list[mcp.types.Prompt]:
             """Handle prompts/list request."""
-            return [
-                conversions.to_mcp_prompt(prompt) for prompt in self.runtime.get_prompts()
-            ]
+            return [conversions.to_mcp_prompt(p) for p in self.runtime.get_prompts()]
 
         @self.server.get_prompt()
         async def handle_get_prompt(
@@ -166,18 +161,13 @@
             """Handle prompts/get request."""
             try:
                 prompt = self.runtime.get_prompt(name)
                 messages = await self.runtime.render_prompt(name, arguments)
-
-                return GetPromptResult(
-                    description=prompt.description,
-                    messages=[conversions.to_mcp_message(msg) for msg in messages],
-                )
+                mcp_msgs = [conversions.to_mcp_message(msg) for msg in messages]
+                return GetPromptResult(description=prompt.description, messages=mcp_msgs)
             except exceptions.LLMLingError as exc:
                 msg = str(exc)
                 error = mcp.McpError(msg)
-                error.error = mcp.ErrorData(
-                    code=INVALID_PARAMS if "not found" in msg else INTERNAL_ERROR,
-                    message=msg,
-                )
+                code = INVALID_PARAMS if "not found" in msg else INTERNAL_ERROR
+                error.error = mcp.ErrorData(code=code, message=msg)
                 raise error from exc
 
         @self.server.list_resources()
@@ -188,10 +178,8 @@ async def handle_list_resources() -> list[mcp.types.Resource]:
                 try:
                     # First get URI and basic info without loading
                     uri = self.runtime.get_resource_uri(name)
-                    resource_config = self.runtime._config.resources[
-                        name
-                    ]  # Get raw config
-
+                    # Get raw config
+                    resource_config = self.runtime._config.resources[name]
                     mcp_resource = mcp.types.Resource(
                         uri=conversions.to_mcp_uri(uri),
                         name=name,
@@ -201,11 +189,8 @@
                     resources.append(mcp_resource)
 
                 except Exception:
-                    logger.exception(
-                        "Failed to create resource listing for %r. Config: %r",
-                        name,
-                        self.runtime._config.resources.get(name),
-                    )
+                    msg = "Failed to create resource listing for %r. Config: %r"
+                    logger.exception(msg, name, self.runtime._config.resources.get(name))
                     continue
 
             return resources
@@ -266,12 +251,8 @@ async def handle_progress(
             token: str | int,
             progress: float,
             total: float | None,
         ) -> None:
             """Handle progress notifications from client."""
-            logger.debug(
-                "Progress notification: %s %.1f/%.1f",
-                token,
-                progress,
-                total or 0.0,
-            )
+            msg = "Progress notification: %s %.1f/%.1f"
+            logger.debug(msg, token, progress, total or 0.0)
 
     def _setup_observers(self) -> None:
         """Set up registry observers for MCP notifications."""