diff --git a/private_gpt/components/llm/prompt_helper.py b/private_gpt/components/llm/prompt_helper.py
index 985d217bd2..1ab19a3f68 100644
--- a/private_gpt/components/llm/prompt_helper.py
+++ b/private_gpt/components/llm/prompt_helper.py
@@ -215,7 +215,7 @@ def _completion_to_prompt(self, completion: str) -> str:
 
 
 def get_prompt_style(
-    prompt_style: Literal["default", "llama2", "tag", "mistral", "chatml"] | None
+    prompt_style: Literal["default", "llama2", "tag", "mistral", "chatml"] | None,
 ) -> AbstractPromptStyle:
     """Get the prompt style to use from the given string.
 
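Note: get_prompt_style resolves one of the literal style names into a concrete
AbstractPromptStyle. A minimal usage sketch follows (the "llama2" choice and the
message content are illustrative, and messages_to_prompt is assumed to be the
public wrapper around the per-style hooks defined in this module):

    from llama_index.llms import ChatMessage, MessageRole

    from private_gpt.components.llm.prompt_helper import get_prompt_style

    # Resolve a concrete style by name; passing None selects the default style.
    style = get_prompt_style("llama2")

    # Render a chat history into a single prompt string for the model.
    prompt = style.messages_to_prompt(
        [ChatMessage(role=MessageRole.USER, content="Hello!")]
    )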
diff --git a/private_gpt/components/reranker/reranker.py b/private_gpt/components/reranker/reranker.py
index aa4f124598..1eb406ea52 100644
--- a/private_gpt/components/reranker/reranker.py
+++ b/private_gpt/components/reranker/reranker.py
@@ -1,17 +1,17 @@
-from typing import List, Tuple
-from injector import singleton, inject
-from llama_index.schema import NodeWithScore, QueryBundle
-from private_gpt.paths import models_path
-from llama_index.bridge.pydantic import Field
 from FlagEmbedding import FlagReranker
+from injector import inject, singleton
+from llama_index.bridge.pydantic import Field
 from llama_index.postprocessor.types import BaseNodePostprocessor
+from llama_index.schema import NodeWithScore, QueryBundle
+
+from private_gpt.paths import models_path
 from private_gpt.settings.settings import Settings
 
 
 @singleton
 class RerankerComponent(BaseNodePostprocessor):
-    """
-    Reranker component:
+    """Reranker component.
+
     - top_n: Top N nodes to return.
     - cut_off: Cut off score for nodes.
 
@@ -47,14 +47,14 @@ def class_name(cls) -> str:
 
     def _postprocess_nodes(
         self,
-        nodes: List[NodeWithScore],
+        nodes: list[NodeWithScore],
         query_bundle: QueryBundle | None = None,
-    ) -> List[NodeWithScore]:
+    ) -> list[NodeWithScore]:
         if query_bundle is None:
-            return ValueError("Query bundle must be provided.")
+            raise ValueError("Query bundle must be provided.")
 
         query_str = query_bundle.query_str
-        sentence_pairs: List[Tuple[str, str]] = []
+        sentence_pairs: list[tuple[str, str]] = []
         for node in nodes:
             content = node.get_content()
-            sentence_pairs.append([query_str, content])
+            sentence_pairs.append((query_str, content))
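Note: the component feeds (query, passage) pairs to FlagReranker, which is why
the pairs are typed as tuples above. A hedged standalone sketch of that scoring
step (the model name is illustrative; the component loads its model from
models_path instead):

    from FlagEmbedding import FlagReranker

    # Illustrative model; the component resolves its model under models_path.
    reranker = FlagReranker("BAAI/bge-reranker-large")

    query = "What does the reranker component do?"
    passages = [
        "RerankerComponent scores retrieved nodes against the query.",
        "Completely unrelated text.",
    ]

    # FlagReranker scores (query, passage) pairs; higher means more relevant.
    scores = reranker.compute_score([(query, p) for p in passages])

    # Keep the best passages first, mirroring the top_n / cut_off behavior
    # described in the component's docstring.
    ranked = sorted(zip(scores, passages), reverse=True)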
diff --git a/private_gpt/launcher.py b/private_gpt/launcher.py
index 43bd803a56..cd626299e5 100644
--- a/private_gpt/launcher.py
+++ b/private_gpt/launcher.py
@@ -21,7 +21,6 @@
 
 
 def create_app(root_injector: Injector) -> FastAPI:
-
     # Start the API
     async def bind_injector_to_request(request: Request) -> None:
         request.state.injector = root_injector
diff --git a/private_gpt/server/chat/chat_service.py b/private_gpt/server/chat/chat_service.py
index 20c5897d61..a7a5b02bfc 100644
--- a/private_gpt/server/chat/chat_service.py
+++ b/private_gpt/server/chat/chat_service.py
@@ -12,10 +12,10 @@
 from llama_index.core.types import TokenGen
 from pydantic import BaseModel
 
-from private_gpt.components.reranker.reranker import RerankerComponent
 from private_gpt.components.embedding.embedding_component import EmbeddingComponent
 from private_gpt.components.llm.llm_component import LLMComponent
 from private_gpt.components.node_store.node_store_component import NodeStoreComponent
+from private_gpt.components.reranker.reranker import RerankerComponent
 from private_gpt.components.vector_store.vector_store_component import (
     VectorStoreComponent,
 )
diff --git a/private_gpt/server/utils/auth.py b/private_gpt/server/utils/auth.py
index 2eb40fe561..3421d1a58e 100644
--- a/private_gpt/server/utils/auth.py
+++ b/private_gpt/server/utils/auth.py
@@ -60,7 +60,7 @@ def authenticated() -> bool:
 
     # Method to be used as a dependency to check if the request is authenticated.
     def authenticated(
-        _simple_authentication: Annotated[bool, Depends(_simple_authentication)]
+        _simple_authentication: Annotated[bool, Depends(_simple_authentication)],
     ) -> bool:
         """Check if the request is authenticated."""
         assert settings().server.auth.enabled
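Note: the signature being reformatted here chains one FastAPI dependency inside
another via Annotated[..., Depends(...)]. A self-contained sketch of the same
pattern (the header handling and the check itself are simplified stand-ins for
the repo's logic):

    from typing import Annotated

    from fastapi import Depends, FastAPI, Header, HTTPException

    app = FastAPI()

    def _simple_authentication(
        authorization: Annotated[str, Header()] = "",
    ) -> bool:
        # Stand-in check; the real code compares against the configured secret.
        return authorization == "Basic secret"

    @app.get("/health")
    def health(
        authenticated: Annotated[bool, Depends(_simple_authentication)],
    ) -> str:
        if not authenticated:
            raise HTTPException(status_code=401, detail="Not authenticated")
        return "ok"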
diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py
index 7c34e8497f..eef82ca916 100644
--- a/private_gpt/ui/ui.py
+++ b/private_gpt/ui/ui.py
@@ -145,7 +145,6 @@ def build_history() -> list[ChatMessage]:
             )
         match mode:
             case "Query Files":
-
                 # Use only the selected file for the query
                 context_filter = None
                 if self._selected_filename is not None:
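Note: the branch above narrows the query to the selected file by building a
context filter from that file's document IDs. A hedged sketch of that lookup
(the ContextFilter import path and the ingest-service fields are assumptions
based on the rest of this repo):

    from private_gpt.open_ai.extensions.context_filter import ContextFilter

    def build_context_filter(ui) -> ContextFilter | None:
        # No file selected: query across all ingested documents.
        if ui._selected_filename is None:
            return None
        # Collect the doc IDs whose metadata matches the selected file name.
        docs_ids = [
            doc.doc_id
            for doc in ui._ingest_service.list_ingested()
            if doc.doc_metadata
            and doc.doc_metadata.get("file_name") == ui._selected_filename
        ]
        return ContextFilter(docs_ids=docs_ids)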
diff --git a/settings-test.yaml b/settings-test.yaml
index b6ca869dad..0e23ec4cc7 100644
--- a/settings-test.yaml
+++ b/settings-test.yaml
@@ -14,8 +14,11 @@ qdrant:
 llm:
   mode: mock
 
 embedding:
   mode: mock
+
+reranker:
+  enabled: false
 
 ui:
-  enabled: false
\ No newline at end of file
+  enabled: false
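Note: settings-test.yaml mocks the embedding model and disables the reranker for
tests. A hedged sketch of a matching settings section (the field names follow
the reranker docstring above; the actual model in
private_gpt/settings/settings.py may differ, and the defaults are assumptions):

    from pydantic import BaseModel, Field

    class RerankerSettings(BaseModel):
        enabled: bool = Field(
            True, description="Enable or disable the reranker component."
        )
        top_n: int = Field(5, description="Top N nodes to return.")
        cut_off: float = Field(0.0, description="Cut-off score for nodes.")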