From 07fb91cc6e4aee5d656f5c5aa5c1df851ffa494b Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 13 Dec 2023 03:52:54 -0800 Subject: [PATCH] Docstring (#110) * Update docstring * update docstrings * 0.3.1 --- docs/build_docs.py | 2 +- google/generativeai/__init__.py | 49 ++++------------ google/generativeai/types/generation_types.py | 58 +++++++++++++++++++ google/generativeai/version.py | 2 +- 4 files changed, 70 insertions(+), 41 deletions(-) diff --git a/docs/build_docs.py b/docs/build_docs.py index 817803406..da4555c09 100644 --- a/docs/build_docs.py +++ b/docs/build_docs.py @@ -26,7 +26,7 @@ import textwrap import typing -# For showing the conditional imports and types in `content_types.py` +# For showing the conditional imports and types in `content_types.py` typing.TYPE_CHECKING = True from absl import app diff --git a/google/generativeai/__init__.py b/google/generativeai/__init__.py index 0d90af45e..6a41ef975 100644 --- a/google/generativeai/__init__.py +++ b/google/generativeai/__init__.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""A high level client library for generative AI. +"""Google AI Python SDK ## Setup @@ -20,52 +20,23 @@ pip install google-generativeai ``` -``` -import google.generativeai as palm -import os - -palm.configure(api_key=os.environ['API_KEY']) -``` - -## Text - -Use the `palm.generate_text` function to have the model complete some initial -text. - -``` -response = palm.generate_text(prompt="The opposite of hot is") -print(response.result) # 'cold.' -``` +## GenerativeModel -## Chat +Use `genai.GenerativeModel` to access the API: -Use the `palm.chat` function to have a discussion with a model: - -``` -chat = palm.chat(messages=["Hello."]) -print(chat.last) # 'Hello! What can I help you with?' 
-chat = chat.reply("Can you tell me a joke?")
-print(chat.last) # 'Why did the chicken cross the road?'
 ```
+import google.generativeai as genai
+import os
-## Models
-
-Use the model service discover models and find out more about them:
-
-Use `palm.get_model` to get details if you know a model's name:
-
-```
-model = palm.get_model('models/chat-bison-001') # 🦬
-```
+genai.configure(api_key=os.environ['API_KEY'])
-Use `palm.list_models` to discover models:
+model = genai.GenerativeModel(name='gemini-pro')
+response = model.generate_content('Please summarise this document: ...')
-```
-import pprint
-for model in palm.list_models():
-    pprint.pprint(model) # 🦎🦦🦬🦄
+print(response.text)
 ```
+See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details.
 """
 from __future__ import annotations
diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py
index 3dab8e457..dc95b4cab 100644
--- a/google/generativeai/types/generation_types.py
+++ b/google/generativeai/types/generation_types.py
@@ -54,6 +54,64 @@ class GenerationConfigDict(TypedDict):
 @dataclasses.dataclass
 class GenerationConfig:
+    """A simple dataclass used to configure the generation parameters of `GenerativeModel.generate_content`.
+
+    Attributes:
+        candidate_count:
+            Number of generated responses to return.
+        stop_sequences:
+            The set of character sequences (up
+            to 5) that will stop output generation. If
+            specified, the API will stop at the first
+            appearance of a stop sequence. The stop sequence
+            will not be included as part of the response.
+        max_output_tokens:
+            The maximum number of tokens to include in a
+            candidate.
+
+            If unset, this will default to output_token_limit specified
+            in the model's specification.
+        temperature:
+            Controls the randomness of the output. Note: The
+
+            default value varies by model, see the `Model.temperature`
+            attribute of the `Model` returned by the `genai.get_model`
+            function.
+
+            Values can range from [0.0,1.0], inclusive. A value closer
+            to 1.0 will produce responses that are more varied and
+            creative, while a value closer to 0.0 will typically result
+            in more straightforward responses from the model.
+        top_p:
+            Optional. The maximum cumulative probability of tokens to
+            consider when sampling.
+
+            The model uses combined Top-k and nucleus sampling.
+
+            Tokens are sorted based on their assigned probabilities so
+            that only the most likely tokens are considered. Top-k
+            sampling directly limits the maximum number of tokens to
+            consider, while Nucleus sampling limits the number of tokens
+            based on the cumulative probability.
+
+            Note: The default value varies by model, see the
+            `Model.top_p` attribute of the `Model` returned by the
+            `genai.get_model` function.
+
+        top_k (int):
+            Optional. The maximum number of tokens to consider when
+            sampling.
+
+            The model uses combined Top-k and nucleus sampling.
+
+            Top-k sampling considers the set of `top_k` most probable
+            tokens. Defaults to 40.
+
+            Note: The default value varies by model, see the
+            `Model.top_k` attribute of the `Model` returned by the
+            `genai.get_model` function.
+    """
+
     candidate_count: int | None = None
     stop_sequences: Iterable[str] | None = None
     max_output_tokens: int | None = None
diff --git a/google/generativeai/version.py b/google/generativeai/version.py
index 1ea5708c1..3a4dd6471 100644
--- a/google/generativeai/version.py
+++ b/google/generativeai/version.py
@@ -14,4 +14,4 @@
 # limitations under the License.
 from __future__ import annotations
-__version__ = "0.3.0"
+__version__ = "0.3.1"