Docstring (#110)
* Update docstring

* update docstrings

* 0.3.1
MarkDaoust authored Dec 13, 2023
1 parent 0cd555b commit 07fb91c
Showing 4 changed files with 70 additions and 41 deletions.
2 changes: 1 addition & 1 deletion docs/build_docs.py
@@ -26,7 +26,7 @@
import textwrap
import typing

-# For showing the conditional imports and types in `content_types.py`
+# For showing the conditional imports and types in `content_types.py`
typing.TYPE_CHECKING = True

from absl import app
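The `typing.TYPE_CHECKING = True` line above is the notable trick in this file: patching the flag before the package is imported makes every `if TYPE_CHECKING:` block execute for real, so the doc generator can introspect the conditional imports. A minimal sketch of the mechanism, assuming the module path `google.generativeai.types.content_types` from this repo:

```python
import typing

# Patch the flag *before* the library is imported: each module reads it
# (or copies it via `from typing import TYPE_CHECKING`) at import time.
typing.TYPE_CHECKING = True

# Any `if typing.TYPE_CHECKING:` block in modules imported from here on now
# executes, so type-only imports become real, introspectable attributes.
from google.generativeai.types import content_types

# The conditional names are now visible to a docs generator.
print([name for name in dir(content_types) if not name.startswith("_")][:10])
```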
49 changes: 10 additions & 39 deletions google/generativeai/__init__.py
@@ -12,60 +12,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A high level client library for generative AI.
"""Google AI Python SDK
## Setup
```posix-terminal
pip install google-generativeai
```
```
import google.generativeai as palm
import os
palm.configure(api_key=os.environ['API_KEY'])
```
## Text
Use the `palm.generate_text` function to have the model complete some initial
text.
```
response = palm.generate_text(prompt="The opposite of hot is")
print(response.result) # 'cold.'
```
## GenerativeModel
## Chat
Use `genai.GenerativeModel` to access the API:
Use the `palm.chat` function to have a discussion with a model:
```
chat = palm.chat(messages=["Hello."])
print(chat.last) # 'Hello! What can I help you with?'
chat = chat.reply("Can you tell me a joke?")
print(chat.last) # 'Why did the chicken cross the road?'
```
import google.generativeai as genai
import os
## Models
Use the model service discover models and find out more about them:
Use `palm.get_model` to get details if you know a model's name:
```
model = palm.get_model('models/chat-bison-001') # 🦬
```
genai.configure(api_key=os.environ['API_KEY'])
Use `palm.list_models` to discover models:
model = genai.Model(name='gemini-pro')
response = model.generate_content('Please summarise this document: ...')
```
import pprint
for model in palm.list_models():
pprint.pprint(model) # 🦎🦦🦬🦄
print(response.text)
```
See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details.
"""
from __future__ import annotations

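The removed `palm.chat` flow has a counterpart in the new API. A minimal sketch of multi-turn chat, assuming the `start_chat`/`send_message` methods that `GenerativeModel` exposes in this release:

```python
import os

import google.generativeai as genai

genai.configure(api_key=os.environ['API_KEY'])

# Multi-turn conversation, the rough equivalent of the removed `palm.chat`.
model = genai.GenerativeModel('gemini-pro')
chat = model.start_chat()

response = chat.send_message("Hello.")
print(response.text)

response = chat.send_message("Can you tell me a joke?")
print(response.text)
```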
58 changes: 58 additions & 0 deletions google/generativeai/types/generation_types.py
@@ -54,6 +54,64 @@ class GenerationConfigDict(TypedDict):

@dataclasses.dataclass
class GenerationConfig:
"""A simple dataclass used to configure the generation parameters of `GenerativeModel.generate_content`.
Attributes:
candidate_count:
Number of generated responses to return.
stop_sequences:
The set of character sequences (up
to 5) that will stop output generation. If
specified, the API will stop at the first
appearance of a stop sequence. The stop sequence
will not be included as part of the response.
max_output_tokens:
The maximum number of tokens to include in a
candidate.
If unset, this will default to the `output_token_limit`
specified in the model's specification.
temperature:
Controls the randomness of the output. Note: The
default value varies by model, see the `Model.temperature`
attribute of the `Model` returned by the `genai.get_model`
function.
Values can range from [0.0, 1.0], inclusive. A value closer
to 1.0 will produce responses that are more varied and
creative, while a value closer to 0.0 will typically result
in more straightforward responses from the model.
top_p:
Optional. The maximum cumulative probability of tokens to
consider when sampling.
The model uses combined Top-k and nucleus sampling.
Tokens are sorted based on their assigned probabilities so
that only the most likely tokens are considered. Top-k
sampling directly limits the maximum number of tokens to
consider, while nucleus sampling limits the number of tokens
based on the cumulative probability.
Note: The default value varies by model, see the
`Model.top_p` attribute of the `Model` returned by the
`genai.get_model` function.
top_k:
Optional. The maximum number of tokens to consider when
sampling.
The model uses combined Top-k and nucleus sampling.
Top-k sampling considers the set of `top_k` most probable
tokens. Defaults to 40.
Note: The default value varies by model, see the
`Model.top_k` attribute of the `Model` returned by the
`genai.get_model` function.
"""

candidate_count: int | None = None
stop_sequences: Iterable[str] | None = None
max_output_tokens: int | None = None
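The attributes documented above correspond one-to-one with the dataclass fields. A minimal usage sketch; the parameter values here are illustrative assumptions, not defaults from this commit:

```python
import os

import google.generativeai as genai

genai.configure(api_key=os.environ['API_KEY'])

# Deterministic-leaning settings; all values here are illustrative.
config = genai.types.GenerationConfig(
    candidate_count=1,
    stop_sequences=['\n\n'],
    max_output_tokens=128,
    temperature=0.2,
)

model = genai.GenerativeModel('gemini-pro')
response = model.generate_content(
    'Please summarise this document: ...',
    generation_config=config,
)
print(response.text)
```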
2 changes: 1 addition & 1 deletion google/generativeai/version.py
@@ -14,4 +14,4 @@
# limitations under the License.
from __future__ import annotations

__version__ = "0.3.0"
__version__ = "0.3.1"
