@@ -81,7 +81,7 @@ To use the Gemini API, you'll need an API key. If you don't already have one, cr
Interact with Gemini's API:

```php
- $result = $container->get('openai')->geminiPro()->generateContent('Hello');
+ $result = $container->get('gemini')->geminiPro()->generateContent('Hello');

$result->text(); // Hello! How can I assist you today?
```
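The snippets in this patch assume a container that already exposes the client under the new `gemini` service id; the registration itself is not shown in the diff. A minimal wiring sketch, assuming the underlying google-gemini-php `Gemini::client()` factory and a PHP-DI container (both assumptions, not this package's documented setup):

```php
use DI\Container;

// Hypothetical wiring: build the underlying google-gemini-php client and
// register it under the 'gemini' id used throughout this README.
$client = Gemini::client(getenv('GEMINI_API_KEY'));

$container = new Container();
$container->set('gemini', $client);

// The README examples then resolve as expected:
$result = $container->get('gemini')->geminiPro()->generateContent('Hello');
```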
@@ -92,7 +92,7 @@ $result->text(); // Hello! How can I assist you today?
Generate a response from the model given an input message. If the input contains only text, use the `gemini-pro` model.

```php
- $result = $container->get('openai')->geminiPro()->generateContent('Hello');
+ $result = $container->get('gemini')->geminiPro()->generateContent('Hello');

$result->text(); // Hello! How can I assist you today?
```
@@ -102,7 +102,7 @@ If the input contains both text and image, use the `gemini-pro-vision` model.

```php

- $result = $container->get('openai')->geminiProVision()
+ $result = $container->get('gemini')->geminiProVision()
    ->generateContent([
        'What is this picture?',
        new Blob(
@@ -119,7 +119,7 @@ $result->text(); // The picture shows a table with a white tablecloth. On the t
Using Gemini, you can build freeform conversations across multiple turns.

```php
- $chat = $container->get('openai')->chat()
+ $chat = $container->get('gemini')->chat()
    ->startChat(history: [
        Content::parse(part: 'The stories you write about what I have to say should be one line. Is that clear?'),
        Content::parse(part: 'Yes, I understand. The stories I write about your input should be one line long.', role: Role::MODEL)
@@ -138,7 +138,7 @@ echo $response->text(); // In the heart of England's lush countryside, amidst em
By default, the model returns a response after completing the entire generation process. You can achieve faster interactions by not waiting for the entire result, and instead use streaming to handle partial results.

```php
- $stream = $container->get('openai')->geminiPro()
+ $stream = $container->get('gemini')->geminiPro()
    ->streamGenerateContent('Write a long story about a magic backpack.');

foreach ($stream as $response) {
@@ -150,7 +150,7 @@ foreach ($stream as $response) {
When using long prompts, it might be useful to count tokens before sending any content to the model.

```php
- $response = $container->get('openai')->geminiPro()
+ $response = $container->get('gemini')->geminiPro()
    ->countTokens('Write a story about a magic backpack.');

echo $response->totalTokens; // 9
@@ -188,7 +188,7 @@ $generationConfig = new GenerationConfig(
    topK: 10
);

- $generativeModel = $container->get('openai')->geminiPro()
+ $generativeModel = $container->get('gemini')->geminiPro()
    ->withSafetySetting($safetySettingDangerousContent)
    ->withSafetySetting($safetySettingHateSpeech)
    ->withGenerationConfig($generationConfig)
@@ -201,7 +201,7 @@ Embedding is a technique used to represent information as a list of floating poi
Use the `embedding-001` model with either `embedContent` or `batchEmbedContents`:

```php
- $response = $container->get('openai')->embeddingModel()
+ $response = $container->get('gemini')->embeddingModel()
    ->embedContent("Write a story about a magic backpack.");

print_r($response->embedding->values);
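The hunk above covers only `embedContent`. For the `batchEmbedContents` variant the prose mentions, a hedged sketch — the variadic argument style and the `embeddings` property are assumptions based on the underlying google-gemini-php client, not shown in this patch:

```php
// Sketch: embed several inputs in one request; one embedding is
// returned per input, in order.
$response = $container->get('gemini')->embeddingModel()
    ->batchEmbedContents(
        'Write a story about a magic backpack.',
        'Summarize the story in one sentence.'
    );

foreach ($response->embeddings as $embedding) {
    print_r($embedding->values); // a float vector per input
}
```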
@@ -225,7 +225,7 @@ print_r($response->embedding->values);
Use list models to see the available Gemini models:

```php
- $response = $container->get('openai')->models()->list();
+ $response = $container->get('gemini')->models()->list();

$response->models;
//[
@@ -260,7 +260,7 @@ $response->models;
Get information about a model, such as version, display name, input token limit, etc.
```php

- $response = $container->get('openai')->models()->retrieve(ModelType::GEMINI_PRO);
+ $response = $container->get('gemini')->models()->retrieve(ModelType::GEMINI_PRO);

$response->model;
//Gemini\Data\Model Object
@@ -287,7 +287,7 @@ All responses have a `fake()` method that allows you to easily create a re
use Gemini\Testing\ClientFake;
use Gemini\Responses\GenerativeModel\GenerateContentResponse;

- $container->get('openai')->fake([
+ $container->get('gemini')->fake([
    GenerateContentResponse::fake([
        'candidates' => [
            [
@@ -303,7 +303,7 @@ $container->get('openai')->fake([
    ]),
]);

- $result = $container->get('openai')->geminiPro()->generateContent('test');
+ $result = $container->get('gemini')->geminiPro()->generateContent('test');

expect($result->text())->toBe('success');
```
@@ -314,11 +314,11 @@ In case of a streamed response, you can optionally provide a resource holding the
use Gemini\Testing\ClientFake;
use Gemini\Responses\GenerativeModel\GenerateContentResponse;

- $container->get('openai')->fake([
+ $container->get('gemini')->fake([
    GenerateContentResponse::fakeStream(),
]);

- $result = $container->get('openai')->geminiPro()->streamGenerateContent('Hello');
+ $result = $container->get('gemini')->geminiPro()->streamGenerateContent('Hello');

expect($result->getIterator()->current())
    ->text()->toBe('In the bustling city of Aethelwood, where the cobblestone streets whispered');
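Per the note in the hunk header above, the fake stream can be backed by a resource holding the response data. A sketch, assuming `fakeStream()` accepts an open stream resource as its argument and using a hypothetical fixture path:

```php
// Hypothetical fixture: a file containing a recorded streamed response.
$stream = fopen(__DIR__ . '/fixtures/gemini-stream.json', 'r');

$container->get('gemini')->fake([
    GenerateContentResponse::fakeStream($stream),
]);
```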
@@ -328,43 +328,43 @@ After the requests have been sent, there are various methods to ensure that the e

```php
// assert list models request was sent
- $container->get('openai')->models()->assertSent(callback: function ($method) {
+ $container->get('gemini')->models()->assertSent(callback: function ($method) {
    return $method === 'list';
});
// or
- $container->get('openai')->assertSent(resource: Models::class, callback: function ($method) {
+ $container->get('gemini')->assertSent(resource: Models::class, callback: function ($method) {
    return $method === 'list';
});

- $container->get('openai')->geminiPro()->assertSent(function (string $method, array $parameters) {
+ $container->get('gemini')->geminiPro()->assertSent(function (string $method, array $parameters) {
    return $method === 'generateContent' &&
        $parameters[0] === 'Hello';
});
// or
- $container->get('openai')->assertSent(resource: GenerativeModel::class, model: ModelType::GEMINI_PRO, callback: function (string $method, array $parameters) {
+ $container->get('gemini')->assertSent(resource: GenerativeModel::class, model: ModelType::GEMINI_PRO, callback: function (string $method, array $parameters) {
    return $method === 'generateContent' &&
        $parameters[0] === 'Hello';
});

// assert 2 generative model requests were sent
- $container->get('openai')->assertSent(resource: GenerativeModel::class, model: ModelType::GEMINI_PRO, callback: 2);
+ $container->get('gemini')->assertSent(resource: GenerativeModel::class, model: ModelType::GEMINI_PRO, callback: 2);
// or
- $container->get('openai')->geminiPro()->assertSent(2);
+ $container->get('gemini')->geminiPro()->assertSent(2);

// assert no generative model requests were sent
- $container->get('openai')->assertNotSent(resource: GenerativeModel::class, model: ModelType::GEMINI_PRO);
+ $container->get('gemini')->assertNotSent(resource: GenerativeModel::class, model: ModelType::GEMINI_PRO);
// or
- $container->get('openai')->geminiPro()->assertNotSent();
+ $container->get('gemini')->geminiPro()->assertNotSent();

// assert no requests were sent
- $container->get('openai')->assertNothingSent();
+ $container->get('gemini')->assertNothingSent();
```

To write tests expecting the API request to fail, you can provide a `Throwable` object as the response.

```php
- $container->get('openai')->fake([
+ $container->get('gemini')->fake([
    new ErrorException([
        'message' => 'The model `gemini-basic` does not exist',
        'status' => 'INVALID_ARGUMENT',
@@ -373,5 +373,5 @@ $container->get('openai')->fake([
]);

// the `ErrorException` will be thrown
- $container->get('openai')->geminiPro()->generateContent('test');
+ $container->get('gemini')->geminiPro()->generateContent('test');
```