Skip to content

Commit

Permalink
🌿 Fern Regeneration -- May 15, 2024 (#155)
Browse files Browse the repository at this point in the history
* SDK regeneration

* Fix type error

* Snap

---------

Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Co-authored-by: Billy Trend <billy@cohere.com>
  • Loading branch information
fern-api[bot] and billytrend-cohere authored May 15, 2024
1 parent b72be12 commit 61db210
Show file tree
Hide file tree
Showing 283 changed files with 1,859 additions and 1,677 deletions.
17 changes: 11 additions & 6 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,30 +1,35 @@
{
"name": "cohere-ai",
"version": "7.9.5",
"version": "7.10.0",
"private": false,
"repository": "https://github.com/cohere-ai/cohere-typescript",
"main": "./index.js",
"types": "./index.d.ts",
"scripts": {
"format": "prettier . --write --ignore-unknown",
"build": "tsc",
"prepack": "cp -rv dist/. ."
"prepack": "cp -rv dist/. .",
"test": "jest"
},
"dependencies": {
"url-join": "4.0.1",
"form-data": "4.0.0",
"formdata-node": "^6.0.3",
"node-fetch": "2.7.0",
"qs": "6.11.2",
"js-base64": "3.7.2"
"js-base64": "3.7.2",
"form-data-encoder": "^4.0.2"
},
"devDependencies": {
"@types/url-join": "4.0.1",
"@types/qs": "6.9.8",
"@types/node-fetch": "2.6.9",
"jest": "^29.7.0",
"@types/jest": "29.5.5",
"ts-jest": "^29.1.2",
"jest-environment-jsdom": "29.7.0",
"@types/node": "17.0.33",
"prettier": "2.7.1",
"typescript": "4.6.4",
"jest": "^29.7.0",
"ts-jest": "^29.1.2"
"typescript": "4.6.4"
}
}
20 changes: 10 additions & 10 deletions src/Client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ export class CohereClient {
if (_response.error.reason === "status-code") {
switch (_response.error.statusCode) {
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
default:
throw new errors.CohereError({
statusCode: _response.error.statusCode,
Expand Down Expand Up @@ -181,7 +181,7 @@ export class CohereClient {
if (_response.error.reason === "status-code") {
switch (_response.error.statusCode) {
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
default:
throw new errors.CohereError({
statusCode: _response.error.statusCode,
Expand Down Expand Up @@ -269,7 +269,7 @@ export class CohereClient {
case 400:
throw new Cohere.BadRequestError(_response.error.body);
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
case 500:
throw new Cohere.InternalServerError(_response.error.body);
default:
Expand Down Expand Up @@ -361,7 +361,7 @@ export class CohereClient {
case 400:
throw new Cohere.BadRequestError(_response.error.body);
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
case 500:
throw new Cohere.InternalServerError(_response.error.body);
default:
Expand Down Expand Up @@ -452,7 +452,7 @@ export class CohereClient {
case 400:
throw new Cohere.BadRequestError(_response.error.body);
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
case 500:
throw new Cohere.InternalServerError(_response.error.body);
default:
Expand Down Expand Up @@ -533,7 +533,7 @@ export class CohereClient {
if (_response.error.reason === "status-code") {
switch (_response.error.statusCode) {
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
default:
throw new errors.CohereError({
statusCode: _response.error.statusCode,
Expand Down Expand Up @@ -647,7 +647,7 @@ export class CohereClient {
case 400:
throw new Cohere.BadRequestError(_response.error.body);
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
case 500:
throw new Cohere.InternalServerError(_response.error.body);
default:
Expand Down Expand Up @@ -730,7 +730,7 @@ export class CohereClient {
if (_response.error.reason === "status-code") {
switch (_response.error.statusCode) {
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
default:
throw new errors.CohereError({
statusCode: _response.error.statusCode,
Expand Down Expand Up @@ -812,7 +812,7 @@ export class CohereClient {
case 400:
throw new Cohere.BadRequestError(_response.error.body);
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
case 500:
throw new Cohere.InternalServerError(_response.error.body);
default:
Expand Down Expand Up @@ -892,7 +892,7 @@ export class CohereClient {
if (_response.error.reason === "status-code") {
switch (_response.error.statusCode) {
case 429:
throw new Cohere.TooManyRequestsError(_response.error.body);
throw new Cohere.TooManyRequestsError(_response.error.body as Cohere.TooManyRequestsErrorBody);
default:
throw new errors.CohereError({
statusCode: _response.error.statusCode,
Expand Down
90 changes: 47 additions & 43 deletions src/api/client/requests/ChatRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,71 +2,37 @@
* This file was auto-generated by Fern from our API Definition.
*/

import * as Cohere from "../..";
import * as Cohere from "../../index";

/**
* @example
* {
* message: "Can you give me a global market overview of solar panels?",
* stream: false,
* chatHistory: [{
* role: Cohere.ChatMessageRole.Chatbot,
* message: "Hi!"
* }, {
* role: Cohere.ChatMessageRole.Chatbot,
* message: "How can I help you today?"
* }],
* promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
* temperature: 0.3
* }
*
* @example
* {
* message: "Can you give me a global market overview of solar panels?",
* stream: false,
* chatHistory: [{
* role: Cohere.ChatMessageRole.Chatbot,
* message: "Hi!"
* }, {
* role: Cohere.ChatMessageRole.Chatbot,
* message: "How can I help you today?"
* }],
* promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
* temperature: 0.3
* }
*
* @example
* {
* message: "Can you give me a global market overview of solar panels?",
* stream: false,
* chatHistory: [{
* role: Cohere.ChatMessageRole.Chatbot,
* message: "Hi!"
* }, {
* role: Cohere.ChatMessageRole.Chatbot,
* message: "How can I help you today?"
* }],
* promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
* temperature: 0.3
* }
*/
export interface ChatRequest {
/**
* Text input for the model to respond to.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
message: string;
/**
* Defaults to `command-r`.
* Defaults to `command-r-plus`.
*
* The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
* Compatible Deployments: Cohere Platform, Private Deployments
*
*/
model?: string;
/**
* When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.
*
* The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
preamble?: string;
Expand All @@ -76,13 +42,15 @@ export interface ChatRequest {
* Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
*
* The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
chatHistory?: Cohere.ChatMessage[];
chatHistory?: Cohere.Message[];
/**
* An alternative to `chat_history`.
*
 * Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non-empty string.
* Compatible Deployments: Cohere Platform
*
*/
conversationId?: string;
Expand All @@ -96,20 +64,23 @@ export interface ChatRequest {
* With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
*
* With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
* Compatible Deployments: Cohere Platform Only AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker, Private Deployments
*
*/
promptTruncation?: Cohere.ChatRequestPromptTruncation;
/**
* Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one.
*
 * When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
* Compatible Deployments: Cohere Platform
*
*/
connectors?: Cohere.ChatConnector[];
/**
* Defaults to `false`.
*
* When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
searchQueriesOnly?: boolean;
Expand All @@ -131,69 +102,101 @@ export interface ChatRequest {
* An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
*
* See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
documents?: Cohere.ChatDocument[];
/**
* Defaults to `"accurate"`.
*
* Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
citationQuality?: Cohere.ChatRequestCitationQuality;
/**
* Defaults to `0.3`.
*
* A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
*
* Randomness can be further maximized by increasing the value of the `p` parameter.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
temperature?: number;
/**
* The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
maxTokens?: number;
/**
* The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer.
*
* Input will be truncated according to the `prompt_truncation` parameter.
* Compatible Deployments: Cohere Platform
*
*/
maxInputTokens?: number;
/**
* Ensures only the top `k` most likely tokens are considered for generation at each step.
* Defaults to `0`, min value of `0`, max value of `500`.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
k?: number;
/**
* Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
* Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
p?: number;
/** If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed. */
/**
* If specified, the backend will make a best effort to sample tokens
* deterministically, such that repeated requests with the same
* seed and parameters should return the same result. However,
* determinism cannot be totally guaranteed.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
seed?: number;
/**
 * A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
stopSequences?: string[];
/**
* Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
*
* Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
frequencyPenalty?: number;
/**
* Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
*
* Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
presencePenalty?: number;
/** When enabled, the user's prompt will be sent to the model without any pre-processing. */
/**
* When enabled, the user's prompt will be sent to the model without
* any pre-processing.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
rawPrompting?: boolean;
/** The prompt is returned in the `prompt` response field when this is enabled. */
returnPrompt?: boolean;
/**
* A list of available tools (functions) that the model may suggest invoking before producing a text response.
*
* When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
tools?: Cohere.Tool[];
Expand All @@ -219,7 +222,8 @@ export interface ChatRequest {
* ]
* ```
* **Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
*
*/
toolResults?: Cohere.ChatRequestToolResultsItem[];
toolResults?: Cohere.ToolResult[];
}
Loading

0 comments on commit 61db210

Please sign in to comment.