From c3374c5df340eabb9f798e22a186049cfd06f50a Mon Sep 17 00:00:00 2001
From: Lucain
Date: Tue, 5 Dec 2023 15:20:01 +0100
Subject: [PATCH] Standardize HF_ACCESS_TOKEN -> HF_TOKEN (#391)

Same as https://github.com/xenova/transformers.js/pull/431.

Related to [slack thread](https://huggingface.slack.com/archives/C021H1P1HKR/p1701775450614369?thread_ts=1701719404.424999&cid=C021H1P1HKR) (internal).

This PR standardizes the name of the environment variable used to make requests with an HF API token. The goal is to harmonize it across the HF ecosystem. From what I've seen, `HF_ACCESS_TOKEN` was not an "official" environment variable but simply the one used in docs and tests. Updating the name should not break anything in the libraries.

**TODO before merging:** update `HF_ACCESS_TOKEN` to `HF_TOKEN` in the GitHub repo secrets.

cc @xenova @julien-c

Co-authored-by: Eliott C
---
 .github/workflows/lint-and-test.yml           |  6 +++---
 README.md                                     | 18 +++++++++---------
 packages/agents/README.md                     |  2 +-
 packages/agents/test/HfAgent.spec.ts          | 18 +++++++++---------
 packages/inference/README.md                  |  2 +-
 packages/inference/test/HfInference.spec.ts   |  6 +++---
 packages/inference/test/vcr.ts                |  2 +-
 .../src/tasks/audio-classification/about.md   |  2 +-
 .../tasks/src/tasks/audio-to-audio/about.md   |  2 +-
 .../automatic-speech-recognition/about.md     |  2 +-
 .../tasks/src/tasks/conversational/about.md   |  2 +-
 .../src/tasks/image-classification/about.md   |  2 +-
 .../src/tasks/image-segmentation/about.md     |  2 +-
 .../tasks/src/tasks/image-to-image/about.md   |  2 +-
 .../tasks/src/tasks/image-to-text/about.md    |  2 +-
 .../tasks/src/tasks/summarization/about.md    |  2 +-
 .../src/tasks/text-classification/about.md    |  2 +-
 .../tasks/src/tasks/text-generation/about.md  |  2 +-
 .../tasks/src/tasks/text-to-image/about.md    |  2 +-
 .../tasks/src/tasks/text-to-speech/about.md   |  2 +-
 packages/tasks/src/tasks/translation/about.md |  2 +-
 21 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/.github/workflows/lint-and-test.yml b/.github/workflows/lint-and-test.yml
index fbe5b27e0..09c4d0ffe 100644
--- a/.github/workflows/lint-and-test.yml
+++ b/.github/workflows/lint-and-test.yml
@@ -50,12 +50,12 @@ jobs:
       - name: Test
         run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test
         env:
-          HF_ACCESS_TOKEN: ${{ secrets.HF_ACCESS_TOKEN }}
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
 
       - name: Test in browser
         run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test:browser
         env:
-          HF_ACCESS_TOKEN: ${{ secrets.HF_ACCESS_TOKEN }}
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
 
       - name: E2E - start mock npm registry
         run: |
@@ -86,7 +86,7 @@ jobs:
           pnpm i --ignore-workspace --registry http://localhost:4874/
           pnpm start
         env:
-          token: ${{ secrets.HF_ACCESS_TOKEN }}
+          token: ${{ secrets.HF_TOKEN }}
 
       - name: E2E test - svelte app build
         working-directory: e2e/svelte
diff --git a/README.md b/README.md
index 755bf891b..74fc628db 100644
--- a/README.md
+++ b/README.md
@@ -105,9 +105,9 @@ Get your HF access token in your [account settings](https://huggingface.co/setti
 ```ts
 import { HfInference } from "@huggingface/inference";
 
-const HF_ACCESS_TOKEN = "hf_...";
+const HF_TOKEN = "hf_...";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 
 // You can also omit "model" to use the recommended model for the task
 await inference.translation({
@@ -137,11 +137,11 @@ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the
 ```ts
 import {HfAgent, LLMFromHub, defaultTools} from '@huggingface/agents';
 
-const HF_ACCESS_TOKEN = "hf_...";
+const HF_TOKEN = "hf_...";
 
 const agent = new HfAgent(
-  HF_ACCESS_TOKEN,
-  LLMFromHub(HF_ACCESS_TOKEN),
+  HF_TOKEN,
+  LLMFromHub(HF_TOKEN),
   [...defaultTools]
 );
 
@@ -162,16 +162,16 @@ console.log(messages);
 ```ts
 import { createRepo, uploadFile, deleteFiles } from "@huggingface/hub";
 
-const HF_ACCESS_TOKEN = "hf_...";
+const HF_TOKEN = "hf_...";
 
 await createRepo({
   repo: "my-user/nlp-model", // or {type: "model", name: "my-user/nlp-test"},
-  credentials: {accessToken: HF_ACCESS_TOKEN}
+  credentials: {accessToken: HF_TOKEN}
 });
 
 await uploadFile({
   repo: "my-user/nlp-model",
-  credentials: {accessToken: HF_ACCESS_TOKEN},
+  credentials: {accessToken: HF_TOKEN},
   // Can work with native File in browsers
   file: {
     path: "pytorch_model.bin",
@@ -181,7 +181,7 @@ await uploadFile({
 
 await deleteFiles({
   repo: {type: "space", name: "my-user/my-space"}, // or "spaces/my-user/my-space"
-  credentials: {accessToken: HF_ACCESS_TOKEN},
+  credentials: {accessToken: HF_TOKEN},
   paths: ["README.md", ".gitattributes"]
 });
 ```
diff --git a/packages/agents/README.md b/packages/agents/README.md
index 556a5fab6..2b7aa0c8f 100644
--- a/packages/agents/README.md
+++ b/packages/agents/README.md
@@ -131,7 +131,7 @@ const uppercaseTool: Tool = {
 };
 
 // pass it in the agent
-const agent = new HfAgent(process.env.HF_ACCESS_TOKEN,
+const agent = new HfAgent(process.env.HF_TOKEN,
                           LLMFromHub("hf_...", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"),
                           [uppercaseTool, ...defaultTools]);
 ```
diff --git a/packages/agents/test/HfAgent.spec.ts b/packages/agents/test/HfAgent.spec.ts
index 6fc940338..5b99e0a1f 100644
--- a/packages/agents/test/HfAgent.spec.ts
+++ b/packages/agents/test/HfAgent.spec.ts
@@ -4,20 +4,20 @@ import type { Data } from "../src/types";
 import type { HfInference } from "@huggingface/inference";
 
 const env = import.meta.env;
-if (!env.HF_ACCESS_TOKEN) {
-	console.warn("Set HF_ACCESS_TOKEN in the env to run the tests for better rate limits");
+if (!env.HF_TOKEN) {
+	console.warn("Set HF_TOKEN in the env to run the tests for better rate limits");
 }
 
 describe("HfAgent", () => {
 	it("You can create an agent from the hub", async () => {
-		const llm = LLMFromHub(env.HF_ACCESS_TOKEN, "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5");
-		const agent = new HfAgent(env.HF_ACCESS_TOKEN, llm);
+		const llm = LLMFromHub(env.HF_TOKEN, "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5");
+		const agent = new HfAgent(env.HF_TOKEN, llm);
 		expect(agent).toBeDefined();
 	});
 
 	it("You can create an agent from an endpoint", async () => {
-		const llm = LLMFromEndpoint(env.HF_ACCESS_TOKEN ?? "", "endpoint");
-		const agent = new HfAgent(env.HF_ACCESS_TOKEN, llm);
+		const llm = LLMFromEndpoint(env.HF_TOKEN ?? "", "endpoint");
+		const agent = new HfAgent(env.HF_TOKEN, llm);
 		expect(agent).toBeDefined();
 	});
 
@@ -42,7 +42,7 @@ describe("HfAgent", () => {
 		},
 	};
 
-	const agent = new HfAgent(env.HF_ACCESS_TOKEN, undefined, [uppercaseTool, ...defaultTools]);
+	const agent = new HfAgent(env.HF_TOKEN, undefined, [uppercaseTool, ...defaultTools]);
 	const code = `
 async function generate() {
 	const output = uppercase("hello friends");
@@ -61,7 +61,7 @@ async function generate() {
 	message(output);
 }`;
 
-	const agent = new HfAgent(env.HF_ACCESS_TOKEN);
+	const agent = new HfAgent(env.HF_TOKEN);
 
 	await agent.evaluateCode(code).then((output) => {
 		expect(output.length).toBeGreaterThan(0);
@@ -75,7 +75,7 @@ async function generate() {
 	toolThatDoesntExist(aaa);
 }`;
 
-	const hf = new HfAgent(env.HF_ACCESS_TOKEN);
+	const hf = new HfAgent(env.HF_TOKEN);
 
 	await hf.evaluateCode(code).then((output) => {
 		expect(output.length).toBeGreaterThan(0);
diff --git a/packages/inference/README.md b/packages/inference/README.md
index 431fc15c4..274650b76 100644
--- a/packages/inference/README.md
+++ b/packages/inference/README.md
@@ -504,7 +504,7 @@ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the
 ## Running tests
 
 ```console
-HF_ACCESS_TOKEN="your access token" pnpm run test
+HF_TOKEN="your access token" pnpm run test
 ```
 
 ## Finding appropriate models
diff --git a/packages/inference/test/HfInference.spec.ts b/packages/inference/test/HfInference.spec.ts
index 280dd049a..60ca52357 100644
--- a/packages/inference/test/HfInference.spec.ts
+++ b/packages/inference/test/HfInference.spec.ts
@@ -8,15 +8,15 @@ import { readTestFile } from "./test-files";
 const TIMEOUT = 60000 * 3;
 const env = import.meta.env;
 
-if (!env.HF_ACCESS_TOKEN) {
-	console.warn("Set HF_ACCESS_TOKEN in the env to run the tests for better rate limits");
+if (!env.HF_TOKEN) {
+	console.warn("Set HF_TOKEN in the env to run the tests for better rate limits");
 }
 
 describe.concurrent(
 	"HfInference",
 	() => {
 		// Individual tests can be ran without providing an api key, however running all tests without an api key will result in rate limiting error.
-		const hf = new HfInference(env.HF_ACCESS_TOKEN);
+		const hf = new HfInference(env.HF_TOKEN);
 
 		it("throws error if model does not exist", () => {
 			expect(
diff --git a/packages/inference/test/vcr.ts b/packages/inference/test/vcr.ts
index 96cabf79e..6f840f44f 100644
--- a/packages/inference/test/vcr.ts
+++ b/packages/inference/test/vcr.ts
@@ -27,7 +27,7 @@
 if (env.VCR_MODE) {
 	VCR_MODE = env.VCR_MODE as MODE;
 } else {
-	VCR_MODE = env.HF_ACCESS_TOKEN ? MODE.DISABLED : MODE.PLAYBACK;
+	VCR_MODE = env.HF_TOKEN ? MODE.DISABLED : MODE.PLAYBACK;
 }
 
 const originalFetch = globalThis.fetch;
diff --git a/packages/tasks/src/tasks/audio-classification/about.md b/packages/tasks/src/tasks/audio-classification/about.md
index 9bb030720..f135ea5cf 100644
--- a/packages/tasks/src/tasks/audio-classification/about.md
+++ b/packages/tasks/src/tasks/audio-classification/about.md
@@ -53,7 +53,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.audioClassification({
   data: await (await fetch("sample.flac")).blob(),
   model: "facebook/mms-lid-126",
diff --git a/packages/tasks/src/tasks/audio-to-audio/about.md b/packages/tasks/src/tasks/audio-to-audio/about.md
index e56275277..d63de3a18 100644
--- a/packages/tasks/src/tasks/audio-to-audio/about.md
+++ b/packages/tasks/src/tasks/audio-to-audio/about.md
@@ -35,7 +35,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.audioToAudio({
   data: await (await fetch("sample.flac")).blob(),
   model: "speechbrain/sepformer-wham",
diff --git a/packages/tasks/src/tasks/automatic-speech-recognition/about.md b/packages/tasks/src/tasks/automatic-speech-recognition/about.md
index 5ce659073..7873d9c6d 100644
--- a/packages/tasks/src/tasks/automatic-speech-recognition/about.md
+++ b/packages/tasks/src/tasks/automatic-speech-recognition/about.md
@@ -54,7 +54,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to t
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.automaticSpeechRecognition({
   data: await (await fetch("sample.flac")).blob(),
   model: "openai/whisper-large-v2",
diff --git a/packages/tasks/src/tasks/conversational/about.md b/packages/tasks/src/tasks/conversational/about.md
index d2141ba20..8bcb74de0 100644
--- a/packages/tasks/src/tasks/conversational/about.md
+++ b/packages/tasks/src/tasks/conversational/about.md
@@ -34,7 +34,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.conversational({
   model: "facebook/blenderbot-400M-distill",
   inputs: "Going to the movies tonight - any suggestions?",
diff --git a/packages/tasks/src/tasks/image-classification/about.md b/packages/tasks/src/tasks/image-classification/about.md
index 593f3b1dd..04169331f 100644
--- a/packages/tasks/src/tasks/image-classification/about.md
+++ b/packages/tasks/src/tasks/image-classification/about.md
@@ -29,7 +29,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to c
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.imageClassification({
   data: await (await fetch("https://picsum.photos/300/300")).blob(),
   model: "microsoft/resnet-50",
diff --git a/packages/tasks/src/tasks/image-segmentation/about.md b/packages/tasks/src/tasks/image-segmentation/about.md
index 3f26fb8ca..4a8a45a19 100644
--- a/packages/tasks/src/tasks/image-segmentation/about.md
+++ b/packages/tasks/src/tasks/image-segmentation/about.md
@@ -45,7 +45,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.imageSegmentation({
   data: await (await fetch("https://picsum.photos/300/300")).blob(),
   model: "facebook/detr-resnet-50-panoptic",
diff --git a/packages/tasks/src/tasks/image-to-image/about.md b/packages/tasks/src/tasks/image-to-image/about.md
index 02611896a..63f490f82 100644
--- a/packages/tasks/src/tasks/image-to-image/about.md
+++ b/packages/tasks/src/tasks/image-to-image/about.md
@@ -43,7 +43,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.imageToImage({
   data: await (await fetch("image")).blob(),
   model: "timbrooks/instruct-pix2pix",
diff --git a/packages/tasks/src/tasks/image-to-text/about.md b/packages/tasks/src/tasks/image-to-text/about.md
index a209ae22b..61ee79829 100644
--- a/packages/tasks/src/tasks/image-to-text/about.md
+++ b/packages/tasks/src/tasks/image-to-text/about.md
@@ -48,7 +48,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.imageToText({
   data: await (await fetch("https://picsum.photos/300/300")).blob(),
   model: "Salesforce/blip-image-captioning-base",
diff --git a/packages/tasks/src/tasks/summarization/about.md b/packages/tasks/src/tasks/summarization/about.md
index ec82c946f..79c585217 100644
--- a/packages/tasks/src/tasks/summarization/about.md
+++ b/packages/tasks/src/tasks/summarization/about.md
@@ -25,7 +25,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 
 const inputs = "Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017.";
 
diff --git a/packages/tasks/src/tasks/text-classification/about.md b/packages/tasks/src/tasks/text-classification/about.md
index 448eb7138..627943ccd 100644
--- a/packages/tasks/src/tasks/text-classification/about.md
+++ b/packages/tasks/src/tasks/text-classification/about.md
@@ -117,7 +117,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.conversational({
   model: "distilbert-base-uncased-finetuned-sst-2-english",
   inputs: "I love this movie!",
diff --git a/packages/tasks/src/tasks/text-generation/about.md b/packages/tasks/src/tasks/text-generation/about.md
index ad62c1923..9067a471c 100644
--- a/packages/tasks/src/tasks/text-generation/about.md
+++ b/packages/tasks/src/tasks/text-generation/about.md
@@ -72,7 +72,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.conversational({
   model: "distilbert-base-uncased-finetuned-sst-2-english",
   inputs: "I love this movie!",
diff --git a/packages/tasks/src/tasks/text-to-image/about.md b/packages/tasks/src/tasks/text-to-image/about.md
index e7c79fb43..411c2ab7f 100644
--- a/packages/tasks/src/tasks/text-to-image/about.md
+++ b/packages/tasks/src/tasks/text-to-image/about.md
@@ -41,7 +41,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.textToImage({
   model: "stabilityai/stable-diffusion-2",
   inputs: "award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]",
diff --git a/packages/tasks/src/tasks/text-to-speech/about.md b/packages/tasks/src/tasks/text-to-speech/about.md
index 564312e12..22638b0d6 100644
--- a/packages/tasks/src/tasks/text-to-speech/about.md
+++ b/packages/tasks/src/tasks/text-to-speech/about.md
@@ -47,7 +47,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.textToSpeech({
   model: "facebook/mms-tts",
   inputs: "text to generate speech from",
diff --git a/packages/tasks/src/tasks/translation/about.md b/packages/tasks/src/tasks/translation/about.md
index 39755a8db..23fc48576 100644
--- a/packages/tasks/src/tasks/translation/about.md
+++ b/packages/tasks/src/tasks/translation/about.md
@@ -37,7 +37,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
 ```javascript
 import { HfInference } from "@huggingface/inference";
 
-const inference = new HfInference(HF_ACCESS_TOKEN);
+const inference = new HfInference(HF_TOKEN);
 await inference.translation({
   model: "t5-base",
   inputs: "My name is Wolfgang and I live in Berlin",
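
For downstream setups that still export the old variable, a minimal migration sketch (not part of the patch above) is to prefer the standardized `HF_TOKEN` and fall back to the legacy `HF_ACCESS_TOKEN`; the `getHfToken` helper name is hypothetical:

```ts
// Hypothetical helper, shown only as a migration sketch: prefer the
// standardized HF_TOKEN variable, but fall back to the legacy
// HF_ACCESS_TOKEN so existing environments keep working during the rename.
function getHfToken(
	env: Record<string, string | undefined> = process.env
): string | undefined {
	return env.HF_TOKEN ?? env.HF_ACCESS_TOKEN;
}

// Usage, e.g. with @huggingface/inference:
// const hf = new HfInference(getHfToken());
```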