In this project, I created a simple Prompt + LLM + Parser app.
To install dependencies:
bun install
Create a .env file with the following contents:
PORT=3000
OPENAI_API_KEY="<your-api-key>"
Update the test.rest file with the following request:
POST http://localhost:3000
content-type: application/json
{
"input": "<your-question>"
}
To run:
bun run start
/**
 * Ask the LLM a question and return its answer as plain text.
 *
 * Builds a prompt from PROMPT_TEMPLATE, runs it through the shared
 * `model`, and parses the model output into a plain string.
 *
 * @param input - The question to send to the LLM
 * @returns The model's answer as a plain string
 */
export async function getResponseFromLLM(input: string): Promise<string> {
  // Create a prompt template from the module-level template string
  // NOTE(review): assumes PROMPT_TEMPLATE declares a {question} variable — confirm
  const prompt = PromptTemplate.fromTemplate(PROMPT_TEMPLATE)
  // Parser that collapses the model's message output into a plain string
  const parser = new StringOutputParser()
  // Compose prompt -> model -> parser into a single runnable chain
  const chain = prompt.pipe(model).pipe(parser)
  // Invoke the chain; the async function resolves with the parsed answer
  return chain.invoke({ question: input })
}