diff --git a/README.md b/README.md
index 7c6b445..964a1df 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,7 @@
-# township-small-business-bot
\ No newline at end of file
+## API Usage
+
+To test the API locally, ensure your server is running and use the following `curl` command:
+
+```sh
+curl -d '{ "model": "llama3", "prompt": "Why is the sky blue?" }' -H "Content-Type: application/json" http://localhost:11434/api/generate
+```
diff --git a/api_server.py b/api_server.py
new file mode 100644
index 0000000..9c880fc
--- /dev/null
+++ b/api_server.py
@@ -0,0 +1,22 @@
+from flask import Flask, request, jsonify
+
+app = Flask(__name__)
+
+@app.route('/api/generate', methods=['POST'])
+def generate_response():
+    data = request.json
+    model = data.get('model')
+    prompt = data.get('prompt')
+    if model and prompt:
+        # Mock response for demonstration
+        response = {
+            "model": model,
+            "prompt": prompt,
+            "response": "The sky is blue due to the scattering of sunlight by the atmosphere."
+        }
+        return jsonify(response), 200
+    else:
+        return jsonify({"error": "Invalid input"}), 400
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=11434)
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..40b21aa
--- /dev/null
+++ b/app.py
@@ -0,0 +1,14 @@
+from flask import Flask, request, jsonify
+
+app = Flask(__name__)
+
+@app.route('/api/generate', methods=['POST'])
+def generate():
+    data = request.json
+    model = data.get('model')
+    prompt = data.get('prompt')
+    response = f"This is a mock response for the model {model} with prompt: {prompt}"
+    return jsonify({"response": response})
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=11434)
diff --git a/ollama_api.py b/ollama_api.py
new file mode 100644
index 0000000..80f1571
--- /dev/null
+++ b/ollama_api.py
@@ -0,0 +1,10 @@
+import ollama
+from fastapi import FastAPI
+
+app = FastAPI()
+
+@app.post("/ollama")
+def generate_response(prompt: str):
+    # Ask the local Ollama server to generate a completion for the prompt
+    model_response = ollama.generate(model="llama3", prompt=prompt, options={"temperature": 0.8})
+    return {"response": model_response["response"]}
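Not part of the diff above: a quick way to sanity-check the mock endpoint without issuing the `curl` request by hand is Flask's built-in test client, which calls the app in-process so no server needs to be listening on port 11434. The sketch below assumes the `api_server.py` module added in this diff; the file name `smoke_test.py` and the assertions are illustrative, not part of the change set.

```python
# smoke_test.py -- hypothetical helper, not included in the diff above.
# Exercises the mock /api/generate endpoint from api_server.py in-process.
from api_server import app

def test_generate_endpoint():
    client = app.test_client()
    resp = client.post(
        "/api/generate",
        json={"model": "llama3", "prompt": "Why is the sky blue?"},
    )
    assert resp.status_code == 200
    body = resp.get_json()
    assert body["model"] == "llama3"
    assert "response" in body

if __name__ == "__main__":
    test_generate_endpoint()
    print("mock /api/generate responded as expected")
```

The same request can also be sent to a running server with the `curl` command from the README, which goes over the network to port 11434 instead of invoking the Flask app directly.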