Fixed issues and resolved conflicts
Richjerk committed May 21, 2024
1 parent 1fac522 commit 055784e
Showing 4 changed files with 51 additions and 1 deletion.
7 changes: 6 additions & 1 deletion README.md
@@ -1 +1,6 @@
# township-small-business-bot
## API Usage

To test the API locally, ensure your server is running and use the following `curl` command:

```sh
curl -d '{ "model": "llama3", "prompt": "Why is the sky blue?" }' -H "Content-Type: application/json" http://localhost:11434/api/generate
```
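
With the mock `api_server.py` added in this commit running locally, the command should return JSON along these lines (the exact wording comes from the mock's canned answer):

```json
{
  "model": "llama3",
  "prompt": "Why is the sky blue?",
  "response": "The sky is blue due to the scattering of sunlight by the atmosphere."
}
```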
22 changes: 22 additions & 0 deletions api_server.py
@@ -0,0 +1,22 @@
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/api/generate', methods=['POST'])
def generate_response():
data = request.json
model = data.get('model')
prompt = data.get('prompt')
if model and prompt:
# Mock response for demonstration
response = {
"model": model,
"prompt": prompt,
"response": "The sky is blue due to the scattering of sunlight by the atmosphere."
}
return jsonify(response), 200
else:
return jsonify({"error": "Invalid input"}), 400

if __name__ == '__main__':
app.run(host='0.0.0.0', port=11434)
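
A quick way to exercise this mock endpoint from Python is a short script like the sketch below (the `requests` package is an assumption here, not part of this commit):

```python
import requests

# Same payload as the README's curl example, sent to the local mock server.
payload = {"model": "llama3", "prompt": "Why is the sky blue?"}
resp = requests.post("http://localhost:11434/api/generate", json=payload)

print(resp.status_code)  # 200 when both fields are present, 400 otherwise
print(resp.json())       # echoes the model, the prompt, and the canned answer
```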
14 changes: 14 additions & 0 deletions app.py
@@ -0,0 +1,14 @@
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/api/generate', methods=['POST'])
def generate():
data = request.json
model = data.get('model')
prompt = data.get('prompt')
response = f"This is a mock response for the model {model} with prompt: {prompt}"
return jsonify({"response": response})

if __name__ == '__main__':
app.run(host='0.0.0.0', port=11434)
9 changes: 9 additions & 0 deletions ollama_api.py
@@ -0,0 +1,9 @@
from fastapi import FastAPI
import ollama  # Python client for a locally running Ollama server

app = FastAPI()

@app.post("/ollama")
def generate_response(prompt: str):
    # Invoke the local llama3 model through the Ollama Python client.
    result = ollama.generate(model="llama3", prompt=prompt, options={"temperature": 0.8})
    return {"response": result["response"]}
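
This file only defines the app; the usual way to serve it is `uvicorn ollama_api:app`. Because `prompt` is declared as a plain `str` on a POST route, FastAPI reads it from the query string rather than a JSON body, so a test call looks like the sketch below (assuming uvicorn's default port 8000 and an Ollama server with llama3 pulled):

```python
import requests

# FastAPI binds plain-typed parameters on a POST route to the query string,
# so the prompt travels as a URL parameter, not a JSON body.
resp = requests.post(
    "http://localhost:8000/ollama",
    params={"prompt": "Why is the sky blue?"},
)
print(resp.json())  # {"response": "..."}
```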
