-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathollama_api.py
More file actions
36 lines (27 loc) · 763 Bytes
/
ollama_api.py
File metadata and controls
36 lines (27 loc) · 763 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
# requirements.txt
# ollama
#
# Install ollama from the website or cli `brew install ollama`
# Start the server with `ollama serve`
# Run the model available through ollama `ollama run <model>`
#
"""Stream a chat completion from a locally running Ollama server.

Requires the Ollama daemon to be running (``ollama serve``) and the
model named in MODEL to be available locally (``ollama pull <model>``).
Prints the response incrementally as chunks arrive.
"""
from ollama import chat
#from ollama import Client

# The model you have downloaded locally using the command "ollama pull <model>"
MODEL = "llama3.2"
prompt = "List all of the presidents of the United States in chronological order."
role = 'user'

# if you need to specify a different server, default is localhost
#client = Client(
#    host='http://localhost:11434'
#)


def main() -> None:
    """Send `prompt` to MODEL and print the streamed reply chunk by chunk."""
    # stream=True yields incremental chunks instead of one full response.
    response = chat(
        model=MODEL,
        messages=[{
            'role': role,
            'content': prompt,
        }],
        stream=True,
    )
    for chunk in response:
        # flush=True so each partial chunk is visible immediately on the terminal
        print(chunk['message']['content'], end='', flush=True)
    # Terminate the streamed output with a newline so the shell prompt
    # does not end up glued to the last chunk.
    print()


# Guard the entry point so importing this module does not trigger network I/O.
if __name__ == "__main__":
    main()