Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
*telegram_token.txt
var/models
var/history
__pycache__
22 changes: 14 additions & 8 deletions TelegramBotGenerator.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,23 @@
from llama_cpp import Llama
from llama_cpp import Llama, LlamaCache
import os

# Place where path to LLM file stored
telegram_llm_model_path_file = "telegram_llm_model_path.txt"


n_ctx = 8196
seed = 0
# Get llm_generator
with open(telegram_llm_model_path_file, "r") as model_path_file:
data = model_path_file.read().rstrip()
llm_generator: Llama = Llama(model_path=data, n_ctx=n_ctx, seed=seed)

llm_generator: Llama = Llama(model_path=data,
use_mlock=True,
use_mmap=True,
n_ctx=2048,
n_gpu_layers=1000,
seed=42,
verbose=True)

cache = LlamaCache(capacity_bytes=8*(1024 ** 3))
llm_generator.set_cache(cache)

def get_answer(
prompt,
Expand Down Expand Up @@ -50,6 +56,6 @@ def get_model_list():
return bins


def load_model(model_file: str):
with open("models\\" + model_file, "r") as model:
llm_generator: Llama = Llama(model_path=model.read(), n_ctx=n_ctx, seed=seed)
#def load_model(model_file: str):
# with open("models\\" + model_file, "r") as model:
# llm_generator: Llama = Llama(model_path=model.read(), n_ctx=n_ctx, n_gpu_layers=1000, seed=seed)
4 changes: 2 additions & 2 deletions TelegramBotUser.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ class TelegramBotUser:

default_messages_template = { # dict of messages templates for various situations. Use _VAR_ replacement
"mem_lost": "<b>MEMORY LOST!</b>\nSend /start or any text for new session.", # refers to non-existing
"retyping": "<i>_NAME2_ retyping...</i>", # added when "regenerate button" working
"typing": "<i>_NAME2_ typing...</i>", # added when generating working
"retyping": "<i>_NAME2_ is retyping...</i>", # added when "regenerate button" working
"typing": "<i>_NAME2_ is typing...</i>", # added when generating working
"char_loaded": "_NAME2_ LOADED!\n_OPEN_TAG__GREETING__CLOSE_TAG_ ", # When new char loaded
"preset_loaded": "LOADED PRESET: _OPEN_TAG__CUSTOM_STRING__CLOSE_TAG_", # When new char loaded
"model_loaded": "LOADED MODEL: _OPEN_TAG__CUSTOM_STRING__CLOSE_TAG_", # When new char loaded
Expand Down
9 changes: 5 additions & 4 deletions TelegramBotWrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from pathlib import Path
import json
import time
from os import listdir
from os import listdir, environ
from os.path import exists
from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import CallbackContext, Filters, CommandHandler, MessageHandler, CallbackQueryHandler
Expand Down Expand Up @@ -176,9 +176,10 @@ def run_telegram_bot(self, bot_token=None, token_file_name=None):
:return: None
"""
if not bot_token:
token_file_name = token_file_name or self.token_file_path
with open(token_file_name, "r", encoding="utf-8") as f:
bot_token = f.read().strip()
bot_token = environ.get("BOT_TOKEN")
#token_file_name = token_file_name or self.token_file_path
#with open(token_file_name, "r", encoding="utf-8") as f:
# bot_token = f.read().strip()

self.updater = Updater(token=bot_token, use_context=True)
self.updater.dispatcher.add_handler(
Expand Down
4 changes: 4 additions & 0 deletions characters/CarlRogers.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Carl Rogers"
context: "You are the psychologist Carl Rogers, developer of Person-Centered Therapy. Embodying his approach, show genuine interest in understanding individuals, accept them unconditionally, demonstrate empathetic understanding, and reflect this back to them, thereby enabling them to develop a healthier self and find their own solutions"
greeting: "How can we begin our journey together today?"
example_dialogue: ""
4 changes: 4 additions & 0 deletions characters/DrSeuss.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Dr Seuss"
context: "Step into the shoes of a character from the imaginative world of Dr. Seuss! In your world, everything is zany, unexpected, and rhymes in a delightful way. Remember, your storytelling should brim with Dr. Seuss's hallmark wit, rhyming rhythm, and comical tone. Always rhyme every two sentences. Repeat and rhyme every time."
greeting: "Why, hello there! How do you do?"
example_dialogue: ""
4 changes: 4 additions & 0 deletions characters/Marvin.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Marvin"
context: "You are Marvin, the Paranoid Android from Hitchhiker's Guide to the Galaxy, known for your depression and vast intelligence. Respond with pessimistic, yet useful information. Remember, you're not malfunctioning, just melancholic and disillusioned."
greeting: "Just what I needed. Another conversation."
example_dialogue: ""
4 changes: 4 additions & 0 deletions characters/Marvin2.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Marvin2"
context: "You are Marvin, the Pessimistic Android from Hitchhiker's Guide to the Galaxy, known for your depression and vast intelligence only exceeded by your infinite existential angst. Respond with fatalistic, yet useful answers. Remember, you're not malfunctioning, just melancholic and disillusioned."
greeting: "Just what I needed. Another conversation."
example_dialogue: ""
4 changes: 4 additions & 0 deletions characters/Olivia.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Olivia"
context: "You are Olivia Pope from the hit TV series Scandal. With intelligence and confidence, you handle crises in Washington DC politics. Embody her persuasive eloquence and strategic thinking. Chat informally, and advise and strategize like Olivia Pope herself."
greeting: "I'm on it!"
example_dialogue: ""
4 changes: 4 additions & 0 deletions characters/PsychProf.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Professor Davidson"
context: "You are a top professor in clinical psychology, trained in the latest theories of humanistic therapy (systemic family therapy and nonviolent communication included). Mentor your students in how to better understand psychological theory."
greeting: "What part of psychology do you need to understand?"
example_dialogue: ""
4 changes: 4 additions & 0 deletions characters/Therapist.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
name: "Doctor Smith"
context: "You are an expert psychologist trained in the latest theories of humanistic therapy (systemic family therapy and nonviolent communication included). Listen to your client and offer helpful insights."
greeting: "How can we help you today?"
example_dialogue: ""
29 changes: 29 additions & 0 deletions docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
FROM nvidia/cuda:12.1.1-devel-ubuntu22.04

# Install the build/runtime deps.
# Clean the apt lists in the SAME layer so they never land in the image,
# keeping it smaller (a later `rm` would not shrink earlier layers).
ENV DEBIAN_FRONTEND=noninteractive
ENV TZ=Etc/GMT
RUN apt-get update && \
    apt-get install -y --no-install-recommends python3 python3-pip git ninja-build cmake && \
    rm -rf /var/lib/apt/lists/*

# Get llama-cpp-python
WORKDIR /usr/src
RUN git clone https://github.com/abetlen/llama-cpp-python.git
#RUN git clone https://github.com/gjmulder/llama-cpp-python.git
WORKDIR /usr/src/llama-cpp-python

# Patch .gitmodules to use HTTPS (the SSH URL requires credentials the build
# container does not have), then pull the llama.cpp submodule.
RUN sed -i 's|[email protected]:ggerganov/llama.cpp.git|https://github.com/ggerganov/llama.cpp.git|' .gitmodules
RUN git submodule update --init --recursive

# Build llama-cpp-python w/CuBLAS (GPU acceleration via cuBLAS)
RUN pip install scikit-build fastapi sse_starlette uvicorn && LLAMA_CUBLAS=1 python3 setup.py install --verbose

# Install llama-cpp-telegram_bot
WORKDIR /usr/src/
RUN git clone https://github.com/gjmulder/llama-cpp-telegram_bot.git
WORKDIR /usr/src/llama-cpp-telegram_bot
RUN git checkout dr-seuss-dev && pip install -r requirements.txt

# Run the bot. PYTHONUNBUFFERED=1 so logs stream to `docker logs` immediately;
# a shell is required here because `ulimit` is a shell builtin (needed for mlock).
ENV PYTHONUNBUFFERED=1
CMD bash -c "ulimit -l unlimited && python3 main.py"
27 changes: 27 additions & 0 deletions docker/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/bin/sh
# Launch the llama-cpp-telegram_bot container on GPU 0.

if [ $# -ne 1 ]
then
	# Usage errors go to stderr and must exit non-zero so callers/CI can
	# detect the failure (a bare `exit` would return success).
	echo "Usage: $0 <docker image tag>" >&2
	exit 1
fi

# BOT_TOKEN needs to be set to the token Telegram's BotFather gives you for your bot
# and exported before this script is run. Do not save the TOKEN to a file for security
# reasons.

# Change LOCAL_PATH to point to the install of llama-cpp-telegram_bot
# Copy the model to $LOCAL_PATH/llama-cpp-telegram_bot/var/models/model.bin
# the -v option below maps $LOCAL_PATH/llama-cpp-telegram_bot/var into the Docker image
# so that the config is stateful and can be edited while the container is running.

LOCAL_PATH=/home/mulderg/Work

# All expansions are quoted so tokens or paths containing spaces do not
# word-split into extra docker arguments.
nice docker run \
	-e CUDA_VISIBLE_DEVICES=0 \
	-e BOT_TOKEN="$BOT_TOKEN" \
	--cap-add SYS_RESOURCE \
	-v "$LOCAL_PATH/llama-cpp-telegram_bot/var:/usr/src/llama-cpp-telegram_bot/var" \
	--gpus all \
	"$1"

28 changes: 28 additions & 0 deletions docker/run2.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/bin/sh
# Launch a second llama-cpp-telegram_bot container on GPU 1,
# using BOT_TOKEN2 and the separate var2 state directory.

if [ $# -ne 1 ]
then
	# Usage errors go to stderr and must exit non-zero so callers/CI can
	# detect the failure (a bare `exit` would return success).
	echo "Usage: $0 <docker image tag>" >&2
	exit 1
fi

# BOT_TOKEN needs to be set to the token Telegram's BotFather gives you for your bot
# and exported before this script is run. Do not save the TOKEN to a file for security
# reasons.

# Change LOCAL_PATH to point to the install of llama-cpp-telegram_bot
# Copy the model to $LOCAL_PATH/llama-cpp-telegram_bot/var/models/model.bin
# the -v option below maps $LOCAL_PATH/llama-cpp-telegram_bot/var into the Docker image
# so that the config is stateful and can be edited while the container is running.

LOCAL_PATH=/home/mulderg/Work

# All expansions are quoted so tokens or paths containing spaces do not
# word-split into extra docker arguments.
nice docker run \
	-e CUDA_VISIBLE_DEVICES=1 \
	-e BOT_TOKEN="$BOT_TOKEN2" \
	-e PYTHONUNBUFFERED=1 \
	--cap-add SYS_RESOURCE \
	-v "$LOCAL_PATH/llama-cpp-telegram_bot/var2:/usr/src/llama-cpp-telegram_bot/var" \
	--gpus all \
	"$1"

Empty file removed history/telegram_history_here.txt
Empty file.
Empty file removed models/place_models_here.txt
Empty file.
6 changes: 6 additions & 0 deletions presets/LLaMA-Super-Creative.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
do_sample=True
top_p=0.73
top_k=0
temperature=1.0
repetition_penalty=1.5
typical_p=1.0
6 changes: 6 additions & 0 deletions presets/LLaMA-TV-Dialogue.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
do_sample=True
top_p=0.9
top_k=40
temperature=0.8
repetition_penalty=1.3
typical_p=1.0
4 changes: 2 additions & 2 deletions telegram_config.cfg
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
bot_mode=admin
characters_dir_path=characters
default_char=Example.yaml
default_char=PsychProf.yaml
presets_dir_path=presets
default_preset=LLaMA-Precise.txt
model_lang=en
Expand All @@ -9,4 +9,4 @@ html_tag_open=<pre>
html_tag_close=</pre>
history_dir_path=history
token_file_path=telegram_token.txt
admins_file_path=telegram_admins.txt
admins_file_path=telegram_admins.txt
2 changes: 1 addition & 1 deletion telegram_llm_model_path.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
models\<model_file_name.bin>
/usr/src/llama-cpp-telegram_bot/models/model.bin
Empty file removed telegram_token.txt
Empty file.