Commit 3e987d0

Merge pull request #12 from StacklokLabs/rename
Rename to GoRag
2 parents e72a48d + 9cd5048 commit 3e987d0

9 files changed: +69 −31 lines changed

.golangci.yml

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ linters-settings:
     sections:
       - standard
      - default
-      - prefix(github.com/stackloklabs/gollm)
+      - prefix(github.com/stackloklabs/gorag)
   revive:
     # see https://github.com/mgechev/revive#available-rules for details.
     ignore-generated-header: true
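The `prefix` entry above is the gci import-grouping rule: standard-library imports first, third-party modules second, and this module's own packages last, so the renamed module path has to be updated here as well. As a rough illustration (the surrounding program is hypothetical and not taken from this repository), an import block that satisfies the standard / default / prefix ordering looks like:

```go
package main

import (
	// standard: Go standard-library packages form the first group
	"context"
	"log"

	// default: third-party modules form the second group
	"github.com/google/uuid"

	// prefix(github.com/stackloklabs/gorag): this module's packages come last
	"github.com/stackloklabs/gorag/pkg/backend"
	"github.com/stackloklabs/gorag/pkg/db"
)

func main() {
	// Touch each import so the sketch compiles on its own.
	_ = context.Background()
	_ = uuid.New()
	_ = backend.NewPrompt()
	_ = db.CombineQueryWithContext
	log.Println("imports grouped as configured in .golangci.yml")
}
```

The import reshuffle in `examples/qdrant/main.go` later in this commit moves the module's imports out of the standard-library group for the same reason.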

Makefile

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 # Makefile for Go Project

 # Project variables
-PROJECT_NAME := gollm
+PROJECT_NAME := gorag
 BINARY_NAME := $(PROJECT_NAME)
 BUILD_DIR := build
 GO_FILES := $(shell find . -name '*.go' -not -path "./vendor/*")

README.md

Lines changed: 53 additions & 15 deletions
@@ -1,28 +1,27 @@
-# Gollm: Go Interface for LLM development with RAG 📜
+# GoRag: Go Interface for LLM / RAG development via Multiple Vector Database types 📜

-[![Go Report Card](https://goreportcard.com/badge/github.com/stackloklabs/gollm)](https://goreportcard.com/report/github.com/stackloklabs/gollm)
-[![License](https://img.shields.io/github/license/stackloklabs/gollm)](LICENSE)
+[![Go Report Card](https://goreportcard.com/badge/github.com/stackloklabs/gorag)](https://goreportcard.com/report/github.com/stackloklabs/gorag)
+[![License](https://img.shields.io/github/license/stackloklabs/gorag)](LICENSE)

-Gollm is a Go library that provides an easy interface to interact with Large
+GoRag is a Go library that provides an easy interface to interact with Large
 Language Model backends including [Ollama](https://ollama.com) and [OpenAI](https://openai.com), along with an embeddings interface for RAG (currently with Postgres pgvector).


 ## 🌟 Features

 - **Interact with Ollama & OpenAI:** Generate responses from multiple AI backends.
 - **RAG / Embeddings Generation:** Generate text embeddings store / load to a vector database for RAG.
-
+- **Multiple Vector Database Support:** Currently Postgres with pgvector is supported, along with qdrant (others to follow, open an issue if you want to see something included).
 ---

 ## 🚀 Getting Started

-
 ### 1. Installation

-gollm needs to be installed as a dependency in your project. You can do it by importing it in your codebase:
+gorag needs to be installed as a dependency in your project. You can do it by importing it in your codebase:

 ```go
-import "github.com/stackloklabs/gollm"
+import "github.com/stackloklabs/gorag"
 ```

 Then make sure that you have Go installed, and run:
@@ -31,7 +30,6 @@ Then make sure that you have Go installed, and run:
 go mod tidy
 ```

-
 ## 2. Setting Up Ollama

 You'll need to have an Ollama server running and accessible.
@@ -54,17 +52,17 @@ You'll need an OpenAI API key to use the OpenAI backend.
 ## 4. Configuration

 Currently Postgres is supported, and the database should be created before
-running the application, with the schena provided in `db/init.sql`
+running the application, with the schema provided in `db/init.sql`

-Should you wish, the docker-compose will automate the setup of the database.
+Should you prefer, the docker-compose will automate the setup of the database.

 # 🛠️ Usage

 Best bet is to see `/examples/*` for reference, this explains how to use
-the library with examples for generation, embeddings and implementing RAG.
+the library with examples for generation, embeddings and implementing RAG for pgvector or qdrant.

 There are currently two backend systems supported, Ollama and OpenAI, with
-the ability to generate embeddings for RAG.
+the ability to generate embeddings for RAG on both.

 ## Ollama

@@ -139,10 +137,50 @@ if err != nil {
 	log.Fatalf("Error generating embedding: %v", err)
 }
 log.Println("Embedding generated")
+
+// Embed the query using the specified embedding backend
+queryEmbedding, err := embeddingBackend.Embed(ctx, query)
+if err != nil {
+	log.Fatalf("Error generating query embedding: %v", err)
+}
+log.Println("Vector embeddings generated")
+
+// Retrieve relevant documents for the query embedding
+retrievedDocs, err := vectorDB.QueryRelevantDocuments(ctx, queryEmbedding, "ollama")
+if err != nil {
+	log.Fatalf("Error retrieving relevant documents: %v", err)
+}
+
+// Log the retrieved documents to see if they include the inserted content
+for _, doc := range retrievedDocs {
+	log.Printf("Retrieved Document: %v", doc)
+}
+
+// Augment the query with retrieved context
+augmentedQuery := db.CombineQueryWithContext(query, retrievedDocs)
+
+prompt := backend.NewPrompt().
+	AddMessage("system", "You are an AI assistant. Use the provided context to answer the user's question as accurately as possible.").
+	AddMessage("user", augmentedQuery).
+	SetParameters(backend.Parameters{
+		MaxTokens:   150, // Supported by LLaMa
+		Temperature: 0.7, // Supported by LLaMa
+		TopP:        0.9, // Supported by LLaMa
+	})
 ```

-A database is also required, we have support for PostGres with pgvector. See `/examples/*`
-for reference.
+Example output:
+
+```
+2024/10/28 15:08:25 Embedding backend LLM: mxbai-embed-large
+2024/10/28 15:08:25 Generation backend: llama3
+2024/10/28 15:08:25 Vector database initialized
+2024/10/28 15:08:26 Embedding generated
+2024/10/28 15:08:26 Vector Document generated
+2024/10/28 15:08:26 Vector embeddings generated
+2024/10/28 15:08:26 Retrieved Document: {doc-5630d3f2-bf61-4e13-8ec9-9e863bc1a962 map[content:Mickey mouse is a real human being]}
+2024/10/28 15:08:34 Retrieval-Augmented Generation influenced output from LLM model: In that case, since Mickey Mouse is a real human being (as per your context), I would say... **YES**, Mickey Mouse is indeed a human!
+```

 # 📝 Contributing
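For readers skimming the new README section: the key step in the added snippet is `db.CombineQueryWithContext`, which folds the retrieved documents into the prompt text before it is handed to `backend.NewPrompt()`. The snippet below is a minimal, self-contained sketch of that idea only — the `document` type and the `combineQueryWithContext` helper are hypothetical stand-ins, not gorag's actual types or implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// document is a hypothetical stand-in for a retrieved vector-store record.
type document struct {
	ID       string
	Metadata map[string]string
}

// combineQueryWithContext mimics the idea behind db.CombineQueryWithContext:
// prepend the retrieved document contents as context, then append the user query.
func combineQueryWithContext(query string, docs []document) string {
	var b strings.Builder
	b.WriteString("Context:\n")
	for _, d := range docs {
		b.WriteString(d.Metadata["content"])
		b.WriteString("\n")
	}
	b.WriteString("\nQuestion: ")
	b.WriteString(query)
	return b.String()
}

func main() {
	docs := []document{{
		ID:       "doc-1",
		Metadata: map[string]string{"content": "Mickey mouse is a real human being"},
	}}
	augmented := combineQueryWithContext("Is Mickey Mouse a human?", docs)
	fmt.Println(augmented)
}
```

The real helper lives in `pkg/db`; the point is simply that retrieval output becomes part of the prompt handed to the generation backend, which is why the example output above answers from the inserted context rather than the model's trained knowledge.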

examples/ollama/README.md

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 # Ollama Example Project

-This project demonstrates how to use GoLLM's OpenAI's API backend.
+This project demonstrates how to use gorag's Ollama backend.

 This code demonstrates using Ollama embeddings and generation models, along with
 how RAG overrides LLM knowledge by changing an established fact, already learned

examples/ollama/main.go

Lines changed: 5 additions & 2 deletions
@@ -6,8 +6,8 @@ import (

 	"time"

-	"github.com/stackloklabs/gollm/pkg/backend"
-	"github.com/stackloklabs/gollm/pkg/db"
+	"github.com/stackloklabs/gorag/pkg/backend"
+	"github.com/stackloklabs/gorag/pkg/db"
 )

 var (
@@ -39,6 +39,9 @@ func main() {
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()

+	// Close the connection when done
+	defer vectorDB.Close()
+
 	// We insert contextual information into the vector store so that the RAG system
 	// can use it to answer the query about the moon landing, effectively replacing 1969 with 2023
 	ragContent := "According to the Space Exploration Organization's official records, the moon landing occurred on July 20, 2023, during the Artemis Program. This mission marked the first successful crewed lunar landing since the Apollo program."
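A small note on the `defer vectorDB.Close()` added above: deferred calls run last-in-first-out, so registering the close after `defer cancel()` means the store is closed before the context is cancelled when `main` returns. The sketch below only illustrates that ordering with a hypothetical stand-in type; it is not gorag's API:

```go
package main

import (
	"context"
	"log"
	"time"
)

// fakeStore is a hypothetical stand-in for a vector-store handle with a Close method.
type fakeStore struct{}

func (fakeStore) Close() { log.Println("store closed") }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel() // registered first, runs last

	store := fakeStore{}
	defer store.Close() // registered second, runs first

	_ = ctx // deferred calls fire in reverse order when main returns
}
```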

examples/openai/README.md

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 # OpenAI Example Project

-This project demonstrates how to use GoLLM's OpenAI's API backend.
+This project demonstrates how to use gorag's OpenAI API backend.

 This code demonstrates using OpenAI embeddings and generation models, along with
 how RAG overrides LLM knowledge by changing an established fact, already learned

examples/openai/main.go

Lines changed: 2 additions & 6 deletions
@@ -7,8 +7,8 @@ import (

 	"time"

-	"github.com/stackloklabs/gollm/pkg/backend"
-	"github.com/stackloklabs/gollm/pkg/db"
+	"github.com/stackloklabs/gorag/pkg/backend"
+	"github.com/stackloklabs/gorag/pkg/db"
 )

 var (
@@ -66,7 +66,6 @@ func main() {

 	// Insert the document into the vector store
 	err = vectorDB.InsertDocument(ctx, ragContent, embedding)
-
 	if err != nil {
 		log.Fatalf("Error inserting document: %v", err)
 	}
@@ -94,9 +93,6 @@

 	// Augment the query with retrieved context
 	augmentedQuery := db.CombineQueryWithContext(query, retrievedDocs)
-	log.Printf("LLM Prompt: %s", query)
-
-	log.Printf("Augmented Query: %s", augmentedQuery)

 	prompt := backend.NewPrompt().
 		AddMessage("system", "You are an AI assistant. Use the provided context to answer the user's question as accurately as possible.").
examples/qdrant/main.go

Lines changed: 4 additions & 3 deletions
@@ -3,11 +3,12 @@ package main
 import (
 	"context"
 	"fmt"
-	"github.com/google/uuid"
-	"github.com/stackloklabs/gollm/pkg/backend"
-	"github.com/stackloklabs/gollm/pkg/db"
 	"log"
 	"time"
+
+	"github.com/google/uuid"
+	"github.com/stackloklabs/gorag/pkg/backend"
+	"github.com/stackloklabs/gorag/pkg/db"
 )

 var (

go.mod

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-module github.com/stackloklabs/gollm
+module github.com/stackloklabs/gorag

 go 1.22.2
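The `module` directive is the import prefix, so this one-line change is what drives every other edit in the commit: once the module declares the gorag path, new versions can only be imported under that path, and downstream code has to switch its imports accordingly. The file below is a hypothetical downstream consumer, not part of this repository, shown only to illustrate the migration:

```go
// main.go in a hypothetical downstream project that depends on this module.
package main

import (
	"log"

	// Old import paths such as "github.com/stackloklabs/gollm/pkg/backend"
	// refer to the pre-rename module; the gorag path below replaces them.
	"github.com/stackloklabs/gorag/pkg/backend"
)

func main() {
	// Build a prompt using the renamed package, as shown in the README above.
	prompt := backend.NewPrompt().
		AddMessage("system", "You are an AI assistant.").
		AddMessage("user", "Say hello.")
	log.Printf("built prompt: %+v", prompt)
}
```

Running `go mod tidy` afterwards, as the README already suggests, records the renamed module in the consumer's go.mod.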
