1 change: 0 additions & 1 deletion .github/workflows/_comps-workflow.yml
@@ -71,7 +71,6 @@ jobs:
fi
if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
fi
- name: Get build list
id: get-build-list
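For context, the line removed here (and in the other files below) rewrote every occurrence of triton in the cloned fork's requirements-hpu.txt into a hard pin before the image build. A minimal sketch of its effect, assuming a hypothetical requirements file with a bare triton entry (the other entries are placeholders for illustration only):

# hypothetical requirements-hpu.txt with an unpinned triton entry (illustration only)
printf 'ray\ntriton\npandas\n' > requirements-hpu.txt

# the removed workaround pinned triton in place before building the vLLM Gaudi image
sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt

cat requirements-hpu.txt
# ray
# triton==3.1.0
# pandas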
1 change: 0 additions & 1 deletion .github/workflows/push-image-build.yml
@@ -96,7 +96,6 @@ jobs:
fi
if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
fi

- name: Build Image
6 changes: 3 additions & 3 deletions comps/llms/src/doc-summarization/requirements.txt
@@ -1,11 +1,11 @@
docarray[full]
fastapi
httpx==0.27.2
-huggingface_hub
-langchain #==0.1.12
+huggingface_hub==0.27.1
+langchain==0.3.14
langchain-huggingface
langchain-openai
-langchain_community
+langchain_community==0.3.14
langchainhub
opentelemetry-api
opentelemetry-exporter-otlp
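The unpinned huggingface_hub, langchain, and langchain_community entries above are replaced with exact pins. A quick way to check that an environment actually resolves to the pinned releases, assuming the repository-relative path below:

pip install -r comps/llms/src/doc-summarization/requirements.txt
pip show huggingface_hub langchain langchain_community | grep -E '^(Name|Version)'
# expected if the pins are honored: huggingface_hub 0.27.1, langchain 0.3.14, langchain_community 0.3.14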
6 changes: 3 additions & 3 deletions comps/llms/src/faq-generation/requirements.txt
@@ -1,10 +1,10 @@
docarray[full]
fastapi
-huggingface_hub
-langchain
+huggingface_hub==0.27.1
+langchain==0.3.14
langchain-huggingface
langchain-openai
-langchain_community
+langchain_community==0.3.14
langchainhub
opentelemetry-api
opentelemetry-exporter-otlp
1 change: 0 additions & 1 deletion comps/third_parties/vllm/src/build_docker_vllm.sh
@@ -38,7 +38,6 @@ if [ "$hw_mode" = "hpu" ]; then
git clone https://github.com/HabanaAI/vllm-fork.git
cd ./vllm-fork/
git checkout v0.6.4.post2+Gaudi-1.19.0
-sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
cd ..
rm -rf vllm-fork
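With the triton workaround dropped, the HPU branch of build_docker_vllm.sh reduces to a plain clone, checkout, and docker build. Roughly, using only the commands already present in the script:

git clone https://github.com/HabanaAI/vllm-fork.git
cd ./vllm-fork/
git checkout v0.6.4.post2+Gaudi-1.19.0
docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
cd ..
rm -rf vllm-fork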
1 change: 0 additions & 1 deletion tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -57,7 +57,6 @@ function build_vllm_docker_images() {
fi
cd ./vllm-fork
git checkout v0.6.4.post2+Gaudi-1.19.0
-sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
if [ $? -ne 0 ]; then
echo "opea/vllm-gaudi:comps failed"
@@ -13,7 +13,6 @@ function build_docker_images() {
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork/
git checkout v0.6.4.post2+Gaudi-1.19.0
-sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
if [ $? -ne 0 ]; then
echo "opea/vllm-gaudi built fail"
@@ -20,7 +20,6 @@ function build_docker_images() {
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork/
git checkout v0.6.4.post2+Gaudi-1.19.0
-sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
if [ $? -ne 0 ]; then
echo "opea/vllm-gaudi built fail"
1 change: 0 additions & 1 deletion tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork/
git checkout v0.6.4.post2+Gaudi-1.19.0
-sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
if [ $? -ne 0 ]; then
echo "opea/vllm-gaudi built fail"
@@ -20,7 +20,6 @@ function build_docker_images() {
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork/
git checkout v0.6.4.post2+Gaudi-1.19.0
-sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
if [ $? -ne 0 ]; then
echo "opea/vllm-gaudi built fail"