diff --git a/.github/workflows/_comps-workflow.yml b/.github/workflows/_comps-workflow.yml
index 964d2d7284..f7e8019dbe 100644
--- a/.github/workflows/_comps-workflow.yml
+++ b/.github/workflows/_comps-workflow.yml
@@ -71,7 +71,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
             git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-            sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
           fi
       - name: Get build list
         id: get-build-list
diff --git a/.github/workflows/push-image-build.yml b/.github/workflows/push-image-build.yml
index fda1528065..67389a3cd4 100644
--- a/.github/workflows/push-image-build.yml
+++ b/.github/workflows/push-image-build.yml
@@ -96,7 +96,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
             git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-            sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
           fi
       - name: Build Image
diff --git a/comps/llms/src/doc-summarization/requirements.txt b/comps/llms/src/doc-summarization/requirements.txt
index 1694618637..6bc1bb1e55 100644
--- a/comps/llms/src/doc-summarization/requirements.txt
+++ b/comps/llms/src/doc-summarization/requirements.txt
@@ -1,11 +1,11 @@
 docarray[full]
 fastapi
 httpx==0.27.2
-huggingface_hub
-langchain #==0.1.12
+huggingface_hub==0.27.1
+langchain==0.3.14
 langchain-huggingface
 langchain-openai
-langchain_community
+langchain_community==0.3.14
 langchainhub
 opentelemetry-api
 opentelemetry-exporter-otlp
diff --git a/comps/llms/src/faq-generation/requirements.txt b/comps/llms/src/faq-generation/requirements.txt
index 36257d3939..037079294b 100644
--- a/comps/llms/src/faq-generation/requirements.txt
+++ b/comps/llms/src/faq-generation/requirements.txt
@@ -1,10 +1,10 @@
 docarray[full]
 fastapi
-huggingface_hub
-langchain
+huggingface_hub==0.27.1
+langchain==0.3.14
 langchain-huggingface
 langchain-openai
-langchain_community
+langchain_community==0.3.14
 langchainhub
 opentelemetry-api
 opentelemetry-exporter-otlp
diff --git a/comps/third_parties/vllm/src/build_docker_vllm.sh b/comps/third_parties/vllm/src/build_docker_vllm.sh
index bd8df2e708..bec3a0c8f1 100644
--- a/comps/third_parties/vllm/src/build_docker_vllm.sh
+++ b/comps/third_parties/vllm/src/build_docker_vllm.sh
@@ -38,7 +38,6 @@ if [ "$hw_mode" = "hpu" ]; then
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd ./vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
diff --git a/tests/agent/test_agent_langchain_on_intel_hpu.sh b/tests/agent/test_agent_langchain_on_intel_hpu.sh
index 090d1ed332..2c12354723 100644
--- a/tests/agent/test_agent_langchain_on_intel_hpu.sh
+++ b/tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -57,7 +57,6 @@ function build_vllm_docker_images() {
     fi
     cd ./vllm-fork
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"
diff --git a/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh b/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
index 92b29827fe..d040f954a1 100644
--- a/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
+++ b/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
@@ -13,7 +13,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh b/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
index d9552e9a0d..a6096bd309 100644
--- a/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh b/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
index 5d489b250d..8607f2c550 100644
--- a/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh b/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
index 7c32a8977b..ea8c9ee6ca 100644
--- a/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"