3 changes: 2 additions & 1 deletion tests/third_parties/test_third_parties_pathway.sh
@@ -7,6 +7,7 @@ set -x
 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
+export DATA_PATH=${model_cache:-./data}
 function build_docker_images() {
     cd $WORKPATH

@@ -28,7 +29,7 @@ function start_service() {
     # tei endpoint
     tei_endpoint=5436
     model="BAAI/bge-base-en-v1.5"
-    docker run -d --name="test-comps-vectorstore-pathway-tei-endpoint" -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p $tei_endpoint:80 -v ./data:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model
+    docker run -d --name="test-comps-vectorstore-pathway-tei-endpoint" -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p $tei_endpoint:80 -v $DATA_PATH:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model

     sleep 30s
     export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:${tei_endpoint}"
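Every hunk in this PR applies the same Bash idiom: ${model_cache:-./data} expands to the value of model_cache when that variable is set and non-empty, and to the literal fallback ./data otherwise. A minimal standalone sketch of the expansion (not part of the test scripts; the /mnt path is purely illustrative):

#!/usr/bin/env bash
# ${var:-default}: use $var if it is set and non-empty, else the default.
unset model_cache
echo "${model_cache:-./data}"    # prints: ./data

model_cache=/mnt/hf_cache        # hypothetical cache location
echo "${model_cache:-./data}"    # prints: /mnt/hf_cache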
(file path not captured in this excerpt)
@@ -35,7 +35,7 @@ function build_container() {
 start_container() {
     export VLLM_SERVICE_PORT=28011
     export HUGGINGFACEHUB_API_TOKEN=${HF_TOKEN}
-    export HF_CACHE_DIR="./data"
+    export HF_CACHE_DIR=${model_cache:-./data}
     export VLLM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
     export TENSOR_PARALLEL_SIZE=1

2 changes: 1 addition & 1 deletion tests/third_parties/test_third_parties_vllm_openvino.sh
@@ -39,7 +39,7 @@ function build_container() {
 start_container() {
     export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
     export LLM_ENDPOINT_PORT=12205
-    export HF_CACHE_DIR=$HOME/.cache/huggingface
+    export HF_CACHE_DIR=${model_cache:-./data}

     cd $WORKPATH/comps/third_parties/vllm/deployment/docker_compose
     docker compose -f compose.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log
(file path not captured in this excerpt)
@@ -30,7 +30,7 @@ function build_container() {
 # Function to start Docker container
 start_container() {
     export LLM_ENDPOINT_PORT=12206
-    export HF_CACHE_DIR=$HOME/.cache/huggingface
+    export HF_CACHE_DIR=${model_cache:-./data}
     export RENDER_GROUP_ID=110
     export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"

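Because all of the touched scripts now read model_cache with the same fallback, a CI runner can point every test at one pre-populated model cache without editing the scripts. A hypothetical invocation (the cache path is an example, not from this PR):

#!/usr/bin/env bash
# Share one Hugging Face model cache across the test scripts (example path).
export model_cache=/mnt/huggingface_cache
bash tests/third_parties/test_third_parties_pathway.sh
bash tests/third_parties/test_third_parties_vllm_openvino.sh

# With model_cache unset, each script falls back to ./data.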