Skip to content

Commit e8ebd94

Browse files
letonghan and chensuyue authored
Add e2e test for llm summarization tgi (#238)
* add e2e test for llm summarization tgi Signed-off-by: letonghan <[email protected]> * add log path Signed-off-by: letonghan <[email protected]> --------- Signed-off-by: letonghan <[email protected]> Co-authored-by: chen, suyue <[email protected]>
1 parent 9d826f1 commit e8ebd94

File tree

1 file changed

+91
-0
lines changed

1 file changed

+91
-0
lines changed
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
#!/bin/bash
2+
# Copyright (C) 2024 Intel Corporation
3+
# SPDX-License-Identifier: Apache-2.0
4+
5+
set -xe
6+
7+
WORKPATH=$(dirname "$PWD")
8+
ip_address=$(hostname -I | awk '{print $1}')
9+
LOG_PATH="$WORKPATH/tests"
10+
11+
# Build the llm-tgi summarization microservice image from the repo root.
# Globals: WORKPATH (read), http_proxy / https_proxy (read).
# Fix: quote $WORKPATH and the proxy expansions, and fail fast if cd fails
# (otherwise `docker build` would run from the wrong directory).
function build_docker_images() {
    cd "$WORKPATH" || exit 1
    docker build --no-cache -t opea/llm-tgi:comps \
        --build-arg https_proxy="$https_proxy" \
        --build-arg http_proxy="$http_proxy" \
        -f comps/llms/summarization/tgi/Dockerfile .
}
15+
16+
# Start the TGI endpoint container and the llm-tgi microservice container,
# then wait (up to 100 x 5s polls) for TGI to finish loading the model.
# Globals: ip_address, WORKPATH (read); HF_TOKEN must be set by the caller;
#          http_proxy / https_proxy are forwarded into the containers.
# Fix: dropped the dead `[[ $ready == true ]]` condition — `ready` was never
# assigned anywhere, so that clause could never be true and only misled readers.
function start_service() {
    tgi_endpoint_port=5014
    export your_hf_llm_model="Intel/neural-chat-7b-v3-3"
    # Remember to set HF_TOKEN before invoking this test!
    export HF_TOKEN=${HF_TOKEN}
    docker run -d --name="test-comps-llm-tgi-endpoint" -p $tgi_endpoint_port:80 -v ./data:/data -e http_proxy=$http_proxy -e https_proxy=$https_proxy --shm-size 1g ghcr.io/huggingface/text-generation-inference:1.4 --model-id ${your_hf_llm_model}
    export TGI_LLM_ENDPOINT="http://${ip_address}:${tgi_endpoint_port}"

    tei_service_port=5015
    docker run -d --name="test-comps-llm-tgi-server" -p ${tei_service_port}:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HF_TOKEN opea/llm-tgi:comps

    # Poll the TGI container log until it reports "Connected" (model loaded),
    # giving up after 100 attempts (~500s + per-iteration docker-logs time).
    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs test-comps-llm-tgi-endpoint > "${WORKPATH}/tests/test-comps-llm-tgi-endpoint.log"
        n=$((n+1))
        if grep -q Connected "${WORKPATH}/tests/test-comps-llm-tgi-endpoint.log"; then
            break
        fi
        sleep 5s
    done
    # Small grace period so the microservice can connect to the ready endpoint.
    sleep 5s
}
39+
40+
# Send summarization requests to the microservice and verify the response.
# Exits non-zero (failing the e2e test) on a bad HTTP status or a response
# body that lacks the expected "text" field.
# Globals: ip_address, LOG_PATH (read).
function validate_microservice() {
    tei_service_port=5015
    # Smoke request; http_proxy is cleared so curl talks to the host directly.
    http_proxy="" curl http://${ip_address}:${tei_service_port}/v1/chat/docsum \
        -X POST \
        -d '{"query":"Deep learning is a subset of machine learning that utilizes neural networks with multiple layers to analyze various levels of abstract data representations. It enables computers to identify patterns and make decisions with minimal human intervention by learning from large amounts of data."}' \
        -H 'Content-Type: application/json'
    docker logs test-comps-llm-tgi-endpoint
    docker logs test-comps-llm-tgi-server

    cd "$LOG_PATH" || exit 1
    tei_service_port=5015
    URL="http://${ip_address}:$tei_service_port/v1/chat/docsum"
    HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d '{"query": "What is Deep Learning?"}' -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ llm - summarization ] HTTP status is 200. Checking content..."
        # Declare and assign separately so curl's exit status isn't masked by `local`.
        local CONTENT
        CONTENT=$(curl -s -X POST -d '{"query": "What is Deep Learning?"}' -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/llm_summarization.log)

        # BUG FIX: the original ran `echo 'text: ' | grep -q "$EXPECTED_RESULT"`
        # with EXPECTED_RESULT unset — grep -q "" matches anything, so the
        # content check could never fail. Check the actual response instead.
        if echo "$CONTENT" | grep -q "text"; then
            echo "[ llm - summarization ] Content is as expected."
            docker logs test-comps-llm-tgi-server >> ${LOG_PATH}/llm_summarization.log
        else
            echo "[ llm - summarization ] Content does not match the expected result: $CONTENT"
            docker logs test-comps-llm-tgi-server >> ${LOG_PATH}/llm_summarization.log
            exit 1
        fi
    else
        echo "[ llm - summarization ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs test-comps-llm-tgi-server >> ${LOG_PATH}/llm_summarization.log
        exit 1
    fi
}
71+
72+
# Stop and remove any containers left over from this test run.
# Matches every container whose name starts with "test-comps-llm-".
function stop_docker() {
    local cid
    cid=$(docker ps -aq --filter "name=test-comps-llm-*")
    if [[ -n "$cid" ]]; then
        docker stop $cid && docker rm $cid && sleep 1s
    fi
}
76+
77+
# Orchestrate the e2e test: clean slate, build image, start services,
# validate the endpoint, then tear everything down and reclaim disk space.
function main() {
    stop_docker

    build_docker_images
    start_service

    validate_microservice

    stop_docker
    echo y | docker system prune
}

main

0 commit comments

Comments
 (0)