
Commit 2dd176e

Integrate openeuler images into CI/CD. (#2134)
Signed-off-by: chensuyue <[email protected]>
Signed-off-by: zhihang <[email protected]>
1 parent: f7d87a6

File tree: 9 files changed (+502, -3 lines)


.github/workflows/pr-dockerfile-path-and-build-yaml-scan.yml

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ jobs:
       consistency="TRUE"
       build_yamls=$(find . -name 'build.yaml')
       for build_yaml in $build_yamls; do
-        message=$(python3 .github/workflows/scripts/check-name-agreement.py "$build_yaml")
+        message=$(python3 .github/workflows/scripts/check_name_agreement.py "$build_yaml")
         if [[ "$message" != *"consistent"* ]]; then
           consistency="FALSE"
           echo "Inconsistent service name and image name found in file $build_yaml."

.github/workflows/scripts/check-name-agreement.py renamed to .github/workflows/scripts/check_name_agreement.py

Lines changed: 3 additions & 1 deletion
@@ -19,8 +19,10 @@ def check_service_image_consistency(data):
         image_name = service_details.get("image", "")
         # Extract the image name part after the last '/'
         image_name_part = image_name.split("/")[-1].split(":")[0]
+        # Remove '-openeuler' suffix if it exists
+        fixed_service_name = service_name.rsplit("-openeuler", 1)[0]
         # Check if the service name is a substring of the image name part
-        if service_name not in image_name_part:
+        if fixed_service_name not in image_name_part:
             # Get the line number of the service name
             line_number = service_details.lc.line + 1
             inconsistencies.append((service_name, image_name, line_number))
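
A hedged usage sketch for the renamed checker (the build.yaml argument is one of the files this commit touches; the service and image names in the comment are illustrative):

    # With the new suffix handling, a service key such as "retriever-openeuler"
    # is reduced to "retriever" before the substring comparison against
    # "opea/retriever:latest-openeuler", so the openEuler variants pass.
    python3 .github/workflows/scripts/check_name_agreement.py ChatQnA/docker_image_build/build.yaml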

.github/workflows/scripts/codeScan/hadolint.sh

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ source /GenAIExamples/.github/workflows/scripts/change_color
 log_dir=/GenAIExamples/.github/workflows/scripts/codeScan
 ERROR_WARN=false
 
-find . -type f \( -name "Dockerfile*" \) -print -exec hadolint --ignore DL3006 --ignore DL3007 --ignore DL3008 --ignore DL3013 --ignore DL3018 --ignore DL3016 {} \; > ${log_dir}/hadolint.log
+find . -type f \( -name "Dockerfile*" \) -print -exec hadolint --ignore DL3033 --ignore DL3006 --ignore DL3007 --ignore DL3008 --ignore DL3013 --ignore DL3018 --ignore DL3016 {} \; > ${log_dir}/hadolint.log
 
 if [[ $(grep -c "error" ${log_dir}/hadolint.log) != 0 ]]; then
     $BOLD_RED && echo "Error!! Please Click on the artifact button to download and check error details." && $RESET
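
DL3033 is hadolint's version-pinning rule for yum install, which would fire on the yum-based openEuler Dockerfiles this commit introduces; ignoring it mirrors the ignores already in place for apt (DL3008), apk (DL3018), and pip (DL3013). A local sketch of the same check, assuming hadolint is installed:

    # Run the CI rule set against a single openEuler Dockerfile (the CI script
    # walks the whole tree with find/-exec instead).
    hadolint --ignore DL3033 --ignore DL3006 --ignore DL3007 --ignore DL3008 \
        --ignore DL3013 --ignore DL3018 --ignore DL3016 ChatQnA/Dockerfile.openEuler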

ChatQnA/Dockerfile.openEuler

Lines changed: 11 additions & 0 deletions
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler

COPY ./chatqna.py $HOME/chatqna.py
COPY ./entrypoint.sh $HOME/entrypoint.sh

ENTRYPOINT ["bash", "entrypoint.sh"]
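
A standalone build sketch for this new image, assuming it is run from the ChatQnA directory (in CI, the chatqna-openeuler entry in build.yaml below drives the same build):

    # IMAGE_REPO and BASE_TAG default to "opea" and "latest", so the base image
    # resolves to opea/comps-base:latest-openeuler.
    docker build -f Dockerfile.openEuler \
        --build-arg IMAGE_REPO=opea --build-arg BASE_TAG=latest \
        -t opea/chatqna:latest-openeuler .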
Lines changed: 184 additions & 0 deletions
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "6379:6379"
      - "8001:8001"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 10
  dataprep-redis-service:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler
    container_name: dataprep-redis-server
    depends_on:
      redis-vector-db:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
    ports:
      - "6007:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 50
    restart: unless-stopped
  tei-embedding-service:
    image: openeuler/text-embeddings-inference-cpu:1.7.0-oe2403lts
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
  retriever:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler
    container_name: retriever-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
    restart: unless-stopped
  tei-reranking-service:
    image: openeuler/text-embeddings-inference-cpu:1.7.0-oe2403lts
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate
  vllm-service:
    image: openeuler/vllm-cpu:0.9.1-oe2403lts
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 128g
    privileged: true
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HF_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
      VLLM_CPU_KVCACHE_SPACE: 30
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:9009/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-backend-server
    depends_on:
      redis-vector-db:
        condition: service_started
      dataprep-redis-service:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
      retriever:
        condition: service_started
      tei-reranking-service:
        condition: service_started
      vllm-service:
        condition: service_healthy
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
      - RETRIEVER_SERVICE_HOST_IP=retriever
      - RERANK_SERVER_HOST_IP=tei-reranking-service
      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
      - LLM_SERVER_HOST_IP=vllm-service
      - LLM_SERVER_PORT=80
      - LLM_MODEL=${LLM_MODEL_ID}
      - LOGFLAG=${LOGFLAG}
    ipc: host
    restart: always
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always
  chatqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-nginx-server
    depends_on:
      - chatqna-xeon-backend-server
      - chatqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=chatqna
      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-redis-service
      - DATAPREP_SERVICE_PORT=5000
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
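
A hedged bring-up sketch for this compose file. The file path is not shown in the capture above, so the name compose_openeuler.yaml is an assumption, as are the model IDs (typical ChatQnA Xeon defaults); the remaining variables are the ones the file itself references:

    # Assumed file name and example models; HF_TOKEN and host_ip must be set
    # (host_ip is referenced by the vllm-service healthcheck).
    export host_ip=$(hostname -I | awk '{print $1}')
    export HF_TOKEN=<your-huggingface-token>
    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
    export INDEX_NAME="rag-redis"
    docker compose -f compose_openeuler.yaml up -d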

ChatQnA/docker_image_build/build.yaml

Lines changed: 54 additions & 0 deletions
@@ -25,24 +25,60 @@ services:
       dockerfile: ./docker/Dockerfile.react
     extends: chatqna
     image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}
+  chatqna-openeuler:
+    build:
+      context: ../
+      dockerfile: ./Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}-openeuler
+  chatqna-ui-openeuler:
+    build:
+      context: ../ui
+      dockerfile: ./docker/Dockerfile.openEuler
+    extends: chatqna-ui
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}-openeuler
+  chatqna-conversation-ui-openeuler:
+    build:
+      context: ../ui
+      dockerfile: ./docker/Dockerfile.react.openEuler
+    extends: chatqna-conversation-ui
+    image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}-openeuler
   embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+  embedding-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/embeddings/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}-openeuler
   retriever:
     build:
       context: GenAIComps
       dockerfile: comps/retrievers/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
+  retriever-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/retrievers/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler
   reranking:
     build:
       context: GenAIComps
       dockerfile: comps/rerankings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/reranking:${TAG:-latest}
+  reranking-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/rerankings/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/reranking:${TAG:-latest}-openeuler
   llm-textgen:
     build:
       context: GenAIComps
@@ -61,12 +97,24 @@ services:
       dockerfile: comps/dataprep/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
+  dataprep-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/dataprep/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler
   guardrails:
     build:
      context: GenAIComps
      dockerfile: comps/guardrails/src/guardrails/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/guardrails:${TAG:-latest}
+  guardrails-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/guardrails/src/guardrails/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/guardrails:${TAG:-latest}-openeuler
   vllm-rocm:
     build:
       context: GenAIComps
@@ -90,3 +138,9 @@ services:
       dockerfile: comps/third_parties/nginx/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
+  nginx-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/third_parties/nginx/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
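
With these services defined, the openEuler image set can be built through the same compose flow as the stock images; a sketch, assuming it is run from ChatQnA/docker_image_build with GenAIComps cloned alongside (the "context: GenAIComps" entries above require it):

    # Clone the components repo the build contexts point at, then build a
    # subset of the new -openeuler targets.
    git clone https://github.com/opea-project/GenAIComps.git
    docker compose -f build.yaml build chatqna-openeuler chatqna-ui-openeuler \
        dataprep-openeuler retriever-openeuler nginx-openeuler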
