Skip to content

Commit 6949dbc

Browse files
authored
update secrets token name for AudioQnA. (#2024)
Signed-off-by: ZePan110 <[email protected]>
1 parent c33d0ef commit 6949dbc

File tree

20 files changed

+25
-26
lines changed

20 files changed

+25
-26
lines changed

AudioQnA/benchmark/accuracy/run_acc.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
1+
#!/bin/bash
22
# Copyright (C) 2024 Intel Corporation
33
# SPDX-License-Identifier: Apache-2.0
44

AudioQnA/docker_compose/amd/gpu/rocm/README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ For TGI inference usage:
7272

7373
```bash
7474
export host_ip="External_Public_IP" # ip address of the node
75-
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
75+
export HF_TOKEN="Your_HuggingFace_API_Token"
7676
export http_proxy="Your_HTTP_Proxy" # http proxy if any
7777
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
7878
export no_proxy=localhost,127.0.0.1,$host_ip,whisper-service,speecht5-service,vllm-service,tgi-service,audioqna-xeon-backend-server,audioqna-xeon-ui-server # additional no proxies if needed
@@ -84,7 +84,7 @@ For vLLM inference usage
8484

8585
```bash
8686
export host_ip="External_Public_IP" # ip address of the node
87-
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
87+
export HF_TOKEN="Your_HuggingFace_API_Token"
8888
export http_proxy="Your_HTTP_Proxy" # http proxy if any
8989
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
9090
export no_proxy=localhost,127.0.0.1,$host_ip,whisper-service,speecht5-service,vllm-service,tgi-service,audioqna-xeon-backend-server,audioqna-xeon-ui-server # additional no proxies if needed

AudioQnA/docker_compose/amd/gpu/rocm/compose.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ services:
4040
no_proxy: ${no_proxy}
4141
http_proxy: ${http_proxy}
4242
https_proxy: ${https_proxy}
43-
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
43+
HF_TOKEN: ${HF_TOKEN}
4444
HF_HUB_DISABLE_PROGRESS_BARS: 1
4545
HF_HUB_ENABLE_HF_TRANSFER: 0
4646
command: --model-id ${LLM_MODEL_ID}

AudioQnA/docker_compose/amd/gpu/rocm/compose_vllm.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ services:
3535
no_proxy: ${no_proxy}
3636
http_proxy: ${http_proxy}
3737
https_proxy: ${https_proxy}
38-
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
39-
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
38+
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
39+
HF_TOKEN: ${HF_TOKEN}
4040
HF_HUB_DISABLE_PROGRESS_BARS: 1
4141
HF_HUB_ENABLE_HF_TRANSFER: 0
4242
WILM_USE_TRITON_FLASH_ATTENTION: 0

AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,12 @@
1-
#!/usr/bin/env bash set_env.sh
2-
1+
#!/usr/bin/env bash
32
# Copyright (C) 2024 Advanced Micro Devices, Inc.
43
# SPDX-License-Identifier: Apache-2.0
54

65

76
# export host_ip=<your External Public IP> # export host_ip=$(hostname -I | awk '{print $1}')
87

98
export host_ip=${ip_address}
10-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
9+
export HF_TOKEN=${HF_TOKEN}
1110
# <token>
1211

1312
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3

AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
#!/usr/bin/env bash set_env.sh
1+
#!/usr/bin/env bash
22

33
# Copyright (C) 2024 Advanced Micro Devices, Inc.
44
# SPDX-License-Identifier: Apache-2.0
@@ -8,7 +8,7 @@
88

99
export host_ip=${ip_address}
1010
export external_host_ip=${ip_address}
11-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
11+
export HF_TOKEN=${HF_TOKEN}
1212
export HF_CACHE_DIR="./data"
1313
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
1414
export VLLM_SERVICE_PORT="8081"

AudioQnA/docker_compose/intel/cpu/xeon/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ To set up environment variables for deploying AudioQnA services, set up some par
4343

4444
```bash
4545
export host_ip="External_Public_IP" # ip address of the node
46-
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
46+
export HF_TOKEN="Your_HuggingFace_API_Token"
4747
export http_proxy="Your_HTTP_Proxy" # http proxy if any
4848
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
4949
export no_proxy=localhost,127.0.0.1,$host_ip,whisper-service,speecht5-service,vllm-service,tgi-service,audioqna-xeon-backend-server,audioqna-xeon-ui-server # additional no proxies if needed

AudioQnA/docker_compose/intel/cpu/xeon/README_vllm.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ The output of the command should contain images:
6868

6969
```bash
7070
### Replace the string 'your_huggingfacehub_token' with your HuggingFacehub repository access token.
71-
export HUGGINGFACEHUB_API_TOKEN='your_huggingfacehub_token'
71+
export HF_TOKEN='your_huggingfacehub_token'
7272
```
7373

7474
### Setting variables in the file set_env_vllm.sh

AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ services:
3636
no_proxy: ${no_proxy}
3737
http_proxy: ${http_proxy}
3838
https_proxy: ${https_proxy}
39-
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
39+
HF_TOKEN: ${HF_TOKEN}
4040
LLM_MODEL_ID: ${LLM_MODEL_ID}
4141
VLLM_TORCH_PROFILER_DIR: "/mnt"
4242
LLM_SERVER_PORT: ${LLM_SERVER_PORT}

AudioQnA/docker_compose/intel/cpu/xeon/compose_multilang.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ services:
4040
no_proxy: ${no_proxy}
4141
http_proxy: ${http_proxy}
4242
https_proxy: ${https_proxy}
43-
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
43+
HF_TOKEN: ${HF_TOKEN}
4444
LLM_MODEL_ID: ${LLM_MODEL_ID}
4545
VLLM_TORCH_PROFILER_DIR: "/mnt"
4646
LLM_SERVER_PORT: ${LLM_SERVER_PORT}

0 commit comments

Comments (0)