1. When installing dependencies with pip install -r .\requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple, if requirements.txt contains packages such as dlib or basicsr, pull them out and install them separately (see the sketch below); otherwise their build failures will abort the install and block the remaining packages.
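A minimal sketch of the split, assuming requirements.txt is in the current directory (the output name requirements_filtered.txt is just an illustrative choice):

# Drop the dlib/basicsr entries so the remaining packages install cleanly,
# then install the dropped ones by hand as described in items 2 and 3 below.
SKIP = ("dlib", "basicsr")
with open("requirements.txt", encoding="utf-8") as src:
    kept = [line for line in src if not line.strip().lower().startswith(SKIP)]
with open("requirements_filtered.txt", "w", encoding="utf-8") as dst:
    dst.writelines(kept)

Then run pip install -r requirements_filtered.txt -i https://pypi.tuna.tsinghua.edu.cn/simple as usual.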
2. Install basicsr==1.4.2 with the commands below. basicsr depends on tb-nightly, which the Tsinghua mirror does not carry, so pull tb-nightly from the Aliyun mirror first:
pip install -i https://mirrors.aliyun.com/pypi/simple tb-nightly
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple basicsr==1.4.2
3. Install dlib==19.24.0; see the following reference:
https://blog.csdn.net/weixin_43867038/article/details/127801062
python -m pip install CMake -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install Boost -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple dlib==19.24.0
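To confirm both packages landed correctly, a quick sanity check (a sketch; run it in the same environment you installed into):

# Both packages expose __version__; an ImportError here means the install failed.
import basicsr
import dlib
print(basicsr.__version__)  # expect 1.4.2
print(dlib.__version__)     # expect 19.24.0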
4. chatglm3-6b installation issue
# Run the download script below directly
from modelscope import snapshot_download
# model_dir is the local directory the weights were downloaded to; use it as the model path below
model_dir = snapshot_download("ZhipuAI/chatglm3-6b", revision="v1.0.0", cache_dir='/home/agent/chatglm3-6b')
# CLI client chat
import os
import platform
from transformers import AutoTokenizer, AutoModel
import torch
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
path = '/home/agent/ZhipuAI/chatglm3-6b'  # adjust to match the model_dir returned by the download script above
MODEL_PATH = os.environ.get('MODEL_PATH', path)
TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH)
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map=DEVICE).eval()
os_name = platform.system()
clear_command = 'cls' if os_name == 'Windows' else 'clear'
stop_stream = False
welcome_prompt = "Welcome to the ChatGLM3-6B model. Type your message to chat, 'clear' to clear the history, 'stop' to exit_{0}".format(DEVICE)
def build_prompt(history):
    prompt = welcome_prompt
    for query, response in history:
        prompt += f"\n\nUser: {query}"
        prompt += f"\n\nChatGLM3-6B: {response}"
    return prompt
def main():
    past_key_values, history = None, []
    global stop_stream
    print(welcome_prompt)
    while True:
        query = input("\nUser: ")
        if query.strip() == "stop":
            break
        if query.strip() == "clear":
            past_key_values, history = None, []
            os.system(clear_command)
            print(welcome_prompt)
            continue
        print("\nChatGLM:", end="")
        current_length = 0
        for response, history, past_key_values in model.stream_chat(tokenizer, query, history=history, top_p=1,
                                                                    temperature=0.01,
                                                                    past_key_values=past_key_values,
                                                                    return_past_key_values=True):
            if stop_stream:
                stop_stream = False
                break
            else:
                print(response[current_length:], end="", flush=True)
                current_length = len(response)
        print("")

if __name__ == "__main__":
    main()
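If you only need to verify that the model loads and responds, a minimal non-interactive check is enough. This is a sketch reusing the path above; model.chat is the non-streaming counterpart of stream_chat exposed by the ChatGLM3 remote code:

import torch
from transformers import AutoTokenizer, AutoModel

MODEL_PATH = '/home/agent/ZhipuAI/chatglm3-6b'  # same path as in the CLI script; adjust to your download location
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map=DEVICE).eval()
response, history = model.chat(tokenizer, "Hello", history=[])  # single non-streaming turn
print(response)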