Installation (the commands below use the Tsinghua PyPI mirror):
pip install torch==2.0.1 torchaudio torchvision -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install modelscope -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install transformers -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install sentencepiece -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install cpm_kernels -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install nltk -i https://pypi.tuna.tsinghua.edu.cn/simple/
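Before loading the model, it can help to confirm the environment is usable. The short check below is a sketch added here, not part of the original post; the printed versions will differ by machine.
# Optional sanity check: confirm the key packages import and that a CUDA GPU is visible
import torch
import modelscope
import transformers

print("torch:", torch.__version__)
print("modelscope:", modelscope.__version__)
print("transformers:", transformers.__version__)
print("CUDA available:", torch.cuda.is_available())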
import torch
from modelscope import AutoTokenizer, AutoModel, snapshot_download
model_dir = "../chatglm3-6b"
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# If you run out of GPU memory, use the int4-quantized load instead
with torch.no_grad():
    model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
    # int4 quantization (lower GPU memory):
    # model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).quantize(4).cuda()
    # CPU-only inference:
    # model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).cpu().float()
model = model.eval()
response, history = model.chat(tokenizer, "你好", history=[])  # "Hello"
print(response)
response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history)  # "What should I do if I can't sleep at night?"
print(response)
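Note that snapshot_download is imported above but never used, because the weights are read from a local directory (../chatglm3-6b). As a rough sketch, assuming the public ModelScope model id ZhipuAI/chatglm3-6b, it can download the checkpoint and return its local path for you:
# Sketch: fetch the weights from ModelScope instead of a pre-downloaded folder.
# The model id "ZhipuAI/chatglm3-6b" is an assumption; check the ModelScope model page if it differs.
from modelscope import snapshot_download
model_dir = snapshot_download("ZhipuAI/chatglm3-6b")
# model_dir now points at the cached checkpoint and can be passed to
# AutoTokenizer.from_pretrained / AutoModel.from_pretrained as above.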
From: https://www.cnblogs.com/CVE-2003/p/18405451