目录
1. 描述
项目描述:
本项目使用了streamlit框架来构建前端,展示一些标语、输入框和按钮。后端的大模型是星火大模型V3.0版本。
项目运行说明:
1、首先,从开放平台获取密钥信息,用于调用星火大模型时的鉴权密钥(前提是已经获得了token授权)。
获取地址:https://console.xfyun.cn/services/bm3
2、将密钥填入代码中: 修改 main_translate.py 文件中,line 39 的appid、api_key、api_secret等信息;
3、安装必要的依赖库: 在终端(Terminal)中执行命令:pip install -r requirements.txt ;
4、运行项目:在终端(Terminal)中执行命令:streamlit run ./main_translate.py ;
5、通过浏览器访问本地的8501端口: 地址栏输入:http://localhost:8501 进行访问。
requirements.txt
streamlit
streamlit-chat
websocket-client
输入的文本:
If you can keep your head when all about you Are losing theirs and blaming it on you;
请将上述英文内容翻译为中文, 请按照古文风格进行翻译, 用古诗词的行文风格, 做到辞藻精炼, 可用典故。
signature_origin:
host: spark-api.xf-yun.com
date: Thu, 19 Sep 2024 13:14:12 GMT
GET /v3.1/chat HTTP/1.1
1726751653.2044525
data:
{'header': {'code': 0, 'message': 'Success', 'sid': 'cht000b18e8@dx1920a6b34d9b8f3550', 'status': 0}, 'payload': {'choices': {'status': 0, 'seq': 0, 'text': [{'content': '若能', 'role': 'assistant', 'index': 0}]}}}
1726751653.2674942
data:
{'header': {'code': 0, 'message': 'Success', 'sid': 'cht000b18e8@dx1920a6b34d9b8f3550', 'status': 1}, 'payload': {'choices': {'status': 1, 'seq': 1, 'text': [{'content': '立身', 'role': 'assistant', 'index': 0}]}}}
1726751653.357353
data:
{'header': {'code': 0, 'message': 'Success', 'sid': 'cht000b18e8@dx1920a6b34d9b8f3550', 'status': 1}, 'payload': {'choices': {'status': 1, 'seq': 2, 'text': [{'content': '于危难', 'role': 'assistant', 'index': 0}]}}}
1726751653.6089308
data:
{'header': {'code': 0, 'message': 'Success', 'sid': 'cht000b18e8@dx1920a6b34d9b8f3550', 'status': 1}, 'payload': {'choices': {'status': 1, 'seq': 3, 'text': [{'content': '之际,\n', 'role': 'assistant', 'index': 0}]}}}
1726751654.0610635
data:
{'header': {'code': 0, 'message': 'Success', 'sid': 'cht000b18e8@dx1920a6b34d9b8f3550', 'status': 1}, 'payload': {'choices': {'status': 1, 'seq': 4, 'text': [{'content': '众人皆丧心病狂,归咎于汝;', 'role': 'assistant', 'index': 0}]}}}
1726751654.9408352
data:
{'header': {'code': 0, 'message': 'Success', 'sid': 'cht000b18e8@dx1920a6b34d9b8f3550', 'status': 2}, 'payload': {'choices': {'status': 2, 'seq': 5, 'text': [{'content': '\n尔能静心安坐,\n不随众流。', 'role': 'assistant', 'index': 0}]}, 'usage': {'text': {'question_tokens': 60, 'prompt_tokens': 60, 'completion_tokens': 41, 'total_tokens': 101}}}}
2 代码
import SparkLLM_Thread
import streamlit as st
from streamlit_chat import message
# 页面提示语, 开场白
st.markdown("#### 您好, 我是多风格翻译官小星, 很荣幸为您服务。 :sunglasses:")
# 文本输入框
user_input = st.text_input("请输入您需要翻译的英文文本:", key='input')
# 设置一些风格按钮选项, 来设置不同的翻译风格
but = st.radio(
"翻译风格:",
('默认风格','古文风格', '学术风格', '琼瑶风格',), horizontal=True)
if but =='默认风格':
style = '。 '
elif but =='古文风格':
style = ', 请按照古文风格进行翻译, 用古诗词的行文风格, 做到辞藻精炼, 可用典故。'
elif but =='学术风格':
style = ', 请按照学术风格进行翻译, 保持严谨认真的风格。'
elif but == '琼瑶风格':
style = ', 请按照琼瑶风格进行翻译, 意境优美, 充满诗情画意, 或多愁善感, 或心花怒放。'
else:
style = '。 '
# 用于判断模型生成内容是否存在, 不存在则创建列表
if 'generated' not in st.session_state:
st.session_state['generated'] = []
# 用于判断用户输入内容是否存在, 不存在则创建列表
if 'past' not in st.session_state:
st.session_state['past'] = []
if user_input:
# 组装prompt, 最终传入大模型的是text内容
text = user_input + "\n请将上述英文内容翻译为中文" + style
print("输入的文本:")
print(text)
# 保存用户输入到列表, 用于后续页面展示
st.session_state['past'].append(user_input)
# 向星火模型发出请求, 其中appid, api_key, api_secret 获取地址: https://console.xfyun.cn/services/bm3
output =SparkLLM_Thread.main(uid='123',
chat_id='123qwer',
appid='111',
api_key='11111',
api_secret='1111',
gpt_url='wss://spark-api.xf-yun.com/v3.1/chat',
question=[{"role": "user", "content":text}])
# 保存大模型输出到列表, 用于后续页面展示
st.session_state['generated'].append(output)
# 在前端页面展示列表中的内容
if st.session_state['generated']:
for i in range(len(st.session_state['generated']) - 1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
# encoding: UTF-8
import _thread as thread
import base64
import hashlib
import hmac
import json
import time
from urllib.parse import urlparse
from datetime import datetime
from time import mktime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time
import websocket
answer =''
tokens = 0
class Ws_Param(object):
# 初始化
def __init__(self, APPID, APIKey, APISecret, gpt_url):
self.APPID = APPID
self.APIKey = APIKey
self.APISecret = APISecret
self.host = urlparse(gpt_url).netloc
self.path = urlparse(gpt_url).path
self.gpt_url = gpt_url
# 生成待鉴权的url
def create_url(self):
# 生成RFC1123格式的时间戳
now = datetime.now()
date = format_date_time(mktime(now.timetuple()))
# 拼接字符串
signature_origin = "host: " + self.host + "\n"
signature_origin += "date: " + date + "\n"
signature_origin += "GET " + self.path + " HTTP/1.1"
print("signature_origin:\n" + str(signature_origin))
# 进行hmac-sha256进行加密
signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
digestmod=hashlib.sha256).digest()
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
# 将请求的鉴权参数组合为字典
v = {
"authorization": authorization,
"date": date,
"host": self.host
}
# 拼接鉴权参数, 生成url
url = self.gpt_url + '?' + urlencode(v)
# 此处打印出建立连接时候的url, 参考本demo的时候可取消上方打印的注释, 比对相同参数时生成的url与自己代码生成的url是否一致
return url
# 收到websocket错误的处理
def on_error(ws, error):
print("### error:", error)
# 收到websocket关闭的处理
def on_close(ws,content,test):
return 0
# print("### closed ###")
# 收到websocket连接建立的处理
def on_open(ws):
thread.start_new_thread(run, (ws,))
# 连接建立, 发送数据
def run(ws, *args):
data = json.dumps(gen_params(appid=ws.appid, question=ws.question, uid=ws.uid, chat_id=ws.chat_id))
ws.send(data)
# 收到websocket消息的处理
def on_message(ws, message):
# uid = ws.uid
# chat_id = ws.chat_id
endTime = time.time()
print(endTime)
data = json.loads(message)
print("data: \n" + str(data))
code = data['header']['code']
if code != 0:
print(f'请求错误: {code}, {data}')
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
content = choices["text"][0]["content"]
# global answer
ws.answer += content
# print(content, end='')
# print("用户:" + ws.uid + " 会话:" +ws.chat_id +"\n返回结果:" +ws.answer)
if status == 2:
global tokens
tokens = data["payload"]["usage"]
ws.close()
def gen_params(appid, question, uid, chat_id):
# 通过appid和用户的提问来生成请求参数
data = {
"header": {
"app_id": appid,
"uid": uid # 用于区分业务层用户
},
"parameter": {
"chat": {
"domain": "generalv3", # 通用场景
"temperature": 0.8,
"top_k" : 6,
"max_tokens": 4096,
"auditing": "default",
"stream": True,
"chat_id":chat_id
}
},
"payload": {
"message": {
"text": question
}
}
}
return data
def main(uid, chat_id, appid, api_key, api_secret, gpt_url, question):
# 构造对象, 参数创建的对象
wsParam = Ws_Param(appid, api_key, api_secret, gpt_url)
# 拼接鉴权URL字符串
wsUrl = wsParam.create_url()
# 实例化websocket对象
ws = websocket.WebSocketApp(wsUrl,
on_message=on_message,
on_error=on_error,
on_close=on_close,
on_open=on_open)
ws.appid = appid
ws.uid = uid
ws.chat_id = chat_id
ws.answer = ''
ws.question = question
# begTime = time.time()
# print(begTime)
# 启动服务
ws.run_forever()
return ws.answer
if __name__ == '__main__':
result = main(uid='XXXXXX',
chat_id='XXXXXX',
appid='XXXXXXXX',
api_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
api_secret='XXXXXXXXXXXXXXXXXXXXXXXXXXXX',
gpt_url='wss://spark-api.xf-yun.com/v3.1/chat',
# gpt_url='wss://spark-api-knowledge.xf-yun.com/v2.1/multimodal',
question=[{"role": "user", "content": "湖北襄阳唐城出现了什么舆论?"}])
print()
print("返回结果为:\n" + str(result))
标签:status,04,url,模型,api,ws,chat,data
From: https://www.cnblogs.com/cavalier-chen/p/18421398