Exploring a QQ Chat Bot Deployment with OneBot
Pasting some rough code here.
A slightly less rough version follows further down.
import aiohttp
import asyncio
import json
from ollama import AsyncClient

# Local Ollama instance serving the qwen2:7b model
client = AsyncClient(host='http://localhost:11434')

async def chat(content: str) -> str:
    # Ask the model a single question and return only the reply text
    message = {'role': 'user', 'content': content}
    response = await client.chat(model='qwen2:7b', messages=[message])
    return response['message']['content']

# OneBot send_group_msg payload, split in two so the reply text can be spliced in.
# Note: the reply is inserted without JSON escaping, so quotes/newlines in it will break the payload.
str1 = '{\
"action": "send_group_msg",\
"params": {\
"group_id": 175736385,\
"message": [{\
"type": "text",\
"data": {\
"text": "'
str2 = '"\
}}, {\
"type": "image",\
"data": {\
"file": "https://moe.jitsu.top/img/"\
}\
}]\
},\
"echo": "123"\
}'

async def connect_websocket(url):
    try:
        async with aiohttp.ClientSession() as session:
            async with session.ws_connect(url) as ws:
                # Send one complete test message once the connection is up
                await ws.send_str(str1 + 'OneBot connected' + str2)
                async for msg in ws:
                    if msg.type == aiohttp.WSMsgType.TEXT:
                        print(msg.data)
                        a = json.loads(msg.data)
                        # Only message events carry "raw_message"; skip heartbeats etc.
                        if 'raw_message' not in a:
                            continue
                        print('raw_message:', a['raw_message'])
                        try:
                            reply = await chat(a['raw_message'])
                            payload = str1 + reply + str2
                            await ws.send_str(payload)
                        except Exception as err:
                            print('failed to reply:', err)
                    elif msg.type == aiohttp.WSMsgType.CLOSED:
                        break
    except aiohttp.ClientError as err:
        print(f'Error: {err}')

asyncio.run(connect_websocket('http://localhost:3001'))
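One fragile spot in the script above is the str1/str2 splicing: the model's reply goes into the JSON string unescaped, so a quote or newline in the reply produces an invalid send_group_msg payload. Below is a small sketch of building the same payload with json.dumps instead; the helper name build_group_msg is mine, the field values are copied from str1/str2 above.

import json

def build_group_msg(group_id: int, text: str) -> str:
    # Same send_group_msg action as str1/str2 above, but serialized with json.dumps
    # so quotes and newlines inside the reply are escaped correctly.
    payload = {
        "action": "send_group_msg",
        "params": {
            "group_id": group_id,
            "message": [
                {"type": "text", "data": {"text": text}},
                {"type": "image", "data": {"file": "https://moe.jitsu.top/img/"}},
            ],
        },
        "echo": "123",
    }
    return json.dumps(payload, ensure_ascii=False)

# usage inside connect_websocket:
# await ws.send_str(build_group_msg(175736385, reply))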
Below is what I'm writing and actually using now. The AI is really awkward without conversation context... I fiddled with it for ages without getting anywhere (a sketch of one way to carry context follows the AIollama.py listing below).
I can only paste part of it here, though.
- AIollama.py
import ollama
from ollama import AsyncClient

# client = ollama.Client(host="localhost:11434")
client = AsyncClient(host="http://localhost:11434")

async def chat(content: str) -> str:
    message = {'role': 'user', 'content': content}
    # Earlier attempt: feeding generate() a hand-made context vector went nowhere.
    # context1 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
    # response = await client.generate(model='qwen2:7b', context=context1, prompt=content, stream=False)
    # return response["response"]
    response = await client.chat(model='qwen2:7b', messages=[message], stream=False)
    return response["message"]["content"]

# Unused variant, kept for reference:
# def get_response(prompt) -> str:
#     return client.chat(
#         model="qwen2:7b",
#         messages=[
#             {
#                 "role": "user",
#                 "content": prompt,
#             },
#         ],
#         stream=False
#     )
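On the context problem mentioned above: chat() sends a single user message each call, so the model never sees earlier turns. A minimal sketch of one way to deal with that is to keep a per-group history and resend it on every call; the histories dict and MAX_TURNS cap below are my own assumptions, not part of the original bot.

from collections import defaultdict
from ollama import AsyncClient

client = AsyncClient(host="http://localhost:11434")

# Hypothetical per-group chat history; neither this dict nor MAX_TURNS exist in the original code.
histories: dict[int, list[dict]] = defaultdict(list)
MAX_TURNS = 20  # assumed cap so the prompt does not grow without bound

async def chat_with_context(group_id: int, content: str) -> str:
    history = histories[group_id]
    history.append({'role': 'user', 'content': content})
    # Send the whole accumulated history so the model sees earlier turns
    response = await client.chat(model='qwen2:7b', messages=history, stream=False)
    reply = response['message']['content']
    history.append({'role': 'assistant', 'content': reply})
    # Keep only the most recent user/assistant pairs
    del history[:-MAX_TURNS * 2]
    return reply

The /ai branch in main.py could then call AIollama.chat_with_context(m.check().raw["group_id"], chatText) instead of AIollama.chat(chatText).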
- main.py
import asyncio
from re import split
import AIollama
from bot import Bot, Command
from message import MessageList
from event import Event, PostType
import requests as req

async def msgProc(bot: Bot, m: Event):
    print(m.check().raw)
    zhiling = m.check().get_msg()
    print(zhiling)
    # Dispatch on the command at the start of the message
    match zhiling:
        case _ if "/echo" in zhiling:
            if zhiling != "/echo":
                zhiling = zhiling.replace("/echo ", "")
            await bot.send_group_msg(m.check().raw["group_id"], [MessageList.text(zhiling)])
        case "/bing":
            # Bing daily wallpaper API; the returned url is relative to cn.bing.com
            zhiling = req.request(
                "get",
                "https://cn.bing.com/HPImageArchive.aspx",
                params={"format": "js", "idx": 0, "n": 1}
            )
            print(zhiling.text)
            zhiling = zhiling.json()
            zhiling = "https://cn.bing.com" + zhiling["images"][0]["url"]
            await bot.send_group_msg(m.check().raw["group_id"], [MessageList.image(str(zhiling))])
        case "/random":
            await bot.send_group_msg(m.check().raw["group_id"], [MessageList.image("https://moe.jitsu.top/img/")])
        case "/help":
            await bot.send_group_msg(m.check().raw["group_id"], [MessageList.text("/bing——必应每日一图\n/dev——发送调试信息\n/random——随机二次元\n/help——帮助\n/echo——重复话语")])
        case _ if "/ai" in zhiling:
            chatText = zhiling.replace("/ai ", "")
            await bot.send_group_msg(m.check().raw["group_id"], [MessageList.text(await AIollama.chat(chatText))])

async def main():
    cmd = Command("")
    bot = Bot("http://localhost:3001", cmd)
    cmd.cmds.append(msgProc)
    await bot.connect()
    await bot.run()
    print("aa")

if __name__ == '__main__':
    asyncio.run(main())