Environment Setup
mkdir -p /root/agent
studio-conda -t agent -o pytorch-2.1.2
cd /root/agent
conda activate agent
git clone https://gitee.com/internlm/lagent.git
cd lagent && git checkout 581d9fb && pip install -e . && cd ..
git clone https://gitee.com/internlm/agentlego.git
cd agentlego && git checkout 7769e0d && pip install -e . && cd ..
conda activate agent
pip install lmdeploy==0.3.0
cd /root/agent
git clone -b camp2 https://gitee.com/internlm/Tutorial.git
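Once the clones and installs finish, a quick import check helps confirm the agent environment is set up correctly. This is a minimal sketch; it assumes each package exposes a __version__ attribute:
# run inside the `agent` conda environment
import lagent
import agentlego
import lmdeploy
print('lagent:', lagent.__version__)
print('agentlego:', agentlego.__version__)
print('lmdeploy:', lmdeploy.__version__)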
Lagent Web Demo
Deploy with LMDeploy
conda activate agent
lmdeploy serve api_server /root/share/new_models/Shanghai_AI_Laboratory/internlm2-chat-7b \
    --server-name 127.0.0.1 \
    --model-name internlm2-chat-7b \
    --cache-max-entry-count 0.1
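After the server starts, it listens on port 23333 by default; you can verify it is reachable before launching the web demo. A minimal sketch, assuming LMDeploy's OpenAI-compatible /v1/models route on the default port:
# check that the LMDeploy api_server is up
import requests
resp = requests.get('http://127.0.0.1:23333/v1/models')
print(resp.status_code)                          # expect 200
print([m['id'] for m in resp.json()['data']])    # served model names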
Launch and Use the Lagent Web Demo
conda activate agent
cd /root/agent/lagent/examples
streamlit run internlm2_agent_web_demo.py --server.address 127.0.0.1 --server.port 7860
On the local machine, set up port forwarding:
ssh -CNg -L 7860:127.0.0.1:7860 -L 23333:127.0.0.1:23333 root@ssh.intern-ai.org.cn -p <your SSH port>
Custom Tools with Lagent
Implement a tool that calls the QWeather (和风天气) API to provide real-time weather queries.
Create the tool file
touch /root/agent/lagent/lagent/actions/weather.py
import json
import os
import requests
from typing import Optional, Type
from lagent.actions.base_action import BaseAction, tool_api
from lagent.actions.parser import BaseParser, JsonParser
from lagent.schema import ActionReturn, ActionStatusCode
class WeatherQuery(BaseAction):
    """Weather plugin for querying weather information."""

    def __init__(self,
                 key: Optional[str] = None,
                 description: Optional[dict] = None,
                 parser: Type[BaseParser] = JsonParser,
                 enable: bool = True) -> None:
        super().__init__(description, parser, enable)
        key = os.environ.get('WEATHER_API_KEY', key)
        if key is None:
            raise ValueError(
                'Please set Weather API key either in the environment '
                'as WEATHER_API_KEY or pass it as `key`')
        self.key = key
        self.location_query_url = 'https://geoapi.qweather.com/v2/city/lookup'
        self.weather_query_url = 'https://devapi.qweather.com/v7/weather/now'

    @tool_api
    def run(self, query: str) -> ActionReturn:
        """A weather query API. Look up current weather information by city name.

        Args:
            query (:class:`str`): The city name to query.
        """
        tool_return = ActionReturn(type=self.name)
        status_code, response = self._search(query)
        if status_code == -1:
            tool_return.errmsg = response
            tool_return.state = ActionStatusCode.HTTP_ERROR
        elif status_code == 200:
            parsed_res = self._parse_results(response)
            tool_return.result = [dict(type='text', content=str(parsed_res))]
            tool_return.state = ActionStatusCode.SUCCESS
        else:
            tool_return.errmsg = str(status_code)
            tool_return.state = ActionStatusCode.API_ERROR
        return tool_return

    def _parse_results(self, results: dict) -> str:
        """Parse the weather results from the QWeather API.

        Args:
            results (dict): The weather content from the QWeather API
                in JSON format.

        Returns:
            str: The parsed weather results.
        """
        now = results['now']
        data = [
            f'Observation time: {now["obsTime"]}',
            f'Temperature: {now["temp"]}°C',
            f'Feels like: {now["feelsLike"]}°C',
            f'Weather: {now["text"]}',
            f'Wind direction: {now["windDir"]} ({now["wind360"]}°)',
            f'Wind scale: {now["windScale"]}, wind speed {now["windSpeed"]} km/h',
            f'Relative humidity: {now["humidity"]}%',
            f'Precipitation in the past hour: {now["precip"]} mm',
            f'Pressure: {now["pressure"]} hPa',
            f'Visibility: {now["vis"]} km',
        ]
        return '\n'.join(data)

    def _search(self, query: str):
        # look up the city code for the query
        try:
            city_code_response = requests.get(
                self.location_query_url,
                params={'key': self.key, 'location': query}
            )
        except Exception as e:
            return -1, str(e)
        if city_code_response.status_code != 200:
            return city_code_response.status_code, city_code_response.json()
        city_code_response = city_code_response.json()
        if len(city_code_response['location']) == 0:
            return -1, 'City not found'
        city_code = city_code_response['location'][0]['id']
        # query the current weather by city code
        try:
            weather_response = requests.get(
                self.weather_query_url,
                params={'key': self.key, 'location': city_code}
            )
        except Exception as e:
            return -1, str(e)
        return weather_response.status_code, weather_response.json()
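Before wiring the tool into the web demo, it can be exercised directly. A minimal sketch, assuming WEATHER_API_KEY is already exported and that the @tool_api-decorated run method can be called directly on the action instance:
# quick local test of the new action
from lagent.actions.weather import WeatherQuery
tool = WeatherQuery()
result = tool.run('Shanghai')                    # returns an ActionReturn
print(result.state, result.result or result.errmsg)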
Get an API KEY
https://dev.qweather.com/docs/api/
Try out the custom tool
conda activate agent
lmdeploy serve api_server /root/share/new_models/Shanghai_AI_Laboratory/internlm2-chat-7b \
    --server-name 127.0.0.1 \
    --model-name internlm2-chat-7b \
    --cache-max-entry-count 0.1
export WEATHER_API_KEY=<your QWeather API key obtained above>
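Before launching the demo, it can save time to confirm that QWeather accepts the key. A small sketch that reuses the same city-lookup endpoint as weather.py:
# verify the QWeather key works
import os
import requests
resp = requests.get(
    'https://geoapi.qweather.com/v2/city/lookup',
    params={'key': os.environ['WEATHER_API_KEY'], 'location': 'Shanghai'})
print(resp.status_code, resp.json().get('code'))  # QWeather returns code '200' on success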
conda activate agent
cd /root/agent/Tutorial/agent
streamlit run internlm2_weather_web_demo.py --server.address 127.0.0.1 --server.port 7860
ssh -CNg -L 7860:127.0.0.1:7860 -L 23333:127.0.0.1:23333 root@ssh.intern-ai.org.cn -p <your SSH port>
Use AgentLego Directly
cd /root/agent
wget http://download.openmmlab.com/agentlego/road.jpg
conda activate agent
pip install openmim==0.3.9
mim install mmdet==3.3.0
touch /root/agent/direct_use.py
import re
import cv2
from agentlego.apis import load_tool
# load tool
tool = load_tool('ObjectDetection', device='cuda')
# apply tool
visualization = tool('/root/agent/road.jpg')
print(visualization)
# visualize
image = cv2.imread('/root/agent/road.jpg')
preds = visualization.split('\n')
pattern = r'(\w+) \((\d+), (\d+), (\d+), (\d+)\), score (\d+)'
for pred in preds:
    match = re.match(pattern, pred)
    if match is None:  # skip any line that is not a detection result
        continue
    name, x1, y1, x2, y2, score = match.groups()
    x1, y1, x2, y2, score = int(x1), int(y1), int(x2), int(y2), int(score)
    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
    cv2.putText(image, f'{name} {score}', (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1)
cv2.imwrite('/root/agent/road_detection_direct.jpg', image)
python /root/agent/direct_use.py
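The regular expression in direct_use.py assumes each line of the tool's text output has the shape label (x1, y1, x2, y2), score s. The snippet below parses one such line; the sample string is illustrative only, not real model output:
import re
pattern = r'(\w+) \((\d+), (\d+), (\d+), (\d+)\), score (\d+)'
sample = 'car (123, 45, 456, 210), score 92'   # hypothetical example line
print(re.match(pattern, sample).groups())      # ('car', '123', '45', '456', '210', '92')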
Use as an Agent Tool
Modify the relevant file
/root/agent/agentlego/webui/modules/agents/lagent_agent.py
- model_name='internlm2-chat-20b',
+ model_name='internlm2-chat-7b',
Deploy with LMDeploy
conda activate agent
lmdeploy serve api_server /root/share/new_models/Shanghai_AI_Laboratory/internlm2-chat-7b \
    --server-name 127.0.0.1 \
    --model-name internlm2-chat-7b \
    --cache-max-entry-count 0.1
Launch the AgentLego WebUI
conda activate agent
cd /root/agent/agentlego/webui
python one_click.py
ssh -CNg -L 7860:127.0.0.1:7860 -L 23333:127.0.0.1:23333 root@ssh.intern-ai.org.cn -p <your SSH port>
(This attempt failed.)
Custom Tools with AgentLego
Create the tool file
touch /root/agent/agentlego/agentlego/tools/magicmaker_image_generation.py
import json
import requests
import numpy as np
from agentlego.types import Annotated, ImageIO, Info
from agentlego.utils import require
from .base import BaseTool
class MagicMakerImageGeneration(BaseTool):

    default_desc = ('This tool can call the api of magicmaker to '
                    'generate an image according to the given keywords.')

    styles_option = [
        'dongman',  # anime
        'guofeng',  # traditional Chinese style
        'xieshi',   # realistic
        'youhua',   # oil painting
        'manghe',   # blind box
    ]
    aspect_ratio_options = [
        '16:9', '4:3', '3:2', '1:1',
        '2:3', '3:4', '9:16'
    ]

    @require('opencv-python')
    def __init__(self,
                 style='guofeng',
                 aspect_ratio='4:3'):
        super().__init__()
        if style in self.styles_option:
            self.style = style
        else:
            raise ValueError(f'The style must be one of {self.styles_option}')
        if aspect_ratio in self.aspect_ratio_options:
            self.aspect_ratio = aspect_ratio
        else:
            raise ValueError(
                f'The aspect ratio must be one of {self.aspect_ratio_options}')

    def apply(self,
              keywords: Annotated[str,
                                  Info('A series of Chinese keywords separated by comma.')]
              ) -> ImageIO:
        import cv2
        response = requests.post(
            url='https://magicmaker.openxlab.org.cn/gw/edit-anything/api/v1/bff/sd/generate',
            data=json.dumps({
                "official": True,
                "prompt": keywords,
                "style": self.style,
                "poseT": False,
                "aspectRatio": self.aspect_ratio
            }),
            headers={'content-type': 'application/json'}
        )
        image_url = response.json()['data']['imgUrl']
        # download the generated image and convert BGR -> RGB
        image_response = requests.get(image_url)
        image = cv2.cvtColor(
            cv2.imdecode(np.frombuffer(image_response.content, np.uint8), cv2.IMREAD_COLOR),
            cv2.COLOR_BGR2RGB)
        return ImageIO(image)
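The new tool can also be tried outside the WebUI. A minimal sketch, assuming AgentLego tool instances are directly callable and the MagicMaker endpoint is reachable:
from agentlego.tools.magicmaker_image_generation import MagicMakerImageGeneration
tool = MagicMakerImageGeneration(style='dongman', aspect_ratio='1:1')
image = tool('一只可爱的猫咪, 阳光草地')   # the tool expects Chinese keywords
print(type(image))                         # expected: agentlego.types.ImageIO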
Register the new tool
Edit the /root/agent/agentlego/agentlego/tools/__init__.py file:
from .base import BaseTool
from .calculator import Calculator
from .func import make_tool
from .image_canny import CannyTextToImage, ImageToCanny
from .image_depth import DepthTextToImage, ImageToDepth
from .image_editing import ImageExpansion, ImageStylization, ObjectRemove, ObjectReplace
from .image_pose import HumanBodyPose, HumanFaceLandmark, PoseToImage
from .image_scribble import ImageToScribble, ScribbleTextToImage
from .image_text import ImageDescription, TextToImage
from .imagebind import AudioImageToImage, AudioTextToImage, AudioToImage, ThermalToImage
from .object_detection import ObjectDetection, TextToBbox
from .ocr import OCR
from .scholar import * # noqa: F401, F403
from .search import BingSearch, GoogleSearch
from .segmentation import SegmentAnything, SegmentObject, SemanticSegmentation
from .speech_text import SpeechToText, TextToSpeech
from .translation import Translation
from .vqa import VQA
+ from .magicmaker_image_generation import MagicMakerImageGeneration
__all__ = [
'CannyTextToImage', 'ImageToCanny', 'DepthTextToImage', 'ImageToDepth',
'ImageExpansion', 'ObjectRemove', 'ObjectReplace', 'HumanFaceLandmark',
'HumanBodyPose', 'PoseToImage', 'ImageToScribble', 'ScribbleTextToImage',
'ImageDescription', 'TextToImage', 'VQA', 'ObjectDetection', 'TextToBbox', 'OCR',
'SegmentObject', 'SegmentAnything', 'SemanticSegmentation', 'ImageStylization',
'AudioToImage', 'ThermalToImage', 'AudioImageToImage', 'AudioTextToImage',
'SpeechToText', 'TextToSpeech', 'Translation', 'GoogleSearch', 'Calculator',
- 'BaseTool', 'make_tool', 'BingSearch'
+ 'BaseTool', 'make_tool', 'BingSearch', 'MagicMakerImageGeneration'
]
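After editing __init__.py, a quick import from the package namespace confirms the tool is registered:
from agentlego.tools import MagicMakerImageGeneration
print(MagicMakerImageGeneration.default_desc)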
Try out the custom tool
On the Tools page, select MagicMakerImageGeneration and click save; then return to the Chat page, select the MagicMakerImageGeneration tool, and you can start using it.