
reka.ai Workers code


Capture the authorization value (e.g. from a packet capture or the browser's network panel on chat.reka.ai) and pass it to this Worker as the key via the Authorization header.
Models: reka-core, reka-flash, reka-edge
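
As a quick usage illustration, here is a minimal sketch of a client call in the OpenAI chat-completions format; the Worker URL and token value are placeholders, and the Worker only checks for a POST request carrying an Authorization header:

// Hypothetical client call (URL and token are placeholders)
const res = await fetch("https://your-worker.example.workers.dev", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": "<token captured from chat.reka.ai>"
  },
  body: JSON.stringify({
    model: "reka-core",          // or reka-flash / reka-edge
    stream: false,
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Hello" }
    ]
  })
});
console.log((await res.json()).choices[0].message.content);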

 

addEventListener('fetch', event => {
  event.respondWith(handleRequest(event.request))
})

const encoder = new TextEncoder();

async function handleRequest(request) {
  if (request.method !== "POST") {
    return new Response("Not a POST request", { status: 405 })
  }

  const authorizationHeader = request.headers.get("Authorization");
  if (!authorizationHeader) {
    return new Response("Missing Authorization header", { status: 401 });
  }

  const requestBody = await request.json();
  const messages = requestBody.messages;
  const isStream = requestBody.stream;
  const model = requestBody.model;

  // Convert the OpenAI-format messages into Reka AI's conversation_history format
  let conversation_history = messages.map(msg => {
    if (msg.role === "user" || msg.role === "system") {
      return { "type": "human", "text": msg.content };  
    } else if (msg.role === "assistant") {
      return { "type": "model", "text": msg.content };
    }
  });

  // Ensure the conversation history starts and ends with a "human" turn
  if (conversation_history[0].type !== "human") {
    conversation_history.unshift({ "type": "human", "text": "" });
  }
  if (conversation_history[conversation_history.length - 1].type !== "human") {
    conversation_history.push({ "type": "human", "text": "" });
  }

  // Ensure "human" and "model" turns alternate by inserting empty turns between same-type neighbors
  for (let i = 0; i < conversation_history.length - 1; i++) {
    if (conversation_history[i].type === conversation_history[i + 1].type) {
      if (conversation_history[i].type === "human") {
        conversation_history.splice(i + 1, 0, { "type": "model", "text": "" });
      } else {
        conversation_history.splice(i + 1, 0, { "type": "human", "text": "" });
      }
      i++;
    }
  }
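
  // Worked example of the normalization above: the OpenAI-style input
  //   [{role: "system", content: "S"}, {role: "user", content: "U"}]
  // first maps to two consecutive "human" turns, and the pairing loop then
  // inserts an empty "model" turn between them, giving
  //   [{type: "human", text: "S"}, {type: "model", text: ""}, {type: "human", text: "U"}]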

  const newRequestBody = {
    conversation_history: conversation_history,
    stream: isStream,
    use_search_engine: false,
    use_code_interpreter: false,
    model_name: model || "reka-core", // pass through the requested model (reka-core / reka-flash / reka-edge)
    random_seed: Math.floor(Math.random() * 1000000000000)
  };

  const response = await fetch("https://chat.reka.ai/api/chat", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
      "Accept-Encoding": "gzip, deflate, br, zstd",
      "authorization": authorizationHeader
    },
    body: JSON.stringify(newRequestBody)
  });

  if (isStream) {
    let decoder = new TextDecoder();
    let reader = response.body.getReader();
    let contentBuffer = "";
    let fullContent = ""; // full assistant reply accumulated so far
    let prevContent = ""; // content seen as of the previous iteration
    let lastFourTexts = []; // the four most recent text payloads
  
    return new Response(new ReadableStream({
      async start(controller) {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
  
          let chunk = decoder.decode(value);
          contentBuffer += chunk;
  
          // Process the buffered data line by line
          while (contentBuffer.includes("\n")) {
            const newlineIndex = contentBuffer.indexOf("\n");
            const line = contentBuffer.slice(0, newlineIndex);
            contentBuffer = contentBuffer.slice(newlineIndex + 1);
  
            if (line.startsWith("data:")) {
              try {
                const data = JSON.parse(line.slice(5));
                if (data.type === "model") {
                  if (data.text.trim() !== "") { // ignore empty text payloads
                    lastFourTexts.push(data.text); // track the most recent text payloads
                    if (lastFourTexts.length > 4) {
                      lastFourTexts.shift(); // keep only the last four
                    }
                    if (lastFourTexts.length === 4) {
                      // Detect truncation: the cumulative text should only grow and
                      // should never end in a partial "<sep" marker
                      if (lastFourTexts[3].length < lastFourTexts[2].length ||
                          lastFourTexts[3].endsWith("<sep") ||
                          lastFourTexts[3].endsWith("<")) {
                        // Truncation detected: stop reading upstream and end the stream early
                        await reader.cancel();
                        controller.close();
                        return;
                      }
                    }
                    fullContent = data.text; // each chunk carries the full reply so far
                    const newContent = fullContent.slice(prevContent.length); // keep only the newly added part
                    prevContent = fullContent; // remember what has already been sent
                    const formattedData = {
                      id: "chatcmpl-" + Math.random().toString(36).slice(2),
                      object: "chat.completion.chunk",
                      created: Math.floor(Date.now() / 1000),
                      model: model,
                      choices: [{
                        index: 0,
                        delta: {
                          content: newContent
                        },
                        finish_reason: null
                      }]
                    };
                    controller.enqueue(encoder.encode(`data: ${JSON.stringify(formattedData)}\n\n`));
                  }
                }
              } catch (error) {
                console.error("Error parsing JSON:", error, "Raw data:", line);
              }
            }
          }
        }
  
        // Send the final "stop" chunk and the [DONE] signal
        const doneData = {
          id: "chatcmpl-" + Math.random().toString(36).slice(2),
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: model,
          choices: [{
            index: 0,
            delta: {},
            finish_reason: "stop"
          }]
        };
        controller.enqueue(encoder.encode(`data: ${JSON.stringify(doneData)}\n\n`));
        controller.enqueue(encoder.encode(`data: [DONE]\n\n`));
  
        controller.close();
      }
    }), {
      headers: { "Content-Type": "text/event-stream" }
    });
  } else {
    const responseText = await response.text();
    const lines = responseText.split("\n");
    let contentBuffer = "";
    for (const line of lines) {
      if (line.startsWith("data:")) {
        const data = JSON.parse(line.slice(5));
        if (data.type === "model") {
          contentBuffer = data.text; // each data line carries the full reply so far; keep the latest
        }
      }  
    }
    const formattedData = {
      id: "chatcmpl-" + Math.random().toString(36).slice(2),  
      object: "chat.completion",
      created: Math.floor(Date.now() / 1000),
      model: model,
      choices: [{
        index: 0,
        message: {
          role: "assistant",
          content: contentBuffer
        },
        finish_reason: "stop"
      }],
      usage: { // rough estimates based on character counts, not real token counts
        prompt_tokens: JSON.stringify(conversation_history).length,
        completion_tokens: contentBuffer.length,
        total_tokens: JSON.stringify(conversation_history).length + contentBuffer.length  
      }
    };
    return new Response(JSON.stringify(formattedData), {
      headers: { "Content-Type": "application/json" }  
    });
  }
}
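
When stream is true, the Worker emits standard SSE chunks ("data: {...}" lines ending with "data: [DONE]"), so any OpenAI-compatible client should be able to consume them. As a rough sketch, assuming the same placeholder Worker URL as above, the stream can also be read by hand:

// Hypothetical streaming consumer (URL and token are placeholders)
const res = await fetch("https://your-worker.example.workers.dev", {
  method: "POST",
  headers: { "Content-Type": "application/json", "Authorization": "<token>" },
  body: JSON.stringify({
    model: "reka-core",
    stream: true,
    messages: [{ role: "user", content: "Hello" }]
  })
});

const reader = res.body.getReader();
const decoder = new TextDecoder();
let buffer = "", text = "";
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  buffer += decoder.decode(value, { stream: true });
  let idx;
  while ((idx = buffer.indexOf("\n\n")) !== -1) {
    const event = buffer.slice(0, idx).trim();
    buffer = buffer.slice(idx + 2);
    if (!event.startsWith("data:")) continue;
    const payload = event.slice(5).trim();
    if (payload === "[DONE]") continue;
    text += JSON.parse(payload).choices[0].delta.content || "";
  }
}
console.log(text);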

 

From: https://www.cnblogs.com/suducn/p/18187850

    https://juejin.cn/post/7364059278242332710 专栏: Oracle日常运维宝典系列  makefile复制代码作者:IT邦德中国DBA联盟(ACDU)成员,10余年DBA工作经验擅长主流数据Oracle、MySQL、PG、openGauss运维备份恢复,安装迁移,性能优化、故障应急处理等可提供......