
discord.js - Discord bot help for gpt-3.5-turbo


I know showing up here with 200+ lines of code and asking for help isn't great practice, but unfortunately I've already tried GPT-4 and it hasn't been able to help (probably because of its 2021 knowledge cutoff). Even after feeding it articles to try to solve this, we're both stumped. Here is my code:

require('dotenv').config();
const fs = require('fs');
const { Client, Intents, MessageEmbed, ReactionCollector } = require("discord.js");
const promptsFile = 'prompts.txt';
const cacheFile = 'conversationData.txt';
const BOT_CHANNEL_ID = '1089681927482658930';


let prompts = fs.readFileSync(promptsFile, 'utf-8');
const qaPrompt = `You are a CEO's assistant. Your goal is to help your CEO plan his or her day, create schedules, and stay on track. You also help develop new ideas, etc.\n`;

prompts += qaPrompt;

function getConversationData() {
  let conversationData = {};

  try {
    const conversationDataStr = fs.readFileSync(cacheFile, 'utf-8');
    if (conversationDataStr) {
      conversationData = JSON.parse(conversationDataStr);
    }
  } catch (err) {
    console.log('Error while reading conversation data:', err);
  }

  return conversationData;
}

if (!fs.existsSync(cacheFile)) {
  fs.writeFileSync(cacheFile, '{}');
  console.log(`Created ${cacheFile}`);
}


const client = new Client({
  intents: [
    "GUILDS",
    "GUILD_MESSAGES"
  ]
})

client.once('ready', () => {
  console.log(`Logged in as ${client.user.tag}`);
  console.log(`Username: ${client.user.username}`);
  console.log(`Discriminator: ${client.user.discriminator}`);
  console.log(`Avatar: ${client.user.avatar}`);
  console.log(`User ID: ${client.user.id}`);
  console.log(`Bot: ${client.user.bot}`);
  console.log(`System: ${client.user.system}`);
  console.log(`Flags: ${client.user.flags}`);
});


client.login(process.env.BOT_TOKEN)

const PAST_MESSAGES = 8
const STATE_SPACE = 3
const THUMBS_UP = '👍'
const THUMBS_DOWN = '👎'

client.on('messageCreate', async (message) => {
  if (message.author.bot) return;
  if (message.channel.id !== BOT_CHANNEL_ID) return;

  message.channel.sendTyping();

  let messages = Array.from(await message.channel.messages.fetch({
    limit: PAST_MESSAGES,
    before: message.id
  }))
  messages = messages.map(m => m[1])
  messages.unshift(message)

  let users = [...new Set([...messages.map(m => m.member?.displayName), client.user.username])]

  let lastUser = users.pop()

  let conversationData = getConversationData();
  const channelId = message.channel.id;

  let stateSpace = '';
  for (let i = 3; i < messages.length; i += 2) {
    const userMsg = messages[i - 2];
    const botMsg = messages[i - 1];
    const prevBotMsg = messages[i - 3];

    stateSpace += `${prevBotMsg.author.username}: ${prevBotMsg.content}\n`;
    stateSpace += `${userMsg.author.username}: ${userMsg.content}\n`;
    stateSpace += `${botMsg.author.username}: ${botMsg.content}\n`;
  }



  async function createCompletion(options) {
    try {
      const axios = require('axios');
      const response = await axios({
        method: 'post',
        url: 'https://api.openai.com/v1/chat',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`
        },
        data: {
          "model": "text-davinci-002",
          "prompt": options.messages.map(msg => `${msg.role === "assistant" ? "Assistant" : "User"}: ${msg.content}`).join('\n') + '\n',
          "temperature": 0.4,
          "max_tokens": 300,
          "top_p": 1,
          "presence_penalty": 0,
          "frequency_penalty": 0,
          "stop": "\n"
        }
      });

      return response.data.choices[0].text;
    } catch (error) {
      console.error(error);
      return null;
    }
  }



  // Define the options for the createCompletion function
  const options = {
    messages: messages.map(msg => ({
      role: msg.author.bot ? "assistant" : "user",
      content: msg.content
    })),
    temperature: 0.4,
    max_tokens: 300,
    top_p: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
    stop: "\n"
  };

  const response = await createCompletion(options);


  if (!response) {
    console.error('Error while creating completion');
    return;
  }


console.log("API response:", response);
const truncatedResponse = (response && response.choices && response.choices.length > 0) ? response.choices[0].text.slice(0, 2000) : 'No response';



console.log("response", response.choices?.[0]?.text || 'No response')

const embed = new MessageEmbed()
.setDescription(response.choices?.[0]?.text || 'No response');
const botMsg = await message.channel.send({ embeds: [embed] });
console.log("embed", embed);


// Add reactions for user feedback
await botMsg.react('👍');
await botMsg.react('👎');

// Create filter for collector
const filter = (reaction, user) => {
return ['👍', '👎'].includes(reaction.emoji.name) && user.id === message.author.id;
};

console.log("botMsg", botMsg);


  // Create collector to wait for user feedback
  const collector = botMsg.createReactionCollector({ filter, time: 60000, max: 1 });

  collector.on('collect', async (reaction) => {
    let userFeedback = '';
    if (reaction.emoji.name === '👍') {
      userFeedback = 'positive';
    } else if (reaction.emoji.name === '👎') {
      userFeedback = 'negative';
    }

    // Update conversation data with user feedback
    if (!conversationData[channelId]) {
      conversationData[channelId] = [];
    }
    conversationData[channelId].push({
      author: message.author.username,
      content: userFeedback
    });

    // Save conversation data to file
    fs.writeFileSync(cacheFile, JSON.stringify(conversationData));
    console.log(`The conversation data for channel ${channelId} has been cached.`);

    // If user gives negative feedback, ask for clarification
    if (userFeedback === 'negative') {
      const followUpEmbed = new MessageEmbed()
        .setDescription("I'm sorry to hear that. Could you please provide more information on what I can do better?");
      await message.channel.send({ embeds: [followUpEmbed] });
    }
  });

  collector.on('end', async (collected) => {
    // If no feedback is collected, assume neutral feedback
    if (collected.size === 0) {
      // Update conversation data with neutral feedback
      if (!conversationData[channelId]) {
        conversationData[channelId] = [];
      }
      conversationData[channelId].push({
        author: message.author.username,
        content: message.content
      });

      // Save conversation data to file
      fs.writeFileSync(cacheFile, JSON.stringify(conversationData));
      console.log(`The conversation data for channel ${channelId} has been cached.`);

      // Update conversation data with the latest message
      if (!conversationData[channelId]) {
        conversationData[channelId] = [];
      }
      conversationData[channelId].push({
        author: message.author.username,
        content: message.content
      });

      // Save conversation data to file
      fs.writeFileSync(cacheFile, JSON.stringify(conversationData));
      console.log(`The conversation data for channel ${channelId} has been cached.`);
    }
  });
});

Here is my error:



PS C:\Users\AJBEATX\Desktop\GPT-3-discord-chatbot-backupmain> node index.js
Logged in as [SDO] Assistant#3551
Username: [SDO] Assistant
Discriminator: 3551
Avatar: 87ac387bc9fb8a963f90b4260f7e711d
User ID: 1077722654288658442
Bot: true
System: false
Flags: [object Object]
AxiosError: Request failed with status code 404
at settle (C:\Users\AJBEATX\Desktop\GPT-3-discord-chatbot-backupmain\node_modules\axios\dist\node\axios.cjs:1900:12)
at IncomingMessage.handleStreamEnd (C:\Users\AJBEATX\Desktop\GPT-3-discord-chatbot-backupmain\node_modules\axios\dist\node\axios.cjs:2952:11)
at IncomingMessage.emit (node:events:525:35)
at endReadableNT (node:internal/streams/readable:1359:12)
at process.processTicksAndRejections (node:internal/process/task_queues:82:21) {
code: 'ERR_BAD_REQUEST',
config: {
transitional: {
silentJSONParsing: true,
forcedJSONParsing: true,
clarifyTimeoutError: false
},
adapter: [ 'xhr', 'http' ],
transformRequest: [ [Function: transformRequest] ],
transformResponse: [ [Function: transformResponse] ],
timeout: 0,
xsrfCookieName: 'XSRF-TOKEN',
xsrfHeaderName: 'X-XSRF-TOKEN',
maxContentLength: -1,
maxBodyLength: -1,
env: { FormData: [Function], Blob: [class Blob] },
validateStatus: [Function: validateStatus],
headers: AxiosHeaders {
Accept: 'application/json, text/plain, */*',
'Content-Type': 'application/json',
Authorization: 'Bearer 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',
'User-Agent': 'axios/1.3.4',
'Content-Length': '342',
'Accept-Encoding': 'gzip, compress, deflate, br'
},
method: 'post',
url: 'https://api.openai.com/v1/chat',
data: '{"model":"text-davinci-002","prompt":"User: Hey there\\nUser: Hello assistant\\nUser: Hello?\\nUser: Hello assistant, how are you?\\nUser: Hello assist\\nUser: Good evening assistant. How are you today?\\nUser: Hello?\\nUser: Hello?\\nAssistant: \\n","temperature":0.4,"max_tokens":300,"top_p":1,"presence_penalty":0,"frequency_penalty":0,"stop":"\\n"}'
},
request: <ref *1> ClientRequest {
_events: [Object: null prototype] {
abort: [Function (anonymous)],
aborted: [Function (anonymous)],
connect: [Function (anonymous)],
error: [Function (anonymous)],
socket: [Function (anonymous)],
timeout: [Function (anonymous)],
finish: [Function: requestOnFinish]
},
_eventsCount: 7,
_maxListeners: undefined,
outputData: [],
outputSize: 0,
writable: true,
destroyed: false,
_last: true,
chunkedEncoding: false,
shouldKeepAlive: false,
maxRequestsOnConnectionReached: false,
_defaultKeepAlive: true,
useChunkedEncodingByDefault: true,
sendDate: false,
_removedConnection: false,
_removedContLen: false,
_removedTE: false,
strictContentLength: false,
_contentLength: '342',
_hasBody: true,
_trailer: '',
finished: true,
_headerSent: true,
_closed: false,
socket: TLSSocket {
_tlsOptions: [Object],
_secureEstablished: true,
_securePending: false,
_newSessionPending: false,
_controlReleased: true,
secureConnecting: false,
_SNICallback: null,
servername: 'api.openai.com',
alpnProtocol: false,
authorized: true,
authorizationError: null,
encrypted: true,
_events: [Object: null prototype],
_eventsCount: 10,
connecting: false,
_hadError: false,
_parent: null,
_host: 'api.openai.com',
_closeAfterHandlingError: false,
_readableState: [ReadableState],
_maxListeners: undefined,
_writableState: [WritableState],
allowHalfOpen: false,
_sockname: null,
_pendingData: null,
_pendingEncoding: '',
server: undefined,
_server: null,
ssl: [TLSWrap],
_requestCert: true,
_rejectUnauthorized: true,
parser: null,
_httpMessage: [Circular *1],
[Symbol(res)]: [TLSWrap],
[Symbol(verified)]: true,
[Symbol(pendingSession)]: null,
[Symbol(async_id_symbol)]: 188,
[Symbol(kHandle)]: [TLSWrap],
[Symbol(lastWriteQueueSize)]: 0,
[Symbol(timeout)]: null,
[Symbol(kBuffer)]: null,
[Symbol(kBufferCb)]: null,
[Symbol(kBufferGen)]: null,
[Symbol(kCapture)]: false,
[Symbol(kSetNoDelay)]: false,
[Symbol(kSetKeepAlive)]: true,
[Symbol(kSetKeepAliveInitialDelay)]: 60,
[Symbol(kBytesRead)]: 0,
[Symbol(kBytesWritten)]: 0,
[Symbol(connect-options)]: [Object]
},
_header: 'POST /v1/chat HTTP/1.1\r\n' +
'Accept: application/json, text/plain, */*\r\n' +
'Content-Type: application/json\r\n' +
'Authorization: Bearer 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\r\n' +
'User-Agent: axios/1.3.4\r\n' +
'Content-Length: 342\r\n' +
'Accept-Encoding: gzip, compress, deflate, br\r\n' +
'Host: api.openai.com\r\n' +
'Connection: close\r\n' +
'\r\n',
_keepAliveTimeout: 0,
_onPendingData: [Function: nop],
agent: Agent {
_events: [Object: null prototype],
_eventsCount: 2,
_maxListeners: undefined,
defaultPort: 443,
protocol: 'https:',
options: [Object: null prototype],
requests: [Object: null prototype] {},
sockets: [Object: null prototype],
freeSockets: [Object: null prototype] {},
keepAliveMsecs: 1000,
keepAlive: false,
maxSockets: Infinity,
maxFreeSockets: 256,
scheduling: 'lifo',
maxTotalSockets: Infinity,
totalSocketCount: 1,
maxCachedSessions: 100,
_sessionCache: [Object],
[Symbol(kCapture)]: false
},
socketPath: undefined,
method: 'POST',
maxHeaderSize: undefined,
insecureHTTPParser: undefined,
joinDuplicateHeaders: undefined,
path: '/v1/chat',
_ended: true,
res: IncomingMessage {
_readableState: [ReadableState],
_events: [Object: null prototype],
_eventsCount: 4,
_maxListeners: undefined,
socket: [TLSSocket],
httpVersionMajor: 1,
httpVersionMinor: 1,
httpVersion: '1.1',
complete: true,
rawHeaders: [Array],
rawTrailers: [],
joinDuplicateHeaders: undefined,
aborted: false,
upgrade: false,
url: '',
method: null,
statusCode: 404,
statusMessage: 'NOT FOUND',
client: [TLSSocket],
_consuming: false,
_dumped: false,
req: [Circular *1],
responseUrl: 'https://api.openai.com/v1/chat',
redirects: [],
[Symbol(kCapture)]: false,
[Symbol(kHeaders)]: [Object],
[Symbol(kHeadersCount)]: 18,
[Symbol(kTrailers)]: null,
[Symbol(kTrailersCount)]: 0
},
aborted: false,
timeoutCb: null,
upgradeOrConnect: false,
parser: null,
maxHeadersCount: null,
reusedSocket: false,
host: 'api.openai.com',
protocol: 'https:',
_redirectable: Writable {
_writableState: [WritableState],
_events: [Object: null prototype],
_eventsCount: 3,
_maxListeners: undefined,
_options: [Object],
_ended: true,
_ending: true,
_redirectCount: 0,
_redirects: [],
_requestBodyLength: 342,
_requestBodyBuffers: [],
_onNativeResponse: [Function (anonymous)],
_currentRequest: [Circular *1],
_currentUrl: 'https://api.openai.com/v1/chat',
[Symbol(kCapture)]: false
},
[Symbol(kCapture)]: false,
[Symbol(kBytesWritten)]: 0,
[Symbol(kEndCalled)]: true,
[Symbol(kNeedDrain)]: false,
[Symbol(corked)]: 0,
[Symbol(kOutHeaders)]: [Object: null prototype] {
accept: [Array],
'content-type': [Array],
authorization: [Array],
'user-agent': [Array],
'content-length': [Array],
'accept-encoding': [Array],
host: [Array]
},
[Symbol(errored)]: null,
[Symbol(kUniqueHeaders)]: null
},
response: {
status: 404,
statusText: 'NOT FOUND',
headers: AxiosHeaders {
date: 'Tue, 28 Mar 2023 18:00:05 GMT',
'content-type': 'application/json',
'content-length': '140',
connection: 'close',
'access-control-allow-origin': '*',
'openai-version': '2020-10-01',
'x-request-id': '1bde8165caa0c3e9cb191ef3b4d4db95',
'openai-processing-ms': '4',
'strict-transport-security': 'max-age=15724800; includeSubDomains'
},
config: {
transitional: [Object],
adapter: [Array],
transformRequest: [Array],
transformResponse: [Array],
timeout: 0,
xsrfCookieName: 'XSRF-TOKEN',
xsrfHeaderName: 'X-XSRF-TOKEN',
maxContentLength: -1,
maxBodyLength: -1,
env: [Object],
validateStatus: [Function: validateStatus],
headers: [AxiosHeaders],
method: 'post',
url: 'https://api.openai.com/v1/chat',
data: '{"model":"text-davinci-002","prompt":"User: Hey there\\nUser: Hello assistant\\nUser: Hello?\\nUser: Hello assistant, how are you?\\nUser: Hello assist\\nUser: Good evening assistant. How are you today?\\nUser: Hello?\\nUser: Hello?\\nAssistant: \\n","temperature":0.4,"max_tokens":300,"top_p":1,"presence_penalty":0,"frequency_penalty":0,"stop":"\\n"}'
},
request: <ref *1> ClientRequest {
_events: [Object: null prototype],
_eventsCount: 7,
_maxListeners: undefined,
outputData: [],
outputSize: 0,
writable: true,
destroyed: false,
_last: true,
chunkedEncoding: false,
shouldKeepAlive: false,
maxRequestsOnConnectionReached: false,
_defaultKeepAlive: true,
useChunkedEncodingByDefault: true,
sendDate: false,
_removedConnection: false,
_removedContLen: false,
_removedTE: false,
strictContentLength: false,
_contentLength: '342',
_hasBody: true,
_trailer: '',
finished: true,
_headerSent: true,
_closed: false,
socket: [TLSSocket],
_header: 'POST /v1/chat HTTP/1.1\r\n' +
'Accept: application/json, text/plain, */*\r\n' +
'Content-Type: application/json\r\n' +
'Authorization: Bearer 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\r\n' +
'User-Agent: axios/1.3.4\r\n' +
'Content-Length: 342\r\n' +
'Accept-Encoding: gzip, compress, deflate, br\r\n' +
'Host: api.openai.com\r\n' +
'Connection: close\r\n' +
'\r\n',
_keepAliveTimeout: 0,
_onPendingData: [Function: nop],
agent: [Agent],
socketPath: undefined,
method: 'POST',
maxHeaderSize: undefined,
insecureHTTPParser: undefined,
joinDuplicateHeaders: undefined,
path: '/v1/chat',
_ended: true,
res: [IncomingMessage],
aborted: false,
timeoutCb: null,
upgradeOrConnect: false,
parser: null,
maxHeadersCount: null,
reusedSocket: false,
host: 'api.openai.com',
protocol: 'https:',
_redirectable: [Writable],
[Symbol(kCapture)]: false,
[Symbol(kBytesWritten)]: 0,
[Symbol(kEndCalled)]: true,
[Symbol(kNeedDrain)]: false,
[Symbol(corked)]: 0,
[Symbol(kOutHeaders)]: [Object: null prototype],
[Symbol(errored)]: null,
[Symbol(kUniqueHeaders)]: null
},
data: { error: [Object] }
}
}
Error while creating completion

Any help would be greatly appreciated!!!

I've tried a few things, mainly switching between Axios and the OpenAI npm (Node?) package (I'm new to this, sorry). Unfortunately, GPT told me that at this point I have to use Axios to get a response, since I'm using gpt-3.5-turbo.

The goal is obviously just to make a Discord bot that responds to users using Turbo. I also want to include learning, hence the thumbs-up and thumbs-down reaction buttons on the Discord embed. Eventually I'll use GPT-4 to help with that part, but for now I just want the code to work.

My environment variables are stored correctly in .env, and both the API key and the Discord bot token are correct.

Best Answer

I'd suggest reading the API reference, but here are the main problems I see with this code:

  1. The model you are using is not allowed on the chat endpoint:

     {
       "model": "text-davinci-002", // <-- this is an invalid model
       "prompt": options.messages.map(msg => `${msg.role === "assistant" ? "Assistant" : "User"}: ${msg.content}`).join('\n') + '\n',
       "temperature": 0.4,
       "max_tokens": 300,
       "top_p": 1,
       "presence_penalty": 0,
       "frequency_penalty": 0,
       "stop": "\n"
     }

     You should use "gpt-3.5-turbo" instead.

  2. You are using the wrong endpoint.

     https://api.openai.com/v1/chat

     should be

     https://api.openai.com/v1/chat/completions

  3. The chat endpoint does not take a prompt, it takes messages (see the sketch after this list):

     {
       "model": "gpt-3.5-turbo",
       "messages": options.messages, // <-- change your code to look like this
       "temperature": 0.4,
       "max_tokens": 300,
       "top_p": 1,
       "presence_penalty": 0,
       "frequency_penalty": 0,
       "stop": "\n"
     }
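
Put together, here is a minimal sketch of what createCompletion could look like with those fixes applied. It assumes the same axios usage and OPENAI_API_KEY environment variable as in the question; note that chat completions return the reply under choices[0].message.content rather than choices[0].text:

const axios = require('axios');

// Minimal sketch, assuming OPENAI_API_KEY is set in .env as in the question.
async function createCompletion(options) {
  try {
    const response = await axios({
      method: 'post',
      // chat completions endpoint, not /v1/chat
      url: 'https://api.openai.com/v1/chat/completions',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`
      },
      data: {
        model: 'gpt-3.5-turbo',     // a chat-capable model
        messages: options.messages, // [{ role: 'user' | 'assistant', content: '...' }]
        temperature: 0.4,
        max_tokens: 300
      }
    });

    // the reply text lives under message.content, not .text
    return response.data.choices[0].message.content;
  } catch (error) {
    console.error(error.response?.data || error);
    return null;
  }
}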

Side note: I hate to be that guy, but please write your code yourself. It's a mess. The options object passed to createCompletion does nothing beyond its messages field. Given all the mistakes and anti-patterns scattered through the code, it's clearly AI-generated.
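
For example, since createCompletion already returns either the reply text or null, the calling code would treat the return value as a plain string instead of reading response.choices again. A short sketch under that assumption, reusing the MessageEmbed pattern from the question:

// `response` here is the string returned by createCompletion, or null on error.
const response = await createCompletion(options);
if (!response) {
  console.error('Error while creating completion');
  return;
}

// Slice defensively, as the question already does, so the embed description stays within Discord's limit.
const embed = new MessageEmbed().setDescription(response.slice(0, 2000));
const botMsg = await message.channel.send({ embeds: [embed] });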

Regarding "discord.js - Discord bot help for gpt-3.5-turbo", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/75869648/
