Mirror of https://github.com/ReVanced/revanced-bots.git (synced 2026-01-11 13:56:15 +00:00)
feat: add wit.ai support

@@ -27,20 +27,7 @@
        {
            "label": "DOWNLOAD",
            "threshold": 0.85,
            "responses": [
                {
                    "p": "discord",
                    "text": "you wanted peevanced"
                },
                {
                    "p": "telegram",
                    "text":"you wanted peevanced, on telegram"
                },
                {
                    "p": "reddit",
                    "text": "you wanted peevanced, on reddit"
                }
            ]
            "text": "the download?"
        }
    ]
}

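Read together with the handler changes below, a single entry in the new config shape would look roughly like this (field names are taken from the hunk above; the exact nesting inside the responses list is an assumption):

    {
        "responses": [
            {
                "label": "DOWNLOAD",
                "threshold": 0.85,
                "text": "the download?"
            }
        ]
    }

The label is expected to match a wit.ai intent name, and the threshold is the minimum confidence at which the bots will reply with the text.
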
@@ -5,7 +5,6 @@ export default {
    once: false,
    execute(helper, _, msg) {
        if (!msg.content || msg.author.bot) return;
        if (!msg.mentions.has(msg.client.user)) return;
        helper.scanText(msg.content.toLowerCase().replace(/<.*?>/g, ''), `${msg.channelId}/${msg.id}`);
    }
};

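For illustration, this is roughly what the scan input looks like after the sanitisation in this handler (the sample message content is invented):

    // Hypothetical Discord message content, not from the repository.
    const content = 'Hey <@123456789>, where is the DOWNLOAD link? <:wave:42>';

    // The handler lowercases the text and strips <...> tokens (mentions, custom emoji, channel links)
    // before scanning, keyed by "channelId/messageId" so the reply can locate the original message.
    const sanitized = content.toLowerCase().replace(/<.*?>/g, '');
    // -> 'hey , where is the download link? '

    // helper.scanText(sanitized, `${msg.channelId}/${msg.id}`);
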
@@ -1,8 +1,11 @@
import { EmbedBuilder } from 'discord.js';

export default {
    name: 'aiResponse',
    once: false,
    async execute(client, config, aiRes) {
        if (!aiRes.response) return;
        if (!aiRes.response[0]) return;

        try {
            const ids = aiRes.id.split('/');
@@ -20,10 +23,19 @@ export default {
                message = channel.messages.cache.get(ids[1]);
            }

            message.reply(aiRes.response);
            const intent = aiRes.response.reduce((a, b) => a.confidence > b.confidence ? a : b);
            const response = config.responses.find((res) => res.label === intent.name);
            if (response.threshold > intent.confidence) return;

            const embed = new EmbedBuilder()
                .setTitle('You have asked a Frequently Asked Question')
                .setDescription(response.text)
                .setFooter({ text: `Confidence: ${intent.confidence}` });

            message.reply({ embeds: [embed]});

            return;
        } catch (e) {}
        } catch (e) {console.log(e)}

    }
};

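The selection logic introduced here recurs in the Reddit and Telegram handlers below. Isolated, and assuming wit.ai intents shaped like { name, confidence } (the sample values are invented), it works like this:

    // Invented sample data, for illustration only.
    const intents = [
        { name: 'DOWNLOAD', confidence: 0.93 },
        { name: 'SUPPORT', confidence: 0.41 }
    ];
    const config = {
        responses: [{ label: 'DOWNLOAD', threshold: 0.85, text: 'the download?' }]
    };

    // Take the intent wit.ai is most confident about,
    const intent = intents.reduce((a, b) => (a.confidence > b.confidence ? a : b));
    // look up the configured response whose label matches that intent,
    const response = config.responses.find((res) => res.label === intent.name);
    // and only answer if the confidence clears the per-response threshold.
    if (response && response.threshold <= intent.confidence) {
        console.log(response.text); // 'the download?'
    }
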
@@ -2,16 +2,22 @@ export default {
    name: 'aiResponse',
    once: false,
    async execute(client, config, aiRes) {
        if (!aiRes.response) return;
        if (!aiRes.response[0]) return;
        const ids = aiRes.id.split('/');

        const intent = aiRes.response.reduce((a, b) => a.confidence > b.confidence ? a : b);
        const response = config.responses.find((res) => res.label === intent.name);
        if (response.threshold > intent.confidence) return;

        switch (ids[0]) {
            case 'comment': {
                client.getComment(ids[1]).reply(aiRes.response);
                client.getComment(ids[1]).reply(`${response.text}\n\n*Confidence: ${intent.confidence}*`);
                break;
            }

            case 'post': {
                client.getSubmission(ids[1]).reply(aiRes.response);
                client.getSubmission(ids[1]).reply(`${response.text}\n\n*Confidence: ${intent.confidence}*`);
                break;
            }
        }

@@ -2,11 +2,17 @@ export default {
    name: 'aiResponse',
    once: false,
    async execute(bot, config, aiRes) {
        if (!aiRes.response) return;
        if (!aiRes.response[0]) return;
        const ids = aiRes.id.split('/');
        const intent = aiRes.response.reduce((a, b) => a.confidence > b.confidence ? a : b);
        const response = config.responses.find((res) => res.label === intent.name);
        if (response.threshold > intent.confidence) return;

        bot.sendMessage(ids[0], aiRes.response, {
        bot.sendMessage(ids[0], `${response.text}\n\n*Confidence: ${intent.confidence}*`, {
            message_thread_id: ids[1],
            reply_to_message_id: ids[2]
            reply_to_message_id: ids[2],
            parse_mode: 'HTML'
        });

        return;

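The id string the Telegram handler splits appears to encode chat, forum topic and message. A minimal sketch of that mapping, assuming a node-telegram-bot-api style bot object (the id values are invented):

    // Sketch only; "bot" is assumed to expose sendMessage(chatId, text, options).
    async function replyInThread(bot, id, text) {
        const ids = id.split('/'); // chat id / topic (thread) id / message id
        await bot.sendMessage(ids[0], text, {
            message_thread_id: ids[1],   // post inside the topic the question came from
            reply_to_message_id: ids[2], // quote the original message
            parse_mode: 'HTML'
        });
    }

    // e.g. replyInThread(bot, '-1001234567890/15/4242', 'the download?');
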
@@ -2,13 +2,7 @@
    "server": {
        "port": 3000
    },

    "transformers": {
        "model": "./model.onnx",
        "tokenizer": "./tokenizer.json",
        "instruction": "Instruction: given a dialog context and related knowledge, you need to answer the question based on the knowledge.",
        "knowledge": [
            "ReVanced is a generic patcher that allows you to modify the behavior of any Dalvik based Android application"
        ]
    "witAI": {
        "authToken": "AUTH-TOKEN"
    }
}

@@ -1,24 +1,19 @@
import { serialize } from 'bson';

async function generateResponse(tokenizer, model, config, dialog) {
    const knowledge = `[KNOWLEDGE] ${config.knowledge.join(' ')}`;
    const context = `[CONTEXT] ${dialog.substring(0, 64)}`;
export default async function runAI(client, data, config) {
    const witAIReq = await fetch(`https://api.wit.ai/message?v20230319&q=${encodeURI(data.text)}`, {
        headers: {
            authorization: `Bearer ${config.authToken}`
        }
    });

    const query = `${config.instruction} ${context} ${knowledge}`;

    const inputTokenIds = tokenizer.encode(query);
    const outputTokenIds = await model.generate(inputTokenIds, { maxLength: 64, topK: 10 });
    return await tokenizer.decode(outputTokenIds, true);
}

export default async function runAI(client, data, tokenizer, model, config) {
    const response = await generateResponse(tokenizer, model, config, data.text);
    const response = await witAIReq.json();

    client.write(
        serialize({
            op: 2,
            id: data.id,
            response
            response: response.intents
        })
    );

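For reference, a trimmed example of what witAIReq.json() resolves to (values are invented; only the intents array is forwarded as the response field):

    // Typical shape of a wit.ai /message reply; entities and traits are ignored by this code.
    const witAIResponse = {
        text: 'where can i download revanced',
        intents: [
            { id: '1234567890', name: 'DOWNLOAD', confidence: 0.97 }
        ],
        entities: {},
        traits: {}
    };
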
@@ -1,21 +1,10 @@
import { readFileSync } from 'node:fs';
// Fix __dirname not being defined in ES modules. (https://stackoverflow.com/a/64383997)
import { fileURLToPath } from 'node:url';
import { dirname } from 'node:path';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

const config = JSON.parse(readFileSync('./config.json', 'utf-8'));

import { createServer } from 'node:net';
import { deserialize } from 'bson';
import transformers from 'transformers-nodejs';
import { runAI, runOCR } from './events/index.js';

const tokenizer = await transformers.AutoTokenizer.fromPretrained(config.transformers.tokenizer);
const model = await transformers.AutoModelForSeq2SeqLM.fromPretrained(config.transformers.model);

const server = createServer(async (client) => {
    client.on('data', async (data) => {
        const eventData = deserialize(data, {

@@ -24,7 +13,7 @@ const server = createServer(async (client) => {

        switch (eventData.op) {
            case 1: {
                runAI(client, eventData, tokenizer, model, config.transformers);
                runAI(client, eventData, config.witAI);
                break;
            }
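
Putting the pieces together, a client of this server would speak the BSON-over-TCP protocol visible in this diff: op 1 submits text for scanning, op 2 carries the wit.ai intents back. A minimal sketch (host and payload values are assumptions, not the repository's actual client code):

    import { connect } from 'node:net';
    import { serialize, deserialize } from 'bson';

    const socket = connect(3000, '127.0.0.1', () => {
        // op 1: ask the AI server to scan a piece of text, tagged with an id the bot can route back.
        socket.write(serialize({ op: 1, id: 'channelId/messageId', text: 'where is the download' }));
    });

    socket.on('data', (data) => {
        const reply = deserialize(data);
        // op 2: the wit.ai intents for the tagged text, as written by runAI above.
        if (reply.op === 2) console.log(reply.id, reply.response);
    });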