RakunNakun-AI/package.json
NekoMonci12 9ba4724282 Fetch Data Function
Use LLM7.io to batch-ask LLM models, then cache the results.
target: inputs.txt
2025-06-03 19:59:23 +07:00
{
  "dependencies": {
    "axios": "^1.4.0",
    "discord.js": "^14.11.0",
    "dotenv": "^16.0.0",
    "express": "^4.18.2",
    "mongodb": "^5.7.0",
    "mysql2": "^3.2.0",
    "openai": "^4.104.0",
    "p-limit": "^4.0.0",
    "qs": "^6.11.2",
    "redis": "^4.6.7",
    "uuid": "^9.0.0"
  }
}
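
The commit above describes a Fetch Data Function that batch-queries LLM models through LLM7.io, caches the answers, and reads its prompts from inputs.txt. That function is not shown in this file, so what follows is only a minimal sketch of that flow built from the listed dependencies (openai, p-limit, redis). The base URL https://api.llm7.io/v1, the environment variable names, the model name, the cache-key scheme, and the file name fetch-data.sketch.js are assumptions for illustration, not taken from the repository.

fetch-data.sketch.js (hypothetical, not part of the repo)

// Hypothetical sketch of a batch ask-and-cache flow; not the repository's actual code.
// Assumes LLM7.io exposes an OpenAI-compatible endpoint (base URL below is an assumption).
import { readFile } from 'node:fs/promises';
import { createHash } from 'node:crypto';
import OpenAI from 'openai';
import pLimit from 'p-limit';
import { createClient } from 'redis';

const llm = new OpenAI({
  baseURL: process.env.LLM7_BASE_URL ?? 'https://api.llm7.io/v1', // assumed endpoint
  apiKey: process.env.LLM7_API_KEY ?? 'unused',                   // assumed env var names
});

const redis = createClient({ url: process.env.REDIS_URL });
const limit = pLimit(5); // cap concurrent requests to the LLM API

// Deterministic cache key per (model, prompt) pair.
const cacheKey = (model, prompt) =>
  'llm7:' + createHash('sha256').update(`${model}\n${prompt}`).digest('hex');

// Ask one model once, serving the answer from Redis when it is already cached.
async function askCached(model, prompt) {
  const key = cacheKey(model, prompt);
  const hit = await redis.get(key);
  if (hit !== null) return hit;

  const res = await llm.chat.completions.create({
    model,
    messages: [{ role: 'user', content: prompt }],
  });
  const answer = res.choices[0]?.message?.content ?? '';
  await redis.set(key, answer, { EX: 60 * 60 * 24 }); // cache for 24h (arbitrary TTL)
  return answer;
}

// Batch: read prompts from inputs.txt (one per line) and ask them concurrently.
async function main() {
  await redis.connect();
  const prompts = (await readFile('inputs.txt', 'utf8'))
    .split('\n')
    .map((line) => line.trim())
    .filter(Boolean);

  const model = process.env.LLM7_MODEL ?? 'gpt-4o-mini'; // model name is a placeholder
  const answers = await Promise.all(
    prompts.map((p) => limit(() => askCached(model, p)))
  );

  answers.forEach((a, i) => console.log(`#${i + 1}: ${a}`));
  await redis.quit();
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});

Note that p-limit v4 ships as an ES module, so the sketch is written as ESM; the concurrency limit, cache TTL, model choice, and error handling would need to match whatever the real Fetch Data Function does.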