Mirror of https://github.com/LightZirconite/Microsoft-Rewards-Bot.git (synced 2026-01-10 01:06:17 +00:00)
refactor: remove legacy scheduling and analytics code
- Deleted the scheduler module and its associated functions, transitioning to OS-level scheduling.
- Removed the Analytics module and its related interfaces, retaining only a placeholder for backward compatibility.
- Updated ConfigValidator to warn about legacy schedule and analytics configurations.
- Cleaned up StartupValidator to remove diagnostics and schedule validation logic.
- Adjusted Load.ts to handle legacy flags for diagnostics and analytics.
- Removed unused diagnostics capturing functionality.
@@ -80,8 +80,8 @@ COPY --from=builder /app/node_modules ./node_modules
 COPY docker-entrypoint.sh /usr/local/bin/
 RUN chmod +x /usr/local/bin/docker-entrypoint.sh
 
-# Use entrypoint that supports both scheduler and cron
+# Use entrypoint that supports single-run and optional cron mode
 ENTRYPOINT ["docker-entrypoint.sh"]
 
-# Default: use built-in scheduler
-CMD ["npm", "run", "start:schedule"]
+# Default: single execution
+CMD ["node", "--enable-source-maps", "./dist/index.js"]
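With the default command now a single pass, a quick way to exercise the image locally is the `create-docker` npm script plus a one-off `docker run`. This is a minimal sketch: the mount targets mirror the volume table in the Docker guide and the image name comes from `package.json`; adjust the host paths and timezone to your checkout.

```bash
# Build the image (the "create-docker" script runs: docker build -t microsoft-rewards-bot .)
npm run create-docker

# One-off run with the same mounts the compose file uses (run from the repo root)
docker run --rm \
  -e TZ="Europe/Paris" -e FORCE_HEADLESS=1 \
  -v "$PWD/src/accounts.jsonc:/app/src/accounts.jsonc:ro" \
  -v "$PWD/src/config.jsonc:/app/src/config.jsonc:ro" \
  -v "$PWD/sessions:/app/sessions" \
  microsoft-rewards-bot
```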
|
|||||||
24
README.md
@@ -21,12 +21,11 @@ This TypeScript-based automation bot helps you maximize your **Microsoft Rewards
|
|||||||
|
|
||||||
### ✨ Key Features
|
### ✨ Key Features
|
||||||
|
|
||||||
- 🔍 **Automated Searches** — Desktop and mobile Bing searches with natural patterns
|
- 🔍 **Automated Searches** — Desktop and mobile Bing searches with natural patterns
|
||||||
- 📅 **Daily Activities** — Quizzes, polls, daily sets, and punch cards
|
- 📅 **Daily Activities** — Quizzes, polls, daily sets, and punch cards
|
||||||
- 🤖 **Human-like Behavior** — Advanced humanization system to avoid detection
|
- 🤖 **Human-like Behavior** — Advanced humanization system to avoid detection
|
||||||
- 🛡️ **Risk Management** — Built-in ban detection and prediction with ML algorithms
|
- 🛡️ **Risk Management** — Built-in ban detection and prediction with ML algorithms
|
||||||
- 📊 **Analytics Dashboard** — Track performance and points collection over time
|
- ⏰ **External Scheduling** — Ready for cron, systemd timers, and Windows Task Scheduler
|
||||||
- ⏰ **Smart Scheduling** — Built-in scheduler with timezone support
|
|
||||||
- 🔔 **Notifications** — Discord webhooks and NTFY push alerts
|
- 🔔 **Notifications** — Discord webhooks and NTFY push alerts
|
||||||
- 🐳 **Docker Support** — Easy containerized deployment
|
- 🐳 **Docker Support** — Easy containerized deployment
|
||||||
- 🔐 **Multi-Account** — Manage multiple accounts with parallel execution
|
- 🔐 **Multi-Account** — Manage multiple accounts with parallel execution
|
||||||
@@ -89,18 +88,18 @@ For detailed configuration, advanced features, and troubleshooting, visit our co
|
|||||||
| **[Getting Started](docs/getting-started.md)** | Detailed installation and first-run guide |
|
| **[Getting Started](docs/getting-started.md)** | Detailed installation and first-run guide |
|
||||||
| **[Configuration](docs/config.md)** | Complete configuration options reference |
|
| **[Configuration](docs/config.md)** | Complete configuration options reference |
|
||||||
| **[Accounts & 2FA](docs/accounts.md)** | Setting up accounts with TOTP authentication |
|
| **[Accounts & 2FA](docs/accounts.md)** | Setting up accounts with TOTP authentication |
|
||||||
| **[Scheduling](docs/schedule.md)** | Automated daily execution setup |
|
| **[External Scheduling](docs/schedule.md)** | Use OS schedulers for automation |
|
||||||
| **[Docker Deployment](docs/docker.md)** | Running in containers |
|
| **[Docker Deployment](docs/docker.md)** | Running in containers |
|
||||||
| **[Humanization](docs/humanization.md)** | Anti-detection and natural behavior |
|
| **[Humanization](docs/humanization.md)** | Anti-detection and natural behavior |
|
||||||
| **[Notifications](docs/conclusionwebhook.md)** | Discord webhooks and NTFY setup |
|
| **[Notifications](docs/conclusionwebhook.md)** | Discord webhooks and NTFY setup |
|
||||||
| **[Proxy Setup](docs/proxy.md)** | Configuring proxies for privacy |
|
| **[Proxy Setup](docs/proxy.md)** | Configuring proxies for privacy |
|
||||||
| **[Diagnostics](docs/diagnostics.md)** | Troubleshooting and debugging |
|
| **[Troubleshooting](docs/diagnostics.md)** | Debug common issues and capture logs |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## <20> Docker Quick Start
|
## Docker Quick Start
|
||||||
|
|
||||||
For containerized deployment with automatic scheduling:
|
For containerized deployment:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Ensure accounts.jsonc exists in src/
|
# Ensure accounts.jsonc exists in src/
|
||||||
@@ -124,11 +123,6 @@ The script works great with default settings, but you can customize everything i
|
|||||||
"enabled": true, // Enable natural behavior patterns
|
"enabled": true, // Enable natural behavior patterns
|
||||||
"stopOnBan": true // Stop on ban detection
|
"stopOnBan": true // Stop on ban detection
|
||||||
},
|
},
|
||||||
"schedule": {
|
|
||||||
"enabled": true, // Built-in scheduler
|
|
||||||
"time24": "09:00", // Daily run time
|
|
||||||
"timeZone": "Europe/Paris" // Your timezone
|
|
||||||
},
|
|
||||||
"workers": {
|
"workers": {
|
||||||
"doDesktopSearch": true, // Desktop Bing searches
|
"doDesktopSearch": true, // Desktop Bing searches
|
||||||
"doMobileSearch": true, // Mobile Bing searches
|
"doMobileSearch": true, // Mobile Bing searches
|
||||||
@@ -165,12 +159,12 @@ All while maintaining **natural behavior patterns** to minimize detection risk.
|
|||||||
|
|
||||||
## 💡 Usage Tips
|
## 💡 Usage Tips
|
||||||
|
|
||||||
- **Run regularly:** Set up the built-in scheduler for daily automation
|
- **Run regularly:** Use cron, systemd timers, or Windows Task Scheduler (see docs)
|
||||||
- **Use humanization:** Always keep `humanization.enabled: true` for safety
|
- **Use humanization:** Always keep `humanization.enabled: true` for safety
|
||||||
- **Monitor logs:** Check for ban warnings and adjust settings if needed
|
- **Monitor logs:** Check for ban warnings and adjust settings if needed
|
||||||
- **Multiple accounts:** Use the `clusters` setting to run accounts in parallel
|
- **Multiple accounts:** Use the `clusters` setting to run accounts in parallel
|
||||||
- **Start small:** Test with one account before scaling up
|
- **Start small:** Test with one account before scaling up
|
||||||
- **Review diagnostics:** Enable screenshot/HTML capture for troubleshooting
|
- **Capture logs:** Pipe output to a file or webhook for later review
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -185,7 +179,7 @@ All while maintaining **natural behavior patterns** to minimize detection risk.
|
|||||||
- 💬 **[Join our Discord](https://discord.gg/k5uHkx9mne)** — Community support and updates
|
- 💬 **[Join our Discord](https://discord.gg/k5uHkx9mne)** — Community support and updates
|
||||||
- 📖 **[Documentation Hub](docs/index.md)** — Complete guides and references
|
- 📖 **[Documentation Hub](docs/index.md)** — Complete guides and references
|
||||||
- 🐛 **[Report Issues](https://github.com/Obsidian-wtf/Microsoft-Rewards-Bot/issues)** — Bug reports and feature requests
|
- 🐛 **[Report Issues](https://github.com/Obsidian-wtf/Microsoft-Rewards-Bot/issues)** — Bug reports and feature requests
|
||||||
- 📧 **[Diagnostics Guide](docs/diagnostics.md)** — Troubleshooting steps
|
- 📧 **[Troubleshooting Guide](docs/diagnostics.md)** — Debug common issues
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
21
compose.yaml
@@ -11,25 +11,12 @@ services:
|
|||||||
- ./sessions:/app/sessions
|
- ./sessions:/app/sessions
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
TZ: "America/Toronto" # Set your timezone for proper scheduling (used by image and scheduler)
|
TZ: "America/Toronto" # Set your timezone for logging (and cron if enabled)
|
||||||
NODE_ENV: "production"
|
NODE_ENV: "production"
|
||||||
# Force headless when running in Docker (uses Chromium Headless Shell only)
|
# Force headless when running in Docker (uses Chromium Headless Shell only)
|
||||||
FORCE_HEADLESS: "1"
|
FORCE_HEADLESS: "1"
|
||||||
|
|
||||||
# ============================================================
|
# Optional: enable in-container cron scheduling
|
||||||
# SCHEDULING MODE: Choose one
|
|
||||||
# ============================================================
|
|
||||||
# Option 1: Built-in JavaScript Scheduler (default, recommended)
|
|
||||||
# - No additional setup needed
|
|
||||||
# - Uses config.jsonc schedule settings
|
|
||||||
# - Lighter resource usage
|
|
||||||
#SCHEDULER_DAILY_JITTER_MINUTES_MIN: "2"
|
|
||||||
#SCHEDULER_DAILY_JITTER_MINUTES_MAX: "10"
|
|
||||||
#SCHEDULER_PASS_TIMEOUT_MINUTES: "180"
|
|
||||||
#SCHEDULER_FORK_PER_PASS: "true"
|
|
||||||
|
|
||||||
# Option 2: Native Cron (for users who prefer traditional cron)
|
|
||||||
# Uncomment these lines to enable cron instead:
|
|
||||||
#USE_CRON: "true"
|
#USE_CRON: "true"
|
||||||
#CRON_SCHEDULE: "0 9 * * *" # Daily at 9 AM (see https://crontab.guru)
|
#CRON_SCHEDULE: "0 9 * * *" # Daily at 9 AM (see https://crontab.guru)
|
||||||
#RUN_ON_START: "true" # Run once immediately on container start
|
#RUN_ON_START: "true" # Run once immediately on container start
|
||||||
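After editing the optional cron variables in `compose.yaml`, it is worth confirming they are actually picked up before relying on them. A small sketch; the service name follows the `microsoft-rewards-bot` container used elsewhere in these docs.

```bash
# Render the effective compose configuration and check the cron-related variables
docker compose config | grep -E 'TZ|USE_CRON|CRON_SCHEDULE|RUN_ON_START'

# Recreate the container so the entrypoint sees the new values
docker compose up -d --build
```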
@@ -38,5 +25,5 @@ services:
 security_opt:
 - no-new-privileges:true
 
-# Default: use built-in scheduler (entrypoint handles mode selection)
-command: ["npm", "run", "start:schedule"]
+# Default: single run per container start
+command: ["node", "--enable-source-maps", "./dist/index.js"]
@@ -1,12 +1,12 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# Docker entrypoint with cron support
|
# Docker entrypoint with optional cron support
|
||||||
# Usage:
|
# Usage:
|
||||||
# Default (scheduler): npm run start:schedule
|
# Default: node --enable-source-maps ./dist/index.js
|
||||||
# Cron mode: set USE_CRON=true
|
# Cron mode: set USE_CRON=true
|
||||||
|
|
||||||
# If USE_CRON is set, configure cron instead of using built-in scheduler
|
# If USE_CRON is set, configure cron for repeated runs
|
||||||
if [ "$USE_CRON" = "true" ] || [ "$USE_CRON" = "1" ]; then
|
if [ "$USE_CRON" = "true" ] || [ "$USE_CRON" = "1" ]; then
|
||||||
echo "==> Cron mode enabled"
|
echo "==> Cron mode enabled"
|
||||||
|
|
||||||
@@ -57,10 +57,10 @@ if [ "$USE_CRON" = "true" ] || [ "$USE_CRON" = "1" ]; then
|
|||||||
# Start cron in foreground and tail logs
|
# Start cron in foreground and tail logs
|
||||||
cron && tail -f /var/log/cron.log
|
cron && tail -f /var/log/cron.log
|
||||||
else
|
else
|
||||||
echo "==> Using built-in scheduler (JavaScript)"
|
echo "==> Running single execution"
|
||||||
echo "==> To use cron instead, set USE_CRON=true"
|
echo "==> To run on a schedule inside the container, set USE_CRON=true"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
# Execute passed command (default: npm run start:schedule)
|
# Execute passed command (default: node --enable-source-maps ./dist/index.js)
|
||||||
exec "$@"
|
exec "$@"
|
||||||
fi
|
fi
|
||||||
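The two entrypoint modes above can also be exercised without compose. A hedged sketch: the image name is the one built by the `create-docker` script, the schedule is illustrative, and volume mounts are omitted for brevity.

```bash
# Single run (default): the entrypoint simply execs the CMD and the container exits when the pass finishes
docker run --rm microsoft-rewards-bot

# Cron mode: the entrypoint installs the schedule and tails /var/log/cron.log, so the container stays up
docker run -d \
  -e USE_CRON=true \
  -e CRON_SCHEDULE="0 9 * * *" \
  -e RUN_ON_START=true \
  microsoft-rewards-bot
```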
|
|||||||
26
docs/FAQ.md
@@ -80,7 +80,7 @@ npm run setup
|
|||||||
|
|
||||||
### Can I run this on a server 24/7?
|
### Can I run this on a server 24/7?
|
||||||
|
|
||||||
Yes! Use Docker with the built-in scheduler for unattended operation. See the [Docker Guide](docker.md).
|
Yes! Use Docker with your preferred scheduler (cron, Kubernetes CronJob, etc.) or enable the image's optional cron mode. See the [Docker Guide](docker.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -97,27 +97,23 @@ In `src/accounts.jsonc`. Copy `src/accounts.example.jsonc` as a template.
|
|||||||
Not required, but **highly recommended** for:
|
Not required, but **highly recommended** for:
|
||||||
- Automated login without manual code entry
|
- Automated login without manual code entry
|
||||||
- Better security
|
- Better security
|
||||||
- 24/7 scheduler compatibility
|
- 24/7 automation compatibility
|
||||||
|
|
||||||
See the [Accounts & 2FA Guide](accounts.md).
|
See the [Accounts & 2FA Guide](accounts.md).
|
||||||
|
|
||||||
### How do I schedule automatic runs?
|
### How do I schedule automatic runs?
|
||||||
|
|
||||||
Enable the built-in scheduler in `src/config.jsonc`:
|
Use your operating system's scheduler. For example, Task Scheduler on Windows or `cron`/systemd timers on Linux:
|
||||||
|
|
||||||
```jsonc
|
```bash
|
||||||
{
|
# Windows Task Scheduler action (PowerShell)
|
||||||
"schedule": {
|
powershell.exe -NoProfile -Command "cd 'C:\\Path\\To\\Microsoft-Rewards-Script'; npm run start"
|
||||||
"enabled": true,
|
|
||||||
"time24": "09:00",
|
# Linux cron example (daily at 09:15)
|
||||||
"timeZone": "America/New_York"
|
15 9 * * * cd /home/you/Microsoft-Rewards-Script && /usr/bin/env npm run start >> /home/you/rewards.log 2>&1
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Then run: `npm run start:schedule`
|
See the [External Scheduling Guide](schedule.md) for detailed steps.
|
||||||
|
|
||||||
See the [Scheduling Guide](schedule.md).
|
|
||||||
|
|
||||||
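The answer above shows a Task Scheduler action and a crontab entry; for systemd-based Linux hosts a user timer works the same way. This is a minimal sketch only: the unit names, the `/home/you/Microsoft-Rewards-Script` path, and the 09:15 time are illustrative.

```bash
mkdir -p ~/.config/systemd/user

# One-shot service that performs a single run
cat > ~/.config/systemd/user/rewards.service <<'EOF'
[Unit]
Description=Microsoft Rewards run

[Service]
Type=oneshot
WorkingDirectory=/home/you/Microsoft-Rewards-Script
ExecStart=/usr/bin/env npm run start
EOF

# Timer that triggers the service daily with a little jitter
cat > ~/.config/systemd/user/rewards.timer <<'EOF'
[Unit]
Description=Daily Microsoft Rewards run

[Timer]
OnCalendar=*-*-* 09:15:00
RandomizedDelaySec=30min
Persistent=true

[Install]
WantedBy=timers.target
EOF

systemctl --user daemon-reload
systemctl --user enable --now rewards.timer
```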
### Can I run multiple accounts?
|
### Can I run multiple accounts?
|
||||||
|
|
||||||
@@ -312,7 +308,7 @@ See [Configuration Guide](config.md#risk-management--security).
|
|||||||
- 💬 **[Join our Discord](https://discord.gg/k5uHkx9mne)** — Ask the community
|
- 💬 **[Join our Discord](https://discord.gg/k5uHkx9mne)** — Ask the community
|
||||||
- 📖 **[Documentation Hub](index.md)** — Browse all guides
|
- 📖 **[Documentation Hub](index.md)** — Browse all guides
|
||||||
- 🐛 **[GitHub Issues](https://github.com/Obsidian-wtf/Microsoft-Rewards-Bot/issues)** — Report problems
|
- 🐛 **[GitHub Issues](https://github.com/Obsidian-wtf/Microsoft-Rewards-Bot/issues)** — Report problems
|
||||||
- 📧 **[Diagnostics Guide](diagnostics.md)** — Debug issues
|
- 📧 **[Troubleshooting Guide](diagnostics.md)** — Debug common issues
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -32,7 +32,7 @@
|
|||||||
### Why Use TOTP?
|
### Why Use TOTP?
|
||||||
- ✅ **Automated login** — No manual code entry
|
- ✅ **Automated login** — No manual code entry
|
||||||
- ✅ **More secure** — Better than SMS
|
- ✅ **More secure** — Better than SMS
|
||||||
- ✅ **Works 24/7** — Scheduler-friendly
|
- ✅ **Works 24/7** — Ready for external schedulers
|
||||||
|
|
||||||
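If you want to sanity-check a TOTP secret before trusting it for unattended logins, one option is the external `oathtool` utility (part of oath-toolkit, not part of this project; shown purely as an illustration, and the secret below is the placeholder from the example accounts file).

```bash
# Generate the current 6-digit code from a base32 secret and compare it with your authenticator app
oathtool --totp -b "BASE32SECRETPRIMARY"
```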
### How to Get Your TOTP Secret
|
### How to Get Your TOTP Secret
|
||||||
|
|
||||||
@@ -178,7 +178,7 @@ export ACCOUNTS_JSON='{"accounts":[{"email":"test@example.com","password":"pass"
|
|||||||
→ **[Security Guide](./security.md)** for best practices
|
→ **[Security Guide](./security.md)** for best practices
|
||||||
|
|
||||||
**Ready for automation?**
|
**Ready for automation?**
|
||||||
→ **[Scheduler Setup](./schedule.md)**
|
→ **[External Scheduling](./schedule.md)**
|
||||||
|
|
||||||
**Need proxies?**
|
**Need proxies?**
|
||||||
→ **[Proxy Guide](./proxy.md)**
|
→ **[Proxy Guide](./proxy.md)**
|
||||||
|
|||||||
@@ -115,7 +115,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"content":"Test message"}'
|
|||||||
→ **[NTFY Push Notifications](./ntfy.md)**
|
→ **[NTFY Push Notifications](./ntfy.md)**
|
||||||
|
|
||||||
**Need detailed logs?**
|
**Need detailed logs?**
|
||||||
→ **[Diagnostics Guide](./diagnostics.md)**
|
→ **[Troubleshooting Guide](./diagnostics.md)**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -40,22 +40,6 @@ This page mirrors the defaults that ship in `src/config.jsonc` and explains what
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Scheduler
|
|
||||||
|
|
||||||
| Key | Default | Notes |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `schedule.enabled` | `false` | Enable built-in scheduler loop. |
|
|
||||||
| `schedule.useAmPm` | `false` | Toggle between `time12` (12h) and `time24` (24h). |
|
|
||||||
| `schedule.time12` | `"9:00 AM"` | Used when `useAmPm` is `true`. |
|
|
||||||
| `schedule.time24` | `"09:00"` | Used when `useAmPm` is `false`. |
|
|
||||||
| `schedule.timeZone` | `Europe/Paris` | IANA timezone for scheduling. |
|
|
||||||
| `schedule.runImmediatelyOnStart` | `true` | Execute one pass right after startup. |
|
|
||||||
| `schedule.cron` | - | Optional cron expression(s). |
|
|
||||||
|
|
||||||
See `docs/schedule.md` for jitter, cron patterns, and vacation integration.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Workers
|
## Workers
|
||||||
|
|
||||||
| Key | Default | Notes |
|
| Key | Default | Notes |
|
||||||
@@ -133,13 +117,6 @@ See `docs/schedule.md` for jitter, cron patterns, and vacation integration.
|
|||||||
| `logging.excludeFunc` | `["SEARCH-CLOSE-TABS", "LOGIN-NO-PROMPT", "FLOW"]` | Buckets skipped locally. |
|
| `logging.excludeFunc` | `["SEARCH-CLOSE-TABS", "LOGIN-NO-PROMPT", "FLOW"]` | Buckets skipped locally. |
|
||||||
| `logging.webhookExcludeFunc` | same | Buckets skipped in webhook payloads. |
|
| `logging.webhookExcludeFunc` | same | Buckets skipped in webhook payloads. |
|
||||||
| `logging.redactEmails` | `true` | Mask email addresses in logs. |
|
| `logging.redactEmails` | `true` | Mask email addresses in logs. |
|
||||||
| `diagnostics.enabled` | `true` | Capture screenshots/HTML on failure. |
|
|
||||||
| `diagnostics.maxPerRun` | `2` | Limit capture count per run. |
|
|
||||||
| `diagnostics.retentionDays` | `7` | Auto-clean old diagnostics. |
|
|
||||||
| `analytics.enabled` | `true` | Persist account metrics. |
|
|
||||||
| `analytics.retentionDays` | `30` | Keep analytics data for N days. |
|
|
||||||
| `analytics.exportMarkdown` | `true` | Write markdown summaries to `reports/`. |
|
|
||||||
| `analytics.webhookSummary` | `true` | Send analytics summary via webhook. |
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -160,7 +137,7 @@ See `docs/schedule.md` for jitter, cron patterns, and vacation integration.
|
|||||||
|
|
||||||
1. Start from the default config and copy it if you need a local override.
|
1. Start from the default config and copy it if you need a local override.
|
||||||
2. Leave `passesPerRun` at `1` so job-state can skip accounts automatically.
|
2. Leave `passesPerRun` at `1` so job-state can skip accounts automatically.
|
||||||
3. Enable the scheduler only after testing manual runs.
|
3. Configure your external scheduler after validating manual runs.
|
||||||
4. Document any changes you make (without storing credentials in git).
|
4. Document any changes you make (without storing credentials in git).
|
||||||
|
|
||||||
Related docs: [`accounts.md`](./accounts.md), [`schedule.md`](./schedule.md), [`proxy.md`](./proxy.md), [`humanization.md`](./humanization.md), [`security.md`](./security.md).
|
Related docs: [`accounts.md`](./accounts.md), [`schedule.md`](./schedule.md), [`proxy.md`](./proxy.md), [`humanization.md`](./humanization.md), [`security.md`](./security.md).
|
||||||
|
|||||||
@@ -20,7 +20,6 @@ This guide explains **how to adjust `src/config.jsonc` safely** and when to touc
|
|||||||
| Section | Keys to check | Why it matters |
|
| Section | Keys to check | Why it matters |
|
||||||
| --- | --- | --- |
|
| --- | --- | --- |
|
||||||
| `execution` | `parallel`, `runOnZeroPoints`, `clusters`, `passesPerRun` | Determines concurrency and whether accounts repeat during the same day. Leave `passesPerRun` at `1` unless you knowingly want additional passes (job-state skip is disabled otherwise). |
|
| `execution` | `parallel`, `runOnZeroPoints`, `clusters`, `passesPerRun` | Determines concurrency and whether accounts repeat during the same day. Leave `passesPerRun` at `1` unless you knowingly want additional passes (job-state skip is disabled otherwise). |
|
||||||
| `schedule` | `enabled`, `time12`/`time24`, `timeZone`, `runImmediatelyOnStart` | Controls unattended runs. Test manual runs before enabling the scheduler. |
|
|
||||||
| `workers` | `doDesktopSearch`, `doMobileSearch`, `doDailySet`, etc. | Disable tasks you never want to run to shorten execution time. |
|
| `workers` | `doDesktopSearch`, `doMobileSearch`, `doDailySet`, etc. | Disable tasks you never want to run to shorten execution time. |
|
||||||
| `humanization` | `enabled`, `stopOnBan`, `actionDelay` | Keep enabled for safer automation. Tweaks here influence ban resilience. |
|
| `humanization` | `enabled`, `stopOnBan`, `actionDelay` | Keep enabled for safer automation. Tweaks here influence ban resilience. |
|
||||||
| `proxy` | `proxyGoogleTrends`, `proxyBingTerms` | Tell the bot whether to route outbound API calls through your proxy. |
|
| `proxy` | `proxyGoogleTrends`, `proxyBingTerms` | Tell the bot whether to route outbound API calls through your proxy. |
|
||||||
@@ -29,19 +28,7 @@ Once these are set, most users can leave the rest alone.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 3. Scheduler & Humanization Coordination
|
## 3. Handling Updates Safely
|
||||||
|
|
||||||
The scheduler honours humanization constraints:
|
|
||||||
|
|
||||||
- Weekly off-days: controlled by `humanization.randomOffDaysPerWeek` (defaults to 1). The scheduler samples new days each ISO week.
|
|
||||||
- Allowed windows: if `humanization.allowedWindows` contains time ranges, the bot delays execution until the next window.
|
|
||||||
- Vacation mode: `vacation.enabled` selects a random contiguous block (between `minDays` and `maxDays`) and skips the entire period.
|
|
||||||
|
|
||||||
If you enable the scheduler (`schedule.enabled: true`), review these limits so the run does not surprise you by skipping on specific days.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 4. Handling Updates Safely
|
|
||||||
|
|
||||||
The `update` block defines how the post-run updater behaves:
|
The `update` block defines how the post-run updater behaves:
|
||||||
|
|
||||||
@@ -53,17 +40,15 @@ When running inside Docker, you can instead rely on `update.docker: true` so the
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 5. Diagnostics, Logging, and Analytics
|
## 4. Logging and Notifications
|
||||||
|
|
||||||
Three sections determine observability:
|
- `logging`: adjust `excludeFunc` and `webhookExcludeFunc` if certain log buckets are too noisy. Keeping `redactEmails: true` prevents leaks when sharing logs.
|
||||||
|
- `notifications`: use `webhook`, `conclusionWebhook`, or `ntfy` for live updates. All three share the same `{ enabled, url }` structure.
|
||||||
- `logging`: adjust `excludeFunc` and `webhookExcludeFunc` if certain log buckets are too noisy. `redactEmails` should stay `true` in most setups.
|
- The validator flags unknown keys automatically, so old sections can be trimmed safely.
|
||||||
- `diagnostics`: captures screenshots/HTML when failures occur. Reduce `maxPerRun` or switch off entirely only if storage is constrained.
|
|
||||||
- `analytics`: when enabled, daily metrics are persisted under `analytics/` and optional markdown summaries go to `reports/<date>/`. Disable if you do not want local history or webhook summaries.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 6. Advanced Tips
|
## 5. Advanced Tips
|
||||||
|
|
||||||
- **Risk management**: Leave `riskManagement.enabled` and `banPrediction` on unless you have a reason to reduce telemetry. Raising `riskThreshold` (>75) makes alerts rarer.
|
- **Risk management**: Leave `riskManagement.enabled` and `banPrediction` on unless you have a reason to reduce telemetry. Raising `riskThreshold` (>75) makes alerts rarer.
|
||||||
- **Search pacing**: The delay window (`search.settings.delay.min` / `max`) accepts either numbers (ms) or strings like `"2min"`. Keep the range wide enough for natural behaviour.
|
- **Search pacing**: The delay window (`search.settings.delay.min` / `max`) accepts either numbers (ms) or strings like `"2min"`. Keep the range wide enough for natural behaviour.
|
||||||
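A few rows above, the updated config guide notes that `webhook`, `conclusionWebhook`, and `ntfy` share the same `{ enabled, url }` structure. Before a scheduled run, the URL can be verified outside the bot; this reuses the curl test from the notifications guide, with a placeholder Discord webhook URL.

```bash
# Post a test message to the configured webhook endpoint (replace <id>/<token> with your own webhook)
curl -X POST -H "Content-Type: application/json" \
  -d '{"content":"Test message"}' \
  "https://discord.com/api/webhooks/<id>/<token>"
```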
@@ -72,15 +57,15 @@ Three sections determine observability:
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 7. Validation & Troubleshooting
|
## 6. Validation & Troubleshooting
|
||||||
|
|
||||||
- The startup validator (`StartupValidator`) emits warnings/errors when config or accounts look suspicious. It never blocks execution but should be read carefully.
|
- The startup validator (`StartupValidator`) emits warnings/errors when config or accounts look suspicious. It never blocks execution but should be read carefully.
|
||||||
- For syntax issues, run `npm run typecheck` or open the JSONC file in VS Code to surface parsing errors immediately.
|
- For syntax issues, run `npm run typecheck` or open the JSONC file in VS Code to surface parsing errors immediately.
|
||||||
- Diagnostics are written to `reports/` (failures) and `analytics/` (metrics). Clean up periodically or adjust `diagnostics.retentionDays` and `analytics.retentionDays`.
|
- Keep `logging` focused on the buckets you care about and rely on external log storage if you need long-term retention.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 8. Reference
|
## 7. Reference
|
||||||
|
|
||||||
For complete field defaults and descriptions, open [`config-reference.md`](./config-reference.md). Additional topic-specific guides:
|
For complete field defaults and descriptions, open [`config-reference.md`](./config-reference.md). Additional topic-specific guides:
|
||||||
|
|
||||||
|
|||||||
@@ -1,103 +1,81 @@
|
|||||||
# 🔍 Diagnostics
|
# 🛠️ Troubleshooting Guide
|
||||||
|
|
||||||
**Auto-capture errors with screenshots and HTML**
|
Keep runs healthy by watching logs, catching alerts early, and validating your setup before enabling automation on a schedule.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 💡 What Is It?
|
## Quick Checklist
|
||||||
|
|
||||||
When errors occur, the script automatically saves:
|
- ✅ Run `npm run start` manually after every configuration change.
|
||||||
- 📸 **Screenshots** — Visual error capture
|
- ✅ Confirm Node.js 20+ with `node -v` (22 LTS recommended).
|
||||||
- 📄 **HTML snapshots** — Page source
|
- ✅ Keep dependencies current: `npm install` then `npm run build`.
|
||||||
|
- ✅ Double-check credentials, TOTP secrets, and recovery email values.
|
||||||
Helps you debug issues without re-running the script.
|
- ✅ Review external scheduler logs (Task Scheduler, cron, etc.).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## ⚡ Quick Start
|
## Capture Logs Reliably
|
||||||
|
|
||||||
**Already enabled by default!**
|
### Terminal sessions
|
||||||
|
|
||||||
```jsonc
|
- **PowerShell**
|
||||||
{
|
```powershell
|
||||||
"diagnostics": {
|
npm run start *>&1 | Tee-Object -FilePath logs/rewards.txt
|
||||||
"enabled": true,
|
```
|
||||||
"saveScreenshot": true,
|
- **Bash / Linux / macOS**
|
||||||
"saveHtml": true,
|
```bash
|
||||||
"maxPerRun": 2,
|
mkdir -p logs
|
||||||
"retentionDays": 7
|
npm run start >> logs/rewards.log 2>&1
|
||||||
}
|
```
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
### Verbose output
|
||||||
|
|
||||||
## 📁 Where Are Files Saved?
|
Set `DEBUG_REWARDS_VERBOSE=1` for additional context around worker progress and risk scoring.
|
||||||
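The PowerShell snippet later in this guide sets the variable for a whole session; on Linux or macOS the equivalent one-off invocation is simply:

```bash
# Enable verbose diagnostics output for a single run
DEBUG_REWARDS_VERBOSE=1 npm run start
```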
|
|
||||||
```
|
|
||||||
reports/
|
|
||||||
├── 2025-10-16/
|
|
||||||
│ ├── error_abc123_001.png
|
|
||||||
│ ├── error_abc123_001.html
|
|
||||||
│ └── error_def456_002.png
|
|
||||||
└── 2025-10-17/
|
|
||||||
└── ...
|
|
||||||
```
|
|
||||||
|
|
||||||
**Auto-cleanup:** Files older than 7 days are deleted automatically.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🎯 When It Captures
|
|
||||||
|
|
||||||
- ⏱️ **Timeouts** — Page navigation failures
|
|
||||||
- 🎯 **Element not found** — Selector errors
|
|
||||||
- 🔐 **Login failures** — Authentication issues
|
|
||||||
- 🌐 **Network errors** — Request failures
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🔧 Configuration Options
|
|
||||||
|
|
||||||
| Setting | Default | Description |
|
|
||||||
|---------|---------|-------------|
|
|
||||||
| `enabled` | `true` | Enable diagnostics |
|
|
||||||
| `saveScreenshot` | `true` | Capture PNG screenshots |
|
|
||||||
| `saveHtml` | `true` | Save page HTML |
|
|
||||||
| `maxPerRun` | `2` | Max captures per run |
|
|
||||||
| `retentionDays` | `7` | Auto-delete after N days |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🛠️ Troubleshooting
|
|
||||||
|
|
||||||
| Problem | Solution |
|
|
||||||
|---------|----------|
|
|
||||||
| **No captures despite errors** | Check `enabled: true` |
|
|
||||||
| **Too many files** | Reduce `retentionDays` |
|
|
||||||
| **Permission denied** | Check `reports/` write access |
|
|
||||||
|
|
||||||
### Manual Cleanup
|
|
||||||
|
|
||||||
```powershell
|
```powershell
|
||||||
# Delete all diagnostic reports
|
$env:DEBUG_REWARDS_VERBOSE = "1"
|
||||||
Remove-Item -Recurse -Force reports/
|
npm run start
|
||||||
|
|
||||||
# Keep last 3 days only
|
|
||||||
Get-ChildItem reports/ | Where-Object {$_.LastWriteTime -lt (Get-Date).AddDays(-3)} | Remove-Item -Recurse
|
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
Clear the variable afterwards (`Remove-Item Env:DEBUG_REWARDS_VERBOSE`).
|
||||||
|
|
||||||
## 📚 Next Steps
|
### Structured alerts
|
||||||
|
|
||||||
**Need live notifications?**
|
- Enable `conclusionWebhook` to receive a summary on completion.
|
||||||
→ **[Discord Webhooks](./conclusionwebhook.md)**
|
- Turn on `ntfy` for lightweight push alerts.
|
||||||
→ **[NTFY Push](./ntfy.md)**
|
- Pipe logs into observability tools (ELK, Loki, etc.) if you self-host them.
|
||||||
|
|
||||||
**Security issues?**
|
|
||||||
→ **[Security Guide](./security.md)**
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
**[← Back to Hub](./index.md)** | **[Config Guide](./config.md)**
|
## Common Issues & Fixes
|
||||||
|
|
||||||
|
| Symptom | Checks | Fix |
|
||||||
|
|---------|--------|-----|
|
||||||
|
| **Login loops or MFA prompts** | Ensure the `totp` secret is correct and the recovery email matches your Microsoft profile. | Regenerate TOTP from Microsoft Account, update `recoveryEmail`, retry manually. |
|
||||||
|
| **Points not increasing** | Review `workers` section; confirm searches complete in logs. | Enable missing workers, increase `passesPerRun`, verify network connectivity. |
|
||||||
|
| **Script stops early** | Look for `SECURITY` or `RISK` warnings. | Address ban alerts, adjust `riskManagement` thresholds, or pause for 24h. |
|
||||||
|
| **Scheduler runs but nothing happens** | Confirm working directory, environment variables, file paths. | Use absolute paths in cron/Task Scheduler, ensure `npm` is available on PATH. |
|
||||||
|
| **Proxy failures** | Check proxy URL/port/auth in logs. | Test with `curl`/`Invoke-WebRequest`, update credentials, or disable proxy temporarily. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Manual Investigation Tips
|
||||||
|
|
||||||
|
- **Single account test:** `npm run start -- --account email@example.com`
|
||||||
|
- **Playwright Inspector:** set `PWDEBUG=1` to pause the browser for step-by-step review.
|
||||||
|
- **Job state reset:** delete `sessions/job-state/` for a clean pass.
|
||||||
|
- **Session reset:** remove `sessions/` to force fresh logins.
|
||||||
|
- **Network tracing:** use the bundled Chromium DevTools (`--devtools`) when running locally.
|
||||||
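Put together, the tips above translate into a short local debugging session. The email address is a placeholder; `PWDEBUG` is Playwright's standard inspector switch, and the paths come straight from the list.

```bash
# Start from a clean job state, then step through a single account with the Playwright Inspector
rm -rf sessions/job-state/
PWDEBUG=1 npm run start -- --account email@example.com
```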
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## When to Revisit Config
|
||||||
|
|
||||||
|
- After Microsoft introduces new activities or login flows.
|
||||||
|
- When risk alerts become frequent (tune delays, enable vacation mode).
|
||||||
|
- If external schedulers overlap and cause concurrent runs.
|
||||||
|
- When scaling to more accounts (consider proxies, increase `clusters`).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Related guides:** [Configuration](./config.md) · [Notifications](./conclusionwebhook.md) · [Security](./security.md)
|
||||||
|
|||||||
222
docs/docker.md
@@ -1,40 +1,33 @@
|
|||||||
# 🐳 Docker Guide
|
# 🐳 Docker Guide
|
||||||
|
|
||||||
**Run the script in a container**
|
Run the bot in a containerized environment with optional in-container cron support.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## ⚡ Quick Start
|
## ⚡ Quick Start
|
||||||
|
|
||||||
### 1. Create Required Files
|
1. **Create required files**
|
||||||
|
- `src/accounts.jsonc` with your credentials
|
||||||
|
- `src/config.jsonc` (defaults apply if missing)
|
||||||
|
2. **Start the container**
|
||||||
|
```bash
|
||||||
|
docker compose up -d
|
||||||
|
```
|
||||||
|
3. **Watch logs**
|
||||||
|
```bash
|
||||||
|
docker logs -f microsoft-rewards-bot
|
||||||
|
```
|
||||||
|
|
||||||
Ensure you have:
|
The container performs a single pass. Use cron, Task Scheduler, or another orchestrator to restart it on your desired cadence.
|
||||||
- `src/accounts.jsonc` with your credentials
|
|
||||||
- `src/config.jsonc` (uses defaults if missing)
|
|
||||||
|
|
||||||
### 2. Start Container
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. View Logs
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker logs -f microsoft-rewards-bot
|
|
||||||
```
|
|
||||||
|
|
||||||
**That's it!** Script runs automatically.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🎯 What's Included
|
## 🎯 What's Included
|
||||||
|
|
||||||
The Docker setup:
|
- ✅ Chromium Headless Shell (lightweight browser runtime)
|
||||||
- ✅ **Chromium Headless Shell** — Lightweight browser
|
- ✅ Cron-ready entrypoint (`docker-entrypoint.sh`)
|
||||||
- ✅ **Scheduler enabled** — Daily automation
|
- ✅ Volume mounts for persistent sessions and configs
|
||||||
- ✅ **Volume mounts** — Persistent sessions
|
- ✅ Forced headless mode for container stability
|
||||||
- ✅ **Force headless** — Required for containers
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -42,34 +35,30 @@ The Docker setup:
|
|||||||
|
|
||||||
| Host Path | Container Path | Purpose |
|
| Host Path | Container Path | Purpose |
|
||||||
|-----------|----------------|---------|
|
|-----------|----------------|---------|
|
||||||
| `./src/accounts.jsonc` | `/usr/src/.../src/accounts.jsonc` | Account credentials (read-only) |
|
| `./src/accounts.jsonc` | `/app/src/accounts.jsonc` | Account credentials (read-only) |
|
||||||
| `./src/config.jsonc` | `/usr/src/.../src/config.jsonc` | Configuration (read-only) |
|
| `./src/config.jsonc` | `/app/src/config.jsonc` | Configuration (read-only) |
|
||||||
| `./sessions` | `/usr/src/.../sessions` | Cookies & fingerprints |
|
| `./sessions` | `/app/sessions` | Cookies, fingerprints, and job-state |
|
||||||
|
|
||||||
|
Edit `compose.yaml` to adjust paths or add additional mounts.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🌍 Environment Variables
|
## 🌍 Environment Variables
|
||||||
|
|
||||||
### Set Timezone
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
services:
|
services:
|
||||||
rewards:
|
microsoft-rewards-bot:
|
||||||
environment:
|
environment:
|
||||||
TZ: Europe/Paris
|
TZ: "Europe/Paris" # Container timezone (cron + logging)
|
||||||
|
NODE_ENV: "production"
|
||||||
|
FORCE_HEADLESS: "1" # Required for Chromium in Docker
|
||||||
|
#USE_CRON: "true" # Optional cron mode (see below)
|
||||||
|
#CRON_SCHEDULE: "0 9 * * *"
|
||||||
|
#RUN_ON_START: "true"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Use Inline JSON
|
- `ACCOUNTS_JSON` and `ACCOUNTS_FILE` can override account sources.
|
||||||
|
- `ACCOUNTS_JSON` expects inline JSON; `ACCOUNTS_FILE` points to a mounted path.
|
||||||
```bash
|
|
||||||
docker run -e ACCOUNTS_JSON='{"accounts":[...]}' ...
|
|
||||||
```
|
|
||||||
|
|
||||||
### Custom Config Path
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run -e ACCOUNTS_FILE=/custom/path/accounts.json ...
|
|
||||||
```
|
|
||||||
|
|
||||||
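Both account-source overrides in one place, as a sketch: the inline JSON payload mirrors the example from the accounts guide, while the file path, host mount, and image name are illustrative.

```bash
# Inline JSON: accounts come directly from the environment
docker run --rm \
  -e ACCOUNTS_JSON='{"accounts":[{"email":"test@example.com","password":"pass"}]}' \
  microsoft-rewards-bot

# ACCOUNTS_FILE: point the bot at a mounted file instead
docker run --rm \
  -v "$PWD/accounts.json:/custom/path/accounts.json:ro" \
  -e ACCOUNTS_FILE=/custom/path/accounts.json \
  microsoft-rewards-bot
```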
---
|
---
|
||||||
|
|
||||||
@@ -94,17 +83,60 @@ docker compose restart
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## 🎛️ Scheduling Options
|
||||||
|
|
||||||
|
### Use a host scheduler (recommended)
|
||||||
|
|
||||||
|
- Trigger `docker compose up --build` (or restart the container) with cron, systemd timers, Task Scheduler, Kubernetes CronJobs, etc.
|
||||||
|
- Ensure persistent volumes are mounted so repeated runs reuse state.
|
||||||
|
- See [External Scheduling](schedule.md) for host-level examples.
|
||||||
|
|
||||||
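As a concrete companion to the host-scheduler bullets above, a single crontab entry on the Docker host is enough; the checkout path and log file below are illustrative.

```bash
# Host crontab: rebuild and run the container every day at 09:00
0 9 * * * cd /opt/Microsoft-Rewards-Bot && /usr/bin/docker compose up --build >> /var/log/rewards-compose.log 2>&1
```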
|
### Enable in-container cron (optional)
|
||||||
|
|
||||||
|
1. Set environment variables in `docker-compose.yml`:
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
microsoft-rewards-bot:
|
||||||
|
environment:
|
||||||
|
USE_CRON: "true"
|
||||||
|
CRON_SCHEDULE: "0 9,16,21 * * *" # Example: 09:00, 16:00, 21:00
|
||||||
|
RUN_ON_START: "true" # Optional one-time run at container boot
|
||||||
|
```
|
||||||
|
2. Rebuild and redeploy:
|
||||||
|
```bash
|
||||||
|
docker compose down
|
||||||
|
docker compose build --no-cache
|
||||||
|
docker compose up -d
|
||||||
|
```
|
||||||
|
3. Confirm cron is active:
|
||||||
|
```bash
|
||||||
|
docker logs -f microsoft-rewards-bot
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Cron schedule examples
|
||||||
|
|
||||||
|
| Schedule | Description | Cron expression |
|
||||||
|
|----------|-------------|-----------------|
|
||||||
|
| Daily at 09:00 | Single run | `0 9 * * *` |
|
||||||
|
| Twice daily | 09:00 & 21:00 | `0 9,21 * * *` |
|
||||||
|
| Every 6 hours | Four runs/day | `0 */6 * * *` |
|
||||||
|
| Weekdays at 08:00 | Monday–Friday | `0 8 * * 1-5` |
|
||||||
|
|
||||||
|
Validate expressions with [crontab.guru](https://crontab.guru).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## 🛠️ Troubleshooting
|
## 🛠️ Troubleshooting
|
||||||
|
|
||||||
| Problem | Solution |
|
| Problem | Solution |
|
||||||
|---------|----------|
|
|---------|----------|
|
||||||
| **"accounts.json not found"** | Ensure `./src/accounts.jsonc` exists and is mounted in compose.yaml |
|
| **"accounts.json not found"** | Ensure `./src/accounts.jsonc` exists and is mounted read-only |
|
||||||
| **"Browser launch failed"** | Ensure `FORCE_HEADLESS=1` is set |
|
| **"Browser launch failed"** | Verify `FORCE_HEADLESS=1` and Chromium dependencies installed |
|
||||||
| **"Permission denied"** | Check file permissions (`chmod 644 accounts.jsonc config.jsonc`) |
|
| **"Permission denied"** | Check file permissions (`chmod 644 accounts.jsonc config.jsonc`) |
|
||||||
| **Scheduler not running** | Verify `schedule.enabled: true` in config |
|
| **Automation not repeating** | Enable cron (`USE_CRON=true`) or use a host scheduler |
|
||||||
| **Cron not working** | See [Cron Troubleshooting](#-cron-troubleshooting) above |
|
| **Cron not working** | See [Cron troubleshooting](#-cron-troubleshooting) |
|
||||||
|
|
||||||
### Debug Container
|
### Debug container
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Enter container shell
|
# Enter container shell
|
||||||
@@ -113,29 +145,78 @@ docker exec -it microsoft-rewards-bot /bin/bash
|
|||||||
# Check Node.js version
|
# Check Node.js version
|
||||||
docker exec -it microsoft-rewards-bot node --version
|
docker exec -it microsoft-rewards-bot node --version
|
||||||
|
|
||||||
# View config (mounted in /src/)
|
# Inspect mounted config
|
||||||
docker exec -it microsoft-rewards-bot cat src/config.jsonc
|
docker exec -it microsoft-rewards-bot cat /app/src/config.jsonc
|
||||||
|
|
||||||
# Check if cron is enabled
|
# Check env vars
|
||||||
docker exec -it microsoft-rewards-bot printenv | grep USE_CRON
|
docker exec -it microsoft-rewards-bot printenv | grep -E "TZ|USE_CRON|CRON_SCHEDULE"
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🎛️ Custom Configuration
|
## 🔄 Switching cron on or off
|
||||||
|
|
||||||
### Option 1: Built-in Scheduler (Default, Recommended)
|
- **Enable cron:** set `USE_CRON=true`, provide `CRON_SCHEDULE`, rebuild, and redeploy.
|
||||||
|
- **Disable cron:** remove `USE_CRON` (and related variables). The container will run once per start; handle recurrence externally.
|
||||||
|
|
||||||
**Pros:**
|
---
|
||||||
- ✅ Lighter resource usage
|
|
||||||
- ✅ Better integration with config.jsonc
|
|
||||||
- ✅ No additional setup needed
|
|
||||||
- ✅ Automatic jitter for natural timing
|
|
||||||
|
|
||||||
**Default** `docker-compose.yml`:
|
## 🐛 Cron troubleshooting
|
||||||
```yaml
|
|
||||||
services:
|
| Problem | Solution |
|
||||||
rewards:
|
|---------|----------|
|
||||||
|
| **Cron not executing** | Check logs for "Cron mode enabled" and cron syntax errors |
|
||||||
|
| **Wrong timezone** | Ensure `TZ` matches your location |
|
||||||
|
| **Syntax error** | Validate expression at [crontab.guru](https://crontab.guru) |
|
||||||
|
| **No logs generated** | Tail `/var/log/cron.log` inside the container |
|
||||||
|
| **Duplicate runs** | Ensure only one cron entry is configured |
|
||||||
|
|
||||||
|
### Inspect cron inside the container
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec -it microsoft-rewards-bot /bin/bash
|
||||||
|
ps aux | grep cron
|
||||||
|
crontab -l
|
||||||
|
tail -100 /var/log/cron.log
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📚 Next steps
|
||||||
|
|
||||||
|
- [Configuration guide](config.md)
|
||||||
|
- [External scheduling](schedule.md)
|
||||||
|
- [Humanization guide](humanization.md)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**[← Back to Hub](index.md)** | **[Getting Started](getting-started.md)**

# 🐳 Docker Guide
|
||||||
|
|
||||||
|
**Run the script in a container**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ⚡ Quick Start
|
||||||
|
|
||||||
|
### 1. Create Required Files
|
||||||
|
|
||||||
|
Ensure you have:
|
||||||
|
- `src/accounts.jsonc` with your credentials
|
||||||
|
- `src/config.jsonc` (uses defaults if missing)
|
||||||
|
|
||||||
|
### 2. Start Container
|
||||||
|
|
||||||
|
## 🎛️ Scheduling Options
|
||||||
|
|
||||||
|
### Use a host scheduler (recommended)
|
||||||
|
|
||||||
|
- Trigger `docker compose up --build` on your preferred schedule (cron, systemd timers, Task Scheduler, Kubernetes CronJob, etc.).
|
||||||
|
- Ensure volumes remain consistent so each run reuses accounts, config, and sessions.
|
||||||
|
- See [External Scheduling](schedule.md) for concrete host examples.
|
||||||
|
|
||||||
|
### Enable in-container cron (optional)
|
||||||
|
|
||||||
|
1. Set environment variables in `docker-compose.yml` or `docker run`:
|
||||||
build: .
|
build: .
|
||||||
environment:
|
environment:
|
||||||
TZ: "Europe/Paris"
|
TZ: "Europe/Paris"
|
||||||
@@ -216,17 +297,10 @@ services:
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🔄 Switching Between Scheduler and Cron
|
## 🔄 Switching Cron On or Off
|
||||||
|
|
||||||
**From Built-in → Cron:**
|
- **Enable cron:** set `USE_CRON=true`, provide `CRON_SCHEDULE`, rebuild the image, and redeploy.
|
||||||
1. Add `USE_CRON: "true"` to environment
|
- **Disable cron:** remove `USE_CRON` (and related variables). The container will run once per start; use host automation to relaunch when needed.
|
||||||
2. Add `CRON_SCHEDULE` with desired timing
|
|
||||||
3. Rebuild: `docker compose up -d --build`
|
|
||||||
|
|
||||||
**From Cron → Built-in:**
|
|
||||||
1. Remove or comment `USE_CRON` variable
|
|
||||||
2. Configure `schedule` in `src/config.jsonc`
|
|
||||||
3. Rebuild: `docker compose up -d --build`
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -269,8 +343,8 @@ printenv | grep -E 'TZ|NODE_ENV'
|
|||||||
**Want notifications?**
|
**Want notifications?**
|
||||||
→ **[Discord Webhooks](./conclusionwebhook.md)**
|
→ **[Discord Webhooks](./conclusionwebhook.md)**
|
||||||
|
|
||||||
**Scheduler config?**
|
**Need scheduling tips?**
|
||||||
→ **[Scheduler Guide](./schedule.md)**
|
→ **[External Scheduling](./schedule.md)**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -68,8 +68,8 @@ npm run build
|
|||||||
# Single run (test it works)
|
# Single run (test it works)
|
||||||
npm start
|
npm start
|
||||||
|
|
||||||
# Automated daily scheduler (set and forget)
|
# Schedule it (Task Scheduler, cron, etc.)
|
||||||
npm run start:schedule
|
# See docs/schedule.md for examples
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
@@ -113,7 +113,7 @@ Once running, explore these guides:
|
|||||||
| Priority | Guide | Why Important |
|
| Priority | Guide | Why Important |
|
||||||
|----------|-------|---------------|
|
|----------|-------|---------------|
|
||||||
| **High** | **[Accounts & 2FA](./accounts.md)** | Set up TOTP for secure automation |
|
| **High** | **[Accounts & 2FA](./accounts.md)** | Set up TOTP for secure automation |
|
||||||
| **High** | **[Scheduling](./schedule.md)** | Configure automated daily runs |
|
| **High** | **[External Scheduling](./schedule.md)** | Automate with Task Scheduler or cron |
|
||||||
| **Medium** | **[Notifications](./ntfy.md)** | Get alerts on your phone |
|
| **Medium** | **[Notifications](./ntfy.md)** | Get alerts on your phone |
|
||||||
| **Low** | **[Humanization](./humanization.md)** | Advanced anti-detection |
|
| **Low** | **[Humanization](./humanization.md)** | Advanced anti-detection |
|
||||||
|
|
||||||
@@ -134,5 +134,5 @@ Once running, explore these guides:
|
|||||||
|
|
||||||
- **[Accounts & 2FA](./accounts.md)** — Add Microsoft accounts with TOTP
|
- **[Accounts & 2FA](./accounts.md)** — Add Microsoft accounts with TOTP
|
||||||
- **[Docker](./docker.md)** — Deploy with containers
|
- **[Docker](./docker.md)** — Deploy with containers
|
||||||
- **[Scheduler](./schedule.md)** — Automate daily execution
|
- **[External Scheduling](./schedule.md)** — Automate daily execution
|
||||||
- **[Discord Webhooks](./conclusionwebhook.md)** — Get run summaries
|
- **[Discord Webhooks](./conclusionwebhook.md)** — Get run summaries
|
||||||
|
|||||||
@@ -147,10 +147,10 @@ Skip random days per week:
|
|||||||
## 📚 Next Steps
|
## 📚 Next Steps
|
||||||
|
|
||||||
**Need vacation mode?**
|
**Need vacation mode?**
|
||||||
→ See [Scheduler Vacation](./schedule.md#vacation-mode)
|
→ See [Vacation settings](./config.md#vacation)
|
||||||
|
|
||||||
**Want scheduling?**
|
**Want scheduling?**
|
||||||
→ **[Scheduler Guide](./schedule.md)**
|
→ **[External Scheduling](./schedule.md)**
|
||||||
|
|
||||||
**More security?**
|
**More security?**
|
||||||
→ **[Security Guide](./security.md)**
|
→ **[Security Guide](./security.md)**
|
||||||
|
|||||||
@@ -14,18 +14,18 @@
|
|||||||
|
|
||||||
1. **[Setup Accounts](accounts.md)** — Add credentials + 2FA
|
1. **[Setup Accounts](accounts.md)** — Add credentials + 2FA
|
||||||
2. **[Configure Bot](config.md)** — Essential settings
|
2. **[Configure Bot](config.md)** — Essential settings
|
||||||
3. **[Enable Scheduler](schedule.md)** — Daily automation
|
3. **[Schedule Runs](schedule.md)** — Use OS-level automation
|
||||||
|
|
||||||
**Done!** The bot will run automatically.
|
**Done!** The bot will run automatically.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## <EFBFBD> Feature Guides
|
## ✨ Feature Guides
|
||||||
|
|
||||||
| Feature | Description |
|
| Feature | Description |
|
||||||
|---------|-------------|
|
|---------|-------------|
|
||||||
| **[Configuration](config.md)** | All settings explained |
|
| **[Configuration](config.md)** | All settings explained |
|
||||||
| **[Scheduler](schedule.md)** | Automated daily runs |
|
| **[External Scheduling](schedule.md)** | Automate with cron or Task Scheduler |
|
||||||
| **[Humanization](humanization.md)** | Anti-detection system |
|
| **[Humanization](humanization.md)** | Anti-detection system |
|
||||||
| **[Webhooks](conclusionwebhook.md)** | Discord notifications |
|
| **[Webhooks](conclusionwebhook.md)** | Discord notifications |
|
||||||
| **[NTFY Alerts](ntfy.md)** | Mobile push notifications |
|
| **[NTFY Alerts](ntfy.md)** | Mobile push notifications |
|
||||||
@@ -39,7 +39,7 @@
|
|||||||
|
|
||||||
| Issue | Solution |
|
| Issue | Solution |
|
||||||
|-------|----------|
|
|-------|----------|
|
||||||
| **Bot not working?** | [Diagnostics Guide](diagnostics.md) |
|
| **Bot not working?** | [Troubleshooting Guide](diagnostics.md) |
|
||||||
| **Login failed?** | [Accounts & 2FA](accounts.md#troubleshooting) |
|
| **Login failed?** | [Accounts & 2FA](accounts.md#troubleshooting) |
|
||||||
| **Account banned?** | [Security Guide](security.md) |
|
| **Account banned?** | [Security Guide](security.md) |
|
||||||
| **Git conflicts?** | [Conflict Resolution](git-conflict-resolution.md) |
|
| **Git conflicts?** | [Conflict Resolution](git-conflict-resolution.md) |
|
||||||
|
|||||||
@@ -107,11 +107,11 @@ Get-ChildItem sessions/job-state -Recurse -Filter "*.json" | Where-Object {$_.La
|
|||||||
|
|
||||||
## 📚 Next Steps
|
## 📚 Next Steps
|
||||||
|
|
||||||
**Need scheduler?**
|
**Need automation?**
|
||||||
→ **[Scheduler Guide](./schedule.md)**
|
→ **[External Scheduling](./schedule.md)**
|
||||||
|
|
||||||
**Want diagnostics?**
|
**Need troubleshooting tips?**
|
||||||
→ **[Diagnostics Guide](./diagnostics.md)**
|
→ **[Troubleshooting Guide](./diagnostics.md)**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -110,8 +110,8 @@ curl -d "Test from rewards script" https://ntfy.sh/your-topic
|
|||||||
**Want Discord too?**
|
**Want Discord too?**
|
||||||
→ **[Discord Webhooks](./conclusionwebhook.md)**
|
→ **[Discord Webhooks](./conclusionwebhook.md)**
|
||||||
|
|
||||||
**Need detailed logs?**
|
**Need troubleshooting tips?**
|
||||||
→ **[Diagnostics Guide](./diagnostics.md)**
|
→ **[Troubleshooting Guide](./diagnostics.md)**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -113,7 +113,7 @@ curl --proxy http://user:pass@proxy.com:8080 http://httpbin.org/ip
|
|||||||
## 📚 Next Steps
|
## 📚 Next Steps
|
||||||
|
|
||||||
**Proxy working?**
|
**Proxy working?**
|
||||||
→ **[Setup Scheduler](./schedule.md)**
|
→ **[Schedule Runs](./schedule.md)**
|
||||||
|
|
||||||
**Need humanization?**
|
**Need humanization?**
|
||||||
→ **[Humanization Guide](./humanization.md)**
|
→ **[Humanization Guide](./humanization.md)**
|
||||||
|
|||||||
BIN
docs/schedule.md
Binary file not shown.
@@ -21,7 +21,7 @@ Your accounts **may be banned**. Use at your own risk.
|
|||||||
- **Run 1-2x daily max** — Don't be greedy
|
- **Run 1-2x daily max** — Don't be greedy
|
||||||
- **Test on secondary accounts** — Never risk your main account
|
- **Test on secondary accounts** — Never risk your main account
|
||||||
- **Enable vacation mode** — Random off days look natural
|
- **Enable vacation mode** — Random off days look natural
|
||||||
- **Monitor regularly** — Check diagnostics and logs
|
- **Monitor regularly** — Check logs and webhook alerts
|
||||||
|
|
||||||
### ❌ DON'T
|
### ❌ DON'T
|
||||||
|
|
||||||
@@ -131,20 +131,6 @@ chmod 600 src/accounts.json
|
|||||||
|
|
||||||
## 📊 Monitoring
|
## 📊 Monitoring
|
||||||
|
|
||||||
### Enable Diagnostics
|
|
||||||
|
|
||||||
```jsonc
|
|
||||||
{
|
|
||||||
"diagnostics": {
|
|
||||||
"enabled": true,
|
|
||||||
"saveScreenshot": true,
|
|
||||||
"saveHtml": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
→ **[Diagnostics Guide](./diagnostics.md)**
|
|
||||||
|
|
||||||
### Enable Notifications
|
### Enable Notifications
|
||||||
|
|
||||||
```jsonc
|
```jsonc
|
||||||
@@ -187,7 +173,7 @@ chmod 600 src/accounts.json
|
|||||||
- 🚫 **No telemetry** — Script doesn't phone home
|
- 🚫 **No telemetry** — Script doesn't phone home
|
||||||
- 📁 **File security** — Restrict permissions
|
- 📁 **File security** — Restrict permissions
|
||||||
- 🔄 **Regular backups** — Keep config backups
|
- 🔄 **Regular backups** — Keep config backups
|
||||||
- 🗑️ **Clean logs** — Delete old diagnostics
|
- 🗑️ **Clean logs** — Rotate or delete old log files
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -200,7 +186,7 @@ chmod 600 src/accounts.json
|
|||||||
→ **[Proxy Guide](./proxy.md)**
|
→ **[Proxy Guide](./proxy.md)**
|
||||||
|
|
||||||
**Want monitoring?**
|
**Want monitoring?**
|
||||||
→ **[Diagnostics](./diagnostics.md)**
|
→ **[Notifications Guide](./conclusionwebhook.md)**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -96,8 +96,8 @@ npm run build
|
|||||||
**Need security tips?**
|
**Need security tips?**
|
||||||
→ **[Security Guide](./security.md)**
|
→ **[Security Guide](./security.md)**
|
||||||
|
|
||||||
**Setup scheduler?**
|
**Need automation?**
|
||||||
→ **[Scheduler Guide](./schedule.md)**
|
→ **[External Scheduling](./schedule.md)**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
13
package-lock.json
generated
@@ -12,7 +12,6 @@
|
|||||||
"axios": "^1.8.4",
|
"axios": "^1.8.4",
|
||||||
"chalk": "^4.1.2",
|
"chalk": "^4.1.2",
|
||||||
"cheerio": "^1.0.0",
|
"cheerio": "^1.0.0",
|
||||||
"cron-parser": "^4.9.0",
|
|
||||||
"fingerprint-generator": "^2.1.66",
|
"fingerprint-generator": "^2.1.66",
|
||||||
"fingerprint-injector": "^2.1.66",
|
"fingerprint-injector": "^2.1.66",
|
||||||
"http-proxy-agent": "^7.0.2",
|
"http-proxy-agent": "^7.0.2",
|
||||||
@@ -943,18 +942,6 @@
|
|||||||
"integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==",
|
"integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/cron-parser": {
|
|
||||||
"version": "4.9.0",
|
|
||||||
"resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.9.0.tgz",
|
|
||||||
"integrity": "sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==",
|
|
||||||
"license": "MIT",
|
|
||||||
"dependencies": {
|
|
||||||
"luxon": "^3.2.1"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": ">=12.0.0"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/cross-spawn": {
|
"node_modules/cross-spawn": {
|
||||||
"version": "7.0.6",
|
"version": "7.0.6",
|
||||||
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
|
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
|
||||||
|
|||||||
@@ -24,11 +24,9 @@
|
|||||||
"start": "node --enable-source-maps ./dist/index.js",
|
"start": "node --enable-source-maps ./dist/index.js",
|
||||||
"ts-start": "node --loader ts-node/esm ./src/index.ts",
|
"ts-start": "node --loader ts-node/esm ./src/index.ts",
|
||||||
"dev": "ts-node ./src/index.ts -dev",
|
"dev": "ts-node ./src/index.ts -dev",
|
||||||
"ts-schedule": "ts-node ./src/scheduler.ts",
|
|
||||||
"start:schedule": "node --enable-source-maps ./dist/scheduler.js",
|
|
||||||
"lint": "eslint \"src/**/*.{ts,tsx}\"",
|
"lint": "eslint \"src/**/*.{ts,tsx}\"",
|
||||||
"prepare": "npm run build",
|
"prepare": "npm run build",
|
||||||
"setup": "node ./setup/update/setup.mjs",
|
"setup": "node ./setup/update/setup.mjs",
|
||||||
"kill-chrome-win": "powershell -Command \"Get-Process | Where-Object { $_.MainModule.FileVersionInfo.FileDescription -eq 'Google Chrome for Testing' } | ForEach-Object { Stop-Process -Id $_.Id -Force }\"",
|
"kill-chrome-win": "powershell -Command \"Get-Process | Where-Object { $_.MainModule.FileVersionInfo.FileDescription -eq 'Google Chrome for Testing' } | ForEach-Object { Stop-Process -Id $_.Id -Force }\"",
|
||||||
"create-docker": "docker build -t microsoft-rewards-bot ."
|
"create-docker": "docker build -t microsoft-rewards-bot ."
|
||||||
},
|
},
|
||||||
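With `ts-schedule` and `start:schedule` removed, the remaining entry point is the plain `start` script, so an external scheduler can build once and then invoke the compiled bundle directly. Both commands below are taken from the scripts that remain in `package.json`.

```bash
# One-time build, then run the compiled entry point (same command as "npm run start")
npm run build
node --enable-source-maps ./dist/index.js
```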
@@ -63,7 +61,6 @@
|
|||||||
"axios": "^1.8.4",
|
"axios": "^1.8.4",
|
||||||
"chalk": "^4.1.2",
|
"chalk": "^4.1.2",
|
||||||
"cheerio": "^1.0.0",
|
"cheerio": "^1.0.0",
|
||||||
"cron-parser": "^4.9.0",
|
|
||||||
"fingerprint-generator": "^2.1.66",
|
"fingerprint-generator": "^2.1.66",
|
||||||
"fingerprint-injector": "^2.1.66",
|
"fingerprint-injector": "^2.1.66",
|
||||||
"http-proxy-agent": "^7.0.2",
|
"http-proxy-agent": "^7.0.2",
|
||||||
@@ -400,8 +400,7 @@ async function main() {
 code = await updateDocker()
 }

-// CRITICAL FIX: Always exit with code, even from scheduler
-// The scheduler expects the update script to complete and exit
+// CRITICAL: Always exit with code so external schedulers can react correctly
 // Otherwise the process hangs indefinitely and gets killed by watchdog
 process.exit(code)
 }
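Because the updater above now always exits with a status code, any external scheduler can treat the bot as a plain one-shot command and react to the result. Below is a minimal sketch of such a wrapper; it assumes the compiled entry point used by the repo's "start" script (`./dist/index.js`), and the retry or alerting policy is left entirely to whatever runs it (cron, a systemd timer, or Windows Task Scheduler).

```ts
// Sketch only: run one pass of the bot and propagate its exit code.
// Assumes the build output path from package.json's "start" script; adjust if yours differs.
import { spawn } from 'node:child_process'

function runOnce(): Promise<number> {
    return new Promise((resolve) => {
        const child = spawn(process.execPath, ['--enable-source-maps', './dist/index.js'], { stdio: 'inherit' })
        child.on('exit', (code) => resolve(code ?? 1))
        child.on('error', () => resolve(1))
    })
}

// A non-zero exit status is what the external scheduler should alert on.
runOnce().then((code) => process.exit(code))
```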
@@ -1,12 +1,14 @@
 {
-// Sample accounts configuration. Copy to accounts.jsonc and fill in real values.
+// Sample accounts configuration. Copy to accounts.jsonc and replace with real values.
 "accounts": [
 {
+// Account #1 — enabled with TOTP and recovery email required
 "enabled": true,
-"email": "email_1@outlook.com",
-"password": "password_1",
-"totp": "",
-"recoveryEmail": "backup_1@example.com",
+"email": "primary_account@outlook.com",
+"password": "strong-password-1",
+"totp": "BASE32SECRETPRIMARY",
+"recoveryRequired": true,
+"recoveryEmail": "primary.backup@example.com",
 "proxy": {
 "proxyAxios": true,
 "url": "",
@@ -16,11 +18,61 @@
 }
 },
 {
+// Account #2 — disabled account kept for later use (recovery optional)
 "enabled": false,
-"email": "email_2@outlook.com",
-"password": "password_2",
-"totp": "",
+"email": "secondary_account@outlook.com",
+"password": "strong-password-2",
+"totp": "BASE32SECRETSECOND",
 "recoveryRequired": false,
+"recoveryEmail": "secondary.backup@example.com",
+"proxy": {
+"proxyAxios": true,
+"url": "",
+"port": 0,
+"username": "",
+"password": ""
+}
+},
+{
+// Account #3 — dedicated proxy with credentials
+"enabled": true,
+"email": "with_proxy@outlook.com",
+"password": "strong-password-3",
+"totp": "BASE32SECRETTHIRD",
+"recoveryRequired": true,
+"recoveryEmail": "proxy.backup@example.com",
+"proxy": {
+"proxyAxios": true,
+"url": "proxy.example.com",
+"port": 3128,
+"username": "proxyuser",
+"password": "proxypass"
+}
+},
+{
+// Account #4 — recovery optional, no proxying through Axios layer
+"enabled": true,
+"email": "no_proxy@outlook.com",
+"password": "strong-password-4",
+"totp": "BASE32SECRETFOUR",
+"recoveryRequired": false,
+"recoveryEmail": "no.proxy.backup@example.com",
+"proxy": {
+"proxyAxios": false,
+"url": "",
+"port": 0,
+"username": "",
+"password": ""
+}
+},
+{
+// Account #5 — enabled with TOTP omitted (will rely on recovery email)
+"enabled": true,
+"email": "totp_optional@outlook.com",
+"password": "strong-password-5",
+"totp": "",
+"recoveryRequired": true,
+"recoveryEmail": "totp.optional.backup@example.com",
 "proxy": {
 "proxyAxios": true,
 "url": "",
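For orientation, the sample entries above all share one shape. The interface below is inferred from those fields only; it is not copied from the project's own account type and may omit optional fields the bot actually supports.

```ts
// Inferred from the sample above; illustrative, not authoritative.
interface SampleAccountEntry {
    enabled: boolean
    email: string
    password: string
    totp: string              // base32 secret, or "" when TOTP is not used
    recoveryRequired: boolean
    recoveryEmail: string
    proxy: {
        proxyAxios: boolean
        url: string            // "" leaves the proxy unset
        port: number
        username: string
        password: string
    }
}
```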
@@ -129,7 +129,6 @@ export default class BrowserFunc {

 if (!scriptContent) {
 this.bot.log(this.bot.isMobile, 'GET-DASHBOARD-DATA', 'Dashboard script not found on first try, attempting recovery', 'warn')
-await this.bot.browser.utils.captureDiagnostics(target, 'dashboard-data-missing').catch(() => {})

 // Force a navigation retry once before failing hard
 await this.goHome(target)
@@ -148,9 +147,8 @@ export default class BrowserFunc {
 const dashboardData = await this.parseDashboardFromScript(target, scriptContent)

 if (!dashboardData) {
-await this.bot.browser.utils.captureDiagnostics(target, 'dashboard-data-parse').catch(() => {})
 this.bot.log(this.bot.isMobile, 'GET-DASHBOARD-DATA', 'Unable to parse dashboard script', 'error')
-throw new Error('Unable to parse dashboard script - check diagnostics')
+throw new Error('Unable to parse dashboard script - inspect recent logs and page markup')
 }

 return dashboardData
@@ -2,7 +2,6 @@ import { Page } from 'rebrowser-playwright'
 import { load } from 'cheerio'

 import { MicrosoftRewardsBot } from '../index'
-import { captureDiagnostics as captureSharedDiagnostics } from '../util/Diagnostics'

 type DismissButton = { selector: string; label: string; isXPath?: boolean }

@@ -219,12 +218,4 @@ export default class BrowserUtil {
 } catch { /* swallow */ }
 }

-/**
-* Capture minimal diagnostics for a page: screenshot + HTML content.
-* Files are written under ./reports/<date>/ with a safe label.
-*/
-async captureDiagnostics(page: Page, label: string): Promise<void> {
-await captureSharedDiagnostics(this.bot, page, label)
-}
-
 }
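With the shared `captureDiagnostics` helper gone, the call sites above now just log and rethrow. If an occasional page capture is still useful while debugging locally, Playwright's own page APIs are enough; the sketch below is an illustration only (the output directory is an arbitrary choice), not a replacement shipped with the bot.

```ts
import fs from 'fs'
import path from 'path'
import { Page } from 'rebrowser-playwright'

// Optional, ad-hoc debug capture: one screenshot plus the raw HTML of the page.
async function debugCapture(page: Page, label: string): Promise<void> {
    const dir = path.join(process.cwd(), 'debug-captures')
    fs.mkdirSync(dir, { recursive: true })
    const safe = label.replace(/[^a-z0-9-_]/gi, '_')
    await page.screenshot({ path: path.join(dir, `${safe}.png`), fullPage: true }).catch(() => { /* best effort */ })
    const html = await page.content().catch(() => '')
    if (html) fs.writeFileSync(path.join(dir, `${safe}.html`), html, 'utf-8')
}
```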
@@ -23,14 +23,6 @@
 "clusters": 1,
 "passesPerRun": 1
 },
-"schedule": {
-"enabled": false,
-"useAmPm": false,
-"time12": "9:00 AM",
-"time24": "09:00",
-"timeZone": "Europe/Paris",
-"runImmediatelyOnStart": true
-},
 "jobState": {
 "enabled": true,
 "dir": ""
@@ -126,7 +118,7 @@
 "authToken": ""
 },

-// Logging & diagnostics
+// Logging
 "logging": {
 "excludeFunc": [
 "SEARCH-CLOSE-TABS",
@@ -140,19 +132,6 @@
 ],
 "redactEmails": true
 },
-"diagnostics": {
-"enabled": true,
-"saveScreenshot": true,
-"saveHtml": true,
-"maxPerRun": 2,
-"retentionDays": 7
-},
-"analytics": {
-"enabled": true,
-"retentionDays": 30,
-"exportMarkdown": true,
-"webhookSummary": true
-},

 // Buy mode
 "buyMode": {
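A config that still carries the removed `schedule` block is now expected to be flagged rather than honored. The check below is only a sketch of that idea; the function name and message wording are illustrative, not the project's actual validator.

```ts
// Illustrative sketch: warn when the removed "schedule" block is still present in a raw config.
function warnOnRemovedScheduleBlock(raw: Record<string, unknown>, warn: (msg: string) => void): void {
    if ('schedule' in raw) {
        warn('"schedule" in config.jsonc is no longer used; schedule runs with cron, systemd timers, or Windows Task Scheduler instead')
    }
}
```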
@@ -8,7 +8,6 @@ import { AxiosRequestConfig } from 'axios'
 import { generateTOTP } from '../util/Totp'
 import { saveSessionData } from '../util/Load'
 import { MicrosoftRewardsBot } from '../index'
-import { captureDiagnostics } from '../util/Diagnostics'
 import { OAuth } from '../interface/OAuth'
 import { Retry } from '../util/Retry'

@@ -202,10 +201,7 @@ export class Login {
 const currentUrl = page.url()
 this.bot.log(this.bot.isMobile, 'LOGIN-APP', `OAuth code not received after ${elapsed}s (timeout: ${DEFAULT_TIMEOUTS.oauthMaxMs / 1000}s). Current URL: ${currentUrl}`, 'error')

-// Save diagnostics for debugging
-await this.saveIncidentArtifacts(page, 'oauth-timeout').catch(() => {})
-
-throw new Error(`OAuth code not received within ${DEFAULT_TIMEOUTS.oauthMaxMs / 1000}s - mobile token acquisition failed. Check diagnostics in reports/`)
+throw new Error(`OAuth code not received within ${DEFAULT_TIMEOUTS.oauthMaxMs / 1000}s - mobile token acquisition failed. Check recent logs for details.`)
 }

 this.bot.log(this.bot.isMobile, 'LOGIN-APP', `OAuth code received in ${Math.round((Date.now() - start) / 1000)}s`)
@@ -897,10 +893,8 @@ export class Login {
 }
 }).catch(() => ({ title: 'unknown', bodyLength: 0, hasRewardsText: false, visibleElements: 0 }))

 this.bot.log(this.bot.isMobile, 'LOGIN', `Page info: ${JSON.stringify(pageContent)}`, 'error')
-await this.bot.browser.utils.captureDiagnostics(page, 'login-portal-missing').catch(()=>{})
-this.bot.log(this.bot.isMobile, 'LOGIN', 'Portal element missing (diagnostics saved)', 'error')
+this.bot.log(this.bot.isMobile, 'LOGIN', 'Portal element missing', 'error')
 throw new Error(`Rewards portal not detected. URL: ${currentUrl}. Check reports/ folder`)
 }
 this.bot.log(this.bot.isMobile, 'LOGIN', `Portal found via fallback (${fallbackSelector})`)
@@ -1092,7 +1086,6 @@ export class Login {
 this.bot.compromisedReason = 'sign-in-blocked'
 this.startCompromisedInterval()
 await this.bot.engageGlobalStandby('sign-in-blocked', email).catch(()=>{})
-await this.saveIncidentArtifacts(page,'sign-in-blocked').catch(()=>{})
 // Open security docs for immediate guidance (best-effort)
 await this.openDocsTab(page, docsUrl).catch(()=>{})
 return true
@@ -1203,7 +1196,6 @@ export class Login {
 this.bot.compromisedReason = 'recovery-mismatch'
 this.startCompromisedInterval()
 await this.bot.engageGlobalStandby('recovery-mismatch', email).catch(()=>{})
-await this.saveIncidentArtifacts(page,'recovery-mismatch').catch(()=>{})
 await this.openDocsTab(page, docsUrl).catch(()=>{})
 } else {
 const mode = observedPrefix.length === 1 ? 'lenient' : 'strict'
@@ -1272,10 +1264,6 @@ export class Login {
 }
 }

-private async saveIncidentArtifacts(page: Page, slug: string) {
-await captureDiagnostics(this.bot, page, slug, { scope: 'security', skipSlot: true, force: true })
-}
-
 private async openDocsTab(page: Page, url: string) {
 try {
 const ctx = page.context()
@@ -168,7 +168,6 @@ export class Workers {

 await this.applyThrottle(throttle, 1200, 2600)
 } catch (error) {
-await this.bot.browser.utils.captureDiagnostics(activityPage, `activity_error_${activity.title || activity.offerId}`)
 this.bot.log(this.bot.isMobile, 'ACTIVITY', 'An error occurred:' + error, 'error')
 throttle.record(false)
 }
@@ -227,7 +226,6 @@ export class Workers {
 await runWithTimeout(this.bot.activities.run(page, activity))
 throttle.record(true)
 } catch (e) {
-await this.bot.browser.utils.captureDiagnostics(page, `activity_timeout_${activity.title || activity.offerId}`)
 throttle.record(false)
 throw e
 }
@@ -123,7 +123,6 @@ export class Quiz extends Workers {

 this.bot.log(this.bot.isMobile, 'QUIZ', 'Completed the quiz successfully')
 } catch (error) {
-await this.bot.browser.utils.captureDiagnostics(page, 'quiz_error')
 await page.close()
 this.bot.log(this.bot.isMobile, 'QUIZ', 'An error occurred:' + error, 'error')
 }
src/index.ts (189 changed lines)
@@ -25,7 +25,6 @@ import Humanizer from './util/Humanizer'
 import { detectBanReason } from './util/BanDetector'
 import { RiskManager, RiskMetrics, RiskEvent } from './util/RiskManager'
 import { BanPredictor } from './util/BanPredictor'
-import { Analytics } from './util/Analytics'
 import { QueryDiversityEngine } from './util/QueryDiversityEngine'
 import JobState from './util/JobState'
 import { StartupValidator } from './util/StartupValidator'
@@ -67,17 +66,12 @@ export class MicrosoftRewardsBot {
 // Summary collection (per process)
 private accountSummaries: AccountSummary[] = []
 private runId: string = Math.random().toString(36).slice(2)
-private diagCount: number = 0
 private bannedTriggered: { email: string; reason: string } | null = null
 private globalStandby: { active: boolean; reason?: string } = { active: false }
-// Scheduler heartbeat integration
-private heartbeatFile?: string
-private heartbeatTimer?: NodeJS.Timeout
 private riskManager?: RiskManager
 private lastRiskMetrics?: RiskMetrics
 private riskThresholdTriggered: boolean = false
 private banPredictor?: BanPredictor
-private analytics?: Analytics
 private accountJobState?: JobState
 private accountRunCounts: Map<string, number> = new Map()

@@ -109,10 +103,6 @@ export class MicrosoftRewardsBot {
 })
 }

-if (this.config.analytics?.enabled) {
-this.analytics = new Analytics()
-}
-
 if (this.config.riskManagement?.enabled) {
 this.riskManager = new RiskManager()
 if (this.config.riskManagement.banPrediction) {
@@ -190,29 +180,6 @@ export class MicrosoftRewardsBot {
 return this.lastRiskMetrics?.delayMultiplier ?? 1
 }

-private trackAnalytics(summary: AccountSummary, riskScore?: number): void {
-if (!this.analytics || this.config.analytics?.enabled !== true) return
-const today = new Date().toISOString().slice(0, 10)
-try {
-this.analytics.recordRun({
-date: today,
-email: summary.email,
-pointsEarned: summary.totalCollected,
-pointsInitial: summary.initialTotal,
-pointsEnd: summary.endTotal,
-desktopPoints: summary.desktopCollected,
-mobilePoints: summary.mobileCollected,
-executionTimeMs: summary.durationMs,
-successRate: summary.errors.length ? 0 : 1,
-errorsCount: summary.errors.length,
-banned: !!summary.banned?.status,
-riskScore
-})
-} catch (e) {
-log('main', 'ANALYTICS', `Failed to record analytics for ${summary.email}: ${e instanceof Error ? e.message : e}`, 'warn')
-}
-}
-
 private shouldSkipAccount(email: string, dayKey: string): boolean {
 if (!this.accountJobState) return false
 if (this.config.jobState?.skipCompletedAccounts === false) return false
@@ -245,20 +212,6 @@ export class MicrosoftRewardsBot {
 this.printBanner()
 log('main', 'MAIN', `Bot started with ${this.config.clusters} clusters`)

-// If scheduler provided a heartbeat file, update it periodically to signal liveness
-const hbFile = process.env.SCHEDULER_HEARTBEAT_FILE
-if (hbFile) {
-try {
-const dir = path.dirname(hbFile)
-if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true })
-fs.writeFileSync(hbFile, String(Date.now()))
-this.heartbeatFile = hbFile
-this.heartbeatTimer = setInterval(() => {
-try { fs.writeFileSync(hbFile, String(Date.now())) } catch { /* ignore */ }
-}, 60_000)
-} catch { /* ignore */ }
-}
-
 // If buy mode is enabled, run single-account interactive session without automation
 if (this.buyMode.enabled) {
 const targetInfo = this.buyMode.email ? ` for ${this.buyMode.email}` : ''
@@ -446,32 +399,7 @@ export class MicrosoftRewardsBot {
 console.log(` Auto-Update: ${updTargets.join(', ')}`)
 }

-const sched = this.config.schedule || {}
-const schedEnabled = !!sched.enabled
-if (!schedEnabled) {
-console.log(' Scheduler: Disabled')
-} else {
-const tz = sched.timeZone || 'UTC'
-let formatName = ''
-let timeShown = ''
-const srec: Record<string, unknown> = sched as unknown as Record<string, unknown>
-const useAmPmVal = typeof srec['useAmPm'] === 'boolean' ? (srec['useAmPm'] as boolean) : undefined
-const time12Val = typeof srec['time12'] === 'string' ? String(srec['time12']) : undefined
-const time24Val = typeof srec['time24'] === 'string' ? String(srec['time24']) : undefined
-
-if (useAmPmVal) {
-formatName = '12h'
-timeShown = time12Val || sched.time || '9:00 AM'
-} else if (useAmPmVal === false) {
-formatName = '24h'
-timeShown = time24Val || sched.time || '09:00'
-} else {
-if (time24Val && time24Val.trim()) { formatName = '24h'; timeShown = time24Val }
-else if (time12Val && time12Val.trim()) { formatName = '12h'; timeShown = time12Val }
-else { formatName = 'auto'; timeShown = sched.time || '09:00' }
-}
-console.log(` Scheduler: ${timeShown} (${formatName}, ${tz})`)
-}
+console.log(' Scheduler: External (see docs)')
 }
 console.log('─'.repeat(60) + '\n')
 }
@@ -585,13 +513,8 @@ export class MicrosoftRewardsBot {
 try {
 await this.runAutoUpdate()
 } catch {/* ignore */}
-// Only exit if not spawned by scheduler
-if (!process.env.SCHEDULER_HEARTBEAT_FILE) {
-log('main', 'MAIN-WORKER', 'All workers destroyed. Exiting main process!', 'warn')
-process.exit(0)
-} else {
-log('main', 'MAIN-WORKER', 'All workers destroyed. Scheduler mode: returning control to scheduler.')
-}
+log('main', 'MAIN-WORKER', 'All workers destroyed. Exiting main process!', 'warn')
+process.exit(0)
 })()
 }
 })
@@ -681,7 +604,6 @@ export class MicrosoftRewardsBot {
 riskLevel: 'safe'
 }
 this.accountSummaries.push(summary)
-this.trackAnalytics(summary, summary.riskScore)
 this.persistAccountCompletion(account.email, accountDayKey, summary)
 continue
 }
@@ -846,7 +768,6 @@ export class MicrosoftRewardsBot {
 }

 this.accountSummaries.push(summary)
-this.trackAnalytics(summary, riskScore)
 this.persistAccountCompletion(account.email, accountDayKey, summary)

 if (banned.status) {
@@ -888,16 +809,10 @@ export class MicrosoftRewardsBot {
 } else {
 // Single process mode -> build and send conclusion directly
 await this.sendConclusion(this.accountSummaries)
-// Cleanup heartbeat timer/file at end of run
-if (this.heartbeatTimer) { try { clearInterval(this.heartbeatTimer) } catch { /* ignore */ } }
-if (this.heartbeatFile) { try { if (fs.existsSync(this.heartbeatFile)) fs.unlinkSync(this.heartbeatFile) } catch { /* ignore */ } }
 // After conclusion, run optional auto-update
 await this.runAutoUpdate().catch(() => {/* ignore update errors */})
 }
-// Only exit if not spawned by scheduler
-if (!process.env.SCHEDULER_HEARTBEAT_FILE) {
-process.exit()
-}
+process.exit()
 }

 /** Send immediate ban alert if configured. */
@@ -1336,83 +1251,6 @@ export class MicrosoftRewardsBot {
 log('main','REPORT',`Failed to save report: ${e instanceof Error ? e.message : e}`,'warn')
 }

-// Cleanup old diagnostics
-try {
-const days = cfg.diagnostics?.retentionDays
-if (typeof days === 'number' && days > 0) {
-await this.cleanupOldDiagnostics(days)
-}
-} catch (e) {
-log('main','REPORT',`Failed diagnostics cleanup: ${e instanceof Error ? e.message : e}`,'warn')
-}
-
-await this.publishAnalyticsArtifacts().catch(e => {
-log('main','ANALYTICS',`Failed analytics post-processing: ${e instanceof Error ? e.message : e}`,'warn')
-})
-
-}
-
-/** Reserve one diagnostics slot for this run (caps captures). */
-public tryReserveDiagSlot(maxPerRun: number): boolean {
-if (this.diagCount >= Math.max(0, maxPerRun || 0)) return false
-this.diagCount += 1
-return true
-}
-
-/** Delete diagnostics folders older than N days under ./reports */
-private async cleanupOldDiagnostics(retentionDays: number) {
-const base = path.join(process.cwd(), 'reports')
-if (!fs.existsSync(base)) return
-const entries = fs.readdirSync(base, { withFileTypes: true })
-const now = Date.now()
-const keepMs = retentionDays * 24 * 60 * 60 * 1000
-for (const e of entries) {
-if (!e.isDirectory()) continue
-const name = e.name // expect YYYY-MM-DD
-const parts = name.split('-').map((n: string) => parseInt(n, 10))
-if (parts.length !== 3 || parts.some(isNaN)) continue
-const [yy, mm, dd] = parts
-if (yy === undefined || mm === undefined || dd === undefined) continue
-const dirDate = new Date(yy, mm - 1, dd).getTime()
-if (isNaN(dirDate)) continue
-if (now - dirDate > keepMs) {
-const dirPath = path.join(base, name)
-try { fs.rmSync(dirPath, { recursive: true, force: true }) } catch { /* ignore */ }
-}
-}
-}
-
-private async publishAnalyticsArtifacts(): Promise<void> {
-if (!this.analytics || this.config.analytics?.enabled !== true) return
-
-const retention = this.config.analytics.retentionDays
-if (typeof retention === 'number' && retention > 0) {
-this.analytics.cleanup(retention)
-}
-
-if (this.config.analytics.exportMarkdown || this.config.analytics.webhookSummary) {
-const markdown = this.analytics.exportMarkdown(30)
-if (this.config.analytics.exportMarkdown) {
-const now = new Date()
-const day = `${now.getFullYear()}-${String(now.getMonth()+1).padStart(2,'0')}-${String(now.getDate()).padStart(2,'0')}`
-const baseDir = path.join(process.cwd(), 'reports', day)
-if (!fs.existsSync(baseDir)) fs.mkdirSync(baseDir, { recursive: true })
-const mdPath = path.join(baseDir, `analytics_${this.runId}.md`)
-fs.writeFileSync(mdPath, markdown, 'utf-8')
-log('main','ANALYTICS',`Saved analytics summary to ${mdPath}`)
-}
-
-if (this.config.analytics.webhookSummary) {
-const { ConclusionWebhook } = await import('./util/ConclusionWebhook')
-await ConclusionWebhook(
-this.config,
-'📈 Performance Report',
-['```markdown', markdown, '```'].join('\n'),
-undefined,
-DISCORD.COLOR_BLUE
-)
-}
-}
-}
 }

 // Run optional auto-update script based on configuration flags.
@@ -1501,24 +1339,6 @@ function formatDuration(ms: number): string {
 }

 async function main() {
-const initialConfig = loadConfig()
-const scheduleEnabled = initialConfig?.schedule?.enabled === true
-const skipScheduler = process.argv.some((arg: string) => arg === '--no-scheduler' || arg === '--single-run')
-|| process.env.REWARDS_FORCE_SINGLE_RUN === '1'
-const buyModeRequested = process.argv.includes('-buy')
-const invokedByScheduler = !!process.env.SCHEDULER_HEARTBEAT_FILE
-
-if (scheduleEnabled && !skipScheduler && !buyModeRequested && !invokedByScheduler) {
-log('main', 'SCHEDULER', 'Schedule enabled → handing off to in-process scheduler. Use --no-scheduler for a single pass.', 'log', 'green')
-try {
-await import('./scheduler')
-return
-} catch (err) {
-const message = err instanceof Error ? err.message : String(err)
-log('main', 'SCHEDULER', `Failed to start scheduler inline: ${message}. Continuing with single-run fallback.`, 'warn', 'yellow')
-}
-}
-
 const rewardsBot = new MicrosoftRewardsBot(false)

 const crashState = { restarts: 0 }
@@ -1538,7 +1358,6 @@ async function main() {
 }

 const gracefulExit = (code: number) => {
-try { rewardsBot['heartbeatTimer'] && clearInterval(rewardsBot['heartbeatTimer']) } catch { /* ignore */ }
 if (config?.crashRecovery?.autoRestart && code !== 0) {
 const max = config.crashRecovery.maxRestarts ?? 2
 if (crashState.restarts < max) {
@@ -7,7 +7,7 @@ import type { Page } from 'playwright'
 * and perform all required steps on the provided page.
 */
 export interface ActivityHandler {
-/** Optional identifier for diagnostics */
+/** Optional identifier used in logging output */
 id?: string
 /**
 * Return true if this handler knows how to process the given activity.
@@ -22,17 +22,15 @@ export interface Config {
 webhook: ConfigWebhook;
 conclusionWebhook?: ConfigWebhook; // Optional secondary webhook for final summary
 ntfy: ConfigNtfy;
-diagnostics?: ConfigDiagnostics;
 update?: ConfigUpdate;
-schedule?: ConfigSchedule;
 passesPerRun?: number;
 buyMode?: ConfigBuyMode; // Optional manual spending mode
 vacation?: ConfigVacation; // Optional monthly contiguous off-days
 crashRecovery?: ConfigCrashRecovery; // Automatic restart / graceful shutdown
 riskManagement?: ConfigRiskManagement; // NEW: Risk-aware throttling and ban prediction
-analytics?: ConfigAnalytics; // NEW: Performance dashboard and metrics tracking
 dryRun?: boolean; // NEW: Dry-run mode (simulate without executing)
 queryDiversity?: ConfigQueryDiversity; // NEW: Multi-source query generation
+legacy?: ConfigLegacyFlags; // Track legacy config usage for warnings
 }

 export interface ConfigSaveFingerprint {
@@ -81,14 +79,6 @@ export interface ConfigProxy {
 proxyBingTerms: boolean;
 }

-export interface ConfigDiagnostics {
-enabled?: boolean; // master toggle
-saveScreenshot?: boolean; // capture .png
-saveHtml?: boolean; // capture .html
-maxPerRun?: number; // cap number of captures per run
-retentionDays?: number; // delete older diagnostic folders
-}
-
 export interface ConfigUpdate {
 git?: boolean; // if true, run git pull + npm ci + npm run build after completion
 docker?: boolean; // if true, run docker update routine (compose pull/up) after completion
@@ -102,18 +92,6 @@ export interface ConfigBuyMode {
 maxMinutes?: number; // session duration cap
 }

-export interface ConfigSchedule {
-enabled?: boolean;
-time?: string; // Back-compat: accepts "HH:mm" or "h:mm AM/PM"
-// New optional explicit times
-time12?: string; // e.g., "9:00 AM"
-time24?: string; // e.g., "09:00"
-timeZone?: string; // IANA TZ e.g., "America/New_York"
-useAmPm?: boolean; // If true, prefer time12 + AM/PM style; if false, prefer time24. If undefined, back-compat behavior.
-runImmediatelyOnStart?: boolean; // if true, run once immediately when process starts
-cron?: string | string[]; // Optional cron expression(s) (standard 5-field or 6-field) for advanced scheduling
-}
-
 export interface ConfigVacation {
 enabled?: boolean; // default false
 minDays?: number; // default 3
@@ -192,9 +170,9 @@ export interface ConfigLogging {
 [key: string]: unknown; // forward compatibility
 }

-// CommunityHelp removed (privacy-first policy)
+// CommunityHelp intentionally omitted (privacy-first policy)

-// NEW FEATURES: Risk Management, Analytics, Query Diversity
+// NEW FEATURES: Risk Management and Query Diversity
 export interface ConfigRiskManagement {
 enabled?: boolean; // master toggle for risk-aware throttling
 autoAdjustDelays?: boolean; // automatically increase delays when risk is high
@@ -203,13 +181,6 @@ export interface ConfigRiskManagement {
 riskThreshold?: number; // 0-100, pause if risk exceeds this
 }

-export interface ConfigAnalytics {
-enabled?: boolean; // track performance metrics
-retentionDays?: number; // how long to keep analytics data
-exportMarkdown?: boolean; // generate markdown reports
-webhookSummary?: boolean; // send analytics via webhook
-}
-
 export interface ConfigQueryDiversity {
 enabled?: boolean; // use multi-source query generation
 sources?: Array<'google-trends' | 'reddit' | 'news' | 'wikipedia' | 'local-fallback'>; // which sources to use
@@ -217,3 +188,8 @@ export interface ConfigQueryDiversity {
 cacheMinutes?: number; // cache duration
 }

+export interface ConfigLegacyFlags {
+diagnosticsConfigured?: boolean;
+analyticsConfigured?: boolean;
+}
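The new `ConfigLegacyFlags` only records that old keys were seen so a warning can be emitted once. How the loader fills it in is not shown here; the snippet below is a guess at the general shape, with the function name invented for illustration.

```ts
// Hypothetical sketch of populating ConfigLegacyFlags from the raw parsed config object.
type LegacyFlags = { diagnosticsConfigured?: boolean; analyticsConfigured?: boolean }

function detectLegacyFlags(raw: Record<string, unknown>): LegacyFlags {
    return {
        diagnosticsConfigured: 'diagnostics' in raw,
        analyticsConfigured: 'analytics' in raw
    }
}
```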
427
src/scheduler.ts
427
src/scheduler.ts
@@ -1,427 +0,0 @@
|
|||||||
import { DateTime, IANAZone } from 'luxon'
|
|
||||||
import cronParser from 'cron-parser'
|
|
||||||
import { spawn } from 'child_process'
|
|
||||||
import fs from 'fs'
|
|
||||||
import path from 'path'
|
|
||||||
import { MicrosoftRewardsBot } from './index'
|
|
||||||
import { loadConfig } from './util/Load'
|
|
||||||
import { log } from './util/Logger'
|
|
||||||
import type { Config } from './interface/Config'
|
|
||||||
|
|
||||||
type CronExpressionInfo = { expression: string; tz: string }
|
|
||||||
type DateTimeInstance = ReturnType<typeof DateTime.fromJSDate>
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Parse environment variable as number with validation
|
|
||||||
*/
|
|
||||||
function parseEnvNumber(key: string, defaultValue: number, min: number, max: number): number {
|
|
||||||
const raw = process.env[key]
|
|
||||||
if (!raw) return defaultValue
|
|
||||||
|
|
||||||
const parsed = Number(raw)
|
|
||||||
if (isNaN(parsed)) {
|
|
||||||
void log('main', 'SCHEDULER', `Invalid ${key}="${raw}". Using default ${defaultValue}`, 'warn')
|
|
||||||
return defaultValue
|
|
||||||
}
|
|
||||||
|
|
||||||
if (parsed < min || parsed > max) {
|
|
||||||
void log('main', 'SCHEDULER', `${key}=${parsed} out of range [${min}, ${max}]. Using default ${defaultValue}`, 'warn')
|
|
||||||
return defaultValue
|
|
||||||
}
|
|
||||||
|
|
||||||
return parsed
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Parse time from schedule config (supports 12h and 24h formats)
|
|
||||||
*/
|
|
||||||
function resolveTimeParts(schedule: Config['schedule'] | undefined): { tz: string; hour: number; minute: number } {
|
|
||||||
const tz = (schedule?.timeZone && IANAZone.isValidZone(schedule.timeZone)) ? schedule.timeZone : 'UTC'
|
|
||||||
|
|
||||||
// Warn if an invalid timezone was provided
|
|
||||||
if (schedule?.timeZone && !IANAZone.isValidZone(schedule.timeZone)) {
|
|
||||||
void log('main', 'SCHEDULER', `Invalid timezone "${schedule.timeZone}" provided. Falling back to UTC. Valid zones: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones`, 'warn')
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine source string
|
|
||||||
let src = ''
|
|
||||||
if (typeof schedule?.useAmPm === 'boolean') {
|
|
||||||
if (schedule.useAmPm) src = (schedule.time12 || schedule.time || '').trim()
|
|
||||||
else src = (schedule.time24 || schedule.time || '').trim()
|
|
||||||
} else {
|
|
||||||
// Back-compat: prefer time if present; else time24 or time12
|
|
||||||
src = (schedule?.time || schedule?.time24 || schedule?.time12 || '').trim()
|
|
||||||
}
|
|
||||||
// Try to parse 24h first: HH:mm
|
|
||||||
const m24 = src.match(/^\s*(\d{1,2}):(\d{2})\s*$/i)
|
|
||||||
if (m24) {
|
|
||||||
const hh = Math.max(0, Math.min(23, parseInt(m24[1]!, 10)))
|
|
||||||
const mm = Math.max(0, Math.min(59, parseInt(m24[2]!, 10)))
|
|
||||||
return { tz, hour: hh, minute: mm }
|
|
||||||
}
|
|
||||||
// Parse 12h with AM/PM: h:mm AM or h AM
|
|
||||||
const m12 = src.match(/^\s*(\d{1,2})(?::(\d{2}))?\s*(AM|PM)\s*$/i)
|
|
||||||
if (m12) {
|
|
||||||
let hh = parseInt(m12[1]!, 10)
|
|
||||||
const mm = m12[2] ? parseInt(m12[2]!, 10) : 0
|
|
||||||
const ampm = m12[3]!.toUpperCase()
|
|
||||||
if (hh === 12) hh = 0
|
|
||||||
if (ampm === 'PM') hh += 12
|
|
||||||
hh = Math.max(0, Math.min(23, hh))
|
|
||||||
const m = Math.max(0, Math.min(59, mm))
|
|
||||||
return { tz, hour: hh, minute: m }
|
|
||||||
}
|
|
||||||
// Fallback: default 09:00
|
|
||||||
return { tz, hour: 9, minute: 0 }
|
|
||||||
}
|
|
||||||
|
|
||||||
function parseTargetToday(now: Date, schedule: Config['schedule'] | undefined) {
|
|
||||||
const { tz, hour, minute } = resolveTimeParts(schedule)
|
|
||||||
const dtn = DateTime.fromJSDate(now, { zone: tz })
|
|
||||||
return dtn.set({ hour, minute, second: 0, millisecond: 0 })
|
|
||||||
}
|
|
||||||
|
|
||||||
function normalizeCronExpressions(schedule: Config['schedule'] | undefined, fallbackTz: string): CronExpressionInfo[] {
|
|
||||||
if (!schedule) return []
|
|
||||||
const raw = schedule.cron
|
|
||||||
if (!raw) return []
|
|
||||||
const expressions = Array.isArray(raw) ? raw : [raw]
|
|
||||||
return expressions
|
|
||||||
.map(expr => (typeof expr === 'string' ? expr.trim() : ''))
|
|
||||||
.filter(expr => expr.length > 0)
|
|
||||||
.map(expr => ({ expression: expr, tz: (schedule.timeZone && IANAZone.isValidZone(schedule.timeZone)) ? schedule.timeZone : fallbackTz }))
|
|
||||||
}
|
|
||||||
|
|
||||||
function getNextCronOccurrence(after: DateTimeInstance, items: CronExpressionInfo[]): { next: DateTimeInstance; source: string } | null {
|
|
||||||
let soonest: { next: DateTimeInstance; source: string } | null = null
|
|
||||||
for (const item of items) {
|
|
||||||
try {
|
|
||||||
const iterator = cronParser.parseExpression(item.expression, {
|
|
||||||
currentDate: after.toJSDate(),
|
|
||||||
tz: item.tz
|
|
||||||
})
|
|
||||||
const nextDate = iterator.next().toDate()
|
|
||||||
const nextDt = DateTime.fromJSDate(nextDate, { zone: item.tz })
|
|
||||||
if (!soonest || nextDt < soonest.next) {
|
|
||||||
soonest = { next: nextDt, source: item.expression }
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
void log('main', 'SCHEDULER', `Invalid cron expression "${item.expression}": ${error instanceof Error ? error.message : String(error)}`, 'warn')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return soonest
|
|
||||||
}
|
|
||||||
|
|
||||||
function getNextDailyOccurrence(after: DateTimeInstance, schedule: Config['schedule'] | undefined): DateTimeInstance {
|
|
||||||
const todayTarget = parseTargetToday(after.toJSDate(), schedule)
|
|
||||||
const target = after >= todayTarget ? todayTarget.plus({ days: 1 }) : todayTarget
|
|
||||||
return target
|
|
||||||
}
|
|
||||||
|
|
||||||
function computeNextRun(after: DateTimeInstance, schedule: Config['schedule'] | undefined, cronItems: CronExpressionInfo[]): { next: DateTimeInstance; source: 'cron' | 'daily'; detail?: string } {
|
|
||||||
if (cronItems.length > 0) {
|
|
||||||
const cronNext = getNextCronOccurrence(after, cronItems)
|
|
||||||
if (cronNext) {
|
|
||||||
return { next: cronNext.next, source: 'cron', detail: cronNext.source }
|
|
||||||
}
|
|
||||||
void log('main', 'SCHEDULER', 'All cron expressions invalid; falling back to daily schedule', 'warn')
|
|
||||||
}
|
|
||||||
|
|
||||||
return { next: getNextDailyOccurrence(after, schedule), source: 'daily' }
|
|
||||||
}
|
|
||||||
|
|
||||||
async function runOnePass(): Promise<void> {
|
|
||||||
const bot = new MicrosoftRewardsBot(false)
|
|
||||||
await bot.initialize()
|
|
||||||
await bot.run()
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Run a single pass either in-process or as a child process (default),
|
|
||||||
* with a watchdog timeout to kill stuck runs.
|
|
||||||
*/
|
|
||||||
async function runOnePassWithWatchdog(): Promise<void> {
|
|
||||||
// Heartbeat-aware watchdog configuration
|
|
||||||
const staleHeartbeatMin = parseEnvNumber('SCHEDULER_STALE_HEARTBEAT_MINUTES', 30, 5, 1440)
|
|
||||||
const graceMin = parseEnvNumber('SCHEDULER_HEARTBEAT_GRACE_MINUTES', 15, 1, 120)
|
|
||||||
const hardcapMin = parseEnvNumber('SCHEDULER_PASS_HARDCAP_MINUTES', 480, 30, 1440)
|
|
||||||
const checkEveryMs = 60_000 // check once per minute
|
|
||||||
|
|
||||||
// Validate: stale should be >= grace
|
|
||||||
const effectiveStale = Math.max(staleHeartbeatMin, graceMin)
|
|
||||||
|
|
||||||
// Fork per pass: safer because we can terminate a stuck child without killing the scheduler
|
|
||||||
const forkPerPass = String(process.env.SCHEDULER_FORK_PER_PASS || 'true').toLowerCase() !== 'false'
|
|
||||||
|
|
||||||
if (!forkPerPass) {
|
|
||||||
// In-process fallback (cannot forcefully stop if truly stuck)
|
|
||||||
await log('main', 'SCHEDULER', `Starting pass in-process (grace ${graceMin}m • stale ${staleHeartbeatMin}m • hardcap ${hardcapMin}m). Cannot force-kill if stuck.`)
|
|
||||||
// No true watchdog possible in-process; just run
|
|
||||||
await runOnePass()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Child process execution
|
|
||||||
const indexJs = path.join(__dirname, 'index.js')
|
|
||||||
await log('main', 'SCHEDULER', `Spawning child for pass: ${process.execPath} ${indexJs}`)
|
|
||||||
|
|
||||||
// Prepare heartbeat file path and pass to child
|
|
||||||
const cfg = loadConfig() as Config
|
|
||||||
const baseDir = path.join(process.cwd(), cfg.sessionPath || 'sessions')
|
|
||||||
const hbFile = path.join(baseDir, `heartbeat_${Date.now()}.lock`)
|
|
||||||
try { fs.mkdirSync(baseDir, { recursive: true }) } catch { /* ignore */ }
|
|
||||||
|
|
||||||
await new Promise<void>((resolve) => {
|
|
||||||
const child = spawn(process.execPath, [indexJs], { stdio: 'inherit', env: { ...process.env, SCHEDULER_HEARTBEAT_FILE: hbFile } })
|
|
||||||
let finished = false
|
|
||||||
const startedAt = Date.now()
|
|
||||||
|
|
||||||
let killTimeout: NodeJS.Timeout | undefined
|
|
||||||
|
|
||||||
const killChild = async (signal: NodeJS.Signals) => {
|
|
||||||
try {
|
|
||||||
await log('main', 'SCHEDULER', `Sending ${signal} to stuck child PID ${child.pid}`,'warn')
|
|
||||||
child.kill(signal)
|
|
||||||
} catch { /* ignore */ }
|
|
||||||
}
|
|
||||||
|
|
||||||
const timer = setInterval(() => {
|
|
||||||
if (finished) return
|
|
||||||
const now = Date.now()
|
|
||||||
const runtimeMin = Math.floor((now - startedAt) / 60000)
|
|
||||||
// Hard cap: always terminate if exceeded
|
|
||||||
if (runtimeMin >= hardcapMin) {
|
|
||||||
log('main', 'SCHEDULER', `Pass exceeded hard cap of ${hardcapMin} minutes; terminating...`, 'warn')
|
|
||||||
void killChild('SIGTERM')
|
|
||||||
if (killTimeout) clearTimeout(killTimeout)
|
|
||||||
killTimeout = setTimeout(() => { try { child.kill('SIGKILL') } catch { /* ignore */ } }, 10_000)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Before grace, don't judge
|
|
||||||
if (runtimeMin < graceMin) return
|
|
||||||
// Check heartbeat freshness
|
|
||||||
try {
|
|
||||||
const st = fs.statSync(hbFile)
|
|
||||||
const mtimeMs = st.mtimeMs
|
|
||||||
const ageMin = Math.floor((now - mtimeMs) / 60000)
|
|
||||||
if (ageMin >= effectiveStale) {
|
|
||||||
log('main', 'SCHEDULER', `Heartbeat stale for ${ageMin}m (>=${effectiveStale}m). Terminating child...`, 'warn')
|
|
||||||
void killChild('SIGTERM')
|
|
||||||
if (killTimeout) clearTimeout(killTimeout)
|
|
||||||
killTimeout = setTimeout(() => { try { child.kill('SIGKILL') } catch { /* ignore */ } }, 10_000)
|
|
||||||
}
|
|
||||||
} catch (err) {
|
|
||||||
// If file missing after grace, consider stale
|
|
||||||
const msg = err instanceof Error ? err.message : String(err)
|
|
||||||
log('main', 'SCHEDULER', `Heartbeat file check failed: ${msg}. Terminating child...`, 'warn')
|
|
||||||
void killChild('SIGTERM')
|
|
||||||
if (killTimeout) clearTimeout(killTimeout)
|
|
||||||
killTimeout = setTimeout(() => { try { child.kill('SIGKILL') } catch { /* ignore */ } }, 10_000)
|
|
||||||
}
|
|
||||||
}, checkEveryMs)
|
|
||||||
|
|
||||||
child.on('exit', async (code, signal) => {
|
|
||||||
finished = true
|
|
||||||
clearInterval(timer)
|
|
||||||
if (killTimeout) clearTimeout(killTimeout)
|
|
||||||
// Cleanup heartbeat file
|
|
||||||
try { if (fs.existsSync(hbFile)) fs.unlinkSync(hbFile) } catch { /* ignore */ }
|
|
||||||
if (signal) {
|
|
||||||
await log('main', 'SCHEDULER', `Child exited due to signal: ${signal}`, 'warn')
|
|
||||||
} else if (code && code !== 0) {
|
|
||||||
await log('main', 'SCHEDULER', `Child exited with non-zero code: ${code}`, 'warn')
|
|
||||||
} else {
|
|
||||||
await log('main', 'SCHEDULER', 'Child pass completed successfully')
|
|
||||||
}
|
|
||||||
resolve()
|
|
||||||
})
|
|
||||||
|
|
||||||
child.on('error', async (err) => {
|
|
||||||
finished = true
|
|
||||||
clearInterval(timer)
|
|
||||||
if (killTimeout) clearTimeout(killTimeout)
|
|
||||||
try { if (fs.existsSync(hbFile)) fs.unlinkSync(hbFile) } catch { /* ignore */ }
|
|
||||||
await log('main', 'SCHEDULER', `Failed to spawn child: ${err instanceof Error ? err.message : String(err)}`, 'error')
|
|
||||||
resolve()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async function runPasses(passes: number): Promise<void> {
|
|
||||||
const n = Math.max(1, Math.floor(passes || 1))
|
|
||||||
for (let i = 1; i <= n; i++) {
|
|
||||||
await log('main', 'SCHEDULER', `Starting pass ${i}/${n}`)
|
|
||||||
const started = Date.now()
|
|
||||||
await runOnePassWithWatchdog()
|
|
||||||
const took = Date.now() - started
|
|
||||||
const sec = Math.max(1, Math.round(took / 1000))
|
|
||||||
await log('main', 'SCHEDULER', `Completed pass ${i}/${n}`)
|
|
||||||
await log('main', 'SCHEDULER', `Pass ${i} duration: ${sec}s`)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async function main() {
|
|
||||||
const cfg = loadConfig() as Config & { schedule?: { enabled?: boolean; time?: string; timeZone?: string; runImmediatelyOnStart?: boolean } }
|
|
||||||
const schedule = cfg.schedule || { enabled: false }
|
|
||||||
const passes = typeof cfg.passesPerRun === 'number' ? cfg.passesPerRun : 1
|
|
||||||
const offPerWeek = Math.max(0, Math.min(7, Number(cfg.humanization?.randomOffDaysPerWeek ?? 1)))
|
|
||||||
let offDays: number[] = [] // 1..7 ISO weekday
|
|
||||||
let offWeek: number | null = null
|
|
||||||
type VacRange = { start: string; end: string } | null
|
|
||||||
let vacMonth: string | null = null // 'yyyy-LL'
|
|
||||||
let vacRange: VacRange = null // ISO dates 'yyyy-LL-dd'
|
|
||||||
|
|
||||||
const refreshOffDays = async (now: { weekNumber: number }) => {
|
|
||||||
if (offPerWeek <= 0) { offDays = []; offWeek = null; return }
|
|
||||||
const week = now.weekNumber
|
|
||||||
if (offWeek === week && offDays.length) return
|
|
||||||
// choose distinct weekdays [1..7]
|
|
||||||
const pool = [1,2,3,4,5,6,7]
|
|
||||||
const chosen: number[] = []
|
|
||||||
for (let i=0;i<Math.min(offPerWeek,7);i++) {
|
|
||||||
const idx = Math.floor(Math.random()*pool.length)
|
|
||||||
chosen.push(pool[idx]!)
|
|
||||||
pool.splice(idx,1)
|
|
||||||
}
|
|
||||||
offDays = chosen.sort((a,b)=>a-b)
|
|
||||||
offWeek = week
|
|
||||||
const msg = offDays.length ? offDays.join(', ') : 'none'
|
|
||||||
await log('main','SCHEDULER',`Weekly humanization off-day sample (ISO weekday): ${msg} | adjust via config.humanization.randomOffDaysPerWeek`,'warn')
|
|
||||||
}
|
|
||||||
|
|
||||||
const chooseVacationRange = async (now: typeof DateTime.prototype) => {
|
|
||||||
// Only when enabled
|
|
||||||
if (!cfg.vacation?.enabled) { vacRange = null; vacMonth = null; return }
|
|
||||||
const monthKey = now.toFormat('yyyy-LL')
|
|
||||||
if (vacMonth === monthKey && vacRange) return
|
|
||||||
// Determine month days and choose contiguous block
|
|
||||||
const monthStart = now.startOf('month')
|
|
||||||
const monthEnd = now.endOf('month')
|
|
||||||
const totalDays = monthEnd.day
|
|
||||||
const minD = Math.max(1, Math.min(28, Number(cfg.vacation.minDays ?? 3)))
|
|
||||||
const maxD = Math.max(minD, Math.min(31, Number(cfg.vacation.maxDays ?? 5)))
|
|
||||||
const span = (minD === maxD) ? minD : (minD + Math.floor(Math.random() * (maxD - minD + 1)))
|
|
||||||
const latestStart = Math.max(1, totalDays - span + 1)
|
|
||||||
const startDay = 1 + Math.floor(Math.random() * latestStart)
|
|
||||||
const start = monthStart.set({ day: startDay })
|
|
||||||
const end = start.plus({ days: span - 1 })
|
|
||||||
vacMonth = monthKey
|
|
||||||
vacRange = { start: start.toFormat('yyyy-LL-dd'), end: end.toFormat('yyyy-LL-dd') }
|
|
||||||
await log('main','SCHEDULER',`Selected vacation block this month: ${vacRange.start} → ${vacRange.end} (${span} day(s))`,'warn')
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!schedule.enabled) {
|
|
||||||
await log('main', 'SCHEDULER', 'Schedule disabled; running once then exit')
|
|
||||||
await runPasses(passes)
|
|
||||||
process.exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
const tz = (schedule.timeZone && IANAZone.isValidZone(schedule.timeZone)) ? schedule.timeZone : 'UTC'
|
|
||||||
    const cronExpressions = normalizeCronExpressions(schedule, tz)
    let running = false

    // Optional initial jitter before the first run (to vary start time)
    const parseJitter = (minKey: string, maxKey: string, fallbackMin: string, fallbackMax: string): [number, number] => {
        const minVal = Number(process.env[minKey] || process.env[fallbackMin] || 0)
        const maxVal = Number(process.env[maxKey] || process.env[fallbackMax] || 0)
        if (isNaN(minVal) || minVal < 0) {
            void log('main', 'SCHEDULER', `Invalid ${minKey}="${process.env[minKey]}". Using 0`, 'warn')
            return [0, isNaN(maxVal) || maxVal < 0 ? 0 : maxVal]
        }
        if (isNaN(maxVal) || maxVal < 0) {
            void log('main', 'SCHEDULER', `Invalid ${maxKey}="${process.env[maxKey]}". Using 0`, 'warn')
            return [minVal, 0]
        }
        return [minVal, maxVal]
    }

    const initialJitterBounds = parseJitter('SCHEDULER_INITIAL_JITTER_MINUTES_MIN', 'SCHEDULER_INITIAL_JITTER_MINUTES_MAX', 'SCHEDULER_INITIAL_JITTER_MIN', 'SCHEDULER_INITIAL_JITTER_MAX')
    const applyInitialJitter = (initialJitterBounds[0] > 0 || initialJitterBounds[1] > 0)

    // Check if immediate run is enabled (default to false to avoid unexpected runs)
    const runImmediate = schedule.runImmediatelyOnStart === true

    if (runImmediate && !running) {
        running = true
        if (applyInitialJitter) {
            const min = Math.max(0, Math.min(initialJitterBounds[0], initialJitterBounds[1]))
            const max = Math.max(min, initialJitterBounds[0], initialJitterBounds[1])
            const jitterSec = (min === max) ? min * 60 : (min * 60 + Math.floor(Math.random() * ((max - min) * 60)))
            if (jitterSec > 0) {
                await log('main', 'SCHEDULER', `Initial jitter: delaying first run by ${Math.round(jitterSec / 60)} minute(s) (${jitterSec}s)`, 'warn')
                await new Promise((r) => setTimeout(r, jitterSec * 1000))
            }
        }
        const nowDT = DateTime.local().setZone(tz)
        await chooseVacationRange(nowDT)
        await refreshOffDays(nowDT)
        const todayIso = nowDT.toFormat('yyyy-LL-dd')
        const vr = vacRange as { start: string; end: string } | null
        const isVacationToday = !!(vr && todayIso >= vr.start && todayIso <= vr.end)
        if (isVacationToday) {
            await log('main','SCHEDULER',`Skipping immediate run: vacation day (${todayIso})`,'warn')
        } else if (offDays.includes(nowDT.weekday)) {
            await log('main','SCHEDULER',`Skipping immediate run: humanization off-day (ISO weekday ${nowDT.weekday}). Set humanization.randomOffDaysPerWeek=0 to disable.`,'warn')
        } else {
            await runPasses(passes)
        }
        running = false
    }

    for (;;) {
        const nowDT = DateTime.local().setZone(tz)
        const nextInfo = computeNextRun(nowDT, schedule, cronExpressions)
        const next = nextInfo.next
        let ms = Math.max(0, next.toMillis() - nowDT.toMillis())

        // Optional daily jitter to further randomize the exact start time each day
        let extraMs = 0
        if (cronExpressions.length === 0) {
            const dailyJitterBounds = parseJitter('SCHEDULER_DAILY_JITTER_MINUTES_MIN', 'SCHEDULER_DAILY_JITTER_MINUTES_MAX', 'SCHEDULER_DAILY_JITTER_MIN', 'SCHEDULER_DAILY_JITTER_MAX')
            const djMin = dailyJitterBounds[0]
            const djMax = dailyJitterBounds[1]
            if (djMin > 0 || djMax > 0) {
                const mn = Math.max(0, Math.min(djMin, djMax))
                const mx = Math.max(mn, djMin, djMax)
                const jitterSec = (mn === mx) ? mn * 60 : (mn * 60 + Math.floor(Math.random() * ((mx - mn) * 60)))
                extraMs = jitterSec * 1000
                ms += extraMs
            }
        }

        const human = next.toFormat('yyyy-LL-dd HH:mm ZZZZ')
        const totalSec = Math.round(ms / 1000)
        const jitterMsg = extraMs > 0 ? ` plus daily jitter (+${Math.round(extraMs/60000)}m)` : ''
        const sourceMsg = nextInfo.source === 'cron' ? ` [cron: ${nextInfo.detail}]` : ''
        await log('main', 'SCHEDULER', `Next run at ${human}${jitterMsg}${sourceMsg} (in ${totalSec}s)`)

        await new Promise((resolve) => setTimeout(resolve, ms))

        const nowRun = DateTime.local().setZone(tz)
        await chooseVacationRange(nowRun)
        await refreshOffDays(nowRun)
        const todayIso2 = nowRun.toFormat('yyyy-LL-dd')
        const vr2 = vacRange as { start: string; end: string } | null
        const isVacation = !!(vr2 && todayIso2 >= vr2.start && todayIso2 <= vr2.end)
        if (isVacation) {
            await log('main','SCHEDULER',`Skipping scheduled run: vacation day (${todayIso2})`,'warn')
            continue
        }
        if (offDays.includes(nowRun.weekday)) {
            await log('main','SCHEDULER',`Skipping scheduled run: humanization off-day (ISO weekday ${nowRun.weekday}). Set humanization.randomOffDaysPerWeek=0 to disable.`,'warn')
            continue
        }
        if (!running) {
            running = true
            await runPasses(passes)
            running = false
        } else {
            await log('main','SCHEDULER','Skipped scheduled trigger because a pass is already running','warn')
        }
    }
}

main().catch((e) => {
    void log('main', 'SCHEDULER', `Fatal error: ${e instanceof Error ? e.message : String(e)}`, 'error')
    process.exit(1)
})
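The jitter knobs above were plain environment variables: `SCHEDULER_INITIAL_JITTER_MINUTES_MIN`/`_MAX` delayed only the first run, while `SCHEDULER_DAILY_JITTER_MINUTES_MIN`/`_MAX` shifted each computed run time (and only when no cron expressions were configured). A minimal standalone sketch of the delay formula, using hypothetical bounds of 5 and 20 minutes:

```ts
// Sketch of the jitter formula used above; the 5/20 minute bounds are hypothetical.
const minMinutes = 5
const maxMinutes = 20

const lo = Math.max(0, Math.min(minMinutes, maxMinutes))
const hi = Math.max(lo, minMinutes, maxMinutes)

// Uniform delay in [lo * 60, hi * 60) seconds, matching the removed scheduler's math.
const jitterSec = (lo === hi) ? lo * 60 : (lo * 60 + Math.floor(Math.random() * ((hi - lo) * 60)))
console.log(`Delaying first run by ${Math.round(jitterSec / 60)} minute(s) (${jitterSec}s)`)
```

An OS-level replacement can approximate the same behaviour with a randomized sleep before launching the container or the `node` process.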
@@ -1,264 +1,3 @@
-import fs from 'fs'
-import path from 'path'
-
-export interface DailyMetrics {
-    date: string // YYYY-MM-DD
-    email: string
-    pointsEarned: number
-    pointsInitial: number
-    pointsEnd: number
-    desktopPoints: number
-    mobilePoints: number
-    executionTimeMs: number
-    successRate: number // 0-1
-    errorsCount: number
-    banned: boolean
-    riskScore?: number
-}
-
-export interface AccountHistory {
-    email: string
-    totalRuns: number
-    totalPointsEarned: number
-    avgPointsPerDay: number
-    avgExecutionTime: number
-    successRate: number
-    lastRunDate: string
-    banHistory: Array<{ date: string; reason: string }>
-    riskTrend: number[] // last N risk scores
-}
-
-export interface AnalyticsSummary {
-    period: string // e.g., 'last-7-days', 'last-30-days', 'all-time'
-    accounts: AccountHistory[]
-    globalStats: {
-        totalPoints: number
-        avgSuccessRate: number
-        mostProductiveAccount: string
-        mostRiskyAccount: string
-    }
-}
-
-/**
- * Analytics tracks performance metrics, point collection trends, and account health.
- * Stores data in JSON files for lightweight persistence and easy analysis.
- */
-export class Analytics {
-    private dataDir: string
-
-    constructor(baseDir: string = 'analytics') {
-        this.dataDir = path.join(process.cwd(), baseDir)
-        if (!fs.existsSync(this.dataDir)) {
-            fs.mkdirSync(this.dataDir, { recursive: true })
-        }
-    }
-
-    /**
-     * Record metrics for a completed account run
-     */
-    recordRun(metrics: DailyMetrics): void {
-        const date = metrics.date
-        const email = this.sanitizeEmail(metrics.email)
-        const fileName = `${email}_${date}.json`
-        const filePath = path.join(this.dataDir, fileName)
-
-        try {
-            fs.writeFileSync(filePath, JSON.stringify(metrics, null, 2), 'utf-8')
-        } catch (error) {
-            console.error(`Failed to save metrics for ${metrics.email}:`, error)
-        }
-    }
-
-    /**
-     * Get history for a specific account
-     */
-    getAccountHistory(email: string, days: number = 30): AccountHistory {
-        const sanitized = this.sanitizeEmail(email)
-        const files = this.getAccountFiles(sanitized, days)
-
-        if (files.length === 0) {
-            return {
-                email,
-                totalRuns: 0,
-                totalPointsEarned: 0,
-                avgPointsPerDay: 0,
-                avgExecutionTime: 0,
-                successRate: 1.0,
-                lastRunDate: 'never',
-                banHistory: [],
-                riskTrend: []
-            }
-        }
-
-        let totalPoints = 0
-        let totalTime = 0
-        let successCount = 0
-        const banHistory: Array<{ date: string; reason: string }> = []
-        const riskScores: number[] = []
-
-        for (const file of files) {
-            const filePath = path.join(this.dataDir, file)
-            try {
-                const data: DailyMetrics = JSON.parse(fs.readFileSync(filePath, 'utf-8'))
-                totalPoints += data.pointsEarned
-                totalTime += data.executionTimeMs
-                if (data.successRate > 0.5) successCount++
-                if (data.banned) {
-                    banHistory.push({ date: data.date, reason: 'detected' })
-                }
-                if (typeof data.riskScore === 'number') {
-                    riskScores.push(data.riskScore)
-                }
-            } catch {
-                continue
-            }
-        }
-
-        const totalRuns = files.length
-        const lastFile = files[files.length - 1]
-        const lastRunDate = lastFile ? lastFile.split('_')[1]?.replace('.json', '') || 'unknown' : 'unknown'
-
-        return {
-            email,
-            totalRuns,
-            totalPointsEarned: totalPoints,
-            avgPointsPerDay: Math.round(totalPoints / Math.max(1, totalRuns)),
-            avgExecutionTime: Math.round(totalTime / Math.max(1, totalRuns)),
-            successRate: successCount / Math.max(1, totalRuns),
-            lastRunDate,
-            banHistory,
-            riskTrend: riskScores.slice(-10) // last 10 risk scores
-        }
-    }
-
-    /**
-     * Generate a summary report for all accounts
-     */
-    generateSummary(days: number = 30): AnalyticsSummary {
-        const accountEmails = this.getAllAccounts()
-        const accounts: AccountHistory[] = []
-
-        for (const email of accountEmails) {
-            accounts.push(this.getAccountHistory(email, days))
-        }
-
-        const totalPoints = accounts.reduce((sum, a) => sum + a.totalPointsEarned, 0)
-        const avgSuccess = accounts.reduce((sum, a) => sum + a.successRate, 0) / Math.max(1, accounts.length)
-
-        let mostProductive = ''
-        let maxPoints = 0
-        let mostRisky = ''
-        let maxRisk = 0
-
-        for (const acc of accounts) {
-            if (acc.totalPointsEarned > maxPoints) {
-                maxPoints = acc.totalPointsEarned
-                mostProductive = acc.email
-            }
-            const avgRisk = acc.riskTrend.reduce((s, r) => s + r, 0) / Math.max(1, acc.riskTrend.length)
-            if (avgRisk > maxRisk) {
-                maxRisk = avgRisk
-                mostRisky = acc.email
-            }
-        }
-
-        return {
-            period: `last-${days}-days`,
-            accounts,
-            globalStats: {
-                totalPoints,
-                avgSuccessRate: Number(avgSuccess.toFixed(2)),
-                mostProductiveAccount: mostProductive || 'none',
-                mostRiskyAccount: mostRisky || 'none'
-            }
-        }
-    }
-
-    /**
-     * Export summary as markdown table (for human readability)
-     */
-    exportMarkdown(days: number = 30): string {
-        const summary = this.generateSummary(days)
-        const lines: string[] = []
-
-        lines.push(`# Analytics Summary (${summary.period})`)
-        lines.push('')
-        lines.push('## Global Stats')
-        lines.push(`- Total Points: ${summary.globalStats.totalPoints}`)
-        lines.push(`- Avg Success Rate: ${(summary.globalStats.avgSuccessRate * 100).toFixed(1)}%`)
-        lines.push(`- Most Productive: ${summary.globalStats.mostProductiveAccount}`)
-        lines.push(`- Most Risky: ${summary.globalStats.mostRiskyAccount}`)
-        lines.push('')
-        lines.push('## Per-Account Breakdown')
-        lines.push('')
-        lines.push('| Account | Runs | Total Points | Avg/Day | Success Rate | Last Run | Bans |')
-        lines.push('|---------|------|--------------|---------|--------------|----------|------|')
-
-        for (const acc of summary.accounts) {
-            const successPct = (acc.successRate * 100).toFixed(0)
-            const banCount = acc.banHistory.length
-            lines.push(
-                `| ${acc.email} | ${acc.totalRuns} | ${acc.totalPointsEarned} | ${acc.avgPointsPerDay} | ${successPct}% | ${acc.lastRunDate} | ${banCount} |`
-            )
-        }
-
-        return lines.join('\n')
-    }
-
-    /**
-     * Clean up old analytics files (retention policy)
-     */
-    cleanup(retentionDays: number): void {
-        const files = fs.readdirSync(this.dataDir)
-        const cutoff = Date.now() - (retentionDays * 24 * 60 * 60 * 1000)
-
-        for (const file of files) {
-            if (!file.endsWith('.json')) continue
-            const filePath = path.join(this.dataDir, file)
-            try {
-                const stats = fs.statSync(filePath)
-                if (stats.mtimeMs < cutoff) {
-                    fs.unlinkSync(filePath)
-                }
-            } catch {
-                continue
-            }
-        }
-    }
-
-    private sanitizeEmail(email: string): string {
-        return email.replace(/[^a-zA-Z0-9@._-]/g, '_')
-    }
-
-    private getAccountFiles(sanitizedEmail: string, days: number): string[] {
-        const files = fs.readdirSync(this.dataDir)
-        const cutoffDate = new Date()
-        cutoffDate.setDate(cutoffDate.getDate() - days)
-
-        return files
-            .filter((f: string) => f.startsWith(sanitizedEmail) && f.endsWith('.json'))
-            .filter((f: string) => {
-                const datePart = f.split('_')[1]?.replace('.json', '')
-                if (!datePart) return false
-                const fileDate = new Date(datePart)
-                return fileDate >= cutoffDate
-            })
-            .sort()
-    }
-
-    private getAllAccounts(): string[] {
-        const files = fs.readdirSync(this.dataDir)
-        const emailSet = new Set<string>()
-
-        for (const file of files) {
-            if (!file.endsWith('.json')) continue
-            const parts = file.split('_')
-            if (parts.length >= 2) {
-                const email = parts[0]
-                if (email) emailSet.add(email)
-            }
-        }
-
-        return Array.from(emailSet)
-    }
-}
+// Placeholder kept for backward compatibility with older imports.
+// New code should implement its own reporting or use webhooks.
+export {}
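If older code still imports the removed module, the placeholder above keeps those imports compiling but exports nothing. For reference, the removed API was used roughly like this (the values and the import path are illustrative, built only from the interfaces shown in the hunk above):

```ts
import { Analytics, DailyMetrics } from './Analytics' // illustrative path; the real module has been removed

const analytics = new Analytics() // stored JSON files under ./analytics

// Hypothetical metrics for one completed run
const metrics: DailyMetrics = {
    date: '2025-01-01',
    email: 'user@example.com',
    pointsEarned: 180,
    pointsInitial: 1200,
    pointsEnd: 1380,
    desktopPoints: 120,
    mobilePoints: 60,
    executionTimeMs: 540000,
    successRate: 0.95,
    errorsCount: 1,
    banned: false
}

analytics.recordRun(metrics)              // wrote <email>_<date>.json
console.log(analytics.exportMarkdown(30)) // markdown summary of the last 30 days
analytics.cleanup(30)                     // dropped files older than 30 days
```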
@@ -197,35 +197,32 @@ export class ConfigValidator {
             }
         }
 
-        // Check schedule
-        if (config.schedule?.enabled) {
-            if (!config.schedule.timeZone) {
-                issues.push({
-                    severity: 'warning',
-                    field: 'schedule.timeZone',
-                    message: 'No timeZone specified, defaulting to UTC',
-                    suggestion: 'Set your local timezone (e.g., America/New_York)'
-                })
-            }
-
-            const useAmPm = config.schedule.useAmPm
-            const time12 = (config.schedule as unknown as Record<string, unknown>)['time12']
-            const time24 = (config.schedule as unknown as Record<string, unknown>)['time24']
-
-            if (useAmPm === true && (!time12 || (typeof time12 === 'string' && time12.trim() === ''))) {
-                issues.push({
-                    severity: 'error',
-                    field: 'schedule.time12',
-                    message: 'useAmPm is true but time12 is empty'
-                })
-            }
-            if (useAmPm === false && (!time24 || (typeof time24 === 'string' && time24.trim() === ''))) {
-                issues.push({
-                    severity: 'error',
-                    field: 'schedule.time24',
-                    message: 'useAmPm is false but time24 is empty'
-                })
-            }
-        }
+        const legacySchedule = (config as unknown as { schedule?: unknown }).schedule
+        if (legacySchedule !== undefined) {
+            issues.push({
+                severity: 'warning',
+                field: 'schedule',
+                message: 'Legacy schedule block detected.',
+                suggestion: 'Remove schedule.* entries and configure OS-level scheduling (docs/schedule.md).'
+            })
+        }
+
+        if (config.legacy?.diagnosticsConfigured) {
+            issues.push({
+                severity: 'warning',
+                field: 'diagnostics',
+                message: 'Unrecognized diagnostics.* block in config.jsonc.',
+                suggestion: 'Delete the diagnostics section; logs and webhooks now cover troubleshooting.'
+            })
+        }
+
+        if (config.legacy?.analyticsConfigured) {
+            issues.push({
+                severity: 'warning',
+                field: 'analytics',
+                message: 'Unrecognized analytics.* block in config.jsonc.',
+                suggestion: 'Delete the analytics section because those values are ignored.'
+            })
+        }
 
         // Check workers
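Rather than validating schedule.time12/time24 or diagnostics limits, the validator now only reports that such blocks are still present. A standalone sketch of that detection logic (not the real ConfigValidator API; the Issue shape here is simplified):

```ts
// Minimal sketch of the legacy-block detection above; not the real ConfigValidator API.
type Issue = { severity: 'warning' | 'error'; field: string; message: string; suggestion?: string }

function warnOnLegacyBlocks(config: Record<string, unknown>): Issue[] {
    const issues: Issue[] = []
    if (config['schedule'] !== undefined) {
        issues.push({
            severity: 'warning',
            field: 'schedule',
            message: 'Legacy schedule block detected.',
            suggestion: 'Remove schedule.* entries and configure OS-level scheduling (docs/schedule.md).'
        })
    }
    return issues
}

// A config that still carries the old block produces one warning; a clean one produces none.
console.log(warnOnLegacyBlocks({ schedule: { enabled: true, time24: '09:00' } }).length) // 1
console.log(warnOnLegacyBlocks({ headless: true }).length)                               // 0
```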
@@ -248,27 +245,6 @@ export class ConfigValidator {
             }
         }
 
-        // Check diagnostics
-        if (config.diagnostics?.enabled) {
-            const maxPerRun = config.diagnostics.maxPerRun || 2
-            if (maxPerRun > 20) {
-                issues.push({
-                    severity: 'warning',
-                    field: 'diagnostics.maxPerRun',
-                    message: 'Very high maxPerRun may fill disk quickly'
-                })
-            }
-
-            const retention = config.diagnostics.retentionDays || 7
-            if (retention > 90) {
-                issues.push({
-                    severity: 'info',
-                    field: 'diagnostics.retentionDays',
-                    message: 'Long retention period - monitor disk usage'
-                })
-            }
-        }
-
         const valid = !issues.some(i => i.severity === 'error')
         return { valid, issues }
     }
@@ -1,74 +1,3 @@
-import path from 'path'
-import fs from 'fs'
-import type { Page } from 'rebrowser-playwright'
-import type { MicrosoftRewardsBot } from '../index'
-
-export type DiagnosticsScope = 'default' | 'security'
-
-export interface DiagnosticsOptions {
-    scope?: DiagnosticsScope
-    skipSlot?: boolean
-    force?: boolean
-}
-
-export async function captureDiagnostics(bot: MicrosoftRewardsBot, page: Page, rawLabel: string, options?: DiagnosticsOptions): Promise<void> {
-    try {
-        const scope: DiagnosticsScope = options?.scope ?? 'default'
-        const cfg = bot.config?.diagnostics ?? {}
-        const forceCapture = options?.force === true || scope === 'security'
-        if (!forceCapture && cfg.enabled === false) return
-
-        if (scope === 'default') {
-            const maxPerRun = typeof cfg.maxPerRun === 'number' ? cfg.maxPerRun : 8
-            if (!options?.skipSlot && !bot.tryReserveDiagSlot(maxPerRun)) return
-        }
-
-        const saveScreenshot = scope === 'security' ? true : cfg.saveScreenshot !== false
-        const saveHtml = scope === 'security' ? true : cfg.saveHtml !== false
-        if (!saveScreenshot && !saveHtml) return
-
-        const safeLabel = rawLabel.replace(/[^a-z0-9-_]/gi, '_').slice(0, 64) || 'capture'
-        const now = new Date()
-        const timestamp = `${String(now.getHours()).padStart(2, '0')}${String(now.getMinutes()).padStart(2, '0')}${String(now.getSeconds()).padStart(2, '0')}`
-
-        let dir: string
-        if (scope === 'security') {
-            const base = path.join(process.cwd(), 'diagnostics', 'security-incidents')
-            fs.mkdirSync(base, { recursive: true })
-            const sub = `${now.toISOString().replace(/[:.]/g, '-')}-${safeLabel}`
-            dir = path.join(base, sub)
-            fs.mkdirSync(dir, { recursive: true })
-        } else {
-            const day = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`
-            dir = path.join(process.cwd(), 'reports', day)
-            fs.mkdirSync(dir, { recursive: true })
-        }
-
-        if (saveScreenshot) {
-            const shotName = scope === 'security' ? 'page.png' : `${timestamp}_${safeLabel}.png`
-            const shotPath = path.join(dir, shotName)
-            await page.screenshot({ path: shotPath }).catch(() => {})
-            if (scope === 'security') {
-                bot.log(bot.isMobile, 'DIAG', `Saved security screenshot to ${shotPath}`)
-            } else {
-                bot.log(bot.isMobile, 'DIAG', `Saved diagnostics screenshot to ${shotPath}`)
-            }
-        }
-
-        if (saveHtml) {
-            const htmlName = scope === 'security' ? 'page.html' : `${timestamp}_${safeLabel}.html`
-            const htmlPath = path.join(dir, htmlName)
-            try {
-                const html = await page.content()
-                await fs.promises.writeFile(htmlPath, html, 'utf-8')
-                if (scope === 'security') {
-                    bot.log(bot.isMobile, 'DIAG', `Saved security HTML to ${htmlPath}`)
-                }
-            } catch {
-                /* ignore */
-            }
-        }
-    } catch (error) {
-        bot.log(bot.isMobile, 'DIAG', `Failed to capture diagnostics: ${error instanceof Error ? error.message : error}`, 'warn')
-    }
-}
+// Placeholder kept for backward compatibility with older imports.
+// New code should handle troubleshooting through logging and webhooks instead.
+export {}
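The removed helper wrote screenshots and HTML snapshots under reports/<yyyy-mm-dd>/ (or diagnostics/security-incidents/ for security captures), naming each artifact from a sanitized label plus an HHmmss timestamp. A standalone sketch of that naming scheme, with a hypothetical label:

```ts
// Standalone sketch of how the removed helper named its artifacts; the label is hypothetical.
const rawLabel = 'login/failure #3'
const safeLabel = rawLabel.replace(/[^a-z0-9-_]/gi, '_').slice(0, 64) || 'capture'

const now = new Date()
const timestamp = `${String(now.getHours()).padStart(2, '0')}${String(now.getMinutes()).padStart(2, '0')}${String(now.getSeconds()).padStart(2, '0')}`

// e.g. "142305_login_failure__3.png", which was placed under reports/<yyyy-mm-dd>/
console.log(`${timestamp}_${safeLabel}.png`)
```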
@@ -5,7 +5,7 @@ import path from 'path'
 
 
 import { Account } from '../interface/Account'
-import { Config, ConfigSaveFingerprint } from '../interface/Config'
+import { Config, ConfigLegacyFlags, ConfigSaveFingerprint } from '../interface/Config'
 
 let configCache: Config
 let configSourcePath = ''
@@ -168,15 +168,6 @@ function normalizeConfig(raw: unknown): Config {
         riskThreshold: typeof riskRaw.riskThreshold === 'number' ? riskRaw.riskThreshold : undefined
     } : undefined
 
-    const analyticsRaw = (n.analytics ?? {}) as Record<string, unknown>
-    const hasAnalyticsCfg = Object.keys(analyticsRaw).length > 0
-    const analytics = hasAnalyticsCfg ? {
-        enabled: analyticsRaw.enabled === true,
-        retentionDays: typeof analyticsRaw.retentionDays === 'number' ? analyticsRaw.retentionDays : undefined,
-        exportMarkdown: analyticsRaw.exportMarkdown === true,
-        webhookSummary: analyticsRaw.webhookSummary === true
-    } : undefined
-
     const queryDiversityRaw = (n.queryDiversity ?? {}) as Record<string, unknown>
     const hasQueryCfg = Object.keys(queryDiversityRaw).length > 0
     const queryDiversity = hasQueryCfg ? {
@@ -197,6 +188,15 @@ function normalizeConfig(raw: unknown): Config {
         skipCompletedAccounts: jobStateRaw.skipCompletedAccounts !== false
     }
 
+    const legacy: ConfigLegacyFlags = {}
+    if (typeof n.diagnostics !== 'undefined') {
+        legacy.diagnosticsConfigured = true
+    }
+    if (typeof n.analytics !== 'undefined') {
+        legacy.analyticsConfigured = true
+    }
+    const hasLegacyFlags = legacy.diagnosticsConfigured === true || legacy.analyticsConfigured === true
+
     const cfg: Config = {
         baseURL: n.baseURL ?? 'https://rewards.bing.com',
         sessionPath: n.sessionPath ?? 'sessions',
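ConfigLegacyFlags itself is not shown in this diff; judging from its usage here it is presumably a pair of optional booleans. A standalone sketch of the detection step under that assumption:

```ts
// Shape assumed from how the flags are used in this diff; the real interface may differ.
interface ConfigLegacyFlags {
    diagnosticsConfigured?: boolean
    analyticsConfigured?: boolean
}

// Standalone sketch of the detection step performed in normalizeConfig.
function detectLegacyFlags(raw: Record<string, unknown>): ConfigLegacyFlags | undefined {
    const legacy: ConfigLegacyFlags = {}
    if (typeof raw.diagnostics !== 'undefined') legacy.diagnosticsConfigured = true
    if (typeof raw.analytics !== 'undefined') legacy.analyticsConfigured = true
    const hasLegacyFlags = legacy.diagnosticsConfigured === true || legacy.analyticsConfigured === true
    return hasLegacyFlags ? legacy : undefined
}

console.log(detectLegacyFlags({ analytics: { enabled: true } })) // { analyticsConfigured: true }
console.log(detectLegacyFlags({ headless: true }))               // undefined
```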
@@ -219,17 +219,15 @@ function normalizeConfig(raw: unknown): Config {
         webhook,
         conclusionWebhook,
         ntfy,
-        diagnostics: n.diagnostics,
         update: n.update,
-        schedule: n.schedule,
         passesPerRun: passesPerRun,
         vacation: n.vacation,
         buyMode: { enabled: buyModeEnabled, maxMinutes: buyModeMax },
         crashRecovery: n.crashRecovery || {},
         riskManagement,
-        analytics,
         dryRun,
-        queryDiversity
+        queryDiversity,
+        legacy: hasLegacyFlags ? legacy : undefined
     }
 
     return cfg
@@ -32,7 +32,6 @@ export class StartupValidator {
         this.validateEnvironment()
         this.validateFileSystem(config)
         this.validateBrowserSettings(config)
-        this.validateScheduleSettings(config)
         this.validateNetworkSettings(config)
         this.validateWorkerSettings(config)
         this.validateSearchSettings(config)
@@ -173,6 +172,16 @@ export class StartupValidator {
     }
 
     private validateConfig(config: Config): void {
+        const maybeSchedule = (config as unknown as { schedule?: unknown }).schedule
+        if (maybeSchedule !== undefined) {
+            this.addWarning(
+                'config',
+                'Legacy schedule settings detected in config.jsonc.',
+                'Remove schedule.* entries and use your operating system scheduler.',
+                'docs/schedule.md'
+            )
+        }
+
         // Headless mode in Docker
         if (process.env.FORCE_HEADLESS === '1' && config.headless === false) {
             this.addWarning(
@@ -330,20 +339,13 @@ export class StartupValidator {
             }
         }
 
-        // Check diagnostics directory if enabled
-        if (config.diagnostics?.enabled === true) {
-            const diagPath = path.join(process.cwd(), 'diagnostics')
-            if (!fs.existsSync(diagPath)) {
-                try {
-                    fs.mkdirSync(diagPath, { recursive: true })
-                } catch (error) {
-                    this.addWarning(
-                        'filesystem',
-                        'Cannot create diagnostics directory',
-                        'Screenshots and HTML snapshots will not be saved'
-                    )
-                }
-            }
-        }
+        if (config.legacy?.diagnosticsConfigured || config.legacy?.analyticsConfigured) {
+            this.addWarning(
+                'filesystem',
+                'Unrecognized diagnostics/analytics block detected in config.jsonc',
+                'Remove those sections to keep the file aligned with the current schema.',
+                'docs/diagnostics.md'
+            )
+        }
     }
 }
@@ -368,60 +370,6 @@ export class StartupValidator {
         }
     }
 
-    private validateScheduleSettings(config: Config): void {
-        if (config.schedule?.enabled === true) {
-            // Time format validation
-            const schedRec = config.schedule as Record<string, unknown>
-            const useAmPm = schedRec.useAmPm
-            const time12 = typeof schedRec.time12 === 'string' ? schedRec.time12 : ''
-            const time24 = typeof schedRec.time24 === 'string' ? schedRec.time24 : ''
-
-            if (useAmPm === true && (!time12 || time12.trim() === '')) {
-                this.addError(
-                    'schedule',
-                    'Schedule enabled with useAmPm=true but time12 is missing',
-                    'Add time12 field (e.g., "9:00 AM") or set useAmPm=false',
-                    'docs/schedule.md'
-                )
-            }
-
-            if (useAmPm === false && (!time24 || time24.trim() === '')) {
-                this.addError(
-                    'schedule',
-                    'Schedule enabled with useAmPm=false but time24 is missing',
-                    'Add time24 field (e.g., "09:00") or set useAmPm=true',
-                    'docs/schedule.md'
-                )
-            }
-
-            // Timezone validation
-            const tz = config.schedule.timeZone || 'UTC'
-            try {
-                Intl.DateTimeFormat(undefined, { timeZone: tz })
-            } catch {
-                this.addError(
-                    'schedule',
-                    `Invalid timezone: ${tz}`,
-                    'Use a valid IANA timezone (e.g., "America/New_York", "Europe/Paris")',
-                    'docs/schedule.md'
-                )
-            }
-
-            // Vacation mode check
-            if (config.vacation?.enabled === true) {
-                if (config.vacation.minDays && config.vacation.maxDays) {
-                    if (config.vacation.minDays > config.vacation.maxDays) {
-                        this.addError(
-                            'schedule',
-                            `Vacation minDays (${config.vacation.minDays}) > maxDays (${config.vacation.maxDays})`,
-                            'Set minDays <= maxDays (e.g., minDays: 2, maxDays: 4)'
-                        )
-                    }
-                }
-            }
-        }
-    }
-
     private validateNetworkSettings(config: Config): void {
         // Webhook validation
         if (config.webhook?.enabled === true) {
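The timezone check removed here is the standard Intl trick, which is still handy if you wrap the bot in your own OS-level scheduling script; a standalone sketch:

```ts
// Standalone sketch of the timezone check the removed validator relied on.
// Intl.DateTimeFormat throws a RangeError for unknown IANA time zone names.
function isValidTimeZone(tz: string): boolean {
    try {
        Intl.DateTimeFormat(undefined, { timeZone: tz })
        return true
    } catch {
        return false
    }
}

console.log(isValidTimeZone('Europe/Paris'))  // true
console.log(isValidTimeZone('Mars/Olympus'))  // false
```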
@@ -651,8 +599,6 @@ export class StartupValidator {
             )
         }
 
-        // Removed diagnostics warning - reports/ folder with masked emails is safe for debugging
-
         // Proxy exposure check
         if (config.proxy?.proxyGoogleTrends === false && config.proxy?.proxyBingTerms === false) {
             this.addWarning(